// Doxygen-generated listing of llvm/IR/Instructions.h from LLVM 13.0.0git.
// (Site navigation text removed; embedded listing line numbers are scrape
// artifacts, not part of the source.)
1 //===- llvm/Instructions.h - Instruction subclass definitions ---*- C++ -*-===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file exposes the class definitions of all of the subclasses of the
10 // Instruction class. This is meant to be an easy way to get access to all
11 // instruction subclasses.
12 //
13 //===----------------------------------------------------------------------===//
14 
15 #ifndef LLVM_IR_INSTRUCTIONS_H
16 #define LLVM_IR_INSTRUCTIONS_H
17 
18 #include "llvm/ADT/ArrayRef.h"
19 #include "llvm/ADT/Bitfields.h"
20 #include "llvm/ADT/None.h"
21 #include "llvm/ADT/STLExtras.h"
22 #include "llvm/ADT/SmallVector.h"
23 #include "llvm/ADT/StringRef.h"
24 #include "llvm/ADT/Twine.h"
25 #include "llvm/ADT/iterator.h"
27 #include "llvm/IR/Attributes.h"
28 #include "llvm/IR/BasicBlock.h"
29 #include "llvm/IR/CallingConv.h"
30 #include "llvm/IR/CFG.h"
31 #include "llvm/IR/Constant.h"
32 #include "llvm/IR/DerivedTypes.h"
33 #include "llvm/IR/Function.h"
34 #include "llvm/IR/InstrTypes.h"
35 #include "llvm/IR/Instruction.h"
36 #include "llvm/IR/OperandTraits.h"
37 #include "llvm/IR/Type.h"
38 #include "llvm/IR/Use.h"
39 #include "llvm/IR/User.h"
40 #include "llvm/IR/Value.h"
42 #include "llvm/Support/Casting.h"
44 #include <cassert>
45 #include <cstddef>
46 #include <cstdint>
47 #include <iterator>
48 
49 namespace llvm {
50 
51 class APInt;
52 class ConstantInt;
53 class DataLayout;
54 class LLVMContext;
55 
56 //===----------------------------------------------------------------------===//
57 // AllocaInst Class
58 //===----------------------------------------------------------------------===//
59 
60 /// an instruction to allocate memory on the stack
61 class AllocaInst : public UnaryInstruction {
62  Type *AllocatedType;
63 
64  using AlignmentField = AlignmentBitfieldElementT<0>;
65  using UsedWithInAllocaField = BoolBitfieldElementT<AlignmentField::NextBit>;
67  static_assert(Bitfield::areContiguous<AlignmentField, UsedWithInAllocaField,
68  SwiftErrorField>(),
69  "Bitfields must be contiguous");
70 
71 protected:
72  // Note: Instruction needs to be a friend here to call cloneImpl.
73  friend class Instruction;
74 
75  AllocaInst *cloneImpl() const;
76 
77 public:
78  explicit AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
79  const Twine &Name, Instruction *InsertBefore);
80  AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
81  const Twine &Name, BasicBlock *InsertAtEnd);
82 
83  AllocaInst(Type *Ty, unsigned AddrSpace, const Twine &Name,
84  Instruction *InsertBefore);
85  AllocaInst(Type *Ty, unsigned AddrSpace,
86  const Twine &Name, BasicBlock *InsertAtEnd);
87 
88  AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize, Align Align,
89  const Twine &Name = "", Instruction *InsertBefore = nullptr);
90  AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize, Align Align,
91  const Twine &Name, BasicBlock *InsertAtEnd);
92 
93  /// Return true if there is an allocation size parameter to the allocation
94  /// instruction that is not 1.
95  bool isArrayAllocation() const;
96 
97  /// Get the number of elements allocated. For a simple allocation of a single
98  /// element, this will return a constant 1 value.
99  const Value *getArraySize() const { return getOperand(0); }
100  Value *getArraySize() { return getOperand(0); }
101 
102  /// Overload to return most specific pointer type.
103  PointerType *getType() const {
104  return cast<PointerType>(Instruction::getType());
105  }
106 
107  /// Get allocation size in bits. Returns None if size can't be determined,
108  /// e.g. in case of a VLA.
110 
111  /// Return the type that is being allocated by the instruction.
112  Type *getAllocatedType() const { return AllocatedType; }
113  /// for use only in special circumstances that need to generically
114  /// transform a whole instruction (eg: IR linking and vectorization).
115  void setAllocatedType(Type *Ty) { AllocatedType = Ty; }
116 
117  /// Return the alignment of the memory that is being allocated by the
118  /// instruction.
119  Align getAlign() const {
120  return Align(1ULL << getSubclassData<AlignmentField>());
121  }
122 
124  setSubclassData<AlignmentField>(Log2(Align));
125  }
126 
127  // FIXME: Remove this one transition to Align is over.
128  unsigned getAlignment() const { return getAlign().value(); }
129 
130  /// Return true if this alloca is in the entry block of the function and is a
131  /// constant size. If so, the code generator will fold it into the
132  /// prolog/epilog code, so it is basically free.
133  bool isStaticAlloca() const;
134 
135  /// Return true if this alloca is used as an inalloca argument to a call. Such
136  /// allocas are never considered static even if they are in the entry block.
137  bool isUsedWithInAlloca() const {
138  return getSubclassData<UsedWithInAllocaField>();
139  }
140 
141  /// Specify whether this alloca is used to represent the arguments to a call.
142  void setUsedWithInAlloca(bool V) {
143  setSubclassData<UsedWithInAllocaField>(V);
144  }
145 
146  /// Return true if this alloca is used as a swifterror argument to a call.
147  bool isSwiftError() const { return getSubclassData<SwiftErrorField>(); }
148  /// Specify whether this alloca is used to represent a swifterror.
149  void setSwiftError(bool V) { setSubclassData<SwiftErrorField>(V); }
150 
151  // Methods for support type inquiry through isa, cast, and dyn_cast:
152  static bool classof(const Instruction *I) {
153  return (I->getOpcode() == Instruction::Alloca);
154  }
155  static bool classof(const Value *V) {
156  return isa<Instruction>(V) && classof(cast<Instruction>(V));
157  }
158 
159 private:
160  // Shadow Instruction::setInstructionSubclassData with a private forwarding
161  // method so that subclasses cannot accidentally use it.
162  template <typename Bitfield>
163  void setSubclassData(typename Bitfield::Type Value) {
164  Instruction::setSubclassData<Bitfield>(Value);
165  }
166 };
167 
168 //===----------------------------------------------------------------------===//
169 // LoadInst Class
170 //===----------------------------------------------------------------------===//
171 
172 /// An instruction for reading from memory. This uses the SubclassData field in
173 /// Value to store whether or not the load is volatile.
174 class LoadInst : public UnaryInstruction {
175  using VolatileField = BoolBitfieldElementT<0>;
178  static_assert(
179  Bitfield::areContiguous<VolatileField, AlignmentField, OrderingField>(),
180  "Bitfields must be contiguous");
181 
182  void AssertOK();
183 
184 protected:
185  // Note: Instruction needs to be a friend here to call cloneImpl.
186  friend class Instruction;
187 
188  LoadInst *cloneImpl() const;
189 
190 public:
191  LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr,
192  Instruction *InsertBefore);
193  LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, BasicBlock *InsertAtEnd);
194  LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
195  Instruction *InsertBefore);
196  LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
197  BasicBlock *InsertAtEnd);
198  LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
199  Align Align, Instruction *InsertBefore = nullptr);
200  LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
201  Align Align, BasicBlock *InsertAtEnd);
202  LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
203  Align Align, AtomicOrdering Order,
205  Instruction *InsertBefore = nullptr);
206  LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
208  BasicBlock *InsertAtEnd);
209 
210  /// Return true if this is a load from a volatile memory location.
211  bool isVolatile() const { return getSubclassData<VolatileField>(); }
212 
213  /// Specify whether this is a volatile load or not.
214  void setVolatile(bool V) { setSubclassData<VolatileField>(V); }
215 
216  /// Return the alignment of the access that is being performed.
217  /// FIXME: Remove this function once transition to Align is over.
218  /// Use getAlign() instead.
219  unsigned getAlignment() const { return getAlign().value(); }
220 
221  /// Return the alignment of the access that is being performed.
222  Align getAlign() const {
223  return Align(1ULL << (getSubclassData<AlignmentField>()));
224  }
225 
227  setSubclassData<AlignmentField>(Log2(Align));
228  }
229 
230  /// Returns the ordering constraint of this load instruction.
232  return getSubclassData<OrderingField>();
233  }
234  /// Sets the ordering constraint of this load instruction. May not be Release
235  /// or AcquireRelease.
236  void setOrdering(AtomicOrdering Ordering) {
237  setSubclassData<OrderingField>(Ordering);
238  }
239 
240  /// Returns the synchronization scope ID of this load instruction.
242  return SSID;
243  }
244 
245  /// Sets the synchronization scope ID of this load instruction.
247  this->SSID = SSID;
248  }
249 
250  /// Sets the ordering constraint and the synchronization scope ID of this load
251  /// instruction.
252  void setAtomic(AtomicOrdering Ordering,
254  setOrdering(Ordering);
255  setSyncScopeID(SSID);
256  }
257 
258  bool isSimple() const { return !isAtomic() && !isVolatile(); }
259 
260  bool isUnordered() const {
261  return (getOrdering() == AtomicOrdering::NotAtomic ||
263  !isVolatile();
264  }
265 
267  const Value *getPointerOperand() const { return getOperand(0); }
268  static unsigned getPointerOperandIndex() { return 0U; }
270 
271  /// Returns the address space of the pointer operand.
272  unsigned getPointerAddressSpace() const {
274  }
275 
276  // Methods for support type inquiry through isa, cast, and dyn_cast:
277  static bool classof(const Instruction *I) {
278  return I->getOpcode() == Instruction::Load;
279  }
280  static bool classof(const Value *V) {
281  return isa<Instruction>(V) && classof(cast<Instruction>(V));
282  }
283 
284 private:
285  // Shadow Instruction::setInstructionSubclassData with a private forwarding
286  // method so that subclasses cannot accidentally use it.
287  template <typename Bitfield>
288  void setSubclassData(typename Bitfield::Type Value) {
289  Instruction::setSubclassData<Bitfield>(Value);
290  }
291 
292  /// The synchronization scope ID of this load instruction. Not quite enough
293  /// room in SubClassData for everything, so synchronization scope ID gets its
294  /// own field.
295  SyncScope::ID SSID;
296 };
297 
298 //===----------------------------------------------------------------------===//
299 // StoreInst Class
300 //===----------------------------------------------------------------------===//
301 
302 /// An instruction for storing to memory.
303 class StoreInst : public Instruction {
304  using VolatileField = BoolBitfieldElementT<0>;
307  static_assert(
308  Bitfield::areContiguous<VolatileField, AlignmentField, OrderingField>(),
309  "Bitfields must be contiguous");
310 
311  void AssertOK();
312 
313 protected:
314  // Note: Instruction needs to be a friend here to call cloneImpl.
315  friend class Instruction;
316 
317  StoreInst *cloneImpl() const;
318 
319 public:
320  StoreInst(Value *Val, Value *Ptr, Instruction *InsertBefore);
321  StoreInst(Value *Val, Value *Ptr, BasicBlock *InsertAtEnd);
322  StoreInst(Value *Val, Value *Ptr, bool isVolatile, Instruction *InsertBefore);
323  StoreInst(Value *Val, Value *Ptr, bool isVolatile, BasicBlock *InsertAtEnd);
324  StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align,
325  Instruction *InsertBefore = nullptr);
326  StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align,
327  BasicBlock *InsertAtEnd);
328  StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align,
330  Instruction *InsertBefore = nullptr);
331  StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align,
332  AtomicOrdering Order, SyncScope::ID SSID, BasicBlock *InsertAtEnd);
333 
334  // allocate space for exactly two operands
335  void *operator new(size_t s) {
336  return User::operator new(s, 2);
337  }
338 
339  /// Return true if this is a store to a volatile memory location.
340  bool isVolatile() const { return getSubclassData<VolatileField>(); }
341 
342  /// Specify whether this is a volatile store or not.
343  void setVolatile(bool V) { setSubclassData<VolatileField>(V); }
344 
345  /// Transparently provide more efficient getOperand methods.
347 
348  /// Return the alignment of the access that is being performed
349  /// FIXME: Remove this function once transition to Align is over.
350  /// Use getAlign() instead.
351  unsigned getAlignment() const { return getAlign().value(); }
352 
353  Align getAlign() const {
354  return Align(1ULL << (getSubclassData<AlignmentField>()));
355  }
356 
358  setSubclassData<AlignmentField>(Log2(Align));
359  }
360 
361  /// Returns the ordering constraint of this store instruction.
363  return getSubclassData<OrderingField>();
364  }
365 
366  /// Sets the ordering constraint of this store instruction. May not be
367  /// Acquire or AcquireRelease.
368  void setOrdering(AtomicOrdering Ordering) {
369  setSubclassData<OrderingField>(Ordering);
370  }
371 
372  /// Returns the synchronization scope ID of this store instruction.
374  return SSID;
375  }
376 
377  /// Sets the synchronization scope ID of this store instruction.
379  this->SSID = SSID;
380  }
381 
382  /// Sets the ordering constraint and the synchronization scope ID of this
383  /// store instruction.
384  void setAtomic(AtomicOrdering Ordering,
386  setOrdering(Ordering);
387  setSyncScopeID(SSID);
388  }
389 
390  bool isSimple() const { return !isAtomic() && !isVolatile(); }
391 
392  bool isUnordered() const {
393  return (getOrdering() == AtomicOrdering::NotAtomic ||
395  !isVolatile();
396  }
397 
398  Value *getValueOperand() { return getOperand(0); }
399  const Value *getValueOperand() const { return getOperand(0); }
400 
402  const Value *getPointerOperand() const { return getOperand(1); }
403  static unsigned getPointerOperandIndex() { return 1U; }
405 
406  /// Returns the address space of the pointer operand.
407  unsigned getPointerAddressSpace() const {
409  }
410 
411  // Methods for support type inquiry through isa, cast, and dyn_cast:
412  static bool classof(const Instruction *I) {
413  return I->getOpcode() == Instruction::Store;
414  }
415  static bool classof(const Value *V) {
416  return isa<Instruction>(V) && classof(cast<Instruction>(V));
417  }
418 
419 private:
420  // Shadow Instruction::setInstructionSubclassData with a private forwarding
421  // method so that subclasses cannot accidentally use it.
422  template <typename Bitfield>
423  void setSubclassData(typename Bitfield::Type Value) {
424  Instruction::setSubclassData<Bitfield>(Value);
425  }
426 
427  /// The synchronization scope ID of this store instruction. Not quite enough
428  /// room in SubClassData for everything, so synchronization scope ID gets its
429  /// own field.
430  SyncScope::ID SSID;
431 };
432 
433 template <>
434 struct OperandTraits<StoreInst> : public FixedNumOperandTraits<StoreInst, 2> {
435 };
436 
438 
439 //===----------------------------------------------------------------------===//
440 // FenceInst Class
441 //===----------------------------------------------------------------------===//
442 
443 /// An instruction for ordering other memory operations.
444 class FenceInst : public Instruction {
445  using OrderingField = AtomicOrderingBitfieldElementT<0>;
446 
447  void Init(AtomicOrdering Ordering, SyncScope::ID SSID);
448 
449 protected:
450  // Note: Instruction needs to be a friend here to call cloneImpl.
451  friend class Instruction;
452 
453  FenceInst *cloneImpl() const;
454 
455 public:
456  // Ordering may only be Acquire, Release, AcquireRelease, or
457  // SequentiallyConsistent.
460  Instruction *InsertBefore = nullptr);
462  BasicBlock *InsertAtEnd);
463 
464  // allocate space for exactly zero operands
465  void *operator new(size_t s) {
466  return User::operator new(s, 0);
467  }
468 
469  /// Returns the ordering constraint of this fence instruction.
471  return getSubclassData<OrderingField>();
472  }
473 
474  /// Sets the ordering constraint of this fence instruction. May only be
475  /// Acquire, Release, AcquireRelease, or SequentiallyConsistent.
476  void setOrdering(AtomicOrdering Ordering) {
477  setSubclassData<OrderingField>(Ordering);
478  }
479 
480  /// Returns the synchronization scope ID of this fence instruction.
482  return SSID;
483  }
484 
485  /// Sets the synchronization scope ID of this fence instruction.
487  this->SSID = SSID;
488  }
489 
490  // Methods for support type inquiry through isa, cast, and dyn_cast:
491  static bool classof(const Instruction *I) {
492  return I->getOpcode() == Instruction::Fence;
493  }
494  static bool classof(const Value *V) {
495  return isa<Instruction>(V) && classof(cast<Instruction>(V));
496  }
497 
498 private:
499  // Shadow Instruction::setInstructionSubclassData with a private forwarding
500  // method so that subclasses cannot accidentally use it.
501  template <typename Bitfield>
502  void setSubclassData(typename Bitfield::Type Value) {
503  Instruction::setSubclassData<Bitfield>(Value);
504  }
505 
506  /// The synchronization scope ID of this fence instruction. Not quite enough
507  /// room in SubClassData for everything, so synchronization scope ID gets its
508  /// own field.
509  SyncScope::ID SSID;
510 };
511 
512 //===----------------------------------------------------------------------===//
513 // AtomicCmpXchgInst Class
514 //===----------------------------------------------------------------------===//
515 
516 /// An instruction that atomically checks whether a
517 /// specified value is in a memory location, and, if it is, stores a new value
518 /// there. The value returned by this instruction is a pair containing the
519 /// original value as first element, and an i1 indicating success (true) or
520 /// failure (false) as second element.
521 ///
523  void Init(Value *Ptr, Value *Cmp, Value *NewVal, Align Align,
524  AtomicOrdering SuccessOrdering, AtomicOrdering FailureOrdering,
525  SyncScope::ID SSID);
526 
527  template <unsigned Offset>
528  using AtomicOrderingBitfieldElement =
531 
532 protected:
533  // Note: Instruction needs to be a friend here to call cloneImpl.
534  friend class Instruction;
535 
536  AtomicCmpXchgInst *cloneImpl() const;
537 
538 public:
539  AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal, Align Alignment,
540  AtomicOrdering SuccessOrdering,
541  AtomicOrdering FailureOrdering, SyncScope::ID SSID,
542  Instruction *InsertBefore = nullptr);
543  AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal, Align Alignment,
544  AtomicOrdering SuccessOrdering,
545  AtomicOrdering FailureOrdering, SyncScope::ID SSID,
546  BasicBlock *InsertAtEnd);
547 
548  // allocate space for exactly three operands
549  void *operator new(size_t s) {
550  return User::operator new(s, 3);
551  }
552 
555  using SuccessOrderingField =
557  using FailureOrderingField =
559  using AlignmentField =
561  static_assert(
564  "Bitfields must be contiguous");
565 
566  /// Return the alignment of the memory that is being allocated by the
567  /// instruction.
568  Align getAlign() const {
569  return Align(1ULL << getSubclassData<AlignmentField>());
570  }
571 
573  setSubclassData<AlignmentField>(Log2(Align));
574  }
575 
576  /// Return true if this is a cmpxchg from a volatile memory
577  /// location.
578  ///
579  bool isVolatile() const { return getSubclassData<VolatileField>(); }
580 
581  /// Specify whether this is a volatile cmpxchg.
582  ///
583  void setVolatile(bool V) { setSubclassData<VolatileField>(V); }
584 
585  /// Return true if this cmpxchg may spuriously fail.
586  bool isWeak() const { return getSubclassData<WeakField>(); }
587 
588  void setWeak(bool IsWeak) { setSubclassData<WeakField>(IsWeak); }
589 
590  /// Transparently provide more efficient getOperand methods.
592 
593  /// Returns the success ordering constraint of this cmpxchg instruction.
595  return getSubclassData<SuccessOrderingField>();
596  }
597 
598  /// Sets the success ordering constraint of this cmpxchg instruction.
600  assert(Ordering != AtomicOrdering::NotAtomic &&
601  "CmpXchg instructions can only be atomic.");
602  setSubclassData<SuccessOrderingField>(Ordering);
603  }
604 
605  /// Returns the failure ordering constraint of this cmpxchg instruction.
607  return getSubclassData<FailureOrderingField>();
608  }
609 
610  /// Sets the failure ordering constraint of this cmpxchg instruction.
612  assert(Ordering != AtomicOrdering::NotAtomic &&
613  "CmpXchg instructions can only be atomic.");
614  setSubclassData<FailureOrderingField>(Ordering);
615  }
616 
617  /// Returns the synchronization scope ID of this cmpxchg instruction.
619  return SSID;
620  }
621 
622  /// Sets the synchronization scope ID of this cmpxchg instruction.
624  this->SSID = SSID;
625  }
626 
628  const Value *getPointerOperand() const { return getOperand(0); }
629  static unsigned getPointerOperandIndex() { return 0U; }
630 
632  const Value *getCompareOperand() const { return getOperand(1); }
633 
635  const Value *getNewValOperand() const { return getOperand(2); }
636 
637  /// Returns the address space of the pointer operand.
638  unsigned getPointerAddressSpace() const {
640  }
641 
642  /// Returns the strongest permitted ordering on failure, given the
643  /// desired ordering on success.
644  ///
645  /// If the comparison in a cmpxchg operation fails, there is no atomic store
646  /// so release semantics cannot be provided. So this function drops explicit
647  /// Release requests from the AtomicOrdering. A SequentiallyConsistent
648  /// operation would remain SequentiallyConsistent.
649  static AtomicOrdering
651  switch (SuccessOrdering) {
652  default:
653  llvm_unreachable("invalid cmpxchg success ordering");
662  }
663  }
664 
665  // Methods for support type inquiry through isa, cast, and dyn_cast:
666  static bool classof(const Instruction *I) {
667  return I->getOpcode() == Instruction::AtomicCmpXchg;
668  }
669  static bool classof(const Value *V) {
670  return isa<Instruction>(V) && classof(cast<Instruction>(V));
671  }
672 
673 private:
674  // Shadow Instruction::setInstructionSubclassData with a private forwarding
675  // method so that subclasses cannot accidentally use it.
676  template <typename Bitfield>
677  void setSubclassData(typename Bitfield::Type Value) {
678  Instruction::setSubclassData<Bitfield>(Value);
679  }
680 
681  /// The synchronization scope ID of this cmpxchg instruction. Not quite
682  /// enough room in SubClassData for everything, so synchronization scope ID
683  /// gets its own field.
684  SyncScope::ID SSID;
685 };
686 
687 template <>
689  public FixedNumOperandTraits<AtomicCmpXchgInst, 3> {
690 };
691 
693 
694 //===----------------------------------------------------------------------===//
695 // AtomicRMWInst Class
696 //===----------------------------------------------------------------------===//
697 
698 /// an instruction that atomically reads a memory location,
699 /// combines it with another value, and then stores the result back. Returns
700 /// the old value.
701 ///
702 class AtomicRMWInst : public Instruction {
703 protected:
704  // Note: Instruction needs to be a friend here to call cloneImpl.
705  friend class Instruction;
706 
707  AtomicRMWInst *cloneImpl() const;
708 
709 public:
710  /// This enumeration lists the possible modifications atomicrmw can make. In
711  /// the descriptions, 'p' is the pointer to the instruction's memory location,
712  /// 'old' is the initial value of *p, and 'v' is the other value passed to the
713  /// instruction. These instructions always return 'old'.
714  enum BinOp : unsigned {
715  /// *p = v
717  /// *p = old + v
719  /// *p = old - v
721  /// *p = old & v
723  /// *p = ~(old & v)
725  /// *p = old | v
726  Or,
727  /// *p = old ^ v
729  /// *p = old >signed v ? old : v
731  /// *p = old <signed v ? old : v
733  /// *p = old >unsigned v ? old : v
735  /// *p = old <unsigned v ? old : v
737 
738  /// *p = old + v
740 
741  /// *p = old - v
743 
744  FIRST_BINOP = Xchg,
745  LAST_BINOP = FSub,
746  BAD_BINOP
747  };
748 
749 private:
750  template <unsigned Offset>
751  using AtomicOrderingBitfieldElement =
754 
755  template <unsigned Offset>
756  using BinOpBitfieldElement =
758 
759 public:
760  AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val, Align Alignment,
761  AtomicOrdering Ordering, SyncScope::ID SSID,
762  Instruction *InsertBefore = nullptr);
763  AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val, Align Alignment,
764  AtomicOrdering Ordering, SyncScope::ID SSID,
765  BasicBlock *InsertAtEnd);
766 
767  // allocate space for exactly two operands
768  void *operator new(size_t s) {
769  return User::operator new(s, 2);
770  }
771 
773  using AtomicOrderingField =
775  using OperationField = BinOpBitfieldElement<AtomicOrderingField::NextBit>;
779  "Bitfields must be contiguous");
780 
781  BinOp getOperation() const { return getSubclassData<OperationField>(); }
782 
783  static StringRef getOperationName(BinOp Op);
784 
785  static bool isFPOperation(BinOp Op) {
786  switch (Op) {
787  case AtomicRMWInst::FAdd:
788  case AtomicRMWInst::FSub:
789  return true;
790  default:
791  return false;
792  }
793  }
794 
796  setSubclassData<OperationField>(Operation);
797  }
798 
799  /// Return the alignment of the memory that is being allocated by the
800  /// instruction.
801  Align getAlign() const {
802  return Align(1ULL << getSubclassData<AlignmentField>());
803  }
804 
806  setSubclassData<AlignmentField>(Log2(Align));
807  }
808 
809  /// Return true if this is a RMW on a volatile memory location.
810  ///
811  bool isVolatile() const { return getSubclassData<VolatileField>(); }
812 
813  /// Specify whether this is a volatile RMW or not.
814  ///
815  void setVolatile(bool V) { setSubclassData<VolatileField>(V); }
816 
817  /// Transparently provide more efficient getOperand methods.
819 
820  /// Returns the ordering constraint of this rmw instruction.
822  return getSubclassData<AtomicOrderingField>();
823  }
824 
825  /// Sets the ordering constraint of this rmw instruction.
826  void setOrdering(AtomicOrdering Ordering) {
827  assert(Ordering != AtomicOrdering::NotAtomic &&
828  "atomicrmw instructions can only be atomic.");
829  setSubclassData<AtomicOrderingField>(Ordering);
830  }
831 
832  /// Returns the synchronization scope ID of this rmw instruction.
834  return SSID;
835  }
836 
837  /// Sets the synchronization scope ID of this rmw instruction.
839  this->SSID = SSID;
840  }
841 
842  Value *getPointerOperand() { return getOperand(0); }
843  const Value *getPointerOperand() const { return getOperand(0); }
844  static unsigned getPointerOperandIndex() { return 0U; }
845 
846  Value *getValOperand() { return getOperand(1); }
847  const Value *getValOperand() const { return getOperand(1); }
848 
849  /// Returns the address space of the pointer operand.
850  unsigned getPointerAddressSpace() const {
852  }
853 
855  return isFPOperation(getOperation());
856  }
857 
858  // Methods for support type inquiry through isa, cast, and dyn_cast:
859  static bool classof(const Instruction *I) {
860  return I->getOpcode() == Instruction::AtomicRMW;
861  }
862  static bool classof(const Value *V) {
863  return isa<Instruction>(V) && classof(cast<Instruction>(V));
864  }
865 
866 private:
867  void Init(BinOp Operation, Value *Ptr, Value *Val, Align Align,
868  AtomicOrdering Ordering, SyncScope::ID SSID);
869 
870  // Shadow Instruction::setInstructionSubclassData with a private forwarding
871  // method so that subclasses cannot accidentally use it.
872  template <typename Bitfield>
873  void setSubclassData(typename Bitfield::Type Value) {
874  Instruction::setSubclassData<Bitfield>(Value);
875  }
876 
877  /// The synchronization scope ID of this rmw instruction. Not quite enough
878  /// room in SubClassData for everything, so synchronization scope ID gets its
879  /// own field.
880  SyncScope::ID SSID;
881 };
882 
883 template <>
885  : public FixedNumOperandTraits<AtomicRMWInst,2> {
886 };
887 
889 
890 //===----------------------------------------------------------------------===//
891 // GetElementPtrInst Class
892 //===----------------------------------------------------------------------===//
893 
894 // checkGEPType - Simple wrapper function to give a better assertion failure
895 // message on bad indexes for a gep instruction.
896 //
898  assert(Ty && "Invalid GetElementPtrInst indices for type!");
899  return Ty;
900 }
901 
902 /// an instruction for type-safe pointer arithmetic to
903 /// access elements of arrays and structs
904 ///
906  Type *SourceElementType;
907  Type *ResultElementType;
908 
910 
911  /// Constructors - Create a getelementptr instruction with a base pointer an
912  /// list of indices. The first ctor can optionally insert before an existing
913  /// instruction, the second appends the new instruction to the specified
914  /// BasicBlock.
915  inline GetElementPtrInst(Type *PointeeType, Value *Ptr,
916  ArrayRef<Value *> IdxList, unsigned Values,
917  const Twine &NameStr, Instruction *InsertBefore);
918  inline GetElementPtrInst(Type *PointeeType, Value *Ptr,
919  ArrayRef<Value *> IdxList, unsigned Values,
920  const Twine &NameStr, BasicBlock *InsertAtEnd);
921 
922  void init(Value *Ptr, ArrayRef<Value *> IdxList, const Twine &NameStr);
923 
924 protected:
925  // Note: Instruction needs to be a friend here to call cloneImpl.
926  friend class Instruction;
927 
928  GetElementPtrInst *cloneImpl() const;
929 
930 public:
931  static GetElementPtrInst *Create(Type *PointeeType, Value *Ptr,
932  ArrayRef<Value *> IdxList,
933  const Twine &NameStr = "",
934  Instruction *InsertBefore = nullptr) {
935  unsigned Values = 1 + unsigned(IdxList.size());
936  if (!PointeeType)
937  PointeeType =
938  cast<PointerType>(Ptr->getType()->getScalarType())->getElementType();
939  else
940  assert(
941  PointeeType ==
942  cast<PointerType>(Ptr->getType()->getScalarType())->getElementType());
943  return new (Values) GetElementPtrInst(PointeeType, Ptr, IdxList, Values,
944  NameStr, InsertBefore);
945  }
946 
947  static GetElementPtrInst *Create(Type *PointeeType, Value *Ptr,
948  ArrayRef<Value *> IdxList,
949  const Twine &NameStr,
950  BasicBlock *InsertAtEnd) {
951  unsigned Values = 1 + unsigned(IdxList.size());
952  if (!PointeeType)
953  PointeeType =
954  cast<PointerType>(Ptr->getType()->getScalarType())->getElementType();
955  else
956  assert(
957  PointeeType ==
958  cast<PointerType>(Ptr->getType()->getScalarType())->getElementType());
959  return new (Values) GetElementPtrInst(PointeeType, Ptr, IdxList, Values,
960  NameStr, InsertAtEnd);
961  }
962 
963  /// Create an "inbounds" getelementptr. See the documentation for the
964  /// "inbounds" flag in LangRef.html for details.
966  ArrayRef<Value *> IdxList,
967  const Twine &NameStr = "",
968  Instruction *InsertBefore = nullptr){
969  return CreateInBounds(nullptr, Ptr, IdxList, NameStr, InsertBefore);
970  }
971 
972  static GetElementPtrInst *
973  CreateInBounds(Type *PointeeType, Value *Ptr, ArrayRef<Value *> IdxList,
974  const Twine &NameStr = "",
975  Instruction *InsertBefore = nullptr) {
977  Create(PointeeType, Ptr, IdxList, NameStr, InsertBefore);
978  GEP->setIsInBounds(true);
979  return GEP;
980  }
981 
983  ArrayRef<Value *> IdxList,
984  const Twine &NameStr,
985  BasicBlock *InsertAtEnd) {
986  return CreateInBounds(nullptr, Ptr, IdxList, NameStr, InsertAtEnd);
987  }
988 
989  static GetElementPtrInst *CreateInBounds(Type *PointeeType, Value *Ptr,
990  ArrayRef<Value *> IdxList,
991  const Twine &NameStr,
992  BasicBlock *InsertAtEnd) {
994  Create(PointeeType, Ptr, IdxList, NameStr, InsertAtEnd);
995  GEP->setIsInBounds(true);
996  return GEP;
997  }
998 
999  /// Transparently provide more efficient getOperand methods.
1001 
1002  Type *getSourceElementType() const { return SourceElementType; }
1003 
1004  void setSourceElementType(Type *Ty) { SourceElementType = Ty; }
1005  void setResultElementType(Type *Ty) { ResultElementType = Ty; }
1006 
1008  assert(ResultElementType ==
1009  cast<PointerType>(getType()->getScalarType())->getElementType());
1010  return ResultElementType;
1011  }
1012 
1013  /// Returns the address space of this instruction's pointer type.
1014  unsigned getAddressSpace() const {
1015  // Note that this is always the same as the pointer operand's address space
1016  // and that is cheaper to compute, so cheat here.
1017  return getPointerAddressSpace();
1018  }
1019 
1020  /// Returns the result type of a getelementptr with the given source
1021  /// element type and indexes.
1022  ///
1023  /// Null is returned if the indices are invalid for the specified
1024  /// source element type.
1025  static Type *getIndexedType(Type *Ty, ArrayRef<Value *> IdxList);
1026  static Type *getIndexedType(Type *Ty, ArrayRef<Constant *> IdxList);
1027  static Type *getIndexedType(Type *Ty, ArrayRef<uint64_t> IdxList);
1028 
1029  /// Return the type of the element at the given index of an indexable
1030  /// type. This is equivalent to "getIndexedType(Agg, {Zero, Idx})".
1031  ///
1032  /// Returns null if the type can't be indexed, or the given index is not
1033  /// legal for the given type.
1034  static Type *getTypeAtIndex(Type *Ty, Value *Idx);
1035  static Type *getTypeAtIndex(Type *Ty, uint64_t Idx);
1036 
1037  inline op_iterator idx_begin() { return op_begin()+1; }
1038  inline const_op_iterator idx_begin() const { return op_begin()+1; }
1039  inline op_iterator idx_end() { return op_end(); }
1040  inline const_op_iterator idx_end() const { return op_end(); }
1041 
1043  return make_range(idx_begin(), idx_end());
1044  }
1045 
1047  return make_range(idx_begin(), idx_end());
1048  }
1049 
1051  return getOperand(0);
1052  }
1053  const Value *getPointerOperand() const {
1054  return getOperand(0);
1055  }
1056  static unsigned getPointerOperandIndex() {
1057  return 0U; // get index for modifying correct operand.
1058  }
1059 
1060  /// Method to return the pointer operand as a
1061  /// PointerType.
1063  return getPointerOperand()->getType();
1064  }
1065 
1066  /// Returns the address space of the pointer operand.
1067  unsigned getPointerAddressSpace() const {
1069  }
1070 
1071  /// Returns the pointer type returned by the GEP
1072  /// instruction, which may be a vector of pointers.
1073  static Type *getGEPReturnType(Type *ElTy, Value *Ptr,
1074  ArrayRef<Value *> IdxList) {
1075  Type *PtrTy = PointerType::get(checkGEPType(getIndexedType(ElTy, IdxList)),
1076  Ptr->getType()->getPointerAddressSpace());
1077  // Vector GEP
1078  if (auto *PtrVTy = dyn_cast<VectorType>(Ptr->getType())) {
1079  ElementCount EltCount = PtrVTy->getElementCount();
1080  return VectorType::get(PtrTy, EltCount);
1081  }
1082  for (Value *Index : IdxList)
1083  if (auto *IndexVTy = dyn_cast<VectorType>(Index->getType())) {
1084  ElementCount EltCount = IndexVTy->getElementCount();
1085  return VectorType::get(PtrTy, EltCount);
1086  }
1087  // Scalar GEP
1088  return PtrTy;
1089  }
1090 
1091  unsigned getNumIndices() const { // Note: always non-negative
1092  return getNumOperands() - 1;
1093  }
1094 
1095  bool hasIndices() const {
1096  return getNumOperands() > 1;
1097  }
1098 
1099  /// Return true if all of the indices of this GEP are
1100  /// zeros. If so, the result pointer and the first operand have the same
1101  /// value, just potentially different types.
1102  bool hasAllZeroIndices() const;
1103 
1104  /// Return true if all of the indices of this GEP are
1105  /// constant integers. If so, the result pointer and the first operand have
1106  /// a constant offset between them.
1107  bool hasAllConstantIndices() const;
1108 
1109  /// Set or clear the inbounds flag on this GEP instruction.
1110  /// See LangRef.html for the meaning of inbounds on a getelementptr.
1111  void setIsInBounds(bool b = true);
1112 
1113  /// Determine whether the GEP has the inbounds flag.
1114  bool isInBounds() const;
1115 
1116  /// Accumulate the constant address offset of this GEP if possible.
1117  ///
1118  /// This routine accepts an APInt into which it will accumulate the constant
1119  /// offset of this GEP if the GEP is in fact constant. If the GEP is not
1120  /// all-constant, it returns false and the value of the offset APInt is
1121  /// undefined (it is *not* preserved!). The APInt passed into this routine
1122  /// must be at least as wide as the IntPtr type for the address space of
1123  /// the base GEP pointer.
1124  bool accumulateConstantOffset(const DataLayout &DL, APInt &Offset) const;
1125 
1126  // Methods for support type inquiry through isa, cast, and dyn_cast:
1127  static bool classof(const Instruction *I) {
1128  return (I->getOpcode() == Instruction::GetElementPtr);
1129  }
1130  static bool classof(const Value *V) {
1131  return isa<Instruction>(V) && classof(cast<Instruction>(V));
1132  }
1133 };
1134 
1135 template <>
1137  public VariadicOperandTraits<GetElementPtrInst, 1> {
1138 };
1139 
1140 GetElementPtrInst::GetElementPtrInst(Type *PointeeType, Value *Ptr,
1141  ArrayRef<Value *> IdxList, unsigned Values,
1142  const Twine &NameStr,
1143  Instruction *InsertBefore)
1144  : Instruction(getGEPReturnType(PointeeType, Ptr, IdxList), GetElementPtr,
1145  OperandTraits<GetElementPtrInst>::op_end(this) - Values,
1146  Values, InsertBefore),
1147  SourceElementType(PointeeType),
1148  ResultElementType(getIndexedType(PointeeType, IdxList)) {
1149  assert(ResultElementType ==
1150  cast<PointerType>(getType()->getScalarType())->getElementType());
1151  init(Ptr, IdxList, NameStr);
1152 }
1153 
1154 GetElementPtrInst::GetElementPtrInst(Type *PointeeType, Value *Ptr,
1155  ArrayRef<Value *> IdxList, unsigned Values,
1156  const Twine &NameStr,
1157  BasicBlock *InsertAtEnd)
1158  : Instruction(getGEPReturnType(PointeeType, Ptr, IdxList), GetElementPtr,
1159  OperandTraits<GetElementPtrInst>::op_end(this) - Values,
1160  Values, InsertAtEnd),
1161  SourceElementType(PointeeType),
1162  ResultElementType(getIndexedType(PointeeType, IdxList)) {
1163  assert(ResultElementType ==
1164  cast<PointerType>(getType()->getScalarType())->getElementType());
1165  init(Ptr, IdxList, NameStr);
1166 }
1167 
1168 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(GetElementPtrInst, Value)
1169 
1170 //===----------------------------------------------------------------------===//
1171 // ICmpInst Class
1172 //===----------------------------------------------------------------------===//
1173 
1174 /// This instruction compares its operands according to the predicate given
1175 /// to the constructor. It only operates on integers or pointers. The operands
1176 /// must be identical types.
1177 /// Represent an integer comparison operator.
1178 class ICmpInst: public CmpInst {
1179  void AssertOK() {
1180  assert(isIntPredicate() &&
1181  "Invalid ICmp predicate value");
1182  assert(getOperand(0)->getType() == getOperand(1)->getType() &&
1183  "Both operands to ICmp instruction are not of the same type!");
1184  // Check that the operands are the right type
1185  assert((getOperand(0)->getType()->isIntOrIntVectorTy() ||
1186  getOperand(0)->getType()->isPtrOrPtrVectorTy()) &&
1187  "Invalid operand types for ICmp instruction");
1188  }
1189 
1190 protected:
1191  // Note: Instruction needs to be a friend here to call cloneImpl.
1192  friend class Instruction;
1193 
1194  /// Clone an identical ICmpInst
1195  ICmpInst *cloneImpl() const;
1196 
1197 public:
1198  /// Constructor with insert-before-instruction semantics.
1200  Instruction *InsertBefore, ///< Where to insert
1201  Predicate pred, ///< The predicate to use for the comparison
1202  Value *LHS, ///< The left-hand-side of the expression
1203  Value *RHS, ///< The right-hand-side of the expression
1204  const Twine &NameStr = "" ///< Name of the instruction
1205  ) : CmpInst(makeCmpResultType(LHS->getType()),
1206  Instruction::ICmp, pred, LHS, RHS, NameStr,
1207  InsertBefore) {
1208 #ifndef NDEBUG
1209  AssertOK();
1210 #endif
1211  }
1212 
1213  /// Constructor with insert-at-end semantics.
1215  BasicBlock &InsertAtEnd, ///< Block to insert into.
1216  Predicate pred, ///< The predicate to use for the comparison
1217  Value *LHS, ///< The left-hand-side of the expression
1218  Value *RHS, ///< The right-hand-side of the expression
1219  const Twine &NameStr = "" ///< Name of the instruction
1220  ) : CmpInst(makeCmpResultType(LHS->getType()),
1221  Instruction::ICmp, pred, LHS, RHS, NameStr,
1222  &InsertAtEnd) {
1223 #ifndef NDEBUG
1224  AssertOK();
1225 #endif
1226  }
1227 
1228  /// Constructor with no-insertion semantics
1230  Predicate pred, ///< The predicate to use for the comparison
1231  Value *LHS, ///< The left-hand-side of the expression
1232  Value *RHS, ///< The right-hand-side of the expression
1233  const Twine &NameStr = "" ///< Name of the instruction
1234  ) : CmpInst(makeCmpResultType(LHS->getType()),
1235  Instruction::ICmp, pred, LHS, RHS, NameStr) {
1236 #ifndef NDEBUG
1237  AssertOK();
1238 #endif
1239  }
1240 
1241  /// For example, EQ->EQ, SLE->SLE, UGT->SGT, etc.
1242  /// @returns the predicate that would be the result if the operand were
1243  /// regarded as signed.
1244  /// Return the signed version of the predicate
1246  return getSignedPredicate(getPredicate());
1247  }
1248 
1249  /// This is a static version that you can use without an instruction.
1250  /// Return the signed version of the predicate.
1251  static Predicate getSignedPredicate(Predicate pred);
1252 
1253  /// For example, EQ->EQ, SLE->ULE, UGT->UGT, etc.
1254  /// @returns the predicate that would be the result if the operand were
1255  /// regarded as unsigned.
1256  /// Return the unsigned version of the predicate
1258  return getUnsignedPredicate(getPredicate());
1259  }
1260 
1261  /// This is a static version that you can use without an instruction.
1262  /// Return the unsigned version of the predicate.
1263  static Predicate getUnsignedPredicate(Predicate pred);
1264 
1265  /// Return true if this predicate is either EQ or NE. This also
1266  /// tests for commutativity.
1267  static bool isEquality(Predicate P) {
1268  return P == ICMP_EQ || P == ICMP_NE;
1269  }
1270 
1271  /// Return true if this predicate is either EQ or NE. This also
1272  /// tests for commutativity.
1273  bool isEquality() const {
1274  return isEquality(getPredicate());
1275  }
1276 
1277  /// @returns true if the predicate of this ICmpInst is commutative
1278  /// Determine if this relation is commutative.
1279  bool isCommutative() const { return isEquality(); }
1280 
1281  /// Return true if the predicate is relational (not EQ or NE).
1282  ///
1283  bool isRelational() const {
1284  return !isEquality();
1285  }
1286 
1287  /// Return true if the predicate is relational (not EQ or NE).
1288  ///
1289  static bool isRelational(Predicate P) {
1290  return !isEquality(P);
1291  }
1292 
1293  /// Return true if the predicate is SGT or UGT.
1294  ///
1295  static bool isGT(Predicate P) {
1296  return P == ICMP_SGT || P == ICMP_UGT;
1297  }
1298 
1299  /// Return true if the predicate is SLT or ULT.
1300  ///
1301  static bool isLT(Predicate P) {
1302  return P == ICMP_SLT || P == ICMP_ULT;
1303  }
1304 
1305  /// Return true if the predicate is SGE or UGE.
1306  ///
1307  static bool isGE(Predicate P) {
1308  return P == ICMP_SGE || P == ICMP_UGE;
1309  }
1310 
1311  /// Return true if the predicate is SLE or ULE.
1312  ///
1313  static bool isLE(Predicate P) {
1314  return P == ICMP_SLE || P == ICMP_ULE;
1315  }
1316 
1317  /// Exchange the two operands to this instruction in such a way that it does
1318  /// not modify the semantics of the instruction. The predicate value may be
1319  /// changed to retain the same result if the predicate is order dependent
1320  /// (e.g. ult).
1321  /// Swap operands and adjust predicate.
1322  void swapOperands() {
1323  setPredicate(getSwappedPredicate());
1324  Op<0>().swap(Op<1>());
1325  }
1326 
1327  // Methods for support type inquiry through isa, cast, and dyn_cast:
1328  static bool classof(const Instruction *I) {
1329  return I->getOpcode() == Instruction::ICmp;
1330  }
1331  static bool classof(const Value *V) {
1332  return isa<Instruction>(V) && classof(cast<Instruction>(V));
1333  }
1334 };
1335 
1336 //===----------------------------------------------------------------------===//
1337 // FCmpInst Class
1338 //===----------------------------------------------------------------------===//
1339 
1340 /// This instruction compares its operands according to the predicate given
1341 /// to the constructor. It only operates on floating point values or packed
1342 /// vectors of floating point values. The operands must be identical types.
1343 /// Represents a floating point comparison operator.
1344 class FCmpInst: public CmpInst {
1345  void AssertOK() {
1346  assert(isFPPredicate() && "Invalid FCmp predicate value");
1347  assert(getOperand(0)->getType() == getOperand(1)->getType() &&
1348  "Both operands to FCmp instruction are not of the same type!");
1349  // Check that the operands are the right type
1350  assert(getOperand(0)->getType()->isFPOrFPVectorTy() &&
1351  "Invalid operand types for FCmp instruction");
1352  }
1353 
1354 protected:
1355  // Note: Instruction needs to be a friend here to call cloneImpl.
1356  friend class Instruction;
1357 
1358  /// Clone an identical FCmpInst
1359  FCmpInst *cloneImpl() const;
1360 
1361 public:
1362  /// Constructor with insert-before-instruction semantics.
1364  Instruction *InsertBefore, ///< Where to insert
1365  Predicate pred, ///< The predicate to use for the comparison
1366  Value *LHS, ///< The left-hand-side of the expression
1367  Value *RHS, ///< The right-hand-side of the expression
1368  const Twine &NameStr = "" ///< Name of the instruction
1369  ) : CmpInst(makeCmpResultType(LHS->getType()),
1370  Instruction::FCmp, pred, LHS, RHS, NameStr,
1371  InsertBefore) {
1372  AssertOK();
1373  }
1374 
1375  /// Constructor with insert-at-end semantics.
1377  BasicBlock &InsertAtEnd, ///< Block to insert into.
1378  Predicate pred, ///< The predicate to use for the comparison
1379  Value *LHS, ///< The left-hand-side of the expression
1380  Value *RHS, ///< The right-hand-side of the expression
1381  const Twine &NameStr = "" ///< Name of the instruction
1382  ) : CmpInst(makeCmpResultType(LHS->getType()),
1383  Instruction::FCmp, pred, LHS, RHS, NameStr,
1384  &InsertAtEnd) {
1385  AssertOK();
1386  }
1387 
1388  /// Constructor with no-insertion semantics
1390  Predicate Pred, ///< The predicate to use for the comparison
1391  Value *LHS, ///< The left-hand-side of the expression
1392  Value *RHS, ///< The right-hand-side of the expression
1393  const Twine &NameStr = "", ///< Name of the instruction
1394  Instruction *FlagsSource = nullptr
1395  ) : CmpInst(makeCmpResultType(LHS->getType()), Instruction::FCmp, Pred, LHS,
1396  RHS, NameStr, nullptr, FlagsSource) {
1397  AssertOK();
1398  }
1399 
1400  /// @returns true if the predicate of this instruction is EQ or NE.
1401  /// Determine if this is an equality predicate.
1402  static bool isEquality(Predicate Pred) {
1403  return Pred == FCMP_OEQ || Pred == FCMP_ONE || Pred == FCMP_UEQ ||
1404  Pred == FCMP_UNE;
1405  }
1406 
1407  /// @returns true if the predicate of this instruction is EQ or NE.
1408  /// Determine if this is an equality predicate.
1409  bool isEquality() const { return isEquality(getPredicate()); }
1410 
1411  /// @returns true if the predicate of this instruction is commutative.
1412  /// Determine if this is a commutative predicate.
1413  bool isCommutative() const {
1414  return isEquality() ||
1415  getPredicate() == FCMP_FALSE ||
1416  getPredicate() == FCMP_TRUE ||
1417  getPredicate() == FCMP_ORD ||
1418  getPredicate() == FCMP_UNO;
1419  }
1420 
1421  /// @returns true if the predicate is relational (not EQ or NE).
1422  /// Determine if this a relational predicate.
1423  bool isRelational() const { return !isEquality(); }
1424 
1425  /// Exchange the two operands to this instruction in such a way that it does
1426  /// not modify the semantics of the instruction. The predicate value may be
1427  /// changed to retain the same result if the predicate is order dependent
1428  /// (e.g. ult).
1429  /// Swap operands and adjust predicate.
1430  void swapOperands() {
1432  Op<0>().swap(Op<1>());
1433  }
1434 
1435  /// Methods for support type inquiry through isa, cast, and dyn_cast:
1436  static bool classof(const Instruction *I) {
1437  return I->getOpcode() == Instruction::FCmp;
1438  }
1439  static bool classof(const Value *V) {
1440  return isa<Instruction>(V) && classof(cast<Instruction>(V));
1441  }
1442 };
1443 
1444 //===----------------------------------------------------------------------===//
1445 /// This class represents a function call, abstracting a target
1446 /// machine's calling convention. This class uses low bit of the SubClassData
1447 /// field to indicate whether or not this is a tail call. The rest of the bits
1448 /// hold the calling convention of the call.
1449 ///
1450 class CallInst : public CallBase {
1451  CallInst(const CallInst &CI);
1452 
1453  /// Construct a CallInst given a range of arguments.
1454  /// Construct a CallInst from a range of arguments
1455  inline CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1456  ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
1457  Instruction *InsertBefore);
1458 
1459  inline CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1460  const Twine &NameStr, Instruction *InsertBefore)
1461  : CallInst(Ty, Func, Args, None, NameStr, InsertBefore) {}
1462 
1463  /// Construct a CallInst given a range of arguments.
1464  /// Construct a CallInst from a range of arguments
1465  inline CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1466  ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
1467  BasicBlock *InsertAtEnd);
1468 
1469  explicit CallInst(FunctionType *Ty, Value *F, const Twine &NameStr,
1470  Instruction *InsertBefore);
1471 
1472  CallInst(FunctionType *ty, Value *F, const Twine &NameStr,
1473  BasicBlock *InsertAtEnd);
1474 
1475  void init(FunctionType *FTy, Value *Func, ArrayRef<Value *> Args,
1476  ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr);
1477  void init(FunctionType *FTy, Value *Func, const Twine &NameStr);
1478 
1479  /// Compute the number of operands to allocate.
1480  static int ComputeNumOperands(int NumArgs, int NumBundleInputs = 0) {
1481  // We need one operand for the called function, plus the input operand
1482  // counts provided.
1483  return 1 + NumArgs + NumBundleInputs;
1484  }
1485 
1486 protected:
1487  // Note: Instruction needs to be a friend here to call cloneImpl.
1488  friend class Instruction;
1489 
1490  CallInst *cloneImpl() const;
1491 
1492 public:
1493  static CallInst *Create(FunctionType *Ty, Value *F, const Twine &NameStr = "",
1494  Instruction *InsertBefore = nullptr) {
1495  return new (ComputeNumOperands(0)) CallInst(Ty, F, NameStr, InsertBefore);
1496  }
1497 
1499  const Twine &NameStr,
1500  Instruction *InsertBefore = nullptr) {
1501  return new (ComputeNumOperands(Args.size()))
1502  CallInst(Ty, Func, Args, None, NameStr, InsertBefore);
1503  }
1504 
1506  ArrayRef<OperandBundleDef> Bundles = None,
1507  const Twine &NameStr = "",
1508  Instruction *InsertBefore = nullptr) {
1509  const int NumOperands =
1510  ComputeNumOperands(Args.size(), CountBundleInputs(Bundles));
1511  const unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
1512 
1513  return new (NumOperands, DescriptorBytes)
1514  CallInst(Ty, Func, Args, Bundles, NameStr, InsertBefore);
1515  }
1516 
1517  static CallInst *Create(FunctionType *Ty, Value *F, const Twine &NameStr,
1518  BasicBlock *InsertAtEnd) {
1519  return new (ComputeNumOperands(0)) CallInst(Ty, F, NameStr, InsertAtEnd);
1520  }
1521 
1523  const Twine &NameStr, BasicBlock *InsertAtEnd) {
1524  return new (ComputeNumOperands(Args.size()))
1525  CallInst(Ty, Func, Args, None, NameStr, InsertAtEnd);
1526  }
1527 
1530  const Twine &NameStr, BasicBlock *InsertAtEnd) {
1531  const int NumOperands =
1532  ComputeNumOperands(Args.size(), CountBundleInputs(Bundles));
1533  const unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
1534 
1535  return new (NumOperands, DescriptorBytes)
1536  CallInst(Ty, Func, Args, Bundles, NameStr, InsertAtEnd);
1537  }
1538 
1539  static CallInst *Create(FunctionCallee Func, const Twine &NameStr = "",
1540  Instruction *InsertBefore = nullptr) {
1541  return Create(Func.getFunctionType(), Func.getCallee(), NameStr,
1542  InsertBefore);
1543  }
1544 
1546  ArrayRef<OperandBundleDef> Bundles = None,
1547  const Twine &NameStr = "",
1548  Instruction *InsertBefore = nullptr) {
1549  return Create(Func.getFunctionType(), Func.getCallee(), Args, Bundles,
1550  NameStr, InsertBefore);
1551  }
1552 
1554  const Twine &NameStr,
1555  Instruction *InsertBefore = nullptr) {
1556  return Create(Func.getFunctionType(), Func.getCallee(), Args, NameStr,
1557  InsertBefore);
1558  }
1559 
1560  static CallInst *Create(FunctionCallee Func, const Twine &NameStr,
1561  BasicBlock *InsertAtEnd) {
1562  return Create(Func.getFunctionType(), Func.getCallee(), NameStr,
1563  InsertAtEnd);
1564  }
1565 
1567  const Twine &NameStr, BasicBlock *InsertAtEnd) {
1568  return Create(Func.getFunctionType(), Func.getCallee(), Args, NameStr,
1569  InsertAtEnd);
1570  }
1571 
1574  const Twine &NameStr, BasicBlock *InsertAtEnd) {
1575  return Create(Func.getFunctionType(), Func.getCallee(), Args, Bundles,
1576  NameStr, InsertAtEnd);
1577  }
1578 
1579  /// Create a clone of \p CI with a different set of operand bundles and
1580  /// insert it before \p InsertPt.
1581  ///
1582  /// The returned call instruction is identical \p CI in every way except that
1583  /// the operand bundles for the new instruction are set to the operand bundles
1584  /// in \p Bundles.
1585  static CallInst *Create(CallInst *CI, ArrayRef<OperandBundleDef> Bundles,
1586  Instruction *InsertPt = nullptr);
1587 
1588  /// Generate the IR for a call to malloc:
1589  /// 1. Compute the malloc call's argument as the specified type's size,
1590  /// possibly multiplied by the array size if the array size is not
1591  /// constant 1.
1592  /// 2. Call malloc with that argument.
1593  /// 3. Bitcast the result of the malloc call to the specified type.
1594  static Instruction *CreateMalloc(Instruction *InsertBefore, Type *IntPtrTy,
1595  Type *AllocTy, Value *AllocSize,
1596  Value *ArraySize = nullptr,
1597  Function *MallocF = nullptr,
1598  const Twine &Name = "");
1599  static Instruction *CreateMalloc(BasicBlock *InsertAtEnd, Type *IntPtrTy,
1600  Type *AllocTy, Value *AllocSize,
1601  Value *ArraySize = nullptr,
1602  Function *MallocF = nullptr,
1603  const Twine &Name = "");
1604  static Instruction *CreateMalloc(Instruction *InsertBefore, Type *IntPtrTy,
1605  Type *AllocTy, Value *AllocSize,
1606  Value *ArraySize = nullptr,
1607  ArrayRef<OperandBundleDef> Bundles = None,
1608  Function *MallocF = nullptr,
1609  const Twine &Name = "");
1610  static Instruction *CreateMalloc(BasicBlock *InsertAtEnd, Type *IntPtrTy,
1611  Type *AllocTy, Value *AllocSize,
1612  Value *ArraySize = nullptr,
1613  ArrayRef<OperandBundleDef> Bundles = None,
1614  Function *MallocF = nullptr,
1615  const Twine &Name = "");
1616  /// Generate the IR for a call to the builtin free function.
1617  static Instruction *CreateFree(Value *Source, Instruction *InsertBefore);
1618  static Instruction *CreateFree(Value *Source, BasicBlock *InsertAtEnd);
1621  Instruction *InsertBefore);
1624  BasicBlock *InsertAtEnd);
1625 
1626  // Note that 'musttail' implies 'tail'.
1627  enum TailCallKind : unsigned {
1633  };
1634 
1636  static_assert(
1637  Bitfield::areContiguous<TailCallKindField, CallBase::CallingConvField>(),
1638  "Bitfields must be contiguous");
1639 
1641  return getSubclassData<TailCallKindField>();
1642  }
1643 
1644  bool isTailCall() const {
1646  return Kind == TCK_Tail || Kind == TCK_MustTail;
1647  }
1648 
1649  bool isMustTailCall() const { return getTailCallKind() == TCK_MustTail; }
1650 
1651  bool isNoTailCall() const { return getTailCallKind() == TCK_NoTail; }
1652 
1654  setSubclassData<TailCallKindField>(TCK);
1655  }
1656 
1657  void setTailCall(bool IsTc = true) {
1658  setTailCallKind(IsTc ? TCK_Tail : TCK_None);
1659  }
1660 
1661  /// Return true if the call can return twice
1662  bool canReturnTwice() const { return hasFnAttr(Attribute::ReturnsTwice); }
1664  addAttribute(AttributeList::FunctionIndex, Attribute::ReturnsTwice);
1665  }
1666 
1667  // Methods for support type inquiry through isa, cast, and dyn_cast:
1668  static bool classof(const Instruction *I) {
1669  return I->getOpcode() == Instruction::Call;
1670  }
1671  static bool classof(const Value *V) {
1672  return isa<Instruction>(V) && classof(cast<Instruction>(V));
1673  }
1674 
1675  /// Updates profile metadata by scaling it by \p S / \p T.
1676  void updateProfWeight(uint64_t S, uint64_t T);
1677 
1678 private:
1679  // Shadow Instruction::setInstructionSubclassData with a private forwarding
1680  // method so that subclasses cannot accidentally use it.
1681  template <typename Bitfield>
1682  void setSubclassData(typename Bitfield::Type Value) {
1683  Instruction::setSubclassData<Bitfield>(Value);
1684  }
1685 };
1686 
1687 CallInst::CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1688  ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
1689  BasicBlock *InsertAtEnd)
1690  : CallBase(Ty->getReturnType(), Instruction::Call,
1691  OperandTraits<CallBase>::op_end(this) -
1692  (Args.size() + CountBundleInputs(Bundles) + 1),
1693  unsigned(Args.size() + CountBundleInputs(Bundles) + 1),
1694  InsertAtEnd) {
1695  init(Ty, Func, Args, Bundles, NameStr);
1696 }
1697 
1698 CallInst::CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1699  ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
1700  Instruction *InsertBefore)
1701  : CallBase(Ty->getReturnType(), Instruction::Call,
1702  OperandTraits<CallBase>::op_end(this) -
1703  (Args.size() + CountBundleInputs(Bundles) + 1),
1704  unsigned(Args.size() + CountBundleInputs(Bundles) + 1),
1705  InsertBefore) {
1706  init(Ty, Func, Args, Bundles, NameStr);
1707 }
1708 
1709 //===----------------------------------------------------------------------===//
1710 // SelectInst Class
1711 //===----------------------------------------------------------------------===//
1712 
1713 /// This class represents the LLVM 'select' instruction.
1714 ///
1715 class SelectInst : public Instruction {
1716  SelectInst(Value *C, Value *S1, Value *S2, const Twine &NameStr,
1717  Instruction *InsertBefore)
1719  &Op<0>(), 3, InsertBefore) {
1720  init(C, S1, S2);
1721  setName(NameStr);
1722  }
1723 
1724  SelectInst(Value *C, Value *S1, Value *S2, const Twine &NameStr,
1725  BasicBlock *InsertAtEnd)
1727  &Op<0>(), 3, InsertAtEnd) {
1728  init(C, S1, S2);
1729  setName(NameStr);
1730  }
1731 
1732  void init(Value *C, Value *S1, Value *S2) {
1733  assert(!areInvalidOperands(C, S1, S2) && "Invalid operands for select");
1734  Op<0>() = C;
1735  Op<1>() = S1;
1736  Op<2>() = S2;
1737  }
1738 
1739 protected:
1740  // Note: Instruction needs to be a friend here to call cloneImpl.
1741  friend class Instruction;
1742 
1743  SelectInst *cloneImpl() const;
1744 
1745 public:
1746  static SelectInst *Create(Value *C, Value *S1, Value *S2,
1747  const Twine &NameStr = "",
1748  Instruction *InsertBefore = nullptr,
1749  Instruction *MDFrom = nullptr) {
1750  SelectInst *Sel = new(3) SelectInst(C, S1, S2, NameStr, InsertBefore);
1751  if (MDFrom)
1752  Sel->copyMetadata(*MDFrom);
1753  return Sel;
1754  }
1755 
1756  static SelectInst *Create(Value *C, Value *S1, Value *S2,
1757  const Twine &NameStr,
1758  BasicBlock *InsertAtEnd) {
1759  return new(3) SelectInst(C, S1, S2, NameStr, InsertAtEnd);
1760  }
1761 
1762  const Value *getCondition() const { return Op<0>(); }
1763  const Value *getTrueValue() const { return Op<1>(); }
1764  const Value *getFalseValue() const { return Op<2>(); }
1765  Value *getCondition() { return Op<0>(); }
1766  Value *getTrueValue() { return Op<1>(); }
1767  Value *getFalseValue() { return Op<2>(); }
1768 
1769  void setCondition(Value *V) { Op<0>() = V; }
1770  void setTrueValue(Value *V) { Op<1>() = V; }
1771  void setFalseValue(Value *V) { Op<2>() = V; }
1772 
1773  /// Swap the true and false values of the select instruction.
1774  /// This doesn't swap prof metadata.
1775  void swapValues() { Op<1>().swap(Op<2>()); }
1776 
1777  /// Return a string if the specified operands are invalid
1778  /// for a select operation, otherwise return null.
1779  static const char *areInvalidOperands(Value *Cond, Value *True, Value *False);
1780 
1781  /// Transparently provide more efficient getOperand methods.
1783 
1785  return static_cast<OtherOps>(Instruction::getOpcode());
1786  }
1787 
1788  // Methods for support type inquiry through isa, cast, and dyn_cast:
1789  static bool classof(const Instruction *I) {
1790  return I->getOpcode() == Instruction::Select;
1791  }
1792  static bool classof(const Value *V) {
1793  return isa<Instruction>(V) && classof(cast<Instruction>(V));
1794  }
1795 };
1796 
1797 template <>
1798 struct OperandTraits<SelectInst> : public FixedNumOperandTraits<SelectInst, 3> {
1799 };
1800 
1802 
1803 //===----------------------------------------------------------------------===//
1804 // VAArgInst Class
1805 //===----------------------------------------------------------------------===//
1806 
1807 /// This class represents the va_arg llvm instruction, which returns
1808 /// an argument of the specified type given a va_list and increments that list
1809 ///
1810 class VAArgInst : public UnaryInstruction {
1811 protected:
1812  // Note: Instruction needs to be a friend here to call cloneImpl.
1813  friend class Instruction;
1814 
1815  VAArgInst *cloneImpl() const;
1816 
1817 public:
1818  VAArgInst(Value *List, Type *Ty, const Twine &NameStr = "",
1819  Instruction *InsertBefore = nullptr)
1820  : UnaryInstruction(Ty, VAArg, List, InsertBefore) {
1821  setName(NameStr);
1822  }
1823 
1824  VAArgInst(Value *List, Type *Ty, const Twine &NameStr,
1825  BasicBlock *InsertAtEnd)
1826  : UnaryInstruction(Ty, VAArg, List, InsertAtEnd) {
1827  setName(NameStr);
1828  }
1829 
1830  Value *getPointerOperand() { return getOperand(0); }
1831  const Value *getPointerOperand() const { return getOperand(0); }
1832  static unsigned getPointerOperandIndex() { return 0U; }
1833 
1834  // Methods for support type inquiry through isa, cast, and dyn_cast:
1835  static bool classof(const Instruction *I) {
1836  return I->getOpcode() == VAArg;
1837  }
1838  static bool classof(const Value *V) {
1839  return isa<Instruction>(V) && classof(cast<Instruction>(V));
1840  }
1841 };
1842 
1843 //===----------------------------------------------------------------------===//
1844 // ExtractElementInst Class
1845 //===----------------------------------------------------------------------===//
1846 
1847 /// This instruction extracts a single (scalar)
1848 /// element from a VectorType value
1849 ///
1851  ExtractElementInst(Value *Vec, Value *Idx, const Twine &NameStr = "",
1852  Instruction *InsertBefore = nullptr);
1853  ExtractElementInst(Value *Vec, Value *Idx, const Twine &NameStr,
1854  BasicBlock *InsertAtEnd);
1855 
1856 protected:
1857  // Note: Instruction needs to be a friend here to call cloneImpl.
1858  friend class Instruction;
1859 
1860  ExtractElementInst *cloneImpl() const;
1861 
1862 public:
1863  static ExtractElementInst *Create(Value *Vec, Value *Idx,
1864  const Twine &NameStr = "",
1865  Instruction *InsertBefore = nullptr) {
1866  return new(2) ExtractElementInst(Vec, Idx, NameStr, InsertBefore);
1867  }
1868 
1869  static ExtractElementInst *Create(Value *Vec, Value *Idx,
1870  const Twine &NameStr,
1871  BasicBlock *InsertAtEnd) {
1872  return new(2) ExtractElementInst(Vec, Idx, NameStr, InsertAtEnd);
1873  }
1874 
1875  /// Return true if an extractelement instruction can be
1876  /// formed with the specified operands.
1877  static bool isValidOperands(const Value *Vec, const Value *Idx);
1878 
1879  Value *getVectorOperand() { return Op<0>(); }
1880  Value *getIndexOperand() { return Op<1>(); }
1881  const Value *getVectorOperand() const { return Op<0>(); }
1882  const Value *getIndexOperand() const { return Op<1>(); }
1883 
1885  return cast<VectorType>(getVectorOperand()->getType());
1886  }
1887 
1888  /// Transparently provide more efficient getOperand methods.
1890 
1891  // Methods for support type inquiry through isa, cast, and dyn_cast:
1892  static bool classof(const Instruction *I) {
1893  return I->getOpcode() == Instruction::ExtractElement;
1894  }
1895  static bool classof(const Value *V) {
1896  return isa<Instruction>(V) && classof(cast<Instruction>(V));
1897  }
1898 };
1899 
1900 template <>
1902  public FixedNumOperandTraits<ExtractElementInst, 2> {
1903 };
1904 
1906 
1907 //===----------------------------------------------------------------------===//
1908 // InsertElementInst Class
1909 //===----------------------------------------------------------------------===//
1910 
1911 /// This instruction inserts a single (scalar)
1912 /// element into a VectorType value
1913 ///
1915  InsertElementInst(Value *Vec, Value *NewElt, Value *Idx,
1916  const Twine &NameStr = "",
1917  Instruction *InsertBefore = nullptr);
1918  InsertElementInst(Value *Vec, Value *NewElt, Value *Idx, const Twine &NameStr,
1919  BasicBlock *InsertAtEnd);
1920 
1921 protected:
1922  // Note: Instruction needs to be a friend here to call cloneImpl.
1923  friend class Instruction;
1924 
1925  InsertElementInst *cloneImpl() const;
1926 
1927 public:
1928  static InsertElementInst *Create(Value *Vec, Value *NewElt, Value *Idx,
1929  const Twine &NameStr = "",
1930  Instruction *InsertBefore = nullptr) {
1931  return new(3) InsertElementInst(Vec, NewElt, Idx, NameStr, InsertBefore);
1932  }
1933 
1934  static InsertElementInst *Create(Value *Vec, Value *NewElt, Value *Idx,
1935  const Twine &NameStr,
1936  BasicBlock *InsertAtEnd) {
1937  return new(3) InsertElementInst(Vec, NewElt, Idx, NameStr, InsertAtEnd);
1938  }
1939 
1940  /// Return true if an insertelement instruction can be
1941  /// formed with the specified operands.
1942  static bool isValidOperands(const Value *Vec, const Value *NewElt,
1943  const Value *Idx);
1944 
1945  /// Overload to return most specific vector type.
1946  ///
1947  VectorType *getType() const {
1948  return cast<VectorType>(Instruction::getType());
1949  }
1950 
1951  /// Transparently provide more efficient getOperand methods.
1953 
1954  // Methods for support type inquiry through isa, cast, and dyn_cast:
1955  static bool classof(const Instruction *I) {
1956  return I->getOpcode() == Instruction::InsertElement;
1957  }
1958  static bool classof(const Value *V) {
1959  return isa<Instruction>(V) && classof(cast<Instruction>(V));
1960  }
1961 };
1962 
1963 template <>
1965  public FixedNumOperandTraits<InsertElementInst, 3> {
1966 };
1967 
1969 
1970 //===----------------------------------------------------------------------===//
1971 // ShuffleVectorInst Class
1972 //===----------------------------------------------------------------------===//
1973 
1974 constexpr int UndefMaskElem = -1;
1975 
1976 /// This instruction constructs a fixed permutation of two
1977 /// input vectors.
1978 ///
1979 /// For each element of the result vector, the shuffle mask selects an element
1980 /// from one of the input vectors to copy to the result. Non-negative elements
1981 /// in the mask represent an index into the concatenated pair of input vectors.
1982 /// UndefMaskElem (-1) specifies that the result element is undefined.
1983 ///
1984 /// For scalable vectors, all the elements of the mask must be 0 or -1. This
1985 /// requirement may be relaxed in the future.
1987  SmallVector<int, 4> ShuffleMask;
1988  Constant *ShuffleMaskForBitcode;
1989 
1990 protected:
1991  // Note: Instruction needs to be a friend here to call cloneImpl.
1992  friend class Instruction;
1993 
1994  ShuffleVectorInst *cloneImpl() const;
1995 
1996 public:
1998  const Twine &NameStr = "",
1999  Instruction *InsertBefor = nullptr);
2001  const Twine &NameStr, BasicBlock *InsertAtEnd);
2003  const Twine &NameStr = "",
2004  Instruction *InsertBefor = nullptr);
2006  const Twine &NameStr, BasicBlock *InsertAtEnd);
2007 
2008  void *operator new(size_t s) { return User::operator new(s, 2); }
2009 
2010  /// Swap the operands and adjust the mask to preserve the semantics
2011  /// of the instruction.
2012  void commute();
2013 
2014  /// Return true if a shufflevector instruction can be
2015  /// formed with the specified operands.
2016  static bool isValidOperands(const Value *V1, const Value *V2,
2017  const Value *Mask);
2018  static bool isValidOperands(const Value *V1, const Value *V2,
2020 
2021  /// Overload to return most specific vector type.
2022  ///
2023  VectorType *getType() const {
2024  return cast<VectorType>(Instruction::getType());
2025  }
2026 
2027  /// Transparently provide more efficient getOperand methods.
2029 
2030  /// Return the shuffle mask value of this instruction for the given element
2031  /// index. Return UndefMaskElem if the element is undef.
2032  int getMaskValue(unsigned Elt) const { return ShuffleMask[Elt]; }
2033 
2034  /// Convert the input shuffle mask operand to a vector of integers. Undefined
2035  /// elements of the mask are returned as UndefMaskElem.
2036  static void getShuffleMask(const Constant *Mask,
2037  SmallVectorImpl<int> &Result);
2038 
2039  /// Return the mask for this instruction as a vector of integers. Undefined
2040  /// elements of the mask are returned as UndefMaskElem.
2041  void getShuffleMask(SmallVectorImpl<int> &Result) const {
2042  Result.assign(ShuffleMask.begin(), ShuffleMask.end());
2043  }
2044 
2045  /// Return the mask for this instruction, for use in bitcode.
2046  ///
2047  /// TODO: This is temporary until we decide a new bitcode encoding for
2048  /// shufflevector.
2049  Constant *getShuffleMaskForBitcode() const { return ShuffleMaskForBitcode; }
2050 
2051  static Constant *convertShuffleMaskForBitcode(ArrayRef<int> Mask,
2052  Type *ResultTy);
2053 
2054  void setShuffleMask(ArrayRef<int> Mask);
2055 
2056  ArrayRef<int> getShuffleMask() const { return ShuffleMask; }
2057 
2058  /// Return true if this shuffle returns a vector with a different number of
2059  /// elements than its source vectors.
2060  /// Examples: shufflevector <4 x n> A, <4 x n> B, <1,2,3>
2061  /// shufflevector <4 x n> A, <4 x n> B, <1,2,3,4,5>
2062  bool changesLength() const {
2063  unsigned NumSourceElts = cast<VectorType>(Op<0>()->getType())
2064  ->getElementCount()
2065  .getKnownMinValue();
2066  unsigned NumMaskElts = ShuffleMask.size();
2067  return NumSourceElts != NumMaskElts;
2068  }
2069 
2070  /// Return true if this shuffle returns a vector with a greater number of
2071  /// elements than its source vectors.
2072  /// Example: shufflevector <2 x n> A, <2 x n> B, <1,2,3>
2073  bool increasesLength() const {
2074  unsigned NumSourceElts = cast<VectorType>(Op<0>()->getType())
2075  ->getElementCount()
2076  .getKnownMinValue();
2077  unsigned NumMaskElts = ShuffleMask.size();
2078  return NumSourceElts < NumMaskElts;
2079  }
2080 
2081  /// Return true if this shuffle mask chooses elements from exactly one source
2082  /// vector.
2083  /// Example: <7,5,undef,7>
2084  /// This assumes that vector operands are the same length as the mask.
2085  static bool isSingleSourceMask(ArrayRef<int> Mask);
2086  static bool isSingleSourceMask(const Constant *Mask) {
2087  assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2088  SmallVector<int, 16> MaskAsInts;
2089  getShuffleMask(Mask, MaskAsInts);
2090  return isSingleSourceMask(MaskAsInts);
2091  }
2092 
2093  /// Return true if this shuffle chooses elements from exactly one source
2094  /// vector without changing the length of that vector.
2095  /// Example: shufflevector <4 x n> A, <4 x n> B, <3,0,undef,3>
2096  /// TODO: Optionally allow length-changing shuffles.
2097  bool isSingleSource() const {
2098  return !changesLength() && isSingleSourceMask(ShuffleMask);
2099  }
2100 
2101  /// Return true if this shuffle mask chooses elements from exactly one source
2102  /// vector without lane crossings. A shuffle using this mask is not
2103  /// necessarily a no-op because it may change the number of elements from its
2104  /// input vectors or it may provide demanded bits knowledge via undef lanes.
2105  /// Example: <undef,undef,2,3>
2106  static bool isIdentityMask(ArrayRef<int> Mask);
2107  static bool isIdentityMask(const Constant *Mask) {
2108  assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2109  SmallVector<int, 16> MaskAsInts;
2110  getShuffleMask(Mask, MaskAsInts);
2111  return isIdentityMask(MaskAsInts);
2112  }
2113 
2114  /// Return true if this shuffle chooses elements from exactly one source
2115  /// vector without lane crossings and does not change the number of elements
2116  /// from its input vectors.
2117  /// Example: shufflevector <4 x n> A, <4 x n> B, <4,undef,6,undef>
2118  bool isIdentity() const {
2119  return !changesLength() && isIdentityMask(ShuffleMask);
2120  }
2121 
2122  /// Return true if this shuffle lengthens exactly one source vector with
2123  /// undefs in the high elements.
2124  bool isIdentityWithPadding() const;
2125 
2126  /// Return true if this shuffle extracts the first N elements of exactly one
2127  /// source vector.
2128  bool isIdentityWithExtract() const;
2129 
2130  /// Return true if this shuffle concatenates its 2 source vectors. This
2131  /// returns false if either input is undefined. In that case, the shuffle is
2132  /// is better classified as an identity with padding operation.
2133  bool isConcat() const;
2134 
2135  /// Return true if this shuffle mask chooses elements from its source vectors
2136  /// without lane crossings. A shuffle using this mask would be
2137  /// equivalent to a vector select with a constant condition operand.
2138  /// Example: <4,1,6,undef>
2139  /// This returns false if the mask does not choose from both input vectors.
2140  /// In that case, the shuffle is better classified as an identity shuffle.
2141  /// This assumes that vector operands are the same length as the mask
2142  /// (a length-changing shuffle can never be equivalent to a vector select).
2143  static bool isSelectMask(ArrayRef<int> Mask);
2144  static bool isSelectMask(const Constant *Mask) {
2145  assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2146  SmallVector<int, 16> MaskAsInts;
2147  getShuffleMask(Mask, MaskAsInts);
2148  return isSelectMask(MaskAsInts);
2149  }
2150 
2151  /// Return true if this shuffle chooses elements from its source vectors
2152  /// without lane crossings and all operands have the same number of elements.
2153  /// In other words, this shuffle is equivalent to a vector select with a
2154  /// constant condition operand.
2155  /// Example: shufflevector <4 x n> A, <4 x n> B, <undef,1,6,3>
2156  /// This returns false if the mask does not choose from both input vectors.
2157  /// In that case, the shuffle is better classified as an identity shuffle.
2158  /// TODO: Optionally allow length-changing shuffles.
2159  bool isSelect() const {
2160  return !changesLength() && isSelectMask(ShuffleMask);
2161  }
2162 
2163  /// Return true if this shuffle mask swaps the order of elements from exactly
2164  /// one source vector.
2165  /// Example: <7,6,undef,4>
2166  /// This assumes that vector operands are the same length as the mask.
2167  static bool isReverseMask(ArrayRef<int> Mask);
2168  static bool isReverseMask(const Constant *Mask) {
2169  assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2170  SmallVector<int, 16> MaskAsInts;
2171  getShuffleMask(Mask, MaskAsInts);
2172  return isReverseMask(MaskAsInts);
2173  }
2174 
2175  /// Return true if this shuffle swaps the order of elements from exactly
2176  /// one source vector.
2177  /// Example: shufflevector <4 x n> A, <4 x n> B, <3,undef,1,undef>
2178  /// TODO: Optionally allow length-changing shuffles.
2179  bool isReverse() const {
2180  return !changesLength() && isReverseMask(ShuffleMask);
2181  }
2182 
2183  /// Return true if this shuffle mask chooses all elements with the same value
2184  /// as the first element of exactly one source vector.
2185  /// Example: <4,undef,undef,4>
2186  /// This assumes that vector operands are the same length as the mask.
2187  static bool isZeroEltSplatMask(ArrayRef<int> Mask);
2188  static bool isZeroEltSplatMask(const Constant *Mask) {
2189  assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2190  SmallVector<int, 16> MaskAsInts;
2191  getShuffleMask(Mask, MaskAsInts);
2192  return isZeroEltSplatMask(MaskAsInts);
2193  }
2194 
2195  /// Return true if all elements of this shuffle are the same value as the
2196  /// first element of exactly one source vector without changing the length
2197  /// of that vector.
2198  /// Example: shufflevector <4 x n> A, <4 x n> B, <undef,0,undef,0>
2199  /// TODO: Optionally allow length-changing shuffles.
2200  /// TODO: Optionally allow splats from other elements.
2201  bool isZeroEltSplat() const {
2202  return !changesLength() && isZeroEltSplatMask(ShuffleMask);
2203  }
2204 
2205  /// Return true if this shuffle mask is a transpose mask.
2206  /// Transpose vector masks transpose a 2xn matrix. They read corresponding
2207  /// even- or odd-numbered vector elements from two n-dimensional source
2208  /// vectors and write each result into consecutive elements of an
2209  /// n-dimensional destination vector. Two shuffles are necessary to complete
2210  /// the transpose, one for the even elements and another for the odd elements.
2211  /// This description closely follows how the TRN1 and TRN2 AArch64
2212  /// instructions operate.
2213  ///
2214  /// For example, a simple 2x2 matrix can be transposed with:
2215  ///
2216  /// ; Original matrix
2217  /// m0 = < a, b >
2218  /// m1 = < c, d >
2219  ///
2220  /// ; Transposed matrix
2221  /// t0 = < a, c > = shufflevector m0, m1, < 0, 2 >
2222  /// t1 = < b, d > = shufflevector m0, m1, < 1, 3 >
2223  ///
2224  /// For matrices having greater than n columns, the resulting nx2 transposed
2225  /// matrix is stored in two result vectors such that one vector contains
2226  /// interleaved elements from all the even-numbered rows and the other vector
2227  /// contains interleaved elements from all the odd-numbered rows. For example,
2228  /// a 2x4 matrix can be transposed with:
2229  ///
2230  /// ; Original matrix
2231  /// m0 = < a, b, c, d >
2232  /// m1 = < e, f, g, h >
2233  ///
2234  /// ; Transposed matrix
2235  /// t0 = < a, e, c, g > = shufflevector m0, m1 < 0, 4, 2, 6 >
2236  /// t1 = < b, f, d, h > = shufflevector m0, m1 < 1, 5, 3, 7 >
2237  static bool isTransposeMask(ArrayRef<int> Mask);
2238  static bool isTransposeMask(const Constant *Mask) {
2239  assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2240  SmallVector<int, 16> MaskAsInts;
2241  getShuffleMask(Mask, MaskAsInts);
2242  return isTransposeMask(MaskAsInts);
2243  }
2244 
2245  /// Return true if this shuffle transposes the elements of its inputs without
2246  /// changing the length of the vectors. This operation may also be known as a
2247  /// merge or interleave. See the description for isTransposeMask() for the
2248  /// exact specification.
2249  /// Example: shufflevector <4 x n> A, <4 x n> B, <0,4,2,6>
2250  bool isTranspose() const {
2251  return !changesLength() && isTransposeMask(ShuffleMask);
2252  }
2253 
2254  /// Return true if this shuffle mask is an extract subvector mask.
2255  /// A valid extract subvector mask returns a smaller vector from a single
2256  /// source operand. The base extraction index is returned as well.
2257  static bool isExtractSubvectorMask(ArrayRef<int> Mask, int NumSrcElts,
2258  int &Index);
2259  static bool isExtractSubvectorMask(const Constant *Mask, int NumSrcElts,
2260  int &Index) {
2261  assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2262  // Not possible to express a shuffle mask for a scalable vector for this
2263  // case.
2264  if (isa<ScalableVectorType>(Mask->getType()))
2265  return false;
2266  SmallVector<int, 16> MaskAsInts;
2267  getShuffleMask(Mask, MaskAsInts);
2268  return isExtractSubvectorMask(MaskAsInts, NumSrcElts, Index);
2269  }
2270 
2271  /// Return true if this shuffle mask is an extract subvector mask.
2272  bool isExtractSubvectorMask(int &Index) const {
2273  // Not possible to express a shuffle mask for a scalable vector for this
2274  // case.
2275  if (isa<ScalableVectorType>(getType()))
2276  return false;
2277 
2278  int NumSrcElts =
2279  cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
2280  return isExtractSubvectorMask(ShuffleMask, NumSrcElts, Index);
2281  }
2282 
2283  /// Change values in a shuffle permute mask assuming the two vector operands
2284  /// of length InVecNumElts have swapped position.
2286  unsigned InVecNumElts) {
2287  for (int &Idx : Mask) {
2288  if (Idx == -1)
2289  continue;
2290  Idx = Idx < (int)InVecNumElts ? Idx + InVecNumElts : Idx - InVecNumElts;
2291  assert(Idx >= 0 && Idx < (int)InVecNumElts * 2 &&
2292  "shufflevector mask index out of range");
2293  }
2294  }
2295 
2296  // Methods for support type inquiry through isa, cast, and dyn_cast:
2297  static bool classof(const Instruction *I) {
2298  return I->getOpcode() == Instruction::ShuffleVector;
2299  }
2300  static bool classof(const Value *V) {
2301  return isa<Instruction>(V) && classof(cast<Instruction>(V));
2302  }
2303 };
2304 
2305 template <>
2307  : public FixedNumOperandTraits<ShuffleVectorInst, 2> {};
2308 
2310 
2311 //===----------------------------------------------------------------------===//
2312 // ExtractValueInst Class
2313 //===----------------------------------------------------------------------===//
2314 
2315 /// This instruction extracts a struct member or array
2316 /// element value from an aggregate value.
2317 ///
2319  SmallVector<unsigned, 4> Indices;
2320 
2321  ExtractValueInst(const ExtractValueInst &EVI);
2322 
2323  /// Constructors - Create a extractvalue instruction with a base aggregate
2324  /// value and a list of indices. The first ctor can optionally insert before
2325  /// an existing instruction, the second appends the new instruction to the
2326  /// specified BasicBlock.
2327  inline ExtractValueInst(Value *Agg,
2328  ArrayRef<unsigned> Idxs,
2329  const Twine &NameStr,
2330  Instruction *InsertBefore);
2331  inline ExtractValueInst(Value *Agg,
2332  ArrayRef<unsigned> Idxs,
2333  const Twine &NameStr, BasicBlock *InsertAtEnd);
2334 
2335  void init(ArrayRef<unsigned> Idxs, const Twine &NameStr);
2336 
2337 protected:
2338  // Note: Instruction needs to be a friend here to call cloneImpl.
2339  friend class Instruction;
2340 
2341  ExtractValueInst *cloneImpl() const;
2342 
2343 public:
2345  ArrayRef<unsigned> Idxs,
2346  const Twine &NameStr = "",
2347  Instruction *InsertBefore = nullptr) {
2348  return new
2349  ExtractValueInst(Agg, Idxs, NameStr, InsertBefore);
2350  }
2351 
2353  ArrayRef<unsigned> Idxs,
2354  const Twine &NameStr,
2355  BasicBlock *InsertAtEnd) {
2356  return new ExtractValueInst(Agg, Idxs, NameStr, InsertAtEnd);
2357  }
2358 
2359  /// Returns the type of the element that would be extracted
2360  /// with an extractvalue instruction with the specified parameters.
2361  ///
2362  /// Null is returned if the indices are invalid for the specified type.
2363  static Type *getIndexedType(Type *Agg, ArrayRef<unsigned> Idxs);
2364 
2365  using idx_iterator = const unsigned*;
2366 
2367  inline idx_iterator idx_begin() const { return Indices.begin(); }
2368  inline idx_iterator idx_end() const { return Indices.end(); }
2370  return make_range(idx_begin(), idx_end());
2371  }
2372 
2374  return getOperand(0);
2375  }
2376  const Value *getAggregateOperand() const {
2377  return getOperand(0);
2378  }
2379  static unsigned getAggregateOperandIndex() {
2380  return 0U; // get index for modifying correct operand
2381  }
2382 
2384  return Indices;
2385  }
2386 
2387  unsigned getNumIndices() const {
2388  return (unsigned)Indices.size();
2389  }
2390 
2391  bool hasIndices() const {
2392  return true;
2393  }
2394 
2395  // Methods for support type inquiry through isa, cast, and dyn_cast:
2396  static bool classof(const Instruction *I) {
2397  return I->getOpcode() == Instruction::ExtractValue;
2398  }
2399  static bool classof(const Value *V) {
2400  return isa<Instruction>(V) && classof(cast<Instruction>(V));
2401  }
2402 };
2403 
2404 ExtractValueInst::ExtractValueInst(Value *Agg,
2405  ArrayRef<unsigned> Idxs,
2406  const Twine &NameStr,
2407  Instruction *InsertBefore)
2408  : UnaryInstruction(checkGEPType(getIndexedType(Agg->getType(), Idxs)),
2409  ExtractValue, Agg, InsertBefore) {
2410  init(Idxs, NameStr);
2411 }
2412 
2413 ExtractValueInst::ExtractValueInst(Value *Agg,
2414  ArrayRef<unsigned> Idxs,
2415  const Twine &NameStr,
2416  BasicBlock *InsertAtEnd)
2417  : UnaryInstruction(checkGEPType(getIndexedType(Agg->getType(), Idxs)),
2418  ExtractValue, Agg, InsertAtEnd) {
2419  init(Idxs, NameStr);
2420 }
2421 
2422 //===----------------------------------------------------------------------===//
2423 // InsertValueInst Class
2424 //===----------------------------------------------------------------------===//
2425 
2426 /// This instruction inserts a struct field of array element
2427 /// value into an aggregate value.
2428 ///
2430  SmallVector<unsigned, 4> Indices;
2431 
2432  InsertValueInst(const InsertValueInst &IVI);
2433 
2434  /// Constructors - Create a insertvalue instruction with a base aggregate
2435  /// value, a value to insert, and a list of indices. The first ctor can
2436  /// optionally insert before an existing instruction, the second appends
2437  /// the new instruction to the specified BasicBlock.
2438  inline InsertValueInst(Value *Agg, Value *Val,
2439  ArrayRef<unsigned> Idxs,
2440  const Twine &NameStr,
2441  Instruction *InsertBefore);
2442  inline InsertValueInst(Value *Agg, Value *Val,
2443  ArrayRef<unsigned> Idxs,
2444  const Twine &NameStr, BasicBlock *InsertAtEnd);
2445 
2446  /// Constructors - These two constructors are convenience methods because one
2447  /// and two index insertvalue instructions are so common.
2448  InsertValueInst(Value *Agg, Value *Val, unsigned Idx,
2449  const Twine &NameStr = "",
2450  Instruction *InsertBefore = nullptr);
2451  InsertValueInst(Value *Agg, Value *Val, unsigned Idx, const Twine &NameStr,
2452  BasicBlock *InsertAtEnd);
2453 
2454  void init(Value *Agg, Value *Val, ArrayRef<unsigned> Idxs,
2455  const Twine &NameStr);
2456 
2457 protected:
2458  // Note: Instruction needs to be a friend here to call cloneImpl.
2459  friend class Instruction;
2460 
2461  InsertValueInst *cloneImpl() const;
2462 
2463 public:
2464  // allocate space for exactly two operands
2465  void *operator new(size_t s) {
2466  return User::operator new(s, 2);
2467  }
2468 
2469  static InsertValueInst *Create(Value *Agg, Value *Val,
2470  ArrayRef<unsigned> Idxs,
2471  const Twine &NameStr = "",
2472  Instruction *InsertBefore = nullptr) {
2473  return new InsertValueInst(Agg, Val, Idxs, NameStr, InsertBefore);
2474  }
2475 
2476  static InsertValueInst *Create(Value *Agg, Value *Val,
2477  ArrayRef<unsigned> Idxs,
2478  const Twine &NameStr,
2479  BasicBlock *InsertAtEnd) {
2480  return new InsertValueInst(Agg, Val, Idxs, NameStr, InsertAtEnd);
2481  }
2482 
2483  /// Transparently provide more efficient getOperand methods.
2485 
2486  using idx_iterator = const unsigned*;
2487 
2488  inline idx_iterator idx_begin() const { return Indices.begin(); }
2489  inline idx_iterator idx_end() const { return Indices.end(); }
2491  return make_range(idx_begin(), idx_end());
2492  }
2493 
2495  return getOperand(0);
2496  }
2497  const Value *getAggregateOperand() const {
2498  return getOperand(0);
2499  }
2500  static unsigned getAggregateOperandIndex() {
2501  return 0U; // get index for modifying correct operand
2502  }
2503 
2505  return getOperand(1);
2506  }
2508  return getOperand(1);
2509  }
2510  static unsigned getInsertedValueOperandIndex() {
2511  return 1U; // get index for modifying correct operand
2512  }
2513 
2515  return Indices;
2516  }
2517 
2518  unsigned getNumIndices() const {
2519  return (unsigned)Indices.size();
2520  }
2521 
2522  bool hasIndices() const {
2523  return true;
2524  }
2525 
2526  // Methods for support type inquiry through isa, cast, and dyn_cast:
2527  static bool classof(const Instruction *I) {
2528  return I->getOpcode() == Instruction::InsertValue;
2529  }
2530  static bool classof(const Value *V) {
2531  return isa<Instruction>(V) && classof(cast<Instruction>(V));
2532  }
2533 };
2534 
2535 template <>
2537  public FixedNumOperandTraits<InsertValueInst, 2> {
2538 };
2539 
2540 InsertValueInst::InsertValueInst(Value *Agg,
2541  Value *Val,
2542  ArrayRef<unsigned> Idxs,
2543  const Twine &NameStr,
2544  Instruction *InsertBefore)
2545  : Instruction(Agg->getType(), InsertValue,
2546  OperandTraits<InsertValueInst>::op_begin(this),
2547  2, InsertBefore) {
2548  init(Agg, Val, Idxs, NameStr);
2549 }
2550 
2551 InsertValueInst::InsertValueInst(Value *Agg,
2552  Value *Val,
2553  ArrayRef<unsigned> Idxs,
2554  const Twine &NameStr,
2555  BasicBlock *InsertAtEnd)
2556  : Instruction(Agg->getType(), InsertValue,
2557  OperandTraits<InsertValueInst>::op_begin(this),
2558  2, InsertAtEnd) {
2559  init(Agg, Val, Idxs, NameStr);
2560 }
2561 
2563 
2564 //===----------------------------------------------------------------------===//
2565 // PHINode Class
2566 //===----------------------------------------------------------------------===//
2567 
2568 // PHINode - The PHINode class is used to represent the magical mystical PHI
2569 // node, that can not exist in nature, but can be synthesized in a computer
2570 // scientist's overactive imagination.
2571 //
2572 class PHINode : public Instruction {
2573  /// The number of operands actually allocated. NumOperands is
2574  /// the number actually in use.
2575  unsigned ReservedSpace;
2576 
2577  PHINode(const PHINode &PN);
2578 
2579  explicit PHINode(Type *Ty, unsigned NumReservedValues,
2580  const Twine &NameStr = "",
2581  Instruction *InsertBefore = nullptr)
2582  : Instruction(Ty, Instruction::PHI, nullptr, 0, InsertBefore),
2583  ReservedSpace(NumReservedValues) {
2584  setName(NameStr);
2585  allocHungoffUses(ReservedSpace);
2586  }
2587 
2588  PHINode(Type *Ty, unsigned NumReservedValues, const Twine &NameStr,
2589  BasicBlock *InsertAtEnd)
2590  : Instruction(Ty, Instruction::PHI, nullptr, 0, InsertAtEnd),
2591  ReservedSpace(NumReservedValues) {
2592  setName(NameStr);
2593  allocHungoffUses(ReservedSpace);
2594  }
2595 
2596 protected:
2597  // Note: Instruction needs to be a friend here to call cloneImpl.
2598  friend class Instruction;
2599 
2600  PHINode *cloneImpl() const;
2601 
2602  // allocHungoffUses - this is more complicated than the generic
2603  // User::allocHungoffUses, because we have to allocate Uses for the incoming
2604  // values and pointers to the incoming blocks, all in one allocation.
2605  void allocHungoffUses(unsigned N) {
2606  User::allocHungoffUses(N, /* IsPhi */ true);
2607  }
2608 
2609 public:
2610  /// Constructors - NumReservedValues is a hint for the number of incoming
2611  /// edges that this phi node will have (use 0 if you really have no idea).
2612  static PHINode *Create(Type *Ty, unsigned NumReservedValues,
2613  const Twine &NameStr = "",
2614  Instruction *InsertBefore = nullptr) {
2615  return new PHINode(Ty, NumReservedValues, NameStr, InsertBefore);
2616  }
2617 
2618  static PHINode *Create(Type *Ty, unsigned NumReservedValues,
2619  const Twine &NameStr, BasicBlock *InsertAtEnd) {
2620  return new PHINode(Ty, NumReservedValues, NameStr, InsertAtEnd);
2621  }
2622 
2623  /// Provide fast operand accessors
2625 
2626  // Block iterator interface. This provides access to the list of incoming
2627  // basic blocks, which parallels the list of incoming values.
2628 
2631 
2633  return reinterpret_cast<block_iterator>(op_begin() + ReservedSpace);
2634  }
2635 
2637  return reinterpret_cast<const_block_iterator>(op_begin() + ReservedSpace);
2638  }
2639 
2641  return block_begin() + getNumOperands();
2642  }
2643 
2645  return block_begin() + getNumOperands();
2646  }
2647 
2649  return make_range(block_begin(), block_end());
2650  }
2651 
2653  return make_range(block_begin(), block_end());
2654  }
2655 
2656  op_range incoming_values() { return operands(); }
2657 
2658  const_op_range incoming_values() const { return operands(); }
2659 
2660  /// Return the number of incoming edges
2661  ///
2662  unsigned getNumIncomingValues() const { return getNumOperands(); }
2663 
2664  /// Return incoming value number x
2665  ///
2666  Value *getIncomingValue(unsigned i) const {
2667  return getOperand(i);
2668  }
2669  void setIncomingValue(unsigned i, Value *V) {
2670  assert(V && "PHI node got a null value!");
2671  assert(getType() == V->getType() &&
2672  "All operands to PHI node must be the same type as the PHI node!");
2673  setOperand(i, V);
2674  }
2675 
2676  static unsigned getOperandNumForIncomingValue(unsigned i) {
2677  return i;
2678  }
2679 
2680  static unsigned getIncomingValueNumForOperand(unsigned i) {
2681  return i;
2682  }
2683 
2684  /// Return incoming basic block number @p i.
2685  ///
2686  BasicBlock *getIncomingBlock(unsigned i) const {
2687  return block_begin()[i];
2688  }
2689 
2690  /// Return incoming basic block corresponding
2691  /// to an operand of the PHI.
2692  ///
2693  BasicBlock *getIncomingBlock(const Use &U) const {
2694  assert(this == U.getUser() && "Iterator doesn't point to PHI's Uses?");
2695  return getIncomingBlock(unsigned(&U - op_begin()));
2696  }
2697 
2698  /// Return incoming basic block corresponding
2699  /// to value use iterator.
2700  ///
2702  return getIncomingBlock(I.getUse());
2703  }
2704 
2705  void setIncomingBlock(unsigned i, BasicBlock *BB) {
2706  assert(BB && "PHI node got a null basic block!");
2707  block_begin()[i] = BB;
2708  }
2709 
2710  /// Replace every incoming basic block \p Old to basic block \p New.
2712  assert(New && Old && "PHI node got a null basic block!");
2713  for (unsigned Op = 0, NumOps = getNumOperands(); Op != NumOps; ++Op)
2714  if (getIncomingBlock(Op) == Old)
2715  setIncomingBlock(Op, New);
2716  }
2717 
2718  /// Add an incoming value to the end of the PHI list
2719  ///
2721  if (getNumOperands() == ReservedSpace)
2722  growOperands(); // Get more space!
2723  // Initialize some new operands.
2724  setNumHungOffUseOperands(getNumOperands() + 1);
2725  setIncomingValue(getNumOperands() - 1, V);
2726  setIncomingBlock(getNumOperands() - 1, BB);
2727  }
2728 
2729  /// Remove an incoming value. This is useful if a
2730  /// predecessor basic block is deleted. The value removed is returned.
2731  ///
2732  /// If the last incoming value for a PHI node is removed (and DeletePHIIfEmpty
2733  /// is true), the PHI node is destroyed and any uses of it are replaced with
2734  /// dummy values. The only time there should be zero incoming values to a PHI
2735  /// node is when the block is dead, so this strategy is sound.
2736  ///
2737  Value *removeIncomingValue(unsigned Idx, bool DeletePHIIfEmpty = true);
2738 
2739  Value *removeIncomingValue(const BasicBlock *BB, bool DeletePHIIfEmpty=true) {
2740  int Idx = getBasicBlockIndex(BB);
2741  assert(Idx >= 0 && "Invalid basic block argument to remove!");
2742  return removeIncomingValue(Idx, DeletePHIIfEmpty);
2743  }
2744 
2745  /// Return the first index of the specified basic
2746  /// block in the value list for this PHI. Returns -1 if no instance.
2747  ///
2748  int getBasicBlockIndex(const BasicBlock *BB) const {
2749  for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
2750  if (block_begin()[i] == BB)
2751  return i;
2752  return -1;
2753  }
2754 
2756  int Idx = getBasicBlockIndex(BB);
2757  assert(Idx >= 0 && "Invalid basic block argument!");
2758  return getIncomingValue(Idx);
2759  }
2760 
2761  /// Set every incoming value(s) for block \p BB to \p V.
2763  assert(BB && "PHI node got a null basic block!");
2764  bool Found = false;
2765  for (unsigned Op = 0, NumOps = getNumOperands(); Op != NumOps; ++Op)
2766  if (getIncomingBlock(Op) == BB) {
2767  Found = true;
2768  setIncomingValue(Op, V);
2769  }
2770  (void)Found;
2771  assert(Found && "Invalid basic block argument to set!");
2772  }
2773 
2774  /// If the specified PHI node always merges together the
2775  /// same value, return the value, otherwise return null.
2776  Value *hasConstantValue() const;
2777 
2778  /// Whether the specified PHI node always merges
2779  /// together the same value, assuming undefs are equal to a unique
2780  /// non-undef value.
2781  bool hasConstantOrUndefValue() const;
2782 
2783  /// If the PHI node is complete which means all of its parent's predecessors
2784  /// have incoming value in this PHI, return true, otherwise return false.
2785  bool isComplete() const {
2787  [this](const BasicBlock *Pred) {
2788  return getBasicBlockIndex(Pred) >= 0;
2789  });
2790  }
2791 
2792  /// Methods for support type inquiry through isa, cast, and dyn_cast:
2793  static bool classof(const Instruction *I) {
2794  return I->getOpcode() == Instruction::PHI;
2795  }
2796  static bool classof(const Value *V) {
2797  return isa<Instruction>(V) && classof(cast<Instruction>(V));
2798  }
2799 
2800 private:
2801  void growOperands();
2802 };
2803 
2804 template <>
2806 };
2807 
2809 
2810 //===----------------------------------------------------------------------===//
2811 // LandingPadInst Class
2812 //===----------------------------------------------------------------------===//
2813 
2814 //===---------------------------------------------------------------------------
2815 /// The landingpad instruction holds all of the information
2816 /// necessary to generate correct exception handling. The landingpad instruction
2817 /// cannot be moved from the top of a landing pad block, which itself is
2818 /// accessible only from the 'unwind' edge of an invoke. This uses the
2819 /// SubclassData field in Value to store whether or not the landingpad is a
2820 /// cleanup.
2821 ///
2822 class LandingPadInst : public Instruction {
2823  using CleanupField = BoolBitfieldElementT<0>;
2824 
2825  /// The number of operands actually allocated. NumOperands is
2826  /// the number actually in use.
2827  unsigned ReservedSpace;
2828 
2829  LandingPadInst(const LandingPadInst &LP);
2830 
2831 public:
2833 
2834 private:
2835  explicit LandingPadInst(Type *RetTy, unsigned NumReservedValues,
2836  const Twine &NameStr, Instruction *InsertBefore);
2837  explicit LandingPadInst(Type *RetTy, unsigned NumReservedValues,
2838  const Twine &NameStr, BasicBlock *InsertAtEnd);
2839 
2840  // Allocate space for exactly zero operands.
2841  void *operator new(size_t s) {
2842  return User::operator new(s);
2843  }
2844 
2845  void growOperands(unsigned Size);
2846  void init(unsigned NumReservedValues, const Twine &NameStr);
2847 
2848 protected:
2849  // Note: Instruction needs to be a friend here to call cloneImpl.
2850  friend class Instruction;
2851 
2852  LandingPadInst *cloneImpl() const;
2853 
2854 public:
2855  /// Constructors - NumReservedClauses is a hint for the number of incoming
2856  /// clauses that this landingpad will have (use 0 if you really have no idea).
2857  static LandingPadInst *Create(Type *RetTy, unsigned NumReservedClauses,
2858  const Twine &NameStr = "",
2859  Instruction *InsertBefore = nullptr);
2860  static LandingPadInst *Create(Type *RetTy, unsigned NumReservedClauses,
2861  const Twine &NameStr, BasicBlock *InsertAtEnd);
2862 
2863  /// Provide fast operand accessors
2865 
2866  /// Return 'true' if this landingpad instruction is a
2867  /// cleanup. I.e., it should be run when unwinding even if its landing pad
2868  /// doesn't catch the exception.
2869  bool isCleanup() const { return getSubclassData<CleanupField>(); }
2870 
2871  /// Indicate that this landingpad instruction is a cleanup.
2872  void setCleanup(bool V) { setSubclassData<CleanupField>(V); }
2873 
2874  /// Add a catch or filter clause to the landing pad.
2875  void addClause(Constant *ClauseVal);
2876 
2877  /// Get the value of the clause at index Idx. Use isCatch/isFilter to
2878  /// determine what type of clause this is.
2879  Constant *getClause(unsigned Idx) const {
2880  return cast<Constant>(getOperandList()[Idx]);
2881  }
2882 
2883  /// Return 'true' if the clause and index Idx is a catch clause.
2884  bool isCatch(unsigned Idx) const {
2885  return !isa<ArrayType>(getOperandList()[Idx]->getType());
2886  }
2887 
2888  /// Return 'true' if the clause and index Idx is a filter clause.
2889  bool isFilter(unsigned Idx) const {
2890  return isa<ArrayType>(getOperandList()[Idx]->getType());
2891  }
2892 
2893  /// Get the number of clauses for this landing pad.
2894  unsigned getNumClauses() const { return getNumOperands(); }
2895 
2896  /// Grow the size of the operand list to accommodate the new
2897  /// number of clauses.
2898  void reserveClauses(unsigned Size) { growOperands(Size); }
2899 
2900  // Methods for support type inquiry through isa, cast, and dyn_cast:
2901  static bool classof(const Instruction *I) {
2902  return I->getOpcode() == Instruction::LandingPad;
2903  }
2904  static bool classof(const Value *V) {
2905  return isa<Instruction>(V) && classof(cast<Instruction>(V));
2906  }
2907 };
2908 
2909 template <>
2911 };
2912 
2914 
2915 //===----------------------------------------------------------------------===//
2916 // ReturnInst Class
2917 //===----------------------------------------------------------------------===//
2918 
2919 //===---------------------------------------------------------------------------
2920 /// Return a value (possibly void), from a function. Execution
2921 /// does not continue in this function any longer.
2922 ///
2923 class ReturnInst : public Instruction {
2924  ReturnInst(const ReturnInst &RI);
2925 
2926 private:
2927  // ReturnInst constructors:
2928  // ReturnInst() - 'ret void' instruction
2929  // ReturnInst( null) - 'ret void' instruction
2930  // ReturnInst(Value* X) - 'ret X' instruction
2931  // ReturnInst( null, Inst *I) - 'ret void' instruction, insert before I
2932  // ReturnInst(Value* X, Inst *I) - 'ret X' instruction, insert before I
2933  // ReturnInst( null, BB *B) - 'ret void' instruction, insert @ end of B
2934  // ReturnInst(Value* X, BB *B) - 'ret X' instruction, insert @ end of B
2935  //
2936  // NOTE: If the Value* passed is of type void then the constructor behaves as
2937  // if it was passed NULL.
2938  explicit ReturnInst(LLVMContext &C, Value *retVal = nullptr,
2939  Instruction *InsertBefore = nullptr);
2940  ReturnInst(LLVMContext &C, Value *retVal, BasicBlock *InsertAtEnd);
2941  explicit ReturnInst(LLVMContext &C, BasicBlock *InsertAtEnd);
2942 
2943 protected:
2944  // Note: Instruction needs to be a friend here to call cloneImpl.
2945  friend class Instruction;
2946 
2947  ReturnInst *cloneImpl() const;
2948 
2949 public:
2950  static ReturnInst* Create(LLVMContext &C, Value *retVal = nullptr,
2951  Instruction *InsertBefore = nullptr) {
2952  return new(!!retVal) ReturnInst(C, retVal, InsertBefore);
2953  }
2954 
2955  static ReturnInst* Create(LLVMContext &C, Value *retVal,
2956  BasicBlock *InsertAtEnd) {
2957  return new(!!retVal) ReturnInst(C, retVal, InsertAtEnd);
2958  }
2959 
2960  static ReturnInst* Create(LLVMContext &C, BasicBlock *InsertAtEnd) {
2961  return new(0) ReturnInst(C, InsertAtEnd);
2962  }
2963 
2964  /// Provide fast operand accessors
2966 
2967  /// Convenience accessor. Returns null if there is no return value.
2969  return getNumOperands() != 0 ? getOperand(0) : nullptr;
2970  }
2971 
2972  unsigned getNumSuccessors() const { return 0; }
2973 
2974  // Methods for support type inquiry through isa, cast, and dyn_cast:
2975  static bool classof(const Instruction *I) {
2976  return (I->getOpcode() == Instruction::Ret);
2977  }
2978  static bool classof(const Value *V) {
2979  return isa<Instruction>(V) && classof(cast<Instruction>(V));
2980  }
2981 
2982 private:
2983  BasicBlock *getSuccessor(unsigned idx) const {
2984  llvm_unreachable("ReturnInst has no successors!");
2985  }
2986 
2987  void setSuccessor(unsigned idx, BasicBlock *B) {
2988  llvm_unreachable("ReturnInst has no successors!");
2989  }
2990 };
2991 
2992 template <>
2993 struct OperandTraits<ReturnInst> : public VariadicOperandTraits<ReturnInst> {
2994 };
2995 
2997 
2998 //===----------------------------------------------------------------------===//
2999 // BranchInst Class
3000 //===----------------------------------------------------------------------===//
3001 
3002 //===---------------------------------------------------------------------------
3003 /// Conditional or Unconditional Branch instruction.
3004 ///
3005 class BranchInst : public Instruction {
3006  /// Ops list - Branches are strange. The operands are ordered:
3007  /// [Cond, FalseDest,] TrueDest. This makes some accessors faster because
3008  /// they don't have to check for cond/uncond branchness. These are mostly
3009  /// accessed relative from op_end().
3010  BranchInst(const BranchInst &BI);
3011  // BranchInst constructors (where {B, T, F} are blocks, and C is a condition):
3012  // BranchInst(BB *B) - 'br B'
3013  // BranchInst(BB* T, BB *F, Value *C) - 'br C, T, F'
3014  // BranchInst(BB* B, Inst *I) - 'br B' insert before I
3015  // BranchInst(BB* T, BB *F, Value *C, Inst *I) - 'br C, T, F', insert before I
3016  // BranchInst(BB* B, BB *I) - 'br B' insert at end
3017  // BranchInst(BB* T, BB *F, Value *C, BB *I) - 'br C, T, F', insert at end
3018  explicit BranchInst(BasicBlock *IfTrue, Instruction *InsertBefore = nullptr);
3019  BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond,
3020  Instruction *InsertBefore = nullptr);
3021  BranchInst(BasicBlock *IfTrue, BasicBlock *InsertAtEnd);
3022  BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond,
3023  BasicBlock *InsertAtEnd);
3024 
3025  void AssertOK();
3026 
3027 protected:
3028  // Note: Instruction needs to be a friend here to call cloneImpl.
3029  friend class Instruction;
3030 
3031  BranchInst *cloneImpl() const;
3032 
3033 public:
3034  /// Iterator type that casts an operand to a basic block.
3035  ///
3036  /// This only makes sense because the successors are stored as adjacent
3037  /// operands for branch instructions.
3039  : iterator_adaptor_base<succ_op_iterator, value_op_iterator,
3040  std::random_access_iterator_tag, BasicBlock *,
3041  ptrdiff_t, BasicBlock *, BasicBlock *> {
3042  explicit succ_op_iterator(value_op_iterator I) : iterator_adaptor_base(I) {}
3043 
3044  BasicBlock *operator*() const { return cast<BasicBlock>(*I); }
3045  BasicBlock *operator->() const { return operator*(); }
3046  };
3047 
3048  /// The const version of `succ_op_iterator`.
3050  : iterator_adaptor_base<const_succ_op_iterator, const_value_op_iterator,
3051  std::random_access_iterator_tag,
3052  const BasicBlock *, ptrdiff_t, const BasicBlock *,
3053  const BasicBlock *> {
3055  : iterator_adaptor_base(I) {}
3056 
3057  const BasicBlock *operator*() const { return cast<BasicBlock>(*I); }
3058  const BasicBlock *operator->() const { return operator*(); }
3059  };
3060 
3061  static BranchInst *Create(BasicBlock *IfTrue,
3062  Instruction *InsertBefore = nullptr) {
3063  return new(1) BranchInst(IfTrue, InsertBefore);
3064  }
3065 
3066  static BranchInst *Create(BasicBlock *IfTrue, BasicBlock *IfFalse,
3067  Value *Cond, Instruction *InsertBefore = nullptr) {
3068  return new(3) BranchInst(IfTrue, IfFalse, Cond, InsertBefore);
3069  }
3070 
3071  static BranchInst *Create(BasicBlock *IfTrue, BasicBlock *InsertAtEnd) {
3072  return new(1) BranchInst(IfTrue, InsertAtEnd);
3073  }
3074 
3075  static BranchInst *Create(BasicBlock *IfTrue, BasicBlock *IfFalse,
3076  Value *Cond, BasicBlock *InsertAtEnd) {
3077  return new(3) BranchInst(IfTrue, IfFalse, Cond, InsertAtEnd);
3078  }
3079 
3080  /// Transparently provide more efficient getOperand methods.
3082 
3083  bool isUnconditional() const { return getNumOperands() == 1; }
3084  bool isConditional() const { return getNumOperands() == 3; }
3085 
3086  Value *getCondition() const {
3087  assert(isConditional() && "Cannot get condition of an uncond branch!");
3088  return Op<-3>();
3089  }
3090 
3091  void setCondition(Value *V) {
3092  assert(isConditional() && "Cannot set condition of unconditional branch!");
3093  Op<-3>() = V;
3094  }
3095 
3096  unsigned getNumSuccessors() const { return 1+isConditional(); }
3097 
3098  BasicBlock *getSuccessor(unsigned i) const {
3099  assert(i < getNumSuccessors() && "Successor # out of range for Branch!");
3100  return cast_or_null<BasicBlock>((&Op<-1>() - i)->get());
3101  }
3102 
3103  void setSuccessor(unsigned idx, BasicBlock *NewSucc) {
3104  assert(idx < getNumSuccessors() && "Successor # out of range for Branch!");
3105  *(&Op<-1>() - idx) = NewSucc;
3106  }
3107 
3108  /// Swap the successors of this branch instruction.
3109  ///
3110  /// Swaps the successors of the branch instruction. This also swaps any
3111  /// branch weight metadata associated with the instruction so that it
3112  /// continues to map correctly to each operand.
3113  void swapSuccessors();
3114 
3116  return make_range(
3117  succ_op_iterator(std::next(value_op_begin(), isConditional() ? 1 : 0)),
3118  succ_op_iterator(value_op_end()));
3119  }
3120 
3123  std::next(value_op_begin(), isConditional() ? 1 : 0)),
3124  const_succ_op_iterator(value_op_end()));
3125  }
3126 
3127  // Methods for support type inquiry through isa, cast, and dyn_cast:
3128  static bool classof(const Instruction *I) {
3129  return (I->getOpcode() == Instruction::Br);
3130  }
3131  static bool classof(const Value *V) {
3132  return isa<Instruction>(V) && classof(cast<Instruction>(V));
3133  }
3134 };
3135 
3136 template <>
3137 struct OperandTraits<BranchInst> : public VariadicOperandTraits<BranchInst, 1> {
3138 };
3139 
3141 
3142 //===----------------------------------------------------------------------===//
3143 // SwitchInst Class
3144 //===----------------------------------------------------------------------===//
3145 
3146 //===---------------------------------------------------------------------------
3147 /// Multiway switch
3148 ///
3149 class SwitchInst : public Instruction {
3150  unsigned ReservedSpace;
3151 
3152  // Operand[0] = Value to switch on
3153  // Operand[1] = Default basic block destination
3154  // Operand[2n ] = Value to match
3155  // Operand[2n+1] = BasicBlock to go to on match
3156  SwitchInst(const SwitchInst &SI);
3157 
3158  /// Create a new switch instruction, specifying a value to switch on and a
3159  /// default destination. The number of additional cases can be specified here
3160  /// to make memory allocation more efficient. This constructor can also
3161  /// auto-insert before another instruction.
3162  SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases,
3163  Instruction *InsertBefore);
3164 
3165  /// Create a new switch instruction, specifying a value to switch on and a
3166  /// default destination. The number of additional cases can be specified here
3167  /// to make memory allocation more efficient. This constructor also
3168  /// auto-inserts at the end of the specified BasicBlock.
3169  SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases,
3170  BasicBlock *InsertAtEnd);
3171 
3172  // allocate space for exactly zero operands
3173  void *operator new(size_t s) {
3174  return User::operator new(s);
3175  }
3176 
3177  void init(Value *Value, BasicBlock *Default, unsigned NumReserved);
3178  void growOperands();
3179 
3180 protected:
3181  // Note: Instruction needs to be a friend here to call cloneImpl.
3182  friend class Instruction;
3183 
3184  SwitchInst *cloneImpl() const;
3185 
3186 public:
3187  // -2
3188  static const unsigned DefaultPseudoIndex = static_cast<unsigned>(~0L-1);
3189 
3190  template <typename CaseHandleT> class CaseIteratorImpl;
3191 
3192  /// A handle to a particular switch case. It exposes a convenient interface
3193  /// to both the case value and the successor block.
3194  ///
3195  /// We define this as a template and instantiate it to form both a const and
3196  /// non-const handle.
3197  template <typename SwitchInstT, typename ConstantIntT, typename BasicBlockT>
3199  // Directly befriend both const and non-const iterators.
3200  friend class SwitchInst::CaseIteratorImpl<
3201  CaseHandleImpl<SwitchInstT, ConstantIntT, BasicBlockT>>;
3202 
3203  protected:
3204  // Expose the switch type we're parameterized with to the iterator.
3205  using SwitchInstType = SwitchInstT;
3206 
3207  SwitchInstT *SI;
3209 
3210  CaseHandleImpl() = default;
3211  CaseHandleImpl(SwitchInstT *SI, ptrdiff_t Index) : SI(SI), Index(Index) {}
3212 
3213  public:
3214  /// Resolves case value for current case.
3215  ConstantIntT *getCaseValue() const {
3216  assert((unsigned)Index < SI->getNumCases() &&
3217  "Index out the number of cases.");
3218  return reinterpret_cast<ConstantIntT *>(SI->getOperand(2 + Index * 2));
3219  }
3220 
3221  /// Resolves successor for current case.
3222  BasicBlockT *getCaseSuccessor() const {
3223  assert(((unsigned)Index < SI->getNumCases() ||
3224  (unsigned)Index == DefaultPseudoIndex) &&
3225  "Index out the number of cases.");
3226  return SI->getSuccessor(getSuccessorIndex());
3227  }
3228 
3229  /// Returns number of current case.
3230  unsigned getCaseIndex() const { return Index; }
3231 
3232  /// Returns successor index for current case successor.
3233  unsigned getSuccessorIndex() const {
3234  assert(((unsigned)Index == DefaultPseudoIndex ||
3235  (unsigned)Index < SI->getNumCases()) &&
3236  "Index out the number of cases.");
3237  return (unsigned)Index != DefaultPseudoIndex ? Index + 1 : 0;
3238  }
3239 
3240  bool operator==(const CaseHandleImpl &RHS) const {
3241  assert(SI == RHS.SI && "Incompatible operators.");
3242  return Index == RHS.Index;
3243  }
3244  };
3245 
3246  using ConstCaseHandle =
3248 
3250  : public CaseHandleImpl<SwitchInst, ConstantInt, BasicBlock> {
3252 
3253  public:
3255 
3256  /// Sets the new value for current case.
3258  assert((unsigned)Index < SI->getNumCases() &&
3259  "Index out the number of cases.");
3260  SI->setOperand(2 + Index*2, reinterpret_cast<Value*>(V));
3261  }
3262 
3263  /// Sets the new successor for current case.
3265  SI->setSuccessor(getSuccessorIndex(), S);
3266  }
3267  };
3268 
3269  template <typename CaseHandleT>
3270  class CaseIteratorImpl
3271  : public iterator_facade_base<CaseIteratorImpl<CaseHandleT>,
3272  std::random_access_iterator_tag,
3273  CaseHandleT> {
3274  using SwitchInstT = typename CaseHandleT::SwitchInstType;
3275 
3276  CaseHandleT Case;
3277 
3278  public:
3279  /// Default constructed iterator is in an invalid state until assigned to
3280  /// a case for a particular switch.
3281  CaseIteratorImpl() = default;
3282 
3283  /// Initializes case iterator for given SwitchInst and for given
3284  /// case number.
3285  CaseIteratorImpl(SwitchInstT *SI, unsigned CaseNum) : Case(SI, CaseNum) {}
3286 
3287  /// Initializes case iterator for given SwitchInst and for given
3288  /// successor index.
3290  unsigned SuccessorIndex) {
3291  assert(SuccessorIndex < SI->getNumSuccessors() &&
3292  "Successor index # out of range!");
3293  return SuccessorIndex != 0 ? CaseIteratorImpl(SI, SuccessorIndex - 1)
3294  : CaseIteratorImpl(SI, DefaultPseudoIndex);
3295  }
3296 
3297  /// Support converting to the const variant. This will be a no-op for const
3298  /// variant.
3300  return CaseIteratorImpl<ConstCaseHandle>(Case.SI, Case.Index);
3301  }
3302 
3304  // Check index correctness after addition.
3305  // Note: Index == getNumCases() means end().
3306  assert(Case.Index + N >= 0 &&
3307  (unsigned)(Case.Index + N) <= Case.SI->getNumCases() &&
3308  "Case.Index out the number of cases.");
3309  Case.Index += N;
3310  return *this;
3311  }
3313  // Check index correctness after subtraction.
3314  // Note: Case.Index == getNumCases() means end().
3315  assert(Case.Index - N >= 0 &&
3316  (unsigned)(Case.Index - N) <= Case.SI->getNumCases() &&
3317  "Case.Index out the number of cases.");
3318  Case.Index -= N;
3319  return *this;
3320  }
3322  assert(Case.SI == RHS.Case.SI && "Incompatible operators.");
3323  return Case.Index - RHS.Case.Index;
3324  }
3325  bool operator==(const CaseIteratorImpl &RHS) const {
3326  return Case == RHS.Case;
3327  }
3328  bool operator<(const CaseIteratorImpl &RHS) const {
3329  assert(Case.SI == RHS.Case.SI && "Incompatible operators.");
3330  return Case.Index < RHS.Case.Index;
3331  }
3332  CaseHandleT &operator*() { return Case; }
3333  const CaseHandleT &operator*() const { return Case; }
3334  };
3335 
3338 
3340  unsigned NumCases,
3341  Instruction *InsertBefore = nullptr) {
3342  return new SwitchInst(Value, Default, NumCases, InsertBefore);
3343  }
3344 
3346  unsigned NumCases, BasicBlock *InsertAtEnd) {
3347  return new SwitchInst(Value, Default, NumCases, InsertAtEnd);
3348  }
3349 
3350  /// Provide fast operand accessors
3352 
3353  // Accessor Methods for Switch stmt
3354  Value *getCondition() const { return getOperand(0); }
3355  void setCondition(Value *V) { setOperand(0, V); }
3356 
3358  return cast<BasicBlock>(getOperand(1));
3359  }
3360 
3361  void setDefaultDest(BasicBlock *DefaultCase) {
3362  setOperand(1, reinterpret_cast<Value*>(DefaultCase));
3363  }
3364 
3365  /// Return the number of 'cases' in this switch instruction, excluding the
3366  /// default case.
3367  unsigned getNumCases() const {
3368  return getNumOperands()/2 - 1;
3369  }
3370 
3371  /// Returns a read/write iterator that points to the first case in the
3372  /// SwitchInst.
3374  return CaseIt(this, 0);
3375  }
3376 
3377  /// Returns a read-only iterator that points to the first case in the
3378  /// SwitchInst.
3380  return ConstCaseIt(this, 0);
3381  }
3382 
3383  /// Returns a read/write iterator that points one past the last in the
3384  /// SwitchInst.
3386  return CaseIt(this, getNumCases());
3387  }
3388 
3389  /// Returns a read-only iterator that points one past the last in the
3390  /// SwitchInst.
3392  return ConstCaseIt(this, getNumCases());
3393  }
3394 
3395  /// Iteration adapter for range-for loops.
3397  return make_range(case_begin(), case_end());
3398  }
3399 
3400  /// Constant iteration adapter for range-for loops.
3402  return make_range(case_begin(), case_end());
3403  }
3404 
3405  /// Returns an iterator that points to the default case.
3406  /// Note: this iterator allows to resolve successor only. Attempt
3407  /// to resolve case value causes an assertion.
3408  /// Also note, that increment and decrement also causes an assertion and
3409  /// makes iterator invalid.
3411  return CaseIt(this, DefaultPseudoIndex);
3412  }
3414  return ConstCaseIt(this, DefaultPseudoIndex);
3415  }
3416 
3417  /// Search all of the case values for the specified constant. If it is
3418  /// explicitly handled, return the case iterator of it, otherwise return
3419  /// default case iterator to indicate that it is handled by the default
3420  /// handler.
3423  cases(), [C](CaseHandle &Case) { return Case.getCaseValue() == C; });
3424  if (I != case_end())
3425  return I;
3426 
3427  return case_default();
3428  }
3431  return Case.getCaseValue() == C;
3432  });
3433  if (I != case_end())
3434  return I;
3435 
3436  return case_default();
3437  }
3438 
3439  /// Finds the unique case value for a given successor. Returns null if the
3440  /// successor is not found, not unique, or is the default case.
3442  if (BB == getDefaultDest())
3443  return nullptr;
3444 
3445  ConstantInt *CI = nullptr;
3446  for (auto Case : cases()) {
3447  if (Case.getCaseSuccessor() != BB)
3448  continue;
3449 
3450  if (CI)
3451  return nullptr; // Multiple cases lead to BB.
3452 
3453  CI = Case.getCaseValue();
3454  }
3455 
3456  return CI;
3457  }
3458 
3459  /// Add an entry to the switch instruction.
3460  /// Note:
3461  /// This action invalidates case_end(). Old case_end() iterator will
3462  /// point to the added case.
3463  void addCase(ConstantInt *OnVal, BasicBlock *Dest);
3464 
3465  /// This method removes the specified case and its successor from the switch
3466  /// instruction. Note that this operation may reorder the remaining cases at
3467  /// index idx and above.
3468  /// Note:
3469  /// This action invalidates iterators for all cases following the one removed,
3470  /// including the case_end() iterator. It returns an iterator for the next
3471  /// case.
3472  CaseIt removeCase(CaseIt I);
3473 
3474  unsigned getNumSuccessors() const { return getNumOperands()/2; }
3475  BasicBlock *getSuccessor(unsigned idx) const {
3476  assert(idx < getNumSuccessors() &&"Successor idx out of range for switch!");
3477  return cast<BasicBlock>(getOperand(idx*2+1));
3478  }
3479  void setSuccessor(unsigned idx, BasicBlock *NewSucc) {
3480  assert(idx < getNumSuccessors() && "Successor # out of range for switch!");
3481  setOperand(idx * 2 + 1, NewSucc);
3482  }
3483 
3484  // Methods for support type inquiry through isa, cast, and dyn_cast:
3485  static bool classof(const Instruction *I) {
3486  return I->getOpcode() == Instruction::Switch;
3487  }
3488  static bool classof(const Value *V) {
3489  return isa<Instruction>(V) && classof(cast<Instruction>(V));
3490  }
3491 };
3492 
3493 /// A wrapper class to simplify modification of SwitchInst cases along with
3494 /// their prof branch_weights metadata.
3496  SwitchInst &SI;
3498  bool Changed = false;
3499 
3500 protected:
3501  static MDNode *getProfBranchWeightsMD(const SwitchInst &SI);
3502 
3504 
3505  void init();
3506 
3507 public:
3509  SwitchInst *operator->() { return &SI; }
3510  SwitchInst &operator*() { return SI; }
3511  operator SwitchInst *() { return &SI; }
3512 
3514 
3516  if (Changed)
3517  SI.setMetadata(LLVMContext::MD_prof, buildProfBranchWeightsMD());
3518  }
3519 
3520  /// Delegate the call to the underlying SwitchInst::removeCase() and remove
3521  /// correspondent branch weight.
3523 
3524  /// Delegate the call to the underlying SwitchInst::addCase() and set the
3525  /// specified branch weight for the added case.
3526  void addCase(ConstantInt *OnVal, BasicBlock *Dest, CaseWeightOpt W);
3527 
3528  /// Delegate the call to the underlying SwitchInst::eraseFromParent() and mark
3529  /// this object to not touch the underlying SwitchInst in destructor.
3531 
3532  void setSuccessorWeight(unsigned idx, CaseWeightOpt W);
3533  CaseWeightOpt getSuccessorWeight(unsigned idx);
3534 
3535  static CaseWeightOpt getSuccessorWeight(const SwitchInst &SI, unsigned idx);
3536 };
3537 
3538 template <>
3540 };
3541 
3543 
3544 //===----------------------------------------------------------------------===//
3545 // IndirectBrInst Class
3546 //===----------------------------------------------------------------------===//
3547 
3548 //===---------------------------------------------------------------------------
3549 /// Indirect Branch Instruction.
3550 ///
3551 class IndirectBrInst : public Instruction {
3552  unsigned ReservedSpace;
3553 
3554  // Operand[0] = Address to jump to
3555  // Operand[n+1] = n-th destination
3556  IndirectBrInst(const IndirectBrInst &IBI);
3557 
3558  /// Create a new indirectbr instruction, specifying an
3559  /// Address to jump to. The number of expected destinations can be specified
3560  /// here to make memory allocation more efficient. This constructor can also
3561  /// autoinsert before another instruction.
3562  IndirectBrInst(Value *Address, unsigned NumDests, Instruction *InsertBefore);
3563 
3564  /// Create a new indirectbr instruction, specifying an
3565  /// Address to jump to. The number of expected destinations can be specified
3566  /// here to make memory allocation more efficient. This constructor also
3567  /// autoinserts at the end of the specified BasicBlock.
3568  IndirectBrInst(Value *Address, unsigned NumDests, BasicBlock *InsertAtEnd);
3569 
3570  // allocate space for exactly zero operands
3571  void *operator new(size_t s) {
3572  return User::operator new(s);
3573  }
3574 
3575  void init(Value *Address, unsigned NumDests);
3576  void growOperands();
3577 
3578 protected:
3579  // Note: Instruction needs to be a friend here to call cloneImpl.
3580  friend class Instruction;
3581 
3582  IndirectBrInst *cloneImpl() const;
3583 
3584 public:
3585  /// Iterator type that casts an operand to a basic block.
3586  ///
3587  /// This only makes sense because the successors are stored as adjacent
3588  /// operands for indirectbr instructions.
3590  : iterator_adaptor_base<succ_op_iterator, value_op_iterator,
3591  std::random_access_iterator_tag, BasicBlock *,
3592  ptrdiff_t, BasicBlock *, BasicBlock *> {
3594 
3595  BasicBlock *operator*() const { return cast<BasicBlock>(*I); }
3596  BasicBlock *operator->() const { return operator*(); }
3597  };
3598 
3599  /// The const version of `succ_op_iterator`.
3601  : iterator_adaptor_base<const_succ_op_iterator, const_value_op_iterator,
3602  std::random_access_iterator_tag,
3603  const BasicBlock *, ptrdiff_t, const BasicBlock *,
3604  const BasicBlock *> {
3606  : iterator_adaptor_base(I) {}
3607 
3608  const BasicBlock *operator*() const { return cast<BasicBlock>(*I); }
3609  const BasicBlock *operator->() const { return operator*(); }
3610  };
3611 
3612  static IndirectBrInst *Create(Value *Address, unsigned NumDests,
3613  Instruction *InsertBefore = nullptr) {
3614  return new IndirectBrInst(Address, NumDests, InsertBefore);
3615  }
3616 
3617  static IndirectBrInst *Create(Value *Address, unsigned NumDests,
3618  BasicBlock *InsertAtEnd) {
3619  return new IndirectBrInst(Address, NumDests, InsertAtEnd);
3620  }
3621 
3622  /// Provide fast operand accessors.
3624 
3625  // Accessor Methods for IndirectBrInst instruction.
3626  Value *getAddress() { return getOperand(0); }
3627  const Value *getAddress() const { return getOperand(0); }
3628  void setAddress(Value *V) { setOperand(0, V); }
3629 
3630  /// return the number of possible destinations in this
3631  /// indirectbr instruction.
3632  unsigned getNumDestinations() const { return getNumOperands()-1; }
3633 
3634  /// Return the specified destination.
3635  BasicBlock *getDestination(unsigned i) { return getSuccessor(i); }
3636  const BasicBlock *getDestination(unsigned i) const { return getSuccessor(i); }
3637 
3638  /// Add a destination.
3639  ///
3640  void addDestination(BasicBlock *Dest);
3641 
3642  /// This method removes the specified successor from the
3643  /// indirectbr instruction.
3644  void removeDestination(unsigned i);
3645 
3646  unsigned getNumSuccessors() const { return getNumOperands()-1; }
3647  BasicBlock *getSuccessor(unsigned i) const {
3648  return cast<BasicBlock>(getOperand(i+1));
3649  }
3650  void setSuccessor(unsigned i, BasicBlock *NewSucc) {
3651  setOperand(i + 1, NewSucc);
3652  }
3653 
3655  return make_range(succ_op_iterator(std::next(value_op_begin())),
3656  succ_op_iterator(value_op_end()));
3657  }
3658 
3660  return make_range(const_succ_op_iterator(std::next(value_op_begin())),
3661  const_succ_op_iterator(value_op_end()));
3662  }
3663 
3664  // Methods for support type inquiry through isa, cast, and dyn_cast:
3665  static bool classof(const Instruction *I) {
3666  return I->getOpcode() == Instruction::IndirectBr;
3667  }
3668  static bool classof(const Value *V) {
3669  return isa<Instruction>(V) && classof(cast<Instruction>(V));
3670  }
3671 };
3672 
3673 template <>
3675 };
3676 
3678 
3679 //===----------------------------------------------------------------------===//
3680 // InvokeInst Class
3681 //===----------------------------------------------------------------------===//
3682 
3683 /// Invoke instruction. The SubclassData field is used to hold the
3684 /// calling convention of the call.
3685 ///
3686 class InvokeInst : public CallBase {
3687  /// The number of operands for this call beyond the called function,
3688  /// arguments, and operand bundles.
3689  static constexpr int NumExtraOperands = 2;
3690 
3691  /// The index from the end of the operand array to the normal destination.
3692  static constexpr int NormalDestOpEndIdx = -3;
3693 
3694  /// The index from the end of the operand array to the unwind destination.
3695  static constexpr int UnwindDestOpEndIdx = -2;
3696 
3697  InvokeInst(const InvokeInst &BI);
3698 
3699  /// Construct an InvokeInst given a range of arguments.
3700  ///
3701  /// Construct an InvokeInst from a range of arguments
3702  inline InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3703  BasicBlock *IfException, ArrayRef<Value *> Args,
3704  ArrayRef<OperandBundleDef> Bundles, int NumOperands,
3705  const Twine &NameStr, Instruction *InsertBefore);
3706 
3707  inline InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3708  BasicBlock *IfException, ArrayRef<Value *> Args,
3709  ArrayRef<OperandBundleDef> Bundles, int NumOperands,
3710  const Twine &NameStr, BasicBlock *InsertAtEnd);
3711 
3712  void init(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3713  BasicBlock *IfException, ArrayRef<Value *> Args,
3714  ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr);
3715 
3716  /// Compute the number of operands to allocate.
3717  static int ComputeNumOperands(int NumArgs, int NumBundleInputs = 0) {
3718  // We need one operand for the called function, plus our extra operands and
3719  // the input operand counts provided.
3720  return 1 + NumExtraOperands + NumArgs + NumBundleInputs;
3721  }
3722 
3723 protected:
3724  // Note: Instruction needs to be a friend here to call cloneImpl.
3725  friend class Instruction;
3726 
3727  InvokeInst *cloneImpl() const;
3728 
3729 public:
3730  static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3731  BasicBlock *IfException, ArrayRef<Value *> Args,
3732  const Twine &NameStr,
3733  Instruction *InsertBefore = nullptr) {
3734  int NumOperands = ComputeNumOperands(Args.size());
3735  return new (NumOperands)
3736  InvokeInst(Ty, Func, IfNormal, IfException, Args, None, NumOperands,
3737  NameStr, InsertBefore);
3738  }
3739 
3740  static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3741  BasicBlock *IfException, ArrayRef<Value *> Args,
3742  ArrayRef<OperandBundleDef> Bundles = None,
3743  const Twine &NameStr = "",
3744  Instruction *InsertBefore = nullptr) {
3745  int NumOperands =
3746  ComputeNumOperands(Args.size(), CountBundleInputs(Bundles));
3747  unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
3748 
3749  return new (NumOperands, DescriptorBytes)
3750  InvokeInst(Ty, Func, IfNormal, IfException, Args, Bundles, NumOperands,
3751  NameStr, InsertBefore);
3752  }
3753 
3754  static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3755  BasicBlock *IfException, ArrayRef<Value *> Args,
3756  const Twine &NameStr, BasicBlock *InsertAtEnd) {
3757  int NumOperands = ComputeNumOperands(Args.size());
3758  return new (NumOperands)
3759  InvokeInst(Ty, Func, IfNormal, IfException, Args, None, NumOperands,
3760  NameStr, InsertAtEnd);
3761  }
3762 
3763  static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3764  BasicBlock *IfException, ArrayRef<Value *> Args,
// NOTE(review): a parameter line (presumably `ArrayRef<OperandBundleDef> Bundles,`)
// appears to have been lost in extraction here — confirm against the original header.
3766  const Twine &NameStr, BasicBlock *InsertAtEnd) {
3767  int NumOperands =
3768  ComputeNumOperands(Args.size(), CountBundleInputs(Bundles));
3769  unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
3770 
3771  return new (NumOperands, DescriptorBytes)
3772  InvokeInst(Ty, Func, IfNormal, IfException, Args, Bundles, NumOperands,
3773  NameStr, InsertAtEnd);
3774  }
3775 
3776  static InvokeInst *Create(FunctionCallee Func, BasicBlock *IfNormal,
3777  BasicBlock *IfException, ArrayRef<Value *> Args,
3778  const Twine &NameStr,
3779  Instruction *InsertBefore = nullptr) {
3780  return Create(Func.getFunctionType(), Func.getCallee(), IfNormal,
3781  IfException, Args, None, NameStr, InsertBefore);
3782  }
3783 
3784  static InvokeInst *Create(FunctionCallee Func, BasicBlock *IfNormal,
3785  BasicBlock *IfException, ArrayRef<Value *> Args,
3786  ArrayRef<OperandBundleDef> Bundles = None,
3787  const Twine &NameStr = "",
3788  Instruction *InsertBefore = nullptr) {
3789  return Create(Func.getFunctionType(), Func.getCallee(), IfNormal,
3790  IfException, Args, Bundles, NameStr, InsertBefore);
3791  }
3792 
3793  static InvokeInst *Create(FunctionCallee Func, BasicBlock *IfNormal,
3794  BasicBlock *IfException, ArrayRef<Value *> Args,
3795  const Twine &NameStr, BasicBlock *InsertAtEnd) {
3796  return Create(Func.getFunctionType(), Func.getCallee(), IfNormal,
3797  IfException, Args, NameStr, InsertAtEnd);
3798  }
3799 
3800  static InvokeInst *Create(FunctionCallee Func, BasicBlock *IfNormal,
3801  BasicBlock *IfException, ArrayRef<Value *> Args,
// NOTE(review): a parameter line (presumably `ArrayRef<OperandBundleDef> Bundles,`)
// appears to have been lost in extraction here — confirm against the original header.
3803  const Twine &NameStr, BasicBlock *InsertAtEnd) {
3804  return Create(Func.getFunctionType(), Func.getCallee(), IfNormal,
3805  IfException, Args, Bundles, NameStr, InsertAtEnd);
3806  }
3807 
3808  /// Create a clone of \p II with a different set of operand bundles and
3809  /// insert it before \p InsertPt.
3810  ///
3811  /// The returned invoke instruction is identical to \p II in every way except
3812  /// that the operand bundles for the new instruction are set to the operand
3813  /// bundles in \p Bundles.
3814  static InvokeInst *Create(InvokeInst *II, ArrayRef<OperandBundleDef> Bundles,
3815  Instruction *InsertPt = nullptr);
3816 
3817  // get*Dest - Return the destination basic blocks...
// NOTE(review): the signatures of the four destination accessors
// (getNormalDest/getUnwindDest/setNormalDest/setUnwindDest) appear to have been
// lost in extraction — only their bodies remain below. The bodies read/write the
// operands at the fixed end-relative indices declared at the top of the class.
3819  return cast<BasicBlock>(Op<NormalDestOpEndIdx>());
3820  }
3822  return cast<BasicBlock>(Op<UnwindDestOpEndIdx>());
3823  }
3825  Op<NormalDestOpEndIdx>() = reinterpret_cast<Value *>(B);
3826  }
3828  Op<UnwindDestOpEndIdx>() = reinterpret_cast<Value *>(B);
3829  }
3830 
3831  /// Get the landingpad instruction from the landing pad
3832  /// block (the unwind destination).
3833  LandingPadInst *getLandingPadInst() const;
3834 
3835  BasicBlock *getSuccessor(unsigned i) const {
3836  assert(i < 2 && "Successor # out of range for invoke!");
3837  return i == 0 ? getNormalDest() : getUnwindDest();
3838  }
3839 
3840  void setSuccessor(unsigned i, BasicBlock *NewSucc) {
3841  assert(i < 2 && "Successor # out of range for invoke!");
3842  if (i == 0)
3843  setNormalDest(NewSucc);
3844  else
3845  setUnwindDest(NewSucc);
3846  }
3847 
3848  unsigned getNumSuccessors() const { return 2; }
3849 
3850  // Methods for support type inquiry through isa, cast, and dyn_cast:
3851  static bool classof(const Instruction *I) {
3852  return (I->getOpcode() == Instruction::Invoke);
3853  }
3854  static bool classof(const Value *V) {
3855  return isa<Instruction>(V) && classof(cast<Instruction>(V));
3856  }
3857 
3858 private:
3859  // Shadow Instruction::setInstructionSubclassData with a private forwarding
3860  // method so that subclasses cannot accidentally use it.
3861  template <typename Bitfield>
3862  void setSubclassData(typename Bitfield::Type Value) {
3863  Instruction::setSubclassData<Bitfield>(Value);
3864  }
3865 };
3866 
// Inline constructor definition: the operand array is co-allocated before the
// instruction by CallBase's placement new, so the first operand lives at
// op_end() - NumOperands. init() fills in callee/args/bundles/destinations.
3867 InvokeInst::InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3868  BasicBlock *IfException, ArrayRef<Value *> Args,
3869  ArrayRef<OperandBundleDef> Bundles, int NumOperands,
3870  const Twine &NameStr, Instruction *InsertBefore)
3871  : CallBase(Ty->getReturnType(), Instruction::Invoke,
3872  OperandTraits<CallBase>::op_end(this) - NumOperands, NumOperands,
3873  InsertBefore) {
3874  init(Ty, Func, IfNormal, IfException, Args, Bundles, NameStr);
3875 }
3876 
// Same as the InsertBefore constructor above, but appends the new instruction
// at the end of the given basic block instead.
3877 InvokeInst::InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3878  BasicBlock *IfException, ArrayRef<Value *> Args,
3879  ArrayRef<OperandBundleDef> Bundles, int NumOperands,
3880  const Twine &NameStr, BasicBlock *InsertAtEnd)
3881  : CallBase(Ty->getReturnType(), Instruction::Invoke,
3882  OperandTraits<CallBase>::op_end(this) - NumOperands, NumOperands,
3883  InsertAtEnd) {
3884  init(Ty, Func, IfNormal, IfException, Args, Bundles, NameStr);
3885 }
3886 
3887 //===----------------------------------------------------------------------===//
3888 // CallBrInst Class
3889 //===----------------------------------------------------------------------===//
3890 
3891 /// CallBr instruction, tracking function calls that may not return control but
3892 /// instead transfer it to a third location. The SubclassData field is used to
3893 /// hold the calling convention of the call.
3894 ///
3895 class CallBrInst : public CallBase {
3896 
3897  unsigned NumIndirectDests;
3898 
3899  CallBrInst(const CallBrInst &BI);
3900 
3901  /// Construct a CallBrInst given a range of arguments.
3902  ///
3903  /// Construct a CallBrInst from a range of arguments
3904  inline CallBrInst(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest,
3905  ArrayRef<BasicBlock *> IndirectDests,
// NOTE(review): a parameter line (presumably `ArrayRef<Value *> Args,`) appears
// to have been lost in extraction here and in the next two declarations.
3907  ArrayRef<OperandBundleDef> Bundles, int NumOperands,
3908  const Twine &NameStr, Instruction *InsertBefore);
3909 
3910  inline CallBrInst(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest,
3911  ArrayRef<BasicBlock *> IndirectDests,
3913  ArrayRef<OperandBundleDef> Bundles, int NumOperands,
3914  const Twine &NameStr, BasicBlock *InsertAtEnd);
3915 
3916  void init(FunctionType *FTy, Value *Func, BasicBlock *DefaultDest,
3918  ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr);
3919 
3920  /// Should the Indirect Destinations change, scan + update the Arg list.
3921  void updateArgBlockAddresses(unsigned i, BasicBlock *B);
3922 
3923  /// Compute the number of operands to allocate.
3924  static int ComputeNumOperands(int NumArgs, int NumIndirectDests,
3925  int NumBundleInputs = 0) {
3926  // We need one operand for the called function, plus our extra operands and
3927  // the input operand counts provided.
3928  return 2 + NumIndirectDests + NumArgs + NumBundleInputs;
3929  }
3930 
3931 protected:
3932  // Note: Instruction needs to be a friend here to call cloneImpl.
3933  friend class Instruction;
3934 
3935  CallBrInst *cloneImpl() const;
3936 
3937 public:
3938  static CallBrInst *Create(FunctionType *Ty, Value *Func,
3939  BasicBlock *DefaultDest,
3940  ArrayRef<BasicBlock *> IndirectDests,
3941  ArrayRef<Value *> Args, const Twine &NameStr,
3942  Instruction *InsertBefore = nullptr) {
3943  int NumOperands = ComputeNumOperands(Args.size(), IndirectDests.size());
3944  return new (NumOperands)
3945  CallBrInst(Ty, Func, DefaultDest, IndirectDests, Args, None,
3946  NumOperands, NameStr, InsertBefore);
3947  }
3948 
3949  static CallBrInst *Create(FunctionType *Ty, Value *Func,
3950  BasicBlock *DefaultDest,
3951  ArrayRef<BasicBlock *> IndirectDests,
3953  ArrayRef<OperandBundleDef> Bundles = None,
3954  const Twine &NameStr = "",
3955  Instruction *InsertBefore = nullptr) {
3956  int NumOperands = ComputeNumOperands(Args.size(), IndirectDests.size(),
3957  CountBundleInputs(Bundles));
3958  unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
3959 
3960  return new (NumOperands, DescriptorBytes)
3961  CallBrInst(Ty, Func, DefaultDest, IndirectDests, Args, Bundles,
3962  NumOperands, NameStr, InsertBefore);
3963  }
3964 
3965  static CallBrInst *Create(FunctionType *Ty, Value *Func,
3966  BasicBlock *DefaultDest,
3967  ArrayRef<BasicBlock *> IndirectDests,
3968  ArrayRef<Value *> Args, const Twine &NameStr,
3969  BasicBlock *InsertAtEnd) {
3970  int NumOperands = ComputeNumOperands(Args.size(), IndirectDests.size());
3971  return new (NumOperands)
3972  CallBrInst(Ty, Func, DefaultDest, IndirectDests, Args, None,
3973  NumOperands, NameStr, InsertAtEnd);
3974  }
3975 
3976  static CallBrInst *Create(FunctionType *Ty, Value *Func,
3977  BasicBlock *DefaultDest,
3978  ArrayRef<BasicBlock *> IndirectDests,
// NOTE(review): the Args and Bundles parameter lines appear to have been lost
// in extraction here — the body below references both.
3981  const Twine &NameStr, BasicBlock *InsertAtEnd) {
3982  int NumOperands = ComputeNumOperands(Args.size(), IndirectDests.size(),
3983  CountBundleInputs(Bundles));
3984  unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
3985 
3986  return new (NumOperands, DescriptorBytes)
3987  CallBrInst(Ty, Func, DefaultDest, IndirectDests, Args, Bundles,
3988  NumOperands, NameStr, InsertAtEnd);
3989  }
3990 
3991  static CallBrInst *Create(FunctionCallee Func, BasicBlock *DefaultDest,
3992  ArrayRef<BasicBlock *> IndirectDests,
3993  ArrayRef<Value *> Args, const Twine &NameStr,
3994  Instruction *InsertBefore = nullptr) {
3995  return Create(Func.getFunctionType(), Func.getCallee(), DefaultDest,
3996  IndirectDests, Args, NameStr, InsertBefore);
3997  }
3998 
3999  static CallBrInst *Create(FunctionCallee Func, BasicBlock *DefaultDest,
4000  ArrayRef<BasicBlock *> IndirectDests,
4002  ArrayRef<OperandBundleDef> Bundles = None,
4003  const Twine &NameStr = "",
4004  Instruction *InsertBefore = nullptr) {
4005  return Create(Func.getFunctionType(), Func.getCallee(), DefaultDest,
4006  IndirectDests, Args, Bundles, NameStr, InsertBefore);
4007  }
4008 
4009  static CallBrInst *Create(FunctionCallee Func, BasicBlock *DefaultDest,
4010  ArrayRef<BasicBlock *> IndirectDests,
4011  ArrayRef<Value *> Args, const Twine &NameStr,
4012  BasicBlock *InsertAtEnd) {
4013  return Create(Func.getFunctionType(), Func.getCallee(), DefaultDest,
4014  IndirectDests, Args, NameStr, InsertAtEnd);
4015  }
4016 
// NOTE(review): the opening line of this overload (presumably
// `static CallBrInst *Create(FunctionCallee Func,`) was lost in extraction.
4018  BasicBlock *DefaultDest,
4019  ArrayRef<BasicBlock *> IndirectDests,
4022  const Twine &NameStr, BasicBlock *InsertAtEnd) {
4023  return Create(Func.getFunctionType(), Func.getCallee(), DefaultDest,
4024  IndirectDests, Args, Bundles, NameStr, InsertAtEnd);
4025  }
4026 
4027  /// Create a clone of \p CBI with a different set of operand bundles and
4028  /// insert it before \p InsertPt.
4029  ///
4030  /// The returned callbr instruction is identical to \p CBI in every way
4031  /// except that the operand bundles for the new instruction are set to the
4032  /// operand bundles in \p Bundles.
4033  static CallBrInst *Create(CallBrInst *CBI,
4035  Instruction *InsertPt = nullptr);
4036 
4037  /// Return the number of callbr indirect dest labels.
4038  ///
4039  unsigned getNumIndirectDests() const { return NumIndirectDests; }
4040 
4041  /// getIndirectDestLabel - Return the i-th indirect dest label.
4042  ///
4043  Value *getIndirectDestLabel(unsigned i) const {
4044  assert(i < getNumIndirectDests() && "Out of bounds!");
// NOTE(review): the `return getOperand(...)` line was lost in extraction here
// (and in getIndirectDestLabelUse below) — only the trailing `1);` remains.
4046  1);
4047  }
4048 
4049  Value *getIndirectDestLabelUse(unsigned i) const {
4050  assert(i < getNumIndirectDests() && "Out of bounds!");
4052  1);
4053  }
4054 
4055  // Return the destination basic blocks...
// NOTE(review): the getDefaultDest/getIndirectDests/setDefaultDest signatures
// appear to have been partially lost in extraction in this region. The bodies
// address operands relative to Op<-1>(): the default dest sits immediately
// before the block of NumIndirectDests indirect-dest operands at the end of
// the operand array.
4057  return cast<BasicBlock>(*(&Op<-1>() - getNumIndirectDests() - 1));
4058  }
4059  BasicBlock *getIndirectDest(unsigned i) const {
4060  return cast_or_null<BasicBlock>(*(&Op<-1>() - getNumIndirectDests() + i));
4061  }
4063  SmallVector<BasicBlock *, 16> IndirectDests;
4064  for (unsigned i = 0, e = getNumIndirectDests(); i < e; ++i)
4065  IndirectDests.push_back(getIndirectDest(i));
4066  return IndirectDests;
4067  }
4069  *(&Op<-1>() - getNumIndirectDests() - 1) = reinterpret_cast<Value *>(B);
4070  }
4071  void setIndirectDest(unsigned i, BasicBlock *B) {
4072  updateArgBlockAddresses(i, B);
4073  *(&Op<-1>() - getNumIndirectDests() + i) = reinterpret_cast<Value *>(B);
4074  }
4075 
4076  BasicBlock *getSuccessor(unsigned i) const {
// NOTE(review): this bound looks one too lax — it permits i == getNumSuccessors(),
// which would index past the last indirect destination. setSuccessor below uses
// the tighter `i < getNumIndirectDests() + 1` (== getNumSuccessors()). Confirm
// against upstream; likely should be `i < getNumSuccessors()`.
4077  assert(i < getNumSuccessors() + 1 &&
4078  "Successor # out of range for callbr!");
4079  return i == 0 ? getDefaultDest() : getIndirectDest(i - 1);
4080  }
4081 
4082  void setSuccessor(unsigned i, BasicBlock *NewSucc) {
4083  assert(i < getNumIndirectDests() + 1 &&
4084  "Successor # out of range for callbr!");
4085  return i == 0 ? setDefaultDest(NewSucc) : setIndirectDest(i - 1, NewSucc);
4086  }
4087 
4088  unsigned getNumSuccessors() const { return getNumIndirectDests() + 1; }
4089 
4090  // Methods for support type inquiry through isa, cast, and dyn_cast:
4091  static bool classof(const Instruction *I) {
4092  return (I->getOpcode() == Instruction::CallBr);
4093  }
4094  static bool classof(const Value *V) {
4095  return isa<Instruction>(V) && classof(cast<Instruction>(V));
4096  }
4097 
4098 private:
4099  // Shadow Instruction::setInstructionSubclassData with a private forwarding
4100  // method so that subclasses cannot accidentally use it.
4101  template <typename Bitfield>
4102  void setSubclassData(typename Bitfield::Type Value) {
4103  Instruction::setSubclassData<Bitfield>(Value);
4104  }
4105 };
4106 
// Inline constructor definition: operands are co-allocated before the
// instruction, so the first operand is op_end() - NumOperands. init() wires up
// callee, args, bundles, default dest and indirect dests.
4107 CallBrInst::CallBrInst(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest,
4108  ArrayRef<BasicBlock *> IndirectDests,
4109  ArrayRef<Value *> Args,
4110  ArrayRef<OperandBundleDef> Bundles, int NumOperands,
4111  const Twine &NameStr, Instruction *InsertBefore)
4112  : CallBase(Ty->getReturnType(), Instruction::CallBr,
4113  OperandTraits<CallBase>::op_end(this) - NumOperands, NumOperands,
4114  InsertBefore) {
4115  init(Ty, Func, DefaultDest, IndirectDests, Args, Bundles, NameStr);
4116 }
4117 
// Same as the InsertBefore constructor above, but appends the new instruction
// at the end of the given basic block instead.
4118 CallBrInst::CallBrInst(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest,
4119  ArrayRef<BasicBlock *> IndirectDests,
4120  ArrayRef<Value *> Args,
4121  ArrayRef<OperandBundleDef> Bundles, int NumOperands,
4122  const Twine &NameStr, BasicBlock *InsertAtEnd)
4123  : CallBase(Ty->getReturnType(), Instruction::CallBr,
4124  OperandTraits<CallBase>::op_end(this) - NumOperands, NumOperands,
4125  InsertAtEnd) {
4126  init(Ty, Func, DefaultDest, IndirectDests, Args, Bundles, NameStr);
4127 }
4128 
4129 //===----------------------------------------------------------------------===//
4130 // ResumeInst Class
4131 //===----------------------------------------------------------------------===//
4132 
4133 //===---------------------------------------------------------------------------
4134 /// Resume the propagation of an exception.
4135 ///
4136 class ResumeInst : public Instruction {
4137  ResumeInst(const ResumeInst &RI);
4138 
4139  explicit ResumeInst(Value *Exn, Instruction *InsertBefore=nullptr);
4140  ResumeInst(Value *Exn, BasicBlock *InsertAtEnd);
4141 
4142 protected:
4143  // Note: Instruction needs to be a friend here to call cloneImpl.
4144  friend class Instruction;
4145 
4146  ResumeInst *cloneImpl() const;
4147 
4148 public:
// Factory helpers: `new (1)` co-allocates space for the single operand (the
// exception value) in front of the instruction.
4149  static ResumeInst *Create(Value *Exn, Instruction *InsertBefore = nullptr) {
4150  return new(1) ResumeInst(Exn, InsertBefore);
4151  }
4152 
4153  static ResumeInst *Create(Value *Exn, BasicBlock *InsertAtEnd) {
4154  return new(1) ResumeInst(Exn, InsertAtEnd);
4155  }
4156 
4157  /// Provide fast operand accessors
// NOTE(review): a macro invocation (presumably
// DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)) was lost in extraction here.
4159 
4160  /// Convenience accessor.
4161  Value *getValue() const { return Op<0>(); }
4162 
4163  unsigned getNumSuccessors() const { return 0; }
4164 
4165  // Methods for support type inquiry through isa, cast, and dyn_cast:
4166  static bool classof(const Instruction *I) {
4167  return I->getOpcode() == Instruction::Resume;
4168  }
4169  static bool classof(const Value *V) {
4170  return isa<Instruction>(V) && classof(cast<Instruction>(V));
4171  }
4172 
4173 private:
// resume is a terminator with no successors; these private overrides exist
// only to satisfy the successor interface and must never be reached.
4174  BasicBlock *getSuccessor(unsigned idx) const {
4175  llvm_unreachable("ResumeInst has no successors!");
4176  }
4177 
4178  void setSuccessor(unsigned idx, BasicBlock *NewSucc) {
4179  llvm_unreachable("ResumeInst has no successors!");
4180  }
4181 };
4182 
// NOTE(review): this specialization's header line (presumably
// `struct OperandTraits<ResumeInst> :`) and the following
// DEFINE_TRANSPARENT_OPERAND_ACCESSORS invocation were lost in extraction.
4183 template <>
4185  public FixedNumOperandTraits<ResumeInst, 1> {
4186 };
4187 
4189 
4190 //===----------------------------------------------------------------------===//
4191 // CatchSwitchInst Class
4192 //===----------------------------------------------------------------------===//
// NOTE(review): the class header line (presumably
// `class CatchSwitchInst : public Instruction {`) was lost in extraction.
4194  using UnwindDestField = BoolBitfieldElementT<0>;
4195 
4196  /// The number of operands actually allocated. NumOperands is
4197  /// the number actually in use.
4198  unsigned ReservedSpace;
4199 
4200  // Operand[0] = Outer scope
4201  // Operand[1] = Unwind block destination
4202  // Operand[n] = BasicBlock to go to on match
4203  CatchSwitchInst(const CatchSwitchInst &CSI);
4204 
4205  /// Create a new switch instruction, specifying a
4206  /// default destination. The number of additional handlers can be specified
4207  /// here to make memory allocation more efficient.
4208  /// This constructor can also autoinsert before another instruction.
4209  CatchSwitchInst(Value *ParentPad, BasicBlock *UnwindDest,
4210  unsigned NumHandlers, const Twine &NameStr,
4211  Instruction *InsertBefore);
4212 
4213  /// Create a new switch instruction, specifying a
4214  /// default destination. The number of additional handlers can be specified
4215  /// here to make memory allocation more efficient.
4216  /// This constructor also autoinserts at the end of the specified BasicBlock.
4217  CatchSwitchInst(Value *ParentPad, BasicBlock *UnwindDest,
4218  unsigned NumHandlers, const Twine &NameStr,
4219  BasicBlock *InsertAtEnd);
4220 
4221  // allocate space for exactly zero operands
4222  void *operator new(size_t s) { return User::operator new(s); }
4223 
4224  void init(Value *ParentPad, BasicBlock *UnwindDest, unsigned NumReserved);
4225  void growOperands(unsigned Size);
4226 
4227 protected:
4228  // Note: Instruction needs to be a friend here to call cloneImpl.
4229  friend class Instruction;
4230 
4231  CatchSwitchInst *cloneImpl() const;
4232 
4233 public:
4234  static CatchSwitchInst *Create(Value *ParentPad, BasicBlock *UnwindDest,
4235  unsigned NumHandlers,
4236  const Twine &NameStr = "",
4237  Instruction *InsertBefore = nullptr) {
4238  return new CatchSwitchInst(ParentPad, UnwindDest, NumHandlers, NameStr,
4239  InsertBefore);
4240  }
4241 
4242  static CatchSwitchInst *Create(Value *ParentPad, BasicBlock *UnwindDest,
4243  unsigned NumHandlers, const Twine &NameStr,
4244  BasicBlock *InsertAtEnd) {
4245  return new CatchSwitchInst(ParentPad, UnwindDest, NumHandlers, NameStr,
4246  InsertAtEnd);
4247  }
4248 
4249  /// Provide fast operand accessors
// NOTE(review): a macro invocation (presumably
// DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)) was lost in extraction here.
4251 
4252  // Accessor Methods for CatchSwitch stmt
4253  Value *getParentPad() const { return getOperand(0); }
4254  void setParentPad(Value *ParentPad) { setOperand(0, ParentPad); }
4255 
4256  // Accessor Methods for CatchSwitch stmt
4257  bool hasUnwindDest() const { return getSubclassData<UnwindDestField>(); }
4258  bool unwindsToCaller() const { return !hasUnwindDest(); }
// NOTE(review): the getUnwindDest signature line was lost in extraction —
// only its body remains. Operand 1 holds the unwind dest only when present.
4260  if (hasUnwindDest())
4261  return cast<BasicBlock>(getOperand(1));
4262  return nullptr;
4263  }
4264  void setUnwindDest(BasicBlock *UnwindDest) {
4265  assert(UnwindDest);
4266  assert(hasUnwindDest());
4267  setOperand(1, UnwindDest);
4268  }
4269 
4270  /// return the number of 'handlers' in this catchswitch
4271  /// instruction, except the default handler
4272  unsigned getNumHandlers() const {
// Handlers occupy all operands after the parent pad (operand 0) and, when
// present, the unwind destination (operand 1).
4273  if (hasUnwindDest())
4274  return getNumOperands() - 2;
4275  return getNumOperands() - 1;
4276  }
4277 
4278 private:
4279  static BasicBlock *handler_helper(Value *V) { return cast<BasicBlock>(V); }
4280  static const BasicBlock *handler_helper(const Value *V) {
4281  return cast<BasicBlock>(V);
4282  }
4283 
4284 public:
4285  using DerefFnTy = BasicBlock *(*)(Value *);
// NOTE(review): the handler_iterator/const_handler_iterator alias target lines
// (presumably mapped_iterator instantiations) were lost in extraction below.
4288  using ConstDerefFnTy = const BasicBlock *(*)(const Value *);
4289  using const_handler_iterator =
4292 
4293  /// Returns an iterator that points to the first handler in CatchSwitchInst.
4295  op_iterator It = op_begin() + 1;
4296  if (hasUnwindDest())
4297  ++It;
4298  return handler_iterator(It, DerefFnTy(handler_helper));
4299  }
4300 
4301  /// Returns an iterator that points to the first handler in the
4302  /// CatchSwitchInst.
4304  const_op_iterator It = op_begin() + 1;
4305  if (hasUnwindDest())
4306  ++It;
4307  return const_handler_iterator(It, ConstDerefFnTy(handler_helper));
4308  }
4309 
4310  /// Returns a read-only iterator that points one past the last
4311  /// handler in the CatchSwitchInst.
4313  return handler_iterator(op_end(), DerefFnTy(handler_helper));
4314  }
4315 
4316  /// Returns an iterator that points one past the last handler in the
4317  /// CatchSwitchInst.
4319  return const_handler_iterator(op_end(), ConstDerefFnTy(handler_helper));
4320  }
4321 
4322  /// iteration adapter for range-for loops.
4324  return make_range(handler_begin(), handler_end());
4325  }
4326 
4327  /// iteration adapter for range-for loops.
4329  return make_range(handler_begin(), handler_end());
4330  }
4331 
4332  /// Add an entry to the switch instruction...
4333  /// Note:
4334  /// This action invalidates handler_end(). Old handler_end() iterator will
4335  /// point to the added handler.
4336  void addHandler(BasicBlock *Dest);
4337 
4338  void removeHandler(handler_iterator HI);
4339 
4340  unsigned getNumSuccessors() const { return getNumOperands() - 1; }
4341  BasicBlock *getSuccessor(unsigned Idx) const {
4342  assert(Idx < getNumSuccessors() &&
4343  "Successor # out of range for catchswitch!");
4344  return cast<BasicBlock>(getOperand(Idx + 1));
4345  }
4346  void setSuccessor(unsigned Idx, BasicBlock *NewSucc) {
4347  assert(Idx < getNumSuccessors() &&
4348  "Successor # out of range for catchswitch!");
4349  setOperand(Idx + 1, NewSucc);
4350  }
4351 
4352  // Methods for support type inquiry through isa, cast, and dyn_cast:
4353  static bool classof(const Instruction *I) {
4354  return I->getOpcode() == Instruction::CatchSwitch;
4355  }
4356  static bool classof(const Value *V) {
4357  return isa<Instruction>(V) && classof(cast<Instruction>(V));
4358  }
4359 };
4360 
// NOTE(review): this specialization's body (presumably
// `struct OperandTraits<CatchSwitchInst> : public HungoffOperandTraits<2> {};`
// plus a DEFINE_TRANSPARENT_OPERAND_ACCESSORS invocation) was lost in extraction.
4361 template <>
4363 
4365 
4366 //===----------------------------------------------------------------------===//
4367 // CleanupPadInst Class
4368 //===----------------------------------------------------------------------===//
// NOTE(review): the class header line (presumably
// `class CleanupPadInst : public FuncletPadInst {`) was lost in extraction.
4370 private:
4371  explicit CleanupPadInst(Value *ParentPad, ArrayRef<Value *> Args,
4372  unsigned Values, const Twine &NameStr,
4373  Instruction *InsertBefore)
4374  : FuncletPadInst(Instruction::CleanupPad, ParentPad, Args, Values,
4375  NameStr, InsertBefore) {}
4376  explicit CleanupPadInst(Value *ParentPad, ArrayRef<Value *> Args,
4377  unsigned Values, const Twine &NameStr,
4378  BasicBlock *InsertAtEnd)
4379  : FuncletPadInst(Instruction::CleanupPad, ParentPad, Args, Values,
4380  NameStr, InsertAtEnd) {}
4381 
4382 public:
// NOTE(review): the opening line of this Create overload (and of the one
// below) was lost in extraction. Operand count is the args plus one slot for
// the parent pad.
4384  const Twine &NameStr = "",
4385  Instruction *InsertBefore = nullptr) {
4386  unsigned Values = 1 + Args.size();
4387  return new (Values)
4388  CleanupPadInst(ParentPad, Args, Values, NameStr, InsertBefore);
4389  }
4390 
4392  const Twine &NameStr, BasicBlock *InsertAtEnd) {
4393  unsigned Values = 1 + Args.size();
4394  return new (Values)
4395  CleanupPadInst(ParentPad, Args, Values, NameStr, InsertAtEnd);
4396  }
4397 
4398  /// Methods for support type inquiry through isa, cast, and dyn_cast:
4399  static bool classof(const Instruction *I) {
4400  return I->getOpcode() == Instruction::CleanupPad;
4401  }
4402  static bool classof(const Value *V) {
4403  return isa<Instruction>(V) && classof(cast<Instruction>(V));
4404  }
4405 };
4406 
4407 //===----------------------------------------------------------------------===//
4408 // CatchPadInst Class
4409 //===----------------------------------------------------------------------===//
// NOTE(review): the class header line (presumably
// `class CatchPadInst : public FuncletPadInst {`) was lost in extraction.
4411 private:
4412  explicit CatchPadInst(Value *CatchSwitch, ArrayRef<Value *> Args,
4413  unsigned Values, const Twine &NameStr,
4414  Instruction *InsertBefore)
4415  : FuncletPadInst(Instruction::CatchPad, CatchSwitch, Args, Values,
4416  NameStr, InsertBefore) {}
4417  explicit CatchPadInst(Value *CatchSwitch, ArrayRef<Value *> Args,
4418  unsigned Values, const Twine &NameStr,
4419  BasicBlock *InsertAtEnd)
4420  : FuncletPadInst(Instruction::CatchPad, CatchSwitch, Args, Values,
4421  NameStr, InsertAtEnd) {}
4422 
4423 public:
// NOTE(review): the opening line of this Create overload (and of the one
// below) was lost in extraction. Operand count is the args plus one slot for
// the owning catchswitch.
4425  const Twine &NameStr = "",
4426  Instruction *InsertBefore = nullptr) {
4427  unsigned Values = 1 + Args.size();
4428  return new (Values)
4429  CatchPadInst(CatchSwitch, Args, Values, NameStr, InsertBefore);
4430  }
4431 
4433  const Twine &NameStr, BasicBlock *InsertAtEnd) {
4434  unsigned Values = 1 + Args.size();
4435  return new (Values)
4436  CatchPadInst(CatchSwitch, Args, Values, NameStr, InsertAtEnd);
4437  }
4438 
4439  /// Convenience accessors
// NOTE(review): the getCatchSwitch signature line was lost in extraction —
// only its body remains. The owning catchswitch is stored as the last operand.
4441  return cast<CatchSwitchInst>(Op<-1>());
4442  }
4443  void setCatchSwitch(Value *CatchSwitch) {
4444  assert(CatchSwitch);
4445  Op<-1>() = CatchSwitch;
4446  }
4447 
4448  /// Methods for support type inquiry through isa, cast, and dyn_cast:
4449  static bool classof(const Instruction *I) {
4450  return I->getOpcode() == Instruction::CatchPad;
4451  }
4452  static bool classof(const Value *V) {
4453  return isa<Instruction>(V) && classof(cast<Instruction>(V));
4454  }
4455 };
4456 
4457 //===----------------------------------------------------------------------===//
4458 // CatchReturnInst Class
4459 //===----------------------------------------------------------------------===//
4460 
// NOTE(review): the class header line (presumably
// `class CatchReturnInst : public Instruction {`) was lost in extraction.
4462  CatchReturnInst(const CatchReturnInst &RI);
4463  CatchReturnInst(Value *CatchPad, BasicBlock *BB, Instruction *InsertBefore);
4464  CatchReturnInst(Value *CatchPad, BasicBlock *BB, BasicBlock *InsertAtEnd);
4465 
4466  void init(Value *CatchPad, BasicBlock *BB);
4467 
4468 protected:
4469  // Note: Instruction needs to be a friend here to call cloneImpl.
4470  friend class Instruction;
4471 
4472  CatchReturnInst *cloneImpl() const;
4473 
4474 public:
// NOTE(review): the opening line of this Create overload (and of the one
// below) was lost in extraction. `new (2)` co-allocates the two operands:
// the catchpad and the successor block.
4476  Instruction *InsertBefore = nullptr) {
4477  assert(CatchPad);
4478  assert(BB);
4479  return new (2) CatchReturnInst(CatchPad, BB, InsertBefore);
4480  }
4481 
4483  BasicBlock *InsertAtEnd) {
4484  assert(CatchPad);
4485  assert(BB);
4486  return new (2) CatchReturnInst(CatchPad, BB, InsertAtEnd);
4487  }
4488 
4489  /// Provide fast operand accessors
// NOTE(review): a macro invocation (presumably
// DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)) was lost in extraction here.
4491 
4492  /// Convenience accessors.
4493  CatchPadInst *getCatchPad() const { return cast<CatchPadInst>(Op<0>()); }
4494  void setCatchPad(CatchPadInst *CatchPad) {
4495  assert(CatchPad);
4496  Op<0>() = CatchPad;
4497  }
4498 
4499  BasicBlock *getSuccessor() const { return cast<BasicBlock>(Op<1>()); }
4500  void setSuccessor(BasicBlock *NewSucc) {
4501  assert(NewSucc);
4502  Op<1>() = NewSucc;
4503  }
4504  unsigned getNumSuccessors() const { return 1; }
4505 
4506  /// Get the parentPad of this catchret's catchpad's catchswitch.
4507  /// The successor block is implicitly a member of this funclet.
// NOTE(review): this accessor's signature line (presumably
// `Value *getCatchSwitchParentPad() const {`) was lost in extraction.
4509  return getCatchPad()->getCatchSwitch()->getParentPad();
4510  }
4511 
4512  // Methods for support type inquiry through isa, cast, and dyn_cast:
4513  static bool classof(const Instruction *I) {
4514  return (I->getOpcode() == Instruction::CatchRet);
4515  }
4516  static bool classof(const Value *V) {
4517  return isa<Instruction>(V) && classof(cast<Instruction>(V));
4518  }
4519 
4520 private:
// Private indexed overloads forward to the single-successor accessors above;
// catchret always has exactly one successor.
4521  BasicBlock *getSuccessor(unsigned Idx) const {
4522  assert(Idx < getNumSuccessors() && "Successor # out of range for catchret!");
4523  return getSuccessor();
4524  }
4525 
4526  void setSuccessor(unsigned Idx, BasicBlock *B) {
4527  assert(Idx < getNumSuccessors() && "Successor # out of range for catchret!");
4528  setSuccessor(B);
4529  }
4530 };
4531 
4532 template <>
4534  : public FixedNumOperandTraits<CatchReturnInst, 2> {};
4535 
4537 
4538 //===----------------------------------------------------------------------===//
4539 // CleanupReturnInst Class
4540 //===----------------------------------------------------------------------===//
4541 
4543  using UnwindDestField = BoolBitfieldElementT<0>;
4544 
4545 private:
4547  CleanupReturnInst(Value *CleanupPad, BasicBlock *UnwindBB, unsigned Values,
4548  Instruction *InsertBefore = nullptr);
4549  CleanupReturnInst(Value *CleanupPad, BasicBlock *UnwindBB, unsigned Values,
4550  BasicBlock *InsertAtEnd);
4551 
4552  void init(Value *CleanupPad, BasicBlock *UnwindBB);
4553 
4554 protected:
4555  // Note: Instruction needs to be a friend here to call cloneImpl.
4556  friend class Instruction;
4557 
4558  CleanupReturnInst *cloneImpl() const;
4559 
4560 public:
4561  static CleanupReturnInst *Create(Value *CleanupPad,
4562  BasicBlock *UnwindBB = nullptr,
4563  Instruction *InsertBefore = nullptr) {
4564  assert(CleanupPad);
4565  unsigned Values = 1;
4566  if (UnwindBB)
4567  ++Values;
4568  return new (Values)
4569  CleanupReturnInst(CleanupPad, UnwindBB, Values, InsertBefore);
4570  }
4571 
4572  static CleanupReturnInst *Create(Value *CleanupPad, BasicBlock *UnwindBB,
4573  BasicBlock *InsertAtEnd) {
4574  assert(CleanupPad);
4575  unsigned Values = 1;
4576  if (UnwindBB)
4577  ++Values;
4578  return new (Values)
4579  CleanupReturnInst(CleanupPad, UnwindBB, Values, InsertAtEnd);
4580  }
4581 
4582  /// Provide fast operand accessors
4584 
4585  bool hasUnwindDest() const { return getSubclassData<UnwindDestField>(); }
4586  bool unwindsToCaller() const { return !hasUnwindDest(); }
4587 
4588  /// Convenience accessor.
4590  return cast<CleanupPadInst>(Op<0>());
4591  }
4592  void setCleanupPad(CleanupPadInst *CleanupPad) {
4593  assert(CleanupPad);
4594  Op<0>() = CleanupPad;
4595  }
4596 
4597  unsigned getNumSuccessors() const { return hasUnwindDest() ? 1 : 0; }
4598 
4600  return hasUnwindDest() ? cast<BasicBlock>(Op<1>()) : nullptr;
4601  }
4602  void setUnwindDest(BasicBlock *NewDest) {
4603  assert(NewDest);
4604  assert(hasUnwindDest());
4605  Op<1>() = NewDest;
4606  }
4607 
4608  // Methods for support type inquiry through isa, cast, and dyn_cast:
4609  static bool classof(const Instruction *I) {
4610  return (I->getOpcode() == Instruction::CleanupRet);
4611  }
4612  static bool classof(const Value *V) {
4613  return isa<Instruction>(V) && classof(cast<Instruction>(V));
4614  }
4615 
4616 private:
4617  BasicBlock *getSuccessor(unsigned Idx) const {
4618  assert(Idx == 0);
4619  return getUnwindDest();
4620  }
4621 
4622  void setSuccessor(unsigned Idx, BasicBlock *B) {
4623  assert(Idx == 0);
4624  setUnwindDest(B);
4625  }
4626 
4627  // Shadow Instruction::setInstructionSubclassData with a private forwarding
4628  // method so that subclasses cannot accidentally use it.
4629  template <typename Bitfield>
4630  void setSubclassData(typename Bitfield::Type Value) {
4631  Instruction::setSubclassData<Bitfield>(Value);
4632  }
4633 };
4634 
4635 template <>
4637  : public VariadicOperandTraits<CleanupReturnInst, /*MINARITY=*/1> {};
4638 
4640 
4641 //===----------------------------------------------------------------------===//
4642 // UnreachableInst Class
4643 //===----------------------------------------------------------------------===//
4644 
4645 //===---------------------------------------------------------------------------
4646 /// This function has undefined behavior. In particular, the
4647 /// presence of this instruction indicates some higher level knowledge that the
4648 /// end of the block cannot be reached.
4649 ///
4651 protected:
4652  // Note: Instruction needs to be a friend here to call cloneImpl.
4653  friend class Instruction;
4654 
4655  UnreachableInst *cloneImpl() const;
4656 
4657 public:
4658  explicit UnreachableInst(LLVMContext &C, Instruction *InsertBefore = nullptr);
4659  explicit UnreachableInst(LLVMContext &C, BasicBlock *InsertAtEnd);
4660 
4661  // allocate space for exactly zero operands
4662  void *operator new(size_t s) {
4663  return User::operator new(s, 0);
4664  }
4665 
4666  unsigned getNumSuccessors() const { return 0; }
4667 
4668  // Methods for support type inquiry through isa, cast, and dyn_cast:
4669  static bool classof(const Instruction *I) {
4670  return I->getOpcode() == Instruction::Unreachable;
4671  }
4672  static bool classof(const Value *V) {
4673  return isa<Instruction>(V) && classof(cast<Instruction>(V));
4674  }
4675 
4676 private:
4677  BasicBlock *getSuccessor(unsigned idx) const {
4678  llvm_unreachable("UnreachableInst has no successors!");
4679  }
4680 
4681  void setSuccessor(unsigned idx, BasicBlock *B) {
4682  llvm_unreachable("UnreachableInst has no successors!");
4683  }
4684 };
4685 
4686 //===----------------------------------------------------------------------===//
4687 // TruncInst Class
4688 //===----------------------------------------------------------------------===//
4689 
4690 /// This class represents a truncation of integer types.