LLVM  14.0.0git
Instructions.h
Go to the documentation of this file.
1 //===- llvm/Instructions.h - Instruction subclass definitions ---*- C++ -*-===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file exposes the class definitions of all of the subclasses of the
10 // Instruction class. This is meant to be an easy way to get access to all
11 // instruction subclasses.
12 //
13 //===----------------------------------------------------------------------===//
14 
15 #ifndef LLVM_IR_INSTRUCTIONS_H
16 #define LLVM_IR_INSTRUCTIONS_H
17 
18 #include "llvm/ADT/ArrayRef.h"
19 #include "llvm/ADT/Bitfields.h"
20 #include "llvm/ADT/MapVector.h"
21 #include "llvm/ADT/None.h"
22 #include "llvm/ADT/STLExtras.h"
23 #include "llvm/ADT/SmallVector.h"
24 #include "llvm/ADT/StringRef.h"
25 #include "llvm/ADT/Twine.h"
26 #include "llvm/ADT/iterator.h"
28 #include "llvm/IR/Attributes.h"
29 #include "llvm/IR/BasicBlock.h"
30 #include "llvm/IR/CallingConv.h"
31 #include "llvm/IR/CFG.h"
32 #include "llvm/IR/Constant.h"
33 #include "llvm/IR/DerivedTypes.h"
34 #include "llvm/IR/Function.h"
35 #include "llvm/IR/InstrTypes.h"
36 #include "llvm/IR/Instruction.h"
37 #include "llvm/IR/OperandTraits.h"
38 #include "llvm/IR/Type.h"
39 #include "llvm/IR/Use.h"
40 #include "llvm/IR/User.h"
41 #include "llvm/IR/Value.h"
43 #include "llvm/Support/Casting.h"
45 #include <cassert>
46 #include <cstddef>
47 #include <cstdint>
48 #include <iterator>
49 
50 namespace llvm {
51 
52 class APInt;
53 class ConstantInt;
54 class DataLayout;
55 class LLVMContext;
56 
57 //===----------------------------------------------------------------------===//
58 // AllocaInst Class
59 //===----------------------------------------------------------------------===//
60 
61 /// an instruction to allocate memory on the stack
62 class AllocaInst : public UnaryInstruction {
63  Type *AllocatedType;
64 
65  using AlignmentField = AlignmentBitfieldElementT<0>;
66  using UsedWithInAllocaField = BoolBitfieldElementT<AlignmentField::NextBit>;
68  static_assert(Bitfield::areContiguous<AlignmentField, UsedWithInAllocaField,
69  SwiftErrorField>(),
70  "Bitfields must be contiguous");
71 
72 protected:
73  // Note: Instruction needs to be a friend here to call cloneImpl.
74  friend class Instruction;
75 
76  AllocaInst *cloneImpl() const;
77 
78 public:
79  explicit AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
80  const Twine &Name, Instruction *InsertBefore);
81  AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
82  const Twine &Name, BasicBlock *InsertAtEnd);
83 
84  AllocaInst(Type *Ty, unsigned AddrSpace, const Twine &Name,
85  Instruction *InsertBefore);
86  AllocaInst(Type *Ty, unsigned AddrSpace,
87  const Twine &Name, BasicBlock *InsertAtEnd);
88 
89  AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize, Align Align,
90  const Twine &Name = "", Instruction *InsertBefore = nullptr);
91  AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize, Align Align,
92  const Twine &Name, BasicBlock *InsertAtEnd);
93 
94  /// Return true if there is an allocation size parameter to the allocation
95  /// instruction that is not 1.
96  bool isArrayAllocation() const;
97 
98  /// Get the number of elements allocated. For a simple allocation of a single
99  /// element, this will return a constant 1 value.
100  const Value *getArraySize() const { return getOperand(0); }
101  Value *getArraySize() { return getOperand(0); }
102 
103  /// Overload to return most specific pointer type.
104  PointerType *getType() const {
105  return cast<PointerType>(Instruction::getType());
106  }
107 
108  /// Get allocation size in bits. Returns None if size can't be determined,
109  /// e.g. in case of a VLA.
111 
112  /// Return the type that is being allocated by the instruction.
113  Type *getAllocatedType() const { return AllocatedType; }
114  /// for use only in special circumstances that need to generically
115  /// transform a whole instruction (eg: IR linking and vectorization).
116  void setAllocatedType(Type *Ty) { AllocatedType = Ty; }
117 
118  /// Return the alignment of the memory that is being allocated by the
119  /// instruction.
120  Align getAlign() const {
121  return Align(1ULL << getSubclassData<AlignmentField>());
122  }
123 
125  setSubclassData<AlignmentField>(Log2(Align));
126  }
127 
128  // FIXME: Remove this one transition to Align is over.
129  unsigned getAlignment() const { return getAlign().value(); }
130 
131  /// Return true if this alloca is in the entry block of the function and is a
132  /// constant size. If so, the code generator will fold it into the
133  /// prolog/epilog code, so it is basically free.
134  bool isStaticAlloca() const;
135 
136  /// Return true if this alloca is used as an inalloca argument to a call. Such
137  /// allocas are never considered static even if they are in the entry block.
138  bool isUsedWithInAlloca() const {
139  return getSubclassData<UsedWithInAllocaField>();
140  }
141 
142  /// Specify whether this alloca is used to represent the arguments to a call.
143  void setUsedWithInAlloca(bool V) {
144  setSubclassData<UsedWithInAllocaField>(V);
145  }
146 
147  /// Return true if this alloca is used as a swifterror argument to a call.
148  bool isSwiftError() const { return getSubclassData<SwiftErrorField>(); }
149  /// Specify whether this alloca is used to represent a swifterror.
150  void setSwiftError(bool V) { setSubclassData<SwiftErrorField>(V); }
151 
152  // Methods for support type inquiry through isa, cast, and dyn_cast:
153  static bool classof(const Instruction *I) {
154  return (I->getOpcode() == Instruction::Alloca);
155  }
156  static bool classof(const Value *V) {
157  return isa<Instruction>(V) && classof(cast<Instruction>(V));
158  }
159 
160 private:
161  // Shadow Instruction::setInstructionSubclassData with a private forwarding
162  // method so that subclasses cannot accidentally use it.
163  template <typename Bitfield>
164  void setSubclassData(typename Bitfield::Type Value) {
165  Instruction::setSubclassData<Bitfield>(Value);
166  }
167 };
168 
169 //===----------------------------------------------------------------------===//
170 // LoadInst Class
171 //===----------------------------------------------------------------------===//
172 
173 /// An instruction for reading from memory. This uses the SubclassData field in
174 /// Value to store whether or not the load is volatile.
175 class LoadInst : public UnaryInstruction {
176  using VolatileField = BoolBitfieldElementT<0>;
179  static_assert(
180  Bitfield::areContiguous<VolatileField, AlignmentField, OrderingField>(),
181  "Bitfields must be contiguous");
182 
183  void AssertOK();
184 
185 protected:
186  // Note: Instruction needs to be a friend here to call cloneImpl.
187  friend class Instruction;
188 
189  LoadInst *cloneImpl() const;
190 
191 public:
192  LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr,
193  Instruction *InsertBefore);
194  LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, BasicBlock *InsertAtEnd);
195  LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
196  Instruction *InsertBefore);
197  LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
198  BasicBlock *InsertAtEnd);
199  LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
200  Align Align, Instruction *InsertBefore = nullptr);
201  LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
202  Align Align, BasicBlock *InsertAtEnd);
203  LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
204  Align Align, AtomicOrdering Order,
206  Instruction *InsertBefore = nullptr);
207  LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
209  BasicBlock *InsertAtEnd);
210 
211  /// Return true if this is a load from a volatile memory location.
212  bool isVolatile() const { return getSubclassData<VolatileField>(); }
213 
214  /// Specify whether this is a volatile load or not.
215  void setVolatile(bool V) { setSubclassData<VolatileField>(V); }
216 
217  /// Return the alignment of the access that is being performed.
218  /// FIXME: Remove this function once transition to Align is over.
219  /// Use getAlign() instead.
220  unsigned getAlignment() const { return getAlign().value(); }
221 
222  /// Return the alignment of the access that is being performed.
223  Align getAlign() const {
224  return Align(1ULL << (getSubclassData<AlignmentField>()));
225  }
226 
228  setSubclassData<AlignmentField>(Log2(Align));
229  }
230 
231  /// Returns the ordering constraint of this load instruction.
233  return getSubclassData<OrderingField>();
234  }
235  /// Sets the ordering constraint of this load instruction. May not be Release
236  /// or AcquireRelease.
237  void setOrdering(AtomicOrdering Ordering) {
238  setSubclassData<OrderingField>(Ordering);
239  }
240 
241  /// Returns the synchronization scope ID of this load instruction.
243  return SSID;
244  }
245 
246  /// Sets the synchronization scope ID of this load instruction.
248  this->SSID = SSID;
249  }
250 
251  /// Sets the ordering constraint and the synchronization scope ID of this load
252  /// instruction.
253  void setAtomic(AtomicOrdering Ordering,
255  setOrdering(Ordering);
256  setSyncScopeID(SSID);
257  }
258 
259  bool isSimple() const { return !isAtomic() && !isVolatile(); }
260 
261  bool isUnordered() const {
262  return (getOrdering() == AtomicOrdering::NotAtomic ||
264  !isVolatile();
265  }
266 
268  const Value *getPointerOperand() const { return getOperand(0); }
269  static unsigned getPointerOperandIndex() { return 0U; }
271 
272  /// Returns the address space of the pointer operand.
273  unsigned getPointerAddressSpace() const {
275  }
276 
277  // Methods for support type inquiry through isa, cast, and dyn_cast:
278  static bool classof(const Instruction *I) {
279  return I->getOpcode() == Instruction::Load;
280  }
281  static bool classof(const Value *V) {
282  return isa<Instruction>(V) && classof(cast<Instruction>(V));
283  }
284 
285 private:
286  // Shadow Instruction::setInstructionSubclassData with a private forwarding
287  // method so that subclasses cannot accidentally use it.
288  template <typename Bitfield>
289  void setSubclassData(typename Bitfield::Type Value) {
290  Instruction::setSubclassData<Bitfield>(Value);
291  }
292 
293  /// The synchronization scope ID of this load instruction. Not quite enough
294  /// room in SubClassData for everything, so synchronization scope ID gets its
295  /// own field.
296  SyncScope::ID SSID;
297 };
298 
299 //===----------------------------------------------------------------------===//
300 // StoreInst Class
301 //===----------------------------------------------------------------------===//
302 
303 /// An instruction for storing to memory.
304 class StoreInst : public Instruction {
305  using VolatileField = BoolBitfieldElementT<0>;
308  static_assert(
309  Bitfield::areContiguous<VolatileField, AlignmentField, OrderingField>(),
310  "Bitfields must be contiguous");
311 
312  void AssertOK();
313 
314 protected:
315  // Note: Instruction needs to be a friend here to call cloneImpl.
316  friend class Instruction;
317 
318  StoreInst *cloneImpl() const;
319 
320 public:
321  StoreInst(Value *Val, Value *Ptr, Instruction *InsertBefore);
322  StoreInst(Value *Val, Value *Ptr, BasicBlock *InsertAtEnd);
323  StoreInst(Value *Val, Value *Ptr, bool isVolatile, Instruction *InsertBefore);
324  StoreInst(Value *Val, Value *Ptr, bool isVolatile, BasicBlock *InsertAtEnd);
325  StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align,
326  Instruction *InsertBefore = nullptr);
327  StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align,
328  BasicBlock *InsertAtEnd);
329  StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align,
331  Instruction *InsertBefore = nullptr);
332  StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align,
333  AtomicOrdering Order, SyncScope::ID SSID, BasicBlock *InsertAtEnd);
334 
335  // allocate space for exactly two operands
336  void *operator new(size_t S) { return User::operator new(S, 2); }
337  void operator delete(void *Ptr) { User::operator delete(Ptr); }
338 
339  /// Return true if this is a store to a volatile memory location.
340  bool isVolatile() const { return getSubclassData<VolatileField>(); }
341 
342  /// Specify whether this is a volatile store or not.
343  void setVolatile(bool V) { setSubclassData<VolatileField>(V); }
344 
345  /// Transparently provide more efficient getOperand methods.
347 
348  /// Return the alignment of the access that is being performed
349  /// FIXME: Remove this function once transition to Align is over.
350  /// Use getAlign() instead.
351  unsigned getAlignment() const { return getAlign().value(); }
352 
353  Align getAlign() const {
354  return Align(1ULL << (getSubclassData<AlignmentField>()));
355  }
356 
358  setSubclassData<AlignmentField>(Log2(Align));
359  }
360 
361  /// Returns the ordering constraint of this store instruction.
363  return getSubclassData<OrderingField>();
364  }
365 
366  /// Sets the ordering constraint of this store instruction. May not be
367  /// Acquire or AcquireRelease.
368  void setOrdering(AtomicOrdering Ordering) {
369  setSubclassData<OrderingField>(Ordering);
370  }
371 
372  /// Returns the synchronization scope ID of this store instruction.
374  return SSID;
375  }
376 
377  /// Sets the synchronization scope ID of this store instruction.
379  this->SSID = SSID;
380  }
381 
382  /// Sets the ordering constraint and the synchronization scope ID of this
383  /// store instruction.
384  void setAtomic(AtomicOrdering Ordering,
386  setOrdering(Ordering);
387  setSyncScopeID(SSID);
388  }
389 
390  bool isSimple() const { return !isAtomic() && !isVolatile(); }
391 
392  bool isUnordered() const {
393  return (getOrdering() == AtomicOrdering::NotAtomic ||
395  !isVolatile();
396  }
397 
398  Value *getValueOperand() { return getOperand(0); }
399  const Value *getValueOperand() const { return getOperand(0); }
400 
402  const Value *getPointerOperand() const { return getOperand(1); }
403  static unsigned getPointerOperandIndex() { return 1U; }
405 
406  /// Returns the address space of the pointer operand.
407  unsigned getPointerAddressSpace() const {
409  }
410 
411  // Methods for support type inquiry through isa, cast, and dyn_cast:
412  static bool classof(const Instruction *I) {
413  return I->getOpcode() == Instruction::Store;
414  }
415  static bool classof(const Value *V) {
416  return isa<Instruction>(V) && classof(cast<Instruction>(V));
417  }
418 
419 private:
420  // Shadow Instruction::setInstructionSubclassData with a private forwarding
421  // method so that subclasses cannot accidentally use it.
422  template <typename Bitfield>
423  void setSubclassData(typename Bitfield::Type Value) {
424  Instruction::setSubclassData<Bitfield>(Value);
425  }
426 
427  /// The synchronization scope ID of this store instruction. Not quite enough
428  /// room in SubClassData for everything, so synchronization scope ID gets its
429  /// own field.
430  SyncScope::ID SSID;
431 };
432 
433 template <>
434 struct OperandTraits<StoreInst> : public FixedNumOperandTraits<StoreInst, 2> {
435 };
436 
438 
439 //===----------------------------------------------------------------------===//
440 // FenceInst Class
441 //===----------------------------------------------------------------------===//
442 
443 /// An instruction for ordering other memory operations.
444 class FenceInst : public Instruction {
445  using OrderingField = AtomicOrderingBitfieldElementT<0>;
446 
447  void Init(AtomicOrdering Ordering, SyncScope::ID SSID);
448 
449 protected:
450  // Note: Instruction needs to be a friend here to call cloneImpl.
451  friend class Instruction;
452 
453  FenceInst *cloneImpl() const;
454 
455 public:
456  // Ordering may only be Acquire, Release, AcquireRelease, or
457  // SequentiallyConsistent.
460  Instruction *InsertBefore = nullptr);
462  BasicBlock *InsertAtEnd);
463 
464  // allocate space for exactly zero operands
465  void *operator new(size_t S) { return User::operator new(S, 0); }
466  void operator delete(void *Ptr) { User::operator delete(Ptr); }
467 
468  /// Returns the ordering constraint of this fence instruction.
470  return getSubclassData<OrderingField>();
471  }
472 
473  /// Sets the ordering constraint of this fence instruction. May only be
474  /// Acquire, Release, AcquireRelease, or SequentiallyConsistent.
475  void setOrdering(AtomicOrdering Ordering) {
476  setSubclassData<OrderingField>(Ordering);
477  }
478 
479  /// Returns the synchronization scope ID of this fence instruction.
481  return SSID;
482  }
483 
484  /// Sets the synchronization scope ID of this fence instruction.
486  this->SSID = SSID;
487  }
488 
489  // Methods for support type inquiry through isa, cast, and dyn_cast:
490  static bool classof(const Instruction *I) {
491  return I->getOpcode() == Instruction::Fence;
492  }
493  static bool classof(const Value *V) {
494  return isa<Instruction>(V) && classof(cast<Instruction>(V));
495  }
496 
497 private:
498  // Shadow Instruction::setInstructionSubclassData with a private forwarding
499  // method so that subclasses cannot accidentally use it.
500  template <typename Bitfield>
501  void setSubclassData(typename Bitfield::Type Value) {
502  Instruction::setSubclassData<Bitfield>(Value);
503  }
504 
505  /// The synchronization scope ID of this fence instruction. Not quite enough
506  /// room in SubClassData for everything, so synchronization scope ID gets its
507  /// own field.
508  SyncScope::ID SSID;
509 };
510 
511 //===----------------------------------------------------------------------===//
512 // AtomicCmpXchgInst Class
513 //===----------------------------------------------------------------------===//
514 
515 /// An instruction that atomically checks whether a
516 /// specified value is in a memory location, and, if it is, stores a new value
517 /// there. The value returned by this instruction is a pair containing the
518 /// original value as first element, and an i1 indicating success (true) or
519 /// failure (false) as second element.
520 ///
522  void Init(Value *Ptr, Value *Cmp, Value *NewVal, Align Align,
523  AtomicOrdering SuccessOrdering, AtomicOrdering FailureOrdering,
524  SyncScope::ID SSID);
525 
526  template <unsigned Offset>
527  using AtomicOrderingBitfieldElement =
530 
531 protected:
532  // Note: Instruction needs to be a friend here to call cloneImpl.
533  friend class Instruction;
534 
535  AtomicCmpXchgInst *cloneImpl() const;
536 
537 public:
538  AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal, Align Alignment,
539  AtomicOrdering SuccessOrdering,
540  AtomicOrdering FailureOrdering, SyncScope::ID SSID,
541  Instruction *InsertBefore = nullptr);
542  AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal, Align Alignment,
543  AtomicOrdering SuccessOrdering,
544  AtomicOrdering FailureOrdering, SyncScope::ID SSID,
545  BasicBlock *InsertAtEnd);
546 
547  // allocate space for exactly three operands
548  void *operator new(size_t S) { return User::operator new(S, 3); }
549  void operator delete(void *Ptr) { User::operator delete(Ptr); }
550 
553  using SuccessOrderingField =
555  using FailureOrderingField =
557  using AlignmentField =
559  static_assert(
562  "Bitfields must be contiguous");
563 
564  /// Return the alignment of the memory that is being allocated by the
565  /// instruction.
566  Align getAlign() const {
567  return Align(1ULL << getSubclassData<AlignmentField>());
568  }
569 
571  setSubclassData<AlignmentField>(Log2(Align));
572  }
573 
574  /// Return true if this is a cmpxchg from a volatile memory
575  /// location.
576  ///
577  bool isVolatile() const { return getSubclassData<VolatileField>(); }
578 
579  /// Specify whether this is a volatile cmpxchg.
580  ///
581  void setVolatile(bool V) { setSubclassData<VolatileField>(V); }
582 
583  /// Return true if this cmpxchg may spuriously fail.
584  bool isWeak() const { return getSubclassData<WeakField>(); }
585 
586  void setWeak(bool IsWeak) { setSubclassData<WeakField>(IsWeak); }
587 
588  /// Transparently provide more efficient getOperand methods.
590 
591  static bool isValidSuccessOrdering(AtomicOrdering Ordering) {
592  return Ordering != AtomicOrdering::NotAtomic &&
593  Ordering != AtomicOrdering::Unordered;
594  }
595 
596  static bool isValidFailureOrdering(AtomicOrdering Ordering) {
597  return Ordering != AtomicOrdering::NotAtomic &&
598  Ordering != AtomicOrdering::Unordered &&
599  Ordering != AtomicOrdering::AcquireRelease &&
600  Ordering != AtomicOrdering::Release;
601  }
602 
603  /// Returns the success ordering constraint of this cmpxchg instruction.
605  return getSubclassData<SuccessOrderingField>();
606  }
607 
608  /// Sets the success ordering constraint of this cmpxchg instruction.
610  assert(isValidSuccessOrdering(Ordering) &&
611  "invalid CmpXchg success ordering");
612  setSubclassData<SuccessOrderingField>(Ordering);
613  }
614 
615  /// Returns the failure ordering constraint of this cmpxchg instruction.
617  return getSubclassData<FailureOrderingField>();
618  }
619 
620  /// Sets the failure ordering constraint of this cmpxchg instruction.
622  assert(isValidFailureOrdering(Ordering) &&
623  "invalid CmpXchg failure ordering");
624  setSubclassData<FailureOrderingField>(Ordering);
625  }
626 
627  /// Returns a single ordering which is at least as strong as both the
628  /// success and failure orderings for this cmpxchg.
637  }
638  return getSuccessOrdering();
639  }
640 
641  /// Returns the synchronization scope ID of this cmpxchg instruction.
643  return SSID;
644  }
645 
646  /// Sets the synchronization scope ID of this cmpxchg instruction.
648  this->SSID = SSID;
649  }
650 
652  const Value *getPointerOperand() const { return getOperand(0); }
653  static unsigned getPointerOperandIndex() { return 0U; }
654 
656  const Value *getCompareOperand() const { return getOperand(1); }
657 
659  const Value *getNewValOperand() const { return getOperand(2); }
660 
661  /// Returns the address space of the pointer operand.
662  unsigned getPointerAddressSpace() const {
664  }
665 
666  /// Returns the strongest permitted ordering on failure, given the
667  /// desired ordering on success.
668  ///
669  /// If the comparison in a cmpxchg operation fails, there is no atomic store
670  /// so release semantics cannot be provided. So this function drops explicit
671  /// Release requests from the AtomicOrdering. A SequentiallyConsistent
672  /// operation would remain SequentiallyConsistent.
673  static AtomicOrdering
675  switch (SuccessOrdering) {
676  default:
677  llvm_unreachable("invalid cmpxchg success ordering");
686  }
687  }
688 
689  // Methods for support type inquiry through isa, cast, and dyn_cast:
690  static bool classof(const Instruction *I) {
691  return I->getOpcode() == Instruction::AtomicCmpXchg;
692  }
693  static bool classof(const Value *V) {
694  return isa<Instruction>(V) && classof(cast<Instruction>(V));
695  }
696 
697 private:
698  // Shadow Instruction::setInstructionSubclassData with a private forwarding
699  // method so that subclasses cannot accidentally use it.
700  template <typename Bitfield>
701  void setSubclassData(typename Bitfield::Type Value) {
702  Instruction::setSubclassData<Bitfield>(Value);
703  }
704 
705  /// The synchronization scope ID of this cmpxchg instruction. Not quite
706  /// enough room in SubClassData for everything, so synchronization scope ID
707  /// gets its own field.
708  SyncScope::ID SSID;
709 };
710 
711 template <>
713  public FixedNumOperandTraits<AtomicCmpXchgInst, 3> {
714 };
715 
717 
718 //===----------------------------------------------------------------------===//
719 // AtomicRMWInst Class
720 //===----------------------------------------------------------------------===//
721 
722 /// an instruction that atomically reads a memory location,
723 /// combines it with another value, and then stores the result back. Returns
724 /// the old value.
725 ///
726 class AtomicRMWInst : public Instruction {
727 protected:
728  // Note: Instruction needs to be a friend here to call cloneImpl.
729  friend class Instruction;
730 
731  AtomicRMWInst *cloneImpl() const;
732 
733 public:
734  /// This enumeration lists the possible modifications atomicrmw can make. In
735  /// the descriptions, 'p' is the pointer to the instruction's memory location,
736  /// 'old' is the initial value of *p, and 'v' is the other value passed to the
737  /// instruction. These instructions always return 'old'.
738  enum BinOp : unsigned {
739  /// *p = v
741  /// *p = old + v
743  /// *p = old - v
745  /// *p = old & v
747  /// *p = ~(old & v)
749  /// *p = old | v
750  Or,
751  /// *p = old ^ v
753  /// *p = old >signed v ? old : v
755  /// *p = old <signed v ? old : v
757  /// *p = old >unsigned v ? old : v
759  /// *p = old <unsigned v ? old : v
761 
762  /// *p = old + v
764 
765  /// *p = old - v
767 
768  FIRST_BINOP = Xchg,
769  LAST_BINOP = FSub,
770  BAD_BINOP
771  };
772 
773 private:
774  template <unsigned Offset>
775  using AtomicOrderingBitfieldElement =
778 
779  template <unsigned Offset>
780  using BinOpBitfieldElement =
782 
783 public:
784  AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val, Align Alignment,
785  AtomicOrdering Ordering, SyncScope::ID SSID,
786  Instruction *InsertBefore = nullptr);
787  AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val, Align Alignment,
788  AtomicOrdering Ordering, SyncScope::ID SSID,
789  BasicBlock *InsertAtEnd);
790 
791  // allocate space for exactly two operands
792  void *operator new(size_t S) { return User::operator new(S, 2); }
793  void operator delete(void *Ptr) { User::operator delete(Ptr); }
794 
796  using AtomicOrderingField =
798  using OperationField = BinOpBitfieldElement<AtomicOrderingField::NextBit>;
802  "Bitfields must be contiguous");
803 
804  BinOp getOperation() const { return getSubclassData<OperationField>(); }
805 
806  static StringRef getOperationName(BinOp Op);
807 
808  static bool isFPOperation(BinOp Op) {
809  switch (Op) {
810  case AtomicRMWInst::FAdd:
811  case AtomicRMWInst::FSub:
812  return true;
813  default:
814  return false;
815  }
816  }
817 
819  setSubclassData<OperationField>(Operation);
820  }
821 
822  /// Return the alignment of the memory that is being allocated by the
823  /// instruction.
824  Align getAlign() const {
825  return Align(1ULL << getSubclassData<AlignmentField>());
826  }
827 
829  setSubclassData<AlignmentField>(Log2(Align));
830  }
831 
832  /// Return true if this is a RMW on a volatile memory location.
833  ///
834  bool isVolatile() const { return getSubclassData<VolatileField>(); }
835 
836  /// Specify whether this is a volatile RMW or not.
837  ///
838  void setVolatile(bool V) { setSubclassData<VolatileField>(V); }
839 
840  /// Transparently provide more efficient getOperand methods.
842 
843  /// Returns the ordering constraint of this rmw instruction.
845  return getSubclassData<AtomicOrderingField>();
846  }
847 
848  /// Sets the ordering constraint of this rmw instruction.
849  void setOrdering(AtomicOrdering Ordering) {
850  assert(Ordering != AtomicOrdering::NotAtomic &&
851  "atomicrmw instructions can only be atomic.");
852  setSubclassData<AtomicOrderingField>(Ordering);
853  }
854 
855  /// Returns the synchronization scope ID of this rmw instruction.
857  return SSID;
858  }
859 
860  /// Sets the synchronization scope ID of this rmw instruction.
862  this->SSID = SSID;
863  }
864 
865  Value *getPointerOperand() { return getOperand(0); }
866  const Value *getPointerOperand() const { return getOperand(0); }
867  static unsigned getPointerOperandIndex() { return 0U; }
868 
869  Value *getValOperand() { return getOperand(1); }
870  const Value *getValOperand() const { return getOperand(1); }
871 
872  /// Returns the address space of the pointer operand.
873  unsigned getPointerAddressSpace() const {
875  }
876 
878  return isFPOperation(getOperation());
879  }
880 
881  // Methods for support type inquiry through isa, cast, and dyn_cast:
882  static bool classof(const Instruction *I) {
883  return I->getOpcode() == Instruction::AtomicRMW;
884  }
885  static bool classof(const Value *V) {
886  return isa<Instruction>(V) && classof(cast<Instruction>(V));
887  }
888 
889 private:
890  void Init(BinOp Operation, Value *Ptr, Value *Val, Align Align,
891  AtomicOrdering Ordering, SyncScope::ID SSID);
892 
893  // Shadow Instruction::setInstructionSubclassData with a private forwarding
894  // method so that subclasses cannot accidentally use it.
895  template <typename Bitfield>
896  void setSubclassData(typename Bitfield::Type Value) {
897  Instruction::setSubclassData<Bitfield>(Value);
898  }
899 
900  /// The synchronization scope ID of this rmw instruction. Not quite enough
901  /// room in SubClassData for everything, so synchronization scope ID gets its
902  /// own field.
903  SyncScope::ID SSID;
904 };
905 
906 template <>
908  : public FixedNumOperandTraits<AtomicRMWInst,2> {
909 };
910 
912 
913 //===----------------------------------------------------------------------===//
914 // GetElementPtrInst Class
915 //===----------------------------------------------------------------------===//
916 
917 // checkGEPType - Simple wrapper function to give a better assertion failure
918 // message on bad indexes for a gep instruction.
919 //
921  assert(Ty && "Invalid GetElementPtrInst indices for type!");
922  return Ty;
923 }
924 
925 /// an instruction for type-safe pointer arithmetic to
926 /// access elements of arrays and structs
927 ///
929  Type *SourceElementType;
930  Type *ResultElementType;
931 
933 
934  /// Constructors - Create a getelementptr instruction with a base pointer an
935  /// list of indices. The first ctor can optionally insert before an existing
936  /// instruction, the second appends the new instruction to the specified
937  /// BasicBlock.
938  inline GetElementPtrInst(Type *PointeeType, Value *Ptr,
939  ArrayRef<Value *> IdxList, unsigned Values,
940  const Twine &NameStr, Instruction *InsertBefore);
941  inline GetElementPtrInst(Type *PointeeType, Value *Ptr,
942  ArrayRef<Value *> IdxList, unsigned Values,
943  const Twine &NameStr, BasicBlock *InsertAtEnd);
944 
945  void init(Value *Ptr, ArrayRef<Value *> IdxList, const Twine &NameStr);
946 
947 protected:
948  // Note: Instruction needs to be a friend here to call cloneImpl.
949  friend class Instruction;
950 
951  GetElementPtrInst *cloneImpl() const;
952 
953 public:
954  static GetElementPtrInst *Create(Type *PointeeType, Value *Ptr,
955  ArrayRef<Value *> IdxList,
956  const Twine &NameStr = "",
957  Instruction *InsertBefore = nullptr) {
958  unsigned Values = 1 + unsigned(IdxList.size());
959  assert(PointeeType && "Must specify element type");
960  assert(cast<PointerType>(Ptr->getType()->getScalarType())
961  ->isOpaqueOrPointeeTypeMatches(PointeeType));
962  return new (Values) GetElementPtrInst(PointeeType, Ptr, IdxList, Values,
963  NameStr, InsertBefore);
964  }
965 
966  static GetElementPtrInst *Create(Type *PointeeType, Value *Ptr,
967  ArrayRef<Value *> IdxList,
968  const Twine &NameStr,
969  BasicBlock *InsertAtEnd) {
970  unsigned Values = 1 + unsigned(IdxList.size());
971  assert(PointeeType && "Must specify element type");
972  assert(cast<PointerType>(Ptr->getType()->getScalarType())
973  ->isOpaqueOrPointeeTypeMatches(PointeeType));
974  return new (Values) GetElementPtrInst(PointeeType, Ptr, IdxList, Values,
975  NameStr, InsertAtEnd);
976  }
977 
979  Value *Ptr, ArrayRef<Value *> IdxList, const Twine &NameStr = "",
980  Instruction *InsertBefore = nullptr),
981  "Use the version with explicit element type instead") {
982  return CreateInBounds(
983  Ptr->getType()->getScalarType()->getPointerElementType(), Ptr, IdxList,
984  NameStr, InsertBefore);
985  }
986 
987  /// Create an "inbounds" getelementptr. See the documentation for the
988  /// "inbounds" flag in LangRef.html for details.
989  static GetElementPtrInst *
990  CreateInBounds(Type *PointeeType, Value *Ptr, ArrayRef<Value *> IdxList,
991  const Twine &NameStr = "",
992  Instruction *InsertBefore = nullptr) {
994  Create(PointeeType, Ptr, IdxList, NameStr, InsertBefore);
995  GEP->setIsInBounds(true);
996  return GEP;
997  }
998 
1000  Value *Ptr, ArrayRef<Value *> IdxList, const Twine &NameStr,
1001  BasicBlock *InsertAtEnd),
1002  "Use the version with explicit element type instead") {
1003  return CreateInBounds(
1004  Ptr->getType()->getScalarType()->getPointerElementType(), Ptr, IdxList,
1005  NameStr, InsertAtEnd);
1006  }
1007 
1008  static GetElementPtrInst *CreateInBounds(Type *PointeeType, Value *Ptr,
1009  ArrayRef<Value *> IdxList,
1010  const Twine &NameStr,
1011  BasicBlock *InsertAtEnd) {
1013  Create(PointeeType, Ptr, IdxList, NameStr, InsertAtEnd);
1014  GEP->setIsInBounds(true);
1015  return GEP;
1016  }
1017 
1018  /// Transparently provide more efficient getOperand methods.
1020 
1021  Type *getSourceElementType() const { return SourceElementType; }
1022 
1023  void setSourceElementType(Type *Ty) { SourceElementType = Ty; }
1024  void setResultElementType(Type *Ty) { ResultElementType = Ty; }
1025 
1027  assert(cast<PointerType>(getType()->getScalarType())
1028  ->isOpaqueOrPointeeTypeMatches(ResultElementType));
1029  return ResultElementType;
1030  }
1031 
  /// Returns the address space of this instruction's pointer type.
  unsigned getAddressSpace() const {
    // Note that this is always the same as the pointer operand's address space
    // and that is cheaper to compute, so cheat here.
    return getPointerAddressSpace();
  }

  /// Returns the result type of a getelementptr with the given source
  /// element type and indexes.
  ///
  /// Null is returned if the indices are invalid for the specified
  /// source element type.
  static Type *getIndexedType(Type *Ty, ArrayRef<Value *> IdxList);
  static Type *getIndexedType(Type *Ty, ArrayRef<Constant *> IdxList);
  static Type *getIndexedType(Type *Ty, ArrayRef<uint64_t> IdxList);

  /// Return the type of the element at the given index of an indexable
  /// type. This is equivalent to "getIndexedType(Agg, {Zero, Idx})".
  ///
  /// Returns null if the type can't be indexed, or the given index is not
  /// legal for the given type.
  static Type *getTypeAtIndex(Type *Ty, Value *Idx);
  static Type *getTypeAtIndex(Type *Ty, uint64_t Idx);

  // Iterators over the index operands only; operand 0 is the base pointer
  // and is skipped.
  inline op_iterator idx_begin() { return op_begin()+1; }
  inline const_op_iterator idx_begin() const { return op_begin()+1; }
  inline op_iterator idx_end() { return op_end(); }
  inline const_op_iterator idx_end() const { return op_end(); }
1060 
1062  return make_range(idx_begin(), idx_end());
1063  }
1064 
1066  return make_range(idx_begin(), idx_end());
1067  }
1068 
1070  return getOperand(0);
1071  }
1072  const Value *getPointerOperand() const {
1073  return getOperand(0);
1074  }
1075  static unsigned getPointerOperandIndex() {
1076  return 0U; // get index for modifying correct operand.
1077  }
1078 
1079  /// Method to return the pointer operand as a
1080  /// PointerType.
1082  return getPointerOperand()->getType();
1083  }
1084 
1085  /// Returns the address space of the pointer operand.
1086  unsigned getPointerAddressSpace() const {
1088  }
1089 
1090  /// Returns the pointer type returned by the GEP
1091  /// instruction, which may be a vector of pointers.
1092  static Type *getGEPReturnType(Type *ElTy, Value *Ptr,
1093  ArrayRef<Value *> IdxList) {
1094  PointerType *OrigPtrTy = cast<PointerType>(Ptr->getType()->getScalarType());
1095  unsigned AddrSpace = OrigPtrTy->getAddressSpace();
1096  Type *ResultElemTy = checkGEPType(getIndexedType(ElTy, IdxList));
1097  Type *PtrTy = OrigPtrTy->isOpaque()
1098  ? PointerType::get(OrigPtrTy->getContext(), AddrSpace)
1099  : PointerType::get(ResultElemTy, AddrSpace);
1100  // Vector GEP
1101  if (auto *PtrVTy = dyn_cast<VectorType>(Ptr->getType())) {
1102  ElementCount EltCount = PtrVTy->getElementCount();
1103  return VectorType::get(PtrTy, EltCount);
1104  }
1105  for (Value *Index : IdxList)
1106  if (auto *IndexVTy = dyn_cast<VectorType>(Index->getType())) {
1107  ElementCount EltCount = IndexVTy->getElementCount();
1108  return VectorType::get(PtrTy, EltCount);
1109  }
1110  // Scalar GEP
1111  return PtrTy;
1112  }
1113 
  // Number of index operands (total operands minus the base pointer).
  unsigned getNumIndices() const { // Note: always non-negative
    return getNumOperands() - 1;
  }

  // True if the GEP has at least one index operand.
  bool hasIndices() const {
    return getNumOperands() > 1;
  }

  /// Return true if all of the indices of this GEP are
  /// zeros. If so, the result pointer and the first operand have the same
  /// value, just potentially different types.
  bool hasAllZeroIndices() const;

  /// Return true if all of the indices of this GEP are
  /// constant integers. If so, the result pointer and the first operand have
  /// a constant offset between them.
  bool hasAllConstantIndices() const;

  /// Set or clear the inbounds flag on this GEP instruction.
  /// See LangRef.html for the meaning of inbounds on a getelementptr.
  void setIsInBounds(bool b = true);

  /// Determine whether the GEP has the inbounds flag.
  bool isInBounds() const;

  /// Accumulate the constant address offset of this GEP if possible.
  ///
  /// This routine accepts an APInt into which it will accumulate the constant
  /// offset of this GEP if the GEP is in fact constant. If the GEP is not
  /// all-constant, it returns false and the value of the offset APInt is
  /// undefined (it is *not* preserved!). The APInt passed into this routine
  /// must be at least as wide as the IntPtr type for the address space of
  /// the base GEP pointer.
  bool accumulateConstantOffset(const DataLayout &DL, APInt &Offset) const;
  // Decompose the GEP's offset into a constant part plus scaled variable
  // terms (one APInt multiplier per variable index), all of width BitWidth.
  bool collectOffset(const DataLayout &DL, unsigned BitWidth,
                     MapVector<Value *, APInt> &VariableOffsets,
                     APInt &ConstantOffset) const;
  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return (I->getOpcode() == Instruction::GetElementPtr);
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
1158 };
1159 
1160 template <>
1162  public VariadicOperandTraits<GetElementPtrInst, 1> {
1163 };
1164 
GetElementPtrInst::GetElementPtrInst(Type *PointeeType, Value *Ptr,
                                     ArrayRef<Value *> IdxList, unsigned Values,
                                     const Twine &NameStr,
                                     Instruction *InsertBefore)
    // Operands are co-allocated immediately before this object, so the first
    // operand lives at op_end(this) - Values (1 pointer + N indices).
    : Instruction(getGEPReturnType(PointeeType, Ptr, IdxList), GetElementPtr,
                  OperandTraits<GetElementPtrInst>::op_end(this) - Values,
                  Values, InsertBefore),
      SourceElementType(PointeeType),
      ResultElementType(getIndexedType(PointeeType, IdxList)) {
  assert(cast<PointerType>(getType()->getScalarType())
             ->isOpaqueOrPointeeTypeMatches(ResultElementType));
  init(Ptr, IdxList, NameStr);
}
1178 
GetElementPtrInst::GetElementPtrInst(Type *PointeeType, Value *Ptr,
                                     ArrayRef<Value *> IdxList, unsigned Values,
                                     const Twine &NameStr,
                                     BasicBlock *InsertAtEnd)
    // Same operand co-allocation as the insert-before constructor; this
    // overload appends the new instruction to InsertAtEnd instead.
    : Instruction(getGEPReturnType(PointeeType, Ptr, IdxList), GetElementPtr,
                  OperandTraits<GetElementPtrInst>::op_end(this) - Values,
                  Values, InsertAtEnd),
      SourceElementType(PointeeType),
      ResultElementType(getIndexedType(PointeeType, IdxList)) {
  assert(cast<PointerType>(getType()->getScalarType())
             ->isOpaqueOrPointeeTypeMatches(ResultElementType));
  init(Ptr, IdxList, NameStr);
}
1192 
// Out-of-line definitions of the transparent operand accessors declared above.
DEFINE_TRANSPARENT_OPERAND_ACCESSORS(GetElementPtrInst, Value)
1194 
1195 //===----------------------------------------------------------------------===//
1196 // ICmpInst Class
1197 //===----------------------------------------------------------------------===//
1198 
1199 /// This instruction compares its operands according to the predicate given
1200 /// to the constructor. It only operates on integers or pointers. The operands
1201 /// must be identical types.
1202 /// Represent an integer comparison operator.
1203 class ICmpInst: public CmpInst {
1204  void AssertOK() {
1205  assert(isIntPredicate() &&
1206  "Invalid ICmp predicate value");
1207  assert(getOperand(0)->getType() == getOperand(1)->getType() &&
1208  "Both operands to ICmp instruction are not of the same type!");
1209  // Check that the operands are the right type
1210  assert((getOperand(0)->getType()->isIntOrIntVectorTy() ||
1211  getOperand(0)->getType()->isPtrOrPtrVectorTy()) &&
1212  "Invalid operand types for ICmp instruction");
1213  }
1214 
1215 protected:
1216  // Note: Instruction needs to be a friend here to call cloneImpl.
1217  friend class Instruction;
1218 
1219  /// Clone an identical ICmpInst
1220  ICmpInst *cloneImpl() const;
1221 
1222 public:
1223  /// Constructor with insert-before-instruction semantics.
1225  Instruction *InsertBefore, ///< Where to insert
1226  Predicate pred, ///< The predicate to use for the comparison
1227  Value *LHS, ///< The left-hand-side of the expression
1228  Value *RHS, ///< The right-hand-side of the expression
1229  const Twine &NameStr = "" ///< Name of the instruction
1230  ) : CmpInst(makeCmpResultType(LHS->getType()),
1231  Instruction::ICmp, pred, LHS, RHS, NameStr,
1232  InsertBefore) {
1233 #ifndef NDEBUG
1234  AssertOK();
1235 #endif
1236  }
1237 
1238  /// Constructor with insert-at-end semantics.
1240  BasicBlock &InsertAtEnd, ///< Block to insert into.
1241  Predicate pred, ///< The predicate to use for the comparison
1242  Value *LHS, ///< The left-hand-side of the expression
1243  Value *RHS, ///< The right-hand-side of the expression
1244  const Twine &NameStr = "" ///< Name of the instruction
1245  ) : CmpInst(makeCmpResultType(LHS->getType()),
1246  Instruction::ICmp, pred, LHS, RHS, NameStr,
1247  &InsertAtEnd) {
1248 #ifndef NDEBUG
1249  AssertOK();
1250 #endif
1251  }
1252 
1253  /// Constructor with no-insertion semantics
1255  Predicate pred, ///< The predicate to use for the comparison
1256  Value *LHS, ///< The left-hand-side of the expression
1257  Value *RHS, ///< The right-hand-side of the expression
1258  const Twine &NameStr = "" ///< Name of the instruction
1259  ) : CmpInst(makeCmpResultType(LHS->getType()),
1260  Instruction::ICmp, pred, LHS, RHS, NameStr) {
1261 #ifndef NDEBUG
1262  AssertOK();
1263 #endif
1264  }
1265 
1266  /// For example, EQ->EQ, SLE->SLE, UGT->SGT, etc.
1267  /// @returns the predicate that would be the result if the operand were
1268  /// regarded as signed.
1269  /// Return the signed version of the predicate
1271  return getSignedPredicate(getPredicate());
1272  }
1273 
1274  /// This is a static version that you can use without an instruction.
1275  /// Return the signed version of the predicate.
1276  static Predicate getSignedPredicate(Predicate pred);
1277 
1278  /// For example, EQ->EQ, SLE->ULE, UGT->UGT, etc.
1279  /// @returns the predicate that would be the result if the operand were
1280  /// regarded as unsigned.
1281  /// Return the unsigned version of the predicate
1283  return getUnsignedPredicate(getPredicate());
1284  }
1285 
1286  /// This is a static version that you can use without an instruction.
1287  /// Return the unsigned version of the predicate.
1288  static Predicate getUnsignedPredicate(Predicate pred);
1289 
1290  /// Return true if this predicate is either EQ or NE. This also
1291  /// tests for commutativity.
1292  static bool isEquality(Predicate P) {
1293  return P == ICMP_EQ || P == ICMP_NE;
1294  }
1295 
1296  /// Return true if this predicate is either EQ or NE. This also
1297  /// tests for commutativity.
1298  bool isEquality() const {
1299  return isEquality(getPredicate());
1300  }
1301 
1302  /// @returns true if the predicate of this ICmpInst is commutative
1303  /// Determine if this relation is commutative.
1304  bool isCommutative() const { return isEquality(); }
1305 
1306  /// Return true if the predicate is relational (not EQ or NE).
1307  ///
1308  bool isRelational() const {
1309  return !isEquality();
1310  }
1311 
1312  /// Return true if the predicate is relational (not EQ or NE).
1313  ///
1314  static bool isRelational(Predicate P) {
1315  return !isEquality(P);
1316  }
1317 
1318  /// Return true if the predicate is SGT or UGT.
1319  ///
1320  static bool isGT(Predicate P) {
1321  return P == ICMP_SGT || P == ICMP_UGT;
1322  }
1323 
1324  /// Return true if the predicate is SLT or ULT.
1325  ///
1326  static bool isLT(Predicate P) {
1327  return P == ICMP_SLT || P == ICMP_ULT;
1328  }
1329 
1330  /// Return true if the predicate is SGE or UGE.
1331  ///
1332  static bool isGE(Predicate P) {
1333  return P == ICMP_SGE || P == ICMP_UGE;
1334  }
1335 
1336  /// Return true if the predicate is SLE or ULE.
1337  ///
1338  static bool isLE(Predicate P) {
1339  return P == ICMP_SLE || P == ICMP_ULE;
1340  }
1341 
1342  /// Exchange the two operands to this instruction in such a way that it does
1343  /// not modify the semantics of the instruction. The predicate value may be
1344  /// changed to retain the same result if the predicate is order dependent
1345  /// (e.g. ult).
1346  /// Swap operands and adjust predicate.
1347  void swapOperands() {
1348  setPredicate(getSwappedPredicate());
1349  Op<0>().swap(Op<1>());
1350  }
1351 
1352  // Methods for support type inquiry through isa, cast, and dyn_cast:
1353  static bool classof(const Instruction *I) {
1354  return I->getOpcode() == Instruction::ICmp;
1355  }
1356  static bool classof(const Value *V) {
1357  return isa<Instruction>(V) && classof(cast<Instruction>(V));
1358  }
1359 };
1360 
1361 //===----------------------------------------------------------------------===//
1362 // FCmpInst Class
1363 //===----------------------------------------------------------------------===//
1364 
1365 /// This instruction compares its operands according to the predicate given
1366 /// to the constructor. It only operates on floating point values or packed
1367 /// vectors of floating point values. The operands must be identical types.
1368 /// Represents a floating point comparison operator.
1369 class FCmpInst: public CmpInst {
1370  void AssertOK() {
1371  assert(isFPPredicate() && "Invalid FCmp predicate value");
1372  assert(getOperand(0)->getType() == getOperand(1)->getType() &&
1373  "Both operands to FCmp instruction are not of the same type!");
1374  // Check that the operands are the right type
1375  assert(getOperand(0)->getType()->isFPOrFPVectorTy() &&
1376  "Invalid operand types for FCmp instruction");
1377  }
1378 
1379 protected:
1380  // Note: Instruction needs to be a friend here to call cloneImpl.
1381  friend class Instruction;
1382 
1383  /// Clone an identical FCmpInst
1384  FCmpInst *cloneImpl() const;
1385 
1386 public:
1387  /// Constructor with insert-before-instruction semantics.
1389  Instruction *InsertBefore, ///< Where to insert
1390  Predicate pred, ///< The predicate to use for the comparison
1391  Value *LHS, ///< The left-hand-side of the expression
1392  Value *RHS, ///< The right-hand-side of the expression
1393  const Twine &NameStr = "" ///< Name of the instruction
1394  ) : CmpInst(makeCmpResultType(LHS->getType()),
1395  Instruction::FCmp, pred, LHS, RHS, NameStr,
1396  InsertBefore) {
1397  AssertOK();
1398  }
1399 
1400  /// Constructor with insert-at-end semantics.
1402  BasicBlock &InsertAtEnd, ///< Block to insert into.
1403  Predicate pred, ///< The predicate to use for the comparison
1404  Value *LHS, ///< The left-hand-side of the expression
1405  Value *RHS, ///< The right-hand-side of the expression
1406  const Twine &NameStr = "" ///< Name of the instruction
1407  ) : CmpInst(makeCmpResultType(LHS->getType()),
1408  Instruction::FCmp, pred, LHS, RHS, NameStr,
1409  &InsertAtEnd) {
1410  AssertOK();
1411  }
1412 
1413  /// Constructor with no-insertion semantics
1415  Predicate Pred, ///< The predicate to use for the comparison
1416  Value *LHS, ///< The left-hand-side of the expression
1417  Value *RHS, ///< The right-hand-side of the expression
1418  const Twine &NameStr = "", ///< Name of the instruction
1419  Instruction *FlagsSource = nullptr
1420  ) : CmpInst(makeCmpResultType(LHS->getType()), Instruction::FCmp, Pred, LHS,
1421  RHS, NameStr, nullptr, FlagsSource) {
1422  AssertOK();
1423  }
1424 
1425  /// @returns true if the predicate of this instruction is EQ or NE.
1426  /// Determine if this is an equality predicate.
1427  static bool isEquality(Predicate Pred) {
1428  return Pred == FCMP_OEQ || Pred == FCMP_ONE || Pred == FCMP_UEQ ||
1429  Pred == FCMP_UNE;
1430  }
1431 
1432  /// @returns true if the predicate of this instruction is EQ or NE.
1433  /// Determine if this is an equality predicate.
1434  bool isEquality() const { return isEquality(getPredicate()); }
1435 
1436  /// @returns true if the predicate of this instruction is commutative.
1437  /// Determine if this is a commutative predicate.
1438  bool isCommutative() const {
1439  return isEquality() ||
1440  getPredicate() == FCMP_FALSE ||
1441  getPredicate() == FCMP_TRUE ||
1442  getPredicate() == FCMP_ORD ||
1443  getPredicate() == FCMP_UNO;
1444  }
1445 
1446  /// @returns true if the predicate is relational (not EQ or NE).
1447  /// Determine if this a relational predicate.
1448  bool isRelational() const { return !isEquality(); }
1449 
1450  /// Exchange the two operands to this instruction in such a way that it does
1451  /// not modify the semantics of the instruction. The predicate value may be
1452  /// changed to retain the same result if the predicate is order dependent
1453  /// (e.g. ult).
1454  /// Swap operands and adjust predicate.
1455  void swapOperands() {
1457  Op<0>().swap(Op<1>());
1458  }
1459 
1460  /// Methods for support type inquiry through isa, cast, and dyn_cast:
1461  static bool classof(const Instruction *I) {
1462  return I->getOpcode() == Instruction::FCmp;
1463  }
1464  static bool classof(const Value *V) {
1465  return isa<Instruction>(V) && classof(cast<Instruction>(V));
1466  }
1467 };
1468 
1469 //===----------------------------------------------------------------------===//
1470 /// This class represents a function call, abstracting a target
1471 /// machine's calling convention. This class uses low bit of the SubClassData
1472 /// field to indicate whether or not this is a tail call. The rest of the bits
1473 /// hold the calling convention of the call.
1474 ///
1475 class CallInst : public CallBase {
1476  CallInst(const CallInst &CI);
1477 
1478  /// Construct a CallInst given a range of arguments.
1479  /// Construct a CallInst from a range of arguments
1480  inline CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1481  ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
1482  Instruction *InsertBefore);
1483 
1484  inline CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1485  const Twine &NameStr, Instruction *InsertBefore)
1486  : CallInst(Ty, Func, Args, None, NameStr, InsertBefore) {}
1487 
1488  /// Construct a CallInst given a range of arguments.
1489  /// Construct a CallInst from a range of arguments
1490  inline CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1491  ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
1492  BasicBlock *InsertAtEnd);
1493 
1494  explicit CallInst(FunctionType *Ty, Value *F, const Twine &NameStr,
1495  Instruction *InsertBefore);
1496 
1497  CallInst(FunctionType *ty, Value *F, const Twine &NameStr,
1498  BasicBlock *InsertAtEnd);
1499 
1500  void init(FunctionType *FTy, Value *Func, ArrayRef<Value *> Args,
1501  ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr);
1502  void init(FunctionType *FTy, Value *Func, const Twine &NameStr);
1503 
1504  /// Compute the number of operands to allocate.
1505  static int ComputeNumOperands(int NumArgs, int NumBundleInputs = 0) {
1506  // We need one operand for the called function, plus the input operand
1507  // counts provided.
1508  return 1 + NumArgs + NumBundleInputs;
1509  }
1510 
1511 protected:
1512  // Note: Instruction needs to be a friend here to call cloneImpl.
1513  friend class Instruction;
1514 
1515  CallInst *cloneImpl() const;
1516 
1517 public:
1518  static CallInst *Create(FunctionType *Ty, Value *F, const Twine &NameStr = "",
1519  Instruction *InsertBefore = nullptr) {
1520  return new (ComputeNumOperands(0)) CallInst(Ty, F, NameStr, InsertBefore);
1521  }
1522 
1524  const Twine &NameStr,
1525  Instruction *InsertBefore = nullptr) {
1526  return new (ComputeNumOperands(Args.size()))
1527  CallInst(Ty, Func, Args, None, NameStr, InsertBefore);
1528  }
1529 
1531  ArrayRef<OperandBundleDef> Bundles = None,
1532  const Twine &NameStr = "",
1533  Instruction *InsertBefore = nullptr) {
1534  const int NumOperands =
1535  ComputeNumOperands(Args.size(), CountBundleInputs(Bundles));
1536  const unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
1537 
1538  return new (NumOperands, DescriptorBytes)
1539  CallInst(Ty, Func, Args, Bundles, NameStr, InsertBefore);
1540  }
1541 
1542  static CallInst *Create(FunctionType *Ty, Value *F, const Twine &NameStr,
1543  BasicBlock *InsertAtEnd) {
1544  return new (ComputeNumOperands(0)) CallInst(Ty, F, NameStr, InsertAtEnd);
1545  }
1546 
1548  const Twine &NameStr, BasicBlock *InsertAtEnd) {
1549  return new (ComputeNumOperands(Args.size()))
1550  CallInst(Ty, Func, Args, None, NameStr, InsertAtEnd);
1551  }
1552 
1555  const Twine &NameStr, BasicBlock *InsertAtEnd) {
1556  const int NumOperands =
1557  ComputeNumOperands(Args.size(), CountBundleInputs(Bundles));
1558  const unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
1559 
1560  return new (NumOperands, DescriptorBytes)
1561  CallInst(Ty, Func, Args, Bundles, NameStr, InsertAtEnd);
1562  }
1563 
1564  static CallInst *Create(FunctionCallee Func, const Twine &NameStr = "",
1565  Instruction *InsertBefore = nullptr) {
1566  return Create(Func.getFunctionType(), Func.getCallee(), NameStr,
1567  InsertBefore);
1568  }
1569 
1571  ArrayRef<OperandBundleDef> Bundles = None,
1572  const Twine &NameStr = "",
1573  Instruction *InsertBefore = nullptr) {
1574  return Create(Func.getFunctionType(), Func.getCallee(), Args, Bundles,
1575  NameStr, InsertBefore);
1576  }
1577 
1579  const Twine &NameStr,
1580  Instruction *InsertBefore = nullptr) {
1581  return Create(Func.getFunctionType(), Func.getCallee(), Args, NameStr,
1582  InsertBefore);
1583  }
1584 
1585  static CallInst *Create(FunctionCallee Func, const Twine &NameStr,
1586  BasicBlock *InsertAtEnd) {
1587  return Create(Func.getFunctionType(), Func.getCallee(), NameStr,
1588  InsertAtEnd);
1589  }
1590 
1592  const Twine &NameStr, BasicBlock *InsertAtEnd) {
1593  return Create(Func.getFunctionType(), Func.getCallee(), Args, NameStr,
1594  InsertAtEnd);
1595  }
1596 
1599  const Twine &NameStr, BasicBlock *InsertAtEnd) {
1600  return Create(Func.getFunctionType(), Func.getCallee(), Args, Bundles,
1601  NameStr, InsertAtEnd);
1602  }
1603 
1604  /// Create a clone of \p CI with a different set of operand bundles and
1605  /// insert it before \p InsertPt.
1606  ///
1607  /// The returned call instruction is identical \p CI in every way except that
1608  /// the operand bundles for the new instruction are set to the operand bundles
1609  /// in \p Bundles.
1610  static CallInst *Create(CallInst *CI, ArrayRef<OperandBundleDef> Bundles,
1611  Instruction *InsertPt = nullptr);
1612 
1613  /// Generate the IR for a call to malloc:
1614  /// 1. Compute the malloc call's argument as the specified type's size,
1615  /// possibly multiplied by the array size if the array size is not
1616  /// constant 1.
1617  /// 2. Call malloc with that argument.
1618  /// 3. Bitcast the result of the malloc call to the specified type.
1619  static Instruction *CreateMalloc(Instruction *InsertBefore, Type *IntPtrTy,
1620  Type *AllocTy, Value *AllocSize,
1621  Value *ArraySize = nullptr,
1622  Function *MallocF = nullptr,
1623  const Twine &Name = "");
1624  static Instruction *CreateMalloc(BasicBlock *InsertAtEnd, Type *IntPtrTy,
1625  Type *AllocTy, Value *AllocSize,
1626  Value *ArraySize = nullptr,
1627  Function *MallocF = nullptr,
1628  const Twine &Name = "");
1629  static Instruction *CreateMalloc(Instruction *InsertBefore, Type *IntPtrTy,
1630  Type *AllocTy, Value *AllocSize,
1631  Value *ArraySize = nullptr,
1632  ArrayRef<OperandBundleDef> Bundles = None,
1633  Function *MallocF = nullptr,
1634  const Twine &Name = "");
1635  static Instruction *CreateMalloc(BasicBlock *InsertAtEnd, Type *IntPtrTy,
1636  Type *AllocTy, Value *AllocSize,
1637  Value *ArraySize = nullptr,
1638  ArrayRef<OperandBundleDef> Bundles = None,
1639  Function *MallocF = nullptr,
1640  const Twine &Name = "");
1641  /// Generate the IR for a call to the builtin free function.
1642  static Instruction *CreateFree(Value *Source, Instruction *InsertBefore);
1643  static Instruction *CreateFree(Value *Source, BasicBlock *InsertAtEnd);
1646  Instruction *InsertBefore);
1649  BasicBlock *InsertAtEnd);
1650 
1651  // Note that 'musttail' implies 'tail'.
1652  enum TailCallKind : unsigned {
1658  };
1659 
1661  static_assert(
1662  Bitfield::areContiguous<TailCallKindField, CallBase::CallingConvField>(),
1663  "Bitfields must be contiguous");
1664 
1666  return getSubclassData<TailCallKindField>();
1667  }
1668 
1669  bool isTailCall() const {
1671  return Kind == TCK_Tail || Kind == TCK_MustTail;
1672  }
1673 
1674  bool isMustTailCall() const { return getTailCallKind() == TCK_MustTail; }
1675 
1676  bool isNoTailCall() const { return getTailCallKind() == TCK_NoTail; }
1677 
1679  setSubclassData<TailCallKindField>(TCK);
1680  }
1681 
1682  void setTailCall(bool IsTc = true) {
1683  setTailCallKind(IsTc ? TCK_Tail : TCK_None);
1684  }
1685 
1686  /// Return true if the call can return twice
1687  bool canReturnTwice() const { return hasFnAttr(Attribute::ReturnsTwice); }
1689  addAttribute(AttributeList::FunctionIndex, Attribute::ReturnsTwice);
1690  }
1691 
1692  // Methods for support type inquiry through isa, cast, and dyn_cast:
1693  static bool classof(const Instruction *I) {
1694  return I->getOpcode() == Instruction::Call;
1695  }
1696  static bool classof(const Value *V) {
1697  return isa<Instruction>(V) && classof(cast<Instruction>(V));
1698  }
1699 
1700  /// Updates profile metadata by scaling it by \p S / \p T.
1701  void updateProfWeight(uint64_t S, uint64_t T);
1702 
1703 private:
1704  // Shadow Instruction::setInstructionSubclassData with a private forwarding
1705  // method so that subclasses cannot accidentally use it.
1706  template <typename Bitfield>
1707  void setSubclassData(typename Bitfield::Type Value) {
1708  Instruction::setSubclassData<Bitfield>(Value);
1709  }
1710 };
1711 
CallInst::CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
                   ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
                   BasicBlock *InsertAtEnd)
    // Operands (callee + args + bundle inputs) are co-allocated immediately
    // before this object; op_end(this) - N locates the first operand.
    : CallBase(Ty->getReturnType(), Instruction::Call,
               OperandTraits<CallBase>::op_end(this) -
                   (Args.size() + CountBundleInputs(Bundles) + 1),
               unsigned(Args.size() + CountBundleInputs(Bundles) + 1),
               InsertAtEnd) {
  init(Ty, Func, Args, Bundles, NameStr);
}
1722 
CallInst::CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
                   ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
                   Instruction *InsertBefore)
    // Same operand co-allocation as above; this overload inserts the new
    // call before an existing instruction.
    : CallBase(Ty->getReturnType(), Instruction::Call,
               OperandTraits<CallBase>::op_end(this) -
                   (Args.size() + CountBundleInputs(Bundles) + 1),
               unsigned(Args.size() + CountBundleInputs(Bundles) + 1),
               InsertBefore) {
  init(Ty, Func, Args, Bundles, NameStr);
}
1733 
1734 //===----------------------------------------------------------------------===//
1735 // SelectInst Class
1736 //===----------------------------------------------------------------------===//
1737 
1738 /// This class represents the LLVM 'select' instruction.
1739 ///
1740 class SelectInst : public Instruction {
1741  SelectInst(Value *C, Value *S1, Value *S2, const Twine &NameStr,
1742  Instruction *InsertBefore)
1744  &Op<0>(), 3, InsertBefore) {
1745  init(C, S1, S2);
1746  setName(NameStr);
1747  }
1748 
1749  SelectInst(Value *C, Value *S1, Value *S2, const Twine &NameStr,
1750  BasicBlock *InsertAtEnd)
1752  &Op<0>(), 3, InsertAtEnd) {
1753  init(C, S1, S2);
1754  setName(NameStr);
1755  }
1756 
1757  void init(Value *C, Value *S1, Value *S2) {
1758  assert(!areInvalidOperands(C, S1, S2) && "Invalid operands for select");
1759  Op<0>() = C;
1760  Op<1>() = S1;
1761  Op<2>() = S2;
1762  }
1763 
1764 protected:
1765  // Note: Instruction needs to be a friend here to call cloneImpl.
1766  friend class Instruction;
1767 
1768  SelectInst *cloneImpl() const;
1769 
1770 public:
1771  static SelectInst *Create(Value *C, Value *S1, Value *S2,
1772  const Twine &NameStr = "",
1773  Instruction *InsertBefore = nullptr,
1774  Instruction *MDFrom = nullptr) {
1775  SelectInst *Sel = new(3) SelectInst(C, S1, S2, NameStr, InsertBefore);
1776  if (MDFrom)
1777  Sel->copyMetadata(*MDFrom);
1778  return Sel;
1779  }
1780 
1781  static SelectInst *Create(Value *C, Value *S1, Value *S2,
1782  const Twine &NameStr,
1783  BasicBlock *InsertAtEnd) {
1784  return new(3) SelectInst(C, S1, S2, NameStr, InsertAtEnd);
1785  }
1786 
1787  const Value *getCondition() const { return Op<0>(); }
1788  const Value *getTrueValue() const { return Op<1>(); }
1789  const Value *getFalseValue() const { return Op<2>(); }
1790  Value *getCondition() { return Op<0>(); }
1791  Value *getTrueValue() { return Op<1>(); }
1792  Value *getFalseValue() { return Op<2>(); }
1793 
1794  void setCondition(Value *V) { Op<0>() = V; }
1795  void setTrueValue(Value *V) { Op<1>() = V; }
1796  void setFalseValue(Value *V) { Op<2>() = V; }
1797 
1798  /// Swap the true and false values of the select instruction.
1799  /// This doesn't swap prof metadata.
1800  void swapValues() { Op<1>().swap(Op<2>()); }
1801 
1802  /// Return a string if the specified operands are invalid
1803  /// for a select operation, otherwise return null.
1804  static const char *areInvalidOperands(Value *Cond, Value *True, Value *False);
1805 
1806  /// Transparently provide more efficient getOperand methods.
1808 
1810  return static_cast<OtherOps>(Instruction::getOpcode());
1811  }
1812 
1813  // Methods for support type inquiry through isa, cast, and dyn_cast:
1814  static bool classof(const Instruction *I) {
1815  return I->getOpcode() == Instruction::Select;
1816  }
1817  static bool classof(const Value *V) {
1818  return isa<Instruction>(V) && classof(cast<Instruction>(V));
1819  }
1820 };
1821 
1822 template <>
1823 struct OperandTraits<SelectInst> : public FixedNumOperandTraits<SelectInst, 3> {
1824 };
1825 
1827 
1828 //===----------------------------------------------------------------------===//
1829 // VAArgInst Class
1830 //===----------------------------------------------------------------------===//
1831 
1832 /// This class represents the va_arg llvm instruction, which returns
1833 /// an argument of the specified type given a va_list and increments that list
1834 ///
1835 class VAArgInst : public UnaryInstruction {
1836 protected:
1837  // Note: Instruction needs to be a friend here to call cloneImpl.
1838  friend class Instruction;
1839 
1840  VAArgInst *cloneImpl() const;
1841 
1842 public:
1843  VAArgInst(Value *List, Type *Ty, const Twine &NameStr = "",
1844  Instruction *InsertBefore = nullptr)
1845  : UnaryInstruction(Ty, VAArg, List, InsertBefore) {
1846  setName(NameStr);
1847  }
1848 
1849  VAArgInst(Value *List, Type *Ty, const Twine &NameStr,
1850  BasicBlock *InsertAtEnd)
1851  : UnaryInstruction(Ty, VAArg, List, InsertAtEnd) {
1852  setName(NameStr);
1853  }
1854 
1855  Value *getPointerOperand() { return getOperand(0); }
1856  const Value *getPointerOperand() const { return getOperand(0); }
1857  static unsigned getPointerOperandIndex() { return 0U; }
1858 
1859  // Methods for support type inquiry through isa, cast, and dyn_cast:
1860  static bool classof(const Instruction *I) {
1861  return I->getOpcode() == VAArg;
1862  }
1863  static bool classof(const Value *V) {
1864  return isa<Instruction>(V) && classof(cast<Instruction>(V));
1865  }
1866 };
1867 
1868 //===----------------------------------------------------------------------===//
1869 // ExtractElementInst Class
1870 //===----------------------------------------------------------------------===//
1871 
1872 /// This instruction extracts a single (scalar)
1873 /// element from a VectorType value
1874 ///
1876  ExtractElementInst(Value *Vec, Value *Idx, const Twine &NameStr = "",
1877  Instruction *InsertBefore = nullptr);
1878  ExtractElementInst(Value *Vec, Value *Idx, const Twine &NameStr,
1879  BasicBlock *InsertAtEnd);
1880 
1881 protected:
1882  // Note: Instruction needs to be a friend here to call cloneImpl.
1883  friend class Instruction;
1884 
1885  ExtractElementInst *cloneImpl() const;
1886 
1887 public:
1888  static ExtractElementInst *Create(Value *Vec, Value *Idx,
1889  const Twine &NameStr = "",
1890  Instruction *InsertBefore = nullptr) {
1891  return new(2) ExtractElementInst(Vec, Idx, NameStr, InsertBefore);
1892  }
1893 
1894  static ExtractElementInst *Create(Value *Vec, Value *Idx,
1895  const Twine &NameStr,
1896  BasicBlock *InsertAtEnd) {
1897  return new(2) ExtractElementInst(Vec, Idx, NameStr, InsertAtEnd);
1898  }
1899 
1900  /// Return true if an extractelement instruction can be
1901  /// formed with the specified operands.
1902  static bool isValidOperands(const Value *Vec, const Value *Idx);
1903 
1904  Value *getVectorOperand() { return Op<0>(); }
1905  Value *getIndexOperand() { return Op<1>(); }
1906  const Value *getVectorOperand() const { return Op<0>(); }
1907  const Value *getIndexOperand() const { return Op<1>(); }
1908 
1910  return cast<VectorType>(getVectorOperand()->getType());
1911  }
1912 
1913  /// Transparently provide more efficient getOperand methods.
1915 
1916  // Methods for support type inquiry through isa, cast, and dyn_cast:
1917  static bool classof(const Instruction *I) {
1918  return I->getOpcode() == Instruction::ExtractElement;
1919  }
1920  static bool classof(const Value *V) {
1921  return isa<Instruction>(V) && classof(cast<Instruction>(V));
1922  }
1923 };
1924 
1925 template <>
1927  public FixedNumOperandTraits<ExtractElementInst, 2> {
1928 };
1929 
1931 
1932 //===----------------------------------------------------------------------===//
1933 // InsertElementInst Class
1934 //===----------------------------------------------------------------------===//
1935 
1936 /// This instruction inserts a single (scalar)
1937 /// element into a VectorType value
1938 ///
1940  InsertElementInst(Value *Vec, Value *NewElt, Value *Idx,
1941  const Twine &NameStr = "",
1942  Instruction *InsertBefore = nullptr);
1943  InsertElementInst(Value *Vec, Value *NewElt, Value *Idx, const Twine &NameStr,
1944  BasicBlock *InsertAtEnd);
1945 
1946 protected:
1947  // Note: Instruction needs to be a friend here to call cloneImpl.
1948  friend class Instruction;
1949 
1950  InsertElementInst *cloneImpl() const;
1951 
1952 public:
1953  static InsertElementInst *Create(Value *Vec, Value *NewElt, Value *Idx,
1954  const Twine &NameStr = "",
1955  Instruction *InsertBefore = nullptr) {
1956  return new(3) InsertElementInst(Vec, NewElt, Idx, NameStr, InsertBefore);
1957  }
1958 
1959  static InsertElementInst *Create(Value *Vec, Value *NewElt, Value *Idx,
1960  const Twine &NameStr,
1961  BasicBlock *InsertAtEnd) {
1962  return new(3) InsertElementInst(Vec, NewElt, Idx, NameStr, InsertAtEnd);
1963  }
1964 
1965  /// Return true if an insertelement instruction can be
1966  /// formed with the specified operands.
1967  static bool isValidOperands(const Value *Vec, const Value *NewElt,
1968  const Value *Idx);
1969 
1970  /// Overload to return most specific vector type.
1971  ///
1972  VectorType *getType() const {
1973  return cast<VectorType>(Instruction::getType());
1974  }
1975 
1976  /// Transparently provide more efficient getOperand methods.
1978 
1979  // Methods for support type inquiry through isa, cast, and dyn_cast:
1980  static bool classof(const Instruction *I) {
1981  return I->getOpcode() == Instruction::InsertElement;
1982  }
1983  static bool classof(const Value *V) {
1984  return isa<Instruction>(V) && classof(cast<Instruction>(V));
1985  }
1986 };
1987 
1988 template <>
1990  public FixedNumOperandTraits<InsertElementInst, 3> {
1991 };
1992 
1994 
1995 //===----------------------------------------------------------------------===//
1996 // ShuffleVectorInst Class
1997 //===----------------------------------------------------------------------===//
1998 
1999 constexpr int UndefMaskElem = -1;
2000 
2001 /// This instruction constructs a fixed permutation of two
2002 /// input vectors.
2003 ///
2004 /// For each element of the result vector, the shuffle mask selects an element
2005 /// from one of the input vectors to copy to the result. Non-negative elements
2006 /// in the mask represent an index into the concatenated pair of input vectors.
2007 /// UndefMaskElem (-1) specifies that the result element is undefined.
2008 ///
2009 /// For scalable vectors, all the elements of the mask must be 0 or -1. This
2010 /// requirement may be relaxed in the future.
2012  SmallVector<int, 4> ShuffleMask;
2013  Constant *ShuffleMaskForBitcode;
2014 
2015 protected:
2016  // Note: Instruction needs to be a friend here to call cloneImpl.
2017  friend class Instruction;
2018 
2019  ShuffleVectorInst *cloneImpl() const;
2020 
2021 public:
2023  const Twine &NameStr = "",
2024  Instruction *InsertBefor = nullptr);
2026  const Twine &NameStr, BasicBlock *InsertAtEnd);
2028  const Twine &NameStr = "",
2029  Instruction *InsertBefor = nullptr);
2031  const Twine &NameStr, BasicBlock *InsertAtEnd);
2032 
2033  void *operator new(size_t S) { return User::operator new(S, 2); }
2034  void operator delete(void *Ptr) { return User::operator delete(Ptr); }
2035 
2036  /// Swap the operands and adjust the mask to preserve the semantics
2037  /// of the instruction.
2038  void commute();
2039 
2040  /// Return true if a shufflevector instruction can be
2041  /// formed with the specified operands.
2042  static bool isValidOperands(const Value *V1, const Value *V2,
2043  const Value *Mask);
2044  static bool isValidOperands(const Value *V1, const Value *V2,
2046 
2047  /// Overload to return most specific vector type.
2048  ///
2049  VectorType *getType() const {
2050  return cast<VectorType>(Instruction::getType());
2051  }
2052 
2053  /// Transparently provide more efficient getOperand methods.
2055 
2056  /// Return the shuffle mask value of this instruction for the given element
2057  /// index. Return UndefMaskElem if the element is undef.
2058  int getMaskValue(unsigned Elt) const { return ShuffleMask[Elt]; }
2059 
2060  /// Convert the input shuffle mask operand to a vector of integers. Undefined
2061  /// elements of the mask are returned as UndefMaskElem.
2062  static void getShuffleMask(const Constant *Mask,
2063  SmallVectorImpl<int> &Result);
2064 
2065  /// Return the mask for this instruction as a vector of integers. Undefined
2066  /// elements of the mask are returned as UndefMaskElem.
2067  void getShuffleMask(SmallVectorImpl<int> &Result) const {
2068  Result.assign(ShuffleMask.begin(), ShuffleMask.end());
2069  }
2070 
2071  /// Return the mask for this instruction, for use in bitcode.
2072  ///
2073  /// TODO: This is temporary until we decide a new bitcode encoding for
2074  /// shufflevector.
2075  Constant *getShuffleMaskForBitcode() const { return ShuffleMaskForBitcode; }
2076 
2077  static Constant *convertShuffleMaskForBitcode(ArrayRef<int> Mask,
2078  Type *ResultTy);
2079 
2080  void setShuffleMask(ArrayRef<int> Mask);
2081 
2082  ArrayRef<int> getShuffleMask() const { return ShuffleMask; }
2083 
2084  /// Return true if this shuffle returns a vector with a different number of
2085  /// elements than its source vectors.
2086  /// Examples: shufflevector <4 x n> A, <4 x n> B, <1,2,3>
2087  /// shufflevector <4 x n> A, <4 x n> B, <1,2,3,4,5>
2088  bool changesLength() const {
2089  unsigned NumSourceElts = cast<VectorType>(Op<0>()->getType())
2090  ->getElementCount()
2091  .getKnownMinValue();
2092  unsigned NumMaskElts = ShuffleMask.size();
2093  return NumSourceElts != NumMaskElts;
2094  }
2095 
2096  /// Return true if this shuffle returns a vector with a greater number of
2097  /// elements than its source vectors.
2098  /// Example: shufflevector <2 x n> A, <2 x n> B, <1,2,3>
2099  bool increasesLength() const {
2100  unsigned NumSourceElts = cast<VectorType>(Op<0>()->getType())
2101  ->getElementCount()
2102  .getKnownMinValue();
2103  unsigned NumMaskElts = ShuffleMask.size();
2104  return NumSourceElts < NumMaskElts;
2105  }
2106 
2107  /// Return true if this shuffle mask chooses elements from exactly one source
2108  /// vector.
2109  /// Example: <7,5,undef,7>
2110  /// This assumes that vector operands are the same length as the mask.
2111  static bool isSingleSourceMask(ArrayRef<int> Mask);
2112  static bool isSingleSourceMask(const Constant *Mask) {
2113  assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2114  SmallVector<int, 16> MaskAsInts;
2115  getShuffleMask(Mask, MaskAsInts);
2116  return isSingleSourceMask(MaskAsInts);
2117  }
2118 
2119  /// Return true if this shuffle chooses elements from exactly one source
2120  /// vector without changing the length of that vector.
2121  /// Example: shufflevector <4 x n> A, <4 x n> B, <3,0,undef,3>
2122  /// TODO: Optionally allow length-changing shuffles.
2123  bool isSingleSource() const {
2124  return !changesLength() && isSingleSourceMask(ShuffleMask);
2125  }
2126 
2127  /// Return true if this shuffle mask chooses elements from exactly one source
2128  /// vector without lane crossings. A shuffle using this mask is not
2129  /// necessarily a no-op because it may change the number of elements from its
2130  /// input vectors or it may provide demanded bits knowledge via undef lanes.
2131  /// Example: <undef,undef,2,3>
2132  static bool isIdentityMask(ArrayRef<int> Mask);
2133  static bool isIdentityMask(const Constant *Mask) {
2134  assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2135  SmallVector<int, 16> MaskAsInts;
2136  getShuffleMask(Mask, MaskAsInts);
2137  return isIdentityMask(MaskAsInts);
2138  }
2139 
2140  /// Return true if this shuffle chooses elements from exactly one source
2141  /// vector without lane crossings and does not change the number of elements
2142  /// from its input vectors.
2143  /// Example: shufflevector <4 x n> A, <4 x n> B, <4,undef,6,undef>
2144  bool isIdentity() const {
2145  return !changesLength() && isIdentityMask(ShuffleMask);
2146  }
2147 
2148  /// Return true if this shuffle lengthens exactly one source vector with
2149  /// undefs in the high elements.
2150  bool isIdentityWithPadding() const;
2151 
2152  /// Return true if this shuffle extracts the first N elements of exactly one
2153  /// source vector.
2154  bool isIdentityWithExtract() const;
2155 
2156  /// Return true if this shuffle concatenates its 2 source vectors. This
2157  /// returns false if either input is undefined. In that case, the shuffle is
2158  /// is better classified as an identity with padding operation.
2159  bool isConcat() const;
2160 
2161  /// Return true if this shuffle mask chooses elements from its source vectors
2162  /// without lane crossings. A shuffle using this mask would be
2163  /// equivalent to a vector select with a constant condition operand.
2164  /// Example: <4,1,6,undef>
2165  /// This returns false if the mask does not choose from both input vectors.
2166  /// In that case, the shuffle is better classified as an identity shuffle.
2167  /// This assumes that vector operands are the same length as the mask
2168  /// (a length-changing shuffle can never be equivalent to a vector select).
2169  static bool isSelectMask(ArrayRef<int> Mask);
2170  static bool isSelectMask(const Constant *Mask) {
2171  assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2172  SmallVector<int, 16> MaskAsInts;
2173  getShuffleMask(Mask, MaskAsInts);
2174  return isSelectMask(MaskAsInts);
2175  }
2176 
2177  /// Return true if this shuffle chooses elements from its source vectors
2178  /// without lane crossings and all operands have the same number of elements.
2179  /// In other words, this shuffle is equivalent to a vector select with a
2180  /// constant condition operand.
2181  /// Example: shufflevector <4 x n> A, <4 x n> B, <undef,1,6,3>
2182  /// This returns false if the mask does not choose from both input vectors.
2183  /// In that case, the shuffle is better classified as an identity shuffle.
2184  /// TODO: Optionally allow length-changing shuffles.
2185  bool isSelect() const {
2186  return !changesLength() && isSelectMask(ShuffleMask);
2187  }
2188 
2189  /// Return true if this shuffle mask swaps the order of elements from exactly
2190  /// one source vector.
2191  /// Example: <7,6,undef,4>
2192  /// This assumes that vector operands are the same length as the mask.
2193  static bool isReverseMask(ArrayRef<int> Mask);
2194  static bool isReverseMask(const Constant *Mask) {
2195  assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2196  SmallVector<int, 16> MaskAsInts;
2197  getShuffleMask(Mask, MaskAsInts);
2198  return isReverseMask(MaskAsInts);
2199  }
2200 
2201  /// Return true if this shuffle swaps the order of elements from exactly
2202  /// one source vector.
2203  /// Example: shufflevector <4 x n> A, <4 x n> B, <3,undef,1,undef>
2204  /// TODO: Optionally allow length-changing shuffles.
2205  bool isReverse() const {
2206  return !changesLength() && isReverseMask(ShuffleMask);
2207  }
2208 
2209  /// Return true if this shuffle mask chooses all elements with the same value
2210  /// as the first element of exactly one source vector.
2211  /// Example: <4,undef,undef,4>
2212  /// This assumes that vector operands are the same length as the mask.
2213  static bool isZeroEltSplatMask(ArrayRef<int> Mask);
2214  static bool isZeroEltSplatMask(const Constant *Mask) {
2215  assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2216  SmallVector<int, 16> MaskAsInts;
2217  getShuffleMask(Mask, MaskAsInts);
2218  return isZeroEltSplatMask(MaskAsInts);
2219  }
2220 
2221  /// Return true if all elements of this shuffle are the same value as the
2222  /// first element of exactly one source vector without changing the length
2223  /// of that vector.
2224  /// Example: shufflevector <4 x n> A, <4 x n> B, <undef,0,undef,0>
2225  /// TODO: Optionally allow length-changing shuffles.
2226  /// TODO: Optionally allow splats from other elements.
2227  bool isZeroEltSplat() const {
2228  return !changesLength() && isZeroEltSplatMask(ShuffleMask);
2229  }
2230 
2231  /// Return true if this shuffle mask is a transpose mask.
2232  /// Transpose vector masks transpose a 2xn matrix. They read corresponding
2233  /// even- or odd-numbered vector elements from two n-dimensional source
2234  /// vectors and write each result into consecutive elements of an
2235  /// n-dimensional destination vector. Two shuffles are necessary to complete
2236  /// the transpose, one for the even elements and another for the odd elements.
2237  /// This description closely follows how the TRN1 and TRN2 AArch64
2238  /// instructions operate.
2239  ///
2240  /// For example, a simple 2x2 matrix can be transposed with:
2241  ///
2242  /// ; Original matrix
2243  /// m0 = < a, b >
2244  /// m1 = < c, d >
2245  ///
2246  /// ; Transposed matrix
2247  /// t0 = < a, c > = shufflevector m0, m1, < 0, 2 >
2248  /// t1 = < b, d > = shufflevector m0, m1, < 1, 3 >
2249  ///
2250  /// For matrices having greater than n columns, the resulting nx2 transposed
2251  /// matrix is stored in two result vectors such that one vector contains
2252  /// interleaved elements from all the even-numbered rows and the other vector
2253  /// contains interleaved elements from all the odd-numbered rows. For example,
2254  /// a 2x4 matrix can be transposed with:
2255  ///
2256  /// ; Original matrix
2257  /// m0 = < a, b, c, d >
2258  /// m1 = < e, f, g, h >
2259  ///
2260  /// ; Transposed matrix
2261  /// t0 = < a, e, c, g > = shufflevector m0, m1 < 0, 4, 2, 6 >
2262  /// t1 = < b, f, d, h > = shufflevector m0, m1 < 1, 5, 3, 7 >
2263  static bool isTransposeMask(ArrayRef<int> Mask);
2264  static bool isTransposeMask(const Constant *Mask) {
2265  assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2266  SmallVector<int, 16> MaskAsInts;
2267  getShuffleMask(Mask, MaskAsInts);
2268  return isTransposeMask(MaskAsInts);
2269  }
2270 
2271  /// Return true if this shuffle transposes the elements of its inputs without
2272  /// changing the length of the vectors. This operation may also be known as a
2273  /// merge or interleave. See the description for isTransposeMask() for the
2274  /// exact specification.
2275  /// Example: shufflevector <4 x n> A, <4 x n> B, <0,4,2,6>
2276  bool isTranspose() const {
2277  return !changesLength() && isTransposeMask(ShuffleMask);
2278  }
2279 
2280  /// Return true if this shuffle mask is an extract subvector mask.
2281  /// A valid extract subvector mask returns a smaller vector from a single
2282  /// source operand. The base extraction index is returned as well.
2283  static bool isExtractSubvectorMask(ArrayRef<int> Mask, int NumSrcElts,
2284  int &Index);
2285  static bool isExtractSubvectorMask(const Constant *Mask, int NumSrcElts,
2286  int &Index) {
2287  assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2288  // Not possible to express a shuffle mask for a scalable vector for this
2289  // case.
2290  if (isa<ScalableVectorType>(Mask->getType()))
2291  return false;
2292  SmallVector<int, 16> MaskAsInts;
2293  getShuffleMask(Mask, MaskAsInts);
2294  return isExtractSubvectorMask(MaskAsInts, NumSrcElts, Index);
2295  }
2296 
2297  /// Return true if this shuffle mask is an extract subvector mask.
2298  bool isExtractSubvectorMask(int &Index) const {
2299  // Not possible to express a shuffle mask for a scalable vector for this
2300  // case.
2301  if (isa<ScalableVectorType>(getType()))
2302  return false;
2303 
2304  int NumSrcElts =
2305  cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
2306  return isExtractSubvectorMask(ShuffleMask, NumSrcElts, Index);
2307  }
2308 
2309  /// Return true if this shuffle mask is an insert subvector mask.
2310  /// A valid insert subvector mask inserts the lowest elements of a second
2311  /// source operand into an in-place first source operand operand.
2312  /// Both the sub vector width and the insertion index is returned.
2313  static bool isInsertSubvectorMask(ArrayRef<int> Mask, int NumSrcElts,
2314  int &NumSubElts, int &Index);
2315  static bool isInsertSubvectorMask(const Constant *Mask, int NumSrcElts,
2316  int &NumSubElts, int &Index) {
2317  assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2318  // Not possible to express a shuffle mask for a scalable vector for this
2319  // case.
2320  if (isa<ScalableVectorType>(Mask->getType()))
2321  return false;
2322  SmallVector<int, 16> MaskAsInts;
2323  getShuffleMask(Mask, MaskAsInts);
2324  return isInsertSubvectorMask(MaskAsInts, NumSrcElts, NumSubElts, Index);
2325  }
2326 
2327  /// Return true if this shuffle mask is an insert subvector mask.
2328  bool isInsertSubvectorMask(int &NumSubElts, int &Index) const {
2329  // Not possible to express a shuffle mask for a scalable vector for this
2330  // case.
2331  if (isa<ScalableVectorType>(getType()))
2332  return false;
2333 
2334  int NumSrcElts =
2335  cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
2336  return isInsertSubvectorMask(ShuffleMask, NumSrcElts, NumSubElts, Index);
2337  }
2338 
2339  /// Change values in a shuffle permute mask assuming the two vector operands
2340  /// of length InVecNumElts have swapped position.
2342  unsigned InVecNumElts) {
2343  for (int &Idx : Mask) {
2344  if (Idx == -1)
2345  continue;
2346  Idx = Idx < (int)InVecNumElts ? Idx + InVecNumElts : Idx - InVecNumElts;
2347  assert(Idx >= 0 && Idx < (int)InVecNumElts * 2 &&
2348  "shufflevector mask index out of range");
2349  }
2350  }
2351 
2352  // Methods for support type inquiry through isa, cast, and dyn_cast:
2353  static bool classof(const Instruction *I) {
2354  return I->getOpcode() == Instruction::ShuffleVector;
2355  }
2356  static bool classof(const Value *V) {
2357  return isa<Instruction>(V) && classof(cast<Instruction>(V));
2358  }
2359 };
2360 
2361 template <>
2363  : public FixedNumOperandTraits<ShuffleVectorInst, 2> {};
2364 
2366 
2367 //===----------------------------------------------------------------------===//
2368 // ExtractValueInst Class
2369 //===----------------------------------------------------------------------===//
2370 
2371 /// This instruction extracts a struct member or array
2372 /// element value from an aggregate value.
2373 ///
2375  SmallVector<unsigned, 4> Indices;
2376 
2377  ExtractValueInst(const ExtractValueInst &EVI);
2378 
2379  /// Constructors - Create a extractvalue instruction with a base aggregate
2380  /// value and a list of indices. The first ctor can optionally insert before
2381  /// an existing instruction, the second appends the new instruction to the
2382  /// specified BasicBlock.
2383  inline ExtractValueInst(Value *Agg,
2384  ArrayRef<unsigned> Idxs,
2385  const Twine &NameStr,
2386  Instruction *InsertBefore);
2387  inline ExtractValueInst(Value *Agg,
2388  ArrayRef<unsigned> Idxs,
2389  const Twine &NameStr, BasicBlock *InsertAtEnd);
2390 
2391  void init(ArrayRef<unsigned> Idxs, const Twine &NameStr);
2392 
2393 protected:
2394  // Note: Instruction needs to be a friend here to call cloneImpl.
2395  friend class Instruction;
2396 
2397  ExtractValueInst *cloneImpl() const;
2398 
2399 public:
2401  ArrayRef<unsigned> Idxs,
2402  const Twine &NameStr = "",
2403  Instruction *InsertBefore = nullptr) {
2404  return new
2405  ExtractValueInst(Agg, Idxs, NameStr, InsertBefore);
2406  }
2407 
2409  ArrayRef<unsigned> Idxs,
2410  const Twine &NameStr,
2411  BasicBlock *InsertAtEnd) {
2412  return new ExtractValueInst(Agg, Idxs, NameStr, InsertAtEnd);
2413  }
2414 
2415  /// Returns the type of the element that would be extracted
2416  /// with an extractvalue instruction with the specified parameters.
2417  ///
2418  /// Null is returned if the indices are invalid for the specified type.
2419  static Type *getIndexedType(Type *Agg, ArrayRef<unsigned> Idxs);
2420 
2421  using idx_iterator = const unsigned*;
2422 
2423  inline idx_iterator idx_begin() const { return Indices.begin(); }
2424  inline idx_iterator idx_end() const { return Indices.end(); }
2426  return make_range(idx_begin(), idx_end());
2427  }
2428 
2430  return getOperand(0);
2431  }
2432  const Value *getAggregateOperand() const {
2433  return getOperand(0);
2434  }
2435  static unsigned getAggregateOperandIndex() {
2436  return 0U; // get index for modifying correct operand
2437  }
2438 
2440  return Indices;
2441  }
2442 
2443  unsigned getNumIndices() const {
2444  return (unsigned)Indices.size();
2445  }
2446 
2447  bool hasIndices() const {
2448  return true;
2449  }
2450 
2451  // Methods for support type inquiry through isa, cast, and dyn_cast:
2452  static bool classof(const Instruction *I) {
2453  return I->getOpcode() == Instruction::ExtractValue;
2454  }
2455  static bool classof(const Value *V) {
2456  return isa<Instruction>(V) && classof(cast<Instruction>(V));
2457  }
2458 };
2459 
2460 ExtractValueInst::ExtractValueInst(Value *Agg,
2461  ArrayRef<unsigned> Idxs,
2462  const Twine &NameStr,
2463  Instruction *InsertBefore)
2464  : UnaryInstruction(checkGEPType(getIndexedType(Agg->getType(), Idxs)),
2465  ExtractValue, Agg, InsertBefore) {
2466  init(Idxs, NameStr);
2467 }
2468 
2469 ExtractValueInst::ExtractValueInst(Value *Agg,
2470  ArrayRef<unsigned> Idxs,
2471  const Twine &NameStr,
2472  BasicBlock *InsertAtEnd)
2473  : UnaryInstruction(checkGEPType(getIndexedType(Agg->getType(), Idxs)),
2474  ExtractValue, Agg, InsertAtEnd) {
2475  init(Idxs, NameStr);
2476 }
2477 
2478 //===----------------------------------------------------------------------===//
2479 // InsertValueInst Class
2480 //===----------------------------------------------------------------------===//
2481 
2482 /// This instruction inserts a struct field of array element
2483 /// value into an aggregate value.
2484 ///
2486  SmallVector<unsigned, 4> Indices;
2487 
2488  InsertValueInst(const InsertValueInst &IVI);
2489 
2490  /// Constructors - Create a insertvalue instruction with a base aggregate
2491  /// value, a value to insert, and a list of indices. The first ctor can
2492  /// optionally insert before an existing instruction, the second appends
2493  /// the new instruction to the specified BasicBlock.
2494  inline InsertValueInst(Value *Agg, Value *Val,
2495  ArrayRef<unsigned> Idxs,
2496  const Twine &NameStr,
2497  Instruction *InsertBefore);
2498  inline InsertValueInst(Value *Agg, Value *Val,
2499  ArrayRef<unsigned> Idxs,
2500  const Twine &NameStr, BasicBlock *InsertAtEnd);
2501 
2502  /// Constructors - These two constructors are convenience methods because one
2503  /// and two index insertvalue instructions are so common.
2504  InsertValueInst(Value *Agg, Value *Val, unsigned Idx,
2505  const Twine &NameStr = "",
2506  Instruction *InsertBefore = nullptr);
2507  InsertValueInst(Value *Agg, Value *Val, unsigned Idx, const Twine &NameStr,
2508  BasicBlock *InsertAtEnd);
2509 
2510  void init(Value *Agg, Value *Val, ArrayRef<unsigned> Idxs,
2511  const Twine &NameStr);
2512 
2513 protected:
2514  // Note: Instruction needs to be a friend here to call cloneImpl.
2515  friend class Instruction;
2516 
2517  InsertValueInst *cloneImpl() const;
2518 
2519 public:
2520  // allocate space for exactly two operands
2521  void *operator new(size_t S) { return User::operator new(S, 2); }
2522  void operator delete(void *Ptr) { User::operator delete(Ptr); }
2523 
2524  static InsertValueInst *Create(Value *Agg, Value *Val,
2525  ArrayRef<unsigned> Idxs,
2526  const Twine &NameStr = "",
2527  Instruction *InsertBefore = nullptr) {
2528  return new InsertValueInst(Agg, Val, Idxs, NameStr, InsertBefore);
2529  }
2530 
2531  static InsertValueInst *Create(Value *Agg, Value *Val,
2532  ArrayRef<unsigned> Idxs,
2533  const Twine &NameStr,
2534  BasicBlock *InsertAtEnd) {
2535  return new InsertValueInst(Agg, Val, Idxs, NameStr, InsertAtEnd);
2536  }
2537 
2538  /// Transparently provide more efficient getOperand methods.
2540 
2541  using idx_iterator = const unsigned*;
2542 
2543  inline idx_iterator idx_begin() const { return Indices.begin(); }
2544  inline idx_iterator idx_end() const { return Indices.end(); }
2546  return make_range(idx_begin(), idx_end());
2547  }
2548 
2550  return getOperand(0);
2551  }
2552  const Value *getAggregateOperand() const {
2553  return getOperand(0);
2554  }
2555  static unsigned getAggregateOperandIndex() {
2556  return 0U; // get index for modifying correct operand
2557  }
2558 
2560  return getOperand(1);
2561  }
2563  return getOperand(1);
2564  }
2565  static unsigned getInsertedValueOperandIndex() {
2566  return 1U; // get index for modifying correct operand
2567  }
2568 
2570  return Indices;
2571  }
2572 
2573  unsigned getNumIndices() const {
2574  return (unsigned)Indices.size();
2575  }
2576 
2577  bool hasIndices() const {
2578  return true;
2579  }
2580 
2581  // Methods for support type inquiry through isa, cast, and dyn_cast:
2582  static bool classof(const Instruction *I) {
2583  return I->getOpcode() == Instruction::InsertValue;
2584  }
2585  static bool classof(const Value *V) {
2586  return isa<Instruction>(V) && classof(cast<Instruction>(V));
2587  }
2588 };
2589 
2590 template <>
2592  public FixedNumOperandTraits<InsertValueInst, 2> {
2593 };
2594 
2595 InsertValueInst::InsertValueInst(Value *Agg,
2596  Value *Val,
2597  ArrayRef<unsigned> Idxs,
2598  const Twine &NameStr,
2599  Instruction *InsertBefore)
2600  : Instruction(Agg->getType(), InsertValue,
2601  OperandTraits<InsertValueInst>::op_begin(this),
2602  2, InsertBefore) {
2603  init(Agg, Val, Idxs, NameStr);
2604 }
2605 
2606 InsertValueInst::InsertValueInst(Value *Agg,
2607  Value *Val,
2608  ArrayRef<unsigned> Idxs,
2609  const Twine &NameStr,
2610  BasicBlock *InsertAtEnd)
2611  : Instruction(Agg->getType(), InsertValue,
2612  OperandTraits<InsertValueInst>::op_begin(this),
2613  2, InsertAtEnd) {
2614  init(Agg, Val, Idxs, NameStr);
2615 }
2616 
2618 
2619 //===----------------------------------------------------------------------===//
2620 // PHINode Class
2621 //===----------------------------------------------------------------------===//
2622 
2623 // PHINode - The PHINode class is used to represent the magical mystical PHI
2624 // node, that can not exist in nature, but can be synthesized in a computer
2625 // scientist's overactive imagination.
2626 //
2627 class PHINode : public Instruction {
// NOTE(review): this file is a doxygen-rendered export; lines that were
// hyperlinks (operand-accessor macros, several member signatures) lost their
// text during extraction. Hedged notes below mark those gaps — confirm each
// against upstream llvm/IR/Instructions.h before relying on this copy.
2628  /// The number of operands actually allocated. NumOperands is
2629  /// the number actually in use.
2630  unsigned ReservedSpace;
2631 
// Copy constructor is private/declared-only: PHINodes are cloned via cloneImpl.
2632  PHINode(const PHINode &PN);
2633 
2634  explicit PHINode(Type *Ty, unsigned NumReservedValues,
2635  const Twine &NameStr = "",
2636  Instruction *InsertBefore = nullptr)
2637  : Instruction(Ty, Instruction::PHI, nullptr, 0, InsertBefore),
2638  ReservedSpace(NumReservedValues) {
2639  assert(!Ty->isTokenTy() && "PHI nodes cannot have token type!");
2640  setName(NameStr);
2641  allocHungoffUses(ReservedSpace);
2642  }
2643 
2644  PHINode(Type *Ty, unsigned NumReservedValues, const Twine &NameStr,
2645  BasicBlock *InsertAtEnd)
2646  : Instruction(Ty, Instruction::PHI, nullptr, 0, InsertAtEnd),
2647  ReservedSpace(NumReservedValues) {
2648  assert(!Ty->isTokenTy() && "PHI nodes cannot have token type!");
2649  setName(NameStr);
2650  allocHungoffUses(ReservedSpace);
2651  }
2652 
2653 protected:
2654  // Note: Instruction needs to be a friend here to call cloneImpl.
2655  friend class Instruction;
2656 
2657  PHINode *cloneImpl() const;
2658 
2659  // allocHungoffUses - this is more complicated than the generic
2660  // User::allocHungoffUses, because we have to allocate Uses for the incoming
2661  // values and pointers to the incoming blocks, all in one allocation.
2662  void allocHungoffUses(unsigned N) {
2663  User::allocHungoffUses(N, /* IsPhi */ true);
2664  }
2665 
2666 public:
2667  /// Constructors - NumReservedValues is a hint for the number of incoming
2668  /// edges that this phi node will have (use 0 if you really have no idea).
2669  static PHINode *Create(Type *Ty, unsigned NumReservedValues,
2670  const Twine &NameStr = "",
2671  Instruction *InsertBefore = nullptr) {
2672  return new PHINode(Ty, NumReservedValues, NameStr, InsertBefore);
2673  }
2674 
2675  static PHINode *Create(Type *Ty, unsigned NumReservedValues,
2676  const Twine &NameStr, BasicBlock *InsertAtEnd) {
2677  return new PHINode(Ty, NumReservedValues, NameStr, InsertAtEnd);
2678  }
2679 
2680  /// Provide fast operand accessors
// NOTE(review): the accessor-declaration macro text was stripped from the
// next line by the extraction — confirm against upstream.
2682 
2683  // Block iterator interface. This provides access to the list of incoming
2684  // basic blocks, which parallels the list of incoming values.
2685 
// NOTE(review): the block_iterator/const_block_iterator typedefs and the
// signatures of the four accessors below were stripped; only their bodies
// survive. The bodies show the incoming-block pointers are stored directly
// after the reserved operand (Use) slots, hence op_begin() + ReservedSpace.
2688 
2690  return reinterpret_cast<block_iterator>(op_begin() + ReservedSpace);
2691  }
2692 
2694  return reinterpret_cast<const_block_iterator>(op_begin() + ReservedSpace);
2695  }
2696 
2698  return block_begin() + getNumOperands();
2699  }
2700 
2702  return block_begin() + getNumOperands();
2703  }
2704 
// Range helpers over the incoming-block list (non-const and const).
2706  return make_range(block_begin(), block_end());
2707  }
2708 
2710  return make_range(block_begin(), block_end());
2711  }
2712 
// The incoming values are exactly the instruction's operands.
2713  op_range incoming_values() { return operands(); }
2714 
2715  const_op_range incoming_values() const { return operands(); }
2716 
2717  /// Return the number of incoming edges
2718  ///
2719  unsigned getNumIncomingValues() const { return getNumOperands(); }
2720 
2721  /// Return incoming value number x
2722  ///
2723  Value *getIncomingValue(unsigned i) const {
2724  return getOperand(i);
2725  }
2726  void setIncomingValue(unsigned i, Value *V) {
2727  assert(V && "PHI node got a null value!");
2728  assert(getType() == V->getType() &&
2729  "All operands to PHI node must be the same type as the PHI node!");
2730  setOperand(i, V);
2731  }
2732 
// For PHIs, incoming-value index and operand index are the same mapping,
// in both directions (identity), as the two functions below show.
2733  static unsigned getOperandNumForIncomingValue(unsigned i) {
2734  return i;
2735  }
2736 
2737  static unsigned getIncomingValueNumForOperand(unsigned i) {
2738  return i;
2739  }
2740 
2741  /// Return incoming basic block number @p i.
2742  ///
2743  BasicBlock *getIncomingBlock(unsigned i) const {
2744  return block_begin()[i];
2745  }
2746 
2747  /// Return incoming basic block corresponding
2748  /// to an operand of the PHI.
2749  ///
2750  BasicBlock *getIncomingBlock(const Use &U) const {
2751  assert(this == U.getUser() && "Iterator doesn't point to PHI's Uses?");
2752  return getIncomingBlock(unsigned(&U - op_begin()));
2753  }
2754 
2755  /// Return incoming basic block corresponding
2756  /// to value use iterator.
2757  ///
// NOTE(review): signature stripped by extraction; body resolves a value-use
// iterator to its Use and dispatches to getIncomingBlock(const Use &).
2759  return getIncomingBlock(I.getUse());
2760  }
2761 
2762  void setIncomingBlock(unsigned i, BasicBlock *BB) {
2763  assert(BB && "PHI node got a null basic block!");
2764  block_begin()[i] = BB;
2765  }
2766 
2767  /// Replace every incoming basic block \p Old to basic block \p New.
// NOTE(review): signature stripped (takes Old and New blocks per the body).
2769  assert(New && Old && "PHI node got a null basic block!");
2770  for (unsigned Op = 0, NumOps = getNumOperands(); Op != NumOps; ++Op)
2771  if (getIncomingBlock(Op) == Old)
2772  setIncomingBlock(Op, New);
2773  }
2774 
2775  /// Add an incoming value to the end of the PHI list
2776  ///
// NOTE(review): signature stripped (takes a Value *V and BasicBlock *BB per
// the body). Grows the hung-off operand storage when ReservedSpace is full.
2778  if (getNumOperands() == ReservedSpace)
2779  growOperands(); // Get more space!
2780  // Initialize some new operands.
2781  setNumHungOffUseOperands(getNumOperands() + 1);
2782  setIncomingValue(getNumOperands() - 1, V);
2783  setIncomingBlock(getNumOperands() - 1, BB);
2784  }
2785 
2786  /// Remove an incoming value. This is useful if a
2787  /// predecessor basic block is deleted. The value removed is returned.
2788  ///
2789  /// If the last incoming value for a PHI node is removed (and DeletePHIIfEmpty
2790  /// is true), the PHI node is destroyed and any uses of it are replaced with
2791  /// dummy values. The only time there should be zero incoming values to a PHI
2792  /// node is when the block is dead, so this strategy is sound.
2793  ///
2794  Value *removeIncomingValue(unsigned Idx, bool DeletePHIIfEmpty = true);
2795 
2796  Value *removeIncomingValue(const BasicBlock *BB, bool DeletePHIIfEmpty=true) {
2797  int Idx = getBasicBlockIndex(BB);
2798  assert(Idx >= 0 && "Invalid basic block argument to remove!");
2799  return removeIncomingValue(Idx, DeletePHIIfEmpty);
2800  }
2801 
2802  /// Return the first index of the specified basic
2803  /// block in the value list for this PHI. Returns -1 if no instance.
2804  ///
2805  int getBasicBlockIndex(const BasicBlock *BB) const {
2806  for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
2807  if (block_begin()[i] == BB)
2808  return i;
2809  return -1;
2810  }
2811 
// NOTE(review): signature stripped; body returns the incoming value for a
// given block and asserts the block is actually an incoming block.
2813  int Idx = getBasicBlockIndex(BB);
2814  assert(Idx >= 0 && "Invalid basic block argument!");
2815  return getIncomingValue(Idx);
2816  }
2817 
2818  /// Set every incoming value(s) for block \p BB to \p V.
// NOTE(review): signature stripped (takes BasicBlock *BB and Value *V).
2820  assert(BB && "PHI node got a null basic block!");
2821  bool Found = false;
2822  for (unsigned Op = 0, NumOps = getNumOperands(); Op != NumOps; ++Op)
2823  if (getIncomingBlock(Op) == BB) {
2824  Found = true;
2825  setIncomingValue(Op, V);
2826  }
// (void)Found silences the unused-variable warning in release builds where
// the assert below compiles away.
2827  (void)Found;
2828  assert(Found && "Invalid basic block argument to set!");
2829  }
2830 
2831  /// If the specified PHI node always merges together the
2832  /// same value, return the value, otherwise return null.
2833  Value *hasConstantValue() const;
2834 
2835  /// Whether the specified PHI node always merges
2836  /// together the same value, assuming undefs are equal to a unique
2837  /// non-undef value.
2838  bool hasConstantOrUndefValue() const;
2839 
2840  /// If the PHI node is complete which means all of its parent's predecessors
2841  /// have incoming value in this PHI, return true, otherwise return false.
2842  bool isComplete() const {
// NOTE(review): the line opening the predicate call (an all-of over the
// parent block's predecessors) was stripped by extraction.
2844  [this](const BasicBlock *Pred) {
2845  return getBasicBlockIndex(Pred) >= 0;
2846  });
2847  }
2848 
2849  /// Methods for support type inquiry through isa, cast, and dyn_cast:
2850  static bool classof(const Instruction *I) {
2851  return I->getOpcode() == Instruction::PHI;
2852  }
2853  static bool classof(const Value *V) {
2854  return isa<Instruction>(V) && classof(cast<Instruction>(V));
2855  }
2856 
2857 private:
// Grows the hung-off use storage when addIncoming exhausts ReservedSpace.
2858  void growOperands();
2859 };
2860 
2861 template <>
// NOTE(review): the struct-declaration line of this OperandTraits
// specialization for PHINode was stripped by extraction (doxygen hyperlink);
// only the closing brace survives. Confirm against upstream Instructions.h.
2863 };
2864 
2866 
2867 //===----------------------------------------------------------------------===//
2868 // LandingPadInst Class
2869 //===----------------------------------------------------------------------===//
2870 
2871 //===---------------------------------------------------------------------------
2872 /// The landingpad instruction holds all of the information
2873 /// necessary to generate correct exception handling. The landingpad instruction
2874 /// cannot be moved from the top of a landing pad block, which itself is
2875 /// accessible only from the 'unwind' edge of an invoke. This uses the
2876 /// SubclassData field in Value to store whether or not the landingpad is a
2877 /// cleanup.
2878 ///
2879 class LandingPadInst : public Instruction {
// The cleanup flag is packed into Value's subclass-data bitfield, bit 0.
2880  using CleanupField = BoolBitfieldElementT<0>;
2881 
2882  /// The number of operands actually allocated. NumOperands is
2883  /// the number actually in use.
2884  unsigned ReservedSpace;
2885 
// Copy constructor is declared-only; cloning goes through cloneImpl.
2886  LandingPadInst(const LandingPadInst &LP);
2887 
2888 public:
// NOTE(review): a public declaration on the next line (an enum per upstream)
// lost its text in the doxygen extraction — confirm against upstream.
2890 
2891 private:
2892  explicit LandingPadInst(Type *RetTy, unsigned NumReservedValues,
2893  const Twine &NameStr, Instruction *InsertBefore);
2894  explicit LandingPadInst(Type *RetTy, unsigned NumReservedValues,
2895  const Twine &NameStr, BasicBlock *InsertAtEnd);
2896 
2897  // Allocate space for exactly zero operands.
2898  void *operator new(size_t S) { return User::operator new(S); }
2899 
// Clause operands live in hung-off storage that grows on demand.
2900  void growOperands(unsigned Size);
2901  void init(unsigned NumReservedValues, const Twine &NameStr);
2902 
2903 protected:
2904  // Note: Instruction needs to be a friend here to call cloneImpl.
2905  friend class Instruction;
2906 
2907  LandingPadInst *cloneImpl() const;
2908 
2909 public:
2910  void operator delete(void *Ptr) { User::operator delete(Ptr); }
2911 
2912  /// Constructors - NumReservedClauses is a hint for the number of incoming
2913  /// clauses that this landingpad will have (use 0 if you really have no idea).
2914  static LandingPadInst *Create(Type *RetTy, unsigned NumReservedClauses,
2915  const Twine &NameStr = "",
2916  Instruction *InsertBefore = nullptr);
2917  static LandingPadInst *Create(Type *RetTy, unsigned NumReservedClauses,
2918  const Twine &NameStr, BasicBlock *InsertAtEnd);
2919 
2920  /// Provide fast operand accessors
// NOTE(review): accessor-declaration macro stripped from the next line.
2922 
2923  /// Return 'true' if this landingpad instruction is a
2924  /// cleanup. I.e., it should be run when unwinding even if its landing pad
2925  /// doesn't catch the exception.
2926  bool isCleanup() const { return getSubclassData<CleanupField>(); }
2927 
2928  /// Indicate that this landingpad instruction is a cleanup.
2929  void setCleanup(bool V) { setSubclassData<CleanupField>(V); }
2930 
2931  /// Add a catch or filter clause to the landing pad.
2932  void addClause(Constant *ClauseVal);
2933 
2934  /// Get the value of the clause at index Idx. Use isCatch/isFilter to
2935  /// determine what type of clause this is.
2936  Constant *getClause(unsigned Idx) const {
2937  return cast<Constant>(getOperandList()[Idx]);
2938  }
2939 
2940  /// Return 'true' if the clause and index Idx is a catch clause.
// Filter clauses are arrays of types; catch clauses are anything else.
2941  bool isCatch(unsigned Idx) const {
2942  return !isa<ArrayType>(getOperandList()[Idx]->getType());
2943  }
2944 
2945  /// Return 'true' if the clause and index Idx is a filter clause.
2946  bool isFilter(unsigned Idx) const {
2947  return isa<ArrayType>(getOperandList()[Idx]->getType());
2948  }
2949 
2950  /// Get the number of clauses for this landing pad.
2951  unsigned getNumClauses() const { return getNumOperands(); }
2952 
2953  /// Grow the size of the operand list to accommodate the new
2954  /// number of clauses.
2955  void reserveClauses(unsigned Size) { growOperands(Size); }
2956 
2957  // Methods for support type inquiry through isa, cast, and dyn_cast:
2958  static bool classof(const Instruction *I) {
2959  return I->getOpcode() == Instruction::LandingPad;
2960  }
2961  static bool classof(const Value *V) {
2962  return isa<Instruction>(V) && classof(cast<Instruction>(V));
2963  }
2964 };
2965 
2966 template <>
// NOTE(review): the struct-declaration line of this OperandTraits
// specialization for LandingPadInst was stripped by extraction; only the
// closing brace survives. Confirm against upstream Instructions.h.
2968 };
2969 
2971 
2972 //===----------------------------------------------------------------------===//
2973 // ReturnInst Class
2974 //===----------------------------------------------------------------------===//
2975 
2976 //===---------------------------------------------------------------------------
2977 /// Return a value (possibly void), from a function. Execution
2978 /// does not continue in this function any longer.
2979 ///
2980 class ReturnInst : public Instruction {
// Copy constructor is declared-only; cloning goes through cloneImpl.
2981  ReturnInst(const ReturnInst &RI);
2982 
2983 private:
2984  // ReturnInst constructors:
2985  // ReturnInst() - 'ret void' instruction
2986  // ReturnInst( null) - 'ret void' instruction
2987  // ReturnInst(Value* X) - 'ret X' instruction
2988  // ReturnInst( null, Inst *I) - 'ret void' instruction, insert before I
2989  // ReturnInst(Value* X, Inst *I) - 'ret X' instruction, insert before I
2990  // ReturnInst( null, BB *B) - 'ret void' instruction, insert @ end of B
2991  // ReturnInst(Value* X, BB *B) - 'ret X' instruction, insert @ end of B
2992  //
2993  // NOTE: If the Value* passed is of type void then the constructor behaves as
2994  // if it was passed NULL.
2995  explicit ReturnInst(LLVMContext &C, Value *retVal = nullptr,
2996  Instruction *InsertBefore = nullptr);
2997  ReturnInst(LLVMContext &C, Value *retVal, BasicBlock *InsertAtEnd);
2998  explicit ReturnInst(LLVMContext &C, BasicBlock *InsertAtEnd);
2999 
3000 protected:
3001  // Note: Instruction needs to be a friend here to call cloneImpl.
3002  friend class Instruction;
3003 
3004  ReturnInst *cloneImpl() const;
3005 
3006 public:
// The placement-new argument (!!retVal, i.e. 0 or 1) is the operand count:
// a 'ret void' has zero operands, a 'ret X' has exactly one.
3007  static ReturnInst* Create(LLVMContext &C, Value *retVal = nullptr,
3008  Instruction *InsertBefore = nullptr) {
3009  return new(!!retVal) ReturnInst(C, retVal, InsertBefore);
3010  }
3011 
3012  static ReturnInst* Create(LLVMContext &C, Value *retVal,
3013  BasicBlock *InsertAtEnd) {
3014  return new(!!retVal) ReturnInst(C, retVal, InsertAtEnd);
3015  }
3016 
3017  static ReturnInst* Create(LLVMContext &C, BasicBlock *InsertAtEnd) {
3018  return new(0) ReturnInst(C, InsertAtEnd);
3019  }
3020 
3021  /// Provide fast operand accessors
// NOTE(review): accessor-declaration macro stripped from the next line.
3023 
3024  /// Convenience accessor. Returns null if there is no return value.
// NOTE(review): the signature was stripped by extraction; the body returns
// operand 0 (the returned Value) or nullptr for a 'ret void'.
3026  return getNumOperands() != 0 ? getOperand(0) : nullptr;
3027  }
3028 
// A return terminator transfers control out of the function: no successors.
3029  unsigned getNumSuccessors() const { return 0; }
3030 
3031  // Methods for support type inquiry through isa, cast, and dyn_cast:
3032  static bool classof(const Instruction *I) {
3033  return (I->getOpcode() == Instruction::Ret);
3034  }
3035  static bool classof(const Value *V) {
3036  return isa<Instruction>(V) && classof(cast<Instruction>(V));
3037  }
3038 
3039 private:
// Private: calling these is a programming error, enforced at runtime.
3040  BasicBlock *getSuccessor(unsigned idx) const {
3041  llvm_unreachable("ReturnInst has no successors!");
3042  }
3043 
3044  void setSuccessor(unsigned idx, BasicBlock *B) {
3045  llvm_unreachable("ReturnInst has no successors!");
3046  }
3047 };
3048 
// ReturnInst has a variadic (0 or 1) operand count, fixed at creation time.
3049 template <>
3050 struct OperandTraits<ReturnInst> : public VariadicOperandTraits<ReturnInst> {
3051 };
3052 
3054 
3055 //===----------------------------------------------------------------------===//
3056 // BranchInst Class
3057 //===----------------------------------------------------------------------===//
3058 
3059 //===---------------------------------------------------------------------------
3060 /// Conditional or Unconditional Branch instruction.
3061 ///
3062 class BranchInst : public Instruction {
3063  /// Ops list - Branches are strange. The operands are ordered:
3064  /// [Cond, FalseDest,] TrueDest. This makes some accessors faster because
3065  /// they don't have to check for cond/uncond branchness. These are mostly
3066  /// accessed relative from op_end().
3067  BranchInst(const BranchInst &BI);
3068  // BranchInst constructors (where {B, T, F} are blocks, and C is a condition):
3069  // BranchInst(BB *B) - 'br B'
3070  // BranchInst(BB* T, BB *F, Value *C) - 'br C, T, F'
3071  // BranchInst(BB* B, Inst *I) - 'br B' insert before I
3072  // BranchInst(BB* T, BB *F, Value *C, Inst *I) - 'br C, T, F', insert before I
3073  // BranchInst(BB* B, BB *I) - 'br B' insert at end
3074  // BranchInst(BB* T, BB *F, Value *C, BB *I) - 'br C, T, F', insert at end
3075  explicit BranchInst(BasicBlock *IfTrue, Instruction *InsertBefore = nullptr);
3076  BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond,
3077  Instruction *InsertBefore = nullptr);
3078  BranchInst(BasicBlock *IfTrue, BasicBlock *InsertAtEnd);
3079  BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond,
3080  BasicBlock *InsertAtEnd);
3081 
// Debug-build sanity checks on the constructed branch.
3082  void AssertOK();
3083 
3084 protected:
3085  // Note: Instruction needs to be a friend here to call cloneImpl.
3086  friend class Instruction;
3087 
3088  BranchInst *cloneImpl() const;
3089 
3090 public:
3091  /// Iterator type that casts an operand to a basic block.
3092  ///
3093  /// This only makes sense because the successors are stored as adjacent
3094  /// operands for branch instructions.
// NOTE(review): the 'struct succ_op_iterator' declaration line was stripped
// by extraction (doxygen hyperlink); only the base-class list survives.
3096  : iterator_adaptor_base<succ_op_iterator, value_op_iterator,
3097  std::random_access_iterator_tag, BasicBlock *,
3098  ptrdiff_t, BasicBlock *, BasicBlock *> {
3099  explicit succ_op_iterator(value_op_iterator I) : iterator_adaptor_base(I) {}
3100 
3101  BasicBlock *operator*() const { return cast<BasicBlock>(*I); }
3102  BasicBlock *operator->() const { return operator*(); }
3103  };
3104 
3105  /// The const version of `succ_op_iterator`.
// NOTE(review): declaration line stripped here as well.
3107  : iterator_adaptor_base<const_succ_op_iterator, const_value_op_iterator,
3108  std::random_access_iterator_tag,
3109  const BasicBlock *, ptrdiff_t, const BasicBlock *,
3110  const BasicBlock *> {
// NOTE(review): the explicit constructor's signature line was stripped.
3112  : iterator_adaptor_base(I) {}
3113 
3114  const BasicBlock *operator*() const { return cast<BasicBlock>(*I); }
3115  const BasicBlock *operator->() const { return operator*(); }
3116  };
3117 
// The placement-new argument is the operand count: 1 for an unconditional
// branch, 3 (cond + two destinations) for a conditional one.
3118  static BranchInst *Create(BasicBlock *IfTrue,
3119  Instruction *InsertBefore = nullptr) {
3120  return new(1) BranchInst(IfTrue, InsertBefore);
3121  }
3122 
3123  static BranchInst *Create(BasicBlock *IfTrue, BasicBlock *IfFalse,
3124  Value *Cond, Instruction *InsertBefore = nullptr) {
3125  return new(3) BranchInst(IfTrue, IfFalse, Cond, InsertBefore);
3126  }
3127 
3128  static BranchInst *Create(BasicBlock *IfTrue, BasicBlock *InsertAtEnd) {
3129  return new(1) BranchInst(IfTrue, InsertAtEnd);
3130  }
3131 
3132  static BranchInst *Create(BasicBlock *IfTrue, BasicBlock *IfFalse,
3133  Value *Cond, BasicBlock *InsertAtEnd) {
3134  return new(3) BranchInst(IfTrue, IfFalse, Cond, InsertAtEnd);
3135  }
3136 
3137  /// Transparently provide more efficient getOperand methods.
// NOTE(review): accessor-declaration macro stripped from the next line.
3139 
// Operand count doubles as the conditional/unconditional discriminator.
3140  bool isUnconditional() const { return getNumOperands() == 1; }
3141  bool isConditional() const { return getNumOperands() == 3; }
3142 
// Op<-3>() is the condition slot, counted back from op_end() (see the
// operand-layout comment at the top of the class).
3143  Value *getCondition() const {
3144  assert(isConditional() && "Cannot get condition of an uncond branch!");
3145  return Op<-3>();
3146  }
3147 
3148  void setCondition(Value *V) {
3149  assert(isConditional() && "Cannot set condition of unconditional branch!");
3150  Op<-3>() = V;
3151  }
3152 
3153  unsigned getNumSuccessors() const { return 1+isConditional(); }
3154 
// Successor 0 is the last operand (TrueDest); successor 1, if present, the
// one before it (FalseDest).
3155  BasicBlock *getSuccessor(unsigned i) const {
3156  assert(i < getNumSuccessors() && "Successor # out of range for Branch!");
3157  return cast_or_null<BasicBlock>((&Op<-1>() - i)->get());
3158  }
3159 
3160  void setSuccessor(unsigned idx, BasicBlock *NewSucc) {
3161  assert(idx < getNumSuccessors() && "Successor # out of range for Branch!");
3162  *(&Op<-1>() - idx) = NewSucc;
3163  }
3164 
3165  /// Swap the successors of this branch instruction.
3166  ///
3167  /// Swaps the successors of the branch instruction. This also swaps any
3168  /// branch weight metadata associated with the instruction so that it
3169  /// continues to map correctly to each operand.
3170  void swapSuccessors();
3171 
// NOTE(review): the signatures of the two successors() range accessors below
// were partially stripped; the bodies skip the condition operand (index 0)
// when the branch is conditional and iterate the remaining block operands.
3173  return make_range(
3174  succ_op_iterator(std::next(value_op_begin(), isConditional() ? 1 : 0)),
3175  succ_op_iterator(value_op_end()));
3176  }
3177 
// NOTE(review): const overload; opening line(s) stripped by extraction.
3180  std::next(value_op_begin(), isConditional() ? 1 : 0)),
3181  const_succ_op_iterator(value_op_end()));
3182  }
3183 
3184  // Methods for support type inquiry through isa, cast, and dyn_cast:
3185  static bool classof(const Instruction *I) {
3186  return (I->getOpcode() == Instruction::Br);
3187  }
3188  static bool classof(const Value *V) {
3189  return isa<Instruction>(V) && classof(cast<Instruction>(V));
3190  }
3191 };
3192 
// BranchInst is variadic with a minimum of 1 operand (the lone destination
// of an unconditional branch).
3193 template <>
3194 struct OperandTraits<BranchInst> : public VariadicOperandTraits<BranchInst, 1> {
3195 };
3196 
3198 
3199 //===----------------------------------------------------------------------===//
3200 // SwitchInst Class
3201 //===----------------------------------------------------------------------===//
3202 
3203 //===---------------------------------------------------------------------------
3204 /// Multiway switch
3205 ///
3206 class SwitchInst : public Instruction {
3207  unsigned ReservedSpace;
3208 
3209  // Operand[0] = Value to switch on
3210  // Operand[1] = Default basic block destination
3211  // Operand[2n ] = Value to match
3212  // Operand[2n+1] = BasicBlock to go to on match
3213  SwitchInst(const SwitchInst &SI);
3214 
3215  /// Create a new switch instruction, specifying a value to switch on and a
3216  /// default destination. The number of additional cases can be specified here
3217  /// to make memory allocation more efficient. This constructor can also
3218  /// auto-insert before another instruction.
3219  SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases,
3220  Instruction *InsertBefore);
3221 
3222  /// Create a new switch instruction, specifying a value to switch on and a
3223  /// default destination. The number of additional cases can be specified here
3224  /// to make memory allocation more efficient. This constructor also
3225  /// auto-inserts at the end of the specified BasicBlock.
3226  SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases,
3227  BasicBlock *InsertAtEnd);
3228 
3229  // allocate space for exactly zero operands
3230  void *operator new(size_t S) { return User::operator new(S); }
3231 
3232  void init(Value *Value, BasicBlock *Default, unsigned NumReserved);
3233  void growOperands();
3234 
3235 protected:
3236  // Note: Instruction needs to be a friend here to call cloneImpl.
3237  friend class Instruction;
3238 
3239  SwitchInst *cloneImpl() const;
3240 
3241 public:
3242  void operator delete(void *Ptr) { User::operator delete(Ptr); }
3243 
// Sentinel case index denoting the default destination (not a real case).
3244  // -2
3245  static const unsigned DefaultPseudoIndex = static_cast<unsigned>(~0L-1);
3246 
3247  template <typename CaseHandleT> class CaseIteratorImpl;
3248 
3249  /// A handle to a particular switch case. It exposes a convenient interface
3250  /// to both the case value and the successor block.
3251  ///
3252  /// We define this as a template and instantiate it to form both a const and
3253  /// non-const handle.
3254  template <typename SwitchInstT, typename ConstantIntT, typename BasicBlockT>
// NOTE(review): the 'class CaseHandleImpl {' declaration line was stripped by
// extraction (doxygen hyperlink) — confirm against upstream Instructions.h.
3256  // Directly befriend both const and non-const iterators.
3257  friend class SwitchInst::CaseIteratorImpl<
3258  CaseHandleImpl<SwitchInstT, ConstantIntT, BasicBlockT>>;
3259 
3260  protected:
3261  // Expose the switch type we're parameterized with to the iterator.
3262  using SwitchInstType = SwitchInstT;
3263 
3264  SwitchInstT *SI;
// NOTE(review): the Index member declaration (a ptrdiff_t per its uses
// below) was stripped from the next line.
3266 
3267  CaseHandleImpl() = default;
3268  CaseHandleImpl(SwitchInstT *SI, ptrdiff_t Index) : SI(SI), Index(Index) {}
3269 
3270  public:
3271  /// Resolves case value for current case.
// Case values live at even operand slots starting at 2 (see operand layout).
3272  ConstantIntT *getCaseValue() const {
3273  assert((unsigned)Index < SI->getNumCases() &&
3274  "Index out the number of cases.");
3275  return reinterpret_cast<ConstantIntT *>(SI->getOperand(2 + Index * 2));
3276  }
3277 
3278  /// Resolves successor for current case.
3279  BasicBlockT *getCaseSuccessor() const {
3280  assert(((unsigned)Index < SI->getNumCases() ||
3281  (unsigned)Index == DefaultPseudoIndex) &&
3282  "Index out the number of cases.");
3283  return SI->getSuccessor(getSuccessorIndex());
3284  }
3285 
3286  /// Returns number of current case.
3287  unsigned getCaseIndex() const { return Index; }
3288 
3289  /// Returns successor index for current case successor.
// Successor 0 is the default destination; case i maps to successor i+1.
3290  unsigned getSuccessorIndex() const {
3291  assert(((unsigned)Index == DefaultPseudoIndex ||
3292  (unsigned)Index < SI->getNumCases()) &&
3293  "Index out the number of cases.");
3294  return (unsigned)Index != DefaultPseudoIndex ? Index + 1 : 0;
3295  }
3296 
3297  bool operator==(const CaseHandleImpl &RHS) const {
3298  assert(SI == RHS.SI && "Incompatible operators.");
3299  return Index == RHS.Index;
3300  }
3301  };
3302 
3303  using ConstCaseHandle =
// NOTE(review): the right-hand side of this alias (a const CaseHandleImpl
// instantiation) was stripped from the next line.
3305 
// NOTE(review): the 'class CaseHandle' declaration line was stripped; only
// its base-class list and members survive below.
3307  : public CaseHandleImpl<SwitchInst, ConstantInt, BasicBlock> {
// NOTE(review): a friend declaration line was stripped here.
3309 
3310  public:
// NOTE(review): the CaseHandle constructor line was stripped here.
3312 
3313  /// Sets the new value for current case.
// NOTE(review): signature stripped (takes a ConstantInt *V per the body).
3315  assert((unsigned)Index < SI->getNumCases() &&
3316  "Index out the number of cases.");
3317  SI->setOperand(2 + Index*2, reinterpret_cast<Value*>(V));
3318  }
3319 
3320  /// Sets the new successor for current case.
// NOTE(review): signature stripped (takes a BasicBlock *S per the body).
3322  SI->setSuccessor(getSuccessorIndex(), S);
3323  }
3324  };
3325 
3326  template <typename CaseHandleT>
3327  class CaseIteratorImpl
3328  : public iterator_facade_base<CaseIteratorImpl<CaseHandleT>,
3329  std::random_access_iterator_tag,
3330  CaseHandleT> {
3331  using SwitchInstT = typename CaseHandleT::SwitchInstType;
3332 
3333  CaseHandleT Case;
3334 
3335  public:
3336  /// Default constructed iterator is in an invalid state until assigned to
3337  /// a case for a particular switch.
3338  CaseIteratorImpl() = default;
3339 
3340  /// Initializes case iterator for given SwitchInst and for given
3341  /// case number.
3342  CaseIteratorImpl(SwitchInstT *SI, unsigned CaseNum) : Case(SI, CaseNum) {}
3343 
3344  /// Initializes case iterator for given SwitchInst and for given
3345  /// successor index.
// NOTE(review): the static factory's opening signature line was stripped;
// the body maps successor index 0 to the default pseudo-case and successor
// index i>0 to case i-1 (inverse of getSuccessorIndex above).
3347  unsigned SuccessorIndex) {
3348  assert(SuccessorIndex < SI->getNumSuccessors() &&
3349  "Successor index # out of range!");
3350  return SuccessorIndex != 0 ? CaseIteratorImpl(SI, SuccessorIndex - 1)
3351  : CaseIteratorImpl(SI, DefaultPseudoIndex);
3352  }
3353 
3354  /// Support converting to the const variant. This will be a no-op for const
3355  /// variant.
// NOTE(review): conversion-operator signature stripped here.
3357  return CaseIteratorImpl<ConstCaseHandle>(Case.SI, Case.Index);
3358  }
3359 
// NOTE(review): operator+=(ptrdiff_t N) signature stripped here.
3361  // Check index correctness after addition.
3362  // Note: Index == getNumCases() means end().
3363  assert(Case.Index + N >= 0 &&
3364  (unsigned)(Case.Index + N) <= Case.SI->getNumCases() &&
3365  "Case.Index out the number of cases.");
3366  Case.Index += N;
3367  return *this;
3368  }
// NOTE(review): operator-=(ptrdiff_t N) signature stripped here.
3370  // Check index correctness after subtraction.
3371  // Note: Case.Index == getNumCases() means end().
3372  assert(Case.Index - N >= 0 &&
3373  (unsigned)(Case.Index - N) <= Case.SI->getNumCases() &&
3374  "Case.Index out the number of cases.");
3375  Case.Index -= N;
3376  return *this;
3377  }
// NOTE(review): operator-(const CaseIteratorImpl &) signature stripped here.
3379  assert(Case.SI == RHS.Case.SI && "Incompatible operators.");
3380  return Case.Index - RHS.Case.Index;
3381  }
3382  bool operator==(const CaseIteratorImpl &RHS) const {
3383  return Case == RHS.Case;
3384  }
3385  bool operator<(const CaseIteratorImpl &RHS) const {
3386  assert(Case.SI == RHS.Case.SI && "Incompatible operators.");
3387  return Case.Index < RHS.Case.Index;
3388  }
3389  CaseHandleT &operator*() { return Case; }
3390  const CaseHandleT &operator*() const { return Case; }
3391  };
3392 
// NOTE(review): the CaseIt/ConstCaseIt typedef lines were stripped here.
3395 
// NOTE(review): the opening line of this Create overload was stripped; the
// body shows it takes (Value, Default, NumCases, InsertBefore).
3397  unsigned NumCases,
3398  Instruction *InsertBefore = nullptr) {
3399  return new SwitchInst(Value, Default, NumCases, InsertBefore);
3400  }
3401 
// NOTE(review): opening line stripped; insert-at-end Create overload.
3403  unsigned NumCases, BasicBlock *InsertAtEnd) {
3404  return new SwitchInst(Value, Default, NumCases, InsertAtEnd);
3405  }
3406 
3407  /// Provide fast operand accessors
// NOTE(review): accessor-declaration macro stripped from the next line.
3409 
3410  // Accessor Methods for Switch stmt
3411  Value *getCondition() const { return getOperand(0); }
3412  void setCondition(Value *V) { setOperand(0, V); }
3413 
// NOTE(review): getDefaultDest() signature stripped; body returns operand 1.
3415  return cast<BasicBlock>(getOperand(1));
3416  }
3417 
3418  void setDefaultDest(BasicBlock *DefaultCase) {
3419  setOperand(1, reinterpret_cast<Value*>(DefaultCase));
3420  }
3421 
3422  /// Return the number of 'cases' in this switch instruction, excluding the
3423  /// default case.
// Operands are (cond, default) + 2 per case; hence ops/2 - 1.
3424  unsigned getNumCases() const {
3425  return getNumOperands()/2 - 1;
3426  }
3427 
3428  /// Returns a read/write iterator that points to the first case in the
3429  /// SwitchInst.
// NOTE(review): case_begin() signature stripped here.
3431  return CaseIt(this, 0);
3432  }
3433 
3434  /// Returns a read-only iterator that points to the first case in the
3435  /// SwitchInst.
// NOTE(review): const case_begin() signature stripped here.
3437  return ConstCaseIt(this, 0);
3438  }
3439 
3440  /// Returns a read/write iterator that points one past the last in the
3441  /// SwitchInst.
// NOTE(review): case_end() signature stripped here.
3443  return CaseIt(this, getNumCases());
3444  }
3445 
3446  /// Returns a read-only iterator that points one past the last in the
3447  /// SwitchInst.
// NOTE(review): const case_end() signature stripped here.
3449  return ConstCaseIt(this, getNumCases());
3450  }
3451 
3452  /// Iteration adapter for range-for loops.
// NOTE(review): cases() signature stripped here.
3454  return make_range(case_begin(), case_end());
3455  }
3456 
3457  /// Constant iteration adapter for range-for loops.
// NOTE(review): const cases() signature stripped here.
3459  return make_range(case_begin(), case_end());
3460  }
3461 
3462  /// Returns an iterator that points to the default case.
3463  /// Note: this iterator allows to resolve successor only. Attempt
3464  /// to resolve case value causes an assertion.
3465  /// Also note, that increment and decrement also causes an assertion and
3466  /// makes iterator invalid.
// NOTE(review): case_default() signatures stripped (mutable, then const).
3468  return CaseIt(this, DefaultPseudoIndex);
3469  }
3471  return ConstCaseIt(this, DefaultPseudoIndex);
3472  }
3473 
3474  /// Search all of the case values for the specified constant. If it is
3475  /// explicitly handled, return the case iterator of it, otherwise return
3476  /// default case iterator to indicate that it is handled by the default
3477  /// handler.
// NOTE(review): findCaseValue signature and the opening of the find-if call
// were stripped; the body performs a linear search over cases().
3480  cases(), [C](CaseHandle &Case) { return Case.getCaseValue() == C; });
3481  if (I != case_end())
3482  return I;
3483 
3484  return case_default();
3485  }
// NOTE(review): const findCaseValue overload; opening lines stripped.
3488  return Case.getCaseValue() == C;
3489  });
3490  if (I != case_end())
3491  return I;
3492 
3493  return case_default();
3494  }
3495 
3496  /// Finds the unique case value for a given successor. Returns null if the
3497  /// successor is not found, not unique, or is the default case.
// NOTE(review): findCaseDest signature stripped (takes a BasicBlock *BB).
3499  if (BB == getDefaultDest())
3500  return nullptr;
3501 
3502  ConstantInt *CI = nullptr;
3503  for (auto Case : cases()) {
3504  if (Case.getCaseSuccessor() != BB)
3505  continue;
3506 
3507  if (CI)
3508  return nullptr; // Multiple cases lead to BB.
3509 
3510  CI = Case.getCaseValue();
3511  }
3512 
3513  return CI;
3514  }
3515 
3516  /// Add an entry to the switch instruction.
3517  /// Note:
3518  /// This action invalidates case_end(). Old case_end() iterator will
3519  /// point to the added case.
3520  void addCase(ConstantInt *OnVal, BasicBlock *Dest);
3521 
3522  /// This method removes the specified case and its successor from the switch
3523  /// instruction. Note that this operation may reorder the remaining cases at
3524  /// index idx and above.
3525  /// Note:
3526  /// This action invalidates iterators for all cases following the one removed,
3527  /// including the case_end() iterator. It returns an iterator for the next
3528  /// case.
3529  CaseIt removeCase(CaseIt I);
3530 
3531  unsigned getNumSuccessors() const { return getNumOperands()/2; }
// Successor 0 is the default dest (operand 1); case successors follow at
// every odd operand slot.
3532  BasicBlock *getSuccessor(unsigned idx) const {
3533  assert(idx < getNumSuccessors() &&"Successor idx out of range for switch!");
3534  return cast<BasicBlock>(getOperand(idx*2+1));
3535  }
3536  void setSuccessor(unsigned idx, BasicBlock *NewSucc) {
3537  assert(idx < getNumSuccessors() && "Successor # out of range for switch!");
3538  setOperand(idx * 2 + 1, NewSucc);
3539  }
3540 
3541  // Methods for support type inquiry through isa, cast, and dyn_cast:
3542  static bool classof(const Instruction *I) {
3543  return I->getOpcode() == Instruction::Switch;
3544  }
3545  static bool classof(const Value *V) {
3546  return isa<Instruction>(V) && classof(cast<Instruction>(V));
3547  }
3548 };
3549 
3550 /// A wrapper class to simplify modification of SwitchInst cases along with
3551 /// their prof branch_weights metadata.
// NOTE(review): the 'class SwitchInstProfUpdateWrapper {' declaration line
// was stripped by extraction — confirm against upstream Instructions.h.
3553  SwitchInst &SI;
// NOTE(review): a member declaration (the weights storage, per upstream) was
// stripped from the line above 'bool Changed'.
3555  bool Changed = false;
3556 
3557 protected:
3558  static MDNode *getProfBranchWeightsMD(const SwitchInst &SI);
3559 
// NOTE(review): the buildProfBranchWeightsMD() declaration was stripped here
// (it is called from the destructor body below).
3561 
3562  void init();
3563 
3564 public:
// NOTE(review): a public alias/constructor line was stripped here.
// The wrapper forwards to the wrapped SwitchInst transparently:
3566  SwitchInst *operator->() { return &SI; }
3567  SwitchInst &operator*() { return SI; }
3568  operator SwitchInst *() { return &SI; }
3569 
// NOTE(review): the constructor declaration line was stripped here.
3571 
// Destructor flushes accumulated weight changes back into MD_prof metadata.
// NOTE(review): the destructor's signature line was stripped here.
3573  if (Changed)
3574  SI.setMetadata(LLVMContext::MD_prof, buildProfBranchWeightsMD());
3575  }
3576 
3577  /// Delegate the call to the underlying SwitchInst::removeCase() and remove
3578  /// correspondent branch weight.
// NOTE(review): the removeCase declaration line was stripped here.
3580 
3581  /// Delegate the call to the underlying SwitchInst::addCase() and set the
3582  /// specified branch weight for the added case.
3583  void addCase(ConstantInt *OnVal, BasicBlock *Dest, CaseWeightOpt W);
3584 
3585  /// Delegate the call to the underlying SwitchInst::eraseFromParent() and mark
3586  /// this object to not touch the underlying SwitchInst in destructor.
// NOTE(review): the eraseFromParent declaration line was stripped here.
3588 
3589  void setSuccessorWeight(unsigned idx, CaseWeightOpt W);
3590  CaseWeightOpt getSuccessorWeight(unsigned idx);
3591 
3592  static CaseWeightOpt getSuccessorWeight(const SwitchInst &SI, unsigned idx);
3593 };
3594 
3595 template <>
// NOTE(review): the struct-declaration line of this OperandTraits
// specialization for SwitchInst was stripped by extraction; only the closing
// brace survives. Confirm against upstream Instructions.h.
3597 };
3598 
3600 
3601 //===----------------------------------------------------------------------===//
3602 // IndirectBrInst Class
3603 //===----------------------------------------------------------------------===//
3604 
3605 //===---------------------------------------------------------------------------
3606 /// Indirect Branch Instruction.
3607 ///
3608 class IndirectBrInst : public Instruction {
// NOTE(review): the iterator-struct header lines, a constructor line, the
// operand-accessor macro line, and the successors() signature lines were
// dropped by the doc extraction; verify against upstream Instructions.h.
// Number of operand slots allocated so far; grown by growOperands().
3609  unsigned ReservedSpace;
3610 
3611  // Operand[0] = Address to jump to
3612  // Operand[n+1] = n-th destination
3613  IndirectBrInst(const IndirectBrInst &IBI);
3614 
3615  /// Create a new indirectbr instruction, specifying an
3616  /// Address to jump to. The number of expected destinations can be specified
3617  /// here to make memory allocation more efficient. This constructor can also
3618  /// autoinsert before another instruction.
3619  IndirectBrInst(Value *Address, unsigned NumDests, Instruction *InsertBefore);
3620 
3621  /// Create a new indirectbr instruction, specifying an
3622  /// Address to jump to. The number of expected destinations can be specified
3623  /// here to make memory allocation more efficient. This constructor also
3624  /// autoinserts at the end of the specified BasicBlock.
3625  IndirectBrInst(Value *Address, unsigned NumDests, BasicBlock *InsertAtEnd);
3626 
3627  // allocate space for exactly zero operands
3628  void *operator new(size_t S) { return User::operator new(S); }
3629 
3630  void init(Value *Address, unsigned NumDests);
3631  void growOperands();
3632 
3633 protected:
3634  // Note: Instruction needs to be a friend here to call cloneImpl.
3635  friend class Instruction;
3636 
3637  IndirectBrInst *cloneImpl() const;
3638 
3639 public:
3640  void operator delete(void *Ptr) { User::operator delete(Ptr); }
3641 
3642  /// Iterator type that casts an operand to a basic block.
3643  ///
3644  /// This only makes sense because the successors are stored as adjacent
3645  /// operands for indirectbr instructions.
// (struct succ_op_iterator header line lost in extraction)
3647  : iterator_adaptor_base<succ_op_iterator, value_op_iterator,
3648  std::random_access_iterator_tag, BasicBlock *,
3649  ptrdiff_t, BasicBlock *, BasicBlock *> {
3651 
3652  BasicBlock *operator*() const { return cast<BasicBlock>(*I); }
3653  BasicBlock *operator->() const { return operator*(); }
3654  };
3655 
3656  /// The const version of `succ_op_iterator`.
// (struct const_succ_op_iterator header line lost in extraction)
3658  : iterator_adaptor_base<const_succ_op_iterator, const_value_op_iterator,
3659  std::random_access_iterator_tag,
3660  const BasicBlock *, ptrdiff_t, const BasicBlock *,
3661  const BasicBlock *> {
3663  : iterator_adaptor_base(I) {}
3664 
3665  const BasicBlock *operator*() const { return cast<BasicBlock>(*I); }
3666  const BasicBlock *operator->() const { return operator*(); }
3667  };
3668 
3669  static IndirectBrInst *Create(Value *Address, unsigned NumDests,
3670  Instruction *InsertBefore = nullptr) {
3671  return new IndirectBrInst(Address, NumDests, InsertBefore);
3672  }
3673 
3674  static IndirectBrInst *Create(Value *Address, unsigned NumDests,
3675  BasicBlock *InsertAtEnd) {
3676  return new IndirectBrInst(Address, NumDests, InsertAtEnd);
3677  }
3678 
3679  /// Provide fast operand accessors.
3681 
3682  // Accessor Methods for IndirectBrInst instruction.
3683  Value *getAddress() { return getOperand(0); }
3684  const Value *getAddress() const { return getOperand(0); }
3685  void setAddress(Value *V) { setOperand(0, V); }
3686 
3687  /// return the number of possible destinations in this
3688  /// indirectbr instruction.
3689  unsigned getNumDestinations() const { return getNumOperands()-1; }
3690 
3691  /// Return the specified destination.
3692  BasicBlock *getDestination(unsigned i) { return getSuccessor(i); }
3693  const BasicBlock *getDestination(unsigned i) const { return getSuccessor(i); }
3694 
3695  /// Add a destination.
3696  ///
3697  void addDestination(BasicBlock *Dest);
3698 
3699  /// This method removes the specified successor from the
3700  /// indirectbr instruction.
3701  void removeDestination(unsigned i);
3702 
// Successors are operands 1..N; operand 0 is the jump address, hence the
// +1 / -1 adjustments below.
3703  unsigned getNumSuccessors() const { return getNumOperands()-1; }
3704  BasicBlock *getSuccessor(unsigned i) const {
3705  return cast<BasicBlock>(getOperand(i+1));
3706  }
3707  void setSuccessor(unsigned i, BasicBlock *NewSucc) {
3708  setOperand(i + 1, NewSucc);
3709  }
3710 
// Range accessor over successor blocks; std::next skips operand 0 (the
// address). Its signature line was lost in extraction.
3712  return make_range(succ_op_iterator(std::next(value_op_begin())),
3713  succ_op_iterator(value_op_end()));
3714  }
3715 
// Const counterpart of the range accessor above (signature line lost).
3717  return make_range(const_succ_op_iterator(std::next(value_op_begin())),
3718  const_succ_op_iterator(value_op_end()));
3719  }
3720 
3721  // Methods for support type inquiry through isa, cast, and dyn_cast:
3722  static bool classof(const Instruction *I) {
3723  return I->getOpcode() == Instruction::IndirectBr;
3724  }
3725  static bool classof(const Value *V) {
3726  return isa<Instruction>(V) && classof(cast<Instruction>(V));
3727  }
3728 };
3729 
3730 template <>
3732 };
3733 
3735 
3736 //===----------------------------------------------------------------------===//
3737 // InvokeInst Class
3738 //===----------------------------------------------------------------------===//
3739 
3740 /// Invoke instruction. The SubclassData field is used to hold the
3741 /// calling convention of the call.
3742 ///
3743 class InvokeInst : public CallBase {
// NOTE(review): several signature lines (one Create overload's Bundles
// parameter, and the get/set dest accessor signatures near the bottom) were
// dropped by the doc extraction; verify against upstream Instructions.h.
3744  /// The number of operands for this call beyond the called function,
3745  /// arguments, and operand bundles.
3746  static constexpr int NumExtraOperands = 2;
3747 
3748  /// The index from the end of the operand array to the normal destination.
3749  static constexpr int NormalDestOpEndIdx = -3;
3750 
3751  /// The index from the end of the operand array to the unwind destination.
3752  static constexpr int UnwindDestOpEndIdx = -2;
3753 
3754  InvokeInst(const InvokeInst &BI);
3755 
3756  /// Construct an InvokeInst given a range of arguments.
3757  ///
3758  /// Construct an InvokeInst from a range of arguments
3759  inline InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3760  BasicBlock *IfException, ArrayRef<Value *> Args,
3761  ArrayRef<OperandBundleDef> Bundles, int NumOperands,
3762  const Twine &NameStr, Instruction *InsertBefore);
3763 
3764  inline InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3765  BasicBlock *IfException, ArrayRef<Value *> Args,
3766  ArrayRef<OperandBundleDef> Bundles, int NumOperands,
3767  const Twine &NameStr, BasicBlock *InsertAtEnd);
3768 
3769  void init(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3770  BasicBlock *IfException, ArrayRef<Value *> Args,
3771  ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr);
3772 
3773  /// Compute the number of operands to allocate.
3774  static int ComputeNumOperands(int NumArgs, int NumBundleInputs = 0) {
3775  // We need one operand for the called function, plus our extra operands and
3776  // the input operand counts provided.
3777  return 1 + NumExtraOperands + NumArgs + NumBundleInputs;
3778  }
3779 
3780 protected:
3781  // Note: Instruction needs to be a friend here to call cloneImpl.
3782  friend class Instruction;
3783 
3784  InvokeInst *cloneImpl() const;
3785 
3786 public:
// Create() factories: operands (and optional bundle descriptors) are
// co-allocated with the instruction via the placement new(NumOperands[,
// DescriptorBytes]) overloads.
3787  static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3788  BasicBlock *IfException, ArrayRef<Value *> Args,
3789  const Twine &NameStr,
3790  Instruction *InsertBefore = nullptr) {
3791  int NumOperands = ComputeNumOperands(Args.size());
3792  return new (NumOperands)
3793  InvokeInst(Ty, Func, IfNormal, IfException, Args, None, NumOperands,
3794  NameStr, InsertBefore);
3795  }
3796 
3797  static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3798  BasicBlock *IfException, ArrayRef<Value *> Args,
3799  ArrayRef<OperandBundleDef> Bundles = None,
3800  const Twine &NameStr = "",
3801  Instruction *InsertBefore = nullptr) {
3802  int NumOperands =
3803  ComputeNumOperands(Args.size(), CountBundleInputs(Bundles));
3804  unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
3805 
3806  return new (NumOperands, DescriptorBytes)
3807  InvokeInst(Ty, Func, IfNormal, IfException, Args, Bundles, NumOperands,
3808  NameStr, InsertBefore);
3809  }
3810 
3811  static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3812  BasicBlock *IfException, ArrayRef<Value *> Args,
3813  const Twine &NameStr, BasicBlock *InsertAtEnd) {
3814  int NumOperands = ComputeNumOperands(Args.size());
3815  return new (NumOperands)
3816  InvokeInst(Ty, Func, IfNormal, IfException, Args, None, NumOperands,
3817  NameStr, InsertAtEnd);
3818  }
3819 
3820  static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3821  BasicBlock *IfException, ArrayRef<Value *> Args,
// (the `ArrayRef<OperandBundleDef> Bundles,` parameter line was lost here)
3823  const Twine &NameStr, BasicBlock *InsertAtEnd) {
3824  int NumOperands =
3825  ComputeNumOperands(Args.size(), CountBundleInputs(Bundles));
3826  unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
3827 
3828  return new (NumOperands, DescriptorBytes)
3829  InvokeInst(Ty, Func, IfNormal, IfException, Args, Bundles, NumOperands,
3830  NameStr, InsertAtEnd);
3831  }
3832 
// FunctionCallee convenience overloads: unpack the callee's type and value
// and forward to the FunctionType-based factories above.
3833  static InvokeInst *Create(FunctionCallee Func, BasicBlock *IfNormal,
3834  BasicBlock *IfException, ArrayRef<Value *> Args,
3835  const Twine &NameStr,
3836  Instruction *InsertBefore = nullptr) {
3837  return Create(Func.getFunctionType(), Func.getCallee(), IfNormal,
3838  IfException, Args, None, NameStr, InsertBefore);
3839  }
3840 
3841  static InvokeInst *Create(FunctionCallee Func, BasicBlock *IfNormal,
3842  BasicBlock *IfException, ArrayRef<Value *> Args,
3843  ArrayRef<OperandBundleDef> Bundles = None,
3844  const Twine &NameStr = "",
3845  Instruction *InsertBefore = nullptr) {
3846  return Create(Func.getFunctionType(), Func.getCallee(), IfNormal,
3847  IfException, Args, Bundles, NameStr, InsertBefore);
3848  }
3849 
3850  static InvokeInst *Create(FunctionCallee Func, BasicBlock *IfNormal,
3851  BasicBlock *IfException, ArrayRef<Value *> Args,
3852  const Twine &NameStr, BasicBlock *InsertAtEnd) {
3853  return Create(Func.getFunctionType(), Func.getCallee(), IfNormal,
3854  IfException, Args, NameStr, InsertAtEnd);
3855  }
3856 
3857  static InvokeInst *Create(FunctionCallee Func, BasicBlock *IfNormal,
3858  BasicBlock *IfException, ArrayRef<Value *> Args,
// (the `ArrayRef<OperandBundleDef> Bundles,` parameter line was lost here)
3860  const Twine &NameStr, BasicBlock *InsertAtEnd) {
3861  return Create(Func.getFunctionType(), Func.getCallee(), IfNormal,
3862  IfException, Args, Bundles, NameStr, InsertAtEnd);
3863  }
3864 
3865  /// Create a clone of \p II with a different set of operand bundles and
3866  /// insert it before \p InsertPt.
3867  ///
3868  /// The returned invoke instruction is identical to \p II in every way except
3869  /// that the operand bundles for the new instruction are set to the operand
3870  /// bundles in \p Bundles.
3871  static InvokeInst *Create(InvokeInst *II, ArrayRef<OperandBundleDef> Bundles,
3872  Instruction *InsertPt = nullptr);
3873 
3874  // get*Dest - Return the destination basic blocks...
// Destination accessors (their signature lines — presumably
// getNormalDest()/getUnwindDest()/setNormalDest()/setUnwindDest() — were
// lost in extraction). Op<> with the negative *OpEndIdx constants indexes
// from the end of the operand array, per the constants documented above.
3876  return cast<BasicBlock>(Op<NormalDestOpEndIdx>());
3877  }
3879  return cast<BasicBlock>(Op<UnwindDestOpEndIdx>());
3880  }
3882  Op<NormalDestOpEndIdx>() = reinterpret_cast<Value *>(B);
3883  }
3885  Op<UnwindDestOpEndIdx>() = reinterpret_cast<Value *>(B);
3886  }
3887 
3888  /// Get the landingpad instruction from the landing pad
3889  /// block (the unwind destination).
3890  LandingPadInst *getLandingPadInst() const;
3891 
3892  BasicBlock *getSuccessor(unsigned i) const {
3893  assert(i < 2 && "Successor # out of range for invoke!");
3894  return i == 0 ? getNormalDest() : getUnwindDest();
3895  }
3896 
3897  void setSuccessor(unsigned i, BasicBlock *NewSucc) {
3898  assert(i < 2 && "Successor # out of range for invoke!");
3899  if (i == 0)
3900  setNormalDest(NewSucc);
3901  else
3902  setUnwindDest(NewSucc);
3903  }
3904 
3905  unsigned getNumSuccessors() const { return 2; }
3906 
3907  // Methods for support type inquiry through isa, cast, and dyn_cast:
3908  static bool classof(const Instruction *I) {
3909  return (I->getOpcode() == Instruction::Invoke);
3910  }
3911  static bool classof(const Value *V) {
3912  return isa<Instruction>(V) && classof(cast<Instruction>(V));
3913  }
3914 
3915 private:
3916  // Shadow Instruction::setInstructionSubclassData with a private forwarding
3917  // method so that subclasses cannot accidentally use it.
3918  template <typename Bitfield>
3919  void setSubclassData(typename Bitfield::Type Value) {
3920  Instruction::setSubclassData<Bitfield>(Value);
3921  }
3922 };
3923 
// Inline constructor definition (insert-before form). The operand array is
// addressed relative to op_end(this); init() then populates the callee,
// destinations, arguments, and bundles.
3924 InvokeInst::InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3925  BasicBlock *IfException, ArrayRef<Value *> Args,
3926  ArrayRef<OperandBundleDef> Bundles, int NumOperands,
3927  const Twine &NameStr, Instruction *InsertBefore)
3928  : CallBase(Ty->getReturnType(), Instruction::Invoke,
3929  OperandTraits<CallBase>::op_end(this) - NumOperands, NumOperands,
3930  InsertBefore) {
3931  init(Ty, Func, IfNormal, IfException, Args, Bundles, NameStr);
3932 }
3933 
// Inline constructor definition (insert-at-end-of-block form); otherwise
// identical to the insert-before constructor above it in the file.
3934 InvokeInst::InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3935  BasicBlock *IfException, ArrayRef<Value *> Args,
3936  ArrayRef<OperandBundleDef> Bundles, int NumOperands,
3937  const Twine &NameStr, BasicBlock *InsertAtEnd)
3938  : CallBase(Ty->getReturnType(), Instruction::Invoke,
3939  OperandTraits<CallBase>::op_end(this) - NumOperands, NumOperands,
3940  InsertAtEnd) {
3941  init(Ty, Func, IfNormal, IfException, Args, Bundles, NameStr);
3942 }
3943 
3944 //===----------------------------------------------------------------------===//
3945 // CallBrInst Class
3946 //===----------------------------------------------------------------------===//
3947 
3948 /// CallBr instruction, tracking function calls that may not return control but
3949 /// instead transfer it to a third location. The SubclassData field is used to
3950 /// hold the calling convention of the call.
3951 ///
3952 class CallBrInst : public CallBase {
// NOTE(review): several `ArrayRef<Value *> Args` / `ArrayRef<BasicBlock *>
// IndirectDests` / `ArrayRef<OperandBundleDef> Bundles` parameter lines and
// a few accessor signature lines were dropped by the doc extraction; verify
// against upstream Instructions.h.
3953 
3954  unsigned NumIndirectDests;
3955 
3956  CallBrInst(const CallBrInst &BI);
3957 
3958  /// Construct a CallBrInst given a range of arguments.
3959  ///
3960  /// Construct a CallBrInst from a range of arguments
3961  inline CallBrInst(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest,
3962  ArrayRef<BasicBlock *> IndirectDests,
3964  ArrayRef<OperandBundleDef> Bundles, int NumOperands,
3965  const Twine &NameStr, Instruction *InsertBefore);
3966 
3967  inline CallBrInst(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest,
3968  ArrayRef<BasicBlock *> IndirectDests,
3970  ArrayRef<OperandBundleDef> Bundles, int NumOperands,
3971  const Twine &NameStr, BasicBlock *InsertAtEnd);
3972 
3973  void init(FunctionType *FTy, Value *Func, BasicBlock *DefaultDest,
3975  ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr);
3976 
3977  /// Should the Indirect Destinations change, scan + update the Arg list.
3978  void updateArgBlockAddresses(unsigned i, BasicBlock *B);
3979 
3980  /// Compute the number of operands to allocate.
3981  static int ComputeNumOperands(int NumArgs, int NumIndirectDests,
3982  int NumBundleInputs = 0) {
3983  // We need one operand for the called function, plus our extra operands and
3984  // the input operand counts provided.
3985  return 2 + NumIndirectDests + NumArgs + NumBundleInputs;
3986  }
3987 
3988 protected:
3989  // Note: Instruction needs to be a friend here to call cloneImpl.
3990  friend class Instruction;
3991 
3992  CallBrInst *cloneImpl() const;
3993 
3994 public:
// Create() factories: operands (and optional bundle descriptors) are
// co-allocated with the instruction via the placement new overloads.
3995  static CallBrInst *Create(FunctionType *Ty, Value *Func,
3996  BasicBlock *DefaultDest,
3997  ArrayRef<BasicBlock *> IndirectDests,
3998  ArrayRef<Value *> Args, const Twine &NameStr,
3999  Instruction *InsertBefore = nullptr) {
4000  int NumOperands = ComputeNumOperands(Args.size(), IndirectDests.size());
4001  return new (NumOperands)
4002  CallBrInst(Ty, Func, DefaultDest, IndirectDests, Args, None,
4003  NumOperands, NameStr, InsertBefore);
4004  }
4005 
4006  static CallBrInst *Create(FunctionType *Ty, Value *Func,
4007  BasicBlock *DefaultDest,
4008  ArrayRef<BasicBlock *> IndirectDests,
4010  ArrayRef<OperandBundleDef> Bundles = None,
4011  const Twine &NameStr = "",
4012  Instruction *InsertBefore = nullptr) {
4013  int NumOperands = ComputeNumOperands(Args.size(), IndirectDests.size(),
4014  CountBundleInputs(Bundles));
4015  unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
4016 
4017  return new (NumOperands, DescriptorBytes)
4018  CallBrInst(Ty, Func, DefaultDest, IndirectDests, Args, Bundles,
4019  NumOperands, NameStr, InsertBefore);
4020  }
4021 
4022  static CallBrInst *Create(FunctionType *Ty, Value *Func,
4023  BasicBlock *DefaultDest,
4024  ArrayRef<BasicBlock *> IndirectDests,
4025  ArrayRef<Value *> Args, const Twine &NameStr,
4026  BasicBlock *InsertAtEnd) {
4027  int NumOperands = ComputeNumOperands(Args.size(), IndirectDests.size());
4028  return new (NumOperands)
4029  CallBrInst(Ty, Func, DefaultDest, IndirectDests, Args, None,
4030  NumOperands, NameStr, InsertAtEnd);
4031  }
4032 
4033  static CallBrInst *Create(FunctionType *Ty, Value *Func,
4034  BasicBlock *DefaultDest,
4035  ArrayRef<BasicBlock *> IndirectDests,
4038  const Twine &NameStr, BasicBlock *InsertAtEnd) {
4039  int NumOperands = ComputeNumOperands(Args.size(), IndirectDests.size(),
4040  CountBundleInputs(Bundles));
4041  unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
4042 
4043  return new (NumOperands, DescriptorBytes)
4044  CallBrInst(Ty, Func, DefaultDest, IndirectDests, Args, Bundles,
4045  NumOperands, NameStr, InsertAtEnd);
4046  }
4047 
// FunctionCallee convenience overloads forwarding to the factories above.
4048  static CallBrInst *Create(FunctionCallee Func, BasicBlock *DefaultDest,
4049  ArrayRef<BasicBlock *> IndirectDests,
4050  ArrayRef<Value *> Args, const Twine &NameStr,
4051  Instruction *InsertBefore = nullptr) {
4052  return Create(Func.getFunctionType(), Func.getCallee(), DefaultDest,
4053  IndirectDests, Args, NameStr, InsertBefore);
4054  }
4055 
4056  static CallBrInst *Create(FunctionCallee Func, BasicBlock *DefaultDest,
4057  ArrayRef<BasicBlock *> IndirectDests,
4059  ArrayRef<OperandBundleDef> Bundles = None,
4060  const Twine &NameStr = "",
4061  Instruction *InsertBefore = nullptr) {
4062  return Create(Func.getFunctionType(), Func.getCallee(), DefaultDest,
4063  IndirectDests, Args, Bundles, NameStr, InsertBefore);
4064  }
4065 
4066  static CallBrInst *Create(FunctionCallee Func, BasicBlock *DefaultDest,
4067  ArrayRef<BasicBlock *> IndirectDests,
4068  ArrayRef<Value *> Args, const Twine &NameStr,
4069  BasicBlock *InsertAtEnd) {
4070  return Create(Func.getFunctionType(), Func.getCallee(), DefaultDest,
4071  IndirectDests, Args, NameStr, InsertAtEnd);
4072  }
4073 
4075  BasicBlock *DefaultDest,
4076  ArrayRef<BasicBlock *> IndirectDests,
4079  const Twine &NameStr, BasicBlock *InsertAtEnd) {
4080  return Create(Func.getFunctionType(), Func.getCallee(), DefaultDest,
4081  IndirectDests, Args, Bundles, NameStr, InsertAtEnd);
4082  }
4083 
4084  /// Create a clone of \p CBI with a different set of operand bundles and
4085  /// insert it before \p InsertPt.
4086  ///
4087  /// The returned callbr instruction is identical to \p CBI in every way
4088  /// except that the operand bundles for the new instruction are set to the
4089  /// operand bundles in \p Bundles.
4090  static CallBrInst *Create(CallBrInst *CBI,
4092  Instruction *InsertPt = nullptr);
4093 
4094  /// Return the number of callbr indirect dest labels.
4095  ///
4096  unsigned getNumIndirectDests() const { return NumIndirectDests; }
4097 
4098  /// getIndirectDestLabel - Return the i-th indirect dest label.
4099  ///
4100  Value *getIndirectDestLabel(unsigned i) const {
4101  assert(i < getNumIndirectDests() && "Out of bounds!");
4103  1);
4104  }
4105 
4106  Value *getIndirectDestLabelUse(unsigned i) const {
4107  assert(i < getNumIndirectDests() && "Out of bounds!");
4109  1);
4110  }
4111 
4112  // Return the destination basic blocks...
// Destinations sit at the tail of the operand array, addressed relative to
// Op<-1>(): the default dest precedes the indirect-dest run.
4114  return cast<BasicBlock>(*(&Op<-1>() - getNumIndirectDests() - 1));
4115  }
4116  BasicBlock *getIndirectDest(unsigned i) const {
4117  return cast_or_null<BasicBlock>(*(&Op<-1>() - getNumIndirectDests() + i));
4118  }
4120  SmallVector<BasicBlock *, 16> IndirectDests;
4121  for (unsigned i = 0, e = getNumIndirectDests(); i < e; ++i)
4122  IndirectDests.push_back(getIndirectDest(i));
4123  return IndirectDests;
4124  }
4126  *(&Op<-1>() - getNumIndirectDests() - 1) = reinterpret_cast<Value *>(B);
4127  }
4128  void setIndirectDest(unsigned i, BasicBlock *B) {
// Keep any blockaddress arguments referring to the old block in sync.
4129  updateArgBlockAddresses(i, B);
4130  *(&Op<-1>() - getNumIndirectDests() + i) = reinterpret_cast<Value *>(B);
4131  }
4132 
4133  BasicBlock *getSuccessor(unsigned i) const {
4134  assert(i < getNumSuccessors() + 1 &&
4135  "Successor # out of range for callbr!");
4136  return i == 0 ? getDefaultDest() : getIndirectDest(i - 1);
4137  }
4138 
4139  void setSuccessor(unsigned i, BasicBlock *NewSucc) {
4140  assert(i < getNumIndirectDests() + 1 &&
4141  "Successor # out of range for callbr!");
4142  return i == 0 ? setDefaultDest(NewSucc) : setIndirectDest(i - 1, NewSucc);
4143  }
4144 
4145  unsigned getNumSuccessors() const { return getNumIndirectDests() + 1; }
4146 
4147  // Methods for support type inquiry through isa, cast, and dyn_cast:
4148  static bool classof(const Instruction *I) {
4149  return (I->getOpcode() == Instruction::CallBr);
4150  }
4151  static bool classof(const Value *V) {
4152  return isa<Instruction>(V) && classof(cast<Instruction>(V));
4153  }
4154 
4155 private:
4156  // Shadow Instruction::setInstructionSubclassData with a private forwarding
4157  // method so that subclasses cannot accidentally use it.
4158  template <typename Bitfield>
4159  void setSubclassData(typename Bitfield::Type Value) {
4160  Instruction::setSubclassData<Bitfield>(Value);
4161  }
4162 };
4163 
// Inline constructor definition (insert-before form). The operand array is
// addressed relative to op_end(this); init() then populates the callee,
// destinations, arguments, and bundles.
4164 CallBrInst::CallBrInst(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest,
4165  ArrayRef<BasicBlock *> IndirectDests,
4166  ArrayRef<Value *> Args,
4167  ArrayRef<OperandBundleDef> Bundles, int NumOperands,
4168  const Twine &NameStr, Instruction *InsertBefore)
4169  : CallBase(Ty->getReturnType(), Instruction::CallBr,
4170  OperandTraits<CallBase>::op_end(this) - NumOperands, NumOperands,
4171  InsertBefore) {
4172  init(Ty, Func, DefaultDest, IndirectDests, Args, Bundles, NameStr);
4173 }
4174 
// Inline constructor definition (insert-at-end-of-block form); otherwise
// identical to the insert-before constructor above it in the file.
4175 CallBrInst::CallBrInst(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest,
4176  ArrayRef<BasicBlock *> IndirectDests,
4177  ArrayRef<Value *> Args,
4178  ArrayRef<OperandBundleDef> Bundles, int NumOperands,
4179  const Twine &NameStr, BasicBlock *InsertAtEnd)
4180  : CallBase(Ty->getReturnType(), Instruction::CallBr,
4181  OperandTraits<CallBase>::op_end(this) - NumOperands, NumOperands,
4182  InsertAtEnd) {
4183  init(Ty, Func, DefaultDest, IndirectDests, Args, Bundles, NameStr);
4184 }
4185 
4186 //===----------------------------------------------------------------------===//
4187 // ResumeInst Class
4188 //===----------------------------------------------------------------------===//
4189 
4190 //===---------------------------------------------------------------------------
4191 /// Resume the propagation of an exception.
4192 ///
4193 class ResumeInst : public Instruction {
4194  ResumeInst(const ResumeInst &RI);
4195 
// Operand 0 is the exception value being propagated (see getValue()).
4196  explicit ResumeInst(Value *Exn, Instruction *InsertBefore=nullptr);
4197  ResumeInst(Value *Exn, BasicBlock *InsertAtEnd);
4198 
4199 protected:
4200  // Note: Instruction needs to be a friend here to call cloneImpl.
4201  friend class Instruction;
4202 
4203  ResumeInst *cloneImpl() const;
4204 
4205 public:
// Placement new(1): exactly one co-allocated operand slot (the exception).
4206  static ResumeInst *Create(Value *Exn, Instruction *InsertBefore = nullptr) {
4207  return new(1) ResumeInst(Exn, InsertBefore);
4208  }
4209 
4210  static ResumeInst *Create(Value *Exn, BasicBlock *InsertAtEnd) {
4211  return new(1) ResumeInst(Exn, InsertAtEnd);
4212  }
4213 
4214  /// Provide fast operand accessors
// (the operand-accessor macro line was lost in extraction)
4216 
4217  /// Convenience accessor.
4218  Value *getValue() const { return Op<0>(); }
4219 
// resume is a terminator with no successors (it unwinds out of the function).
4220  unsigned getNumSuccessors() const { return 0; }
4221 
4222  // Methods for support type inquiry through isa, cast, and dyn_cast:
4223  static bool classof(const Instruction *I) {
4224  return I->getOpcode() == Instruction::Resume;
4225  }
4226  static bool classof(const Value *V) {
4227  return isa<Instruction>(V) && classof(cast<Instruction>(V));
4228  }
4229 
4230 private:
// Private + unreachable: generic terminator code must never index
// successors of a resume.
4231  BasicBlock *getSuccessor(unsigned idx) const {
4232  llvm_unreachable("ResumeInst has no successors!");
4233  }
4234 
4235  void setSuccessor(unsigned idx, BasicBlock *NewSucc) {
4236  llvm_unreachable("ResumeInst has no successors!");
4237  }
4238 };
4239 
4240 template <>
4242  public FixedNumOperandTraits<ResumeInst, 1> {
4243 };
4244 
4246 
4247 //===----------------------------------------------------------------------===//
4248 // CatchSwitchInst Class
4249 //===----------------------------------------------------------------------===//
// NOTE(review): the class declaration line (presumably
// `class CatchSwitchInst : public Instruction {`) and several method
// signature lines were dropped by the doc extraction; verify against
// upstream llvm/IR/Instructions.h.
// Bit 0 of the subclass data records whether operand 1 is an unwind dest.
4251  using UnwindDestField = BoolBitfieldElementT<0>;
4252 
4253  /// The number of operands actually allocated. NumOperands is
4254  /// the number actually in use.
4255  unsigned ReservedSpace;
4256 
4257  // Operand[0] = Outer scope
4258  // Operand[1] = Unwind block destination
4259  // Operand[n] = BasicBlock to go to on match
4260  CatchSwitchInst(const CatchSwitchInst &CSI);
4261 
4262  /// Create a new switch instruction, specifying a
4263  /// default destination. The number of additional handlers can be specified
4264  /// here to make memory allocation more efficient.
4265  /// This constructor can also autoinsert before another instruction.
4266  CatchSwitchInst(Value *ParentPad, BasicBlock *UnwindDest,
4267  unsigned NumHandlers, const Twine &NameStr,
4268  Instruction *InsertBefore);
4269 
4270  /// Create a new switch instruction, specifying a
4271  /// default destination. The number of additional handlers can be specified
4272  /// here to make memory allocation more efficient.
4273  /// This constructor also autoinserts at the end of the specified BasicBlock.
4274  CatchSwitchInst(Value *ParentPad, BasicBlock *UnwindDest,
4275  unsigned NumHandlers, const Twine &NameStr,
4276  BasicBlock *InsertAtEnd);
4277 
4278  // allocate space for exactly zero operands
4279  void *operator new(size_t S) { return User::operator new(S); }
4280 
4281  void init(Value *ParentPad, BasicBlock *UnwindDest, unsigned NumReserved);
4282  void growOperands(unsigned Size);
4283 
4284 protected:
4285  // Note: Instruction needs to be a friend here to call cloneImpl.
4286  friend class Instruction;
4287 
4288  CatchSwitchInst *cloneImpl() const;
4289 
4290 public:
4291  void operator delete(void *Ptr) { return User::operator delete(Ptr); }
4292 
4293  static CatchSwitchInst *Create(Value *ParentPad, BasicBlock *UnwindDest,
4294  unsigned NumHandlers,
4295  const Twine &NameStr = "",
4296  Instruction *InsertBefore = nullptr) {
4297  return new CatchSwitchInst(ParentPad, UnwindDest, NumHandlers, NameStr,
4298  InsertBefore);
4299  }
4300 
4301  static CatchSwitchInst *Create(Value *ParentPad, BasicBlock *UnwindDest,
4302  unsigned NumHandlers, const Twine &NameStr,
4303  BasicBlock *InsertAtEnd) {
4304  return new CatchSwitchInst(ParentPad, UnwindDest, NumHandlers, NameStr,
4305  InsertAtEnd);
4306  }
4307 
4308  /// Provide fast operand accessors
4310 
4311  // Accessor Methods for CatchSwitch stmt
4312  Value *getParentPad() const { return getOperand(0); }
4313  void setParentPad(Value *ParentPad) { setOperand(0, ParentPad); }
4314 
4315  // Accessor Methods for CatchSwitch stmt
4316  bool hasUnwindDest() const { return getSubclassData<UnwindDestField>(); }
4317  bool unwindsToCaller() const { return !hasUnwindDest(); }
// Unwind-dest getter (its signature line was lost in extraction): operand 1
// holds the unwind destination only when hasUnwindDest() is set.
4319  if (hasUnwindDest())
4320  return cast<BasicBlock>(getOperand(1));
4321  return nullptr;
4322  }
4323  void setUnwindDest(BasicBlock *UnwindDest) {
4324  assert(UnwindDest);
4325  assert(hasUnwindDest());
4326  setOperand(1, UnwindDest);
4327  }
4328 
4329  /// return the number of 'handlers' in this catchswitch
4330  /// instruction, except the default handler
4331  unsigned getNumHandlers() const {
4332  if (hasUnwindDest())
4333  return getNumOperands() - 2;
4334  return getNumOperands() - 1;
4335  }
4336 
4337 private:
// Deref helpers used by the mapped handler iterators below to turn operand
// Values into BasicBlocks.
4338  static BasicBlock *handler_helper(Value *V) { return cast<BasicBlock>(V); }
4339  static const BasicBlock *handler_helper(const Value *V) {
4340  return cast<BasicBlock>(V);
4341  }
4342 
4343 public:
4344  using DerefFnTy = BasicBlock *(*)(Value *);
4347  using ConstDerefFnTy = const BasicBlock *(*)(const Value *);
4348  using const_handler_iterator =
4351 
4352  /// Returns an iterator that points to the first handler in CatchSwitchInst.
// (handler_begin() signature line lost in extraction) Handlers start after
// the parent pad, and after the unwind dest when one is present.
4354  op_iterator It = op_begin() + 1;
4355  if (hasUnwindDest())
4356  ++It;
4357  return handler_iterator(It, DerefFnTy(handler_helper));
4358  }
4359 
4360  /// Returns an iterator that points to the first handler in the
4361  /// CatchSwitchInst.
4363  const_op_iterator It = op_begin() + 1;
4364  if (hasUnwindDest())
4365  ++It;
4366  return const_handler_iterator(It, ConstDerefFnTy(handler_helper));
4367  }
4368 
4369  /// Returns a read-only iterator that points one past the last
4370  /// handler in the CatchSwitchInst.
4372  return handler_iterator(op_end(), DerefFnTy(handler_helper));
4373  }
4374 
4375  /// Returns an iterator that points one past the last handler in the
4376  /// CatchSwitchInst.
4378  return const_handler_iterator(op_end(), ConstDerefFnTy(handler_helper));
4379  }
4380 
4381  /// iteration adapter for range-for loops.
4383  return make_range(handler_begin(), handler_end());
4384  }
4385 
4386  /// iteration adapter for range-for loops.
4388  return make_range(handler_begin(), handler_end());
4389  }
4390 
4391  /// Add an entry to the switch instruction...
4392  /// Note:
4393  /// This action invalidates handler_end(). Old handler_end() iterator will
4394  /// point to the added handler.
4395  void addHandler(BasicBlock *Dest);
4396 
4397  void removeHandler(handler_iterator HI);
4398 
4399  unsigned getNumSuccessors() const { return getNumOperands() - 1; }
4400  BasicBlock *getSuccessor(unsigned Idx) const {
4401  assert(Idx < getNumSuccessors() &&
4402  "Successor # out of range for catchswitch!");
4403  return cast<BasicBlock>(getOperand(Idx + 1));
4404  }
4405  void setSuccessor(unsigned Idx, BasicBlock *NewSucc) {
4406  assert(Idx < getNumSuccessors() &&
4407  "Successor # out of range for catchswitch!");
4408  setOperand(Idx + 1, NewSucc);
4409  }
4410 
4411  // Methods for support type inquiry through isa, cast, and dyn_cast:
4412  static bool classof(const Instruction *I) {
4413  return I->getOpcode() == Instruction::CatchSwitch;
4414  }
4415  static bool classof(const Value *V) {
4416  return isa<Instruction>(V) && classof(cast<Instruction>(V));
4417  }
4418 };
4419 
4420 template <>
4422 
4424 
4425 //===----------------------------------------------------------------------===//
4426 // CleanupPadInst Class
4427 //===----------------------------------------------------------------------===//
// NOTE(review): the class declaration line (a FuncletPadInst subclass, per
// the base-initializers below) and the Create() signature lines were
// dropped by the doc extraction; verify against upstream Instructions.h.
4429 private:
4430  explicit CleanupPadInst(Value *ParentPad, ArrayRef<Value *> Args,
4431  unsigned Values, const Twine &NameStr,
4432  Instruction *InsertBefore)
4433  : FuncletPadInst(Instruction::CleanupPad, ParentPad, Args, Values,
4434  NameStr, InsertBefore) {}
4435  explicit CleanupPadInst(Value *ParentPad, ArrayRef<Value *> Args,
4436  unsigned Values, const Twine &NameStr,
4437  BasicBlock *InsertAtEnd)
4438  : FuncletPadInst(Instruction::CleanupPad, ParentPad, Args, Values,
4439  NameStr, InsertAtEnd) {}
4440 
4441 public:
// Create() factory (signature line lost in extraction): Values counts the
// user args plus one extra operand slot for ParentPad.
4443  const Twine &NameStr = "",
4444  Instruction *InsertBefore = nullptr) {
4445  unsigned Values = 1 + Args.size();
4446  return new (Values)
4447  CleanupPadInst(ParentPad, Args, Values, NameStr, InsertBefore);
4448  }
4449 
4451  const Twine &NameStr, BasicBlock *InsertAtEnd) {
4452  unsigned Values = 1 + Args.size();
4453  return new (Values)
4454  CleanupPadInst(ParentPad, Args, Values, NameStr, InsertAtEnd);
4455  }
4456 
4457  /// Methods for support type inquiry through isa, cast, and dyn_cast:
4458  static bool classof(const Instruction *I) {
4459  return I->getOpcode() == Instruction::CleanupPad;
4460  }
4461  static bool classof(const Value *V) {
4462  return isa<Instruction>(V) && classof(cast<Instruction>(V));
4463  }
4464 };
4465 
4466 //===----------------------------------------------------------------------===//
4467 // CatchPadInst Class
4468 //===----------------------------------------------------------------------===//
4470 private:
4471  explicit CatchPadInst(Value *CatchSwitch, ArrayRef<Value *> Args,
4472  unsigned Values, const Twine &NameStr,
4473  Instruction *InsertBefore)
4474  : FuncletPadInst(Instruction::CatchPad, CatchSwitch, Args, Values,
4475  NameStr, InsertBefore) {}
4476  explicit CatchPadInst(Value *CatchSwitch, ArrayRef<Value *> Args,
4477  unsigned Values, const Twine &NameStr,
4478  BasicBlock *InsertAtEnd)
4479  : FuncletPadInst(Instruction::CatchPad, CatchSwitch, Args, Values,
4480  NameStr, InsertAtEnd) {}
4481 
4482 public:
4484  const Twine &NameStr = "",
4485  Instruction *InsertBefore = nullptr) {
4486  unsigned Values = 1 + Args.size();
4487  return new (Values)
4488  CatchPadInst(CatchSwitch, Args, Values, NameStr, InsertBefore);
4489  }
4490 
4492  const Twine &NameStr, BasicBlock *InsertAtEnd) {
4493  unsigned Values = 1 + Args.size();
4494  return new (Values)
4495  CatchPadInst(CatchSwitch, Args, Values, NameStr, InsertAtEnd);
4496  }
4497 
4498  /// Convenience accessors
4500  return cast<CatchSwitchInst>(Op<-1>());
4501  }
4502  void setCatchSwitch(Value *CatchSwitch) {
4503  assert(CatchSwitch);
4504  Op<-1>() = CatchSwitch;
4505  }
4506 
4507  /// Methods for support type inquiry through isa, cast, and dyn_cast:
4508  static bool classof(const Instruction *I) {
4509  return I->getOpcode() == Instruction::CatchPad;
4510  }
4511  static bool classof(const Value *V) {
4512  return isa<Instruction>(V) && classof(cast<Instruction>(V));
4513  }
4514 };
4515 
4516 //===----------------------------------------------------------------------===//
4517 // CatchReturnInst Class
4518 //===----------------------------------------------------------------------===//
4519 
4521  CatchReturnInst(const CatchReturnInst &RI);
4522  CatchReturnInst(Value *CatchPad, BasicBlock *BB, Instruction *InsertBefore);
4523  CatchReturnInst(Value *CatchPad, BasicBlock *BB, BasicBlock *InsertAtEnd);
4524 
4525  void init(Value *CatchPad, BasicBlock *BB);
4526 
4527 protected:
4528  // Note: Instruction needs to be a friend here to call cloneImpl.
4529  friend class Instruction;
4530 
4531  CatchReturnInst *cloneImpl() const;
4532 
4533 public:
4535  Instruction *InsertBefore = nullptr) {
4536  assert(CatchPad);
4537  assert(BB);
4538  return new (2) CatchReturnInst(CatchPad, BB, InsertBefore);
4539  }
4540 
4542  BasicBlock *InsertAtEnd) {
4543  assert(CatchPad);
4544  assert(BB);
4545  return new (2) CatchReturnInst(CatchPad, BB, InsertAtEnd);
4546  }
4547 
4548  /// Provide fast operand accessors
4550 
4551  /// Convenience accessors.
4552  CatchPadInst *getCatchPad() const { return cast<CatchPadInst>(Op<0>()); }
4553  void setCatchPad(CatchPadInst *CatchPad) {
4554  assert(CatchPad);
4555  Op<0>() = CatchPad;
4556  }
4557 
4558  BasicBlock *getSuccessor() const { return cast<BasicBlock>(Op<1>()); }
4559  void setSuccessor(BasicBlock *NewSucc) {
4560  assert(NewSucc);
4561  Op<1>() = NewSucc;
4562  }
4563  unsigned getNumSuccessors() const { return 1; }
4564 
4565  /// Get the parentPad of this catchret's catchpad's catchswitch.
4566  /// The successor block is implicitly a member of this funclet.
4568  return getCatchPad()->getCatchSwitch()->getParentPad();
4569  }
4570 
4571  // Methods for support type inquiry through isa, cast, and dyn_cast:
4572  static bool classof(const Instruction *I) {
4573  return (I->getOpcode() == Instruction::CatchRet);
4574  }
4575  static bool classof(const Value *V) {
4576  return isa<Instruction>(V) && classof(cast<Instruction>(V));
4577  }
4578 
4579 private:
4580  BasicBlock *getSuccessor(unsigned Idx) const {
4581  assert(Idx < getNumSuccessors() && "Successor # out of range for catchret!");
4582  return getSuccessor();
4583  }
4584 
4585  void setSuccessor(unsigned Idx, BasicBlock *B) {
4586  assert(Idx < getNumSuccessors() && "Successor # out of range for catchret!");
4587  setSuccessor(B);
4588  }
4589 };
4590 
4591 template <>
4593  : public FixedNumOperandTraits<CatchReturnInst, 2> {};
4594 
4596 
4597 //===----------------------------------------------------------------------===//
4598 // CleanupReturnInst Class
4599 //===----------------------------------------------------------------------===//
4600 
4602  using UnwindDestField = BoolBitfieldElementT<0>;
4603 
4604 private:
4606  CleanupReturnInst(Value *CleanupPad, BasicBlock *UnwindBB, unsigned Values,
4607  Instruction *InsertBefore = nullptr);
4608  CleanupReturnInst(Value *CleanupPad, BasicBlock *UnwindBB, unsigned Values,
4609  BasicBlock *InsertAtEnd);
4610 
4611  void init(Value *CleanupPad, BasicBlock *UnwindBB);
4612 
4613 protected:
4614  // Note: Instruction needs to be a friend here to call cloneImpl.
4615  friend class Instruction;
4616 
4617  CleanupReturnInst *cloneImpl() const;
4618 
4619 public:
4620  static CleanupReturnInst *Create(Value *CleanupPad,
4621  BasicBlock *UnwindBB = nullptr,
4622  Instruction *InsertBefore = nullptr) {
4623  assert(CleanupPad);
4624  unsigned Values = 1;
4625  if (UnwindBB)
4626  ++Values;
4627  return new (Values)
4628  CleanupReturnInst(CleanupPad, UnwindBB, Values, InsertBefore);
4629  }
4630 
4631  static CleanupReturnInst *Create(Value *CleanupPad, BasicBlock *UnwindBB,
4632  BasicBlock *InsertAtEnd) {
4633  assert(CleanupPad);
4634  unsigned Values = 1;
4635  if (UnwindBB)
4636  ++Values;
4637  return new (Values)
4638  CleanupReturnInst(CleanupPad, UnwindBB, Values, InsertAtEnd);
4639  }
4640 
4641  /// Provide fast operand accessors
4643 
4644  bool hasUnwindDest() const { return getSubclassData<UnwindDestField>(); }
4645  bool unwindsToCaller() const { return !hasUnwindDest(); }
4646 
4647  /// Convenience accessor.
4649  return cast<CleanupPadInst>(Op<0>());
4650  }
4651  void setCleanupPad(CleanupPadInst *CleanupPad) {
4652  assert(CleanupPad);