LLVM  13.0.0git
Instructions.cpp
Go to the documentation of this file.
1 //===- Instructions.cpp - Implement the LLVM instructions -----------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file implements all of the non-inline methods for the LLVM instruction
10 // classes.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "llvm/IR/Instructions.h"
15 #include "LLVMContextImpl.h"
16 #include "llvm/ADT/None.h"
17 #include "llvm/ADT/SmallVector.h"
18 #include "llvm/ADT/Twine.h"
19 #include "llvm/IR/Attributes.h"
20 #include "llvm/IR/BasicBlock.h"
21 #include "llvm/IR/Constant.h"
22 #include "llvm/IR/Constants.h"
23 #include "llvm/IR/DataLayout.h"
24 #include "llvm/IR/DerivedTypes.h"
25 #include "llvm/IR/Function.h"
26 #include "llvm/IR/InstrTypes.h"
27 #include "llvm/IR/Instruction.h"
28 #include "llvm/IR/Intrinsics.h"
29 #include "llvm/IR/LLVMContext.h"
30 #include "llvm/IR/MDBuilder.h"
31 #include "llvm/IR/Metadata.h"
32 #include "llvm/IR/Module.h"
33 #include "llvm/IR/Operator.h"
34 #include "llvm/IR/Type.h"
35 #include "llvm/IR/Value.h"
37 #include "llvm/Support/Casting.h"
40 #include "llvm/Support/TypeSize.h"
41 #include <algorithm>
42 #include <cassert>
43 #include <cstdint>
44 #include <vector>
45 
46 using namespace llvm;
47 
48 //===----------------------------------------------------------------------===//
49 // AllocaInst Class
50 //===----------------------------------------------------------------------===//
51 
54  TypeSize Size = DL.getTypeAllocSizeInBits(getAllocatedType());
55  if (isArrayAllocation()) {
56  auto *C = dyn_cast<ConstantInt>(getArraySize());
57  if (!C)
58  return None;
59  assert(!Size.isScalable() && "Array elements cannot have a scalable size");
60  Size *= C->getZExtValue();
61  }
62  return Size;
63 }
64 
65 //===----------------------------------------------------------------------===//
66 // SelectInst Class
67 //===----------------------------------------------------------------------===//
68 
69 /// areInvalidOperands - Return a string if the specified operands are invalid
70 /// for a select operation, otherwise return null.
71 const char *SelectInst::areInvalidOperands(Value *Op0, Value *Op1, Value *Op2) {
72  if (Op1->getType() != Op2->getType())
73  return "both values to select must have same type";
74 
75  if (Op1->getType()->isTokenTy())
76  return "select values cannot have token type";
77 
78  if (VectorType *VT = dyn_cast<VectorType>(Op0->getType())) {
79  // Vector select.
80  if (VT->getElementType() != Type::getInt1Ty(Op0->getContext()))
81  return "vector select condition element type must be i1";
82  VectorType *ET = dyn_cast<VectorType>(Op1->getType());
83  if (!ET)
84  return "selected values for vector select must be vectors";
85  if (ET->getElementCount() != VT->getElementCount())
86  return "vector select requires selected vectors to have "
87  "the same vector length as select condition";
88  } else if (Op0->getType() != Type::getInt1Ty(Op0->getContext())) {
89  return "select condition must be i1 or <n x i1>";
90  }
91  return nullptr;
92 }
93 
94 //===----------------------------------------------------------------------===//
95 // PHINode Class
96 //===----------------------------------------------------------------------===//
97 
98 PHINode::PHINode(const PHINode &PN)
99  : Instruction(PN.getType(), Instruction::PHI, nullptr, PN.getNumOperands()),
100  ReservedSpace(PN.getNumOperands()) {
102  std::copy(PN.op_begin(), PN.op_end(), op_begin());
103  std::copy(PN.block_begin(), PN.block_end(), block_begin());
105 }
106 
107 // removeIncomingValue - Remove an incoming value. This is useful if a
108 // predecessor basic block is deleted.
109 Value *PHINode::removeIncomingValue(unsigned Idx, bool DeletePHIIfEmpty) {
110  Value *Removed = getIncomingValue(Idx);
111 
112  // Move everything after this operand down.
113  //
114  // FIXME: we could just swap with the end of the list, then erase. However,
115  // clients might not expect this to happen. The code as it is thrashes the
116  // use/def lists, which is kinda lame.
117  std::copy(op_begin() + Idx + 1, op_end(), op_begin() + Idx);
118  std::copy(block_begin() + Idx + 1, block_end(), block_begin() + Idx);
119 
120  // Nuke the last value.
121  Op<-1>().set(nullptr);
123 
124  // If the PHI node is dead, because it has zero entries, nuke it now.
125  if (getNumOperands() == 0 && DeletePHIIfEmpty) {
126  // If anyone is using this PHI, make them use a dummy value instead...
128  eraseFromParent();
129  }
130  return Removed;
131 }
132 
133 /// growOperands - grow operands - This grows the operand list in response
134 /// to a push_back style of operation. This grows the number of ops by 1.5
135 /// times.
136 ///
137 void PHINode::growOperands() {
138  unsigned e = getNumOperands();
139  unsigned NumOps = e + e / 2;
140  if (NumOps < 2) NumOps = 2; // 2 op PHI nodes are VERY common.
141 
142  ReservedSpace = NumOps;
143  growHungoffUses(ReservedSpace, /* IsPhi */ true);
144 }
145 
146 /// hasConstantValue - If the specified PHI node always merges together the same
147 /// value, return the value, otherwise return null.
149  // Exploit the fact that phi nodes always have at least one entry.
150  Value *ConstantValue = getIncomingValue(0);
151  for (unsigned i = 1, e = getNumIncomingValues(); i != e; ++i)
152  if (getIncomingValue(i) != ConstantValue && getIncomingValue(i) != this) {
153  if (ConstantValue != this)
154  return nullptr; // Incoming values not all the same.
155  // The case where the first value is this PHI.
156  ConstantValue = getIncomingValue(i);
157  }
158  if (ConstantValue == this)
159  return UndefValue::get(getType());
160  return ConstantValue;
161 }
162 
163 /// hasConstantOrUndefValue - Whether the specified PHI node always merges
164 /// together the same value, assuming that undefs result in the same value as
165 /// non-undefs.
166 /// Unlike \ref hasConstantValue, this does not return a value because the
167 /// unique non-undef incoming value need not dominate the PHI node.
169  Value *ConstantValue = nullptr;
170  for (unsigned i = 0, e = getNumIncomingValues(); i != e; ++i) {
171  Value *Incoming = getIncomingValue(i);
172  if (Incoming != this && !isa<UndefValue>(Incoming)) {
173  if (ConstantValue && ConstantValue != Incoming)
174  return false;
175  ConstantValue = Incoming;
176  }
177  }
178  return true;
179 }
180 
181 //===----------------------------------------------------------------------===//
182 // LandingPadInst Implementation
183 //===----------------------------------------------------------------------===//
184 
// Construct a landingpad instruction inserted before InsertBefore. Operand
// storage is "hung off" (allocated separately), so the base Instruction is
// created with zero co-allocated operands and init() reserves the clause
// slots afterwards.
LandingPadInst::LandingPadInst(Type *RetTy, unsigned NumReservedValues,
                               const Twine &NameStr, Instruction *InsertBefore)
    : Instruction(RetTy, Instruction::LandingPad, nullptr, 0, InsertBefore) {
  init(NumReservedValues, NameStr);
}
190 
// Construct a landingpad instruction appended to InsertAtEnd. Same as the
// insert-before form: operands are hung off, so no co-allocated operand
// slots are requested here; init() reserves the clause storage.
LandingPadInst::LandingPadInst(Type *RetTy, unsigned NumReservedValues,
                               const Twine &NameStr, BasicBlock *InsertAtEnd)
    : Instruction(RetTy, Instruction::LandingPad, nullptr, 0, InsertAtEnd) {
  init(NumReservedValues, NameStr);
}
196 
197 LandingPadInst::LandingPadInst(const LandingPadInst &LP)
198  : Instruction(LP.getType(), Instruction::LandingPad, nullptr,
199  LP.getNumOperands()),
200  ReservedSpace(LP.getNumOperands()) {
202  Use *OL = getOperandList();
203  const Use *InOL = LP.getOperandList();
204  for (unsigned I = 0, E = ReservedSpace; I != E; ++I)
205  OL[I] = InOL[I];
206 
207  setCleanup(LP.isCleanup());
208 }
209 
210 LandingPadInst *LandingPadInst::Create(Type *RetTy, unsigned NumReservedClauses,
211  const Twine &NameStr,
212  Instruction *InsertBefore) {
213  return new LandingPadInst(RetTy, NumReservedClauses, NameStr, InsertBefore);
214 }
215 
216 LandingPadInst *LandingPadInst::Create(Type *RetTy, unsigned NumReservedClauses,
217  const Twine &NameStr,
218  BasicBlock *InsertAtEnd) {
219  return new LandingPadInst(RetTy, NumReservedClauses, NameStr, InsertAtEnd);
220 }
221 
222 void LandingPadInst::init(unsigned NumReservedValues, const Twine &NameStr) {
223  ReservedSpace = NumReservedValues;
225  allocHungoffUses(ReservedSpace);
226  setName(NameStr);
227  setCleanup(false);
228 }
229 
230 /// growOperands - grow operands - This grows the operand list in response to a
231 /// push_back style of operation. This grows the number of ops by 2 times.
232 void LandingPadInst::growOperands(unsigned Size) {
233  unsigned e = getNumOperands();
234  if (ReservedSpace >= e + Size) return;
235  ReservedSpace = (std::max(e, 1U) + Size / 2) * 2;
236  growHungoffUses(ReservedSpace);
237 }
238 
240  unsigned OpNo = getNumOperands();
241  growOperands(1);
242  assert(OpNo < ReservedSpace && "Growing didn't work!");
244  getOperandList()[OpNo] = Val;
245 }
246 
247 //===----------------------------------------------------------------------===//
248 // CallBase Implementation
249 //===----------------------------------------------------------------------===//
250 
252  Instruction *InsertPt) {
253  switch (CB->getOpcode()) {
254  case Instruction::Call:
255  return CallInst::Create(cast<CallInst>(CB), Bundles, InsertPt);
256  case Instruction::Invoke:
257  return InvokeInst::Create(cast<InvokeInst>(CB), Bundles, InsertPt);
258  case Instruction::CallBr:
259  return CallBrInst::Create(cast<CallBrInst>(CB), Bundles, InsertPt);
260  default:
261  llvm_unreachable("Unknown CallBase sub-class!");
262  }
263 }
264 
266  Instruction *InsertPt) {
268  for (unsigned i = 0, e = CI->getNumOperandBundles(); i < e; ++i) {
269  auto ChildOB = CI->getOperandBundleAt(i);
270  if (ChildOB.getTagName() != OpB.getTag())
271  OpDefs.emplace_back(ChildOB);
272  }
273  OpDefs.emplace_back(OpB);
274  return CallBase::Create(CI, OpDefs, InsertPt);
275 }
276 
277 
279 
281  assert(getOpcode() == Instruction::CallBr && "Unexpected opcode!");
282  return cast<CallBrInst>(this)->getNumIndirectDests() + 1;
283 }
284 
286  const Value *V = getCalledOperand();
287  if (isa<Function>(V) || isa<Constant>(V))
288  return false;
289  return !isInlineAsm();
290 }
291 
292 /// Tests if this call site must be tail call optimized. Only a CallInst can
293 /// be tail call optimized.
295  if (auto *CI = dyn_cast<CallInst>(this))
296  return CI->isMustTailCall();
297  return false;
298 }
299 
300 /// Tests if this call site is marked as a tail call.
301 bool CallBase::isTailCall() const {
302  if (auto *CI = dyn_cast<CallInst>(this))
303  return CI->isTailCall();
304  return false;
305 }
306 
308  if (auto *F = getCalledFunction())
309  return F->getIntrinsicID();
311 }
312 
314  if (hasRetAttr(Attribute::NonNull))
315  return true;
316 
319  getType()->getPointerAddressSpace()))
320  return true;
321 
322  return false;
323 }
324 
326  unsigned Index;
327 
328  if (Attrs.hasAttrSomewhere(Attribute::Returned, &Index) && Index)
330  if (const Function *F = getCalledFunction())
331  if (F->getAttributes().hasAttrSomewhere(Attribute::Returned, &Index) &&
332  Index)
334 
335  return nullptr;
336 }
337 
338 /// Determine whether the argument or parameter has the given attribute.
339 bool CallBase::paramHasAttr(unsigned ArgNo, Attribute::AttrKind Kind) const {
340  assert(ArgNo < getNumArgOperands() && "Param index out of bounds!");
341 
342  if (Attrs.hasParamAttribute(ArgNo, Kind))
343  return true;
344  if (const Function *F = getCalledFunction())
345  return F->getAttributes().hasParamAttribute(ArgNo, Kind);
346  return false;
347 }
348 
349 bool CallBase::hasFnAttrOnCalledFunction(Attribute::AttrKind Kind) const {
350  if (const Function *F = getCalledFunction())
351  return F->getAttributes().hasFnAttribute(Kind);
352  return false;
353 }
354 
355 bool CallBase::hasFnAttrOnCalledFunction(StringRef Kind) const {
356  if (const Function *F = getCalledFunction())
357  return F->getAttributes().hasFnAttribute(Kind);
358  return false;
359 }
360 
362  SmallVectorImpl<OperandBundleDef> &Defs) const {
363  for (unsigned i = 0, e = getNumOperandBundles(); i != e; ++i)
365 }
366 
369  const unsigned BeginIndex) {
370  auto It = op_begin() + BeginIndex;
371  for (auto &B : Bundles)
372  It = std::copy(B.input_begin(), B.input_end(), It);
373 
374  auto *ContextImpl = getContext().pImpl;
375  auto BI = Bundles.begin();
376  unsigned CurrentIndex = BeginIndex;
377 
378  for (auto &BOI : bundle_op_infos()) {
379  assert(BI != Bundles.end() && "Incorrect allocation?");
380 
381  BOI.Tag = ContextImpl->getOrInsertBundleTag(BI->getTag());
382  BOI.Begin = CurrentIndex;
383  BOI.End = CurrentIndex + BI->input_size();
384  CurrentIndex = BOI.End;
385  BI++;
386  }
387 
388  assert(BI == Bundles.end() && "Incorrect allocation?");
389 
390  return It;
391 }
392 
394  /// When there isn't many bundles, we do a simple linear search.
395  /// Else fallback to a binary-search that use the fact that bundles usually
396  /// have similar number of argument to get faster convergence.
398  for (auto &BOI : bundle_op_infos())
399  if (BOI.Begin <= OpIdx && OpIdx < BOI.End)
400  return BOI;
401 
402  llvm_unreachable("Did not find operand bundle for operand!");
403  }
404 
405  assert(OpIdx >= arg_size() && "the Idx is not in the operand bundles");
407  OpIdx < std::prev(bundle_op_info_end())->End &&
408  "The Idx isn't in the operand bundle");
409 
410  /// We need a decimal number below and to prevent using floating point numbers
411  /// we use an intergal value multiplied by this constant.
412  constexpr unsigned NumberScaling = 1024;
413 
416  bundle_op_iterator Current = Begin;
417 
418  while (Begin != End) {
419  unsigned ScaledOperandPerBundle =
420  NumberScaling * (std::prev(End)->End - Begin->Begin) / (End - Begin);
421  Current = Begin + (((OpIdx - Begin->Begin) * NumberScaling) /
422  ScaledOperandPerBundle);
423  if (Current >= End)
424  Current = std::prev(End);
425  assert(Current < End && Current >= Begin &&
426  "the operand bundle doesn't cover every value in the range");
427  if (OpIdx >= Current->Begin && OpIdx < Current->End)
428  break;
429  if (OpIdx >= Current->End)
430  Begin = Current + 1;
431  else
432  End = Current;
433  }
434 
435  assert(OpIdx >= Current->Begin && OpIdx < Current->End &&
436  "the operand bundle doesn't cover every value in the range");
437  return *Current;
438 }
439 
442  Instruction *InsertPt) {
443  if (CB->getOperandBundle(ID))
444  return CB;
445 
447  CB->getOperandBundlesAsDefs(Bundles);
448  Bundles.push_back(OB);
449  return Create(CB, Bundles, InsertPt);
450 }
451 
453  Instruction *InsertPt) {
455  bool CreateNew = false;
456 
457  for (unsigned I = 0, E = CB->getNumOperandBundles(); I != E; ++I) {
458  auto Bundle = CB->getOperandBundleAt(I);
459  if (Bundle.getTagID() == ID) {
460  CreateNew = true;
461  continue;
462  }
463  Bundles.emplace_back(Bundle);
464  }
465 
466  return CreateNew ? Create(CB, Bundles, InsertPt) : CB;
467 }
468 
470  // Implementation note: this is a conservative implementation of operand
471  // bundle semantics, where *any* non-assume operand bundle forces a callsite
472  // to be at least readonly.
473  return hasOperandBundles() && getIntrinsicID() != Intrinsic::assume;
474 }
475 
476 //===----------------------------------------------------------------------===//
477 // CallInst Implementation
478 //===----------------------------------------------------------------------===//
479 
480 void CallInst::init(FunctionType *FTy, Value *Func, ArrayRef<Value *> Args,
481  ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr) {
482  this->FTy = FTy;
483  assert(getNumOperands() == Args.size() + CountBundleInputs(Bundles) + 1 &&
484  "NumOperands not set up?");
485  setCalledOperand(Func);
486 
487 #ifndef NDEBUG
488  assert((Args.size() == FTy->getNumParams() ||
489  (FTy->isVarArg() && Args.size() > FTy->getNumParams())) &&
490  "Calling a function with bad signature!");
491 
492  for (unsigned i = 0; i != Args.size(); ++i)
493  assert((i >= FTy->getNumParams() ||
494  FTy->getParamType(i) == Args[i]->getType()) &&
495  "Calling a function with a bad signature!");
496 #endif
497 
499 
500  auto It = populateBundleOperandInfos(Bundles, Args.size());
501  (void)It;
502  assert(It + 1 == op_end() && "Should add up!");
503 
504  setName(NameStr);
505 }
506 
507 void CallInst::init(FunctionType *FTy, Value *Func, const Twine &NameStr) {
508  this->FTy = FTy;
509  assert(getNumOperands() == 1 && "NumOperands not set up?");
510  setCalledOperand(Func);
511 
512  assert(FTy->getNumParams() == 0 && "Calling a function with bad signature");
513 
514  setName(NameStr);
515 }
516 
// Construct a zero-argument call to Func, inserted before InsertBefore.
// Exactly one co-allocated operand slot (the callee) is placed immediately
// before this instruction, hence op_end(this) - 1 with an operand count of 1.
CallInst::CallInst(FunctionType *Ty, Value *Func, const Twine &Name,
                   Instruction *InsertBefore)
    : CallBase(Ty->getReturnType(), Instruction::Call,
               OperandTraits<CallBase>::op_end(this) - 1, 1, InsertBefore) {
  init(Ty, Func, Name);
}
523 
// Construct a zero-argument call to Func, appended to InsertAtEnd.
// Mirrors the insert-before constructor: one co-allocated operand slot
// holding the callee.
CallInst::CallInst(FunctionType *Ty, Value *Func, const Twine &Name,
                   BasicBlock *InsertAtEnd)
    : CallBase(Ty->getReturnType(), Instruction::Call,
               OperandTraits<CallBase>::op_end(this) - 1, 1, InsertAtEnd) {
  init(Ty, Func, Name);
}
530 
531 CallInst::CallInst(const CallInst &CI)
532  : CallBase(CI.Attrs, CI.FTy, CI.getType(), Instruction::Call,
533  OperandTraits<CallBase>::op_end(this) - CI.getNumOperands(),
534  CI.getNumOperands()) {
535  setTailCallKind(CI.getTailCallKind());
537 
538  std::copy(CI.op_begin(), CI.op_end(), op_begin());
542 }
543 
545  Instruction *InsertPt) {
546  std::vector<Value *> Args(CI->arg_begin(), CI->arg_end());
547 
548  auto *NewCI = CallInst::Create(CI->getFunctionType(), CI->getCalledOperand(),
549  Args, OpB, CI->getName(), InsertPt);
550  NewCI->setTailCallKind(CI->getTailCallKind());
551  NewCI->setCallingConv(CI->getCallingConv());
552  NewCI->SubclassOptionalData = CI->SubclassOptionalData;
553  NewCI->setAttributes(CI->getAttributes());
554  NewCI->setDebugLoc(CI->getDebugLoc());
555  return NewCI;
556 }
557 
558 // Update profile weight for call instruction by scaling it using the ratio
559 // of S/T. The meaning of "branch_weights" meta data for call instruction is
560 // transfered to represent call count.
561 void CallInst::updateProfWeight(uint64_t S, uint64_t T) {
562  auto *ProfileData = getMetadata(LLVMContext::MD_prof);
563  if (ProfileData == nullptr)
564  return;
565 
566  auto *ProfDataName = dyn_cast<MDString>(ProfileData->getOperand(0));
567  if (!ProfDataName || (!ProfDataName->getString().equals("branch_weights") &&
568  !ProfDataName->getString().equals("VP")))
569  return;
570 
571  if (T == 0) {
572  LLVM_DEBUG(dbgs() << "Attempting to update profile weights will result in "
573  "div by 0. Ignoring. Likely the function "
574  << getParent()->getParent()->getName()
575  << " has 0 entry count, and contains call instructions "
576  "with non-zero prof info.");
577  return;
578  }
579 
580  MDBuilder MDB(getContext());
582  Vals.push_back(ProfileData->getOperand(0));
583  APInt APS(128, S), APT(128, T);
584  if (ProfDataName->getString().equals("branch_weights") &&
585  ProfileData->getNumOperands() > 0) {
586  // Using APInt::div may be expensive, but most cases should fit 64 bits.
587  APInt Val(128, mdconst::dyn_extract<ConstantInt>(ProfileData->getOperand(1))
588  ->getValue()
589  .getZExtValue());
590  Val *= APS;
591  Vals.push_back(MDB.createConstant(
593  Val.udiv(APT).getLimitedValue(UINT32_MAX))));
594  } else if (ProfDataName->getString().equals("VP"))
595  for (unsigned i = 1; i < ProfileData->getNumOperands(); i += 2) {
596  // The first value is the key of the value profile, which will not change.
597  Vals.push_back(ProfileData->getOperand(i));
598  uint64_t Count =
599  mdconst::dyn_extract<ConstantInt>(ProfileData->getOperand(i + 1))
600  ->getValue()
601  .getZExtValue();
602  // Don't scale the magic number.
603  if (Count == NOMORE_ICP_MAGICNUM) {
604  Vals.push_back(ProfileData->getOperand(i + 1));
605  continue;
606  }
607  // Using APInt::div may be expensive, but most cases should fit 64 bits.
608  APInt Val(128, Count);
609  Val *= APS;
610  Vals.push_back(MDB.createConstant(
612  Val.udiv(APT).getLimitedValue())));
613  }
614  setMetadata(LLVMContext::MD_prof, MDNode::get(getContext(), Vals));
615 }
616 
617 /// IsConstantOne - Return true only if val is constant int 1
618 static bool IsConstantOne(Value *val) {
619  assert(val && "IsConstantOne does not work with nullptr val");
620  const ConstantInt *CVal = dyn_cast<ConstantInt>(val);
621  return CVal && CVal->isOne();
622 }
623 
624 static Instruction *createMalloc(Instruction *InsertBefore,
625  BasicBlock *InsertAtEnd, Type *IntPtrTy,
626  Type *AllocTy, Value *AllocSize,
627  Value *ArraySize,
629  Function *MallocF, const Twine &Name) {
630  assert(((!InsertBefore && InsertAtEnd) || (InsertBefore && !InsertAtEnd)) &&
631  "createMalloc needs either InsertBefore or InsertAtEnd");
632 
633  // malloc(type) becomes:
634  // bitcast (i8* malloc(typeSize)) to type*
635  // malloc(type, arraySize) becomes:
636  // bitcast (i8* malloc(typeSize*arraySize)) to type*
637  if (!ArraySize)
638  ArraySize = ConstantInt::get(IntPtrTy, 1);
639  else if (ArraySize->getType() != IntPtrTy) {
640  if (InsertBefore)
641  ArraySize = CastInst::CreateIntegerCast(ArraySize, IntPtrTy, false,
642  "", InsertBefore);
643  else
644  ArraySize = CastInst::CreateIntegerCast(ArraySize, IntPtrTy, false,
645  "", InsertAtEnd);
646  }
647 
648  if (!IsConstantOne(ArraySize)) {
649  if (IsConstantOne(AllocSize)) {
650  AllocSize = ArraySize; // Operand * 1 = Operand
651  } else if (Constant *CO = dyn_cast<Constant>(ArraySize)) {
652  Constant *Scale = ConstantExpr::getIntegerCast(CO, IntPtrTy,
653  false /*ZExt*/);
654  // Malloc arg is constant product of type size and array size
655  AllocSize = ConstantExpr::getMul(Scale, cast<Constant>(AllocSize));
656  } else {
657  // Multiply type size by the array size...
658  if (InsertBefore)
659  AllocSize = BinaryOperator::CreateMul(ArraySize, AllocSize,
660  "mallocsize", InsertBefore);
661  else
662  AllocSize = BinaryOperator::CreateMul(ArraySize, AllocSize,
663  "mallocsize", InsertAtEnd);
664  }
665  }
666 
667  assert(AllocSize->getType() == IntPtrTy && "malloc arg is wrong size");
668  // Create the call to Malloc.
669  BasicBlock *BB = InsertBefore ? InsertBefore->getParent() : InsertAtEnd;
670  Module *M = BB->getParent()->getParent();
671  Type *BPTy = Type::getInt8PtrTy(BB->getContext());
672  FunctionCallee MallocFunc = MallocF;
673  if (!MallocFunc)
674  // prototype malloc as "void *malloc(size_t)"
675  MallocFunc = M->getOrInsertFunction("malloc", BPTy, IntPtrTy);
676  PointerType *AllocPtrType = PointerType::getUnqual(AllocTy);
677  CallInst *MCall = nullptr;
678  Instruction *Result = nullptr;
679  if (InsertBefore) {
680  MCall = CallInst::Create(MallocFunc, AllocSize, OpB, "malloccall",
681  InsertBefore);
682  Result = MCall;
683  if (Result->getType() != AllocPtrType)
684  // Create a cast instruction to convert to the right type...
685  Result = new BitCastInst(MCall, AllocPtrType, Name, InsertBefore);
686  } else {
687  MCall = CallInst::Create(MallocFunc, AllocSize, OpB, "malloccall");
688  Result = MCall;
689  if (Result->getType() != AllocPtrType) {
690  InsertAtEnd->getInstList().push_back(MCall);
691  // Create a cast instruction to convert to the right type...
692  Result = new BitCastInst(MCall, AllocPtrType, Name);
693  }
694  }
695  MCall->setTailCall();
696  if (Function *F = dyn_cast<Function>(MallocFunc.getCallee())) {
697  MCall->setCallingConv(F->getCallingConv());
698  if (!F->returnDoesNotAlias())
699  F->setReturnDoesNotAlias();
700  }
701  assert(!MCall->getType()->isVoidTy() && "Malloc has void return type");
702 
703  return Result;
704 }
705 
706 /// CreateMalloc - Generate the IR for a call to malloc:
707 /// 1. Compute the malloc call's argument as the specified type's size,
708 /// possibly multiplied by the array size if the array size is not
709 /// constant 1.
710 /// 2. Call malloc with that argument.
711 /// 3. Bitcast the result of the malloc call to the specified type.
713  Type *IntPtrTy, Type *AllocTy,
714  Value *AllocSize, Value *ArraySize,
715  Function *MallocF,
716  const Twine &Name) {
717  return createMalloc(InsertBefore, nullptr, IntPtrTy, AllocTy, AllocSize,
718  ArraySize, None, MallocF, Name);
719 }
721  Type *IntPtrTy, Type *AllocTy,
722  Value *AllocSize, Value *ArraySize,
724  Function *MallocF,
725  const Twine &Name) {
726  return createMalloc(InsertBefore, nullptr, IntPtrTy, AllocTy, AllocSize,
727  ArraySize, OpB, MallocF, Name);
728 }
729 
730 /// CreateMalloc - Generate the IR for a call to malloc:
731 /// 1. Compute the malloc call's argument as the specified type's size,
732 /// possibly multiplied by the array size if the array size is not
733 /// constant 1.
734 /// 2. Call malloc with that argument.
735 /// 3. Bitcast the result of the malloc call to the specified type.
736 /// Note: This function does not add the bitcast to the basic block, that is the
737 /// responsibility of the caller.
739  Type *IntPtrTy, Type *AllocTy,
740  Value *AllocSize, Value *ArraySize,
741  Function *MallocF, const Twine &Name) {
742  return createMalloc(nullptr, InsertAtEnd, IntPtrTy, AllocTy, AllocSize,
743  ArraySize, None, MallocF, Name);
744 }
746  Type *IntPtrTy, Type *AllocTy,
747  Value *AllocSize, Value *ArraySize,
749  Function *MallocF, const Twine &Name) {
750  return createMalloc(nullptr, InsertAtEnd, IntPtrTy, AllocTy, AllocSize,
751  ArraySize, OpB, MallocF, Name);
752 }
753 
756  Instruction *InsertBefore,
757  BasicBlock *InsertAtEnd) {
758  assert(((!InsertBefore && InsertAtEnd) || (InsertBefore && !InsertAtEnd)) &&
759  "createFree needs either InsertBefore or InsertAtEnd");
760  assert(Source->getType()->isPointerTy() &&
761  "Can not free something of nonpointer type!");
762 
763  BasicBlock *BB = InsertBefore ? InsertBefore->getParent() : InsertAtEnd;
764  Module *M = BB->getParent()->getParent();
765 
766  Type *VoidTy = Type::getVoidTy(M->getContext());
767  Type *IntPtrTy = Type::getInt8PtrTy(M->getContext());
768  // prototype free as "void free(void*)"
769  FunctionCallee FreeFunc = M->getOrInsertFunction("free", VoidTy, IntPtrTy);
770  CallInst *Result = nullptr;
771  Value *PtrCast = Source;
772  if (InsertBefore) {
773  if (Source->getType() != IntPtrTy)
774  PtrCast = new BitCastInst(Source, IntPtrTy, "", InsertBefore);
775  Result = CallInst::Create(FreeFunc, PtrCast, Bundles, "", InsertBefore);
776  } else {
777  if (Source->getType() != IntPtrTy)
778  PtrCast = new BitCastInst(Source, IntPtrTy, "", InsertAtEnd);
779  Result = CallInst::Create(FreeFunc, PtrCast, Bundles, "");
780  }
781  Result->setTailCall();
782  if (Function *F = dyn_cast<Function>(FreeFunc.getCallee()))
783  Result->setCallingConv(F->getCallingConv());
784 
785  return Result;
786 }
787 
788 /// CreateFree - Generate the IR for a call to the builtin free function.
790  return createFree(Source, None, InsertBefore, nullptr);
791 }
794  Instruction *InsertBefore) {
795  return createFree(Source, Bundles, InsertBefore, nullptr);
796 }
797 
798 /// CreateFree - Generate the IR for a call to the builtin free function.
799 /// Note: This function does not add the call to the basic block, that is the
800 /// responsibility of the caller.
802  Instruction *FreeCall = createFree(Source, None, nullptr, InsertAtEnd);
803  assert(FreeCall && "CreateFree did not create a CallInst");
804  return FreeCall;
805 }
808  BasicBlock *InsertAtEnd) {
809  Instruction *FreeCall = createFree(Source, Bundles, nullptr, InsertAtEnd);
810  assert(FreeCall && "CreateFree did not create a CallInst");
811  return FreeCall;
812 }
813 
814 //===----------------------------------------------------------------------===//
815 // InvokeInst Implementation
816 //===----------------------------------------------------------------------===//
817 
818 void InvokeInst::init(FunctionType *FTy, Value *Fn, BasicBlock *IfNormal,
819  BasicBlock *IfException, ArrayRef<Value *> Args,
821  const Twine &NameStr) {
822  this->FTy = FTy;
823 
824  assert((int)getNumOperands() ==
825  ComputeNumOperands(Args.size(), CountBundleInputs(Bundles)) &&
826  "NumOperands not set up?");
827  setNormalDest(IfNormal);
828  setUnwindDest(IfException);
829  setCalledOperand(Fn);
830 
831 #ifndef NDEBUG
832  assert(((Args.size() == FTy->getNumParams()) ||
833  (FTy->isVarArg() && Args.size() > FTy->getNumParams())) &&
834  "Invoking a function with bad signature");
835 
836  for (unsigned i = 0, e = Args.size(); i != e; i++)
837  assert((i >= FTy->getNumParams() ||
838  FTy->getParamType(i) == Args[i]->getType()) &&
839  "Invoking a function with a bad signature!");
840 #endif
841 
843 
844  auto It = populateBundleOperandInfos(Bundles, Args.size());
845  (void)It;
846  assert(It + 3 == op_end() && "Should add up!");
847 
848  setName(NameStr);
849 }
850 
851 InvokeInst::InvokeInst(const InvokeInst &II)
852  : CallBase(II.Attrs, II.FTy, II.getType(), Instruction::Invoke,
853  OperandTraits<CallBase>::op_end(this) - II.getNumOperands(),
854  II.getNumOperands()) {
856  std::copy(II.op_begin(), II.op_end(), op_begin());
860 }
861 
863  Instruction *InsertPt) {
864  std::vector<Value *> Args(II->arg_begin(), II->arg_end());
865 
866  auto *NewII = InvokeInst::Create(
867  II->getFunctionType(), II->getCalledOperand(), II->getNormalDest(),
868  II->getUnwindDest(), Args, OpB, II->getName(), InsertPt);
869  NewII->setCallingConv(II->getCallingConv());
870  NewII->SubclassOptionalData = II->SubclassOptionalData;
871  NewII->setAttributes(II->getAttributes());
872  NewII->setDebugLoc(II->getDebugLoc());
873  return NewII;
874 }
875 
877  return cast<LandingPadInst>(getUnwindDest()->getFirstNonPHI());
878 }
879 
880 //===----------------------------------------------------------------------===//
881 // CallBrInst Implementation
882 //===----------------------------------------------------------------------===//
883 
884 void CallBrInst::init(FunctionType *FTy, Value *Fn, BasicBlock *Fallthrough,
885  ArrayRef<BasicBlock *> IndirectDests,
888  const Twine &NameStr) {
889  this->FTy = FTy;
890 
891  assert((int)getNumOperands() ==
892  ComputeNumOperands(Args.size(), IndirectDests.size(),
893  CountBundleInputs(Bundles)) &&
894  "NumOperands not set up?");
895  NumIndirectDests = IndirectDests.size();
896  setDefaultDest(Fallthrough);
897  for (unsigned i = 0; i != NumIndirectDests; ++i)
898  setIndirectDest(i, IndirectDests[i]);
899  setCalledOperand(Fn);
900 
901 #ifndef NDEBUG
902  assert(((Args.size() == FTy->getNumParams()) ||
903  (FTy->isVarArg() && Args.size() > FTy->getNumParams())) &&
904  "Calling a function with bad signature");
905 
906  for (unsigned i = 0, e = Args.size(); i != e; i++)
907  assert((i >= FTy->getNumParams() ||
908  FTy->getParamType(i) == Args[i]->getType()) &&
909  "Calling a function with a bad signature!");
910 #endif
911 
912  std::copy(Args.begin(), Args.end(), op_begin());
913 
914  auto It = populateBundleOperandInfos(Bundles, Args.size());
915  (void)It;
916  assert(It + 2 + IndirectDests.size() == op_end() && "Should add up!");
917 
918  setName(NameStr);
919 }
920 
921 void CallBrInst::updateArgBlockAddresses(unsigned i, BasicBlock *B) {
922  assert(getNumIndirectDests() > i && "IndirectDest # out of range for callbr");
923  if (BasicBlock *OldBB = getIndirectDest(i)) {
924  BlockAddress *Old = BlockAddress::get(OldBB);
926  for (unsigned ArgNo = 0, e = getNumArgOperands(); ArgNo != e; ++ArgNo)
927  if (dyn_cast<BlockAddress>(getArgOperand(ArgNo)) == Old)
928  setArgOperand(ArgNo, New);
929  }
930 }
931 
932 CallBrInst::CallBrInst(const CallBrInst &CBI)
933  : CallBase(CBI.Attrs, CBI.FTy, CBI.getType(), Instruction::CallBr,
934  OperandTraits<CallBase>::op_end(this) - CBI.getNumOperands(),
935  CBI.getNumOperands()) {
937  std::copy(CBI.op_begin(), CBI.op_end(), op_begin());
941  NumIndirectDests = CBI.NumIndirectDests;
942 }
943 
945  Instruction *InsertPt) {
946  std::vector<Value *> Args(CBI->arg_begin(), CBI->arg_end());
947 
948  auto *NewCBI = CallBrInst::Create(
949  CBI->getFunctionType(), CBI->getCalledOperand(), CBI->getDefaultDest(),
950  CBI->getIndirectDests(), Args, OpB, CBI->getName(), InsertPt);
951  NewCBI->setCallingConv(CBI->getCallingConv());
952  NewCBI->SubclassOptionalData = CBI->SubclassOptionalData;
953  NewCBI->setAttributes(CBI->getAttributes());
954  NewCBI->setDebugLoc(CBI->getDebugLoc());
955  NewCBI->NumIndirectDests = CBI->NumIndirectDests;
956  return NewCBI;
957 }
958 
959 //===----------------------------------------------------------------------===//
960 // ReturnInst Implementation
961 //===----------------------------------------------------------------------===//
962 
963 ReturnInst::ReturnInst(const ReturnInst &RI)
964  : Instruction(Type::getVoidTy(RI.getContext()), Instruction::Ret,
965  OperandTraits<ReturnInst>::op_end(this) - RI.getNumOperands(),
966  RI.getNumOperands()) {
967  if (RI.getNumOperands())
968  Op<0>() = RI.Op<0>();
970 }
971 
972 ReturnInst::ReturnInst(LLVMContext &C, Value *retVal, Instruction *InsertBefore)
973  : Instruction(Type::getVoidTy(C), Instruction::Ret,
974  OperandTraits<ReturnInst>::op_end(this) - !!retVal, !!retVal,
975  InsertBefore) {
976  if (retVal)
977  Op<0>() = retVal;
978 }
979 
980 ReturnInst::ReturnInst(LLVMContext &C, Value *retVal, BasicBlock *InsertAtEnd)
981  : Instruction(Type::getVoidTy(C), Instruction::Ret,
982  OperandTraits<ReturnInst>::op_end(this) - !!retVal, !!retVal,
983  InsertAtEnd) {
984  if (retVal)
985  Op<0>() = retVal;
986 }
987 
988 ReturnInst::ReturnInst(LLVMContext &Context, BasicBlock *InsertAtEnd)
989  : Instruction(Type::getVoidTy(Context), Instruction::Ret,
990  OperandTraits<ReturnInst>::op_end(this), 0, InsertAtEnd) {}
991 
992 //===----------------------------------------------------------------------===//
993 // ResumeInst Implementation
994 //===----------------------------------------------------------------------===//
995 
996 ResumeInst::ResumeInst(const ResumeInst &RI)
997  : Instruction(Type::getVoidTy(RI.getContext()), Instruction::Resume,
998  OperandTraits<ResumeInst>::op_begin(this), 1) {
999  Op<0>() = RI.Op<0>();
1000 }
1001 
1002 ResumeInst::ResumeInst(Value *Exn, Instruction *InsertBefore)
1003  : Instruction(Type::getVoidTy(Exn->getContext()), Instruction::Resume,
1004  OperandTraits<ResumeInst>::op_begin(this), 1, InsertBefore) {
1005  Op<0>() = Exn;
1006 }
1007 
1008 ResumeInst::ResumeInst(Value *Exn, BasicBlock *InsertAtEnd)
1009  : Instruction(Type::getVoidTy(Exn->getContext()), Instruction::Resume,
1010  OperandTraits<ResumeInst>::op_begin(this), 1, InsertAtEnd) {
1011  Op<0>() = Exn;
1012 }
1013 
1014 //===----------------------------------------------------------------------===//
1015 // CleanupReturnInst Implementation
1016 //===----------------------------------------------------------------------===//
1017 
1018 CleanupReturnInst::CleanupReturnInst(const CleanupReturnInst &CRI)
1019  : Instruction(CRI.getType(), Instruction::CleanupRet,
1021  CRI.getNumOperands(),
1022  CRI.getNumOperands()) {
1023  setSubclassData<Instruction::OpaqueField>(
1025  Op<0>() = CRI.Op<0>();
1026  if (CRI.hasUnwindDest())
1027  Op<1>() = CRI.Op<1>();
1028 }
1029 
1030 void CleanupReturnInst::init(Value *CleanupPad, BasicBlock *UnwindBB) {
1031  if (UnwindBB)
1032  setSubclassData<UnwindDestField>(true);
1033 
1034  Op<0>() = CleanupPad;
1035  if (UnwindBB)
1036  Op<1>() = UnwindBB;
1037 }
1038 
1039 CleanupReturnInst::CleanupReturnInst(Value *CleanupPad, BasicBlock *UnwindBB,
1040  unsigned Values, Instruction *InsertBefore)
1041  : Instruction(Type::getVoidTy(CleanupPad->getContext()),
1042  Instruction::CleanupRet,
1043  OperandTraits<CleanupReturnInst>::op_end(this) - Values,
1044  Values, InsertBefore) {
1045  init(CleanupPad, UnwindBB);
1046 }
1047 
1048 CleanupReturnInst::CleanupReturnInst(Value *CleanupPad, BasicBlock *UnwindBB,
1049  unsigned Values, BasicBlock *InsertAtEnd)
1050  : Instruction(Type::getVoidTy(CleanupPad->getContext()),
1051  Instruction::CleanupRet,
1052  OperandTraits<CleanupReturnInst>::op_end(this) - Values,
1053  Values, InsertAtEnd) {
1054  init(CleanupPad, UnwindBB);
1055 }
1056 
1057 //===----------------------------------------------------------------------===//
1058 // CatchReturnInst Implementation
1059 //===----------------------------------------------------------------------===//
1060 void CatchReturnInst::init(Value *CatchPad, BasicBlock *BB) {
1061  Op<0>() = CatchPad;
1062  Op<1>() = BB;
1063 }
1064 
1065 CatchReturnInst::CatchReturnInst(const CatchReturnInst &CRI)
1066  : Instruction(Type::getVoidTy(CRI.getContext()), Instruction::CatchRet,
1067  OperandTraits<CatchReturnInst>::op_begin(this), 2) {
1068  Op<0>() = CRI.Op<0>();
1069  Op<1>() = CRI.Op<1>();
1070 }
1071 
1072 CatchReturnInst::CatchReturnInst(Value *CatchPad, BasicBlock *BB,
1073  Instruction *InsertBefore)
1074  : Instruction(Type::getVoidTy(BB->getContext()), Instruction::CatchRet,
1075  OperandTraits<CatchReturnInst>::op_begin(this), 2,
1076  InsertBefore) {
1077  init(CatchPad, BB);
1078 }
1079 
1080 CatchReturnInst::CatchReturnInst(Value *CatchPad, BasicBlock *BB,
1081  BasicBlock *InsertAtEnd)
1082  : Instruction(Type::getVoidTy(BB->getContext()), Instruction::CatchRet,
1083  OperandTraits<CatchReturnInst>::op_begin(this), 2,
1084  InsertAtEnd) {
1085  init(CatchPad, BB);
1086 }
1087 
1088 //===----------------------------------------------------------------------===//
1089 // CatchSwitchInst Implementation
1090 //===----------------------------------------------------------------------===//
1091 
1092 CatchSwitchInst::CatchSwitchInst(Value *ParentPad, BasicBlock *UnwindDest,
1093  unsigned NumReservedValues,
1094  const Twine &NameStr,
1095  Instruction *InsertBefore)
1096  : Instruction(ParentPad->getType(), Instruction::CatchSwitch, nullptr, 0,
1097  InsertBefore) {
1098  if (UnwindDest)
1099  ++NumReservedValues;
1100  init(ParentPad, UnwindDest, NumReservedValues + 1);
1101  setName(NameStr);
1102 }
1103 
1104 CatchSwitchInst::CatchSwitchInst(Value *ParentPad, BasicBlock *UnwindDest,
1105  unsigned NumReservedValues,
1106  const Twine &NameStr, BasicBlock *InsertAtEnd)
1107  : Instruction(ParentPad->getType(), Instruction::CatchSwitch, nullptr, 0,
1108  InsertAtEnd) {
1109  if (UnwindDest)
1110  ++NumReservedValues;
1111  init(ParentPad, UnwindDest, NumReservedValues + 1);
1112  setName(NameStr);
1113 }
1114 
1115 CatchSwitchInst::CatchSwitchInst(const CatchSwitchInst &CSI)
1116  : Instruction(CSI.getType(), Instruction::CatchSwitch, nullptr,
1117  CSI.getNumOperands()) {
1118  init(CSI.getParentPad(), CSI.getUnwindDest(), CSI.getNumOperands());
1119  setNumHungOffUseOperands(ReservedSpace);
1120  Use *OL = getOperandList();
1121  const Use *InOL = CSI.getOperandList();
1122  for (unsigned I = 1, E = ReservedSpace; I != E; ++I)
1123  OL[I] = InOL[I];
1124 }
1125 
1126 void CatchSwitchInst::init(Value *ParentPad, BasicBlock *UnwindDest,
1127  unsigned NumReservedValues) {
1128  assert(ParentPad && NumReservedValues);
1129 
1130  ReservedSpace = NumReservedValues;
1131  setNumHungOffUseOperands(UnwindDest ? 2 : 1);
1132  allocHungoffUses(ReservedSpace);
1133 
1134  Op<0>() = ParentPad;
1135  if (UnwindDest) {
1136  setSubclassData<UnwindDestField>(true);
1137  setUnwindDest(UnwindDest);
1138  }
1139 }
1140 
1141 /// growOperands - grow operands - This grows the operand list in response to a
1142 /// push_back style of operation. This grows the number of ops by 2 times.
1143 void CatchSwitchInst::growOperands(unsigned Size) {
1144  unsigned NumOperands = getNumOperands();
1145  assert(NumOperands >= 1);
1146  if (ReservedSpace >= NumOperands + Size)
1147  return;
1148  ReservedSpace = (NumOperands + Size / 2) * 2;
1149  growHungoffUses(ReservedSpace);
1150 }
1151 
1153  unsigned OpNo = getNumOperands();
1154  growOperands(1);
1155  assert(OpNo < ReservedSpace && "Growing didn't work!");
1157  getOperandList()[OpNo] = Handler;
1158 }
1159 
1161  // Move all subsequent handlers up one.
1162  Use *EndDst = op_end() - 1;
1163  for (Use *CurDst = HI.getCurrent(); CurDst != EndDst; ++CurDst)
1164  *CurDst = *(CurDst + 1);
1165  // Null out the last handler use.
1166  *EndDst = nullptr;
1167 
1169 }
1170 
1171 //===----------------------------------------------------------------------===//
1172 // FuncletPadInst Implementation
1173 //===----------------------------------------------------------------------===//
1174 void FuncletPadInst::init(Value *ParentPad, ArrayRef<Value *> Args,
1175  const Twine &NameStr) {
1176  assert(getNumOperands() == 1 + Args.size() && "NumOperands not set up?");
1177  llvm::copy(Args, op_begin());
1178  setParentPad(ParentPad);
1179  setName(NameStr);
1180 }
1181 
1182 FuncletPadInst::FuncletPadInst(const FuncletPadInst &FPI)
1183  : Instruction(FPI.getType(), FPI.getOpcode(),
1184  OperandTraits<FuncletPadInst>::op_end(this) -
1185  FPI.getNumOperands(),
1186  FPI.getNumOperands()) {
1187  std::copy(FPI.op_begin(), FPI.op_end(), op_begin());
1188  setParentPad(FPI.getParentPad());
1189 }
1190 
1191 FuncletPadInst::FuncletPadInst(Instruction::FuncletPadOps Op, Value *ParentPad,
1192  ArrayRef<Value *> Args, unsigned Values,
1193  const Twine &NameStr, Instruction *InsertBefore)
1194  : Instruction(ParentPad->getType(), Op,
1195  OperandTraits<FuncletPadInst>::op_end(this) - Values, Values,
1196  InsertBefore) {
1197  init(ParentPad, Args, NameStr);
1198 }
1199 
1200 FuncletPadInst::FuncletPadInst(Instruction::FuncletPadOps Op, Value *ParentPad,
1201  ArrayRef<Value *> Args, unsigned Values,
1202  const Twine &NameStr, BasicBlock *InsertAtEnd)
1203  : Instruction(ParentPad->getType(), Op,
1204  OperandTraits<FuncletPadInst>::op_end(this) - Values, Values,
1205  InsertAtEnd) {
1206  init(ParentPad, Args, NameStr);
1207 }
1208 
1209 //===----------------------------------------------------------------------===//
1210 // UnreachableInst Implementation
1211 //===----------------------------------------------------------------------===//
1212 
1214  Instruction *InsertBefore)
1215  : Instruction(Type::getVoidTy(Context), Instruction::Unreachable, nullptr,
1216  0, InsertBefore) {}
1218  : Instruction(Type::getVoidTy(Context), Instruction::Unreachable, nullptr,
1219  0, InsertAtEnd) {}
1220 
1221 //===----------------------------------------------------------------------===//
1222 // BranchInst Implementation
1223 //===----------------------------------------------------------------------===//
1224 
1225 void BranchInst::AssertOK() {
1226  if (isConditional())
1227  assert(getCondition()->getType()->isIntegerTy(1) &&
1228  "May only branch on boolean predicates!");
1229 }
1230 
1231 BranchInst::BranchInst(BasicBlock *IfTrue, Instruction *InsertBefore)
1232  : Instruction(Type::getVoidTy(IfTrue->getContext()), Instruction::Br,
1233  OperandTraits<BranchInst>::op_end(this) - 1, 1,
1234  InsertBefore) {
1235  assert(IfTrue && "Branch destination may not be null!");
1236  Op<-1>() = IfTrue;
1237 }
1238 
1239 BranchInst::BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond,
1240  Instruction *InsertBefore)
1241  : Instruction(Type::getVoidTy(IfTrue->getContext()), Instruction::Br,
1242  OperandTraits<BranchInst>::op_end(this) - 3, 3,
1243  InsertBefore) {
1244  Op<-1>() = IfTrue;
1245  Op<-2>() = IfFalse;
1246  Op<-3>() = Cond;
1247 #ifndef NDEBUG
1248  AssertOK();
1249 #endif
1250 }
1251 
1252 BranchInst::BranchInst(BasicBlock *IfTrue, BasicBlock *InsertAtEnd)
1253  : Instruction(Type::getVoidTy(IfTrue->getContext()), Instruction::Br,
1254  OperandTraits<BranchInst>::op_end(this) - 1, 1, InsertAtEnd) {
1255  assert(IfTrue && "Branch destination may not be null!");
1256  Op<-1>() = IfTrue;
1257 }
1258 
1259 BranchInst::BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond,
1260  BasicBlock *InsertAtEnd)
1261  : Instruction(Type::getVoidTy(IfTrue->getContext()), Instruction::Br,
1262  OperandTraits<BranchInst>::op_end(this) - 3, 3, InsertAtEnd) {
1263  Op<-1>() = IfTrue;
1264  Op<-2>() = IfFalse;
1265  Op<-3>() = Cond;
1266 #ifndef NDEBUG
1267  AssertOK();
1268 #endif
1269 }
1270 
1271 BranchInst::BranchInst(const BranchInst &BI)
1272  : Instruction(Type::getVoidTy(BI.getContext()), Instruction::Br,
1273  OperandTraits<BranchInst>::op_end(this) - BI.getNumOperands(),
1274  BI.getNumOperands()) {
1275  Op<-1>() = BI.Op<-1>();
1276  if (BI.getNumOperands() != 1) {
1277  assert(BI.getNumOperands() == 3 && "BR can have 1 or 3 operands!");
1278  Op<-3>() = BI.Op<-3>();
1279  Op<-2>() = BI.Op<-2>();
1280  }
1282 }
1283 
1285  assert(isConditional() &&
1286  "Cannot swap successors of an unconditional branch");
1287  Op<-1>().swap(Op<-2>());
1288 
1289  // Update profile metadata if present and it matches our structural
1290  // expectations.
1291  swapProfMetadata();
1292 }
1293 
1294 //===----------------------------------------------------------------------===//
1295 // AllocaInst Implementation
1296 //===----------------------------------------------------------------------===//
1297 
1299  if (!Amt)
1301  else {
1302  assert(!isa<BasicBlock>(Amt) &&
1303  "Passed basic block into allocation size parameter! Use other ctor");
1304  assert(Amt->getType()->isIntegerTy() &&
1305  "Allocation array size is not an integer!");
1306  }
1307  return Amt;
1308 }
1309 
1311  assert(BB && "Insertion BB cannot be null when alignment not provided!");
1312  assert(BB->getParent() &&
1313  "BB must be in a Function when alignment not provided!");
1314  const DataLayout &DL = BB->getModule()->getDataLayout();
1315  return DL.getPrefTypeAlign(Ty);
1316 }
1317 
1319  assert(I && "Insertion position cannot be null when alignment not provided!");
1320  return computeAllocaDefaultAlign(Ty, I->getParent());
1321 }
1322 
1323 AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, const Twine &Name,
1324  Instruction *InsertBefore)
1325  : AllocaInst(Ty, AddrSpace, /*ArraySize=*/nullptr, Name, InsertBefore) {}
1326 
1327 AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, const Twine &Name,
1328  BasicBlock *InsertAtEnd)
1329  : AllocaInst(Ty, AddrSpace, /*ArraySize=*/nullptr, Name, InsertAtEnd) {}
1330 
1331 AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
1332  const Twine &Name, Instruction *InsertBefore)
1333  : AllocaInst(Ty, AddrSpace, ArraySize,
1334  computeAllocaDefaultAlign(Ty, InsertBefore), Name,
1335  InsertBefore) {}
1336 
1337 AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
1338  const Twine &Name, BasicBlock *InsertAtEnd)
1339  : AllocaInst(Ty, AddrSpace, ArraySize,
1340  computeAllocaDefaultAlign(Ty, InsertAtEnd), Name,
1341  InsertAtEnd) {}
1342 
1343 AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
1344  Align Align, const Twine &Name,
1345  Instruction *InsertBefore)
1346  : UnaryInstruction(PointerType::get(Ty, AddrSpace), Alloca,
1347  getAISize(Ty->getContext(), ArraySize), InsertBefore),
1348  AllocatedType(Ty) {
1350  assert(!Ty->isVoidTy() && "Cannot allocate void!");
1351  setName(Name);
1352 }
1353 
1354 AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
1355  Align Align, const Twine &Name, BasicBlock *InsertAtEnd)
1356  : UnaryInstruction(PointerType::get(Ty, AddrSpace), Alloca,
1357  getAISize(Ty->getContext(), ArraySize), InsertAtEnd),
1358  AllocatedType(Ty) {
1360  assert(!Ty->isVoidTy() && "Cannot allocate void!");
1361  setName(Name);
1362 }
1363 
1364 
1366  if (ConstantInt *CI = dyn_cast<ConstantInt>(getOperand(0)))
1367  return !CI->isOne();
1368  return true;
1369 }
1370 
1371 /// isStaticAlloca - Return true if this alloca is in the entry block of the
1372 /// function and is a constant size. If so, the code generator will fold it
1373 /// into the prolog/epilog code, so it is basically free.
1375  // Must be constant size.
1376  if (!isa<ConstantInt>(getArraySize())) return false;
1377 
1378  // Must be in the entry block.
1379  const BasicBlock *Parent = getParent();
1380  return Parent == &Parent->getParent()->front() && !isUsedWithInAlloca();
1381 }
1382 
1383 //===----------------------------------------------------------------------===//
1384 // LoadInst Implementation
1385 //===----------------------------------------------------------------------===//
1386 
1387 void LoadInst::AssertOK() {
1388  assert(getOperand(0)->getType()->isPointerTy() &&
1389  "Ptr must have pointer type.");
1390  assert(!(isAtomic() && getAlignment() == 0) &&
1391  "Alignment required for atomic load");
1392 }
1393 
1395  assert(BB && "Insertion BB cannot be null when alignment not provided!");
1396  assert(BB->getParent() &&
1397  "BB must be in a Function when alignment not provided!");
1398  const DataLayout &DL = BB->getModule()->getDataLayout();
1399  return DL.getABITypeAlign(Ty);
1400 }
1401 
1403  assert(I && "Insertion position cannot be null when alignment not provided!");
1404  return computeLoadStoreDefaultAlign(Ty, I->getParent());
1405 }
1406 
1408  Instruction *InsertBef)
1409  : LoadInst(Ty, Ptr, Name, /*isVolatile=*/false, InsertBef) {}
1410 
1412  BasicBlock *InsertAE)
1413  : LoadInst(Ty, Ptr, Name, /*isVolatile=*/false, InsertAE) {}
1414 
1415 LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
1416  Instruction *InsertBef)
1417  : LoadInst(Ty, Ptr, Name, isVolatile,
1418  computeLoadStoreDefaultAlign(Ty, InsertBef), InsertBef) {}
1419 
1420 LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
1421  BasicBlock *InsertAE)
1422  : LoadInst(Ty, Ptr, Name, isVolatile,
1423  computeLoadStoreDefaultAlign(Ty, InsertAE), InsertAE) {}
1424 
1425 LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
1426  Align Align, Instruction *InsertBef)
1427  : LoadInst(Ty, Ptr, Name, isVolatile, Align, AtomicOrdering::NotAtomic,
1428  SyncScope::System, InsertBef) {}
1429 
1430 LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
1431  Align Align, BasicBlock *InsertAE)
1432  : LoadInst(Ty, Ptr, Name, isVolatile, Align, AtomicOrdering::NotAtomic,
1433  SyncScope::System, InsertAE) {}
1434 
1435 LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
1436  Align Align, AtomicOrdering Order, SyncScope::ID SSID,
1437  Instruction *InsertBef)
1438  : UnaryInstruction(Ty, Load, Ptr, InsertBef) {
1439  assert(Ty == cast<PointerType>(Ptr->getType())->getElementType());
1442  setAtomic(Order, SSID);
1443  AssertOK();
1444  setName(Name);
1445 }
1446 
1447 LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
1448  Align Align, AtomicOrdering Order, SyncScope::ID SSID,
1449  BasicBlock *InsertAE)
1450  : UnaryInstruction(Ty, Load, Ptr, InsertAE) {
1451  assert(Ty == cast<PointerType>(Ptr->getType())->getElementType());
1454  setAtomic(Order, SSID);
1455  AssertOK();
1456  setName(Name);
1457 }
1458 
1459 //===----------------------------------------------------------------------===//
1460 // StoreInst Implementation
1461 //===----------------------------------------------------------------------===//
1462 
1463 void StoreInst::AssertOK() {
1464  assert(getOperand(0) && getOperand(1) && "Both operands must be non-null!");
1465  assert(getOperand(1)->getType()->isPointerTy() &&
1466  "Ptr must have pointer type!");
1467  assert(getOperand(0)->getType() ==
1468  cast<PointerType>(getOperand(1)->getType())->getElementType()
1469  && "Ptr must be a pointer to Val type!");
1470  assert(!(isAtomic() && getAlignment() == 0) &&
1471  "Alignment required for atomic store");
1472 }
1473 
1475  : StoreInst(val, addr, /*isVolatile=*/false, InsertBefore) {}
1476 
1478  : StoreInst(val, addr, /*isVolatile=*/false, InsertAtEnd) {}
1479 
1480 StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile,
1481  Instruction *InsertBefore)
1482  : StoreInst(val, addr, isVolatile,
1483  computeLoadStoreDefaultAlign(val->getType(), InsertBefore),
1484  InsertBefore) {}
1485 
1486 StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile,
1487  BasicBlock *InsertAtEnd)
1488  : StoreInst(val, addr, isVolatile,
1489  computeLoadStoreDefaultAlign(val->getType(), InsertAtEnd),
1490  InsertAtEnd) {}
1491 
1492 StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, Align Align,
1493  Instruction *InsertBefore)
1494  : StoreInst(val, addr, isVolatile, Align, AtomicOrdering::NotAtomic,
1495  SyncScope::System, InsertBefore) {}
1496 
1497 StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, Align Align,
1498  BasicBlock *InsertAtEnd)
1499  : StoreInst(val, addr, isVolatile, Align, AtomicOrdering::NotAtomic,
1500  SyncScope::System, InsertAtEnd) {}
1501 
1502 StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, Align Align,
1503  AtomicOrdering Order, SyncScope::ID SSID,
1504  Instruction *InsertBefore)
1505  : Instruction(Type::getVoidTy(val->getContext()), Store,
1506  OperandTraits<StoreInst>::op_begin(this),
1507  OperandTraits<StoreInst>::operands(this), InsertBefore) {
1508  Op<0>() = val;
1509  Op<1>() = addr;
1512  setAtomic(Order, SSID);
1513  AssertOK();
1514 }
1515 
1516 StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, Align Align,
1517  AtomicOrdering Order, SyncScope::ID SSID,
1518  BasicBlock *InsertAtEnd)
1519  : Instruction(Type::getVoidTy(val->getContext()), Store,
1520  OperandTraits<StoreInst>::op_begin(this),
1521  OperandTraits<StoreInst>::operands(this), InsertAtEnd) {
1522  Op<0>() = val;
1523  Op<1>() = addr;
1526  setAtomic(Order, SSID);
1527  AssertOK();
1528 }
1529 
1530 
1531 //===----------------------------------------------------------------------===//
1532 // AtomicCmpXchgInst Implementation
1533 //===----------------------------------------------------------------------===//
1534 
1535 void AtomicCmpXchgInst::Init(Value *Ptr, Value *Cmp, Value *NewVal,
1536  Align Alignment, AtomicOrdering SuccessOrdering,
1537  AtomicOrdering FailureOrdering,
1538  SyncScope::ID SSID) {
1539  Op<0>() = Ptr;
1540  Op<1>() = Cmp;
1541  Op<2>() = NewVal;
1542  setSuccessOrdering(SuccessOrdering);
1543  setFailureOrdering(FailureOrdering);
1544  setSyncScopeID(SSID);
1545  setAlignment(Alignment);
1546 
1547  assert(getOperand(0) && getOperand(1) && getOperand(2) &&
1548  "All operands must be non-null!");
1549  assert(getOperand(0)->getType()->isPointerTy() &&
1550  "Ptr must have pointer type!");
1551  assert(getOperand(1)->getType() ==
1552  cast<PointerType>(getOperand(0)->getType())->getElementType()
1553  && "Ptr must be a pointer to Cmp type!");
1554  assert(getOperand(2)->getType() ==
1555  cast<PointerType>(getOperand(0)->getType())->getElementType()
1556  && "Ptr must be a pointer to NewVal type!");
1557  assert(SuccessOrdering != AtomicOrdering::NotAtomic &&
1558  "AtomicCmpXchg instructions must be atomic!");
1559  assert(FailureOrdering != AtomicOrdering::NotAtomic &&
1560  "AtomicCmpXchg instructions must be atomic!");
1561  assert(!isStrongerThan(FailureOrdering, SuccessOrdering) &&
1562  "AtomicCmpXchg failure argument shall be no stronger than the success "
1563  "argument");
1564  assert(FailureOrdering != AtomicOrdering::Release &&
1565  FailureOrdering != AtomicOrdering::AcquireRelease &&
1566  "AtomicCmpXchg failure ordering cannot include release semantics");
1567 }
1568 
1570  Align Alignment,
1571  AtomicOrdering SuccessOrdering,
1572  AtomicOrdering FailureOrdering,
1573  SyncScope::ID SSID,
1574  Instruction *InsertBefore)
1575  : Instruction(
1576  StructType::get(Cmp->getType(), Type::getInt1Ty(Cmp->getContext())),
1577  AtomicCmpXchg, OperandTraits<AtomicCmpXchgInst>::op_begin(this),
1578  OperandTraits<AtomicCmpXchgInst>::operands(this), InsertBefore) {
1579  Init(Ptr, Cmp, NewVal, Alignment, SuccessOrdering, FailureOrdering, SSID);
1580 }
1581 
1583  Align Alignment,
1584  AtomicOrdering SuccessOrdering,
1585  AtomicOrdering FailureOrdering,
1586  SyncScope::ID SSID,
1587  BasicBlock *InsertAtEnd)
1588  : Instruction(
1589  StructType::get(Cmp->getType(), Type::getInt1Ty(Cmp->getContext())),
1590  AtomicCmpXchg, OperandTraits<AtomicCmpXchgInst>::op_begin(this),
1591  OperandTraits<AtomicCmpXchgInst>::operands(this), InsertAtEnd) {
1592  Init(Ptr, Cmp, NewVal, Alignment, SuccessOrdering, FailureOrdering, SSID);
1593 }
1594 
1595 //===----------------------------------------------------------------------===//
1596 // AtomicRMWInst Implementation
1597 //===----------------------------------------------------------------------===//
1598 
1599 void AtomicRMWInst::Init(BinOp Operation, Value *Ptr, Value *Val,
1600  Align Alignment, AtomicOrdering Ordering,
1601  SyncScope::ID SSID) {
1602  Op<0>() = Ptr;
1603  Op<1>() = Val;
1605  setOrdering(Ordering);
1606  setSyncScopeID(SSID);
1607  setAlignment(Alignment);
1608 
1609  assert(getOperand(0) && getOperand(1) &&
1610  "All operands must be non-null!");
1611  assert(getOperand(0)->getType()->isPointerTy() &&
1612  "Ptr must have pointer type!");
1613  assert(getOperand(1)->getType() ==
1614  cast<PointerType>(getOperand(0)->getType())->getElementType()
1615  && "Ptr must be a pointer to Val type!");
1616  assert(Ordering != AtomicOrdering::NotAtomic &&
1617  "AtomicRMW instructions must be atomic!");
1618 }
1619 
1621  Align Alignment, AtomicOrdering Ordering,
1622  SyncScope::ID SSID, Instruction *InsertBefore)
1623  : Instruction(Val->getType(), AtomicRMW,
1624  OperandTraits<AtomicRMWInst>::op_begin(this),
1625  OperandTraits<AtomicRMWInst>::operands(this), InsertBefore) {
1626  Init(Operation, Ptr, Val, Alignment, Ordering, SSID);
1627 }
1628 
1630  Align Alignment, AtomicOrdering Ordering,
1631  SyncScope::ID SSID, BasicBlock *InsertAtEnd)
1632  : Instruction(Val->getType(), AtomicRMW,
1633  OperandTraits<AtomicRMWInst>::op_begin(this),
1634  OperandTraits<AtomicRMWInst>::operands(this), InsertAtEnd) {
1635  Init(Operation, Ptr, Val, Alignment, Ordering, SSID);
1636 }
1637 
1639  switch (Op) {
1640  case AtomicRMWInst::Xchg:
1641  return "xchg";
1642  case AtomicRMWInst::Add:
1643  return "add";
1644  case AtomicRMWInst::Sub:
1645  return "sub";
1646  case AtomicRMWInst::And:
1647  return "and";
1648  case AtomicRMWInst::Nand:
1649  return "nand";
1650  case AtomicRMWInst::Or:
1651  return "or";
1652  case AtomicRMWInst::Xor:
1653  return "xor";
1654  case AtomicRMWInst::Max:
1655  return "max";
1656  case AtomicRMWInst::Min:
1657  return "min";
1658  case AtomicRMWInst::UMax:
1659  return "umax";
1660  case AtomicRMWInst::UMin:
1661  return "umin";
1662  case AtomicRMWInst::FAdd:
1663  return "fadd";
1664  case AtomicRMWInst::FSub:
1665  return "fsub";
1667  return "<invalid operation>";
1668  }
1669 
1670  llvm_unreachable("invalid atomicrmw operation");
1671 }
1672 
1673 //===----------------------------------------------------------------------===//
1674 // FenceInst Implementation
1675 //===----------------------------------------------------------------------===//
1676 
1678  SyncScope::ID SSID,
1679  Instruction *InsertBefore)
1680  : Instruction(Type::getVoidTy(C), Fence, nullptr, 0, InsertBefore) {
1681  setOrdering(Ordering);
1682  setSyncScopeID(SSID);
1683 }
1684 
1686  SyncScope::ID SSID,
1687  BasicBlock *InsertAtEnd)
1688  : Instruction(Type::getVoidTy(C), Fence, nullptr, 0, InsertAtEnd) {
1689  setOrdering(Ordering);
1690  setSyncScopeID(SSID);
1691 }
1692 
1693 //===----------------------------------------------------------------------===//
1694 // GetElementPtrInst Implementation
1695 //===----------------------------------------------------------------------===//
1696 
1697 void GetElementPtrInst::init(Value *Ptr, ArrayRef<Value *> IdxList,
1698  const Twine &Name) {
1699  assert(getNumOperands() == 1 + IdxList.size() &&
1700  "NumOperands not initialized?");
1701  Op<0>() = Ptr;
1702  llvm::copy(IdxList, op_begin() + 1);
1703  setName(Name);
1704 }
1705 
1706 GetElementPtrInst::GetElementPtrInst(const GetElementPtrInst &GEPI)
1707  : Instruction(GEPI.getType(), GetElementPtr,
1709  GEPI.getNumOperands(),
1710  GEPI.getNumOperands()),
1711  SourceElementType(GEPI.SourceElementType),
1712  ResultElementType(GEPI.ResultElementType) {
1713  std::copy(GEPI.op_begin(), GEPI.op_end(), op_begin());
1715 }
1716 
1718  if (auto *Struct = dyn_cast<StructType>(Ty)) {
1719  if (!Struct->indexValid(Idx))
1720  return nullptr;
1721  return Struct->getTypeAtIndex(Idx);
1722  }
1723  if (!Idx->getType()->isIntOrIntVectorTy())
1724  return nullptr;
1725  if (auto *Array = dyn_cast<ArrayType>(Ty))
1726  return Array->getElementType();
1727  if (auto *Vector = dyn_cast<VectorType>(Ty))
1728  return Vector->getElementType();
1729  return nullptr;
1730 }
1731 
1733  if (auto *Struct = dyn_cast<StructType>(Ty)) {
1734  if (Idx >= Struct->getNumElements())
1735  return nullptr;
1736  return Struct->getElementType(Idx);
1737  }
1738  if (auto *Array = dyn_cast<ArrayType>(Ty))
1739  return Array->getElementType();
1740  if (auto *Vector = dyn_cast<VectorType>(Ty))
1741  return Vector->getElementType();
1742  return nullptr;
1743 }
1744 
1745 template <typename IndexTy>
1747  if (IdxList.empty())
1748  return Ty;
1749  for (IndexTy V : IdxList.slice(1)) {
1751  if (!Ty)
1752  return Ty;
1753  }
1754  return Ty;
1755 }
1756 
1758  return getIndexedTypeInternal(Ty, IdxList);
1759 }
1760 
1762  ArrayRef<Constant *> IdxList) {
1763  return getIndexedTypeInternal(Ty, IdxList);
1764 }
1765 
1767  return getIndexedTypeInternal(Ty, IdxList);
1768 }
1769 
1770 /// hasAllZeroIndices - Return true if all of the indices of this GEP are
1771 /// zeros. If so, the result pointer and the first operand have the same
1772 /// value, just potentially different types.
1774  for (unsigned i = 1, e = getNumOperands(); i != e; ++i) {
1775  if (ConstantInt *CI = dyn_cast<ConstantInt>(getOperand(i))) {
1776  if (!CI->isZero()) return false;
1777  } else {
1778  return false;
1779  }
1780  }
1781  return true;
1782 }
1783 
1784 /// hasAllConstantIndices - Return true if all of the indices of this GEP are
1785 /// constant integers. If so, the result pointer and the first operand have
1786 /// a constant offset between them.
1788  for (unsigned i = 1, e = getNumOperands(); i != e; ++i) {
1789  if (!isa<ConstantInt>(getOperand(i)))
1790  return false;
1791  }
1792  return true;
1793 }
1794 
1796  cast<GEPOperator>(this)->setIsInBounds(B);
1797 }
1798 
1800  return cast<GEPOperator>(this)->isInBounds();
1801 }
1802 
1804  APInt &Offset) const {
1805  // Delegate to the generic GEPOperator implementation.
1806  return cast<GEPOperator>(this)->accumulateConstantOffset(DL, Offset);
1807 }
1808 
1809 //===----------------------------------------------------------------------===//
1810 // ExtractElementInst Implementation
1811 //===----------------------------------------------------------------------===//
1812 
1813 ExtractElementInst::ExtractElementInst(Value *Val, Value *Index,
1814  const Twine &Name,
1815  Instruction *InsertBef)
1816  : Instruction(cast<VectorType>(Val->getType())->getElementType(),
1817  ExtractElement,
1819  2, InsertBef) {
1820  assert(isValidOperands(Val, Index) &&
1821  "Invalid extractelement instruction operands!");
1822  Op<0>() = Val;
1823  Op<1>() = Index;
1824  setName(Name);
1825 }
1826 
/// Insert-at-end variant: same construction, appended to \p InsertAE.
ExtractElementInst::ExtractElementInst(Value *Val, Value *Index,
                                       const Twine &Name,
                                       BasicBlock *InsertAE)
  : Instruction(cast<VectorType>(Val->getType())->getElementType(),
                ExtractElement,
                2, InsertAE) {
  assert(isValidOperands(Val, Index) &&
         "Invalid extractelement instruction operands!");

  Op<0>() = Val;
  Op<1>() = Index;
  setName(Name);
}
1841 
  // The value must be a vector and the index an integer of any width.
  if (!Val->getType()->isVectorTy() || !Index->getType()->isIntegerTy())
    return false;
  return true;
}
1847 
1848 //===----------------------------------------------------------------------===//
1849 // InsertElementInst Implementation
1850 //===----------------------------------------------------------------------===//
1851 
/// Construct an insertelement placing \p Elt into vector \p Vec at position
/// \p Index; the result type is the vector type itself. Inserted before
/// \p InsertBef when non-null.
InsertElementInst::InsertElementInst(Value *Vec, Value *Elt, Value *Index,
                                     const Twine &Name,
                                     Instruction *InsertBef)
  : Instruction(Vec->getType(), InsertElement,
                OperandTraits<InsertElementInst>::op_begin(this),
                3, InsertBef) {
  assert(isValidOperands(Vec, Elt, Index) &&
         "Invalid insertelement instruction operands!");
  Op<0>() = Vec;
  Op<1>() = Elt;
  Op<2>() = Index;
  setName(Name);
}
1865 
/// Insert-at-end variant: same construction, appended to \p InsertAE.
InsertElementInst::InsertElementInst(Value *Vec, Value *Elt, Value *Index,
                                     const Twine &Name,
                                     BasicBlock *InsertAE)
  : Instruction(Vec->getType(), InsertElement,
                OperandTraits<InsertElementInst>::op_begin(this),
                3, InsertAE) {
  assert(isValidOperands(Vec, Elt, Index) &&
         "Invalid insertelement instruction operands!");

  Op<0>() = Vec;
  Op<1>() = Elt;
  Op<2>() = Index;
  setName(Name);
}
1880 
1881 bool InsertElementInst::isValidOperands(const Value *Vec, const Value *Elt,
1882  const Value *Index) {
1883  if (!Vec->getType()->isVectorTy())
1884  return false; // First operand of insertelement must be vector type.
1885 
1886  if (Elt->getType() != cast<VectorType>(Vec->getType())->getElementType())
1887  return false;// Second operand of insertelement must be vector element type.
1888 
1889  if (!Index->getType()->isIntegerTy())
1890  return false; // Third operand of insertelement must be i32.
1891  return true;
1892 }
1893 
1894 //===----------------------------------------------------------------------===//
1895 // ShuffleVectorInst Implementation
1896 //===----------------------------------------------------------------------===//
1897 
                                     const Twine &Name,
                                     Instruction *InsertBefore)
    : Instruction(
          VectorType::get(cast<VectorType>(V1->getType())->getElementType(),
                          cast<VectorType>(Mask->getType())->getElementCount()),
          ShuffleVector, OperandTraits<ShuffleVectorInst>::op_begin(this),
          OperandTraits<ShuffleVectorInst>::operands(this), InsertBefore) {
  assert(isValidOperands(V1, V2, Mask) &&
         "Invalid shuffle vector instruction operands!");

  Op<0>() = V1;
  Op<1>() = V2;
  // Decode the constant mask once into plain integers and cache it on the
  // instruction.
  SmallVector<int, 16> MaskArr;
  getShuffleMask(cast<Constant>(Mask), MaskArr);
  setShuffleMask(MaskArr);
  setName(Name);
}
1916 
                                     const Twine &Name, BasicBlock *InsertAtEnd)
    : Instruction(
          VectorType::get(cast<VectorType>(V1->getType())->getElementType(),
                          cast<VectorType>(Mask->getType())->getElementCount()),
          ShuffleVector, OperandTraits<ShuffleVectorInst>::op_begin(this),
          OperandTraits<ShuffleVectorInst>::operands(this), InsertAtEnd) {
  assert(isValidOperands(V1, V2, Mask) &&
         "Invalid shuffle vector instruction operands!");

  Op<0>() = V1;
  Op<1>() = V2;
  // Decode the constant mask once into plain integers and cache it on the
  // instruction.
  SmallVector<int, 16> MaskArr;
  getShuffleMask(cast<Constant>(Mask), MaskArr);
  setShuffleMask(MaskArr);
  setName(Name);
}
1934 
                                     const Twine &Name,
                                     Instruction *InsertBefore)
    : Instruction(
          // Result has Mask.size() elements of V1's element type; scalability
          // follows V1.
          VectorType::get(cast<VectorType>(V1->getType())->getElementType(),
                          Mask.size(), isa<ScalableVectorType>(V1->getType())),
          ShuffleVector, OperandTraits<ShuffleVectorInst>::op_begin(this),
          OperandTraits<ShuffleVectorInst>::operands(this), InsertBefore) {
  assert(isValidOperands(V1, V2, Mask) &&
         "Invalid shuffle vector instruction operands!");
  Op<0>() = V1;
  Op<1>() = V2;
  setName(Name);
}
1950 
                                     const Twine &Name, BasicBlock *InsertAtEnd)
    : Instruction(
          // Result has Mask.size() elements of V1's element type; scalability
          // follows V1.
          VectorType::get(cast<VectorType>(V1->getType())->getElementType(),
                          Mask.size(), isa<ScalableVectorType>(V1->getType())),
          ShuffleVector, OperandTraits<ShuffleVectorInst>::op_begin(this),
          OperandTraits<ShuffleVectorInst>::operands(this), InsertAtEnd) {
  assert(isValidOperands(V1, V2, Mask) &&
         "Invalid shuffle vector instruction operands!");

  Op<0>() = V1;
  Op<1>() = V2;
  setName(Name);
}
1966 
  int NumOpElts = cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
  int NumMaskElts = ShuffleMask.size();
  SmallVector<int, 16> NewMask(NumMaskElts);
  for (int i = 0; i != NumMaskElts; ++i) {
    int MaskElt = getMaskValue(i);
    // Undef elements stay undef.
    if (MaskElt == UndefMaskElem) {
      NewMask[i] = UndefMaskElem;
      continue;
    }
    assert(MaskElt >= 0 && MaskElt < 2 * NumOpElts && "Out-of-range mask");
    // Shift the element to refer to the same lane of the other operand.
    MaskElt = (MaskElt < NumOpElts) ? MaskElt + NumOpElts : MaskElt - NumOpElts;
    NewMask[i] = MaskElt;
  }
  // Install the remapped mask and swap the two vector operands to match.
  setShuffleMask(NewMask);
  Op<0>().swap(Op<1>());
}
1984 
                                        ArrayRef<int> Mask) {
  // V1 and V2 must be vectors of the same type.
  if (!isa<VectorType>(V1->getType()) || V1->getType() != V2->getType())
    return false;

  // Make sure the mask elements make sense: each must be undef (-1) or an
  // index into the concatenation of the two sources.
  int V1Size =
      cast<VectorType>(V1->getType())->getElementCount().getKnownMinValue();
  for (int Elem : Mask)
    if (Elem != UndefMaskElem && Elem >= V1Size * 2)
      return false;

  // Scalable vectors only admit an all-zeros or all-undef splat mask.
  if (isa<ScalableVectorType>(V1->getType()))
    if ((Mask[0] != 0 && Mask[0] != UndefMaskElem) || !is_splat(Mask))
      return false;

  return true;
}
2004 
                                        const Value *Mask) {
  // V1 and V2 must be vectors of the same type.
  if (!V1->getType()->isVectorTy() || V1->getType() != V2->getType())
    return false;

  // Mask must be vector of i32, and must be the same kind of vector as the
  // input vectors
  auto *MaskTy = dyn_cast<VectorType>(Mask->getType());
  if (!MaskTy || !MaskTy->getElementType()->isIntegerTy(32) ||
      isa<ScalableVectorType>(MaskTy) != isa<ScalableVectorType>(V1->getType()))
    return false;

  // Check to see if Mask is valid.
  if (isa<UndefValue>(Mask) || isa<ConstantAggregateZero>(Mask))
    return true;

  // A ConstantVector mask is valid when each element is undef or an index
  // into the concatenation of the two sources (< 2 * V1Size).
  if (const auto *MV = dyn_cast<ConstantVector>(Mask)) {
    unsigned V1Size = cast<FixedVectorType>(V1->getType())->getNumElements();
    for (Value *Op : MV->operands()) {
      if (auto *CI = dyn_cast<ConstantInt>(Op)) {
        if (CI->uge(V1Size*2))
          return false;
      } else if (!isa<UndefValue>(Op)) {
        return false;
      }
    }
    return true;
  }

  // Same range check for the packed ConstantDataSequential representation.
  if (const auto *CDS = dyn_cast<ConstantDataSequential>(Mask)) {
    unsigned V1Size = cast<FixedVectorType>(V1->getType())->getNumElements();
    for (unsigned i = 0, e = cast<FixedVectorType>(MaskTy)->getNumElements();
         i != e; ++i)
      if (CDS->getElementAsInteger(i) >= V1Size*2)
        return false;
    return true;
  }

  // Any other constant form is not a valid shuffle mask.
  return false;
}
2046 
                                       SmallVectorImpl<int> &Result) {
  ElementCount EC = cast<VectorType>(Mask->getType())->getElementCount();

  // zeroinitializer: every element selects element 0.
  if (isa<ConstantAggregateZero>(Mask)) {
    Result.resize(EC.getKnownMinValue(), 0);
    return;
  }

  Result.reserve(EC.getKnownMinValue());

  if (EC.isScalable()) {
    assert((isa<ConstantAggregateZero>(Mask) || isa<UndefValue>(Mask)) &&
           "Scalable vector shuffle mask must be undef or zeroinitializer");
    // Undef decodes to -1 in every lane, zeroinitializer to 0.
    int MaskVal = isa<UndefValue>(Mask) ? -1 : 0;
    for (unsigned I = 0; I < EC.getKnownMinValue(); ++I)
      Result.emplace_back(MaskVal);
    return;
  }

  unsigned NumElts = EC.getKnownMinValue();

  // Fast path for the packed constant-data representation.
  if (auto *CDS = dyn_cast<ConstantDataSequential>(Mask)) {
    for (unsigned i = 0; i != NumElts; ++i)
      Result.push_back(CDS->getElementAsInteger(i));
    return;
  }
  // General case: walk the aggregate; undef elements decode to -1.
  for (unsigned i = 0; i != NumElts; ++i) {
    Constant *C = Mask->getAggregateElement(i);
    Result.push_back(isa<UndefValue>(C) ? -1 :
                     cast<ConstantInt>(C)->getZExtValue());
  }
}
2080 
  // Keep the integer mask and its Constant form (used for bitcode) in sync.
  ShuffleMask.assign(Mask.begin(), Mask.end());
  ShuffleMaskForBitcode = convertShuffleMaskForBitcode(Mask, getType());
}
                                               Type *ResultTy) {
  Type *Int32Ty = Type::getInt32Ty(ResultTy->getContext());
  if (isa<ScalableVectorType>(ResultTy)) {
    // Scalable masks are restricted to splats: encode as zeroinitializer or
    // undef of a scalable i32 vector.
    assert(is_splat(Mask) && "Unexpected shuffle");
    Type *VecTy = VectorType::get(Int32Ty, Mask.size(), true);
    if (Mask[0] == 0)
      return Constant::getNullValue(VecTy);
    return UndefValue::get(VecTy);
  }
  // Fixed-width: one i32 per element; -1 becomes an undef element.
  SmallVector<Constant *, 16> MaskConst;
  for (int Elem : Mask) {
    if (Elem == UndefMaskElem)
      MaskConst.push_back(UndefValue::get(Int32Ty));
    else
      MaskConst.push_back(ConstantInt::get(Int32Ty, Elem));
  }
  return ConstantVector::get(MaskConst);
}
2104 
2105 static bool isSingleSourceMaskImpl(ArrayRef<int> Mask, int NumOpElts) {
2106  assert(!Mask.empty() && "Shuffle mask must contain elements");
2107  bool UsesLHS = false;
2108  bool UsesRHS = false;
2109  for (int I : Mask) {
2110  if (I == -1)
2111  continue;
2112  assert(I >= 0 && I < (NumOpElts * 2) &&
2113  "Out-of-bounds shuffle mask element");
2114  UsesLHS |= (I < NumOpElts);
2115  UsesRHS |= (I >= NumOpElts);
2116  if (UsesLHS && UsesRHS)
2117  return false;
2118  }
2119  // Allow for degenerate case: completely undef mask means neither source is used.
2120  return UsesLHS || UsesRHS;
2121 }
2122 
  // We don't have vector operand size information, so assume operands are the
  // same size as the mask.
  return isSingleSourceMaskImpl(Mask, Mask.size());
}
2128 
2129 static bool isIdentityMaskImpl(ArrayRef<int> Mask, int NumOpElts) {
2130  if (!isSingleSourceMaskImpl(Mask, NumOpElts))
2131  return false;
2132  for (int i = 0, NumMaskElts = Mask.size(); i < NumMaskElts; ++i) {
2133  if (Mask[i] == -1)
2134  continue;
2135  if (Mask[i] != i && Mask[i] != (NumOpElts + i))
2136  return false;
2137  }
2138  return true;
2139 }
2140 
  // We don't have vector operand size information, so assume operands are the
  // same size as the mask.
  return isIdentityMaskImpl(Mask, Mask.size());
}
2146 
  if (!isSingleSourceMask(Mask))
    return false;
  // Each defined element must select the mirrored lane of either source.
  for (int i = 0, NumElts = Mask.size(); i < NumElts; ++i) {
    if (Mask[i] == -1)
      continue;
    if (Mask[i] != (NumElts - 1 - i) && Mask[i] != (NumElts + NumElts - 1 - i))
      return false;
  }
  return true;
}
2158 
  if (!isSingleSourceMask(Mask))
    return false;
  // Each defined element must select lane 0 of either source.
  for (int i = 0, NumElts = Mask.size(); i < NumElts; ++i) {
    if (Mask[i] == -1)
      continue;
    if (Mask[i] != 0 && Mask[i] != NumElts)
      return false;
  }
  return true;
}
2170 
  // Select is differentiated from identity. It requires using both sources.
  if (isSingleSourceMask(Mask))
    return false;
  // Each defined element must stay in its own lane, taken from either source.
  for (int i = 0, NumElts = Mask.size(); i < NumElts; ++i) {
    if (Mask[i] == -1)
      continue;
    if (Mask[i] != i && Mask[i] != (NumElts + i))
      return false;
  }
  return true;
}
2183 
  // Example masks that will return true:
  // v1 = <a, b, c, d>
  // v2 = <e, f, g, h>
  // trn1 = shufflevector v1, v2 <0, 4, 2, 6> = <a, e, c, g>
  // trn2 = shufflevector v1, v2 <1, 5, 3, 7> = <b, f, d, h>

  // 1. The number of elements in the mask must be a power-of-2 and at least 2.
  int NumElts = Mask.size();
  if (NumElts < 2 || !isPowerOf2_32(NumElts))
    return false;

  // 2. The first element of the mask must be either a 0 or a 1.
  if (Mask[0] != 0 && Mask[0] != 1)
    return false;

  // 3. The difference between the first 2 elements must be equal to the
  // number of elements in the mask.
  if ((Mask[1] - Mask[0]) != NumElts)
    return false;

  // 4. The difference between consecutive even-numbered and odd-numbered
  // elements must be equal to 2. Undef elements are not allowed here.
  for (int i = 2; i < NumElts; ++i) {
    int MaskEltVal = Mask[i];
    if (MaskEltVal == -1)
      return false;
    int MaskEltPrevVal = Mask[i - 2];
    if (MaskEltVal - MaskEltPrevVal != 2)
      return false;
  }
  return true;
}
2217 
                                          int NumSrcElts, int &Index) {
  // Must extract from a single source.
  if (!isSingleSourceMaskImpl(Mask, NumSrcElts))
    return false;

  // Must be smaller (else this is an Identity shuffle).
  if (NumSrcElts <= (int)Mask.size())
    return false;

  // Find start of extraction, accounting that we may start with an UNDEF.
  int SubIndex = -1;
  for (int i = 0, e = Mask.size(); i != e; ++i) {
    int M = Mask[i];
    if (M < 0)
      continue;
    // Every defined element must agree on a single common start offset.
    int Offset = (M % NumSrcElts) - i;
    if (0 <= SubIndex && SubIndex != Offset)
      return false;
    SubIndex = Offset;
  }

  // The extracted subvector must lie entirely within one source operand.
  if (0 <= SubIndex && SubIndex + (int)Mask.size() <= NumSrcElts) {
    Index = SubIndex;
    return true;
  }
  return false;
}
2246 
  if (isa<UndefValue>(Op<2>()))
    return false;

  // FIXME: Not currently possible to express a shuffle mask for a scalable
  // vector for this case.
  if (isa<ScalableVectorType>(getType()))
    return false;

  // Padding requires the result to be wider than the (equal-sized) inputs.
  int NumOpElts = cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
  int NumMaskElts = cast<FixedVectorType>(getType())->getNumElements();
  if (NumMaskElts <= NumOpElts)
    return false;

  // The first part of the mask must choose elements from exactly 1 source op.
  if (!isIdentityMaskImpl(Mask, NumOpElts))
    return false;

  // All extending must be with undef elements.
  for (int i = NumOpElts; i < NumMaskElts; ++i)
    if (Mask[i] != -1)
      return false;

  return true;
}
2273 
  if (isa<UndefValue>(Op<2>()))
    return false;

  // FIXME: Not currently possible to express a shuffle mask for a scalable
  // vector for this case.
  if (isa<ScalableVectorType>(getType()))
    return false;

  // Extraction requires the result to be narrower than the inputs, with the
  // surviving lanes copied through unchanged.
  int NumOpElts = cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
  int NumMaskElts = cast<FixedVectorType>(getType())->getNumElements();
  if (NumMaskElts >= NumOpElts)
    return false;

  return isIdentityMaskImpl(getShuffleMask(), NumOpElts);
}
2290 
  // Vector concatenation is differentiated from identity with padding.
  if (isa<UndefValue>(Op<0>()) || isa<UndefValue>(Op<1>()) ||
      isa<UndefValue>(Op<2>()))
    return false;

  // FIXME: Not currently possible to express a shuffle mask for a scalable
  // vector for this case.
  if (isa<ScalableVectorType>(getType()))
    return false;

  // The result must be exactly twice as wide as each input.
  int NumOpElts = cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
  int NumMaskElts = cast<FixedVectorType>(getType())->getNumElements();
  if (NumMaskElts != NumOpElts * 2)
    return false;

  // Use the mask length rather than the operands' vector lengths here. We
  // already know that the shuffle returns a vector twice as long as the inputs,
  // and neither of the inputs are undef vectors. If the mask picks consecutive
  // elements from both inputs, then this is a concatenation of the inputs.
  return isIdentityMaskImpl(getShuffleMask(), NumMaskElts);
}
2313 
2314 //===----------------------------------------------------------------------===//
2315 // InsertValueInst Class
2316 //===----------------------------------------------------------------------===//
2317 
/// Shared constructor body: wire up the aggregate and value operands, record
/// the index list, and name the result.
void InsertValueInst::init(Value *Agg, Value *Val, ArrayRef<unsigned> Idxs,
                           const Twine &Name) {
  assert(getNumOperands() == 2 && "NumOperands not initialized?");

  // There's no fundamental reason why we require at least one index
  // (other than weirdness with &*IdxBegin being invalid; see
  // getelementptr's init routine for example). But there's no
  // present need to support it.
  assert(!Idxs.empty() && "InsertValueInst must have at least one index");

         Val->getType() && "Inserted value must match indexed type!");
  Op<0>() = Agg;
  Op<1>() = Val;

  // Indices are stored out-of-line on the instruction, not as operands.
  Indices.append(Idxs.begin(), Idxs.end());
  setName(Name);
}
2336 
/// Copy constructor: duplicates both operands and the out-of-line index list.
InsertValueInst::InsertValueInst(const InsertValueInst &IVI)
  : Instruction(IVI.getType(), InsertValue,
                OperandTraits<InsertValueInst>::op_begin(this), 2),
    Indices(IVI.Indices) {
  Op<0>() = IVI.getOperand(0);
  Op<1>() = IVI.getOperand(1);
}
2345 
2346 //===----------------------------------------------------------------------===//
2347 // ExtractValueInst Class
2348 //===----------------------------------------------------------------------===//
2349 
2350 void ExtractValueInst::init(ArrayRef<unsigned> Idxs, const Twine &Name) {
2351  assert(getNumOperands() == 1 && "NumOperands not initialized?");
2352 
2353  // There's no fundamental reason why we require at least one index.
2354  // But there's no present need to support it.
2355  assert(!Idxs.empty() && "ExtractValueInst must have at least one index");
2356 
2357  Indices.append(Idxs.begin(), Idxs.end());
2358  setName(Name);
2359 }
2360 
/// Copy constructor: duplicates the aggregate operand and the index list.
ExtractValueInst::ExtractValueInst(const ExtractValueInst &EVI)
  : UnaryInstruction(EVI.getType(), ExtractValue, EVI.getOperand(0)),
    Indices(EVI.Indices) {
}
2366 
2367 // getIndexedType - Returns the type of the element that would be extracted
2368 // with an extractvalue instruction with the specified parameters.
2369 //
2370 // A null type is returned if the indices are invalid for the specified
2371 // pointer type.
2372 //
                                        ArrayRef<unsigned> Idxs) {
  for (unsigned Index : Idxs) {
    // We can't use CompositeType::indexValid(Index) here.
    // indexValid() always returns true for arrays because getelementptr allows
    // out-of-bounds indices. Since we don't allow those for extractvalue and
    // insertvalue we need to check array indexing manually.
    // Since the only other types we can index into are struct types it's just
    // as easy to check those manually as well.
    if (ArrayType *AT = dyn_cast<ArrayType>(Agg)) {
      if (Index >= AT->getNumElements())
        return nullptr;
      Agg = AT->getElementType();
    } else if (StructType *ST = dyn_cast<StructType>(Agg)) {
      if (Index >= ST->getNumElements())
        return nullptr;
      Agg = ST->getElementType(Index);
    } else {
      // Not a valid type to index into.
      return nullptr;
    }
  }
  // Every index was in range; Agg is now the indexed element's type.
  return const_cast<Type*>(Agg);
}
2397 
2398 //===----------------------------------------------------------------------===//
2399 // UnaryOperator Class
2400 //===----------------------------------------------------------------------===//
2401 
                             Type *Ty, const Twine &Name,
                             Instruction *InsertBefore)
  : UnaryInstruction(Ty, iType, S, InsertBefore) {
  Op<0>() = S;
  setName(Name);
  // Type-check the new instruction (checks are active in !NDEBUG only).
  AssertOK();
}
2410 
                             Type *Ty, const Twine &Name,
                             BasicBlock *InsertAtEnd)
  : UnaryInstruction(Ty, iType, S, InsertAtEnd) {
  Op<0>() = S;
  setName(Name);
  // Type-check the new instruction (checks are active in !NDEBUG only).
  AssertOK();
}
2419 
                                     const Twine &Name,
                                     Instruction *InsertBefore) {
  // The result type of a unary operator matches its operand's type.
  return new UnaryOperator(Op, S, S->getType(), Name, InsertBefore);
}
2425 
                                     const Twine &Name,
                                     BasicBlock *InsertAtEnd) {
  // Build the instruction unattached, then append it to the block.
  UnaryOperator *Res = Create(Op, S, Name);
  InsertAtEnd->getInstList().push_back(Res);
  return Res;
}
2433 
/// Sanity-check a newly constructed unary operator: the result type must
/// match the operand type and suit the opcode. All checks compile away in
/// NDEBUG builds.
void UnaryOperator::AssertOK() {
  Value *LHS = getOperand(0);
  (void)LHS; // Silence warnings.
#ifndef NDEBUG
  switch (getOpcode()) {
  case FNeg:
    assert(getType() == LHS->getType() &&
           "Unary operation should return same type as operand!");
    assert(getType()->isFPOrFPVectorTy() &&
           "Tried to create a floating-point operation on a "
           "non-floating-point type!");
    break;
  default: llvm_unreachable("Invalid opcode provided");
  }
#endif
}
2450 
2451 //===----------------------------------------------------------------------===//
2452 // BinaryOperator Class
2453 //===----------------------------------------------------------------------===//
2454 
                               Type *Ty, const Twine &Name,
                               Instruction *InsertBefore)
  : Instruction(Ty, iType,
                OperandTraits<BinaryOperator>::op_begin(this),
                OperandTraits<BinaryOperator>::operands(this),
                InsertBefore) {
  Op<0>() = S1;
  Op<1>() = S2;
  setName(Name);
  // Type-check the new instruction (most checks are active in !NDEBUG only).
  AssertOK();
}
2467 
                               Type *Ty, const Twine &Name,
                               BasicBlock *InsertAtEnd)
  : Instruction(Ty, iType,
                OperandTraits<BinaryOperator>::op_begin(this),
                OperandTraits<BinaryOperator>::operands(this),
                InsertAtEnd) {
  Op<0>() = S1;
  Op<1>() = S2;
  setName(Name);
  // Type-check the new instruction (most checks are active in !NDEBUG only).
  AssertOK();
}
2480 
/// Sanity-check a newly constructed binary operator: operand types must match
/// each other and the result type, and must suit the opcode's category
/// (integer vs. floating point). All checks compile away in NDEBUG builds.
void BinaryOperator::AssertOK() {
  Value *LHS = getOperand(0), *RHS = getOperand(1);
  (void)LHS; (void)RHS; // Silence warnings.
  assert(LHS->getType() == RHS->getType() &&
         "Binary operator operand types must match!");
#ifndef NDEBUG
  switch (getOpcode()) {
  case Add: case Sub:
  case Mul:
    assert(getType() == LHS->getType() &&
           "Arithmetic operation should return same type as operands!");
    assert(getType()->isIntOrIntVectorTy() &&
           "Tried to create an integer operation on a non-integer type!");
    break;
  case FAdd: case FSub:
  case FMul:
    assert(getType() == LHS->getType() &&
           "Arithmetic operation should return same type as operands!");
    assert(getType()->isFPOrFPVectorTy() &&
           "Tried to create a floating-point operation on a "
           "non-floating-point type!");
    break;
  case UDiv:
  case SDiv:
    assert(getType() == LHS->getType() &&
           "Arithmetic operation should return same type as operands!");
    assert(getType()->isIntOrIntVectorTy() &&
           "Incorrect operand type (not integer) for S/UDIV");
    break;
  case FDiv:
    assert(getType() == LHS->getType() &&
           "Arithmetic operation should return same type as operands!");
    assert(getType()->isFPOrFPVectorTy() &&
           "Incorrect operand type (not floating point) for FDIV");
    break;
  case URem:
  case SRem:
    assert(getType() == LHS->getType() &&
           "Arithmetic operation should return same type as operands!");
    assert(getType()->isIntOrIntVectorTy() &&
           "Incorrect operand type (not integer) for S/UREM");
    break;
  case FRem:
    assert(getType() == LHS->getType() &&
           "Arithmetic operation should return same type as operands!");
    assert(getType()->isFPOrFPVectorTy() &&
           "Incorrect operand type (not floating point) for FREM");
    break;
  case Shl:
  case LShr:
  case AShr:
    assert(getType() == LHS->getType() &&
           "Shift operation should return same type as operands!");
    assert(getType()->isIntOrIntVectorTy() &&
           "Tried to create a shift operation on a non-integral type!");
    break;
  case And: case Or:
  case Xor:
    assert(getType() == LHS->getType() &&
           "Logical operation should return same type as operands!");
    assert(getType()->isIntOrIntVectorTy() &&
           "Tried to create a logical operation on a non-integral type!");
    break;
  default: llvm_unreachable("Invalid opcode provided");
  }
#endif
}
2548 
                                       const Twine &Name,
                                       Instruction *InsertBefore) {
  assert(S1->getType() == S2->getType() &&
         "Cannot create binary operator with two operands of differing type!");
  // The result type of a binary operator matches its operands' type.
  return new BinaryOperator(Op, S1, S2, S1->getType(), Name, InsertBefore);
}
2556 
                                       const Twine &Name,
                                       BasicBlock *InsertAtEnd) {
  // Build the instruction unattached, then append it to the block.
  BinaryOperator *Res = Create(Op, S1, S2, Name);
  InsertAtEnd->getInstList().push_back(Res);
  return Res;
}
2564 
                                          Instruction *InsertBefore) {
  // Negation is materialized as (0 - Op).
  return new BinaryOperator(Instruction::Sub,
                            zero, Op,
                            Op->getType(), Name, InsertBefore);
}
2572 
                                          BasicBlock *InsertAtEnd) {
  // Negation is materialized as (0 - Op).
  return new BinaryOperator(Instruction::Sub,
                            zero, Op,
                            Op->getType(), Name, InsertAtEnd);
}
2580 
                                             Instruction *InsertBefore) {
  // (0 - Op) with the no-signed-wrap flag set.
  return BinaryOperator::CreateNSWSub(zero, Op, Name, InsertBefore);
}
2586 
                                             BasicBlock *InsertAtEnd) {
  // (0 - Op) with the no-signed-wrap flag set.
  return BinaryOperator::CreateNSWSub(zero, Op, Name, InsertAtEnd);
}
2592 
                                             Instruction *InsertBefore) {
  // (0 - Op) with the no-unsigned-wrap flag set.
  return BinaryOperator::CreateNUWSub(zero, Op, Name, InsertBefore);
}
2598 
                                             BasicBlock *InsertAtEnd) {
  // (0 - Op) with the no-unsigned-wrap flag set.
  return BinaryOperator::CreateNUWSub(zero, Op, Name, InsertAtEnd);
}
2604 
                                          Instruction *InsertBefore) {
  // Bitwise not is materialized as (Op ^ all-ones).
  return new BinaryOperator(Instruction::Xor, Op, C,
                            Op->getType(), Name, InsertBefore);
}
2611 
                                          BasicBlock *InsertAtEnd) {
  // Bitwise not is materialized as (Op ^ all-ones).
  return new BinaryOperator(Instruction::Xor, Op, AllOnes,
                            Op->getType(), Name, InsertAtEnd);
}
2618 
// Exchange the two operands to this instruction. This instruction is safe to
// use on any binary instruction and does not modify the semantics of the
// instruction. If the instruction is order-dependent (SetLT f.e.), the opcode
// is changed.
  // Returns true on failure (non-commutative opcode), false on success.
  if (!isCommutative())
    return true; // Can't commute operands
  Op<0>().swap(Op<1>());
  return false;
}
2629 
2630 //===----------------------------------------------------------------------===//
2631 // FPMathOperator Class
2632 //===----------------------------------------------------------------------===//
2633 
  // Accuracy is carried on the instruction as !fpmath metadata.
  const MDNode *MD =
      cast<Instruction>(this)->getMetadata(LLVMContext::MD_fpmath);
  if (!MD)
    return 0.0; // No !fpmath metadata attached.
  ConstantFP *Accuracy = mdconst::extract<ConstantFP>(MD->getOperand(0));
  return Accuracy->getValueAPF().convertToFloat();
}
2642 
2643 //===----------------------------------------------------------------------===//
2644 // CastInst Class
2645 //===----------------------------------------------------------------------===//
2646 
// Just determine if this cast only deals with integral->integral conversion.
  switch (getOpcode()) {
  default: return false;
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::Trunc:
    // These opcodes only ever operate on integers.
    return true;
  case Instruction::BitCast:
    // A bitcast is an integer cast only when both ends are integer types.
    return getOperand(0)->getType()->isIntegerTy() &&
           getType()->isIntegerTy();
  }
}
2660 
  // Only BitCast can be lossless, exit fast if we're not BitCast
  if (getOpcode() != Instruction::BitCast)
    return false;

  // Identity cast is always lossless
  Type *SrcTy = getOperand(0)->getType();
  Type *DstTy = getType();
  if (SrcTy == DstTy)
    return true;

  // Pointer to pointer is always lossless.
  if (SrcTy->isPointerTy())
    return DstTy->isPointerTy();
  // Everything else (e.g. int <-> float bitcasts) is treated as lossy.
  return false; // Other types have no identity values
}
2677 
2678 /// This function determines if the CastInst does not require any bits to be
2679 /// changed in order to effect the cast. Essentially, it identifies cases where
2680 /// no code gen is necessary for the cast, hence the name no-op cast. For
2681 /// example, the following are all no-op casts:
2682 /// # bitcast i32* %x to i8*
2683 /// # bitcast <2 x i32> %x to <4 x i16>
2684 /// # ptrtoint i32* %x to i32 ; on 32-bit plaforms only
2685 /// Determine if the described cast is a no-op.
                                       Type *SrcTy,
                                       Type *DestTy,
                                       const DataLayout &DL) {
  assert(castIsValid(Opcode, SrcTy, DestTy) && "method precondition");
  switch (Opcode) {
  default: llvm_unreachable("Invalid CastOp");
  case Instruction::Trunc:
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::FPTrunc:
  case Instruction::FPExt:
  case Instruction::UIToFP:
  case Instruction::SIToFP:
  case Instruction::FPToUI:
  case Instruction::FPToSI:
  case Instruction::AddrSpaceCast:
    // TODO: Target informations may give a more accurate answer here.
    return false;
  case Instruction::BitCast:
    return true; // BitCast never modifies bits.
  case Instruction::PtrToInt:
    // No-op only when the integer is exactly pointer-sized.
    return DL.getIntPtrType(SrcTy)->getScalarSizeInBits() ==
           DestTy->getScalarSizeInBits();
  case Instruction::IntToPtr:
    // No-op only when the integer is exactly pointer-sized.
    return DL.getIntPtrType(DestTy)->getScalarSizeInBits() ==
           SrcTy->getScalarSizeInBits();
  }
}
2715 
2716 bool CastInst::isNoopCast(const DataLayout &DL) const {
2717  return isNoopCast(getOpcode(), getOperand(0)->getType(), getType(), DL);
2718 }
2719 
2720 /// This function determines if a pair of casts can be eliminated and what
2721 /// opcode should be used in the elimination. This assumes that there are two
2722 /// instructions like this:
2723 /// * %F = firstOpcode SrcTy %x to MidTy
2724 /// * %S = secondOpcode MidTy %F to DstTy
2725 /// The function returns a resultOpcode so these two casts can be replaced with:
2726 /// * %Replacement = resultOpcode %SrcTy %x to DstTy
2727 /// If no such cast is permitted, the function returns 0.
2729  Instruction::CastOps firstOp, Instruction::CastOps secondOp,
2730  Type *SrcTy, Type *MidTy, Type *DstTy, Type *SrcIntPtrTy, Type *MidIntPtrTy,
2731  Type *DstIntPtrTy) {
2732  // Define the 144 possibilities for these two cast instructions. The values
2733  // in this matrix determine what to do in a given situation and select the
2734  // case in the switch below. The rows correspond to firstOp, the columns
2735  // correspond to secondOp. In looking at the table below, keep in mind
2736  // the following cast properties:
2737  //
2738  // Size Compare Source Destination
2739  // Operator Src ? Size Type Sign Type Sign
2740  // -------- ------------ ------------------- ---------------------
2741  // TRUNC > Integer Any Integral Any
2742  // ZEXT < Integral Unsigned Integer Any
2743  // SEXT < Integral Signed Integer Any
2744  // FPTOUI n/a FloatPt n/a Integral Unsigned
2745  // FPTOSI n/a FloatPt n/a Integral Signed
2746  // UITOFP n/a Integral Unsigned FloatPt n/a
2747  // SITOFP n/a Integral Signed FloatPt n/a
2748  // FPTRUNC > FloatPt n/a FloatPt n/a
2749  // FPEXT < FloatPt n/a FloatPt n/a
2750  // PTRTOINT n/a Pointer n/a Integral Unsigned
2751  // INTTOPTR n/a Integral Unsigned Pointer n/a
2752  // BITCAST = FirstClass n/a FirstClass n/a
2753  // ADDRSPCST n/a Pointer n/a Pointer n/a
2754  //
2755  // NOTE: some transforms are safe, but we consider them to be non-profitable.
2756  // For example, we could merge "fptoui double to i32" + "zext i32 to i64",
2757  // into "fptoui double to i64", but this loses information about the range
2758  // of the produced value (we no longer know the top-part is all zeros).
2759  // Further this conversion is often much more expensive for typical hardware,
2760  // and causes issues when building libgcc. We disallow fptosi+sext for the
2761  // same reason.
2762  const unsigned numCastOps =
2763  Instruction::CastOpsEnd - Instruction::CastOpsBegin;
2764  static const uint8_t CastResults[numCastOps][numCastOps] = {
2765  // T F F U S F F P I B A -+
2766  // R Z S P P I I T P 2 N T S |
2767  // U E E 2 2 2 2 R E I T C C +- secondOp
2768  // N X X U S F F N X N 2 V V |
2769  // C T T I I P P C T T P T T -+
2770  { 1, 0, 0,99,99, 0, 0,99,99,99, 0, 3, 0}, // Trunc -+
2771  { 8, 1, 9,99,99, 2,17,99,99,99, 2, 3, 0}, // ZExt |
2772  { 8, 0, 1,99,99, 0, 2,99,99,99, 0, 3, 0}, // SExt |
2773  { 0, 0, 0,99,99, 0, 0,99,99,99, 0, 3, 0}, // FPToUI |
2774  { 0, 0, 0,99,99, 0, 0,99,99,99, 0, 3, 0}, // FPToSI |
2775  { 99,99,99, 0, 0,99,99, 0, 0,99,99, 4, 0}, // UIToFP +- firstOp
2776  { 99,99,99, 0, 0,99,99, 0, 0,99,99, 4, 0}, // SIToFP |
2777  { 99,99,99, 0, 0,99,99, 0, 0,99,99, 4, 0}, // FPTrunc |
2778  { 99,99,99, 2, 2,99,99, 8, 2,99,99, 4, 0}, // FPExt |
2779  { 1, 0, 0,99,99, 0, 0,99,99,99, 7, 3, 0}, // PtrToInt |
2780  { 99,99,99,99,99,99,99,99,99,11,99,15, 0}, // IntToPtr |
2781  { 5, 5, 5, 6, 6, 5, 5, 6, 6,16, 5, 1,14}, // BitCast |
2782  { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,13,12}, // AddrSpaceCast -+
2783  };
2784 
2785  // TODO: This logic could be encoded into the table above and handled in the
2786  // switch below.
2787  // If either of the casts are a bitcast from scalar to vector, disallow the
2788  // merging. However, any pair of bitcasts are allowed.
2789  bool IsFirstBitcast = (firstOp == Instruction::BitCast);
2790  bool IsSecondBitcast = (secondOp == Instruction::BitCast);
2791  bool AreBothBitcasts = IsFirstBitcast && IsSecondBitcast;
2792 
2793  // Check if any of the casts convert scalars <-> vectors.
2794  if ((IsFirstBitcast && isa<VectorType>(SrcTy) != isa<VectorType>(MidTy)) ||
2795  (IsSecondBitcast && isa<VectorType>(MidTy) != isa<VectorType>(DstTy)))
2796  if (!AreBothBitcasts)
2797  return 0;
2798 
2799  int ElimCase = CastResults[firstOp-Instruction::CastOpsBegin]
2800  [secondOp-Instruction::CastOpsBegin];
2801  switch (ElimCase) {
2802  case 0:
2803  // Categorically disallowed.
2804  return 0;
2805  case 1:
2806  // Allowed, use first cast's opcode.
2807  return firstOp;
2808  case 2:
2809  // Allowed, use second cast's opcode.
2810  return secondOp;
2811  case 3:
2812  // No-op cast in second op implies firstOp as long as the DestTy
2813  // is integer and we are not converting between a vector and a
2814  // non-vector type.
2815  if (!SrcTy->isVectorTy() && DstTy->isIntegerTy())
2816  return firstOp;
2817  return 0;
2818  case 4:
2819  // No-op cast in second op implies firstOp as long as the DestTy
2820  // is floating point.
2821  if (DstTy->isFloatingPointTy())
2822  return firstOp;
2823  return 0;
2824  case 5:
2825  // No-op cast in first op implies secondOp as long as the SrcTy
2826  // is an integer.
2827  if (SrcTy->isIntegerTy())
2828  return secondOp;
2829  return 0;
2830  case 6:
2831  // No-op cast in first op implies secondOp as long as the SrcTy
2832  // is a floating point.
2833  if (SrcTy->isFloatingPointTy())
2834  return secondOp;
2835  return 0;
2836  case 7: {
2837  // Cannot simplify if address spaces are different!
2838  if (SrcTy->getPointerAddressSpace() != DstTy->getPointerAddressSpace())
2839  return 0;
2840 
2841  unsigned MidSize = MidTy->getScalarSizeInBits();
2842  // We can still fold this without knowing the actual sizes as long we
2843  // know that the intermediate pointer is the largest possible
2844  // pointer size.
2845  // FIXME: Is this always true?
2846  if (MidSize == 64)
2847  return Instruction::BitCast;
2848 
2849  // ptrtoint, inttoptr -> bitcast (ptr -> ptr) if int size is >= ptr size.
2850  if (!SrcIntPtrTy || DstIntPtrTy != SrcIntPtrTy)
2851  return 0;
2852  unsigned PtrSize = SrcIntPtrTy->getScalarSizeInBits();
2853  if (MidSize >= PtrSize)
2854  return Instruction::BitCast;
2855  return 0;
2856  }
2857  case 8: {
2858  // ext, trunc -> bitcast, if the SrcTy and DstTy are same size
2859  // ext, trunc -> ext, if sizeof(SrcTy) < sizeof(DstTy)
2860  // ext, trunc -> trunc, if sizeof(SrcTy) > sizeof(DstTy)
2861  unsigned SrcSize = SrcTy->getScalarSizeInBits();
2862  unsigned DstSize = DstTy->getScalarSizeInBits();
2863  if (SrcSize == DstSize)
2864  return Instruction::BitCast;
2865  else if (SrcSize < DstSize)
2866  return firstOp;
2867  return secondOp;
2868  }
2869  case 9:
2870  // zext, sext -> zext, because sext can't sign extend after zext
2871  return Instruction::ZExt;
2872  case 11: {
2873  // inttoptr, ptrtoint -> bitcast if SrcSize<=PtrSize and SrcSize==DstSize
2874  if (!MidIntPtrTy)
2875  return 0;
2876  unsigned PtrSize = MidIntPtrTy->getScalarSizeInBits();
2877  unsigned SrcSize = SrcTy->getScalarSizeInBits();
2878  unsigned DstSize = DstTy->getScalarSizeInBits();
2879  if (SrcSize <= PtrSize && SrcSize == DstSize)
2880  return Instruction::BitCast;
2881  return 0;
2882  }
2883  case 12:
2884  // addrspacecast, addrspacecast -> bitcast, if SrcAS == DstAS
2885  // addrspacecast, addrspacecast -> addrspacecast, if SrcAS != DstAS
2886  if (SrcTy->getPointerAddressSpace() != DstTy->getPointerAddressSpace())
2887  return Instruction::AddrSpaceCast;
2888  return Instruction::BitCast;
2889  case 13:
2890  // FIXME: this state can be merged with (1), but the following assert
2891  // is useful to check the correctness of the sequence due to semantic
2892  // change of bitcast.
2893  assert(
2894  SrcTy->isPtrOrPtrVectorTy() &&
2895  MidTy->isPtrOrPtrVectorTy() &&
2896  DstTy->isPtrOrPtrVectorTy() &&
2897  SrcTy->getPointerAddressSpace() != MidTy->getPointerAddressSpace() &&
2898  MidTy->getPointerAddressSpace() == DstTy->getPointerAddressSpace() &&
2899  "Illegal addrspacecast, bitcast sequence!");
2900  // Allowed, use first cast's opcode
2901  return firstOp;
2902  case 14:
2903  // bitcast, addrspacecast -> addrspacecast if the element type of
2904  // bitcast's source is the same as that of addrspacecast's destination.
2905  if (SrcTy->getScalarType()->getPointerElementType() ==
2907  return Instruction::AddrSpaceCast;
2908  return 0;
2909  case 15:
2910  // FIXME: this state can be merged with (1), but the following assert
2911  // is useful to check the correctness of the sequence due to semantic
2912  // change of bitcast.
2913  assert(
2914  SrcTy->isIntOrIntVectorTy() &&
2915  MidTy->isPtrOrPtrVectorTy() &&
2916  DstTy->isPtrOrPtrVectorTy() &&
2917  MidTy->getPointerAddressSpace() == DstTy->getPointerAddressSpace() &&
2918  "Illegal inttoptr, bitcast sequence!");
2919  // Allowed, use first cast's opcode
2920  return firstOp;
2921  case 16:
2922  // FIXME: this state can be merged with (2), but the following assert
2923  // is useful to check the correctness of the sequence due to semantic
2924  // change of bitcast.
2925  assert(
2926  SrcTy->isPtrOrPtrVectorTy() &&
2927  MidTy->isPtrOrPtrVectorTy() &&
2928  DstTy->isIntOrIntVectorTy() &&
2929  SrcTy->getPointerAddressSpace() == MidTy->getPointerAddressSpace() &&
2930  "Illegal bitcast, ptrtoint sequence!");
2931  // Allowed, use second cast's opcode
2932  return secondOp;
2933  case 17:
2934  // (sitofp (zext x)) -> (uitofp x)
2935  return Instruction::UIToFP;
2936  case 99:
2937  // Cast combination can't happen (error in input). This is for all cases
2938  // where the MidTy is not the same for the two cast instructions.
2939  llvm_unreachable("Invalid Cast Combination");
2940  default:
2941  llvm_unreachable("Error in CastResults table!!!");
2942  }
2943 }
2944 
// Factory: constructs the concrete CastInst subclass matching cast opcode
// 'op', casting value S to type Ty, inserted before 'InsertBefore'.
// NOTE(review): the first line of this signature was dropped by the doc
// extractor; from the body this is presumably CastInst::Create -- confirm
// against the original source.
2946  const Twine &Name, Instruction *InsertBefore) {
  // Reject opcode/type combinations that castIsValid disallows.
2947  assert(castIsValid(op, S, Ty) && "Invalid cast!");
2948  // Construct and return the appropriate CastInst subclass
2949  switch (op) {
2950  case Trunc: return new TruncInst (S, Ty, Name, InsertBefore);
2951  case ZExt: return new ZExtInst (S, Ty, Name, InsertBefore);
2952  case SExt: return new SExtInst (S, Ty, Name, InsertBefore);
2953  case FPTrunc: return new FPTruncInst (S, Ty, Name, InsertBefore);
2954  case FPExt: return new FPExtInst (S, Ty, Name, InsertBefore);
2955  case UIToFP: return new UIToFPInst (S, Ty, Name, InsertBefore);
2956  case SIToFP: return new SIToFPInst (S, Ty, Name, InsertBefore);
2957  case FPToUI: return new FPToUIInst (S, Ty, Name, InsertBefore);
2958  case FPToSI: return new FPToSIInst (S, Ty, Name, InsertBefore);
2959  case PtrToInt: return new PtrToIntInst (S, Ty, Name, InsertBefore);
2960  case IntToPtr: return new IntToPtrInst (S, Ty, Name, InsertBefore);
2961  case BitCast: return new BitCastInst (S, Ty, Name, InsertBefore);
2962  case AddrSpaceCast: return new AddrSpaceCastInst (S, Ty, Name, InsertBefore);
2963  default: llvm_unreachable("Invalid opcode provided");
2964  }
2965 }
2966 
// Factory (append form): same dispatch as the insert-before overload above,
// but the new cast instruction is appended to basic block 'InsertAtEnd'.
// NOTE(review): the first line of this signature was dropped by the doc
// extractor; presumably CastInst::Create -- confirm against the original.
2968  const Twine &Name, BasicBlock *InsertAtEnd) {
2969  assert(castIsValid(op, S, Ty) && "Invalid cast!");
2970  // Construct and return the appropriate CastInst subclass
2971  switch (op) {
2972  case Trunc: return new TruncInst (S, Ty, Name, InsertAtEnd);
2973  case ZExt: return new ZExtInst (S, Ty, Name, InsertAtEnd);
2974  case SExt: return new SExtInst (S, Ty, Name, InsertAtEnd);
2975  case FPTrunc: return new FPTruncInst (S, Ty, Name, InsertAtEnd);
2976  case FPExt: return new FPExtInst (S, Ty, Name, InsertAtEnd);
2977  case UIToFP: return new UIToFPInst (S, Ty, Name, InsertAtEnd);
2978  case SIToFP: return new SIToFPInst (S, Ty, Name, InsertAtEnd);
2979  case FPToUI: return new FPToUIInst (S, Ty, Name, InsertAtEnd);
2980  case FPToSI: return new FPToSIInst (S, Ty, Name, InsertAtEnd);
2981  case PtrToInt: return new PtrToIntInst (S, Ty, Name, InsertAtEnd);
2982  case IntToPtr: return new IntToPtrInst (S, Ty, Name, InsertAtEnd);
2983  case BitCast: return new BitCastInst (S, Ty, Name, InsertAtEnd);
2984  case AddrSpaceCast: return new AddrSpaceCastInst (S, Ty, Name, InsertAtEnd);
2985  default: llvm_unreachable("Invalid opcode provided");
2986  }
2987 }
2988 
// Zero-extend S to Ty, or emit a plain bitcast when the scalar bit widths
// already match (no extension needed). Presumably
// CastInst::CreateZExtOrBitCast; the signature's first line was dropped by
// the doc extractor.
2990  const Twine &Name,
2991  Instruction *InsertBefore) {
2992  if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
2993  return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
2994  return Create(Instruction::ZExt, S, Ty, Name, InsertBefore);
2995 }
2996 
// Append form of the zext-or-bitcast helper above: same width test, but the
// instruction is appended to 'InsertAtEnd'. Presumably
// CastInst::CreateZExtOrBitCast; signature's first line lost in extraction.
2998  const Twine &Name,
2999  BasicBlock *InsertAtEnd) {
3000  if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
3001  return Create(Instruction::BitCast, S, Ty, Name, InsertAtEnd);
3002  return Create(Instruction::ZExt, S, Ty, Name, InsertAtEnd);
3003 }
3004 
// Sign-extend S to Ty, or emit a bitcast when the scalar bit widths already
// match. Presumably CastInst::CreateSExtOrBitCast; signature's first line
// lost in extraction.
3006  const Twine &Name,
3007  Instruction *InsertBefore) {
3008  if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
3009  return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
3010  return Create(Instruction::SExt, S, Ty, Name, InsertBefore);
3011 }
3012 
// Append form of the sext-or-bitcast helper above. Presumably
// CastInst::CreateSExtOrBitCast; signature's first line lost in extraction.
3014  const Twine &Name,
3015  BasicBlock *InsertAtEnd) {
3016  if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
3017  return Create(Instruction::BitCast, S, Ty, Name, InsertAtEnd);
3018  return Create(Instruction::SExt, S, Ty, Name, InsertAtEnd);
3019 }
3020 
// Truncate S to Ty, or emit a bitcast when the scalar bit widths already
// match. Presumably CastInst::CreateTruncOrBitCast; signature's first line
// lost in extraction.
3022  const Twine &Name,
3023  Instruction *InsertBefore) {
3024  if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
3025  return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
3026  return Create(Instruction::Trunc, S, Ty, Name, InsertBefore);
3027 }
3028 
// Append form of the trunc-or-bitcast helper above. Presumably
// CastInst::CreateTruncOrBitCast; signature's first line lost in extraction.
3030  const Twine &Name,
3031  BasicBlock *InsertAtEnd) {
3032  if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
3033  return Create(Instruction::BitCast, S, Ty, Name, InsertAtEnd);
3034  return Create(Instruction::Trunc, S, Ty, Name, InsertAtEnd);
3035 }
3036 
// Convert pointer value S to type Ty at the end of a block: emits ptrtoint
// when Ty is an integer (or integer-vector) type, otherwise defers to
// CreatePointerBitCastOrAddrSpaceCast for pointer->pointer casts.
// Presumably CastInst::CreatePointerCast; signature's first line lost in
// extraction.
3038  const Twine &Name,
3039  BasicBlock *InsertAtEnd) {
  // S must be pointer-typed; Ty must be int(-vector) or ptr(-vector), and
  // scalar<->vector mismatches (including element-count mismatches) are
  // rejected up front.
3040  assert(S->getType()->isPtrOrPtrVectorTy() && "Invalid cast");
3041  assert((Ty->isIntOrIntVectorTy() || Ty->isPtrOrPtrVectorTy()) &&
3042  "Invalid cast");
3043  assert(Ty->isVectorTy() == S->getType()->isVectorTy() && "Invalid cast");
3044  assert((!Ty->isVectorTy() ||
3045  cast<VectorType>(Ty)->getElementCount() ==
3046  cast<VectorType>(S->getType())->getElementCount()) &&
3047  "Invalid cast");
3048 
3049  if (Ty->isIntOrIntVectorTy())
3050  return Create(Instruction::PtrToInt, S, Ty, Name, InsertAtEnd);
3051 
3052  return CreatePointerBitCastOrAddrSpaceCast(S, Ty, Name, InsertAtEnd);
3053 }
3054 
3055 /// Create a BitCast or a PtrToInt cast instruction
// Insert-before form of CreatePointerCast: ptrtoint for ptr->int, otherwise
// bitcast/addrspacecast via CreatePointerBitCastOrAddrSpaceCast.
// NOTE(review): the signature's first line was dropped by the doc extractor.
3057  const Twine &Name,
3058  Instruction *InsertBefore) {
3059  assert(S->getType()->isPtrOrPtrVectorTy() && "Invalid cast");
3060  assert((Ty->isIntOrIntVectorTy() || Ty->isPtrOrPtrVectorTy()) &&
3061  "Invalid cast");
3062  assert(Ty->isVectorTy() == S->getType()->isVectorTy() && "Invalid cast");
3063  assert((!Ty->isVectorTy() ||
3064  cast<VectorType>(Ty)->getElementCount() ==
3065  cast<VectorType>(S->getType())->getElementCount()) &&
3066  "Invalid cast");
3067 
3068  if (Ty->isIntOrIntVectorTy())
3069  return Create(Instruction::PtrToInt, S, Ty, Name, InsertBefore);
3070 
3071  return CreatePointerBitCastOrAddrSpaceCast(S, Ty, Name, InsertBefore);
3072 }
3073 
// Pointer->pointer cast appended to 'InsertAtEnd': emits addrspacecast when
// the source and destination address spaces differ, otherwise a bitcast.
// Presumably CastInst::CreatePointerBitCastOrAddrSpaceCast; signature's
// first line lost in extraction.
3075  Value *S, Type *Ty,
3076  const Twine &Name,
3077  BasicBlock *InsertAtEnd) {
3078  assert(S->getType()->isPtrOrPtrVectorTy() && "Invalid cast");
3079  assert(Ty->isPtrOrPtrVectorTy() && "Invalid cast");
3080 
3081  if (S->getType()->getPointerAddressSpace() != Ty->getPointerAddressSpace())
3082  return Create(Instruction::AddrSpaceCast, S, Ty, Name, InsertAtEnd);
3083 
3084  return Create(Instruction::BitCast, S, Ty, Name, InsertAtEnd);
3085 }
3086 
// Insert-before form of the helper above: addrspacecast when address spaces
// differ, bitcast otherwise. Presumably
// CastInst::CreatePointerBitCastOrAddrSpaceCast; signature's first line
// lost in extraction.
3088  Value *S, Type *Ty,
3089  const Twine &Name,
3090  Instruction *InsertBefore) {
3091  assert(S->getType()->isPtrOrPtrVectorTy() && "Invalid cast");
3092  assert(Ty->isPtrOrPtrVectorTy() && "Invalid cast");
3093 
3094  if (S->getType()->getPointerAddressSpace() != Ty->getPointerAddressSpace())
3095  return Create(Instruction::AddrSpaceCast, S, Ty, Name, InsertBefore);
3096 
3097  return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
3098 }
3099 
// Bit-preserving cast between S and Ty: ptrtoint for ptr->int, inttoptr for
// int->ptr, plain bitcast otherwise. Presumably
// CastInst::CreateBitOrPointerCast; signature's first line lost in
// extraction.
3101  const Twine &Name,
3102  Instruction *InsertBefore) {
3103  if (S->getType()->isPointerTy() && Ty->isIntegerTy())
3104  return Create(Instruction::PtrToInt, S, Ty, Name, InsertBefore);
3105  if (S->getType()->isIntegerTy() && Ty->isPointerTy())
3106  return Create(Instruction::IntToPtr, S, Ty, Name, InsertBefore);
3107 
3108  return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
3109 }
3110 
// Integer cast selected by width and signedness: bitcast when scalar widths
// are equal, trunc when narrowing, and sext or zext (per 'isSigned') when
// widening. Presumably CastInst::CreateIntegerCast; signature's first line
// lost in extraction.
3112  bool isSigned, const Twine &Name,
3113  Instruction *InsertBefore) {
3114  assert(C->getType()->isIntOrIntVectorTy() && Ty->isIntOrIntVectorTy() &&
3115  "Invalid integer cast");
3116  unsigned SrcBits = C->getType()->getScalarSizeInBits();
3117  unsigned DstBits = Ty->getScalarSizeInBits();
3118  Instruction::CastOps opcode =
3119  (SrcBits == DstBits ? Instruction::BitCast :
3120  (SrcBits > DstBits ? Instruction::Trunc :
3121  (isSigned ? Instruction::SExt : Instruction::ZExt)));
3122  return Create(opcode, C, Ty, Name, InsertBefore);
3123 }
3124 
// Append form of the width/signedness-directed integer cast above.
// Presumably CastInst::CreateIntegerCast; signature's first line lost in
// extraction.
3126  bool isSigned, const Twine &Name,
3127  BasicBlock *InsertAtEnd) {
3128  assert(C->getType()->isIntOrIntVectorTy() && Ty->isIntOrIntVectorTy() &&
3129  "Invalid cast");
3130  unsigned SrcBits = C->getType()->getScalarSizeInBits();
3131  unsigned DstBits = Ty->getScalarSizeInBits();
3132  Instruction::CastOps opcode =
3133  (SrcBits == DstBits ? Instruction::BitCast :
3134  (SrcBits > DstBits ? Instruction::Trunc :
3135  (isSigned ? Instruction::SExt : Instruction::ZExt)));
3136  return Create(opcode, C, Ty, Name, InsertAtEnd);
3137 }
3138 
// Floating-point cast selected by width: bitcast when scalar widths are
// equal, fptrunc when narrowing, fpext when widening. Presumably
// CastInst::CreateFPCast; signature's first line lost in extraction.
3140  const Twine &Name,
3141  Instruction *InsertBefore) {
3142  assert(C->getType()->isFPOrFPVectorTy() && Ty->isFPOrFPVectorTy() &&
3143  "Invalid cast");
3144  unsigned SrcBits = C->getType()->getScalarSizeInBits();
3145  unsigned DstBits = Ty->getScalarSizeInBits();
3146  Instruction::CastOps opcode =
3147  (SrcBits == DstBits ? Instruction::BitCast :
3148  (SrcBits > DstBits ? Instruction::FPTrunc : Instruction::FPExt));
3149  return Create(opcode, C, Ty, Name, InsertBefore);
3150 }
3151 
// Append form of the width-directed floating-point cast above. Presumably
// CastInst::CreateFPCast; signature's first line lost in extraction.
3153  const Twine &Name,
3154  BasicBlock *InsertAtEnd) {
3155  assert(C->getType()->isFPOrFPVectorTy() && Ty->isFPOrFPVectorTy() &&
3156  "Invalid cast");
3157  unsigned SrcBits = C->getType()->getScalarSizeInBits();
3158  unsigned DstBits = Ty->getScalarSizeInBits();
3159  Instruction::CastOps opcode =
3160  (SrcBits == DstBits ? Instruction::BitCast :
3161  (SrcBits > DstBits ? Instruction::FPTrunc : Instruction::FPExt));
3162  return Create(opcode, C, Ty, Name, InsertAtEnd);
3163 }
3164 
3165 bool CastInst::isBitCastable(Type *SrcTy, Type *DestTy) {
3166  if (!SrcTy->isFirstClassType() || !DestTy->isFirstClassType())
3167  return false;
3168 
3169  if (SrcTy == DestTy)
3170  return true;
3171 
3172  if (VectorType *SrcVecTy = dyn_cast<VectorType>(SrcTy)) {
3173  if (VectorType *DestVecTy = dyn_cast<VectorType>(DestTy)) {
3174  if (SrcVecTy->getElementCount() == DestVecTy->getElementCount()) {
3175  // An element by element cast. Valid if casting the elements is valid.
3176  SrcTy = SrcVecTy->getElementType();
3177  DestTy = DestVecTy->getElementType();
3178  }
3179  }
3180  }
3181 
3182  if (PointerType *DestPtrTy = dyn_cast<PointerType>(DestTy)) {
3183  if (PointerType *SrcPtrTy = dyn_cast<PointerType>(SrcTy)) {
3184  return SrcPtrTy->getAddressSpace() == DestPtrTy->getAddressSpace();
3185  }
3186  }
3187 
3188  TypeSize SrcBits = SrcTy->getPrimitiveSizeInBits(); // 0 for ptr
3189  TypeSize DestBits = DestTy->getPrimitiveSizeInBits(); // 0 for ptr
3190 
3191  // Could still have vectors of pointers if the number of elements doesn't
3192  // match
3193  if (SrcBits.getKnownMinSize() == 0 || DestBits.getKnownMinSize() == 0)
3194  return false;
3195 
3196  if (SrcBits != DestBits)
3197  return false;
3198 
3199  if (DestTy->isX86_MMXTy() || SrcTy->isX86_MMXTy())
3200  return false;
3201 
3202  return true;
3203 }
3204 
// Returns true when SrcTy converts to DestTy via a bitcast, or via a
// ptrtoint/inttoptr that is a no-op under the given DataLayout: the integer
// width must equal the pointer's size and the pointer must not be a
// non-integral pointer type. Presumably
// CastInst::isBitOrNoopPointerCastable; signature's first line lost in
// extraction.
3206  const DataLayout &DL) {
3207  // ptrtoint and inttoptr are not allowed on non-integral pointers
3208  if (auto *PtrTy = dyn_cast<PointerType>(SrcTy))
3209  if (auto *IntTy = dyn_cast<IntegerType>(DestTy))
3210  return (IntTy->getBitWidth() == DL.getPointerTypeSizeInBits(PtrTy) &&
3211  !DL.isNonIntegralPointerType(PtrTy));
3212  if (auto *PtrTy = dyn_cast<PointerType>(DestTy))
3213  if (auto *IntTy = dyn_cast<IntegerType>(SrcTy))
3214  return (IntTy->getBitWidth() == DL.getPointerTypeSizeInBits(PtrTy) &&
3215  !DL.isNonIntegralPointerType(PtrTy));
3216 
  // Neither side is a ptr/int pair: fall back to plain bitcast legality.
3217  return isBitCastable(SrcTy, DestTy);
3218 }
3219 
3220 // Provide a way to get a "cast" where the cast opcode is inferred from the
3221 // types and size of the operand. This, basically, is a parallel of the
3222 // logic in the castIsValid function below. This axiom should hold:
3223 // castIsValid( getCastOpcode(Val, Ty), Val, Ty)
3224 // should not assert in castIsValid. In other words, this produces a "correct"
3225 // casting opcode for the arguments passed to it.
3228  const Value *Src, bool SrcIsSigned, Type *DestTy, bool DestIsSigned) {
3229  Type *SrcTy = Src->getType();
3230 
3231  assert(SrcTy->isFirstClassType() && DestTy->isFirstClassType() &&
3232  "Only first class types are castable!");
3233 
3234  if (SrcTy == DestTy)
3235  return BitCast;
3236 
3237  // FIXME: Check address space sizes here
3238  if (VectorType *SrcVecTy = dyn_cast<VectorType>(SrcTy))
3239  if (VectorType *DestVecTy = dyn_cast<VectorType>(DestTy))
3240  if (SrcVecTy->getElementCount() == DestVecTy->getElementCount()) {
3241  // An element by element cast. Find the appropriate opcode based on the
3242  // element types.
3243  SrcTy = SrcVecTy->getElementType();
3244  DestTy = DestVecTy->getElementType();
3245  }
3246 
3247  // Get the bit sizes, we'll need these
3248  unsigned SrcBits = SrcTy->getPrimitiveSizeInBits(); // 0 for ptr
3249  unsigned DestBits = DestTy->getPrimitiveSizeInBits(); // 0 for ptr
3250 
3251  // Run through the possibilities ...
3252  if (DestTy->isIntegerTy()) { // Casting to integral
3253  if (SrcTy->isIntegerTy()) { // Casting from integral
3254  if (DestBits < SrcBits)
3255  return Trunc; // int -> smaller int
3256  else if (DestBits > SrcBits) { // its an extension
3257  if (SrcIsSigned)
3258  return SExt; // signed -> SEXT
3259  else
3260  return ZExt; // unsigned -> ZEXT
3261  } else {
3262  return BitCast; // Same size, No-op cast
3263  }
3264  } else if (SrcTy->isFloatingPointTy()) { // Casting from floating pt
3265  if (DestIsSigned)
3266  return FPToSI; // FP -> sint
3267  else
3268  return FPToUI; // FP -> uint
3269  } else if (SrcTy->isVectorTy()) {
3270  assert(DestBits == SrcBits &&
3271  "Casting vector to integer of different width");
3272  return BitCast; // Same size, no-op cast
3273  } else {
3274  assert(SrcTy->isPointerTy() &&
3275  "Casting from a value that is not first-class type");
3276  return PtrToInt; // ptr -> int
3277  }
3278  } else if (DestTy->isFloatingPointTy()) { // Casting to floating pt
3279  if (SrcTy->isIntegerTy()) { // Casting from integral
3280  if (SrcIsSigned)
3281  return SIToFP; // sint -> FP
3282  else
3283  return UIToFP; // uint -> FP
3284  } else if (SrcTy->isFloatingPointTy()) { // Casting from floating pt
3285  if (DestBits < SrcBits) {
3286  return FPTrunc; // FP -> smaller FP
3287  } else if (DestBits > SrcBits) {
3288  return FPExt; // FP -> larger FP
3289  } else {
3290  return BitCast; // same size, no-op cast
3291  }
3292  } else if (SrcTy->isVectorTy()) {
3293  assert(DestBits == SrcBits &&
3294  "Casting vector to floating point of different width");
3295  return BitCast; // same size, no-op cast
3296  }
3297  llvm_unreachable("Casting pointer or non-first class to float");
3298  } else if (DestTy->isVectorTy()) {
3299  assert(DestBits == SrcBits &&
3300  "Illegal cast to vector (wrong type or size)");
3301  return BitCast;
3302  } else if (DestTy->isPointerTy()) {
3303  if (SrcTy->isPointerTy()) {
3304  if (DestTy->getPointerAddressSpace() != SrcTy->getPointerAddressSpace())
3305  return AddrSpaceCast;
3306  return BitCast; // ptr -> ptr
3307  } else if (SrcTy->isIntegerTy()) {
3308  return IntToPtr; // int -> ptr
3309  }
3310  llvm_unreachable("Casting pointer to other than pointer or int");
3311  } else if (DestTy->isX86_MMXTy()) {
3312  if (SrcTy->isVectorTy()) {
3313  assert(DestBits == SrcBits && "Casting vector of wrong width to X86_MMX");
3314  return BitCast; // 64-bit vector to MMX
3315  }
3316  llvm_unreachable("Illegal cast to X86_MMX");
3317  }
3318  llvm_unreachable("Casting to type that is not first-class");
3319 }
3320 
3321 //===----------------------------------------------------------------------===//
3322 // CastInst SubClass Constructors
3323 //===----------------------------------------------------------------------===//
3324 
3325 /// Check that the construction parameters for a CastInst are correct. This
3326 /// could be broken out into the separate constructors but it is useful to have
3327 /// it in one place and to eliminate the redundant code for getting the sizes
3328 /// of the types involved.
3329 bool
// NOTE(review): the signature line was dropped by the doc extractor;
// presumably CastInst::castIsValid(Instruction::CastOps op, Type *SrcTy,
// Type *DstTy) -- confirm against the original source.
  // Only first-class, non-aggregate types can be cast operands or results.
3331  if (!SrcTy->isFirstClassType() || !DstTy->isFirstClassType() ||
3332  SrcTy->isAggregateType() || DstTy->isAggregateType())
3333  return false;
3334 
3335  // Get the size of the types in bits, and whether we are dealing
3336  // with vector types, we'll need this later.
3337  bool SrcIsVec = isa<VectorType>(SrcTy);
3338  bool DstIsVec = isa<VectorType>(DstTy);
3339  unsigned SrcScalarBitSize = SrcTy->getScalarSizeInBits();
3340  unsigned DstScalarBitSize = DstTy->getScalarSizeInBits();
3341 
3342  // If these are vector types, get the lengths of the vectors (using zero for
3343  // scalar types means that checking that vector lengths match also checks that
3344  // scalars are not being converted to vectors or vectors to scalars).
3345  ElementCount SrcEC = SrcIsVec ? cast<VectorType>(SrcTy)->getElementCount()
// NOTE(review): the ': ...' else-branch of this conditional (original line
// 3346) was dropped by the doc extractor; same for the one below (line 3348).
3347  ElementCount DstEC = DstIsVec ? cast<VectorType>(DstTy)->getElementCount()
3349 
3350  // Switch on the opcode provided
3351  switch (op) {
3352  default: return false; // This is an input error
  // trunc: int(-vector) -> strictly narrower int(-vector), same element count.
3353  case Instruction::Trunc:
3354  return SrcTy->isIntOrIntVectorTy() && DstTy->isIntOrIntVectorTy() &&
3355  SrcEC == DstEC && SrcScalarBitSize > DstScalarBitSize;
  // zext/sext: int(-vector) -> strictly wider int(-vector), same element count.
3356  case Instruction::ZExt:
3357  return SrcTy->isIntOrIntVectorTy() && DstTy->isIntOrIntVectorTy() &&
3358  SrcEC == DstEC && SrcScalarBitSize < DstScalarBitSize;
3359  case Instruction::SExt:
3360  return SrcTy->isIntOrIntVectorTy() && DstTy->isIntOrIntVectorTy() &&
3361  SrcEC == DstEC && SrcScalarBitSize < DstScalarBitSize;
  // fptrunc/fpext: FP(-vector) -> narrower/wider FP(-vector) respectively.
3362  case Instruction::FPTrunc:
3363  return SrcTy->isFPOrFPVectorTy() && DstTy->isFPOrFPVectorTy() &&
3364  SrcEC == DstEC && SrcScalarBitSize > DstScalarBitSize;
3365  case Instruction::FPExt:
3366  return SrcTy->isFPOrFPVectorTy() && DstTy->isFPOrFPVectorTy() &&
3367  SrcEC == DstEC && SrcScalarBitSize < DstScalarBitSize;
  // int<->fp conversions: any widths, but element counts must match.
3368  case Instruction::UIToFP:
3369  case Instruction::SIToFP:
3370  return SrcTy->isIntOrIntVectorTy() && DstTy->isFPOrFPVectorTy() &&
3371  SrcEC == DstEC;
3372  case Instruction::FPToUI:
3373  case Instruction::FPToSI:
3374  return SrcTy->isFPOrFPVectorTy() && DstTy->isIntOrIntVectorTy() &&
3375  SrcEC == DstEC;
  // ptr<->int conversions: matching element counts, ptr on the proper side.
3376  case Instruction::PtrToInt:
3377  if (SrcEC != DstEC)
3378  return false;
3379  return SrcTy->isPtrOrPtrVectorTy() && DstTy->isIntOrIntVectorTy();
3380  case Instruction::IntToPtr:
3381  if (SrcEC != DstEC)
3382  return false;
3383  return SrcTy->isIntOrIntVectorTy() && DstTy->isPtrOrPtrVectorTy();
3384  case Instruction::BitCast: {
3385  PointerType *SrcPtrTy = dyn_cast<PointerType>(SrcTy->getScalarType());
3386  PointerType *DstPtrTy = dyn_cast<PointerType>(DstTy->getScalarType());
3387 
3388  // BitCast implies a no-op cast of type only. No bits change.
3389  // However, you can't cast pointers to anything but pointers.
3390  if (!SrcPtrTy != !DstPtrTy)
3391  return false;
3392 
3393  // For non-pointer cases, the cast is okay if the source and destination bit
3394  // widths are identical.
3395  if (!SrcPtrTy)
3396  return SrcTy->getPrimitiveSizeInBits() == DstTy->getPrimitiveSizeInBits();
3397 
3398  // If both are pointers then the address spaces must match.
3399  if (SrcPtrTy->getAddressSpace() != DstPtrTy->getAddressSpace())
3400  return false;
3401 
3402  // A vector of pointers must have the same number of elements.
3403  if (SrcIsVec && DstIsVec)
3404  return SrcEC == DstEC;
3405  if (SrcIsVec)
3406  return SrcEC == ElementCount::getFixed(1);
3407  if (DstIsVec)
3408  return DstEC == ElementCount::getFixed(1);
3409 
3410  return true;
3411  }
  // addrspacecast: ptr(-vector) -> ptr(-vector) in a *different* address
  // space, with matching element counts.
3412  case Instruction::AddrSpaceCast: {
3413  PointerType *SrcPtrTy = dyn_cast<PointerType>(SrcTy->getScalarType());
3414  if (!SrcPtrTy)
3415  return false;
3416 
3417  PointerType *DstPtrTy = dyn_cast<PointerType>(DstTy->getScalarType());
3418  if (!DstPtrTy)
3419  return false;
3420 
3421  if (SrcPtrTy->getAddressSpace() == DstPtrTy->getAddressSpace())
3422  return false;
3423 
3424  return SrcEC == DstEC;
3425  }
3426  }
3427 }
3428 
3430  Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
3431 ) : CastInst(Ty, Trunc, S, Name, InsertBefore) {
3432  assert(castIsValid(getOpcode(), S, Ty) && "Illegal Trunc");
3433 }
3434 
3436  Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
3437 ) : CastInst(Ty, Trunc, S, Name, InsertAtEnd) {
3438  assert(castIsValid(getOpcode(), S, Ty) && "Illegal Trunc");
3439 }
3440 
3442  Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
3443 ) : CastInst(Ty, ZExt, S, Name, InsertBefore) {
3444  assert(castIsValid(getOpcode(), S, Ty) && "Illegal ZExt");
3445 }
3446 
3448  Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
3449 ) : CastInst(Ty, ZExt, S, Name, InsertAtEnd) {
3450  assert(castIsValid(getOpcode(), S, Ty) && "Illegal ZExt");
3451 }
3453  Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
3454 ) : CastInst(Ty, SExt, S, Name, InsertBefore) {
3455  assert(castIsValid(getOpcode(), S, Ty) && "Illegal SExt");
3456 }
3457 
3459  Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
3460 ) : CastInst(Ty, SExt, S, Name, InsertAtEnd) {
3461  assert(castIsValid(getOpcode(), S, Ty) && "Illegal SExt");
3462 }
3463 
3465  Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
3466 ) : CastInst(Ty, FPTrunc, S, Name, InsertBefore) {
3467  assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPTrunc");
3468 }
3469 
3471  Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
3472 ) : CastInst(Ty, FPTrunc, S, Name, InsertAtEnd) {
3473  assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPTrunc");
3474 }
3475 
3477  Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
3478 ) : CastInst(Ty, FPExt, S, Name, InsertBefore) {
3479  assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPExt");
3480 }
3481 
3483  Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
3484 ) : CastInst(Ty, FPExt, S, Name, InsertAtEnd) {
3485  assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPExt");
3486 }
3487 
3489  Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
3490 ) : CastInst(Ty, UIToFP, S, Name, InsertBefore) {
3491  assert(castIsValid(getOpcode(), S, Ty) && "Illegal UIToFP");
3492 }
3493 
3495  Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
3496 ) : CastInst(Ty, UIToFP, S, Name, InsertAtEnd) {
3497  assert(castIsValid(getOpcode(), S, Ty) && "Illegal UIToFP");
3498 }
3499 
3501  Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
3502 ) : CastInst(Ty, SIToFP, S, Name, InsertBefore) {
3503  assert(castIsValid(getOpcode(), S, Ty) && "Illegal SIToFP");
3504 }
3505 
3507  Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
3508 ) : CastInst(Ty, SIToFP, S, Name, InsertAtEnd) {
3509  assert(castIsValid(getOpcode(), S, Ty) && "Illegal SIToFP");
3510 }
3511 
3513  Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
3514 ) : CastInst(Ty, FPToUI, S, Name, InsertBefore) {
3515  assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPToUI");
3516 }
3517 
3519  Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
3520 ) : CastInst(Ty, FPToUI, S, Name, InsertAtEnd) {
3521  assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPToUI");
3522 }
3523 
3525  Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
3526 ) : CastInst(Ty, FPToSI, S, Name, InsertBefore) {
3527  assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPToSI");
3528 }
3529 
3531  Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
3532 ) : CastInst(Ty, FPToSI, S, Name, InsertAtEnd) {
3533  assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPToSI");
3534 }
3535 
3537  Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
3538 ) : CastInst(Ty, PtrToInt, S, Name, InsertBefore) {
3539  assert(castIsValid(getOpcode(), S, Ty) && "Illegal PtrToInt");
3540 }
3541 
3543  Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
3544 ) : CastInst(Ty, PtrToInt, S, Name, InsertAtEnd) {
3545  assert(castIsValid(getOpcode(), S, Ty) && "Illegal PtrToInt");
3546 }
3547 
3549  Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
3550 ) : CastInst(Ty, IntToPtr, S, Name, InsertBefore) {
3551  assert(castIsValid(getOpcode(), S, Ty) && "Illegal IntToPtr");
3552 }
3553 
3555  Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
3556 ) : CastInst(Ty, IntToPtr, S, Name, InsertAtEnd) {
3557  assert(castIsValid(getOpcode(), S, Ty) && "Illegal IntToPtr");
3558 }
3559 
3561  Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
3562 ) : CastInst(Ty, BitCast, S, Name, InsertBefore) {
3563  assert(castIsValid(getOpcode(), S, Ty) && "Illegal BitCast");
3564 }
3565 
3567  Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
3568 ) : CastInst(Ty, BitCast, S, Name, InsertAtEnd) {
3569  assert(castIsValid(getOpcode(), S, Ty) && "Illegal BitCast");
3570 }
3571 
3573  Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
3574 ) : CastInst(Ty, AddrSpaceCast, S, Name, InsertBefore) {
3575  assert(castIsValid(getOpcode(), S, Ty) && "Illegal AddrSpaceCast");
3576 }
3577 
3579  Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
3580 ) : CastInst(Ty, AddrSpaceCast, S, Name, InsertAtEnd) {
3581  assert(castIsValid(getOpcode(), S, Ty) && "Illegal AddrSpaceCast");
3582 }
3583 
3584 //===----------------------------------------------------------------------===//
3585 // CmpInst Classes
3586 //===----------------------------------------------------------------------===//
3587 
3589  Value *RHS, const Twine &Name, Instruction *InsertBefore,
3590  Instruction *FlagsSource)
3591  : Instruction(ty, op,
3592  OperandTraits<CmpInst>::op_begin(this),
3593  OperandTraits<CmpInst>::operands(this),
3594  InsertBefore) {
3595  Op<0>() = LHS;
3596  Op<1>() = RHS;
3597  setPredicate((Predicate)predicate);
3598  setName(Name);
3599  if (FlagsSource)
3600  copyIRFlags(FlagsSource);
3601 }
3602 
3604  Value *RHS, const Twine &Name, BasicBlock *InsertAtEnd)
3605  : Instruction(ty, op,
3606  OperandTraits<CmpInst>::op_begin(this),
3607  OperandTraits<CmpInst>::operands(this),
3608  InsertAtEnd) {
3609  Op<0>() = LHS;
3610  Op<1>() = RHS;
3611  setPredicate((Predicate)predicate);
3612  setName(Name);
3613 }
3614 
3615 CmpInst *
3617  const Twine &Name, Instruction *InsertBefore) {
3618  if (Op == Instruction::ICmp) {
3619  if (InsertBefore)
3620  return new ICmpInst(InsertBefore, CmpInst::Predicate(predicate),
3621  S1, S2, Name);
3622  else
3623  return new ICmpInst(CmpInst::Predicate(predicate),
3624  S1, S2, Name);
3625  }
3626 
3627  if (InsertBefore)
3628  return new FCmpInst(InsertBefore, CmpInst::Predicate(predicate),
3629  S1, S2, Name);
3630  else
3631  return new FCmpInst(CmpInst::Predicate(predicate),
3632  S1, S2, Name);
3633 }
3634 
3635 CmpInst *
3637  const Twine &Name, BasicBlock *InsertAtEnd) {
3638  if (Op == Instruction::ICmp) {
3639  return new ICmpInst(*InsertAtEnd, CmpInst::Predicate(predicate),
3640  S1, S2, Name);
3641  }
3642  return new FCmpInst(*InsertAtEnd, CmpInst::Predicate(predicate),
3643  S1, S2, Name);
3644 }
3645 
3647  if (ICmpInst *IC = dyn_cast<ICmpInst>(this))
3648  IC->swapOperands();
3649  else
3650  cast<FCmpInst>(this)->swapOperands();
3651 }
3652 
3654  if (const ICmpInst *IC = dyn_cast<ICmpInst>(this))
3655  return IC->isCommutative();
3656  return cast<FCmpInst>(this)->isCommutative();
3657 }
3658 
3661  return ICmpInst::isEquality(P);
3663  return FCmpInst::isEquality(P);
3664  llvm_unreachable("Unsupported predicate kind");
3665 }
3666 
3668  switch (pred) {
3669  default: llvm_unreachable("Unknown cmp predicate!");
3670  case ICMP_EQ: return ICMP_NE;
3671  case ICMP_NE: return ICMP_EQ;
3672  case ICMP_UGT: return ICMP_ULE;
3673  case ICMP_ULT: return ICMP_UGE;
3674  case ICMP_UGE: return ICMP_ULT;
3675  case ICMP_ULE: return ICMP_UGT;
3676  case ICMP_SGT: return ICMP_SLE;
3677  case ICMP_SLT: return ICMP_SGE;
3678  case ICMP_SGE: return ICMP_SLT;
3679  case ICMP_SLE: return ICMP_SGT;
3680 
3681  case FCMP_OEQ: return FCMP_UNE;
3682  case FCMP_ONE: return FCMP_UEQ;
3683  case FCMP_OGT: return FCMP_ULE;
3684  case FCMP_OLT: return FCMP_UGE;
3685  case FCMP_OGE: return FCMP_ULT;
3686  case FCMP_OLE: return FCMP_UGT;
3687  case FCMP_UEQ: return FCMP_ONE;
3688  case FCMP_UNE: return FCMP_OEQ;
3689  case FCMP_UGT: return FCMP_OLE;
3690  case FCMP_ULT: return FCMP_OGE;
3691  case FCMP_UGE: return FCMP_OLT;
3692  case FCMP_ULE: return FCMP_OGT;
3693  case FCMP_ORD: return FCMP_UNO;
3694  case FCMP_UNO: return FCMP_ORD;
3695  case FCMP_TRUE: return FCMP_FALSE;
3696  case FCMP_FALSE: return FCMP_TRUE;
3697  }
3698 }
3699 
3701  switch (Pred) {
3702  default: return "unknown";
3703  case FCmpInst::FCMP_FALSE: return "false";
3704  case FCmpInst::FCMP_OEQ: return "oeq";
3705  case FCmpInst::FCMP_OGT: return "ogt";
3706  case FCmpInst::FCMP_OGE: return "oge";
3707  case FCmpInst::FCMP_OLT: return "olt";
3708  case FCmpInst::FCMP_OLE: return "ole";
3709  case FCmpInst::FCMP_ONE: return "one";
3710  case FCmpInst::FCMP_ORD: return "ord";
3711  case FCmpInst::FCMP_UNO: return "uno";
3712  case FCmpInst::FCMP_UEQ: return "ueq";
3713  case FCmpInst::FCMP_UGT: return "ugt";
3714  case FCmpInst::FCMP_UGE: return "uge";
3715  case FCmpInst::FCMP_ULT: return "ult";
3716  case FCmpInst::FCMP_ULE: return "ule";
3717  case FCmpInst::FCMP_UNE: return "une";
3718  case FCmpInst::FCMP_TRUE: return "true";
3719  case ICmpInst::ICMP_EQ: return "eq";
3720  case ICmpInst::ICMP_NE: return "ne";
3721  case ICmpInst::ICMP_SGT: return "sgt";
3722  case ICmpInst::ICMP_SGE: return "sge";
3723  case ICmpInst::ICMP_SLT: return "slt";
3724  case ICmpInst::ICMP_SLE: return "sle";
3725  case ICmpInst::ICMP_UGT: return "ugt";
3726  case ICmpInst::ICMP_UGE: return "uge";
3727  case ICmpInst::ICMP_ULT: return "ult";
3728  case ICmpInst::ICMP_ULE: return "ule";
3729  }
3730 }
3731 
3733  switch (pred) {
3734  default: llvm_unreachable("Unknown icmp predicate!");
3735  case ICMP_EQ: case ICMP_NE:
3736  case ICMP_SGT: case ICMP_SLT: case ICMP_SGE: case ICMP_SLE:
3737  return pred;
3738  case ICMP_UGT: return ICMP_SGT;
3739  case ICMP_ULT: return ICMP_SLT;
3740  case ICMP_UGE: return ICMP_SGE;
3741  case ICMP_ULE: return ICMP_SLE;
3742  }
3743 }
3744 
3746  switch (pred) {
3747  default: llvm_unreachable("Unknown icmp predicate!");
3748  case ICMP_EQ: case ICMP_NE:
3749  case ICMP_UGT: case ICMP_ULT: case ICMP_UGE: case ICMP_ULE:
3750  return pred;
3751  case ICMP_SGT: return ICMP_UGT;
3752  case ICMP_SLT: return ICMP_ULT;
3753  case ICMP_SGE: return ICMP_UGE;
3754  case ICMP_SLE: return ICMP_ULE;
3755  }
3756 }
3757 
3759  switch (pred) {
3760  default: llvm_unreachable("Unknown cmp predicate!");
3761  case ICMP_EQ: case ICMP_NE:
3762  return pred;
3763  case ICMP_SGT: return ICMP_SLT;
3764  case ICMP_SLT: return ICMP_SGT;
3765  case ICMP_SGE: return ICMP_SLE;
3766  case ICMP_SLE: return ICMP_SGE;
3767  case ICMP_UGT: return ICMP_ULT;
3768  case ICMP_ULT: return ICMP_UGT;
3769  case ICMP_UGE: return ICMP_ULE;
3770  case ICMP_ULE: return ICMP_UGE;
3771 
3772  case FCMP_FALSE: case FCMP_TRUE:
3773  case FCMP_OEQ: case FCMP_ONE:
3774  case FCMP_UEQ: case FCMP_UNE:
3775  case FCMP_ORD: case FCMP_UNO:
3776  return pred;
3777  case FCMP_OGT: return FCMP_OLT;
3778  case FCMP_OLT: return FCMP_OGT;
3779  case FCMP_OGE: return FCMP_OLE;
3780  case FCMP_OLE: return FCMP_OGE;
3781  case FCMP_UGT: return FCMP_ULT;
3782  case FCMP_ULT: return FCMP_UGT;
3783  case FCMP_UGE: return FCMP_ULE;
3784  case FCMP_ULE: return FCMP_UGE;
3785  }
3786 }
3787 
3789  switch (pred) {
3790  case ICMP_SGE:
3791  case ICMP_SLE:
3792  case ICMP_UGE:
3793  case ICMP_ULE:
3794  case FCMP_OGE:
3795  case FCMP_OLE:
3796  case FCMP_UGE:
3797  case FCMP_ULE:
3798  return true;
3799  default:
3800  return false;
3801  }
3802 }
3803 
3805  switch (pred) {
3806  case ICMP_SGT:
3807  case ICMP_SLT:
3808  case ICMP_UGT:
3809  case ICMP_ULT:
3810  case FCMP_OGT:
3811  case FCMP_OLT:
3812  case FCMP_UGT:
3813  case FCMP_ULT:
3814  return true;
3815  default:
3816  return false;
3817  }
3818 }
3819 
3821  switch (pred) {
3822  case ICMP_SGE:
3823  return ICMP_SGT;
3824  case ICMP_SLE:
3825  return ICMP_SLT;
3826  case ICMP_UGE:
3827  return ICMP_UGT;
3828  case ICMP_ULE:
3829  return ICMP_ULT;
3830  case FCMP_OGE:
3831  return FCMP_OGT;
3832  case FCMP_OLE:
3833  return FCMP_OLT;
3834  case FCMP_UGE:
3835  return FCMP_UGT;
3836  case FCMP_ULE:
3837  return FCMP_ULT;
3838  default:
3839  return pred;
3840  }
3841 }
3842 
3844  switch (pred) {
3845  case ICMP_SGT:
3846  return ICMP_SGE;
3847  case ICMP_SLT:
3848  return ICMP_SLE;
3849  case ICMP_UGT:
3850  return ICMP_UGE;
3851  case ICMP_ULT:
3852  return ICMP_ULE;
3853  case FCMP_OGT:
3854  return FCMP_OGE;
3855  case FCMP_OLT:
3856  return FCMP_OLE;
3857  case FCMP_UGT:
3858  return FCMP_UGE;
3859  case FCMP_ULT:
3860  return FCMP_ULE;
3861  default:
3862  return pred;
3863  }
3864 }
3865 
3867  assert(CmpInst::isRelational(pred) && "Call only with relational predicate!");
3868 
3869  if (isStrictPredicate(pred))
3870  return getNonStrictPredicate(pred);
3872  return getStrictPredicate(pred);
3873 
3874  llvm_unreachable("Unknown predicate!");
3875 }
3876 
3878  assert(CmpInst::isUnsigned(pred) && "Call only with unsigned predicates!");
3879 
3880  switch (pred) {
3881  default:
3882  llvm_unreachable("Unknown predicate!");
3883  case CmpInst::ICMP_ULT:
3884  return CmpInst::ICMP_SLT;
3885  case CmpInst::ICMP_ULE:
3886  return CmpInst::ICMP_SLE;
3887  case CmpInst::ICMP_UGT:
3888  return CmpInst::ICMP_SGT;
3889  case CmpInst::ICMP_UGE:
3890  return CmpInst::ICMP_SGE;
3891  }
3892 }
3893 
3895  assert(CmpInst::isSigned(pred) && "Call only with signed predicates!");
3896 
3897  switch (pred) {
3898  default:
3899  llvm_unreachable("Unknown predicate!");
3900  case CmpInst::ICMP_SLT:
3901  return CmpInst::ICMP_ULT;
3902  case CmpInst::ICMP_SLE:
3903  return CmpInst::ICMP_ULE;
3904  case CmpInst::ICMP_SGT:
3905  return CmpInst::ICMP_UGT;
3906  case CmpInst::ICMP_SGE:
3907  return CmpInst::ICMP_UGE;
3908  }
3909 }
3910 
3912  switch (predicate) {
3913  default: return false;
3915  case ICmpInst::ICMP_UGE: return true;
3916  }
3917 }
3918 
3919 bool CmpInst::isSigned(Predicate predicate) {
3920  switch (predicate) {
3921  default: return false;
3923  case ICmpInst::ICMP_SGE: return true;
3924  }
3925 }
3926 
3929  "Call only with non-equality predicates!");
3930 
3931  if (isSigned(pred))
3932  return getUnsignedPredicate(pred);
3933  if (isUnsigned(pred))
3934  return getSignedPredicate(pred);
3935 
3936  llvm_unreachable("Unknown predicate!");
3937 }
3938 
3940  switch (predicate) {
3941  default: return false;
3944  case FCmpInst::FCMP_ORD: return true;
3945  }
3946 }
3947 
3949  switch (predicate) {
3950  default: return false;
3953  case FCmpInst::FCMP_UNO: return true;
3954  }
3955 }
3956 
3958  switch(predicate) {
3959  default: return false;
3960  case ICMP_EQ: case ICMP_UGE: case ICMP_ULE: case ICMP_SGE: case ICMP_SLE:
3961  case FCMP_TRUE: case FCMP_UEQ: case FCMP_UGE: case FCMP_ULE: return true;
3962  }
3963 }
3964 
3966  switch(predicate) {
3967  case ICMP_NE: case ICMP_UGT: case ICMP_ULT: case ICMP_SGT: case ICMP_SLT:
3968  case FCMP_FALSE: case FCMP_ONE: case FCMP_OGT: case FCMP_OLT: return true;
3969  default: return false;
3970  }
3971 }
3972 
3974  // If the predicates match, then we know the first condition implies the
3975  // second is true.
3976  if (Pred1 == Pred2)
3977  return true;
3978 
3979  switch (Pred1) {
3980  default:
3981  break;
3982  case ICMP_EQ:
3983  // A == B implies A >=u B, A <=u B, A >=s B, and A <=s B are true.
3984  return Pred2 == ICMP_UGE || Pred2 == ICMP_ULE || Pred2 == ICMP_SGE ||
3985  Pred2 == ICMP_SLE;
3986  case ICMP_UGT: // A >u B implies A != B and A >=u B are true.
3987  return Pred2 == ICMP_NE || Pred2 == ICMP_UGE;
3988  case ICMP_ULT: // A <u B implies A != B and A <=u B are true.
3989  return Pred2 == ICMP_NE || Pred2 == ICMP_ULE;
3990  case ICMP_SGT: // A >s B implies A != B and A >=s B are true.
3991  return Pred2 == ICMP_NE || Pred2 == ICMP_SGE;
3992  case ICMP_SLT: // A <s B implies A != B and A <=s B are true.
3993  return Pred2 == ICMP_NE || Pred2 == ICMP_SLE;
3994  }
3995  return false;
3996 }
3997 
3999  return isImpliedTrueByMatchingCmp(Pred1, getInversePredicate(Pred2));
4000 }
4001 
4002 //===----------------------------------------------------------------------===//
4003 // SwitchInst Implementation
4004 //===----------------------------------------------------------------------===//
4005 
4006 void SwitchInst::init(Value *Value, BasicBlock *Default, unsigned NumReserved) {
4007  assert(Value && Default && NumReserved);
4008  ReservedSpace = NumReserved;
4010  allocHungoffUses(ReservedSpace);
4011 
4012  Op<0>() = Value;
4013  Op<1>() = Default;
4014 }
4015 
4016 /// SwitchInst ctor - Create a new switch instruction, specifying a value to
4017 /// switch on and a default destination. The number of additional cases can
4018 /// be specified here to make memory allocation more efficient. This
4019 /// constructor can also autoinsert before another instruction.
4020 SwitchInst::SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases,
4021  Instruction *InsertBefore)
4022  : Instruction(Type::getVoidTy(Value->getContext()), Instruction::Switch,
4023  nullptr, 0, InsertBefore) {
4024  init(Value, Default, 2+NumCases*2);
4025 }
4026 
4027 /// SwitchInst ctor - Create a new switch instruction, specifying a value to
4028 /// switch on and a default destination. The number of additional cases can
4029 /// be specified here to make memory allocation more efficient. This
4030 /// constructor also autoinserts at the end of the specified BasicBlock.
4031 SwitchInst::SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases,
4032  BasicBlock *InsertAtEnd)
4033  : Instruction(Type::getVoidTy(Value->getContext()), Instruction::Switch,
4034  nullptr, 0, InsertAtEnd) {
4035  init(Value, Default, 2+NumCases*2);
4036 }
4037 
4038 SwitchInst::SwitchInst(const SwitchInst &SI)
4039  : Instruction(SI.getType(), Instruction::Switch, nullptr, 0) {
4040  init(SI.getCondition(), SI.getDefaultDest(), SI.getNumOperands());
4041  setNumHungOffUseOperands(SI.getNumOperands());
4042  Use *OL = getOperandList();
4043  const Use *InOL = SI.getOperandList();
4044  for (unsigned i = 2, E = SI.getNumOperands(); i != E; i += 2) {
4045  OL[i] = InOL[i];
4046  OL[i+1] = InOL[i+1];
4047  }
4048  SubclassOptionalData = SI.SubclassOptionalData;
4049 }
4050 
4051 /// addCase - Add an entry to the switch instruction...
4052 ///
4054  unsigned NewCaseIdx = getNumCases();
4055  unsigned OpNo = getNumOperands();
4056  if (OpNo+2 > ReservedSpace)
4057  growOperands(); // Get more space!
4058  // Initialize some new operands.
4059  assert(OpNo+1 < ReservedSpace && "Growing didn't work!");
4060  setNumHungOffUseOperands(OpNo+2);
4061  CaseHandle Case(this, NewCaseIdx);
4062  Case.setValue(OnVal);
4063  Case.setSuccessor(Dest);
4064 }
4065 
4066 /// removeCase - This method removes the specified case and its successor
4067 /// from the switch instruction.
4069  unsigned idx = I->getCaseIndex();
4070 
4071  assert(2 + idx*2 < getNumOperands() && "Case index out of range!!!");
4072 
4073  unsigned NumOps = getNumOperands();
4074  Use *OL = getOperandList();
4075 
4076  // Overwrite this case with the end of the list.
4077  if (2 + (idx + 1) * 2 != NumOps) {
4078  OL[2 + idx * 2] = OL[NumOps - 2];
4079  OL[2 + idx * 2 + 1] = OL[NumOps - 1];
4080  }
4081 
4082  // Nuke the last value.
4083  OL[NumOps-2].set(nullptr);
4084  OL[NumOps-2+1].set(nullptr);
4085  setNumHungOffUseOperands(NumOps-2);
4086 
4087  return CaseIt(this, idx);
4088 }
4089 
4090 /// growOperands - grow operands - This grows the operand list in response
4091 /// to a push_back style of operation. This grows the number of ops by 3 times.
4092 ///
4093 void SwitchInst::growOperands() {
4094  unsigned e = getNumOperands();
4095  unsigned NumOps = e*3;
4096 
4097  ReservedSpace = NumOps;
4098  growHungoffUses(ReservedSpace);
4099 }
4100 
4101 MDNode *
4103  if (MDNode *ProfileData = SI.getMetadata(LLVMContext::MD_prof))
4104  if (auto *MDName = dyn_cast<MDString>(ProfileData->getOperand(0)))
4105  if (MDName->getString() == "branch_weights")
4106  return ProfileData;
4107  return nullptr;
4108 }
4109 
4111  assert(Changed && "called only if metadata has changed");
4112 
4113  if (!Weights)
4114  return nullptr;
4115 
4116  assert(SI.getNumSuccessors() == Weights->size() &&
4117  "num of prof branch_weights must accord with num of successors");
4118 
4119  bool AllZeroes =
4120  all_of(Weights.getValue(), [](uint32_t W) { return W == 0; });
4121 
4122  if (AllZeroes || Weights.getValue().size() < 2)
4123  return nullptr;
4124 
4125  return MDBuilder(SI.getParent()->getContext()).createBranchWeights(*Weights);
4126 }
4127 
4129  MDNode *ProfileData = getProfBranchWeightsMD(SI);
4130  if (!ProfileData)
4131  return;
4132 
4133  if (ProfileData->getNumOperands() != SI.getNumSuccessors() + 1) {
4134  llvm_unreachable("number of prof branch_weights metadata operands does "
4135  "not correspond to number of succesors");
4136  }
4137 
4138  SmallVector<uint32_t, 8> Weights;
4139  for (unsigned CI = 1, CE = SI.getNumSuccessors(); CI <= CE; ++CI) {
4140  ConstantInt *C = mdconst::extract<ConstantInt>(ProfileData->getOperand(CI));
4141  uint32_t CW = C->getValue().getZExtValue();
4142  Weights.push_back(CW);
4143  }
4144  this->Weights = std::move(Weights);
4145 }
4146 
4149  if (Weights) {
4150  assert(SI.getNumSuccessors() == Weights->size() &&
4151  "num of prof branch_weights must accord with num of successors");
4152  Changed = true;
4153  // Copy the last case to the place of the removed one and shrink.
4154  // This is tightly coupled with the way SwitchInst::removeCase() removes
4155  // the cases in SwitchInst::removeCase(CaseIt).
4156  Weights.getValue()[I->getCaseIndex() + 1] = Weights.getValue().back();
4157  Weights.getValue().pop_back();
4158  }
4159  return SI.removeCase(I);
4160 }
4161 
4163  ConstantInt *OnVal, BasicBlock *Dest,
4165  SI.addCase(OnVal, Dest);
4166 
4167  if (!Weights && W && *W) {
4168  Changed = true;
4169  Weights = SmallVector<uint32_t, 8>(SI.getNumSuccessors(), 0);
4170  Weights.getValue()[SI.getNumSuccessors() - 1] = *W;
4171  } else if (Weights) {
4172  Changed = true;
4173  Weights.getValue().push_back(W ? *W : 0);
4174  }
4175  if (Weights)
4176  assert(SI.getNumSuccessors() == Weights->size() &&
4177  "num of prof branch_weights must accord with num of successors");
4178 }
4179 
4182  // Instruction is erased. Mark as unchanged to not touch it in the destructor.
4183  Changed = false;
4184  if (Weights)
4185  Weights->resize(0);
4186  return SI.eraseFromParent();
4187 }
4188 
4191  if (!Weights)
4192  return None;
4193  return Weights.getValue()[idx];
4194 }
4195 
4198  if (!W)
4199  return;
4200 
4201  if (!Weights && *W)
4202  Weights = SmallVector<uint32_t, 8>(SI.getNumSuccessors(), 0);
4203 
4204  if (Weights) {
4205  auto &OldW = Weights.getValue()[idx];
4206  if (*W != OldW) {
4207  Changed = true;
4208  OldW = *W;
4209  }
4210  }
4211 }
4212 
4215  unsigned idx) {
4216  if (MDNode *ProfileData = getProfBranchWeightsMD(SI))
4217  if (ProfileData->getNumOperands() == SI.getNumSuccessors() + 1)
4218  return mdconst::extract<ConstantInt>(ProfileData->getOperand(idx + 1))
4219  ->getValue()
4220  .getZExtValue();
4221 
4222  return None;
4223 }
4224 
4225 //===----------------------------------------------------------------------===//
4226 // IndirectBrInst Implementation
4227 //===----------------------------------------------------------------------===//
4228 
4229 void IndirectBrInst::init(Value *Address, unsigned NumDests) {
4230  assert(Address && Address->getType()->isPointerTy() &&
4231  "Address of indirectbr must be a pointer");
4232  ReservedSpace = 1+NumDests;
4234  allocHungoffUses(ReservedSpace);
4235 
4236  Op<0>() = Address;
4237 }
4238 
4239 
4240 /// growOperands - grow operands - This grows the operand list in response
4241 /// to a push_back style of operation. This grows the number of ops by 2 times.
4242 ///
4243 void IndirectBrInst::growOperands() {
4244  unsigned e = getNumOperands();
4245  unsigned NumOps = e*2;
4246 
4247  ReservedSpace = NumOps;
4248  growHungoffUses(ReservedSpace);
4249 }
4250 
4251 IndirectBrInst::IndirectBrInst(Value *Address, unsigned NumCases,
4252  Instruction *InsertBefore)
4253  : Instruction(Type::getVoidTy(Address->getContext()),
4254  Instruction::IndirectBr, nullptr, 0, InsertBefore) {
4255  init(Address, NumCases);
4256 }
4257 
4258 IndirectBrInst::IndirectBrInst(Value *Address, unsigned NumCases,
4259  BasicBlock *InsertAtEnd)
4260  : Instruction(Type::getVoidTy(Address->getContext()),
4261  Instruction::IndirectBr, nullptr, 0, InsertAtEnd) {
4262  init(Address, NumCases);
4263 }
4264 
4265 IndirectBrInst::IndirectBrInst(const IndirectBrInst &IBI)
4266  : Instruction(Type::getVoidTy(IBI.getContext()), Instruction::IndirectBr,
4267  nullptr, IBI.getNumOperands()) {
4268  allocHungoffUses(IBI.getNumOperands());
4269  Use *OL = getOperandList();
4270  const Use *InOL = IBI.getOperandList();
4271  for (unsigned i = 0, E = IBI.getNumOperands(); i != E; ++i)
4272  OL[i] = InOL[i];
4273  SubclassOptionalData = IBI.SubclassOptionalData;
4274 }
4275 
4276 /// addDestination - Add a destination.
4277 ///
4279  unsigned OpNo = getNumOperands();
4280  if (OpNo+1 > ReservedSpace)
4281  growOperands(); // Get more space!
4282  // Initialize some new operands.
4283  assert(OpNo < ReservedSpace && "Growing didn't work!");
4284  setNumHungOffUseOperands(OpNo+1);
4285  getOperandList()[OpNo] = DestBB;
4286 }
4287 
4288 /// removeDestination - This method removes the specified successor from the
4289 /// indirectbr instruction.
4291  assert(idx < getNumOperands()-1 && "Successor index out of range!");
4292 
4293  unsigned NumOps = getNumOperands();
4294  Use *OL = getOperandList();
4295 
4296  // Replace this value with the last one.
4297  OL[idx+1] = OL[NumOps-1];
4298 
4299  // Nuke the last value.
4300  OL[NumOps-1].set(nullptr);
4301  setNumHungOffUseOperands(NumOps-1);
4302 }
4303 
4304 //===----------------------------------------------------------------------===//
4305 // FreezeInst Implementation
4306 //===----------------------------------------------------------------------===//
4307 
4309  const Twine &Name, Instruction *InsertBefore)
4310  : UnaryInstruction(S->getType(), Freeze, S, InsertBefore) {
4311  setName(Name);
4312 }
4313 
4315  const Twine &Name, BasicBlock *InsertAtEnd)
4316  : UnaryInstruction(S->getType(), Freeze, S, InsertAtEnd) {
4317  setName(Name);
4318 }
4319 
4320 //===----------------------------------------------------------------------===//
4321 // cloneImpl() implementations
4322 //===----------------------------------------------------------------------===//
4323 
4324 // Define these methods here so vtables don't get emitted into every translation
4325 // unit that uses these classes.
4326 
4328  return new (getNumOperands()) GetElementPtrInst(*this);
4329 }
4330 
4332  return Create(getOpcode(), Op<0>());
4333 }
4334 
4336  return Create(getOpcode(), Op<0>(), Op<1>());
4337 }
4338 
4340  return new FCmpInst(getPredicate(), Op<0>(), Op<1>());
4341 }
4342 
4344  return new ICmpInst(getPredicate(), Op<0>(), Op<1>());
4345 }
4346 
4348  return new ExtractValueInst(*this);
4349 }
4350 
4352  return new InsertValueInst(*this);
4353 }
4354 
4356  AllocaInst *Result =
4357  new AllocaInst(getAllocatedType(), getType()->getAddressSpace(),
4358  getOperand(0), getAlign());
4359  Result->setUsedWithInAlloca(isUsedWithInAlloca());
4360  Result->setSwiftError(isSwiftError());
4361  return Result;
4362 }
4363 
4365  return new LoadInst(getType(), getOperand(0), Twine(), isVolatile(),
4367 }
4368 
4370  return new StoreInst(getOperand(0), getOperand(1), isVolatile(), getAlign(),
4372 }
4373 
4375  AtomicCmpXchgInst *Result = new AtomicCmpXchgInst(
4376  getOperand(0), getOperand(1), getOperand(2), getAlign(),
4378  Result->setVolatile(isVolatile());
4379  Result->setWeak(isWeak());
4380  return Result;
4381 }
4382 
4384  AtomicRMWInst *Result =
4387  Result->setVolatile(isVolatile());
4388  return Result;
4389 }
4390 
4392  return new FenceInst(getContext(), getOrdering(), getSyncScopeID());
4393 }
4394 
4396  return new TruncInst(getOperand(0), getType());
4397 }
4398 
4400  return new ZExtInst(getOperand(0), getType());
4401 }
4402 
4404  return new SExtInst(getOperand(0), getType());
4405 }
4406 
4408  return new FPTruncInst(getOperand(0), getType());
4409 }
4410 
4412  return new FPExtInst(getOperand(0), getType());
4413 }
4414 
4416  return new UIToFPInst(getOperand(0), getType());
4417 }
4418