//===- Instructions.cpp - Implement the LLVM instructions -----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements all of the non-inline methods for the LLVM instruction
// classes.
//
//===----------------------------------------------------------------------===//

#include "llvm/IR/Instructions.h"
#include "LLVMContextImpl.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Twine.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/TypeSize.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <vector>

using namespace llvm;

static cl::opt<bool> DisableI2pP2iOpt(
    "disable-i2p-p2i-opt", cl::init(false),
    cl::desc("Disables inttoptr/ptrtoint roundtrip optimization"));

//===----------------------------------------------------------------------===//
// AllocaInst Class
//===----------------------------------------------------------------------===//

Optional<TypeSize>
AllocaInst::getAllocationSizeInBits(const DataLayout &DL) const {
  TypeSize Size = DL.getTypeAllocSizeInBits(getAllocatedType());
  if (isArrayAllocation()) {
    auto *C = dyn_cast<ConstantInt>(getArraySize());
    if (!C)
      return None;
    assert(!Size.isScalable() && "Array elements cannot have a scalable size");
    Size *= C->getZExtValue();
  }
  return Size;
}
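
// Usage sketch (illustrative only; assumes `AI` is an AllocaInst * and `DL`
// is the owning module's DataLayout). The result is None when the array size
// is not a compile-time constant:
//
//   if (Optional<TypeSize> SizeInBits = AI->getAllocationSizeInBits(DL))
//     errs() << "alloca is " << SizeInBits->getKnownMinValue() << " bits\n";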

//===----------------------------------------------------------------------===//
// SelectInst Class
//===----------------------------------------------------------------------===//

/// areInvalidOperands - Return a string if the specified operands are invalid
/// for a select operation, otherwise return null.
const char *SelectInst::areInvalidOperands(Value *Op0, Value *Op1, Value *Op2) {
  if (Op1->getType() != Op2->getType())
    return "both values to select must have same type";

  if (Op1->getType()->isTokenTy())
    return "select values cannot have token type";

  if (VectorType *VT = dyn_cast<VectorType>(Op0->getType())) {
    // Vector select.
    if (VT->getElementType() != Type::getInt1Ty(Op0->getContext()))
      return "vector select condition element type must be i1";
    VectorType *ET = dyn_cast<VectorType>(Op1->getType());
    if (!ET)
      return "selected values for vector select must be vectors";
    if (ET->getElementCount() != VT->getElementCount())
      return "vector select requires selected vectors to have "
             "the same vector length as select condition";
  } else if (Op0->getType() != Type::getInt1Ty(Op0->getContext())) {
    return "select condition must be i1 or <n x i1>";
  }
  return nullptr;
}
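
// Usage sketch (illustrative only; `Cond`, `TVal`, `FVal` are assumed Values
// and `InsertPt` an insertion point). Validating the operands first turns a
// verifier failure into a diagnosable error:
//
//   if (const char *Err = SelectInst::areInvalidOperands(Cond, TVal, FVal))
//     report_fatal_error(Twine("cannot build select: ") + Err);
//   SelectInst *Sel = SelectInst::Create(Cond, TVal, FVal, "sel", InsertPt);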

//===----------------------------------------------------------------------===//
// PHINode Class
//===----------------------------------------------------------------------===//

PHINode::PHINode(const PHINode &PN)
    : Instruction(PN.getType(), Instruction::PHI, nullptr, PN.getNumOperands()),
      ReservedSpace(PN.getNumOperands()) {
  allocHungoffUses(PN.getNumOperands());
  std::copy(PN.op_begin(), PN.op_end(), op_begin());
  std::copy(PN.block_begin(), PN.block_end(), block_begin());
  SubclassOptionalData = PN.SubclassOptionalData;
}

// removeIncomingValue - Remove an incoming value. This is useful if a
// predecessor basic block is deleted.
Value *PHINode::removeIncomingValue(unsigned Idx, bool DeletePHIIfEmpty) {
  Value *Removed = getIncomingValue(Idx);

  // Move everything after this operand down.
  //
  // FIXME: we could just swap with the end of the list, then erase. However,
  // clients might not expect this to happen. The code as it is thrashes the
  // use/def lists, which is kinda lame.
  std::copy(op_begin() + Idx + 1, op_end(), op_begin() + Idx);
  std::copy(block_begin() + Idx + 1, block_end(), block_begin() + Idx);

  // Nuke the last value.
  Op<-1>().set(nullptr);
  setNumHungOffUseOperands(getNumOperands() - 1);

  // If the PHI node is dead, because it has zero entries, nuke it now.
  if (getNumOperands() == 0 && DeletePHIIfEmpty) {
    // If anyone is using this PHI, make them use a dummy value instead...
    replaceAllUsesWith(PoisonValue::get(getType()));
    eraseFromParent();
  }
  return Removed;
}
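
// Usage sketch (illustrative only; `PN` is a PHINode *, `DeadPred` a
// predecessor block that has just been removed from the CFG). The BasicBlock
// overload looks up the operand index and forwards to the index-based form
// above:
//
//   PN->removeIncomingValue(DeadPred, /*DeletePHIIfEmpty=*/true);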

/// growOperands - grow operands - This grows the operand list in response
/// to a push_back style of operation. This grows the number of ops by 1.5
/// times.
///
void PHINode::growOperands() {
  unsigned e = getNumOperands();
  unsigned NumOps = e + e / 2;
  if (NumOps < 2) NumOps = 2; // 2 op PHI nodes are VERY common.

  ReservedSpace = NumOps;
  growHungoffUses(ReservedSpace, /* IsPhi */ true);
}

/// hasConstantValue - If the specified PHI node always merges together the same
/// value, return the value, otherwise return null.
Value *PHINode::hasConstantValue() const {
  // Exploit the fact that phi nodes always have at least one entry.
  Value *ConstantValue = getIncomingValue(0);
  for (unsigned i = 1, e = getNumIncomingValues(); i != e; ++i)
    if (getIncomingValue(i) != ConstantValue && getIncomingValue(i) != this) {
      if (ConstantValue != this)
        return nullptr; // Incoming values not all the same.
      // The case where the first value is this PHI.
      ConstantValue = getIncomingValue(i);
    }
  if (ConstantValue == this)
    return UndefValue::get(getType());
  return ConstantValue;
}

/// hasConstantOrUndefValue - Whether the specified PHI node always merges
/// together the same value, assuming that undefs result in the same value as
/// non-undefs.
/// Unlike \ref hasConstantValue, this does not return a value because the
/// unique non-undef incoming value need not dominate the PHI node.
bool PHINode::hasConstantOrUndefValue() const {
  Value *ConstantValue = nullptr;
  for (unsigned i = 0, e = getNumIncomingValues(); i != e; ++i) {
    Value *Incoming = getIncomingValue(i);
    if (Incoming != this && !isa<UndefValue>(Incoming)) {
      if (ConstantValue && ConstantValue != Incoming)
        return false;
      ConstantValue = Incoming;
    }
  }
  return true;
}
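
// Example (illustrative): a phi such as
//   %p = phi i32 [ %x, %bb0 ], [ %x, %bb1 ], [ %p, %bb2 ]
// merges only %x (self-references are ignored), so hasConstantValue()
// returns %x and the phi can be folded away:
//
//   if (Value *V = PN->hasConstantValue()) {
//     PN->replaceAllUsesWith(V);
//     PN->eraseFromParent();
//   }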

//===----------------------------------------------------------------------===//
// LandingPadInst Implementation
//===----------------------------------------------------------------------===//

LandingPadInst::LandingPadInst(Type *RetTy, unsigned NumReservedValues,
                               const Twine &NameStr, Instruction *InsertBefore)
    : Instruction(RetTy, Instruction::LandingPad, nullptr, 0, InsertBefore) {
  init(NumReservedValues, NameStr);
}

LandingPadInst::LandingPadInst(Type *RetTy, unsigned NumReservedValues,
                               const Twine &NameStr, BasicBlock *InsertAtEnd)
    : Instruction(RetTy, Instruction::LandingPad, nullptr, 0, InsertAtEnd) {
  init(NumReservedValues, NameStr);
}

LandingPadInst::LandingPadInst(const LandingPadInst &LP)
    : Instruction(LP.getType(), Instruction::LandingPad, nullptr,
                  LP.getNumOperands()),
      ReservedSpace(LP.getNumOperands()) {
  allocHungoffUses(LP.getNumOperands());
  Use *OL = getOperandList();
  const Use *InOL = LP.getOperandList();
  for (unsigned I = 0, E = ReservedSpace; I != E; ++I)
    OL[I] = InOL[I];

  setCleanup(LP.isCleanup());
}

LandingPadInst *LandingPadInst::Create(Type *RetTy, unsigned NumReservedClauses,
                                       const Twine &NameStr,
                                       Instruction *InsertBefore) {
  return new LandingPadInst(RetTy, NumReservedClauses, NameStr, InsertBefore);
}

LandingPadInst *LandingPadInst::Create(Type *RetTy, unsigned NumReservedClauses,
                                       const Twine &NameStr,
                                       BasicBlock *InsertAtEnd) {
  return new LandingPadInst(RetTy, NumReservedClauses, NameStr, InsertAtEnd);
}

void LandingPadInst::init(unsigned NumReservedValues, const Twine &NameStr) {
  ReservedSpace = NumReservedValues;
  setNumHungOffUseOperands(0);
  allocHungoffUses(ReservedSpace);
  setName(NameStr);
  setCleanup(false);
}

/// growOperands - grow operands - This grows the operand list in response to a
/// push_back style of operation. This grows the number of ops by 2 times.
void LandingPadInst::growOperands(unsigned Size) {
  unsigned e = getNumOperands();
  if (ReservedSpace >= e + Size) return;
  ReservedSpace = (std::max(e, 1U) + Size / 2) * 2;
  growHungoffUses(ReservedSpace);
}

void LandingPadInst::addClause(Constant *Val) {
  unsigned OpNo = getNumOperands();
  growOperands(1);
  assert(OpNo < ReservedSpace && "Growing didn't work!");
  setNumHungOffUseOperands(getNumOperands() + 1);
  getOperandList()[OpNo] = Val;
}
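
// Usage sketch (illustrative only; `Ctx` is an LLVMContext, `TypeInfo` a
// Constant * naming an exception type, `InsertBefore` an insertion point).
// This builds the usual { i8*, i32 } landingpad and gives it one catch
// clause:
//
//   Type *LPadTy = StructType::get(Type::getInt8PtrTy(Ctx),
//                                  Type::getInt32Ty(Ctx));
//   LandingPadInst *LP = LandingPadInst::Create(
//       LPadTy, /*NumReservedClauses=*/1, "lpad", InsertBefore);
//   LP->addClause(TypeInfo);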

//===----------------------------------------------------------------------===//
// CallBase Implementation
//===----------------------------------------------------------------------===//

CallBase *CallBase::Create(CallBase *CB, ArrayRef<OperandBundleDef> Bundles,
                           Instruction *InsertPt) {
  switch (CB->getOpcode()) {
  case Instruction::Call:
    return CallInst::Create(cast<CallInst>(CB), Bundles, InsertPt);
  case Instruction::Invoke:
    return InvokeInst::Create(cast<InvokeInst>(CB), Bundles, InsertPt);
  case Instruction::CallBr:
    return CallBrInst::Create(cast<CallBrInst>(CB), Bundles, InsertPt);
  default:
    llvm_unreachable("Unknown CallBase sub-class!");
  }
}

CallBase *CallBase::Create(CallBase *CI, OperandBundleDef OpB,
                           Instruction *InsertPt) {
  SmallVector<OperandBundleDef, 2> OpDefs;
  for (unsigned i = 0, e = CI->getNumOperandBundles(); i < e; ++i) {
    auto ChildOB = CI->getOperandBundleAt(i);
    if (ChildOB.getTagName() != OpB.getTag())
      OpDefs.emplace_back(ChildOB);
  }
  OpDefs.emplace_back(OpB);
  return CallBase::Create(CI, OpDefs, InsertPt);
}
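
// Usage sketch (illustrative only; `CB` is an existing CallBase *). The
// single-bundle overload drops any bundle with the same tag and appends the
// new one; since the result is a fresh instruction, the old call must be
// replaced by hand:
//
//   OperandBundleDef Deopt("deopt", ArrayRef<Value *>());
//   CallBase *NewCB = CallBase::Create(CB, Deopt, /*InsertPt=*/CB);
//   CB->replaceAllUsesWith(NewCB);
//   CB->eraseFromParent();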

Function *CallBase::getCaller() { return getParent()->getParent(); }

unsigned CallBase::getNumSubclassExtraOperandsDynamic() const {
  assert(getOpcode() == Instruction::CallBr && "Unexpected opcode!");
  return cast<CallBrInst>(this)->getNumIndirectDests() + 1;
}

bool CallBase::isIndirectCall() const {
  const Value *V = getCalledOperand();
  if (isa<Function>(V) || isa<Constant>(V))
    return false;
  return !isInlineAsm();
}

/// Tests if this call site must be tail call optimized. Only a CallInst can
/// be tail call optimized.
bool CallBase::isMustTailCall() const {
  if (auto *CI = dyn_cast<CallInst>(this))
    return CI->isMustTailCall();
  return false;
}

/// Tests if this call site is marked as a tail call.
bool CallBase::isTailCall() const {
  if (auto *CI = dyn_cast<CallInst>(this))
    return CI->isTailCall();
  return false;
}

Intrinsic::ID CallBase::getIntrinsicID() const {
  if (auto *F = getCalledFunction())
    return F->getIntrinsicID();
  return Intrinsic::not_intrinsic;
}

bool CallBase::isReturnNonNull() const {
  if (hasRetAttr(Attribute::NonNull))
    return true;

  if (getRetDereferenceableBytes() > 0 &&
      !NullPointerIsDefined(getCaller(), getType()->getPointerAddressSpace()))
    return true;

  return false;
}

Value *CallBase::getArgOperandWithAttribute(Attribute::AttrKind Kind) const {
  unsigned Index;

  if (Attrs.hasAttrSomewhere(Kind, &Index))
    return getArgOperand(Index - AttributeList::FirstArgIndex);
  if (const Function *F = getCalledFunction())
    if (F->getAttributes().hasAttrSomewhere(Kind, &Index))
      return getArgOperand(Index - AttributeList::FirstArgIndex);

  return nullptr;
}

/// Determine whether the argument or parameter has the given attribute.
bool CallBase::paramHasAttr(unsigned ArgNo, Attribute::AttrKind Kind) const {
  assert(ArgNo < arg_size() && "Param index out of bounds!");

  if (Attrs.hasParamAttr(ArgNo, Kind))
    return true;
  if (const Function *F = getCalledFunction())
    return F->getAttributes().hasParamAttr(ArgNo, Kind);
  return false;
}
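
// Usage sketch (illustrative only; `CB` is a CallBase *). Because the query
// also consults the callee's declaration, it sees
//   declare void @f(i8* nonnull)
// even when the call site itself carries no attributes:
//
//   bool ArgIsNonNull = CB->paramHasAttr(0, Attribute::NonNull);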
bool CallBase::hasFnAttrOnCalledFunction(Attribute::AttrKind Kind) const {
  Value *V = getCalledOperand();
  if (auto *CE = dyn_cast<ConstantExpr>(V))
    if (CE->getOpcode() == BitCast)
      V = CE->getOperand(0);

  if (auto *F = dyn_cast<Function>(V))
    return F->getAttributes().hasFnAttr(Kind);

  return false;
}

bool CallBase::hasFnAttrOnCalledFunction(StringRef Kind) const {
  Value *V = getCalledOperand();
  if (auto *CE = dyn_cast<ConstantExpr>(V))
    if (CE->getOpcode() == BitCast)
      V = CE->getOperand(0);

  if (auto *F = dyn_cast<Function>(V))
    return F->getAttributes().hasFnAttr(Kind);

  return false;
}

template <typename AK>
Attribute CallBase::getFnAttrOnCalledFunction(AK Kind) const {
  // Operand bundles override attributes on the called function, but don't
  // override attributes directly present on the call instruction.
  if (isFnAttrDisallowedByOpBundle(Kind))
    return Attribute();
  Value *V = getCalledOperand();
  if (auto *CE = dyn_cast<ConstantExpr>(V))
    if (CE->getOpcode() == BitCast)
      V = CE->getOperand(0);

  if (auto *F = dyn_cast<Function>(V))
    return F->getAttributes().getFnAttr(Kind);

  return Attribute();
}

template Attribute
CallBase::getFnAttrOnCalledFunction(Attribute::AttrKind Kind) const;
template Attribute CallBase::getFnAttrOnCalledFunction(StringRef Kind) const;

void CallBase::getOperandBundlesAsDefs(
    SmallVectorImpl<OperandBundleDef> &Defs) const {
  for (unsigned i = 0, e = getNumOperandBundles(); i != e; ++i)
    Defs.emplace_back(getOperandBundleAt(i));
}

CallBase::op_iterator
CallBase::populateBundleOperandInfos(ArrayRef<OperandBundleDef> Bundles,
                                     const unsigned BeginIndex) {
  auto It = op_begin() + BeginIndex;
  for (auto &B : Bundles)
    It = std::copy(B.input_begin(), B.input_end(), It);

  auto *ContextImpl = getContext().pImpl;
  auto BI = Bundles.begin();
  unsigned CurrentIndex = BeginIndex;

  for (auto &BOI : bundle_op_infos()) {
    assert(BI != Bundles.end() && "Incorrect allocation?");

    BOI.Tag = ContextImpl->getOrInsertBundleTag(BI->getTag());
    BOI.Begin = CurrentIndex;
    BOI.End = CurrentIndex + BI->input_size();
    CurrentIndex = BOI.End;
    BI++;
  }

  assert(BI == Bundles.end() && "Incorrect allocation?");

  return It;
}

CallBase::BundleOpInfo &CallBase::getBundleOpInfoForOperand(unsigned OpIdx) {
  /// When there aren't many bundles, we do a simple linear search.
  /// Otherwise fall back to a binary search that uses the fact that bundles
  /// usually have a similar number of arguments to get faster convergence.
  if (bundle_op_info_end() - bundle_op_info_begin() < 8) {
    for (auto &BOI : bundle_op_infos())
      if (BOI.Begin <= OpIdx && OpIdx < BOI.End)
        return BOI;

    llvm_unreachable("Did not find operand bundle for operand!");
  }

  assert(OpIdx >= arg_size() && "the Idx is not in the operand bundles");
  assert(bundle_op_info_end() - bundle_op_info_begin() > 0 &&
         OpIdx < std::prev(bundle_op_info_end())->End &&
         "The Idx isn't in the operand bundle");

  /// We need a decimal number below, and to prevent using floating point
  /// numbers we use an integral value multiplied by this constant.
  constexpr unsigned NumberScaling = 1024;

  bundle_op_iterator Begin = bundle_op_info_begin();
  bundle_op_iterator End = bundle_op_info_end();
  bundle_op_iterator Current = Begin;

  while (Begin != End) {
    unsigned ScaledOperandPerBundle =
        NumberScaling * (std::prev(End)->End - Begin->Begin) / (End - Begin);
    Current = Begin + (((OpIdx - Begin->Begin) * NumberScaling) /
                       ScaledOperandPerBundle);
    if (Current >= End)
      Current = std::prev(End);
    assert(Current < End && Current >= Begin &&
           "the operand bundle doesn't cover every value in the range");
    if (OpIdx >= Current->Begin && OpIdx < Current->End)
      break;
    if (OpIdx >= Current->End)
      Begin = Current + 1;
    else
      End = Current;
  }

  assert(OpIdx >= Current->Begin && OpIdx < Current->End &&
         "the operand bundle doesn't cover every value in the range");
  return *Current;
}

CallBase *CallBase::addOperandBundle(CallBase *CB, uint32_t ID,
                                     OperandBundleDef OB,
                                     Instruction *InsertPt) {
  if (CB->getOperandBundle(ID))
    return CB;

  SmallVector<OperandBundleDef, 1> Bundles;
  CB->getOperandBundlesAsDefs(Bundles);
  Bundles.push_back(OB);
  return Create(CB, Bundles, InsertPt);
}

CallBase *CallBase::removeOperandBundle(CallBase *CB, uint32_t ID,
                                        Instruction *InsertPt) {
  SmallVector<OperandBundleDef, 1> Bundles;
  bool CreateNew = false;

  for (unsigned I = 0, E = CB->getNumOperandBundles(); I != E; ++I) {
    auto Bundle = CB->getOperandBundleAt(I);
    if (Bundle.getTagID() == ID) {
      CreateNew = true;
      continue;
    }
    Bundles.emplace_back(Bundle);
  }

  return CreateNew ? Create(CB, Bundles, InsertPt) : CB;
}
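
// Usage sketch (illustrative only; `CB` is a CallBase *, `PadVal` a Value *
// for the enclosing funclet pad). Both helpers return a new instruction when
// they change anything, so the caller replaces the old one:
//
//   CallBase *NewCB = CallBase::addOperandBundle(
//       CB, LLVMContext::OB_funclet,
//       OperandBundleDef("funclet", PadVal), /*InsertPt=*/CB);
//   CB->replaceAllUsesWith(NewCB);
//   CB->eraseFromParent();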

bool CallBase::hasReadingOperandBundles() const {
  // Implementation note: this is a conservative implementation of operand
  // bundle semantics, where *any* non-assume operand bundle (other than
  // ptrauth and kcfi) forces a callsite to be at least readonly.
  return hasOperandBundlesOtherThan(
             {LLVMContext::OB_ptrauth, LLVMContext::OB_kcfi}) &&
         getIntrinsicID() != Intrinsic::assume;
}

bool CallBase::hasClobberingOperandBundles() const {
  return hasOperandBundlesOtherThan(
             {LLVMContext::OB_deopt, LLVMContext::OB_funclet,
              LLVMContext::OB_ptrauth, LLVMContext::OB_kcfi}) &&
         getIntrinsicID() != Intrinsic::assume;
}
//===----------------------------------------------------------------------===//
// CallInst Implementation
//===----------------------------------------------------------------------===//

void CallInst::init(FunctionType *FTy, Value *Func, ArrayRef<Value *> Args,
                    ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr) {
  this->FTy = FTy;
  assert(getNumOperands() == Args.size() + CountBundleInputs(Bundles) + 1 &&
         "NumOperands not set up?");

#ifndef NDEBUG
  assert((Args.size() == FTy->getNumParams() ||
          (FTy->isVarArg() && Args.size() > FTy->getNumParams())) &&
         "Calling a function with bad signature!");

  for (unsigned i = 0; i != Args.size(); ++i)
    assert((i >= FTy->getNumParams() ||
            FTy->getParamType(i) == Args[i]->getType()) &&
           "Calling a function with a bad signature!");
#endif

  // Set operands in order of their index to match use-list-order
  // prediction.
  llvm::copy(Args, op_begin());
  setCalledOperand(Func);

  auto It = populateBundleOperandInfos(Bundles, Args.size());
  (void)It;
  assert(It + 1 == op_end() && "Should add up!");

  setName(NameStr);
}

void CallInst::init(FunctionType *FTy, Value *Func, const Twine &NameStr) {
  this->FTy = FTy;
  assert(getNumOperands() == 1 && "NumOperands not set up?");
  setCalledOperand(Func);

  assert(FTy->getNumParams() == 0 && "Calling a function with bad signature");

  setName(NameStr);
}

CallInst::CallInst(FunctionType *Ty, Value *Func, const Twine &Name,
                   Instruction *InsertBefore)
    : CallBase(Ty->getReturnType(), Instruction::Call,
               OperandTraits<CallBase>::op_end(this) - 1, 1, InsertBefore) {
  init(Ty, Func, Name);
}

CallInst::CallInst(FunctionType *Ty, Value *Func, const Twine &Name,
                   BasicBlock *InsertAtEnd)
    : CallBase(Ty->getReturnType(), Instruction::Call,
               OperandTraits<CallBase>::op_end(this) - 1, 1, InsertAtEnd) {
  init(Ty, Func, Name);
}

CallInst::CallInst(const CallInst &CI)
    : CallBase(CI.Attrs, CI.FTy, CI.getType(), Instruction::Call,
               OperandTraits<CallBase>::op_end(this) - CI.getNumOperands(),
               CI.getNumOperands()) {
  setTailCallKind(CI.getTailCallKind());
  setCallingConv(CI.getCallingConv());

  std::copy(CI.op_begin(), CI.op_end(), op_begin());
  std::copy(CI.bundle_op_info_begin(), CI.bundle_op_info_end(),
            bundle_op_info_begin());
  SubclassOptionalData = CI.SubclassOptionalData;
}

CallInst *CallInst::Create(CallInst *CI, ArrayRef<OperandBundleDef> OpB,
                           Instruction *InsertPt) {
  std::vector<Value *> Args(CI->arg_begin(), CI->arg_end());

  auto *NewCI = CallInst::Create(CI->getFunctionType(), CI->getCalledOperand(),
                                 Args, OpB, CI->getName(), InsertPt);
  NewCI->setTailCallKind(CI->getTailCallKind());
  NewCI->setCallingConv(CI->getCallingConv());
  NewCI->SubclassOptionalData = CI->SubclassOptionalData;
  NewCI->setAttributes(CI->getAttributes());
  NewCI->setDebugLoc(CI->getDebugLoc());
  return NewCI;
}

// Update the profile weight for this call instruction by scaling it with the
// ratio S/T. For a call instruction, the meaning of the "branch_weights"
// metadata is transferred to represent the call count.
void CallInst::updateProfWeight(uint64_t S, uint64_t T) {
  auto *ProfileData = getMetadata(LLVMContext::MD_prof);
  if (ProfileData == nullptr)
    return;

  auto *ProfDataName = dyn_cast<MDString>(ProfileData->getOperand(0));
  if (!ProfDataName || (!ProfDataName->getString().equals("branch_weights") &&
                        !ProfDataName->getString().equals("VP")))
    return;

  if (T == 0) {
    LLVM_DEBUG(dbgs() << "Attempting to update profile weights will result in "
                         "div by 0. Ignoring. Likely the function "
                      << getParent()->getParent()->getName()
                      << " has 0 entry count, and contains call instructions "
                         "with non-zero prof info.");
    return;
  }

  MDBuilder MDB(getContext());
  SmallVector<Metadata *, 3> Vals;
  Vals.push_back(ProfileData->getOperand(0));
  APInt APS(128, S), APT(128, T);
  if (ProfDataName->getString().equals("branch_weights") &&
      ProfileData->getNumOperands() > 0) {
    // Using APInt::div may be expensive, but most cases should fit 64 bits.
    APInt Val(128, mdconst::dyn_extract<ConstantInt>(ProfileData->getOperand(1))
                       ->getValue()
                       .getZExtValue());
    Val *= APS;
    Vals.push_back(MDB.createConstant(
        ConstantInt::get(Type::getInt32Ty(getContext()),
                         Val.udiv(APT).getLimitedValue(UINT32_MAX))));
  } else if (ProfDataName->getString().equals("VP"))
    for (unsigned i = 1; i < ProfileData->getNumOperands(); i += 2) {
      // The first value is the key of the value profile, which will not change.
      Vals.push_back(ProfileData->getOperand(i));
      uint64_t Count =
          mdconst::dyn_extract<ConstantInt>(ProfileData->getOperand(i + 1))
              ->getValue()
              .getZExtValue();
      // Don't scale the magic number.
      if (Count == NOMORE_ICP_MAGICNUM) {
        Vals.push_back(ProfileData->getOperand(i + 1));
        continue;
      }
      // Using APInt::div may be expensive, but most cases should fit 64 bits.
      APInt Val(128, Count);
      Val *= APS;
      Vals.push_back(MDB.createConstant(
          ConstantInt::get(Type::getInt64Ty(getContext()),
                           Val.udiv(APT).getLimitedValue())));
    }
  setMetadata(LLVMContext::MD_prof, MDNode::get(getContext(), Vals));
}
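
// Worked example (illustrative): a call annotated with
//   !prof !{!"branch_weights", i32 80}
// scaled with S=1 and T=4 (e.g. when the caller's entry count dropped to a
// quarter of its original value) becomes
//   !prof !{!"branch_weights", i32 20}
// via CI->updateProfWeight(/*S=*/1, /*T=*/4);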

/// IsConstantOne - Return true only if val is constant int 1
static bool IsConstantOne(Value *val) {
  assert(val && "IsConstantOne does not work with nullptr val");
  const ConstantInt *CVal = dyn_cast<ConstantInt>(val);
  return CVal && CVal->isOne();
}

static Instruction *createMalloc(Instruction *InsertBefore,
                                 BasicBlock *InsertAtEnd, Type *IntPtrTy,
                                 Type *AllocTy, Value *AllocSize,
                                 Value *ArraySize,
                                 ArrayRef<OperandBundleDef> OpB,
                                 Function *MallocF, const Twine &Name) {
  assert(((!InsertBefore && InsertAtEnd) || (InsertBefore && !InsertAtEnd)) &&
         "createMalloc needs either InsertBefore or InsertAtEnd");

  // malloc(type) becomes:
  //   bitcast (i8* malloc(typeSize)) to type*
  // malloc(type, arraySize) becomes:
  //   bitcast (i8* malloc(typeSize*arraySize)) to type*
  if (!ArraySize)
    ArraySize = ConstantInt::get(IntPtrTy, 1);
  else if (ArraySize->getType() != IntPtrTy) {
    if (InsertBefore)
      ArraySize = CastInst::CreateIntegerCast(ArraySize, IntPtrTy, false,
                                              "", InsertBefore);
    else
      ArraySize = CastInst::CreateIntegerCast(ArraySize, IntPtrTy, false,
                                              "", InsertAtEnd);
  }

  if (!IsConstantOne(ArraySize)) {
    if (IsConstantOne(AllocSize)) {
      AllocSize = ArraySize; // Operand * 1 = Operand
    } else if (Constant *CO = dyn_cast<Constant>(ArraySize)) {
      Constant *Scale = ConstantExpr::getIntegerCast(CO, IntPtrTy,
                                                     false /*ZExt*/);
      // Malloc arg is constant product of type size and array size
      AllocSize = ConstantExpr::getMul(Scale, cast<Constant>(AllocSize));
    } else {
      // Multiply type size by the array size...
      if (InsertBefore)
        AllocSize = BinaryOperator::CreateMul(ArraySize, AllocSize,
                                              "mallocsize", InsertBefore);
      else
        AllocSize = BinaryOperator::CreateMul(ArraySize, AllocSize,
                                              "mallocsize", InsertAtEnd);
    }
  }

  assert(AllocSize->getType() == IntPtrTy && "malloc arg is wrong size");
  // Create the call to Malloc.
  BasicBlock *BB = InsertBefore ? InsertBefore->getParent() : InsertAtEnd;
  Module *M = BB->getParent()->getParent();
  Type *BPTy = Type::getInt8PtrTy(BB->getContext());
  FunctionCallee MallocFunc = MallocF;
  if (!MallocFunc)
    // prototype malloc as "void *malloc(size_t)"
    MallocFunc = M->getOrInsertFunction("malloc", BPTy, IntPtrTy);
  PointerType *AllocPtrType = PointerType::getUnqual(AllocTy);
  CallInst *MCall = nullptr;
  Instruction *Result = nullptr;
  if (InsertBefore) {
    MCall = CallInst::Create(MallocFunc, AllocSize, OpB, "malloccall",
                             InsertBefore);
    Result = MCall;
    if (Result->getType() != AllocPtrType)
      // Create a cast instruction to convert to the right type...
      Result = new BitCastInst(MCall, AllocPtrType, Name, InsertBefore);
  } else {
    MCall = CallInst::Create(MallocFunc, AllocSize, OpB, "malloccall");
    Result = MCall;
    if (Result->getType() != AllocPtrType) {
      InsertAtEnd->getInstList().push_back(MCall);
      // Create a cast instruction to convert to the right type...
      Result = new BitCastInst(MCall, AllocPtrType, Name);
    }
  }
  MCall->setTailCall();
  if (Function *F = dyn_cast<Function>(MallocFunc.getCallee())) {
    MCall->setCallingConv(F->getCallingConv());
    if (!F->returnDoesNotAlias())
      F->setReturnDoesNotAlias();
  }
  assert(!MCall->getType()->isVoidTy() && "Malloc has void return type");

  return Result;
}

/// CreateMalloc - Generate the IR for a call to malloc:
/// 1. Compute the malloc call's argument as the specified type's size,
///    possibly multiplied by the array size if the array size is not
///    constant 1.
/// 2. Call malloc with that argument.
/// 3. Bitcast the result of the malloc call to the specified type.
Instruction *CallInst::CreateMalloc(Instruction *InsertBefore,
                                    Type *IntPtrTy, Type *AllocTy,
                                    Value *AllocSize, Value *ArraySize,
                                    Function *MallocF,
                                    const Twine &Name) {
  return createMalloc(InsertBefore, nullptr, IntPtrTy, AllocTy, AllocSize,
                      ArraySize, None, MallocF, Name);
}
Instruction *CallInst::CreateMalloc(Instruction *InsertBefore,
                                    Type *IntPtrTy, Type *AllocTy,
                                    Value *AllocSize, Value *ArraySize,
                                    ArrayRef<OperandBundleDef> OpB,
                                    Function *MallocF,
                                    const Twine &Name) {
  return createMalloc(InsertBefore, nullptr, IntPtrTy, AllocTy, AllocSize,
                      ArraySize, OpB, MallocF, Name);
}

/// CreateMalloc - Generate the IR for a call to malloc:
/// 1. Compute the malloc call's argument as the specified type's size,
///    possibly multiplied by the array size if the array size is not
///    constant 1.
/// 2. Call malloc with that argument.
/// 3. Bitcast the result of the malloc call to the specified type.
/// Note: This function does not add the bitcast to the basic block, that is the
/// responsibility of the caller.
Instruction *CallInst::CreateMalloc(BasicBlock *InsertAtEnd,
                                    Type *IntPtrTy, Type *AllocTy,
                                    Value *AllocSize, Value *ArraySize,
                                    Function *MallocF, const Twine &Name) {
  return createMalloc(nullptr, InsertAtEnd, IntPtrTy, AllocTy, AllocSize,
                      ArraySize, None, MallocF, Name);
}
Instruction *CallInst::CreateMalloc(BasicBlock *InsertAtEnd,
                                    Type *IntPtrTy, Type *AllocTy,
                                    Value *AllocSize, Value *ArraySize,
                                    ArrayRef<OperandBundleDef> OpB,
                                    Function *MallocF, const Twine &Name) {
  return createMalloc(nullptr, InsertAtEnd, IntPtrTy, AllocTy, AllocSize,
                      ArraySize, OpB, MallocF, Name);
}
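
// Usage sketch (illustrative only; `M` is a Module *, `BB` the block that
// should receive the call). Allocates one i64 with the default "malloc"
// prototype; note that with the InsertAtEnd form the final bitcast is not
// inserted automatically:
//
//   const DataLayout &DL = M->getDataLayout();
//   Type *IntPtrTy = DL.getIntPtrType(BB->getContext());
//   Type *EltTy = Type::getInt64Ty(BB->getContext());
//   Value *AllocSize = ConstantInt::get(IntPtrTy, DL.getTypeAllocSize(EltTy));
//   Instruction *Malloc = CallInst::CreateMalloc(
//       BB, IntPtrTy, EltTy, AllocSize, /*ArraySize=*/nullptr,
//       /*MallocF=*/nullptr, "arr");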

static Instruction *createFree(Value *Source,
                               ArrayRef<OperandBundleDef> Bundles,
                               Instruction *InsertBefore,
                               BasicBlock *InsertAtEnd) {
  assert(((!InsertBefore && InsertAtEnd) || (InsertBefore && !InsertAtEnd)) &&
         "createFree needs either InsertBefore or InsertAtEnd");
  assert(Source->getType()->isPointerTy() &&
         "Can not free something of nonpointer type!");

  BasicBlock *BB = InsertBefore ? InsertBefore->getParent() : InsertAtEnd;
  Module *M = BB->getParent()->getParent();

  Type *VoidTy = Type::getVoidTy(M->getContext());
  Type *IntPtrTy = Type::getInt8PtrTy(M->getContext());
  // prototype free as "void free(void*)"
  FunctionCallee FreeFunc = M->getOrInsertFunction("free", VoidTy, IntPtrTy);
  CallInst *Result = nullptr;
  Value *PtrCast = Source;
  if (InsertBefore) {
    if (Source->getType() != IntPtrTy)
      PtrCast = new BitCastInst(Source, IntPtrTy, "", InsertBefore);
    Result = CallInst::Create(FreeFunc, PtrCast, Bundles, "", InsertBefore);
  } else {
    if (Source->getType() != IntPtrTy)
      PtrCast = new BitCastInst(Source, IntPtrTy, "", InsertAtEnd);
    Result = CallInst::Create(FreeFunc, PtrCast, Bundles, "");
  }
  Result->setTailCall();
  if (Function *F = dyn_cast<Function>(FreeFunc.getCallee()))
    Result->setCallingConv(F->getCallingConv());

  return Result;
}

/// CreateFree - Generate the IR for a call to the builtin free function.
Instruction *CallInst::CreateFree(Value *Source, Instruction *InsertBefore) {
  return createFree(Source, None, InsertBefore, nullptr);
}
Instruction *CallInst::CreateFree(Value *Source,
                                  ArrayRef<OperandBundleDef> Bundles,
                                  Instruction *InsertBefore) {
  return createFree(Source, Bundles, InsertBefore, nullptr);
}

/// CreateFree - Generate the IR for a call to the builtin free function.
/// Note: This function does not add the call to the basic block, that is the
/// responsibility of the caller.
Instruction *CallInst::CreateFree(Value *Source, BasicBlock *InsertAtEnd) {
  Instruction *FreeCall = createFree(Source, None, nullptr, InsertAtEnd);
  assert(FreeCall && "CreateFree did not create a CallInst");
  return FreeCall;
}
Instruction *CallInst::CreateFree(Value *Source,
                                  ArrayRef<OperandBundleDef> Bundles,
                                  BasicBlock *InsertAtEnd) {
  Instruction *FreeCall = createFree(Source, Bundles, nullptr, InsertAtEnd);
  assert(FreeCall && "CreateFree did not create a CallInst");
  return FreeCall;
}
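
// Usage sketch (illustrative only): releasing the pointer produced by the
// malloc example above, inserting the call before `InsertBefore`:
//
//   CallInst::CreateFree(Malloc, InsertBefore);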

//===----------------------------------------------------------------------===//
// InvokeInst Implementation
//===----------------------------------------------------------------------===//

void InvokeInst::init(FunctionType *FTy, Value *Fn, BasicBlock *IfNormal,
                      BasicBlock *IfException, ArrayRef<Value *> Args,
                      ArrayRef<OperandBundleDef> Bundles,
                      const Twine &NameStr) {
  this->FTy = FTy;

  assert((int)getNumOperands() ==
             ComputeNumOperands(Args.size(), CountBundleInputs(Bundles)) &&
         "NumOperands not set up?");

#ifndef NDEBUG
  assert(((Args.size() == FTy->getNumParams()) ||
          (FTy->isVarArg() && Args.size() > FTy->getNumParams())) &&
         "Invoking a function with bad signature");

  for (unsigned i = 0, e = Args.size(); i != e; i++)
    assert((i >= FTy->getNumParams() ||
            FTy->getParamType(i) == Args[i]->getType()) &&
           "Invoking a function with a bad signature!");
#endif

  // Set operands in order of their index to match use-list-order
  // prediction.
  llvm::copy(Args, op_begin());
  setNormalDest(IfNormal);
  setUnwindDest(IfException);
  setCalledOperand(Fn);

  auto It = populateBundleOperandInfos(Bundles, Args.size());
  (void)It;
  assert(It + 3 == op_end() && "Should add up!");

  setName(NameStr);
}

InvokeInst::InvokeInst(const InvokeInst &II)
    : CallBase(II.Attrs, II.FTy, II.getType(), Instruction::Invoke,
               OperandTraits<CallBase>::op_end(this) - II.getNumOperands(),
               II.getNumOperands()) {
  setCallingConv(II.getCallingConv());
  std::copy(II.op_begin(), II.op_end(), op_begin());
  std::copy(II.bundle_op_info_begin(), II.bundle_op_info_end(),
            bundle_op_info_begin());
  SubclassOptionalData = II.SubclassOptionalData;
}

InvokeInst *InvokeInst::Create(InvokeInst *II, ArrayRef<OperandBundleDef> OpB,
                               Instruction *InsertPt) {
  std::vector<Value *> Args(II->arg_begin(), II->arg_end());

  auto *NewII = InvokeInst::Create(
      II->getFunctionType(), II->getCalledOperand(), II->getNormalDest(),
      II->getUnwindDest(), Args, OpB, II->getName(), InsertPt);
  NewII->setCallingConv(II->getCallingConv());
  NewII->SubclassOptionalData = II->SubclassOptionalData;
  NewII->setAttributes(II->getAttributes());
  NewII->setDebugLoc(II->getDebugLoc());
  return NewII;
}

LandingPadInst *InvokeInst::getLandingPadInst() const {
  return cast<LandingPadInst>(getUnwindDest()->getFirstNonPHI());
}
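
// Example (illustrative; `II` is an InvokeInst *): the landingpad of an
// invoke is the first non-PHI instruction of its unwind destination:
//
//   LandingPadInst *LP = II->getLandingPadInst();
//   assert(LP->getParent() == II->getUnwindDest());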

//===----------------------------------------------------------------------===//
// CallBrInst Implementation
//===----------------------------------------------------------------------===//

void CallBrInst::init(FunctionType *FTy, Value *Fn, BasicBlock *Fallthrough,
                      ArrayRef<BasicBlock *> IndirectDests,
                      ArrayRef<Value *> Args,
                      ArrayRef<OperandBundleDef> Bundles,
                      const Twine &NameStr) {
  this->FTy = FTy;

  assert((int)getNumOperands() ==
             ComputeNumOperands(Args.size(), IndirectDests.size(),
                                CountBundleInputs(Bundles)) &&
         "NumOperands not set up?");

#ifndef NDEBUG
  assert(((Args.size() == FTy->getNumParams()) ||
          (FTy->isVarArg() && Args.size() > FTy->getNumParams())) &&
         "Calling a function with bad signature");

  for (unsigned i = 0, e = Args.size(); i != e; i++)
    assert((i >= FTy->getNumParams() ||
            FTy->getParamType(i) == Args[i]->getType()) &&
           "Calling a function with a bad signature!");
#endif

  // Set operands in order of their index to match use-list-order
  // prediction.
  std::copy(Args.begin(), Args.end(), op_begin());
  NumIndirectDests = IndirectDests.size();
  setDefaultDest(Fallthrough);
  for (unsigned i = 0; i != NumIndirectDests; ++i)
    setIndirectDest(i, IndirectDests[i]);
  setCalledOperand(Fn);

  auto It = populateBundleOperandInfos(Bundles, Args.size());
  (void)It;
  assert(It + 2 + IndirectDests.size() == op_end() && "Should add up!");

  setName(NameStr);
}

CallBrInst::CallBrInst(const CallBrInst &CBI)
    : CallBase(CBI.Attrs, CBI.FTy, CBI.getType(), Instruction::CallBr,
               OperandTraits<CallBase>::op_end(this) - CBI.getNumOperands(),
               CBI.getNumOperands()) {
  setCallingConv(CBI.getCallingConv());
  std::copy(CBI.op_begin(), CBI.op_end(), op_begin());
  std::copy(CBI.bundle_op_info_begin(), CBI.bundle_op_info_end(),
            bundle_op_info_begin());
  SubclassOptionalData = CBI.SubclassOptionalData;
  NumIndirectDests = CBI.NumIndirectDests;
}

CallBrInst *CallBrInst::Create(CallBrInst *CBI, ArrayRef<OperandBundleDef> OpB,
                               Instruction *InsertPt) {
  std::vector<Value *> Args(CBI->arg_begin(), CBI->arg_end());

  auto *NewCBI = CallBrInst::Create(
      CBI->getFunctionType(), CBI->getCalledOperand(), CBI->getDefaultDest(),
      CBI->getIndirectDests(), Args, OpB, CBI->getName(), InsertPt);
  NewCBI->setCallingConv(CBI->getCallingConv());
  NewCBI->SubclassOptionalData = CBI->SubclassOptionalData;
  NewCBI->setAttributes(CBI->getAttributes());
  NewCBI->setDebugLoc(CBI->getDebugLoc());
  NewCBI->NumIndirectDests = CBI->NumIndirectDests;
  return NewCBI;
}

//===----------------------------------------------------------------------===//
// ReturnInst Implementation
//===----------------------------------------------------------------------===//

ReturnInst::ReturnInst(const ReturnInst &RI)
    : Instruction(Type::getVoidTy(RI.getContext()), Instruction::Ret,
                  OperandTraits<ReturnInst>::op_end(this) - RI.getNumOperands(),
                  RI.getNumOperands()) {
  if (RI.getNumOperands())
    Op<0>() = RI.Op<0>();
  SubclassOptionalData = RI.SubclassOptionalData;
}

ReturnInst::ReturnInst(LLVMContext &C, Value *retVal, Instruction *InsertBefore)
    : Instruction(Type::getVoidTy(C), Instruction::Ret,
                  OperandTraits<ReturnInst>::op_end(this) - !!retVal, !!retVal,
                  InsertBefore) {
  if (retVal)
    Op<0>() = retVal;
}

ReturnInst::ReturnInst(LLVMContext &C, Value *retVal, BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(C), Instruction::Ret,
                  OperandTraits<ReturnInst>::op_end(this) - !!retVal, !!retVal,
                  InsertAtEnd) {
  if (retVal)
    Op<0>() = retVal;
}

ReturnInst::ReturnInst(LLVMContext &Context, BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(Context), Instruction::Ret,
                  OperandTraits<ReturnInst>::op_end(this), 0, InsertAtEnd) {}

//===----------------------------------------------------------------------===//
// ResumeInst Implementation
//===----------------------------------------------------------------------===//

ResumeInst::ResumeInst(const ResumeInst &RI)
    : Instruction(Type::getVoidTy(RI.getContext()), Instruction::Resume,
                  OperandTraits<ResumeInst>::op_begin(this), 1) {
  Op<0>() = RI.Op<0>();
}

ResumeInst::ResumeInst(Value *Exn, Instruction *InsertBefore)
    : Instruction(Type::getVoidTy(Exn->getContext()), Instruction::Resume,
                  OperandTraits<ResumeInst>::op_begin(this), 1, InsertBefore) {
  Op<0>() = Exn;
}

ResumeInst::ResumeInst(Value *Exn, BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(Exn->getContext()), Instruction::Resume,
                  OperandTraits<ResumeInst>::op_begin(this), 1, InsertAtEnd) {
  Op<0>() = Exn;
}

//===----------------------------------------------------------------------===//
// CleanupReturnInst Implementation
//===----------------------------------------------------------------------===//

CleanupReturnInst::CleanupReturnInst(const CleanupReturnInst &CRI)
    : Instruction(CRI.getType(), Instruction::CleanupRet,
                  OperandTraits<CleanupReturnInst>::op_end(this) -
                      CRI.getNumOperands(),
                  CRI.getNumOperands()) {
  setSubclassData<Instruction::OpaqueField>(
      CRI.getSubclassData<Instruction::OpaqueField>());
  Op<0>() = CRI.Op<0>();
  if (CRI.hasUnwindDest())
    Op<1>() = CRI.Op<1>();
}

void CleanupReturnInst::init(Value *CleanupPad, BasicBlock *UnwindBB) {
  if (UnwindBB)
    setSubclassData<UnwindDestField>(true);

  Op<0>() = CleanupPad;
  if (UnwindBB)
    Op<1>() = UnwindBB;
}

CleanupReturnInst::CleanupReturnInst(Value *CleanupPad, BasicBlock *UnwindBB,
                                     unsigned Values, Instruction *InsertBefore)
    : Instruction(Type::getVoidTy(CleanupPad->getContext()),
                  Instruction::CleanupRet,
                  OperandTraits<CleanupReturnInst>::op_end(this) - Values,
                  Values, InsertBefore) {
  init(CleanupPad, UnwindBB);
}

CleanupReturnInst::CleanupReturnInst(Value *CleanupPad, BasicBlock *UnwindBB,
                                     unsigned Values, BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(CleanupPad->getContext()),
                  Instruction::CleanupRet,
                  OperandTraits<CleanupReturnInst>::op_end(this) - Values,
                  Values, InsertAtEnd) {
  init(CleanupPad, UnwindBB);
}

//===----------------------------------------------------------------------===//
// CatchReturnInst Implementation
//===----------------------------------------------------------------------===//
void CatchReturnInst::init(Value *CatchPad, BasicBlock *BB) {
  Op<0>() = CatchPad;
  Op<1>() = BB;
}

CatchReturnInst::CatchReturnInst(const CatchReturnInst &CRI)
    : Instruction(Type::getVoidTy(CRI.getContext()), Instruction::CatchRet,
                  OperandTraits<CatchReturnInst>::op_begin(this), 2) {
  Op<0>() = CRI.Op<0>();
  Op<1>() = CRI.Op<1>();
}

CatchReturnInst::CatchReturnInst(Value *CatchPad, BasicBlock *BB,
                                 Instruction *InsertBefore)
    : Instruction(Type::getVoidTy(BB->getContext()), Instruction::CatchRet,
                  OperandTraits<CatchReturnInst>::op_begin(this), 2,
                  InsertBefore) {
  init(CatchPad, BB);
}

CatchReturnInst::CatchReturnInst(Value *CatchPad, BasicBlock *BB,
                                 BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(BB->getContext()), Instruction::CatchRet,
                  OperandTraits<CatchReturnInst>::op_begin(this), 2,
                  InsertAtEnd) {
  init(CatchPad, BB);
}

//===----------------------------------------------------------------------===//
// CatchSwitchInst Implementation
//===----------------------------------------------------------------------===//

CatchSwitchInst::CatchSwitchInst(Value *ParentPad, BasicBlock *UnwindDest,
                                 unsigned NumReservedValues,
                                 const Twine &NameStr,
                                 Instruction *InsertBefore)
    : Instruction(ParentPad->getType(), Instruction::CatchSwitch, nullptr, 0,
                  InsertBefore) {
  if (UnwindDest)
    ++NumReservedValues;
  init(ParentPad, UnwindDest, NumReservedValues + 1);
  setName(NameStr);
}

CatchSwitchInst::CatchSwitchInst(Value *ParentPad, BasicBlock *UnwindDest,
                                 unsigned NumReservedValues,
                                 const Twine &NameStr, BasicBlock *InsertAtEnd)
    : Instruction(ParentPad->getType(), Instruction::CatchSwitch, nullptr, 0,
                  InsertAtEnd) {
  if (UnwindDest)
    ++NumReservedValues;
  init(ParentPad, UnwindDest, NumReservedValues + 1);
  setName(NameStr);
}

CatchSwitchInst::CatchSwitchInst(const CatchSwitchInst &CSI)
    : Instruction(CSI.getType(), Instruction::CatchSwitch, nullptr,
                  CSI.getNumOperands()) {
  init(CSI.getParentPad(), CSI.getUnwindDest(), CSI.getNumOperands());
  setNumHungOffUseOperands(ReservedSpace);
  Use *OL = getOperandList();
  const Use *InOL = CSI.getOperandList();
  for (unsigned I = 1, E = ReservedSpace; I != E; ++I)
    OL[I] = InOL[I];
}

void CatchSwitchInst::init(Value *ParentPad, BasicBlock *UnwindDest,
                           unsigned NumReservedValues) {
  assert(ParentPad && NumReservedValues);

  ReservedSpace = NumReservedValues;
  setNumHungOffUseOperands(UnwindDest ? 2 : 1);
  allocHungoffUses(ReservedSpace);

  Op<0>() = ParentPad;
  if (UnwindDest) {
    setSubclassData<UnwindDestField>(true);
    setUnwindDest(UnwindDest);
  }
}

/// growOperands - grow operands - This grows the operand list in response to a
/// push_back style of operation. This grows the number of ops by 2 times.
void CatchSwitchInst::growOperands(unsigned Size) {
  unsigned NumOperands = getNumOperands();
  assert(NumOperands >= 1);
  if (ReservedSpace >= NumOperands + Size)
    return;
  ReservedSpace = (NumOperands + Size / 2) * 2;
  growHungoffUses(ReservedSpace);
}

void CatchSwitchInst::addHandler(BasicBlock *Handler) {
  unsigned OpNo = getNumOperands();
  growOperands(1);
  assert(OpNo < ReservedSpace && "Growing didn't work!");
  setNumHungOffUseOperands(getNumOperands() + 1);
  getOperandList()[OpNo] = Handler;
}

void CatchSwitchInst::removeHandler(handler_iterator HI) {
  // Move all subsequent handlers up one.
  Use *EndDst = op_end() - 1;
  for (Use *CurDst = HI.getCurrent(); CurDst != EndDst; ++CurDst)
    *CurDst = *(CurDst + 1);
  // Null out the last handler use.
  *EndDst = nullptr;

  setNumHungOffUseOperands(getNumOperands() - 1);
}

//===----------------------------------------------------------------------===//
// FuncletPadInst Implementation
//===----------------------------------------------------------------------===//
void FuncletPadInst::init(Value *ParentPad, ArrayRef<Value *> Args,
                          const Twine &NameStr) {
  assert(getNumOperands() == 1 + Args.size() && "NumOperands not set up?");
  llvm::copy(Args, op_begin());
  setParentPad(ParentPad);
  setName(NameStr);
}

FuncletPadInst::FuncletPadInst(const FuncletPadInst &FPI)
    : Instruction(FPI.getType(), FPI.getOpcode(),
                  OperandTraits<FuncletPadInst>::op_end(this) -
                      FPI.getNumOperands(),
                  FPI.getNumOperands()) {
  std::copy(FPI.op_begin(), FPI.op_end(), op_begin());
  setParentPad(FPI.getParentPad());
}

FuncletPadInst::FuncletPadInst(Instruction::FuncletPadOps Op, Value *ParentPad,
                               ArrayRef<Value *> Args, unsigned Values,
                               const Twine &NameStr, Instruction *InsertBefore)
    : Instruction(ParentPad->getType(), Op,
                  OperandTraits<FuncletPadInst>::op_end(this) - Values, Values,
                  InsertBefore) {
  init(ParentPad, Args, NameStr);
}

FuncletPadInst::FuncletPadInst(Instruction::FuncletPadOps Op, Value *ParentPad,
                               ArrayRef<Value *> Args, unsigned Values,
                               const Twine &NameStr, BasicBlock *InsertAtEnd)
    : Instruction(ParentPad->getType(), Op,
                  OperandTraits<FuncletPadInst>::op_end(this) - Values, Values,
                  InsertAtEnd) {
  init(ParentPad, Args, NameStr);
}

//===----------------------------------------------------------------------===//
// UnreachableInst Implementation
//===----------------------------------------------------------------------===//

UnreachableInst::UnreachableInst(LLVMContext &Context,
                                 Instruction *InsertBefore)
    : Instruction(Type::getVoidTy(Context), Instruction::Unreachable, nullptr,
                  0, InsertBefore) {}
UnreachableInst::UnreachableInst(LLVMContext &Context, BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(Context), Instruction::Unreachable, nullptr,
                  0, InsertAtEnd) {}

//===----------------------------------------------------------------------===//
// BranchInst Implementation
//===----------------------------------------------------------------------===//

void BranchInst::AssertOK() {
  if (isConditional())
    assert(getCondition()->getType()->isIntegerTy(1) &&
           "May only branch on boolean predicates!");
}

BranchInst::BranchInst(BasicBlock *IfTrue, Instruction *InsertBefore)
    : Instruction(Type::getVoidTy(IfTrue->getContext()), Instruction::Br,
                  OperandTraits<BranchInst>::op_end(this) - 1, 1,
                  InsertBefore) {
  assert(IfTrue && "Branch destination may not be null!");
  Op<-1>() = IfTrue;
}

BranchInst::BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond,
                       Instruction *InsertBefore)
    : Instruction(Type::getVoidTy(IfTrue->getContext()), Instruction::Br,
                  OperandTraits<BranchInst>::op_end(this) - 3, 3,
                  InsertBefore) {
  // Assign in order of operand index to make use-list order predictable.
  Op<-3>() = Cond;
  Op<-2>() = IfFalse;
  Op<-1>() = IfTrue;
#ifndef NDEBUG
  AssertOK();
#endif
}

BranchInst::BranchInst(BasicBlock *IfTrue, BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(IfTrue->getContext()), Instruction::Br,
                  OperandTraits<BranchInst>::op_end(this) - 1, 1, InsertAtEnd) {
  assert(IfTrue && "Branch destination may not be null!");
  Op<-1>() = IfTrue;
}

BranchInst::BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond,
                       BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(IfTrue->getContext()), Instruction::Br,
                  OperandTraits<BranchInst>::op_end(this) - 3, 3, InsertAtEnd) {
  // Assign in order of operand index to make use-list order predictable.
  Op<-3>() = Cond;
  Op<-2>() = IfFalse;
  Op<-1>() = IfTrue;
#ifndef NDEBUG
  AssertOK();
#endif
}
BranchInst::BranchInst(const BranchInst &BI)
    : Instruction(Type::getVoidTy(BI.getContext()), Instruction::Br,
                  OperandTraits<BranchInst>::op_end(this) - BI.getNumOperands(),
                  BI.getNumOperands()) {
  // Assign in order of operand index to make use-list order predictable.
  if (BI.getNumOperands() != 1) {
    assert(BI.getNumOperands() == 3 && "BR can have 1 or 3 operands!");
    Op<-3>() = BI.Op<-3>();
    Op<-2>() = BI.Op<-2>();
  }
  Op<-1>() = BI.Op<-1>();
  SubclassOptionalData = BI.SubclassOptionalData;
}

void BranchInst::swapSuccessors() {
  assert(isConditional() &&
         "Cannot swap successors of an unconditional branch");
  Op<-1>().swap(Op<-2>());

  // Update profile metadata if present and it matches our structural
  // expectations.
  swapProfMetadata();
}
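
// Usage sketch (illustrative only; `BI` is a conditional BranchInst *).
// Inverting a branch condition must also swap the successors to preserve
// semantics; swapSuccessors() additionally swaps any "branch_weights"
// profile metadata so the weights stay attached to the right edges:
//
//   Value *NotCond = BinaryOperator::CreateNot(BI->getCondition(), "", BI);
//   BI->setCondition(NotCond);
//   BI->swapSuccessors();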

//===----------------------------------------------------------------------===//
// AllocaInst Implementation
//===----------------------------------------------------------------------===//

static Value *getAISize(LLVMContext &Context, Value *Amt) {
  if (!Amt)
    Amt = ConstantInt::get(Type::getInt32Ty(Context), 1);
  else {
    assert(!isa<BasicBlock>(Amt) &&
           "Passed basic block into allocation size parameter! Use other ctor");
    assert(Amt->getType()->isIntegerTy() &&
           "Allocation array size is not an integer!");
  }
  return Amt;
}

static Align computeAllocaDefaultAlign(Type *Ty, BasicBlock *BB) {
  assert(BB && "Insertion BB cannot be null when alignment not provided!");
  assert(BB->getParent() &&
         "BB must be in a Function when alignment not provided!");
  const DataLayout &DL = BB->getModule()->getDataLayout();
  return DL.getPrefTypeAlign(Ty);
}

static Align computeAllocaDefaultAlign(Type *Ty, Instruction *I) {
  assert(I && "Insertion position cannot be null when alignment not provided!");
  return computeAllocaDefaultAlign(Ty, I->getParent());
}

AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, const Twine &Name,
                       Instruction *InsertBefore)
    : AllocaInst(Ty, AddrSpace, /*ArraySize=*/nullptr, Name, InsertBefore) {}

AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, const Twine &Name,
                       BasicBlock *InsertAtEnd)
    : AllocaInst(Ty, AddrSpace, /*ArraySize=*/nullptr, Name, InsertAtEnd) {}

AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
                       const Twine &Name, Instruction *InsertBefore)
    : AllocaInst(Ty, AddrSpace, ArraySize,
                 computeAllocaDefaultAlign(Ty, InsertBefore), Name,
                 InsertBefore) {}

AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
                       const Twine &Name, BasicBlock *InsertAtEnd)
    : AllocaInst(Ty, AddrSpace, ArraySize,
                 computeAllocaDefaultAlign(Ty, InsertAtEnd), Name,
                 InsertAtEnd) {}

AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
                       Align Align, const Twine &Name,
                       Instruction *InsertBefore)
    : UnaryInstruction(PointerType::get(Ty, AddrSpace), Alloca,
                       getAISize(Ty->getContext(), ArraySize), InsertBefore),
      AllocatedType(Ty) {
  setAlignment(Align);
  assert(!Ty->isVoidTy() && "Cannot allocate void!");
  setName(Name);
}

AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
                       Align Align, const Twine &Name, BasicBlock *InsertAtEnd)
    : UnaryInstruction(PointerType::get(Ty, AddrSpace), Alloca,
                       getAISize(Ty->getContext(), ArraySize), InsertAtEnd),
      AllocatedType(Ty) {
  setAlignment(Align);
  assert(!Ty->isVoidTy() && "Cannot allocate void!");
  setName(Name);
}

bool AllocaInst::isArrayAllocation() const {
  if (ConstantInt *CI = dyn_cast<ConstantInt>(getOperand(0)))
    return !CI->isOne();
  return true;
}

/// isStaticAlloca - Return true if this alloca is in the entry block of the
/// function and is a constant size. If so, the code generator will fold it
/// into the prolog/epilog code, so it is basically free.
bool AllocaInst::isStaticAlloca() const {
  // Must be constant size.
  if (!isa<ConstantInt>(getArraySize())) return false;

  // Must be in the entry block.
  const BasicBlock *Parent = getParent();
  return Parent == &Parent->getParent()->front() && !isUsedWithInAlloca();
}
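
// Example (illustrative; `F` is a Function *, `Ctx` its LLVMContext): an
// alloca with a constant size placed in the entry block is "static" and is
// folded into the fixed stack frame:
//
//   BasicBlock &Entry = F->getEntryBlock();
//   AllocaInst *Slot =
//       new AllocaInst(Type::getInt32Ty(Ctx), /*AddrSpace=*/0, "slot",
//                      &*Entry.getFirstInsertionPt());
//   assert(Slot->isStaticAlloca());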

//===----------------------------------------------------------------------===//
// LoadInst Implementation
//===----------------------------------------------------------------------===//

void LoadInst::AssertOK() {
  assert(getOperand(0)->getType()->isPointerTy() &&
         "Ptr must have pointer type.");
}

static Align computeLoadStoreDefaultAlign(Type *Ty, BasicBlock *BB) {
  assert(BB && "Insertion BB cannot be null when alignment not provided!");
  assert(BB->getParent() &&
         "BB must be in a Function when alignment not provided!");
  const DataLayout &DL = BB->getModule()->getDataLayout();
  return DL.getABITypeAlign(Ty);
}

static Align computeLoadStoreDefaultAlign(Type *Ty, Instruction *I) {
  assert(I && "Insertion position cannot be null when alignment not provided!");
  return computeLoadStoreDefaultAlign(Ty, I->getParent());
}

LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name,
                   Instruction *InsertBef)
    : LoadInst(Ty, Ptr, Name, /*isVolatile=*/false, InsertBef) {}

LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name,
                   BasicBlock *InsertAE)
    : LoadInst(Ty, Ptr, Name, /*isVolatile=*/false, InsertAE) {}

LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
                   Instruction *InsertBef)
    : LoadInst(Ty, Ptr, Name, isVolatile,
               computeLoadStoreDefaultAlign(Ty, InsertBef), InsertBef) {}

LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
                   BasicBlock *InsertAE)
    : LoadInst(Ty, Ptr, Name, isVolatile,
               computeLoadStoreDefaultAlign(Ty, InsertAE), InsertAE) {}

LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
                   Align Align, Instruction *InsertBef)
    : LoadInst(Ty, Ptr, Name, isVolatile, Align, AtomicOrdering::NotAtomic,
               SyncScope::System, InsertBef) {}

LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
                   Align Align, BasicBlock *InsertAE)
    : LoadInst(Ty, Ptr, Name, isVolatile, Align, AtomicOrdering::NotAtomic,
               SyncScope::System, InsertAE) {}

LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
                   Align Align, AtomicOrdering Order, SyncScope::ID SSID,
                   Instruction *InsertBef)
    : UnaryInstruction(Ty, Load, Ptr, InsertBef) {
  assert(cast<PointerType>(Ptr->getType())->isOpaqueOrPointeeTypeMatches(Ty));
  setVolatile(isVolatile);
  setAlignment(Align);
  setAtomic(Order, SSID);
  AssertOK();
  setName(Name);
}

LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
                   Align Align, AtomicOrdering Order, SyncScope::ID SSID,
                   BasicBlock *InsertAE)
    : UnaryInstruction(Ty, Load, Ptr, InsertAE) {
  assert(cast<PointerType>(Ptr->getType())->isOpaqueOrPointeeTypeMatches(Ty));
  setVolatile(isVolatile);
  setAlignment(Align);
  setAtomic(Order, SSID);
  AssertOK();
  setName(Name);
}

//===----------------------------------------------------------------------===//
// StoreInst Implementation
//===----------------------------------------------------------------------===//

void StoreInst::AssertOK() {
  assert(getOperand(0) && getOperand(1) && "Both operands must be non-null!");
  assert(getOperand(1)->getType()->isPointerTy() &&
         "Ptr must have pointer type!");
  assert(cast<PointerType>(getOperand(1)->getType())
             ->isOpaqueOrPointeeTypeMatches(getOperand(0)->getType()) &&
         "Ptr must be a pointer to Val type!");
}

StoreInst::StoreInst(Value *val, Value *addr, Instruction *InsertBefore)
    : StoreInst(val, addr, /*isVolatile=*/false, InsertBefore) {}

StoreInst::StoreInst(Value *val, Value *addr, BasicBlock *InsertAtEnd)
    : StoreInst(val, addr, /*isVolatile=*/false, InsertAtEnd) {}

StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile,
                     Instruction *InsertBefore)
    : StoreInst(val, addr, isVolatile,
                computeLoadStoreDefaultAlign(val->getType(), InsertBefore),
                InsertBefore) {}

StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile,
                     BasicBlock *InsertAtEnd)
    : StoreInst(val, addr, isVolatile,
                computeLoadStoreDefaultAlign(val->getType(), InsertAtEnd),
                InsertAtEnd) {}

StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, Align Align,
                     Instruction *InsertBefore)
    : StoreInst(val, addr, isVolatile, Align, AtomicOrdering::NotAtomic,
                SyncScope::System, InsertBefore) {}

StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, Align Align,
                     BasicBlock *InsertAtEnd)
    : StoreInst(val, addr, isVolatile, Align, AtomicOrdering::NotAtomic,
                SyncScope::System, InsertAtEnd) {}

StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, Align Align,
                     AtomicOrdering Order, SyncScope::ID SSID,
                     Instruction *InsertBefore)
    : Instruction(Type::getVoidTy(val->getContext()), Store,
                  OperandTraits<StoreInst>::op_begin(this),
                  OperandTraits<StoreInst>::operands(this), InsertBefore) {
  Op<0>() = val;
  Op<1>() = addr;
  setVolatile(isVolatile);
  setAlignment(Align);
  setAtomic(Order, SSID);
  AssertOK();
}

StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, Align Align,
                     AtomicOrdering Order, SyncScope::ID SSID,
                     BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(val->getContext()), Store,
                  OperandTraits<StoreInst>::op_begin(this),
                  OperandTraits<StoreInst>::operands(this), InsertAtEnd) {
  Op<0>() = val;
  Op<1>() = addr;
  setVolatile(isVolatile);
  setAlignment(Align);
  setAtomic(Order, SSID);
  AssertOK();
}

//===----------------------------------------------------------------------===//
// AtomicCmpXchgInst Implementation
//===----------------------------------------------------------------------===//

void AtomicCmpXchgInst::Init(Value *Ptr, Value *Cmp, Value *NewVal,
                             Align Alignment, AtomicOrdering SuccessOrdering,
                             AtomicOrdering FailureOrdering,
                             SyncScope::ID SSID) {
  Op<0>() = Ptr;
  Op<1>() = Cmp;
  Op<2>() = NewVal;
  setSuccessOrdering(SuccessOrdering);
  setFailureOrdering(FailureOrdering);
  setSyncScopeID(SSID);
  setAlignment(Alignment);

  assert(getOperand(0) && getOperand(1) && getOperand(2) &&
         "All operands must be non-null!");
  assert(getOperand(0)->getType()->isPointerTy() &&
         "Ptr must have pointer type!");
  assert(cast<PointerType>(getOperand(0)->getType())
             ->isOpaqueOrPointeeTypeMatches(getOperand(1)->getType()) &&
         "Ptr must be a pointer to Cmp type!");
  assert(cast<PointerType>(getOperand(0)->getType())
             ->isOpaqueOrPointeeTypeMatches(getOperand(2)->getType()) &&
         "Ptr must be a pointer to NewVal type!");
  assert(getOperand(1)->getType() == getOperand(2)->getType() &&
         "Cmp type and NewVal type must be same!");
}

AtomicCmpXchgInst::AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal,
                                     Align Alignment,
                                     AtomicOrdering SuccessOrdering,
                                     AtomicOrdering FailureOrdering,
                                     SyncScope::ID SSID,
                                     Instruction *InsertBefore)
    : Instruction(
          StructType::get(Cmp->getType(), Type::getInt1Ty(Cmp->getContext())),
          AtomicCmpXchg, OperandTraits<AtomicCmpXchgInst>::op_begin(this),
          OperandTraits<AtomicCmpXchgInst>::operands(this), InsertBefore) {
  Init(Ptr, Cmp, NewVal, Alignment, SuccessOrdering, FailureOrdering, SSID);
}

AtomicCmpXchgInst::AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal,
                                     Align Alignment,
                                     AtomicOrdering SuccessOrdering,
                                     AtomicOrdering FailureOrdering,
                                     SyncScope::ID SSID,
                                     BasicBlock *InsertAtEnd)
    : Instruction(
          StructType::get(Cmp->getType(), Type::getInt1Ty(Cmp->getContext())),
          AtomicCmpXchg, OperandTraits<AtomicCmpXchgInst>::op_begin(this),
          OperandTraits<AtomicCmpXchgInst>::operands(this), InsertAtEnd) {
  Init(Ptr, Cmp, NewVal, Alignment, SuccessOrdering, FailureOrdering, SSID);
}
1625 
1626 //===----------------------------------------------------------------------===//
1627 // AtomicRMWInst Implementation
1628 //===----------------------------------------------------------------------===//
1629 
1630 void AtomicRMWInst::Init(BinOp Operation, Value *Ptr, Value *Val,
1631  Align Alignment, AtomicOrdering Ordering,
1632  SyncScope::ID SSID) {
1633  assert(Ordering != AtomicOrdering::NotAtomic &&
1634  "atomicrmw instructions can only be atomic.");
1635  assert(Ordering != AtomicOrdering::Unordered &&
1636  "atomicrmw instructions cannot be unordered.");
1637  Op<0>() = Ptr;
1638  Op<1>() = Val;
 1639  setOperation(Operation);
 1640  setOrdering(Ordering);
1641  setSyncScopeID(SSID);
1642  setAlignment(Alignment);
1643 
1644  assert(getOperand(0) && getOperand(1) &&
1645  "All operands must be non-null!");
1646  assert(getOperand(0)->getType()->isPointerTy() &&
1647  "Ptr must have pointer type!");
1648  assert(cast<PointerType>(getOperand(0)->getType())
1649  ->isOpaqueOrPointeeTypeMatches(getOperand(1)->getType()) &&
1650  "Ptr must be a pointer to Val type!");
1651  assert(Ordering != AtomicOrdering::NotAtomic &&
1652  "AtomicRMW instructions must be atomic!");
1653 }
1654 
 1655 AtomicRMWInst::AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val,
 1656  Align Alignment, AtomicOrdering Ordering,
1657  SyncScope::ID SSID, Instruction *InsertBefore)
1658  : Instruction(Val->getType(), AtomicRMW,
1659  OperandTraits<AtomicRMWInst>::op_begin(this),
1660  OperandTraits<AtomicRMWInst>::operands(this), InsertBefore) {
1661  Init(Operation, Ptr, Val, Alignment, Ordering, SSID);
1662 }
1663 
 1664 AtomicRMWInst::AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val,
 1665  Align Alignment, AtomicOrdering Ordering,
1666  SyncScope::ID SSID, BasicBlock *InsertAtEnd)
1667  : Instruction(Val->getType(), AtomicRMW,
1668  OperandTraits<AtomicRMWInst>::op_begin(this),
1669  OperandTraits<AtomicRMWInst>::operands(this), InsertAtEnd) {
1670  Init(Operation, Ptr, Val, Alignment, Ordering, SSID);
1671 }
1672 
 1673 StringRef AtomicRMWInst::getOperationName(BinOp Op) {
 1674  switch (Op) {
1675  case AtomicRMWInst::Xchg:
1676  return "xchg";
1677  case AtomicRMWInst::Add:
1678  return "add";
1679  case AtomicRMWInst::Sub:
1680  return "sub";
1681  case AtomicRMWInst::And:
1682  return "and";
1683  case AtomicRMWInst::Nand:
1684  return "nand";
1685  case AtomicRMWInst::Or:
1686  return "or";
1687  case AtomicRMWInst::Xor:
1688  return "xor";
1689  case AtomicRMWInst::Max:
1690  return "max";
1691  case AtomicRMWInst::Min:
1692  return "min";
1693  case AtomicRMWInst::UMax:
1694  return "umax";
1695  case AtomicRMWInst::UMin:
1696  return "umin";
1697  case AtomicRMWInst::FAdd:
1698  return "fadd";
1699  case AtomicRMWInst::FSub:
1700  return "fsub";
1701  case AtomicRMWInst::FMax:
1702  return "fmax";
1703  case AtomicRMWInst::FMin:
1704  return "fmin";
 1705  case AtomicRMWInst::BAD_BINOP:
 1706  return "<invalid operation>";
1707  }
1708 
1709  llvm_unreachable("invalid atomicrmw operation");
1710 }
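// Illustrative usage sketch (not part of the original source):
// getOperationName is convenient when printing diagnostics, e.g.:
//   if (auto *RMW = dyn_cast<AtomicRMWInst>(&I))
//     errs() << "atomicrmw op: "
//            << AtomicRMWInst::getOperationName(RMW->getOperation()) << "\n";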
1711 
1712 //===----------------------------------------------------------------------===//
1713 // FenceInst Implementation
1714 //===----------------------------------------------------------------------===//
1715 
 1716 FenceInst::FenceInst(LLVMContext &C, AtomicOrdering Ordering,
 1717  SyncScope::ID SSID,
1718  Instruction *InsertBefore)
1719  : Instruction(Type::getVoidTy(C), Fence, nullptr, 0, InsertBefore) {
1720  setOrdering(Ordering);
1721  setSyncScopeID(SSID);
1722 }
1723 
 1724 FenceInst::FenceInst(LLVMContext &C, AtomicOrdering Ordering,
 1725  SyncScope::ID SSID,
1726  BasicBlock *InsertAtEnd)
1727  : Instruction(Type::getVoidTy(C), Fence, nullptr, 0, InsertAtEnd) {
1728  setOrdering(Ordering);
1729  setSyncScopeID(SSID);
1730 }
1731 
1732 //===----------------------------------------------------------------------===//
1733 // GetElementPtrInst Implementation
1734 //===----------------------------------------------------------------------===//
1735 
1736 void GetElementPtrInst::init(Value *Ptr, ArrayRef<Value *> IdxList,
1737  const Twine &Name) {
1738  assert(getNumOperands() == 1 + IdxList.size() &&
1739  "NumOperands not initialized?");
1740  Op<0>() = Ptr;
1741  llvm::copy(IdxList, op_begin() + 1);
1742  setName(Name);
1743 }
1744 
1745 GetElementPtrInst::GetElementPtrInst(const GetElementPtrInst &GEPI)
1746  : Instruction(GEPI.getType(), GetElementPtr,
 1747  OperandTraits<GetElementPtrInst>::op_end(this) -
 1748  GEPI.getNumOperands(),
1749  GEPI.getNumOperands()),
1750  SourceElementType(GEPI.SourceElementType),
1751  ResultElementType(GEPI.ResultElementType) {
1752  std::copy(GEPI.op_begin(), GEPI.op_end(), op_begin());
 1753  SubclassOptionalData = GEPI.SubclassOptionalData;
 1754 }
1755 
 1756 Type *GetElementPtrInst::getTypeAtIndex(Type *Ty, Value *Idx) {
 1757  if (auto *Struct = dyn_cast<StructType>(Ty)) {
1758  if (!Struct->indexValid(Idx))
1759  return nullptr;
1760  return Struct->getTypeAtIndex(Idx);
1761  }
1762  if (!Idx->getType()->isIntOrIntVectorTy())
1763  return nullptr;
1764  if (auto *Array = dyn_cast<ArrayType>(Ty))
1765  return Array->getElementType();
1766  if (auto *Vector = dyn_cast<VectorType>(Ty))
1767  return Vector->getElementType();
1768  return nullptr;
1769 }
1770 
 1771 Type *GetElementPtrInst::getTypeAtIndex(Type *Ty, uint64_t Idx) {
 1772  if (auto *Struct = dyn_cast<StructType>(Ty)) {
1773  if (Idx >= Struct->getNumElements())
1774  return nullptr;
1775  return Struct->getElementType(Idx);
1776  }
1777  if (auto *Array = dyn_cast<ArrayType>(Ty))
1778  return Array->getElementType();
1779  if (auto *Vector = dyn_cast<VectorType>(Ty))
1780  return Vector->getElementType();
1781  return nullptr;
1782 }
1783 
1784 template <typename IndexTy>
 1785 static Type *getIndexedTypeInternal(Type *Ty, ArrayRef<IndexTy> IdxList) {
 1786  if (IdxList.empty())
1787  return Ty;
1788  for (IndexTy V : IdxList.slice(1)) {
 1789  Ty = GetElementPtrInst::getTypeAtIndex(Ty, V);
 1790  if (!Ty)
1791  return Ty;
1792  }
1793  return Ty;
1794 }
1795 
 1796 Type *GetElementPtrInst::getIndexedType(Type *Ty, ArrayRef<Value *> IdxList) {
 1797  return getIndexedTypeInternal(Ty, IdxList);
1798 }
1799 
 1800 Type *GetElementPtrInst::getIndexedType(Type *Ty,
 1801  ArrayRef<Constant *> IdxList) {
1802  return getIndexedTypeInternal(Ty, IdxList);
1803 }
1804 
 1805 Type *GetElementPtrInst::getIndexedType(Type *Ty, ArrayRef<uint64_t> IdxList) {
 1806  return getIndexedTypeInternal(Ty, IdxList);
1807 }
1808 
1809 /// hasAllZeroIndices - Return true if all of the indices of this GEP are
1810 /// zeros. If so, the result pointer and the first operand have the same
1811 /// value, just potentially different types.
 1812 bool GetElementPtrInst::hasAllZeroIndices() const {
 1813  for (unsigned i = 1, e = getNumOperands(); i != e; ++i) {
1814  if (ConstantInt *CI = dyn_cast<ConstantInt>(getOperand(i))) {
1815  if (!CI->isZero()) return false;
1816  } else {
1817  return false;
1818  }
1819  }
1820  return true;
1821 }
1822 
1823 /// hasAllConstantIndices - Return true if all of the indices of this GEP are
1824 /// constant integers. If so, the result pointer and the first operand have
1825 /// a constant offset between them.
 1826 bool GetElementPtrInst::hasAllConstantIndices() const {
 1827  for (unsigned i = 1, e = getNumOperands(); i != e; ++i) {
1828  if (!isa<ConstantInt>(getOperand(i)))
1829  return false;
1830  }
1831  return true;
1832 }
1833 
 1834 void GetElementPtrInst::setIsInBounds(bool B) {
 1835  cast<GEPOperator>(this)->setIsInBounds(B);
1836 }
1837 
 1838 bool GetElementPtrInst::isInBounds() const {
 1839  return cast<GEPOperator>(this)->isInBounds();
1840 }
1841 
 1842 bool GetElementPtrInst::accumulateConstantOffset(const DataLayout &DL,
 1843  APInt &Offset) const {
1844  // Delegate to the generic GEPOperator implementation.
1845  return cast<GEPOperator>(this)->accumulateConstantOffset(DL, Offset);
1846 }
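// Illustrative usage sketch (not part of the original source): accumulating
// the byte offset of an all-constant GEP. The APInt must be sized to the
// DataLayout's index width for the GEP's address space:
//   APInt Offset(DL.getIndexSizeInBits(GEP->getPointerAddressSpace()), 0);
//   if (GEP->accumulateConstantOffset(DL, Offset))
//     ; // Offset now holds the constant byte offset from the base pointer.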
1847 
 1848 bool GetElementPtrInst::collectOffset(
 1849  const DataLayout &DL, unsigned BitWidth,
1850  MapVector<Value *, APInt> &VariableOffsets,
1851  APInt &ConstantOffset) const {
1852  // Delegate to the generic GEPOperator implementation.
1853  return cast<GEPOperator>(this)->collectOffset(DL, BitWidth, VariableOffsets,
1854  ConstantOffset);
1855 }
1856 
1857 //===----------------------------------------------------------------------===//
1858 // ExtractElementInst Implementation
1859 //===----------------------------------------------------------------------===//
1860 
1861 ExtractElementInst::ExtractElementInst(Value *Val, Value *Index,
1862  const Twine &Name,
1863  Instruction *InsertBef)
1864  : Instruction(cast<VectorType>(Val->getType())->getElementType(),
1865  ExtractElement,
 1866  OperandTraits<ExtractElementInst>::op_begin(this),
 1867  2, InsertBef) {
1868  assert(isValidOperands(Val, Index) &&
1869  "Invalid extractelement instruction operands!");
1870  Op<0>() = Val;
1871  Op<1>() = Index;
1872  setName(Name);
1873 }
1874 
1875 ExtractElementInst::ExtractElementInst(Value *Val, Value *Index,
1876  const Twine &Name,
1877  BasicBlock *InsertAE)
1878  : Instruction(cast<VectorType>(Val->getType())->getElementType(),
1879  ExtractElement,
 1880  OperandTraits<ExtractElementInst>::op_begin(this),
 1881  2, InsertAE) {
1882  assert(isValidOperands(Val, Index) &&
1883  "Invalid extractelement instruction operands!");
1884 
1885  Op<0>() = Val;
1886  Op<1>() = Index;
1887  setName(Name);
1888 }
1889 
 1890 bool ExtractElementInst::isValidOperands(const Value *Val, const Value *Index) {
 1891  if (!Val->getType()->isVectorTy() || !Index->getType()->isIntegerTy())
1892  return false;
1893  return true;
1894 }
1895 
1896 //===----------------------------------------------------------------------===//
1897 // InsertElementInst Implementation
1898 //===----------------------------------------------------------------------===//
1899 
1900 InsertElementInst::InsertElementInst(Value *Vec, Value *Elt, Value *Index,
1901  const Twine &Name,
1902  Instruction *InsertBef)
1903  : Instruction(Vec->getType(), InsertElement,
1904  OperandTraits<InsertElementInst>::op_begin(this),
1905  3, InsertBef) {
1906  assert(isValidOperands(Vec, Elt, Index) &&
1907  "Invalid insertelement instruction operands!");
1908  Op<0>() = Vec;
1909  Op<1>() = Elt;
1910  Op<2>() = Index;
1911  setName(Name);
1912 }
1913 
1914 InsertElementInst::InsertElementInst(Value *Vec, Value *Elt, Value *Index,
1915  const Twine &Name,
1916  BasicBlock *InsertAE)
1917  : Instruction(Vec->getType(), InsertElement,
1918  OperandTraits<InsertElementInst>::op_begin(this),
1919  3, InsertAE) {
1920  assert(isValidOperands(Vec, Elt, Index) &&
1921  "Invalid insertelement instruction operands!");
1922 
1923  Op<0>() = Vec;
1924  Op<1>() = Elt;
1925  Op<2>() = Index;
1926  setName(Name);
1927 }
1928 
1929 bool InsertElementInst::isValidOperands(const Value *Vec, const Value *Elt,
1930  const Value *Index) {
1931  if (!Vec->getType()->isVectorTy())
1932  return false; // First operand of insertelement must be vector type.
1933 
1934  if (Elt->getType() != cast<VectorType>(Vec->getType())->getElementType())
 1935  return false; // Second operand of insertelement must be vector element type.
1936 
1937  if (!Index->getType()->isIntegerTy())
 1938  return false; // Third operand of insertelement must be an integer type.
1939  return true;
1940 }
1941 
1942 //===----------------------------------------------------------------------===//
1943 // ShuffleVectorInst Implementation
1944 //===----------------------------------------------------------------------===//
1945 
 1946 static Value *createPlaceholderForShuffleVector(Value *V) {
 1947  assert(V && "Cannot create placeholder of nullptr V");
1948  return PoisonValue::get(V->getType());
1949 }
1950 
 1951 ShuffleVectorInst::ShuffleVectorInst(Value *V1, Value *Mask, const Twine &Name,
 1952  Instruction *InsertBefore)
 1953  : ShuffleVectorInst(V1, createPlaceholderForShuffleVector(V1), Mask, Name,
 1954  InsertBefore) {}
1955 
 1956 ShuffleVectorInst::ShuffleVectorInst(Value *V1, Value *Mask, const Twine &Name,
 1957  BasicBlock *InsertAtEnd)
 1958  : ShuffleVectorInst(V1, createPlaceholderForShuffleVector(V1), Mask, Name,
 1959  InsertAtEnd) {}
1960 
 1961 ShuffleVectorInst::ShuffleVectorInst(Value *V1, ArrayRef<int> Mask,
 1962  const Twine &Name,
 1963  Instruction *InsertBefore)
 1964  : ShuffleVectorInst(V1, createPlaceholderForShuffleVector(V1), Mask, Name,
 1965  InsertBefore) {}
1966 
 1967 ShuffleVectorInst::ShuffleVectorInst(Value *V1, ArrayRef<int> Mask,
 1968  const Twine &Name, BasicBlock *InsertAtEnd)
 1969  : ShuffleVectorInst(V1, createPlaceholderForShuffleVector(V1), Mask, Name,
 1970  InsertAtEnd) {}
1971 
 1972 ShuffleVectorInst::ShuffleVectorInst(Value *V1, Value *V2, Value *Mask,
 1973  const Twine &Name,
1974  Instruction *InsertBefore)
1975  : Instruction(
1976  VectorType::get(cast<VectorType>(V1->getType())->getElementType(),
1977  cast<VectorType>(Mask->getType())->getElementCount()),
1978  ShuffleVector, OperandTraits<ShuffleVectorInst>::op_begin(this),
1979  OperandTraits<ShuffleVectorInst>::operands(this), InsertBefore) {
1980  assert(isValidOperands(V1, V2, Mask) &&
1981  "Invalid shuffle vector instruction operands!");
1982 
1983  Op<0>() = V1;
1984  Op<1>() = V2;
1985  SmallVector<int, 16> MaskArr;
1986  getShuffleMask(cast<Constant>(Mask), MaskArr);
1987  setShuffleMask(MaskArr);
1988  setName(Name);
1989 }
1990 
 1991 ShuffleVectorInst::ShuffleVectorInst(Value *V1, Value *V2, Value *Mask,
 1992  const Twine &Name, BasicBlock *InsertAtEnd)
1993  : Instruction(
1994  VectorType::get(cast<VectorType>(V1->getType())->getElementType(),
1995  cast<VectorType>(Mask->getType())->getElementCount()),
1996  ShuffleVector, OperandTraits<ShuffleVectorInst>::op_begin(this),
1997  OperandTraits<ShuffleVectorInst>::operands(this), InsertAtEnd) {
1998  assert(isValidOperands(V1, V2, Mask) &&
1999  "Invalid shuffle vector instruction operands!");
2000 
2001  Op<0>() = V1;
2002  Op<1>() = V2;
2003  SmallVector<int, 16> MaskArr;
2004  getShuffleMask(cast<Constant>(Mask), MaskArr);
2005  setShuffleMask(MaskArr);
2006  setName(Name);
2007 }
2008 
 2009 ShuffleVectorInst::ShuffleVectorInst(Value *V1, Value *V2, ArrayRef<int> Mask,
 2010  const Twine &Name,
2011  Instruction *InsertBefore)
2012  : Instruction(
2013  VectorType::get(cast<VectorType>(V1->getType())->getElementType(),
2014  Mask.size(), isa<ScalableVectorType>(V1->getType())),
2015  ShuffleVector, OperandTraits<ShuffleVectorInst>::op_begin(this),
2016  OperandTraits<ShuffleVectorInst>::operands(this), InsertBefore) {
2017  assert(isValidOperands(V1, V2, Mask) &&
2018  "Invalid shuffle vector instruction operands!");
2019  Op<0>() = V1;
2020  Op<1>() = V2;
 2021  setShuffleMask(Mask);
 2022  setName(Name);
2023 }
2024 
 2025 ShuffleVectorInst::ShuffleVectorInst(Value *V1, Value *V2, ArrayRef<int> Mask,
 2026  const Twine &Name, BasicBlock *InsertAtEnd)
2027  : Instruction(
2028  VectorType::get(cast<VectorType>(V1->getType())->getElementType(),
2029  Mask.size(), isa<ScalableVectorType>(V1->getType())),
2030  ShuffleVector, OperandTraits<ShuffleVectorInst>::op_begin(this),
2031  OperandTraits<ShuffleVectorInst>::operands(this), InsertAtEnd) {
2032  assert(isValidOperands(V1, V2, Mask) &&
2033  "Invalid shuffle vector instruction operands!");
2034 
2035  Op<0>() = V1;
2036  Op<1>() = V2;
 2037  setShuffleMask(Mask);
 2038  setName(Name);
2039 }
2040 
 2041 void ShuffleVectorInst::commute() {
 2042  int NumOpElts = cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
2043  int NumMaskElts = ShuffleMask.size();
2044  SmallVector<int, 16> NewMask(NumMaskElts);
2045  for (int i = 0; i != NumMaskElts; ++i) {
2046  int MaskElt = getMaskValue(i);
2047  if (MaskElt == UndefMaskElem) {
2048  NewMask[i] = UndefMaskElem;
2049  continue;
2050  }
2051  assert(MaskElt >= 0 && MaskElt < 2 * NumOpElts && "Out-of-range mask");
2052  MaskElt = (MaskElt < NumOpElts) ? MaskElt + NumOpElts : MaskElt - NumOpElts;
2053  NewMask[i] = MaskElt;
2054  }
2055  setShuffleMask(NewMask);
2056  Op<0>().swap(Op<1>());
2057 }
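// Illustrative example (not part of the original source): commuting
//   %s = shufflevector <2 x i32> %a, <2 x i32> %b, <2 x i32> <i32 0, i32 3>
// swaps %a and %b and rewrites the mask to <i32 2, i32 1>, so the same lanes
// are still selected from the swapped operands.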
2058 
 2059 bool ShuffleVectorInst::isValidOperands(const Value *V1, const Value *V2,
 2060  ArrayRef<int> Mask) {
2061  // V1 and V2 must be vectors of the same type.
2062  if (!isa<VectorType>(V1->getType()) || V1->getType() != V2->getType())
2063  return false;
2064 
2065  // Make sure the mask elements make sense.
2066  int V1Size =
2067  cast<VectorType>(V1->getType())->getElementCount().getKnownMinValue();
2068  for (int Elem : Mask)
2069  if (Elem != UndefMaskElem && Elem >= V1Size * 2)
2070  return false;
2071 
2072  if (isa<ScalableVectorType>(V1->getType()))
2073  if ((Mask[0] != 0 && Mask[0] != UndefMaskElem) || !all_equal(Mask))
2074  return false;
2075 
2076  return true;
2077 }
2078 
 2079 bool ShuffleVectorInst::isValidOperands(const Value *V1, const Value *V2,
 2080  const Value *Mask) {
2081  // V1 and V2 must be vectors of the same type.
2082  if (!V1->getType()->isVectorTy() || V1->getType() != V2->getType())
2083  return false;
2084 
2085  // Mask must be vector of i32, and must be the same kind of vector as the
2086  // input vectors
2087  auto *MaskTy = dyn_cast<VectorType>(Mask->getType());
2088  if (!MaskTy || !MaskTy->getElementType()->isIntegerTy(32) ||
2089  isa<ScalableVectorType>(MaskTy) != isa<ScalableVectorType>(V1->getType()))
2090  return false;
2091 
2092  // Check to see if Mask is valid.
2093  if (isa<UndefValue>(Mask) || isa<ConstantAggregateZero>(Mask))
2094  return true;
2095 
2096  if (const auto *MV = dyn_cast<ConstantVector>(Mask)) {
2097  unsigned V1Size = cast<FixedVectorType>(V1->getType())->getNumElements();
2098  for (Value *Op : MV->operands()) {
2099  if (auto *CI = dyn_cast<ConstantInt>(Op)) {
2100  if (CI->uge(V1Size*2))
2101  return false;
2102  } else if (!isa<UndefValue>(Op)) {
2103  return false;
2104  }
2105  }
2106  return true;
2107  }
2108 
2109  if (const auto *CDS = dyn_cast<ConstantDataSequential>(Mask)) {
2110  unsigned V1Size = cast<FixedVectorType>(V1->getType())->getNumElements();
2111  for (unsigned i = 0, e = cast<FixedVectorType>(MaskTy)->getNumElements();
2112  i != e; ++i)
2113  if (CDS->getElementAsInteger(i) >= V1Size*2)
2114  return false;
2115  return true;
2116  }
2117 
2118  return false;
2119 }
2120 
 2121 void ShuffleVectorInst::getShuffleMask(const Constant *Mask,
 2122  SmallVectorImpl<int> &Result) {
2123  ElementCount EC = cast<VectorType>(Mask->getType())->getElementCount();
2124 
2125  if (isa<ConstantAggregateZero>(Mask)) {
2126  Result.resize(EC.getKnownMinValue(), 0);
2127  return;
2128  }
2129 
2130  Result.reserve(EC.getKnownMinValue());
2131 
2132  if (EC.isScalable()) {
2133  assert((isa<ConstantAggregateZero>(Mask) || isa<UndefValue>(Mask)) &&
2134  "Scalable vector shuffle mask must be undef or zeroinitializer");
2135  int MaskVal = isa<UndefValue>(Mask) ? -1 : 0;
2136  for (unsigned I = 0; I < EC.getKnownMinValue(); ++I)
2137  Result.emplace_back(MaskVal);
2138  return;
2139  }
2140 
2141  unsigned NumElts = EC.getKnownMinValue();
2142 
2143  if (auto *CDS = dyn_cast<ConstantDataSequential>(Mask)) {
2144  for (unsigned i = 0; i != NumElts; ++i)
2145  Result.push_back(CDS->getElementAsInteger(i));
2146  return;
2147  }
2148  for (unsigned i = 0; i != NumElts; ++i) {
2149  Constant *C = Mask->getAggregateElement(i);
2150  Result.push_back(isa<UndefValue>(C) ? -1 :
2151  cast<ConstantInt>(C)->getZExtValue());
2152  }
2153 }
2154 
 2155 void ShuffleVectorInst::setShuffleMask(ArrayRef<int> Mask) {
 2156  ShuffleMask.assign(Mask.begin(), Mask.end());
2157  ShuffleMaskForBitcode = convertShuffleMaskForBitcode(Mask, getType());
2158 }
2159 
 2160 Constant *ShuffleVectorInst::convertShuffleMaskForBitcode(ArrayRef<int> Mask,
 2161  Type *ResultTy) {
2162  Type *Int32Ty = Type::getInt32Ty(ResultTy->getContext());
2163  if (isa<ScalableVectorType>(ResultTy)) {
2164  assert(all_equal(Mask) && "Unexpected shuffle");
2165  Type *VecTy = VectorType::get(Int32Ty, Mask.size(), true);
2166  if (Mask[0] == 0)
2167  return Constant::getNullValue(VecTy);
2168  return UndefValue::get(VecTy);
2169  }
2170  SmallVector<Constant *, 16> MaskConst;
2171  for (int Elem : Mask) {
2172  if (Elem == UndefMaskElem)
2173  MaskConst.push_back(UndefValue::get(Int32Ty));
2174  else
2175  MaskConst.push_back(ConstantInt::get(Int32Ty, Elem));
2176  }
2177  return ConstantVector::get(MaskConst);
2178 }
2179 
2180 static bool isSingleSourceMaskImpl(ArrayRef<int> Mask, int NumOpElts) {
2181  assert(!Mask.empty() && "Shuffle mask must contain elements");
2182  bool UsesLHS = false;
2183  bool UsesRHS = false;
2184  for (int I : Mask) {
2185  if (I == -1)
2186  continue;
2187  assert(I >= 0 && I < (NumOpElts * 2) &&
2188  "Out-of-bounds shuffle mask element");
2189  UsesLHS |= (I < NumOpElts);
2190  UsesRHS |= (I >= NumOpElts);
2191  if (UsesLHS && UsesRHS)
2192  return false;
2193  }
2194  // Allow for degenerate case: completely undef mask means neither source is used.
2195  return UsesLHS || UsesRHS;
2196 }
2197 
 2198 bool ShuffleVectorInst::isSingleSourceMask(ArrayRef<int> Mask) {
 2199  // We don't have vector operand size information, so assume operands are the
2200  // same size as the mask.
2201  return isSingleSourceMaskImpl(Mask, Mask.size());
2202 }
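// Illustrative example (not part of the original source): with 4-element
// operands, masks <0, 1, 3, 2> and <4, 6, 5, 7> are each single-source
// (every lane comes from one operand); <0, 5, 2, 7> uses both and is not.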
2203 
2204 static bool isIdentityMaskImpl(ArrayRef<int> Mask, int NumOpElts) {
2205  if (!isSingleSourceMaskImpl(Mask, NumOpElts))
2206  return false;
2207  for (int i = 0, NumMaskElts = Mask.size(); i < NumMaskElts; ++i) {
2208  if (Mask[i] == -1)
2209  continue;
2210  if (Mask[i] != i && Mask[i] != (NumOpElts + i))
2211  return false;
2212  }
2213  return true;
2214 }
2215 
 2216 bool ShuffleVectorInst::isIdentityMask(ArrayRef<int> Mask) {
 2217  // We don't have vector operand size information, so assume operands are the
2218  // same size as the mask.
2219  return isIdentityMaskImpl(Mask, Mask.size());
2220 }
2221 
 2222 bool ShuffleVectorInst::isReverseMask(ArrayRef<int> Mask) {
 2223  if (!isSingleSourceMask(Mask))
2224  return false;
2225 
2226  // The number of elements in the mask must be at least 2.
2227  int NumElts = Mask.size();
2228  if (NumElts < 2)
2229  return false;
2230 
2231  for (int i = 0; i < NumElts; ++i) {
2232  if (Mask[i] == -1)
2233  continue;
2234  if (Mask[i] != (NumElts - 1 - i) && Mask[i] != (NumElts + NumElts - 1 - i))
2235  return false;
2236  }
2237  return true;
2238 }
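// Illustrative example (not part of the original source): for 4 elements,
// <3, 2, 1, 0> reverses the first operand and <7, 6, 5, 4> reverses the
// second; undef (-1) lanes are skipped by the check above.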
2239 
 2240 bool ShuffleVectorInst::isZeroEltSplatMask(ArrayRef<int> Mask) {
 2241  if (!isSingleSourceMask(Mask))
2242  return false;
2243  for (int i = 0, NumElts = Mask.size(); i < NumElts; ++i) {
2244  if (Mask[i] == -1)
2245  continue;
2246  if (Mask[i] != 0 && Mask[i] != NumElts)
2247  return false;
2248  }
2249  return true;
2250 }
2251 
 2252 bool ShuffleVectorInst::isSelectMask(ArrayRef<int> Mask) {
 2253  // Select is differentiated from identity. It requires using both sources.
2254  if (isSingleSourceMask(Mask))
2255  return false;
2256  for (int i = 0, NumElts = Mask.size(); i < NumElts; ++i) {
2257  if (Mask[i] == -1)
2258  continue;
2259  if (Mask[i] != i && Mask[i] != (NumElts + i))
2260  return false;
2261  }
2262  return true;
2263 }
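// Illustrative example (not part of the original source): <0, 5, 2, 7>
// keeps every lane in place while choosing per-lane between the two
// operands, so it is a select mask; <0, 1, 2, 3> uses only one source and
// is classified as an identity shuffle instead.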
2264 
 2265 bool ShuffleVectorInst::isTransposeMask(ArrayRef<int> Mask) {
 2266  // Example masks that will return true:
2267  // v1 = <a, b, c, d>
2268  // v2 = <e, f, g, h>
2269  // trn1 = shufflevector v1, v2 <0, 4, 2, 6> = <a, e, c, g>
2270  // trn2 = shufflevector v1, v2 <1, 5, 3, 7> = <b, f, d, h>
2271 
2272  // 1. The number of elements in the mask must be a power-of-2 and at least 2.
2273  int NumElts = Mask.size();
2274  if (NumElts < 2 || !isPowerOf2_32(NumElts))
2275  return false;
2276 
2277  // 2. The first element of the mask must be either a 0 or a 1.
2278  if (Mask[0] != 0 && Mask[0] != 1)
2279  return false;
2280 
2281  // 3. The difference between the first 2 elements must be equal to the
2282  // number of elements in the mask.
2283  if ((Mask[1] - Mask[0]) != NumElts)
2284  return false;
2285 
2286  // 4. The difference between consecutive even-numbered and odd-numbered
2287  // elements must be equal to 2.
2288  for (int i = 2; i < NumElts; ++i) {
2289  int MaskEltVal = Mask[i];
2290  if (MaskEltVal == -1)
2291  return false;
2292  int MaskEltPrevVal = Mask[i - 2];
2293  if (MaskEltVal - MaskEltPrevVal != 2)
2294  return false;
2295  }
2296  return true;
2297 }
2298 
 2299 bool ShuffleVectorInst::isSpliceMask(ArrayRef<int> Mask, int &Index) {
 2300  // Example: shufflevector <4 x n> A, <4 x n> B, <1,2,3,4>
2301  int StartIndex = -1;
2302  for (int I = 0, E = Mask.size(); I != E; ++I) {
2303  int MaskEltVal = Mask[I];
2304  if (MaskEltVal == -1)
2305  continue;
2306 
2307  if (StartIndex == -1) {
2308  // Don't support a StartIndex that begins in the second input, or if the
2309  // first non-undef index would access below the StartIndex.
2310  if (MaskEltVal < I || E <= (MaskEltVal - I))
2311  return false;
2312 
2313  StartIndex = MaskEltVal - I;
2314  continue;
2315  }
2316 
2317  // Splice is sequential starting from StartIndex.
2318  if (MaskEltVal != (StartIndex + I))
2319  return false;
2320  }
2321 
2322  if (StartIndex == -1)
2323  return false;
2324 
2325  // NOTE: This accepts StartIndex == 0 (COPY).
2326  Index = StartIndex;
2327  return true;
2328 }
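// Illustrative example (not part of the original source): with two
// 4-element operands, mask <1, 2, 3, 4> is a splice with Index == 1: the
// tail of the first operand followed by the head of the second.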
2329 
 2330 bool ShuffleVectorInst::isExtractSubvectorMask(ArrayRef<int> Mask,
 2331  int NumSrcElts, int &Index) {
2332  // Must extract from a single source.
2333  if (!isSingleSourceMaskImpl(Mask, NumSrcElts))
2334  return false;
2335 
2336  // Must be smaller (else this is an Identity shuffle).
2337  if (NumSrcElts <= (int)Mask.size())
2338  return false;
2339 
2340  // Find start of extraction, accounting that we may start with an UNDEF.
2341  int SubIndex = -1;
2342  for (int i = 0, e = Mask.size(); i != e; ++i) {
2343  int M = Mask[i];
2344  if (M < 0)
2345  continue;
2346  int Offset = (M % NumSrcElts) - i;
2347  if (0 <= SubIndex && SubIndex != Offset)
2348  return false;
2349  SubIndex = Offset;
2350  }
2351 
2352  if (0 <= SubIndex && SubIndex + (int)Mask.size() <= NumSrcElts) {
2353  Index = SubIndex;
2354  return true;
2355  }
2356  return false;
2357 }
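// Illustrative example (not part of the original source): shuffling an
// 8-element source with mask <2, 3, 4, 5> extracts a 4-element subvector
// starting at Index == 2.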
2358 
 2359 bool ShuffleVectorInst::isInsertSubvectorMask(ArrayRef<int> Mask,
 2360  int NumSrcElts, int &NumSubElts,
2361  int &Index) {
2362  int NumMaskElts = Mask.size();
2363 
2364  // Don't try to match if we're shuffling to a smaller size.
2365  if (NumMaskElts < NumSrcElts)
2366  return false;
2367 
2368  // TODO: We don't recognize self-insertion/widening.
2369  if (isSingleSourceMaskImpl(Mask, NumSrcElts))
2370  return false;
2371 
2372  // Determine which mask elements are attributed to which source.
2373  APInt UndefElts = APInt::getZero(NumMaskElts);
2374  APInt Src0Elts = APInt::getZero(NumMaskElts);
2375  APInt Src1Elts = APInt::getZero(NumMaskElts);
2376  bool Src0Identity = true;
2377  bool Src1Identity = true;
2378 
2379  for (int i = 0; i != NumMaskElts; ++i) {
2380  int M = Mask[i];
2381  if (M < 0) {
2382  UndefElts.setBit(i);
2383  continue;
2384  }
2385  if (M < NumSrcElts) {
2386  Src0Elts.setBit(i);
2387  Src0Identity &= (M == i);
2388  continue;
2389  }
2390  Src1Elts.setBit(i);
2391  Src1Identity &= (M == (i + NumSrcElts));
2392  }
2393  assert((Src0Elts | Src1Elts | UndefElts).isAllOnes() &&
2394  "unknown shuffle elements");
2395  assert(!Src0Elts.isZero() && !Src1Elts.isZero() &&
2396  "2-source shuffle not found");
2397 
2398  // Determine lo/hi span ranges.
2399  // TODO: How should we handle undefs at the start of subvector insertions?
2400  int Src0Lo = Src0Elts.countTrailingZeros();
2401  int Src1Lo = Src1Elts.countTrailingZeros();
2402  int Src0Hi = NumMaskElts - Src0Elts.countLeadingZeros();
2403  int Src1Hi = NumMaskElts - Src1Elts.countLeadingZeros();
2404 
 2405  // If src0 is in place, see if the src1 elements are in place within their
 2406  // own span.
2407  if (Src0Identity) {
2408  int NumSub1Elts = Src1Hi - Src1Lo;
2409  ArrayRef<int> Sub1Mask = Mask.slice(Src1Lo, NumSub1Elts);
2410  if (isIdentityMaskImpl(Sub1Mask, NumSrcElts)) {
2411  NumSubElts = NumSub1Elts;
2412  Index = Src1Lo;
2413  return true;
2414  }
2415  }
2416 
 2417  // If src1 is in place, see if the src0 elements are in place within their
 2418  // own span.
2419  if (Src1Identity) {
2420  int NumSub0Elts = Src0Hi - Src0Lo;
2421  ArrayRef<int> Sub0Mask = Mask.slice(Src0Lo, NumSub0Elts);
2422  if (isIdentityMaskImpl(Sub0Mask, NumSrcElts)) {
2423  NumSubElts = NumSub0Elts;
2424  Index = Src0Lo;
2425  return true;
2426  }
2427  }
2428 
2429  return false;
2430 }
2431 
 2432 bool ShuffleVectorInst::isIdentityWithPadding() const {
 2433  if (isa<UndefValue>(Op<2>()))
2434  return false;
2435 
2436  // FIXME: Not currently possible to express a shuffle mask for a scalable
2437  // vector for this case.
2438  if (isa<ScalableVectorType>(getType()))
2439  return false;
2440 
2441  int NumOpElts = cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
2442  int NumMaskElts = cast<FixedVectorType>(getType())->getNumElements();
2443  if (NumMaskElts <= NumOpElts)
2444  return false;
2445 
2446  // The first part of the mask must choose elements from exactly 1 source op.
 2447  ArrayRef<int> Mask = getShuffleMask();
 2448  if (!isIdentityMaskImpl(Mask, NumOpElts))
2449  return false;
2450 
2451  // All extending must be with undef elements.
2452  for (int i = NumOpElts; i < NumMaskElts; ++i)
2453  if (Mask[i] != -1)
2454  return false;
2455 
2456  return true;
2457 }
2458 
 2459 bool ShuffleVectorInst::isIdentityWithExtract() const {
 2460  if (isa<UndefValue>(Op<2>()))
2461  return false;
2462 
2463  // FIXME: Not currently possible to express a shuffle mask for a scalable
2464  // vector for this case.
2465  if (isa<ScalableVectorType>(getType()))
2466  return false;
2467 
2468  int NumOpElts = cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
2469  int NumMaskElts = cast<FixedVectorType>(getType())->getNumElements();
2470  if (NumMaskElts >= NumOpElts)
2471  return false;
2472 
2473  return isIdentityMaskImpl(getShuffleMask(), NumOpElts);
2474 }
2475 
 2476 bool ShuffleVectorInst::isConcat() const {
 2477  // Vector concatenation is differentiated from identity with padding.
2478  if (isa<UndefValue>(Op<0>()) || isa<UndefValue>(Op<1>()) ||
2479  isa<UndefValue>(Op<2>()))
2480  return false;
2481 
2482  // FIXME: Not currently possible to express a shuffle mask for a scalable
2483  // vector for this case.
2484  if (isa<ScalableVectorType>(getType()))
2485  return false;
2486 
2487  int NumOpElts = cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
2488  int NumMaskElts = cast<FixedVectorType>(getType())->getNumElements();
2489  if (NumMaskElts != NumOpElts * 2)
2490  return false;
2491 
2492  // Use the mask length rather than the operands' vector lengths here. We
2493  // already know that the shuffle returns a vector twice as long as the inputs,
 2494  // and neither of the inputs is an undef vector. If the mask picks consecutive
2495  // elements from both inputs, then this is a concatenation of the inputs.
2496  return isIdentityMaskImpl(getShuffleMask(), NumMaskElts);
2497 }
2498 
 2499 static bool isReplicationMaskWithParams(ArrayRef<int> Mask,
 2500  int ReplicationFactor, int VF) {
2501  assert(Mask.size() == (unsigned)ReplicationFactor * VF &&
2502  "Unexpected mask size.");
2503 
2504  for (int CurrElt : seq(0, VF)) {
2505  ArrayRef<int> CurrSubMask = Mask.take_front(ReplicationFactor);
2506  assert(CurrSubMask.size() == (unsigned)ReplicationFactor &&
2507  "Run out of mask?");
2508  Mask = Mask.drop_front(ReplicationFactor);
2509  if (!all_of(CurrSubMask, [CurrElt](int MaskElt) {
2510  return MaskElt == UndefMaskElem || MaskElt == CurrElt;
2511  }))
2512  return false;
2513  }
2514  assert(Mask.empty() && "Did not consume the whole mask?");
2515 
2516  return true;
2517 }
2518 
 2519 bool ShuffleVectorInst::isReplicationMask(ArrayRef<int> Mask,
 2520  int &ReplicationFactor, int &VF) {
 2521  // undef-less case is trivial.
 2522  if (none_of(Mask, [](int MaskElt) { return MaskElt == UndefMaskElem; })) {
 2523  ReplicationFactor =
2524  Mask.take_while([](int MaskElt) { return MaskElt == 0; }).size();
2525  if (ReplicationFactor == 0 || Mask.size() % ReplicationFactor != 0)
2526  return false;
2527  VF = Mask.size() / ReplicationFactor;
2528  return isReplicationMaskWithParams(Mask, ReplicationFactor, VF);
2529  }
2530 
2531  // However, if the mask contains undef's, we have to enumerate possible tuples
2532  // and pick one. There are bounds on replication factor: [1, mask size]
2533  // (where RF=1 is an identity shuffle, RF=mask size is a broadcast shuffle)
2534  // Additionally, mask size is a replication factor multiplied by vector size,
2535  // which further significantly reduces the search space.
2536 
2537  // Before doing that, let's perform basic correctness checking first.
2538  int Largest = -1;
2539  for (int MaskElt : Mask) {
2540  if (MaskElt == UndefMaskElem)
2541  continue;
2542  // Elements must be in non-decreasing order.
2543  if (MaskElt < Largest)
2544  return false;
2545  Largest = std::max(Largest, MaskElt);
2546  }
2547 
2548  // Prefer larger replication factor if all else equal.
2549  for (int PossibleReplicationFactor :
2550  reverse(seq_inclusive<unsigned>(1, Mask.size()))) {
2551  if (Mask.size() % PossibleReplicationFactor != 0)
2552  continue;
2553  int PossibleVF = Mask.size() / PossibleReplicationFactor;
2554  if (!isReplicationMaskWithParams(Mask, PossibleReplicationFactor,
2555  PossibleVF))
2556  continue;
2557  ReplicationFactor = PossibleReplicationFactor;
2558  VF = PossibleVF;
2559  return true;
2560  }
2561 
2562  return false;
2563 }
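// Illustrative example (not part of the original source): mask
// <0, 0, 0, 1, 1, 1> replicates each of VF == 2 source elements
// ReplicationFactor == 3 times; with undefs, e.g. <0, -1, 0, 1, 1, -1>, the
// search above still recovers the same (ReplicationFactor, VF) = (3, 2).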
2564 
2565 bool ShuffleVectorInst::isReplicationMask(int &ReplicationFactor,
2566  int &VF) const {
2567  // Not possible to express a shuffle mask for a scalable vector for this
2568  // case.
2569  if (isa<ScalableVectorType>(getType()))
2570  return false;
2571 
2572  VF = cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
2573  if (ShuffleMask.size() % VF != 0)
2574  return false;
2575  ReplicationFactor = ShuffleMask.size() / VF;
2576 
2577  return isReplicationMaskWithParams(ShuffleMask, ReplicationFactor, VF);
2578 }
2579 
 2580 bool ShuffleVectorInst::isOneUseSingleSourceMask(ArrayRef<int> Mask, int VF) {
 2581  if (VF <= 0 || Mask.size() < static_cast<unsigned>(VF) ||
2582  Mask.size() % VF != 0)
2583  return false;
2584  for (unsigned K = 0, Sz = Mask.size(); K < Sz; K += VF) {
2585  ArrayRef<int> SubMask = Mask.slice(K, VF);
2586  if (all_of(SubMask, [](int Idx) { return Idx == UndefMaskElem; }))
2587  continue;
2588  SmallBitVector Used(VF, false);
2589  for_each(SubMask, [&Used, VF](int Idx) {
2590  if (Idx != UndefMaskElem && Idx < VF)
2591  Used.set(Idx);
2592  });
2593  if (!Used.all())
2594  return false;
2595  }
2596  return true;
2597 }
2598 
 2599 /// Return true if this shuffle mask is a one-use-single-source mask.
 2600 bool ShuffleVectorInst::isOneUseSingleSourceMask(int VF) const {
 2601  // Not possible to express a shuffle mask for a scalable vector for this
2602  // case.
2603  if (isa<ScalableVectorType>(getType()))
2604  return false;
2605  if (!isSingleSourceMask(ShuffleMask))
2606  return false;
2607 
2608  return isOneUseSingleSourceMask(ShuffleMask, VF);
2609 }
2610 
2611 //===----------------------------------------------------------------------===//
2612 // InsertValueInst Class
2613 //===----------------------------------------------------------------------===//
2614 
2615 void InsertValueInst::init(Value *Agg, Value *Val, ArrayRef<unsigned> Idxs,
2616  const Twine &Name) {
2617  assert(getNumOperands() == 2 && "NumOperands not initialized?");
2618 
2619  // There's no fundamental reason why we require at least one index
2620  // (other than weirdness with &*IdxBegin being invalid; see
2621  // getelementptr's init routine for example). But there's no
2622  // present need to support it.
2623  assert(!Idxs.empty() && "InsertValueInst must have at least one index");
2624 
 2625  assert(ExtractValueInst::getIndexedType(Agg->getType(), Idxs) ==
 2626  Val->getType() && "Inserted value must match indexed type!");
2627  Op<0>() = Agg;
2628  Op<1>() = Val;
2629 
2630  Indices.append(Idxs.begin(), Idxs.end());
2631  setName(Name);
2632 }
2633 
2634 InsertValueInst::InsertValueInst(const InsertValueInst &IVI)
2635  : Instruction(IVI.getType(), InsertValue,
2636  OperandTraits<InsertValueInst>::op_begin(this), 2),
2637  Indices(IVI.Indices) {
2638  Op<0>() = IVI.getOperand(0);
2639  Op<1>() = IVI.getOperand(1);
 2640  SubclassOptionalData = IVI.SubclassOptionalData;
 2641 }
2642 
2643 //===----------------------------------------------------------------------===//
2644 // ExtractValueInst Class
2645 //===----------------------------------------------------------------------===//
2646 
2647 void ExtractValueInst::init(ArrayRef<unsigned> Idxs, const Twine &Name) {
2648  assert(getNumOperands() == 1 && "NumOperands not initialized?");
2649 
2650  // There's no fundamental reason why we require at least one index.
2651  // But there's no present need to support it.
2652  assert(!Idxs.empty() && "ExtractValueInst must have at least one index");
2653 
2654  Indices.append(Idxs.begin(), Idxs.end());
2655  setName(Name);
2656 }
2657 
2658 ExtractValueInst::ExtractValueInst(const ExtractValueInst &EVI)
2659  : UnaryInstruction(EVI.getType(), ExtractValue, EVI.getOperand(0)),
2660  Indices(EVI.Indices) {
 2661  SubclassOptionalData = EVI.SubclassOptionalData;
 2662 }
2663 
2664 // getIndexedType - Returns the type of the element that would be extracted
2665 // with an extractvalue instruction with the specified parameters.
2666 //
2667 // A null type is returned if the indices are invalid for the specified
2668 // pointer type.
2669 //
 2670 Type *ExtractValueInst::getIndexedType(Type *Agg,
 2671  ArrayRef<unsigned> Idxs) {
2672  for (unsigned Index : Idxs) {
2673  // We can't use CompositeType::indexValid(Index) here.
2674  // indexValid() always returns true for arrays because getelementptr allows
2675  // out-of-bounds indices. Since we don't allow those for extractvalue and
2676  // insertvalue we need to check array indexing manually.
2677  // Since the only other types we can index into are struct types it's just
2678  // as easy to check those manually as well.
2679  if (ArrayType *AT = dyn_cast<ArrayType>(Agg)) {
2680  if (Index >= AT->getNumElements())
2681  return nullptr;
2682  Agg = AT->getElementType();
2683  } else if (StructType *ST = dyn_cast<StructType>(Agg)) {
2684  if (Index >= ST->getNumElements())
2685  return nullptr;
2686  Agg = ST->getElementType(Index);
2687  } else {
2688  // Not a valid type to index into.
2689  return nullptr;
2690  }
2691  }
2692  return const_cast<Type*>(Agg);
2693 }
2694 
2695 //===----------------------------------------------------------------------===//
2696 // UnaryOperator Class
2697 //===----------------------------------------------------------------------===//
2698 
 2699 UnaryOperator::UnaryOperator(UnaryOps iType, Value *S,
 2700  Type *Ty, const Twine &Name,
2701  Instruction *InsertBefore)
2702  : UnaryInstruction(Ty, iType, S, InsertBefore) {
2703  Op<0>() = S;
2704  setName(Name);
2705  AssertOK();
2706 }
2707 
 2708 UnaryOperator::UnaryOperator(UnaryOps iType, Value *S,
 2709  Type *Ty, const Twine &Name,
2710  BasicBlock *InsertAtEnd)
2711  : UnaryInstruction(Ty, iType, S, InsertAtEnd) {
2712  Op<0>() = S;
2713  setName(Name);
2714  AssertOK();
2715 }
2716 
 2717 UnaryOperator *UnaryOperator::Create(UnaryOps Op, Value *S,
 2718  const Twine &Name,
2719  Instruction *InsertBefore) {
2720  return new UnaryOperator(Op, S, S->getType(), Name, InsertBefore);
2721 }
2722 
 2723 UnaryOperator *UnaryOperator::Create(UnaryOps Op, Value *S,
 2724  const Twine &Name,
2725  BasicBlock *InsertAtEnd) {
2726  UnaryOperator *Res = Create(Op, S, Name);
2727  InsertAtEnd->getInstList().push_back(Res);
2728  return Res;
2729 }
2730 
2731 void UnaryOperator::AssertOK() {
2732  Value *LHS = getOperand(0);
2733  (void)LHS; // Silence warnings.
2734 #ifndef NDEBUG
2735  switch (getOpcode()) {
2736  case FNeg:
2737  assert(getType() == LHS->getType() &&
2738  "Unary operation should return same type as operand!");
2739  assert(getType()->isFPOrFPVectorTy() &&
2740  "Tried to create a floating-point operation on a "
2741  "non-floating-point type!");
2742  break;
2743  default: llvm_unreachable("Invalid opcode provided");
2744  }
2745 #endif
2746 }
2747 
2748 //===----------------------------------------------------------------------===//
2749 // BinaryOperator Class
2750 //===----------------------------------------------------------------------===//
2751 
 2752 BinaryOperator::BinaryOperator(BinaryOps iType, Value *S1, Value *S2,
 2753  Type *Ty, const Twine &Name,
2754  Instruction *InsertBefore)
2755  : Instruction(Ty, iType,
2756  OperandTraits<BinaryOperator>::op_begin(this),
2757  OperandTraits<BinaryOperator>::operands(this),
2758  InsertBefore) {
2759  Op<0>() = S1;
2760  Op<1>() = S2;
2761  setName(Name);
2762  AssertOK();
2763 }
2764 
 2765 BinaryOperator::BinaryOperator(BinaryOps iType, Value *S1, Value *S2,
 2766  Type *Ty, const Twine &Name,
2767  BasicBlock *InsertAtEnd)
2768  : Instruction(Ty, iType,
2769  OperandTraits<BinaryOperator>::op_begin(this),
2770  OperandTraits<BinaryOperator>::operands(this),
2771  InsertAtEnd) {
2772  Op<0>() = S1;
2773  Op<1>() = S2;
2774  setName(Name);
2775  AssertOK();
2776 }
2777 
2778 void BinaryOperator::AssertOK() {
2779  Value *LHS = getOperand(0), *RHS = getOperand(1);
2780  (void)LHS; (void)RHS; // Silence warnings.
2781  assert(LHS->getType() == RHS->getType() &&
2782  "Binary operator operand types must match!");
2783 #ifndef NDEBUG
2784  switch (getOpcode()) {
2785  case Add: case Sub:
2786  case Mul:
2787  assert(getType() == LHS->getType() &&
2788  "Arithmetic operation should return same type as operands!");
2789  assert(getType()->isIntOrIntVectorTy() &&
2790  "Tried to create an integer operation on a non-integer type!");
2791  break;
2792  case FAdd: case FSub:
2793  case FMul:
2794  assert(getType() == LHS->getType() &&
2795  "Arithmetic operation should return same type as operands!");
2796  assert(getType()->isFPOrFPVectorTy() &&
2797  "Tried to create a floating-point operation on a "
2798  "non-floating-point type!");
2799  break;
2800  case UDiv:
2801  case SDiv:
2802  assert(getType() == LHS->getType() &&
2803  "Arithmetic operation should return same type as operands!");
2804  assert(getType()->isIntOrIntVectorTy() &&
2805  "Incorrect operand type (not integer) for S/UDIV");
2806  break;
2807  case FDiv:
2808  assert(getType() == LHS->getType() &&
2809  "Arithmetic operation should return same type as operands!");
2810  assert(getType()->isFPOrFPVectorTy() &&
2811  "Incorrect operand type (not floating point) for FDIV");
2812  break;
2813  case URem:
2814  case SRem:
2815  assert(getType() == LHS->getType() &&
2816  "Arithmetic operation should return same type as operands!");
2817  assert(getType()->isIntOrIntVectorTy() &&
2818  "Incorrect operand type (not integer) for S/UREM");
2819  break;
2820  case FRem:
2821  assert(getType() == LHS->getType() &&
2822  "Arithmetic operation should return same type as operands!");
2823  assert(getType()->isFPOrFPVectorTy() &&
2824  "Incorrect operand type (not floating point) for FREM");
2825  break;
2826  case Shl:
2827  case LShr:
2828  case AShr:
2829  assert(getType() == LHS->getType() &&
2830  "Shift operation should return same type as operands!");
2831  assert(getType()->isIntOrIntVectorTy() &&
2832  "Tried to create a shift operation on a non-integral type!");
2833  break;
2834  case And: case Or:
2835  case Xor:
2836  assert(getType() == LHS->getType() &&
2837  "Logical operation should return same type as operands!");
2838  assert(getType()->isIntOrIntVectorTy() &&
2839  "Tried to create a logical operation on a non-integral type!");
2840  break;
2841  default: llvm_unreachable("Invalid opcode provided");
2842  }
2843 #endif
2844 }
2845 
 2846 BinaryOperator *BinaryOperator::Create(BinaryOps Op, Value *S1, Value *S2,
 2847  const Twine &Name,
2848  Instruction *InsertBefore) {
2849  assert(S1->getType() == S2->getType() &&
2850  "Cannot create binary operator with two operands of differing type!");
2851  return new BinaryOperator(Op, S1, S2, S1->getType(), Name, InsertBefore);
2852 }
2853 
 2854 BinaryOperator *BinaryOperator::Create(BinaryOps Op, Value *S1, Value *S2,
 2855  const Twine &Name,
2856  BasicBlock *InsertAtEnd) {
2857  BinaryOperator *Res = Create(Op, S1, S2, Name);
2858  InsertAtEnd->getInstList().push_back(Res);
2859  return Res;
2860 }
2861 
 2862 BinaryOperator *BinaryOperator::CreateNeg(Value *Op, const Twine &Name,
 2863  Instruction *InsertBefore) {
 2864  Value *zero = ConstantFP::getZeroValueForNegation(Op->getType());
 2865  return new BinaryOperator(Instruction::Sub,
2866  zero, Op,
2867  Op->getType(), Name, InsertBefore);
2868 }
2869 
 2870 BinaryOperator *BinaryOperator::CreateNeg(Value *Op, const Twine &Name,
 2871  BasicBlock *InsertAtEnd) {
 2872  Value *zero = ConstantFP::getZeroValueForNegation(Op->getType());
 2873  return new BinaryOperator(Instruction::Sub,
2874  zero, Op,
2875  Op->getType(), Name, InsertAtEnd);
2876 }
2877 
 2878 BinaryOperator *BinaryOperator::CreateNSWNeg(Value *Op, const Twine &Name,
 2879  Instruction *InsertBefore) {
 2880  Value *zero = ConstantFP::getZeroValueForNegation(Op->getType());
 2881  return BinaryOperator::CreateNSWSub(zero, Op, Name, InsertBefore);
2882 }
2883 
 2884 BinaryOperator *BinaryOperator::CreateNSWNeg(Value *Op, const Twine &Name,
 2885  BasicBlock *InsertAtEnd) {
 2886  Value *zero = ConstantFP::getZeroValueForNegation(Op->getType());
 2887  return BinaryOperator::CreateNSWSub(zero, Op, Name, InsertAtEnd);
2888 }
2889 
 2890 BinaryOperator *BinaryOperator::CreateNUWNeg(Value *Op, const Twine &Name,
 2891  Instruction *InsertBefore) {
 2892  Value *zero = ConstantFP::getZeroValueForNegation(Op->getType());
 2893  return BinaryOperator::CreateNUWSub(zero, Op, Name, InsertBefore);
2894 }
2895 
 2896 BinaryOperator *BinaryOperator::CreateNUWNeg(Value *Op, const Twine &Name,
 2897  BasicBlock *InsertAtEnd) {
 2898  Value *zero = ConstantFP::getZeroValueForNegation(Op->getType());
 2899  return BinaryOperator::CreateNUWSub(zero, Op, Name, InsertAtEnd);
2900 }
2901 
 2902 BinaryOperator *BinaryOperator::CreateNot(Value *Op, const Twine &Name,
 2903  Instruction *InsertBefore) {
 2904  Constant *C = Constant::getAllOnesValue(Op->getType());
 2905  return new BinaryOperator(Instruction::Xor, Op, C,
2906  Op->getType(), Name, InsertBefore);
2907 }
2908 
 2909 BinaryOperator *BinaryOperator::CreateNot(Value *Op, const Twine &Name,
 2910  BasicBlock *InsertAtEnd) {
 2911  Constant *AllOnes = Constant::getAllOnesValue(Op->getType());
 2912  return new BinaryOperator(Instruction::Xor, Op, AllOnes,
2913  Op->getType(), Name, InsertAtEnd);
2914 }
2915 
2916 // Exchange the two operands to this instruction. This instruction is safe to
2917 // use on any binary instruction and does not modify the semantics of the
 2918 // instruction. If the instruction is order-dependent (e.g. SetLT), the opcode
2919 // is changed.
 2920 bool BinaryOperator::swapOperands() {
 2921  if (!isCommutative())
2922  return true; // Can't commute operands
2923  Op<0>().swap(Op<1>());
2924  return false;
2925 }
2926 
2927 //===----------------------------------------------------------------------===//
2928 // FPMathOperator Class
2929 //===----------------------------------------------------------------------===//
2930 
 2931 float FPMathOperator::getFPAccuracy() const {
 2932  const MDNode *MD =
2933  cast<Instruction>(this)->getMetadata(LLVMContext::MD_fpmath);
2934  if (!MD)
2935  return 0.0;
2936  ConstantFP *Accuracy = mdconst::extract<ConstantFP>(MD->getOperand(0));
2937  return Accuracy->getValueAPF().convertToFloat();
2938 }
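// Illustrative example (not part of the original source): the accuracy
// comes from !fpmath metadata attached to the instruction, e.g. in IR:
//   %r = fdiv float %x, %y, !fpmath !0
//   !0 = !{float 2.5}
// for which getFPAccuracy() returns 2.5 ULPs; 0.0 means no requirement.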
2939 
2940 //===----------------------------------------------------------------------===//
2941 // CastInst Class
2942 //===----------------------------------------------------------------------===//
2943 
2944 // Just determine if this cast only deals with integral->integral conversion.
 2945 bool CastInst::isIntegerCast() const {
 2946  switch (getOpcode()) {
2947  default: return false;
2948  case Instruction::ZExt:
2949  case Instruction::SExt:
2950  case Instruction::Trunc:
2951  return true;
2952  case Instruction::BitCast:
2953  return getOperand(0)->getType()->isIntegerTy() &&
2954  getType()->isIntegerTy();
2955  }
2956 }
2957 
 2958 bool CastInst::isLosslessCast() const {
 2959  // Only BitCast can be lossless, exit fast if we're not BitCast
2960  if (getOpcode() != Instruction::BitCast)
2961  return false;
2962 
2963  // Identity cast is always lossless
2964  Type *SrcTy = getOperand(0)->getType();
2965  Type *DstTy = getType();
2966  if (SrcTy == DstTy)
2967  return true;
2968 
2969  // Pointer to pointer is always lossless.
2970  if (SrcTy->isPointerTy())
2971  return DstTy->isPointerTy();
2972  return false; // Other types have no identity values
2973 }
2974 
2975 /// This function determines if the CastInst does not require any bits to be
2976 /// changed in order to effect the cast. Essentially, it identifies cases where
2977 /// no code gen is necessary for the cast, hence the name no-op cast. For
2978 /// example, the following are all no-op casts:
2979 /// # bitcast i32* %x to i8*
2980 /// # bitcast <2 x i32> %x to <4 x i16>
 2981 /// # ptrtoint i32* %x to i32 ; on 32-bit platforms only
2982 /// Determine if the described cast is a no-op.
 2983 bool CastInst::isNoopCast(Instruction::CastOps Opcode,
 2984  Type *SrcTy,
2985  Type *DestTy,
2986  const DataLayout &DL) {
2987  assert(castIsValid(Opcode, SrcTy, DestTy) && "method precondition");
2988  switch (Opcode) {
2989  default: llvm_unreachable("Invalid CastOp");
2990  case Instruction::Trunc:
2991  case Instruction::ZExt:
2992  case Instruction::SExt:
2993  case Instruction::FPTrunc:
2994  case Instruction::FPExt:
2995  case Instruction::UIToFP:
2996  case Instruction::SIToFP:
2997  case Instruction::FPToUI:
2998  case Instruction::FPToSI:
2999  case Instruction::AddrSpaceCast:
 3000  // TODO: Target information may give a more accurate answer here.
3001  return false;
3002  case Instruction::BitCast:
3003  return true; // BitCast never modifies bits.
3004  case Instruction::PtrToInt:
3005  return DL.getIntPtrType(SrcTy)->getScalarSizeInBits() ==
3006  DestTy->getScalarSizeInBits();
3007  case Instruction::IntToPtr:
3008  return DL.getIntPtrType(DestTy)->getScalarSizeInBits() ==
3009  SrcTy->getScalarSizeInBits();
3010  }
3011 }
3012 
3013 bool CastInst::isNoopCast(const DataLayout &DL) const {
3014  return isNoopCast(getOpcode(), getOperand(0)->getType(), getType(), DL);
3015 }
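// Illustrative example (not part of the original source): on a target with
// 64-bit pointers, "ptrtoint ptr %p to i64" is a no-op cast (the bits are
// unchanged), while "ptrtoint ptr %p to i32" is not.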
3016 
3017 /// This function determines if a pair of casts can be eliminated and what
3018 /// opcode should be used in the elimination. This assumes that there are two
3019 /// instructions like this:
3020 /// * %F = firstOpcode SrcTy %x to MidTy
3021 /// * %S = secondOpcode MidTy %F to DstTy
3022 /// The function returns a resultOpcode so these two casts can be replaced with:
3023 /// * %Replacement = resultOpcode %SrcTy %x to DstTy
3024 /// If no such cast is permitted, the function returns 0.
 3025 unsigned CastInst::isEliminableCastPair(
 3026  Instruction::CastOps firstOp, Instruction::CastOps secondOp,
3027  Type *SrcTy, Type *MidTy, Type *DstTy, Type *SrcIntPtrTy, Type *MidIntPtrTy,
3028  Type *DstIntPtrTy) {
3029  // Define the 144 possibilities for these two cast instructions. The values
3030  // in this matrix determine what to do in a given situation and select the
3031  // case in the switch below. The rows correspond to firstOp, the columns
3032  // correspond to secondOp. In looking at the table below, keep in mind
3033  // the following cast properties:
3034  //
3035  // Size Compare Source Destination
3036  // Operator Src ? Size Type Sign Type Sign
3037  // -------- ------------ ------------------- ---------------------
3038  // TRUNC > Integer Any Integral Any
3039  // ZEXT < Integral Unsigned Integer Any
3040  // SEXT < Integral Signed Integer Any
3041  // FPTOUI n/a FloatPt n/a Integral Unsigned
3042  // FPTOSI n/a FloatPt n/a Integral Signed
3043  // UITOFP n/a Integral Unsigned FloatPt n/a
3044  // SITOFP n/a Integral Signed FloatPt n/a
3045  // FPTRUNC > FloatPt n/a FloatPt n/a
3046  // FPEXT < FloatPt n/a FloatPt n/a
3047  // PTRTOINT n/a Pointer n/a Integral Unsigned
3048  // INTTOPTR n/a Integral Unsigned Pointer n/a
3049  // BITCAST = FirstClass n/a FirstClass n/a
3050  // ADDRSPCST n/a Pointer n/a Pointer n/a
3051  //
3052  // NOTE: some transforms are safe, but we consider them to be non-profitable.
3053  // For example, we could merge "fptoui double to i32" + "zext i32 to i64",
3054  // into "fptoui double to i64", but this loses information about the range
3055  // of the produced value (we no longer know the top-part is all zeros).
3056  // Further this conversion is often much more expensive for typical hardware,
3057  // and causes issues when building libgcc. We disallow fptosi+sext for the
3058  // same reason.
3059  const unsigned numCastOps =
3060  Instruction::CastOpsEnd - Instruction::CastOpsBegin;
3061  static const uint8_t CastResults[numCastOps][numCastOps] = {
3062  // T F F U S F F P I B A -+
3063  // R Z S P P I I T P 2 N T S |
3064  // U E E 2 2 2 2 R E I T C C +- secondOp
3065  // N X X U S F F N X N 2 V V |
3066  // C T T I I P P C T T P T T -+
3067  { 1, 0, 0,99,99, 0, 0,99,99,99, 0, 3, 0}, // Trunc -+
3068  { 8, 1, 9,99,99, 2,17,99,99,99, 2, 3, 0}, // ZExt |
3069  { 8, 0, 1,99,99, 0, 2,99,99,99, 0, 3, 0}, // SExt |
3070  { 0, 0, 0,99,99, 0, 0,99,99,99, 0, 3, 0}, // FPToUI |
3071  { 0, 0, 0,99,99, 0, 0,99,99,99, 0, 3, 0}, // FPToSI |
3072  { 99,99,99, 0, 0,99,99, 0, 0,99,99, 4, 0}, // UIToFP +- firstOp
3073  { 99,99,99, 0, 0,99,99, 0, 0,99,99, 4, 0}, // SIToFP |
3074  { 99,99,99, 0, 0,99,99, 0, 0,99,99, 4, 0}, // FPTrunc |
3075  { 99,99,99, 2, 2,99,99, 8, 2,99,99, 4, 0}, // FPExt |
3076  { 1, 0, 0,99,99, 0, 0,99,99,99, 7, 3, 0}, // PtrToInt |
3077  { 99,99,99,99,99,99,99,99,99,11,99,15, 0}, // IntToPtr |
3078  { 5, 5, 5, 6, 6, 5, 5, 6, 6,16, 5, 1,14}, // BitCast |
3079  { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,13,12}, // AddrSpaceCast -+
3080  };
3081 
3082  // TODO: This logic could be encoded into the table above and handled in the
3083  // switch below.
3084  // If either of the casts are a bitcast from scalar to vector, disallow the
3085  // merging. However, any pair of bitcasts are allowed.
3086  bool IsFirstBitcast = (firstOp == Instruction::BitCast);
3087  bool IsSecondBitcast = (secondOp == Instruction::BitCast);
3088  bool AreBothBitcasts = IsFirstBitcast && IsSecondBitcast;
3089 
3090  // Check if any of the casts convert scalars <-> vectors.
3091  if ((IsFirstBitcast && isa<VectorType>(SrcTy) != isa<VectorType>(MidTy)) ||
3092  (IsSecondBitcast && isa<VectorType>(MidTy) != isa<VectorType>(DstTy)))
3093  if (!AreBothBitcasts)
3094  return 0;
3095 
3096  int ElimCase = CastResults[firstOp-Instruction::CastOpsBegin]
3097  [secondOp-Instruction::CastOpsBegin];
3098  switch (ElimCase) {
3099  case 0:
3100  // Categorically disallowed.
3101  return 0;
3102  case 1:
3103  // Allowed, use first cast's opcode.
3104  return firstOp;
3105  case 2:
3106  // Allowed, use second cast's opcode.
3107  return secondOp;
3108  case 3:
3109  // No-op cast in second op implies firstOp as long as the DestTy
3110  // is integer and we are not converting between a vector and a
3111  // non-vector type.
3112  if (!SrcTy->isVectorTy() && DstTy->isIntegerTy())
3113  return firstOp;
3114  return 0;
3115  case 4:
3116  // No-op cast in second op implies firstOp as long as the DestTy
3117  // is floating point.
3118  if (DstTy->isFloatingPointTy())
3119  return firstOp;
3120  return 0;
3121  case 5:
3122  // No-op cast in first op implies secondOp as long as the SrcTy
3123  // is an integer.
3124  if (SrcTy->isIntegerTy())
3125  return secondOp;
3126  return 0;
3127  case 6:
3128  // No-op cast in first op implies secondOp as long as the SrcTy
3129  // is a floating point.
3130  if (SrcTy->isFloatingPointTy())
3131  return secondOp;
3132  return 0;
3133  case 7: {
 3134  // Bail out if the inttoptr/ptrtoint roundtrip optimization is disabled.
3135  if (DisableI2pP2iOpt)
3136  return 0;
3137 
3138  // Cannot simplify if address spaces are different!
3139  if (SrcTy->getPointerAddressSpace() != DstTy->getPointerAddressSpace())
3140  return 0;
3141 
3142  unsigned MidSize = MidTy->getScalarSizeInBits();
 3143  // We can still fold this without knowing the actual sizes as long as we
3144  // know that the intermediate pointer is the largest possible
3145  // pointer size.
3146  // FIXME: Is this always true?
3147  if (MidSize == 64)
3148  return Instruction::BitCast;
3149 
3150  // ptrtoint, inttoptr -> bitcast (ptr -> ptr) if int size is >= ptr size.
3151  if (!SrcIntPtrTy || DstIntPtrTy != SrcIntPtrTy)
3152  return 0;
3153  unsigned PtrSize = SrcIntPtrTy->getScalarSizeInBits();
3154  if (MidSize >= PtrSize)
3155  return Instruction::BitCast;
3156  return 0;
3157  }
3158  case 8: {
3159  // ext, trunc -> bitcast, if the SrcTy and DstTy are the same
3160  // ext, trunc -> ext, if sizeof(SrcTy) < sizeof(DstTy)
3161  // ext, trunc -> trunc, if sizeof(SrcTy) > sizeof(DstTy)
3162  unsigned SrcSize = SrcTy->getScalarSizeInBits();
3163  unsigned DstSize = DstTy->getScalarSizeInBits();
3164  if (SrcTy == DstTy)
3165  return Instruction::BitCast;
3166  if (SrcSize < DstSize)
3167  return firstOp;
3168  if (SrcSize > DstSize)
3169  return secondOp;
3170  return 0;
3171  }
3172  case 9:
3173  // zext, sext -> zext, because sext can't sign extend after zext
3174  return Instruction::ZExt;
3175  case 11: {
3176  // inttoptr, ptrtoint -> bitcast if SrcSize<=PtrSize and SrcSize==DstSize
3177  if (!MidIntPtrTy)
3178  return 0;
3179  unsigned PtrSize = MidIntPtrTy->getScalarSizeInBits();
3180  unsigned SrcSize = SrcTy->getScalarSizeInBits();
3181  unsigned DstSize = DstTy->getScalarSizeInBits();
3182  if (SrcSize <= PtrSize && SrcSize == DstSize)
3183  return Instruction::BitCast;
3184  return 0;
3185  }
3186  case 12:
3187  // addrspacecast, addrspacecast -> bitcast, if SrcAS == DstAS
3188  // addrspacecast, addrspacecast -> addrspacecast, if SrcAS != DstAS
3189  if (SrcTy->getPointerAddressSpace() != DstTy->getPointerAddressSpace())
3190  return Instruction::AddrSpaceCast;
3191  return Instruction::BitCast;
3192  case 13:
3193  // FIXME: this state can be merged with (1), but the following assert
 3194  // is useful to check the correctness of the sequence due to semantic
3195  // change of bitcast.
3196  assert(
3197  SrcTy->isPtrOrPtrVectorTy() &&
3198  MidTy->isPtrOrPtrVectorTy() &&
3199  DstTy->isPtrOrPtrVectorTy() &&
3200  SrcTy->getPointerAddressSpace() != MidTy->getPointerAddressSpace() &&
3201  MidTy->getPointerAddressSpace() == DstTy->getPointerAddressSpace() &&
3202  "Illegal addrspacecast, bitcast sequence!");
3203  // Allowed, use first cast's opcode
3204  return firstOp;
3205  case 14: {
3206  // bitcast, addrspacecast -> addrspacecast if the element type of
3207  // bitcast's source is the same as that of addrspacecast's destination.
3208  PointerType *SrcPtrTy = cast<PointerType>(SrcTy->getScalarType());
3209  PointerType *DstPtrTy = cast<PointerType>(DstTy->getScalarType());
3210  if (SrcPtrTy->hasSameElementTypeAs(DstPtrTy))
3211  return Instruction::AddrSpaceCast;
3212  return 0;
3213  }
3214  case 15:
3215  // FIXME: this state can be merged with (1), but the following assert
 3216  // is useful to check the correctness of the sequence due to semantic
3217  // change of bitcast.
3218  assert(
3219  SrcTy->isIntOrIntVectorTy() &&
3220  MidTy->isPtrOrPtrVectorTy() &&
3221  DstTy->isPtrOrPtrVectorTy() &&
3222  MidTy->getPointerAddressSpace() == DstTy->getPointerAddressSpace() &&
3223  "Illegal inttoptr, bitcast sequence!");
3224  // Allowed, use first cast's opcode
3225  return firstOp;
3226  case 16:
3227  // FIXME: this state can be merged with (2), but the following assert
 3228  // is useful to check the correctness of the sequence due to semantic
3229  // change of bitcast.
3230  assert(
3231  SrcTy->isPtrOrPtrVectorTy() &&
3232  MidTy->isPtrOrPtrVectorTy() &&
3233  DstTy->isIntOrIntVectorTy() &&
3234  SrcTy->getPointerAddressSpace() == MidTy->getPointerAddressSpace() &&
3235  "Illegal bitcast, ptrtoint sequence!");
3236  // Allowed, use second cast's opcode
3237  return secondOp;
3238  case 17:
3239  // (sitofp (zext x)) -> (uitofp x)
3240  return Instruction::UIToFP;
3241  case 99:
3242  // Cast combination can't happen (error in input). This is for all cases
3243  // where the MidTy is not the same for the two cast instructions.
3244  llvm_unreachable("Invalid Cast Combination");
3245  default:
3246  llvm_unreachable("Error in CastResults table!!!");
3247  }
3248 }
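// Illustrative example (not part of the original source): case 8 above
// folds the pair
//   %f = zext i16 %x to i64
//   %s = trunc i64 %f to i32
// into the single cast "zext i16 %x to i32" (the first opcode), because
// sizeof(SrcTy) < sizeof(DstTy).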
3249 
 3250 CastInst *CastInst::Create(Instruction::CastOps op, Value *S, Type *Ty,
 3251  const Twine &Name, Instruction *InsertBefore) {
3252  assert(castIsValid(op, S, Ty) && "Invalid cast!");
3253  // Construct and return the appropriate CastInst subclass
3254  switch (op) {
3255  case Trunc: return new TruncInst (S, Ty, Name, InsertBefore);
3256  case ZExt: return new ZExtInst (S, Ty, Name, InsertBefore);
3257  case SExt: return new SExtInst (S, Ty, Name, InsertBefore);
3258  case FPTrunc: return new FPTruncInst (S, Ty, Name, InsertBefore);
3259  case FPExt: return new FPExtInst (S, Ty, Name, InsertBefore);
3260  case UIToFP: return new UIToFPInst (S, Ty, Name, InsertBefore);
3261  case SIToFP: return new SIToFPInst (S, Ty, Name, InsertBefore);
3262  case FPToUI: return new FPToUIInst (S, Ty, Name, InsertBefore);
3263  case FPToSI: return new FPToSIInst (S, Ty, Name, InsertBefore);
3264  case PtrToInt: return new PtrToIntInst (S, Ty, Name, InsertBefore);
3265  case IntToPtr: return new IntToPtrInst (S, Ty, Name, InsertBefore);
3266  case BitCast: return new BitCastInst (S, Ty, Name, InsertBefore);
3267  case AddrSpaceCast: return new AddrSpaceCastInst (S, Ty, Name, InsertBefore);
3268  default: llvm_unreachable("Invalid opcode provided");
3269  }
3270 }
3271 
 3272 CastInst *CastInst::Create(Instruction::CastOps op, Value *S, Type *Ty,
 3273  const Twine &Name, BasicBlock *InsertAtEnd) {
3274  assert(castIsValid(op, S, Ty) && "Invalid cast!");
3275  // Construct and return the appropriate CastInst subclass
3276  switch (op) {
3277  case Trunc: return new TruncInst (S, Ty, Name, InsertAtEnd);
3278  case ZExt: return new ZExtInst (S, Ty, Name, InsertAtEnd);
3279  case SExt: return new SExtInst (S, Ty, Name, InsertAtEnd);
3280  case FPTrunc: return new FPTruncInst (S, Ty, Name, InsertAtEnd);
3281  case FPExt: return new FPExtInst (S, Ty, Name, InsertAtEnd);
3282  case UIToFP: return new UIToFPInst (S, Ty, Name, InsertAtEnd);
3283  case SIToFP: return new SIToFPInst (S, Ty, Name, InsertAtEnd);
3284  case FPToUI: return new FPToUIInst (S, Ty, Name, InsertAtEnd);
3285  case FPToSI: return new FPToSIInst (S, Ty, Name, InsertAtEnd);
3286  case PtrToInt: return new PtrToIntInst (S, Ty, Name, InsertAtEnd);
3287  case IntToPtr: return new IntToPtrInst (S, Ty, Name, InsertAtEnd);
3288  case BitCast: return new BitCastInst (S, Ty, Name, InsertAtEnd);
3289  case AddrSpaceCast: return new AddrSpaceCastInst (S, Ty, Name, InsertAtEnd);
3290  default: llvm_unreachable("Invalid opcode provided");
3291  }
3292 }
3293 
3294 CastInst *CastInst::CreateZExtOrBitCast(Value *S, Type *Ty,
3295  const Twine &Name,
3296  Instruction *InsertBefore) {
3297  if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
3298  return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
3299  return Create(Instruction::ZExt, S, Ty, Name, InsertBefore);
3300 }
3301 
3302 CastInst *CastInst::CreateZExtOrBitCast(Value *S, Type *Ty,
3303  const Twine &Name,
3304  BasicBlock *InsertAtEnd) {
3305  if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
3306  return Create(Instruction::BitCast, S, Ty, Name, InsertAtEnd);
3307  return Create(Instruction::ZExt, S, Ty, Name, InsertAtEnd);
3308 }
3309 
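// Example sketch: these helpers widen only when the bit widths differ.
// Assuming hypothetical i32/i64 types I32 and I64 and an i32 value X:
//
//   CastInst::CreateZExtOrBitCast(X, I64, "w", InsertPt); // emits zext
//   CastInst::CreateZExtOrBitCast(X, I32, "n", InsertPt); // emits bitcast
//
// The SExt and Trunc variants below apply the same size test.
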
3310 CastInst *CastInst::CreateSExtOrBitCast(Value *S, Type *Ty,
3311  const Twine &Name,
3312  Instruction *InsertBefore) {
3313  if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
3314  return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
3315  return Create(Instruction::SExt, S, Ty, Name, InsertBefore);
3316 }
3317 
3318 CastInst *CastInst::CreateSExtOrBitCast(Value *S, Type *Ty,
3319  const Twine &Name,
3320  BasicBlock *InsertAtEnd) {
3321  if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
3322  return Create(Instruction::BitCast, S, Ty, Name, InsertAtEnd);
3323  return Create(Instruction::SExt, S, Ty, Name, InsertAtEnd);
3324 }
3325 
3326 CastInst *CastInst::CreateTruncOrBitCast(Value *S, Type *Ty,
3327  const Twine &Name,
3328  Instruction *InsertBefore) {
3329  if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
3330  return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
3331  return Create(Instruction::Trunc, S, Ty, Name, InsertBefore);
3332 }
3333 
3334 CastInst *CastInst::CreateTruncOrBitCast(Value *S, Type *Ty,
3335  const Twine &Name,
3336  BasicBlock *InsertAtEnd) {
3337  if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
3338  return Create(Instruction::BitCast, S, Ty, Name, InsertAtEnd);
3339  return Create(Instruction::Trunc, S, Ty, Name, InsertAtEnd);
3340 }
3341 
3342 CastInst *CastInst::CreatePointerCast(Value *S, Type *Ty,
3343  const Twine &Name,
3344  BasicBlock *InsertAtEnd) {
3345  assert(S->getType()->isPtrOrPtrVectorTy() && "Invalid cast");
3346  assert((Ty->isIntOrIntVectorTy() || Ty->isPtrOrPtrVectorTy()) &&
3347  "Invalid cast");
3348  assert(Ty->isVectorTy() == S->getType()->isVectorTy() && "Invalid cast");
3349  assert((!Ty->isVectorTy() ||
3350  cast<VectorType>(Ty)->getElementCount() ==
3351  cast<VectorType>(S->getType())->getElementCount()) &&
3352  "Invalid cast");
3353 
3354  if (Ty->isIntOrIntVectorTy())
3355  return Create(Instruction::PtrToInt, S, Ty, Name, InsertAtEnd);
3356 
3357  return CreatePointerBitCastOrAddrSpaceCast(S, Ty, Name, InsertAtEnd);
3358 }
3359 
3360 /// Create a BitCast or a PtrToInt cast instruction
3361 CastInst *CastInst::CreatePointerCast(Value *S, Type *Ty,
3362  const Twine &Name,
3363  Instruction *InsertBefore) {
3364  assert(S->getType()->isPtrOrPtrVectorTy() && "Invalid cast");
3365  assert((Ty->isIntOrIntVectorTy() || Ty->isPtrOrPtrVectorTy()) &&
3366  "Invalid cast");
3367  assert(Ty->isVectorTy() == S->getType()->isVectorTy() && "Invalid cast");
3368  assert((!Ty->isVectorTy() ||
3369  cast<VectorType>(Ty)->getElementCount() ==
3370  cast<VectorType>(S->getType())->getElementCount()) &&
3371  "Invalid cast");
3372 
3373  if (Ty->isIntOrIntVectorTy())
3374  return Create(Instruction::PtrToInt, S, Ty, Name, InsertBefore);
3375 
3376  return CreatePointerBitCastOrAddrSpaceCast(S, Ty, Name, InsertBefore);
3377 }
3378 
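// Example sketch: CreatePointerCast picks ptrtoint for integer destinations
// and otherwise defers to the bitcast/addrspacecast helper. With a
// hypothetical pointer value P, integer type I64, and pointer type PtrAS1
// in addrspace(1):
//
//   CastInst::CreatePointerCast(P, I64, "p.int", InsertPt);    // ptrtoint
//   CastInst::CreatePointerCast(P, PtrAS1, "p.as1", InsertPt); // addrspacecast
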
3379 CastInst *CastInst::CreatePointerBitCastOrAddrSpaceCast(
3380  Value *S, Type *Ty,
3381  const Twine &Name,
3382  BasicBlock *InsertAtEnd) {
3383  assert(S->getType()->isPtrOrPtrVectorTy() && "Invalid cast");
3384  assert(Ty->isPtrOrPtrVectorTy() && "Invalid cast");
3385 
3386  if (S->getType()->getPointerAddressSpace() != Ty->getPointerAddressSpace())
3387  return Create(Instruction::AddrSpaceCast, S, Ty, Name, InsertAtEnd);
3388 
3389  return Create(Instruction::BitCast, S, Ty, Name, InsertAtEnd);
3390 }
3391 
3392 CastInst *CastInst::CreatePointerBitCastOrAddrSpaceCast(
3393  Value *S, Type *Ty,
3394  const Twine &Name,
3395  Instruction *InsertBefore) {
3396  assert(S->getType()->isPtrOrPtrVectorTy() && "Invalid cast");
3397  assert(Ty->isPtrOrPtrVectorTy() && "Invalid cast");
3398 
3399  if (S->getType()->getPointerAddressSpace() != Ty->getPointerAddressSpace())
3400  return Create(Instruction::AddrSpaceCast, S, Ty, Name, InsertBefore);
3401 
3402  return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
3403 }
3404 
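// Example sketch: the choice hinges only on the address spaces. For a
// hypothetical addrspace(0) pointer P0 and destination pointer types:
//
//   CreatePointerBitCastOrAddrSpaceCast(P0, PtrTyAS1, ...); // addrspacecast
//   CreatePointerBitCastOrAddrSpaceCast(P0, PtrTyAS0, ...); // bitcast
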
3405 CastInst *CastInst::CreateBitOrPtrCast(Value *S, Type *Ty,
3406  const Twine &Name,
3407  Instruction *InsertBefore) {
3408  if (S->getType()->isPointerTy() && Ty->isIntegerTy())
3409  return Create(Instruction::PtrToInt, S, Ty, Name, InsertBefore);
3410  if (S->getType()->isIntegerTy() && Ty->isPointerTy())
3411  return Create(Instruction::IntToPtr, S, Ty, Name, InsertBefore);
3412 
3413  return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
3414 }
3415 
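// Example sketch: CreateBitOrPtrCast additionally handles the int <-> ptr
// directions. With a hypothetical i64 value N, pointer value P, and types
// PtrTy/I64Ty:
//
//   CastInst::CreateBitOrPtrCast(N, PtrTy, "p", InsertPt); // inttoptr
//   CastInst::CreateBitOrPtrCast(P, I64Ty, "n", InsertPt); // ptrtoint
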
3416 CastInst *CastInst::CreateIntegerCast(Value *C, Type *Ty,
3417  bool isSigned, const Twine &Name,
3418  Instruction *InsertBefore) {
3419  assert(C->getType()->isIntOrIntVectorTy() && Ty->isIntOrIntVectorTy() &&
3420  "Invalid integer cast");
3421  unsigned SrcBits = C->getType()->getScalarSizeInBits();
3422  unsigned DstBits = Ty->getScalarSizeInBits();
3423  Instruction::CastOps opcode =
3424  (SrcBits == DstBits ? Instruction::BitCast :
3425  (SrcBits > DstBits ? Instruction::Trunc :
3426  (isSigned ? Instruction::SExt : Instruction::ZExt)));
3427  return Create(opcode, C, Ty, Name, InsertBefore);
3428 }
3429 
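// Example sketch: the opcode follows from the relative bit widths plus the
// signedness flag. For a hypothetical i16 value V16 and types I8/I32:
//
//   CastInst::CreateIntegerCast(V16, I32, /*isSigned=*/true,  "s", InsertPt); // sext
//   CastInst::CreateIntegerCast(V16, I32, /*isSigned=*/false, "z", InsertPt); // zext
//   CastInst::CreateIntegerCast(V16, I8,  /*isSigned=*/true,  "t", InsertPt); // trunc
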
3430 CastInst *CastInst::CreateIntegerCast(Value *C, Type *Ty,
3431  bool isSigned, const Twine &Name,
3432  BasicBlock *InsertAtEnd) {
3433  assert(C->getType()->isIntOrIntVectorTy() && Ty->isIntOrIntVectorTy() &&
3434  "Invalid cast");
3435  unsigned SrcBits = C->getType()->getScalarSizeInBits();
3436  unsigned DstBits = Ty->getScalarSizeInBits();
3437  Instruction::CastOps opcode =
3438  (SrcBits == DstBits ? Instruction::BitCast :
3439  (SrcBits > DstBits ? Instruction::Trunc :
3440  (isSigned ? Instruction::SExt : Instruction::ZExt)));
3441  return Create(opcode, C, Ty, Name, InsertAtEnd);
3442 }
3443 
3444 CastInst *CastInst::CreateFPCast(Value *C, Type *Ty,
3445  const Twine &Name,
3446  Instruction *InsertBefore) {
3447  assert(C->getType()->isFPOrFPVectorTy() && Ty->isFPOrFPVectorTy() &&
3448  "Invalid cast");
3449  unsigned SrcBits = C->getType()->getScalarSizeInBits();
3450  unsigned DstBits = Ty->getScalarSizeInBits();
3451  Instruction::CastOps opcode =
3452  (SrcBits == DstBits ? Instruction::BitCast :
3453  (SrcBits > DstBits ? Instruction::FPTrunc : Instruction::FPExt));
3454  return Create(opcode, C, Ty, Name, InsertBefore);
3455 }
3456 
3457 CastInst *CastInst::CreateFPCast(Value *C, Type *Ty,
3458  const Twine &Name,
3459  BasicBlock *InsertAtEnd) {
3460  assert(C->getType()->isFPOrFPVectorTy() && Ty->isFPOrFPVectorTy() &&
3461  "Invalid cast");
3462  unsigned SrcBits = C->getType()->getScalarSizeInBits();
3463  unsigned DstBits = Ty->getScalarSizeInBits();
3464  Instruction::CastOps opcode =
3465  (SrcBits == DstBits ? Instruction::BitCast :
3466  (SrcBits > DstBits ? Instruction::FPTrunc : Instruction::FPExt));
3467  return Create(opcode, C, Ty, Name, InsertAtEnd);
3468 }
3469 
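// Example sketch, with a hypothetical float value F and Ctx in scope:
//
//   CastInst::CreateFPCast(F, Type::getDoubleTy(Ctx), "e", InsertPt); // fpext
//   CastInst::CreateFPCast(F, Type::getHalfTy(Ctx),   "t", InsertPt); // fptrunc
//   CastInst::CreateFPCast(F, Type::getFloatTy(Ctx),  "b", InsertPt); // bitcast
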
3470 bool CastInst::isBitCastable(Type *SrcTy, Type *DestTy) {
3471  if (!SrcTy->isFirstClassType() || !DestTy->isFirstClassType())
3472  return false;
3473 
3474  if (SrcTy == DestTy)
3475  return true;
3476 
3477  if (VectorType *SrcVecTy = dyn_cast<VectorType>(SrcTy)) {
3478  if (VectorType *DestVecTy = dyn_cast<VectorType>(DestTy)) {
3479  if (SrcVecTy->getElementCount() == DestVecTy->getElementCount()) {
3480  // An element by element cast. Valid if casting the elements is valid.
3481  SrcTy = SrcVecTy->getElementType();
3482  DestTy = DestVecTy->getElementType();
3483  }
3484  }
3485  }
3486 
3487  if (PointerType *DestPtrTy = dyn_cast<PointerType>(DestTy)) {
3488  if (PointerType *SrcPtrTy = dyn_cast<PointerType>(SrcTy)) {
3489  return SrcPtrTy->getAddressSpace() == DestPtrTy->getAddressSpace();
3490  }
3491  }
3492 
3493  TypeSize SrcBits = SrcTy->getPrimitiveSizeInBits(); // 0 for ptr
3494  TypeSize DestBits = DestTy->getPrimitiveSizeInBits(); // 0 for ptr
3495 
3496  // We could still have vectors of pointers if the number of elements
3497  // doesn't match.
3498  if (SrcBits.getKnownMinSize() == 0 || DestBits.getKnownMinSize() == 0)
3499  return false;
3500 
3501  if (SrcBits != DestBits)
3502  return false;
3503 
3504  if (DestTy->isX86_MMXTy() || SrcTy->isX86_MMXTy())
3505  return false;
3506 
3507  return true;
3508 }
3509 
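// Example sketch of the rules above (Ctx assumed to be an LLVMContext):
//
//   CastInst::isBitCastable(Type::getInt32Ty(Ctx),
//                           Type::getFloatTy(Ctx)); // true: both 32 bits
//   CastInst::isBitCastable(Type::getInt32Ty(Ctx),
//                           Type::getInt64Ty(Ctx)); // false: widths differ
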
3510 bool CastInst::isBitOrNoopPointerCastable(Type *SrcTy, Type *DestTy,
3511  const DataLayout &DL) {
3512  // ptrtoint and inttoptr are not allowed on non-integral pointers
3513  if (auto *PtrTy = dyn_cast<PointerType>(SrcTy))
3514  if (auto *IntTy = dyn_cast<IntegerType>(DestTy))
3515  return (IntTy->getBitWidth() == DL.getPointerTypeSizeInBits(PtrTy) &&
3516  !DL.isNonIntegralPointerType(PtrTy));
3517  if (auto *PtrTy = dyn_cast<PointerType>(DestTy))
3518  if (auto *IntTy = dyn_cast<IntegerType>(SrcTy))
3519  return (IntTy->getBitWidth() == DL.getPointerTypeSizeInBits(PtrTy) &&
3520  !DL.isNonIntegralPointerType(PtrTy));
3521 
3522  return isBitCastable(SrcTy, DestTy);
3523 }
3524 
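// Example sketch: with a DataLayout DL that uses 64-bit integral pointers in
// addrspace(0) and a hypothetical pointer type PtrTy in that space:
//
//   CastInst::isBitOrNoopPointerCastable(PtrTy, Type::getInt64Ty(Ctx), DL);
//   // true: the width matches and the pointer type is not non-integral
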
3525 // Provide a way to get a "cast" where the cast opcode is inferred from the
3526 // types and size of the operand. This, basically, parallels the logic in the
3527 // castIsValid function below. The following invariant should hold:
3528 //   castIsValid(getCastOpcode(Val, Ty), Val, Ty)
3529 // must never assert. In other words, this produces a "correct" casting
3530 // opcode for the arguments passed to it.
3531 Instruction::CastOps
3532 CastInst::getCastOpcode(
3533  const Value *Src, bool SrcIsSigned, Type *DestTy, bool DestIsSigned) {
3534  Type *SrcTy = Src->getType();
3535 
3536  assert(SrcTy->isFirstClassType() && DestTy->isFirstClassType() &&
3537  "Only first class types are castable!");
3538 
3539  if (SrcTy == DestTy)
3540  return BitCast;
3541 
3542  // FIXME: Check address space sizes here
3543  if (VectorType *SrcVecTy = dyn_cast<VectorType>(SrcTy))
3544  if (VectorType *DestVecTy = dyn_cast<VectorType>(DestTy))
3545  if (SrcVecTy->getElementCount() == DestVecTy->getElementCount()) {
3546  // An element by element cast. Find the appropriate opcode based on the
3547  // element types.
3548  SrcTy = SrcVecTy->getElementType();
3549  DestTy = DestVecTy->getElementType();
3550  }
3551 
3552  // Get the bit sizes, we'll need these
3553  unsigned SrcBits = SrcTy->getPrimitiveSizeInBits(); // 0 for ptr
3554  unsigned DestBits = DestTy->getPrimitiveSizeInBits(); // 0 for ptr
3555 
3556  // Run through the possibilities ...
3557  if (DestTy->isIntegerTy()) { // Casting to integral
3558  if (SrcTy->isIntegerTy()) { // Casting from integral
3559  if (DestBits < SrcBits)
3560  return Trunc; // int -> smaller int
3561  else if (DestBits > SrcBits) { // it's an extension
3562  if (SrcIsSigned)
3563  return SExt; // signed -> SEXT
3564  else
3565  return ZExt; // unsigned -> ZEXT
3566  } else {
3567  return BitCast; // Same size, No-op cast
3568  }
3569  } else if (SrcTy->isFloatingPointTy()) { // Casting from floating pt
3570  if (DestIsSigned)
3571  return FPToSI; // FP -> sint
3572  else
3573  return FPToUI; // FP -> uint
3574  } else if (SrcTy->isVectorTy()) {
3575  assert(DestBits == SrcBits &&
3576  "Casting vector to integer of different width");
3577  return BitCast; // Same size, no-op cast
3578  } else {
3579  assert(SrcTy->isPointerTy() &&
3580  "Casting from a value that is not first-class type");
3581  return PtrToInt; // ptr -> int
3582  }
3583  } else if (DestTy->isFloatingPointTy()) { // Casting to floating pt
3584  if (SrcTy->isIntegerTy()) { // Casting from integral
3585  if (SrcIsSigned)
3586  return SIToFP; // sint -> FP
3587  else
3588  return UIToFP; // uint -> FP
3589  } else if (SrcTy->isFloatingPointTy()) { // Casting from floating pt
3590  if (DestBits < SrcBits) {
3591  return FPTrunc; // FP -> smaller FP
3592  } else if (DestBits > SrcBits) {
3593  return FPExt; // FP -> larger FP
3594  } else {
3595  return BitCast; // same size, no-op cast
3596  }
3597  } else if (SrcTy->isVectorTy()) {
3598  assert(DestBits == SrcBits &&
3599  "Casting vector to floating point of different width");
3600  return BitCast; // same size, no-op cast
3601  }
3602  llvm_unreachable("Casting pointer or non-first class to float");
3603  } else if (DestTy->isVectorTy()) {
3604  assert(DestBits == SrcBits &&
3605  "Illegal cast to vector (wrong type or size)");
3606  return BitCast;
3607  } else if (DestTy->isPointerTy()) {
3608  if (SrcTy->isPointerTy()) {
3609  if (DestTy->getPointerAddressSpace() != SrcTy->getPointerAddressSpace())
3610  return AddrSpaceCast;
3611  return BitCast; // ptr -> ptr
3612  } else if (SrcTy->isIntegerTy()) {
3613  return IntToPtr; // int -> ptr
3614  }
3615  llvm_unreachable("Casting pointer to other than pointer or int");
3616  } else if (DestTy->isX86_MMXTy()) {
3617  if (SrcTy->isVectorTy()) {
3618  assert(DestBits == SrcBits && "Casting vector of wrong width to X86_MMX");
3619  return BitCast; // 64-bit vector to MMX
3620  }
3621  llvm_unreachable("Illegal cast to X86_MMX");
3622  }
3623  llvm_unreachable("Casting to type that is not first-class");
3624 }
3625 
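// Example sketch (hypothetical i8 value V8, Ctx in scope):
//
//   Instruction::CastOps Op = CastInst::getCastOpcode(
//       V8, /*SrcIsSigned=*/true, Type::getInt32Ty(Ctx),
//       /*DestIsSigned=*/true); // yields Instruction::SExt
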
3626 //===----------------------------------------------------------------------===//
3627 // CastInst SubClass Constructors
3628 //===----------------------------------------------------------------------===//
3629 
3630 /// Check that the construction parameters for a CastInst are correct. This
3631 /// could be broken out into the separate constructors but it is useful to have
3632 /// it in one place and to eliminate the redundant code for getting the sizes
3633 /// of the types involved.
3634 bool
3635 CastInst::castIsValid(Instruction::CastOps op, Type *SrcTy, Type *DstTy) {
3636  if (!SrcTy->isFirstClassType() || !DstTy->isFirstClassType() ||
3637  SrcTy->isAggregateType() || DstTy->isAggregateType())
3638  return false;
3639 
3640  // Get the size of the types in bits and note whether we are dealing
3641  // with vector types; we'll need this later.
3642  bool SrcIsVec = isa<VectorType>(SrcTy);
3643  bool DstIsVec = isa<VectorType>(DstTy);
3644  unsigned SrcScalarBitSize = SrcTy->getScalarSizeInBits();
3645  unsigned DstScalarBitSize = DstTy->getScalarSizeInBits();
3646 
3647  // If these are vector types, get the lengths of the vectors (using zero for
3648  // scalar types means that checking that vector lengths match also checks that
3649  // scalars are not being converted to vectors or vectors to scalars).
3650  ElementCount SrcEC = SrcIsVec ? cast<VectorType>(SrcTy)->getElementCount()
3651  : ElementCount::getFixed(0);
3652  ElementCount DstEC = DstIsVec ? cast<VectorType>(DstTy)->getElementCount()
3653  : ElementCount::getFixed(0);
3654 
3655  // Switch on the opcode provided
3656  switch (op) {
3657  default: return false; // This is an input error
3658  case Instruction::Trunc:
3659  return SrcTy->isIntOrIntVectorTy() && DstTy->isIntOrIntVectorTy() &&
3660  SrcEC == DstEC && SrcScalarBitSize > DstScalarBitSize;
3661  case Instruction::ZExt:
3662  return SrcTy->isIntOrIntVectorTy() && DstTy->isIntOrIntVectorTy() &&
3663  SrcEC == DstEC && SrcScalarBitSize < DstScalarBitSize;
3664  case Instruction::SExt:
3665  return SrcTy->isIntOrIntVectorTy() && DstTy->isIntOrIntVectorTy() &&
3666  SrcEC == DstEC && SrcScalarBitSize < DstScalarBitSize;
3667  case Instruction::FPTrunc:
3668  return SrcTy->isFPOrFPVectorTy() && DstTy->isFPOrFPVectorTy() &&
3669  SrcEC == DstEC && SrcScalarBitSize > DstScalarBitSize;
3670  case Instruction::FPExt:
3671  return SrcTy->isFPOrFPVectorTy() && DstTy->isFPOrFPVectorTy() &&
3672  SrcEC == DstEC && SrcScalarBitSize < DstScalarBitSize;
3673  case Instruction::UIToFP:
3674  case Instruction::SIToFP:
3675  return SrcTy->isIntOrIntVectorTy() && DstTy->isFPOrFPVectorTy() &&
3676  SrcEC == DstEC;
3677  case Instruction::FPToUI:
3678  case Instruction::FPToSI:
3679  return SrcTy->isFPOrFPVectorTy() && DstTy->isIntOrIntVectorTy() &&
3680  SrcEC == DstEC;
3681  case Instruction::PtrToInt:
3682  if (SrcEC != DstEC)
3683  return false;
3684  return SrcTy->isPtrOrPtrVectorTy() && DstTy->isIntOrIntVectorTy();
3685  case Instruction::IntToPtr:
3686  if (SrcEC != DstEC)
3687  return false;
3688  return SrcTy->isIntOrIntVectorTy() && DstTy->isPtrOrPtrVectorTy();
3689  case Instruction::BitCast: {
3690  PointerType *SrcPtrTy = dyn_cast<PointerType>(SrcTy->getScalarType());
3691  PointerType *DstPtrTy = dyn_cast<PointerType>(DstTy->getScalarType());
3692 
3693  // BitCast implies a no-op cast of type only. No bits change.
3694  // However, you can't cast pointers to anything but pointers.
3695  if (!SrcPtrTy != !DstPtrTy)
3696  return false;
3697 
3698  // For non-pointer cases, the cast is okay if the source and destination bit
3699  // widths are identical.
3700  if (!SrcPtrTy)
3701  return SrcTy->getPrimitiveSizeInBits() == DstTy->getPrimitiveSizeInBits();
3702 
3703  // If both are pointers then the address spaces must match.
3704  if (SrcPtrTy->getAddressSpace() != DstPtrTy->getAddressSpace())
3705  return false;
3706 
3707  // A vector of pointers must have the same number of elements.
3708  if (SrcIsVec && DstIsVec)
3709  return SrcEC == DstEC;
3710  if (SrcIsVec)
3711  return SrcEC == ElementCount::getFixed(1);
3712  if (DstIsVec)
3713  return DstEC == ElementCount::getFixed(1);
3714 
3715  return true;
3716  }
3717  case Instruction::AddrSpaceCast: {
3718  PointerType *SrcPtrTy = dyn_cast<PointerType>(SrcTy->getScalarType());
3719  if (!SrcPtrTy)
3720  return false;
3721 
3722  PointerType *DstPtrTy = dyn_cast<PointerType>(DstTy->getScalarType());
3723  if (!DstPtrTy)
3724  return false;
3725 
3726  if (SrcPtrTy->getAddressSpace() == DstPtrTy->getAddressSpace())
3727  return false;
3728 
3729  return SrcEC == DstEC;
3730  }
3731  }
3732 }
3733 
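// Example sketch of the invariant documented above getCastOpcode, for any
// first-class, non-aggregate value V and type Ty (hypothetical names):
//
//   Instruction::CastOps Op = CastInst::getCastOpcode(V, false, Ty, false);
//   assert(CastInst::castIsValid(Op, V->getType(), Ty));
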
3734 TruncInst::TruncInst(
3735  Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
3736 ) : CastInst(Ty, Trunc, S, Name, InsertBefore) {
3737  assert(castIsValid(getOpcode(), S, Ty) && "Illegal Trunc");
3738 }
3739 
3740 TruncInst::TruncInst(
3741  Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
3742 ) : CastInst(Ty, Trunc, S, Name, InsertAtEnd) {
3743  assert(castIsValid(getOpcode(), S, Ty) && "Illegal Trunc");
3744 }
3745 
3746 ZExtInst::ZExtInst(
3747  Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
3748 ) : CastInst(Ty, ZExt, S, Name, InsertBefore) {
3749  assert(castIsValid(getOpcode(), S, Ty) && "Illegal ZExt");
3750 }
3751 
3752 ZExtInst::ZExtInst(
3753  Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
3754 ) : CastInst(Ty, ZExt, S, Name, InsertAtEnd) {
3755  assert(castIsValid(getOpcode(), S, Ty) && "Illegal ZExt");
3756 }
3757 SExtInst::SExtInst(
3758  Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
3759 ) : CastInst(Ty, SExt, S, Name, InsertBefore) {
3760  assert(castIsValid(getOpcode(), S, Ty) && "Illegal SExt");
3761 }
3762 
3763 SExtInst::SExtInst(
3764  Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
3765 ) : CastInst(Ty, SExt, S, Name, InsertAtEnd) {
3766  assert(castIsValid(getOpcode(), S, Ty) && "Illegal SExt");
3767 }
3768 
3769 FPTruncInst::FPTruncInst(
3770  Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
3771 ) : CastInst(Ty, FPTrunc, S, Name, InsertBefore) {
3772  assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPTrunc");
3773 }
3774 
3775 FPTruncInst::FPTruncInst(
3776  Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
3777 ) : CastInst(Ty, FPTrunc, S, Name, InsertAtEnd) {
3778  assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPTrunc");
3779 }
3780 
3781 FPExtInst::FPExtInst(
3782  Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
3783 ) : CastInst(Ty, FPExt, S, Name, InsertBefore) {
3784  assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPExt");
3785 }
3786 
3787 FPExtInst::FPExtInst(
3788  Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
3789 ) : CastInst(Ty, FPExt, S, Name, InsertAtEnd) {
3790  assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPExt");
3791 }
3792 
3793 UIToFPInst::UIToFPInst(
3794  Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
3795 ) : CastInst(Ty, UIToFP, S, Name, InsertBefore) {
3796  assert(castIsValid(getOpcode(), S, Ty) && "Illegal UIToFP");
3797 }
3798 
3799 UIToFPInst::UIToFPInst(
3800  Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
3801 ) : CastInst(Ty, UIToFP, S, Name, InsertAtEnd) {
3802  assert(castIsValid(getOpcode(), S, Ty) && "Illegal UIToFP");
3803 }
3804 
3805 SIToFPInst::SIToFPInst(
3806  Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
3807 ) : CastInst(Ty, SIToFP, S, Name, InsertBefore) {
3808  assert(castIsValid(getOpcode(), S, Ty) && "Illegal SIToFP");
3809 }
3810 
3811 SIToFPInst::SIToFPInst(
3812  Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
3813 ) : CastInst(Ty, SIToFP, S, Name, InsertAtEnd) {
3814  assert(castIsValid(getOpcode(), S, Ty) && "Illegal SIToFP");
3815 }
3816 
3817 FPToUIInst::FPToUIInst(
3818  Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
3819 ) : CastInst(Ty, FPToUI, S, Name, InsertBefore) {
3820  assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPToUI");
3821 }
3822 
3823 FPToUIInst::FPToUIInst(
3824  Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
3825 ) : CastInst(Ty, FPToUI, S, Name, InsertAtEnd) {
3826  assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPToUI");
3827 }
3828 
3829 FPToSIInst::FPToSIInst(
3830  Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
3831 ) : CastInst(Ty, FPToSI, S, Name, InsertBefore) {
3832  assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPToSI");
3833 }
3834 
3835 FPToSIInst::FPToSIInst(
3836  Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
3837 ) : CastInst(Ty, FPToSI, S, Name, InsertAtEnd) {
3838  assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPToSI");
3839 }
3840 
3841 PtrToIntInst::PtrToIntInst(
3842  Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
3843 ) : CastInst(Ty, PtrToInt, S, Name, InsertBefore) {
3844  assert(castIsValid(getOpcode(), S, Ty) && "Illegal PtrToInt");
3845 }
3846 
3847 PtrToIntInst::PtrToIntInst(
3848  Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
3849 ) : CastInst(Ty, PtrToInt, S, Name, InsertAtEnd) {
3850  assert(castIsValid(getOpcode(), S, Ty) && "Illegal PtrToInt");
3851 }
3852 
3853 IntToPtrInst::IntToPtrInst(
3854  Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
3855 ) : CastInst(Ty, IntToPtr, S, Name, InsertBefore) {
3856  assert(castIsValid(getOpcode(), S, Ty) && "Illegal IntToPtr");
3857 }
3858 
3859 IntToPtrInst::IntToPtrInst(
3860  Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
3861 ) : CastInst(Ty, IntToPtr, S, Name, InsertAtEnd) {
3862  assert(castIsValid(getOpcode(), S, Ty) && "Illegal IntToPtr");
3863 }
3864 
3865 BitCastInst::BitCastInst(
3866  Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
3867 ) : CastInst(Ty, BitCast, S, Name, InsertBefore) {
3868  assert(castIsValid(getOpcode(), S, Ty) && "Illegal BitCast");
3869 }
3870 
3871 BitCastInst::BitCastInst(
3872  Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
3873 ) : CastInst(Ty, BitCast, S, Name, InsertAtEnd) {
3874  assert(castIsValid(getOpcode(), S, Ty) && "Illegal BitCast");
3875 }
3876 
3877 AddrSpaceCastInst::AddrSpaceCastInst(
3878  Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
3879 ) : CastInst(Ty, AddrSpaceCast, S, Name, InsertBefore) {
3880  assert(castIsValid(getOpcode(), S, Ty) && "Illegal AddrSpaceCast");
3881 }
3882 
3883 AddrSpaceCastInst::AddrSpaceCastInst(
3884  Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
3885 ) : CastInst(Ty, AddrSpaceCast, S, Name, InsertAtEnd) {
3886  assert(castIsValid(getOpcode(), S, Ty) && "Illegal AddrSpaceCast");
3887 }
3888 
3889 //===----------------------------------------------------------------------===//
3890 // CmpInst Classes
3891 //===----------------------------------------------------------------------===//
3892 
3893 CmpInst::CmpInst(Type *ty, OtherOps op, Predicate predicate, Value *LHS,
3894  Value *RHS, const Twine &Name, Instruction *InsertBefore,
3895  Instruction *FlagsSource)
3896  : Instruction(ty, op,
3897  OperandTraits<CmpInst>::op_begin(this),
3898  OperandTraits<CmpInst>::operands(this),
3899  InsertBefore) {
3900  Op<0>() = LHS;
3901  Op<1>() = RHS;
3902  setPredicate((Predicate)predicate);
3903  setName(Name);
3904  if (FlagsSource)
3905  copyIRFlags(FlagsSource);
3906 }
3907 
3908 CmpInst::CmpInst(Type *ty, OtherOps op, Predicate predicate, Value *LHS,
3909  Value *RHS, const Twine &Name, BasicBlock *InsertAtEnd)
3910  : Instruction(ty, op,
3911  OperandTraits<CmpInst>::op_begin(this),
3912  OperandTraits<CmpInst>::operands(this),
3913  InsertAtEnd) {
3914  Op<0>() = LHS;
3915  Op<1>() = RHS;
3916  setPredicate((Predicate)predicate);
3917  setName(Name);
3918 }
3919 
3920 CmpInst *
3921 CmpInst::Create(OtherOps Op, Predicate predicate, Value *S1, Value *S2,
3922  const Twine &Name, Instruction *InsertBefore) {
3923  if (Op == Instruction::ICmp) {
3924  if (InsertBefore)
3925  return new ICmpInst(InsertBefore, CmpInst::Predicate(predicate),
3926  S1, S2, Name);
3927  else
3928  return new ICmpInst(CmpInst::Predicate(predicate),
3929  S1, S2, Name);
3930  }
3931 
3932  if (InsertBefore)
3933  return new FCmpInst(InsertBefore, CmpInst::Predicate(predicate),
3934  S1, S2, Name);
3935  else
3936  return new FCmpInst(CmpInst::Predicate(predicate),
3937  S1, S2, Name);
3938 }
3939 
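// Example sketch (hypothetical i32 values A and B):
//
//   CmpInst *C = CmpInst::Create(Instruction::ICmp, CmpInst::ICMP_SLT,
//                                A, B, "cmp", InsertPt);
//
// C is an ICmpInst whose result type is i1.
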
3940 CmpInst *
3941 CmpInst::Create(OtherOps Op, Predicate predicate, Value *S1, Value *S2,
3942  const Twine &Name, BasicBlock *InsertAtEnd) {
3943  if (Op == Instruction::ICmp) {
3944  return new ICmpInst(*InsertAtEnd, CmpInst::Predicate(predicate),
3945  S1, S2, Name);
3946  }
3947  return new FCmpInst(*InsertAtEnd, CmpInst::Predicate(predicate),
3948  S1, S2, Name);
3949 }
3950 
3951 void CmpInst::swapOperands() {
3952  if (ICmpInst *IC = dyn_cast<ICmpInst>(this))
3953  IC->swapOperands();
3954  else
3955  cast<FCmpInst>(this)->swapOperands();
3956 }
3957 
3958 bool CmpInst::isCommutative() const {
3959  if (const ICmpInst *IC = dyn_cast<ICmpInst>(this))
3960  return IC->isCommutative();
3961  return cast<FCmpInst>(this)->isCommutative();
3962 }
3963 
3964 bool CmpInst::isEquality(Predicate P) {
3965  if (ICmpInst::isIntPredicate(P))
3966  return ICmpInst::isEquality(P);
3967  if (FCmpInst::isFPPredicate(P))
3968  return FCmpInst::isEquality(P);
3969  llvm_unreachable("Unsupported predicate kind");
3970 }
3971 
3972 CmpInst::Predicate CmpInst::getInversePredicate(Predicate pred) {
3973  switch (pred) {
3974  default: llvm_unreachable("Unknown cmp predicate!");
3975  case ICMP_EQ: return ICMP_NE;
3976  case ICMP_NE: return ICMP_EQ;
3977  case ICMP_UGT: return ICMP_ULE;
3978  case ICMP_ULT: return ICMP_UGE;
3979  case ICMP_UGE: return ICMP_ULT;
3980  case ICMP_ULE: return ICMP_UGT;
3981  case ICMP_SGT: return ICMP_SLE;
3982  case ICMP_SLT: return ICMP_SGE;
3983  case ICMP_SGE: return ICMP_SLT;
3984  case ICMP_SLE: return ICMP_SGT;
3985 
3986  case FCMP_OEQ: return FCMP_UNE;
3987  case FCMP_ONE: return FCMP_UEQ;
3988  case FCMP_OGT: return FCMP_ULE;
3989  case FCMP_OLT: return FCMP_UGE;
3990  case FCMP_OGE: return FCMP_ULT;
3991  case FCMP_OLE: return FCMP_UGT;
3992  case FCMP_UEQ: return FCMP_ONE;
3993  case FCMP_UNE: return FCMP_OEQ;
3994  case FCMP_UGT: return FCMP_OLE;
3995  case FCMP_ULT: return FCMP_OGE;
3996  case FCMP_UGE: return FCMP_OLT;
3997  case FCMP_ULE: return FCMP_OGT;
3998  case FCMP_ORD: return FCMP_UNO;
3999  case FCMP_UNO: return FCMP_ORD;
4000  case FCMP_TRUE: return FCMP_FALSE;
4001  case FCMP_FALSE: return FCMP_TRUE;
4002  }
4003 }
4004 
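// Example sketch: inversion negates the predicate's truth value, e.g.
//
//   CmpInst::getInversePredicate(CmpInst::ICMP_SGT) == CmpInst::ICMP_SLE
//   CmpInst::getInversePredicate(CmpInst::FCMP_OEQ) == CmpInst::FCMP_UNE
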
4005 StringRef CmpInst::getPredicateName(Predicate Pred) {
4006  switch (Pred) {
4007  default: return "unknown";
4008  case FCmpInst::FCMP_FALSE: return "false";
4009  case FCmpInst::FCMP_OEQ: return "oeq";
4010  case FCmpInst::FCMP_OGT: return "ogt";
4011  case FCmpInst::FCMP_OGE: return "oge";
4012  case FCmpInst::FCMP_OLT: return "olt";
4013  case FCmpInst::FCMP_OLE: return "ole";
4014  case FCmpInst::FCMP_ONE: return "one";
4015  case FCmpInst::FCMP_ORD: return "ord";
4016  case FCmpInst::FCMP_UNO: return "uno";
4017  case FCmpInst::FCMP_UEQ: return "ueq";
4018  case FCmpInst::FCMP_UGT: return "ugt";
4019  case FCmpInst::FCMP_UGE: return "uge";
4020  case FCmpInst::FCMP_ULT: return "ult";
4021  case FCmpInst::FCMP_ULE: return "ule";
4022  case FCmpInst::FCMP_UNE: return "une";
4023  case FCmpInst::FCMP_TRUE: return "true";
4024  case ICmpInst::ICMP_EQ: return "eq";
4025  case ICmpInst::ICMP_NE: return "ne";
4026  case ICmpInst::ICMP_SGT: return "sgt";
4027  case ICmpInst::ICMP_SGE: return "sge";
4028  case ICmpInst::ICMP_SLT: return "slt";
4029  case ICmpInst::ICMP_SLE: return "sle";
4030  case ICmpInst::ICMP_UGT: return "ugt";
4031  case ICmpInst::ICMP_UGE: return "uge";
4032  case ICmpInst::ICMP_ULT: return "ult";
4033  case ICmpInst::ICMP_ULE: return "ule";
4034  }
4035 }
4036 
4037 ICmpInst::Predicate ICmpInst::getSignedPredicate(Predicate pred) {
4038  switch (pred) {
4039  default: llvm_unreachable("Unknown icmp predicate!");
4040  case ICMP_EQ: case ICMP_NE:
4041  case ICMP_SGT: case ICMP_SLT: case ICMP_SGE: case ICMP_SLE:
4042  return pred;
4043  case ICMP_UGT: return ICMP_SGT;
4044  case ICMP_ULT: return ICMP_SLT;
4045  case ICMP_UGE: return ICMP_SGE;
4046  case ICMP_ULE: return ICMP_SLE;
4047  }
4048 }
4049 
4050 ICmpInst::Predicate ICmpInst::getUnsignedPredicate(Predicate pred) {
4051  switch (pred) {
4052  default: llvm_unreachable("Unknown icmp predicate!");
4053  case ICMP_EQ: case ICMP_NE:
4054  case ICMP_UGT: case ICMP_ULT: case ICMP_UGE: case ICMP_ULE:
4055  return pred;
4056  case ICMP_SGT: return ICMP_UGT;
4057  case ICMP_SLT: return ICMP_ULT;
4058  case ICMP_SGE: return ICMP_UGE;
4059  case ICMP_SLE: return ICMP_ULE;
4060  }
4061 }
4062 
4063 CmpInst::Predicate CmpInst::getSwappedPredicate(Predicate pred) {
4064  switch (pred) {
4065  default: llvm_unreachable("Unknown cmp predicate!");
4066  case ICMP_EQ: case ICMP_NE:
4067  return pred;
4068  case ICMP_SGT: return ICMP_SLT;
4069  case ICMP_SLT: return ICMP_SGT;
4070  case ICMP_SGE: return ICMP_SLE;
4071  case ICMP_SLE: return ICMP_SGE;
4072  case ICMP_UGT: return ICMP_ULT;
4073  case ICMP_ULT: return ICMP_UGT;
4074  case ICMP_UGE: return ICMP_ULE;
4075  case ICMP_ULE: return ICMP_UGE;
4076 
4077  case FCMP_FALSE: case FCMP_TRUE:
4078  case FCMP_OEQ: case FCMP_ONE:
4079  case FCMP_UEQ: case FCMP_UNE:
4080  case FCMP_ORD: case FCMP_UNO:
4081  return pred;
4082  case FCMP_OGT: return FCMP_OLT;
4083  case FCMP_OLT: return FCMP_OGT;
4084  case FCMP_OGE: return FCMP_OLE;
4085  case FCMP_OLE: return FCMP_OGE;
4086  case FCMP_UGT: return FCMP_ULT;
4087  case FCMP_ULT: return FCMP_UGT;
4088  case FCMP_UGE: return FCMP_ULE;
4089  case FCMP_ULE: return FCMP_UGE;
4090  }
4091 }
4092 
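// Example sketch: swapping mirrors a predicate across its operands rather
// than negating it, so (a <s b) and (b >s a) remain equivalent:
//
//   CmpInst::getSwappedPredicate(CmpInst::ICMP_SLT) == CmpInst::ICMP_SGT
//   // contrast: getInversePredicate(ICMP_SLT) == ICMP_SGE
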
4093 bool CmpInst::isNonStrictPredicate(Predicate pred) {
4094  switch (pred) {
4095  case ICMP_SGE:
4096  case ICMP_SLE:
4097  case ICMP_UGE:
4098  case ICMP_ULE:
4099  case FCMP_OGE:
4100  case FCMP_OLE:
4101  case FCMP_UGE:
4102  case FCMP_ULE:
4103  return true;
4104  default:
4105  return false;
4106  }
4107 }
4108 
4109 bool CmpInst::isStrictPredicate(Predicate pred) {
4110  switch (pred) {
4111  case ICMP_SGT:
4112  case ICMP_SLT:
4113  case ICMP_UGT:
4114  case ICMP_ULT:
4115  case FCMP_OGT:
4116  case FCMP_OLT:
4117  case FCMP_UGT:
4118  case FCMP_ULT:
4119  return true;
4120  default:
4121  return false;
4122  }
4123 }
4124 
4125 CmpInst::Predicate CmpInst::getStrictPredicate(Predicate pred) {
4126  switch (pred) {
4127  case ICMP_SGE:
4128  return ICMP_SGT;
4129  case ICMP_SLE:
4130  return ICMP_SLT;
4131  case ICMP_UGE:
4132  return ICMP_UGT;
4133  case ICMP_ULE:
4134  return ICMP_ULT;
4135  case FCMP_OGE:
4136  return FCMP_OGT;
4137  case FCMP_OLE:
4138  return FCMP_OLT;
4139  case FCMP_UGE:
4140  return FCMP_UGT;
4141  case FCMP_ULE:
4142  return FCMP_ULT;
4143  default:
4144  return pred;
4145  }
4146 }
4147 
4148 CmpInst::Predicate CmpInst::getNonStrictPredicate(Predicate pred) {
4149  switch (pred) {
4150  case ICMP_SGT:
4151  return ICMP_SGE;
4152  case ICMP_SLT:
4153  return ICMP_SLE;
4154  case ICMP_UGT:
4155  return ICMP_UGE;
4156  case ICMP_ULT:
4157  return ICMP_ULE;
4158  case FCMP_OGT:
4159  return FCMP_OGE;
4160  case FCMP_OLT:
4161  return FCMP_OLE;
4162  case FCMP_UGT:
4163  return FCMP_UGE;
4164  case FCMP_ULT:
4165  return FCMP_ULE;
4166  default:
4167  return pred;
4168  }
4169 }
4170 
4171 CmpInst::Predicate CmpInst::getFlippedStrictnessPredicate(Predicate pred) {
4172  assert(CmpInst::isRelational(pred) && "Call only with relational predicate!");
4173 
4174  if (isStrictPredicate(pred))
4175  return getNonStrictPredicate(pred);
4176  if (isNonStrictPredicate(pred))
4177  return getStrictPredicate(pred);
4178 
4179  llvm_unreachable("Unknown predicate!");
4180 }
4181 
4182 CmpInst::Predicate CmpInst::getSignedPredicate(Predicate pred) {
4183  assert(CmpInst::isUnsigned(pred) && "Call only with unsigned predicates!");
4184 
4185  switch (pred) {
4186  default:
4187  llvm_unreachable("Unknown predicate!");
4188  case CmpInst::ICMP_ULT:
4189  return CmpInst::ICMP_SLT;
4190  case CmpInst::ICMP_ULE:
4191  return CmpInst::ICMP_SLE;
4192  case CmpInst::ICMP_UGT:
4193  return CmpInst::ICMP_SGT;
4194  case CmpInst::ICMP_UGE:
4195  return CmpInst::ICMP_SGE;
4196  }
4197 }
4198 
4199 CmpInst::Predicate CmpInst::getUnsignedPredicate(Predicate pred) {
4200  assert(CmpInst::isSigned(pred) && "Call only with signed predicates!");
4201 
4202  switch (pred) {
4203  default:
4204  llvm_unreachable("Unknown predicate!");
4205  case CmpInst::ICMP_SLT:
4206  return CmpInst::ICMP_ULT;
4207  case CmpInst::ICMP_SLE:
4208  return CmpInst::ICMP_ULE;
4209  case CmpInst::ICMP_SGT:
4210  return CmpInst::ICMP_UGT;
4211  case CmpInst::ICMP_SGE:
4212  return CmpInst::ICMP_UGE;
4213  }
4214 }
4215 
4216 bool CmpInst::isUnsigned(Predicate predicate) {
4217  switch (predicate) {
4218  default: return false;
4219  case ICmpInst::ICMP_ULT: case ICmpInst::ICMP_ULE: case ICmpInst::ICMP_UGT:
4220  case ICmpInst::ICMP_UGE: return true;
4221  }
4222 }
4223 
4224 bool CmpInst::isSigned(Predicate predicate) {
4225  switch (predicate) {
4226  default: return false;
4227  case ICmpInst::ICMP_SLT: case ICmpInst::ICMP_SLE: case ICmpInst::ICMP_SGT:
4228  case ICmpInst::ICMP_SGE: return true;
4229  }
4230 }
4231 
4232 bool ICmpInst::compare(const APInt &LHS, const APInt &RHS,
4233  ICmpInst::Predicate Pred) {
4234  assert(ICmpInst::isIntPredicate(Pred) && "Only for integer predicates!");
4235  switch (Pred) {
4236  case ICmpInst::Predicate::ICMP_EQ:
4237  return LHS.eq(RHS);
4238  case ICmpInst::Predicate::ICMP_NE:
4239  return LHS.ne(RHS);
4240  case ICmpInst::Predicate::ICMP_UGT:
4241  return LHS.ugt(RHS);
4242  case ICmpInst::Predicate::ICMP_UGE:
4243  return LHS.uge(RHS);
4244  case ICmpInst::Predicate::ICMP_ULT:
4245  return LHS.ult(RHS);
4246  case ICmpInst::Predicate::ICMP_ULE:
4247  return LHS.ule(RHS);
4248  case ICmpInst::Predicate::ICMP_SGT:
4249  return LHS.sgt(RHS);
4250  case ICmpInst::Predicate::ICMP_SGE:
4251  return LHS.sge(RHS);
4252  case ICmpInst::Predicate::ICMP_SLT:
4253  return LHS.slt(RHS);
4254  case ICmpInst::Predicate::ICMP_SLE:
4255  return LHS.sle(RHS);
4256  default:
4257  llvm_unreachable("Unexpected non-integer predicate.");
4258  };
4259 }
4260 
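// Example sketch: the same bit pattern compares differently under signed and
// unsigned predicates.
//
//   APInt A(8, 0x80), B(8, 0x01); // A is -128 signed, 128 unsigned
//   ICmpInst::compare(A, B, ICmpInst::ICMP_SLT); // true:  -128 <s 1
//   ICmpInst::compare(A, B, ICmpInst::ICMP_ULT); // false: 128 >u 1
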
4261 bool FCmpInst::compare(const APFloat &LHS, const APFloat &RHS,
4262  FCmpInst::Predicate Pred) {
4263  APFloat::cmpResult R = LHS.compare(RHS);
4264  switch (Pred) {
4265  default:
4266  llvm_unreachable("Invalid FCmp Predicate");
4267  case FCmpInst::FCMP_FALSE:
4268  return false;
4269  case FCmpInst::FCMP_TRUE:
4270  return true;
4271  case FCmpInst::FCMP_UNO:
4272  return R == APFloat::cmpUnordered;
4273  case FCmpInst::FCMP_ORD:
4274  return R != APFloat::cmpUnordered;
4275  case FCmpInst::FCMP_UEQ:
4276  return R == APFloat::cmpUnordered || R == APFloat::cmpEqual;
4277  case FCmpInst::FCMP_OEQ:
4278  return R == APFloat::cmpEqual;
4279  case FCmpInst::FCMP_UNE:
4280  return R != APFloat::cmpEqual;
4281  case FCmpInst::FCMP_ONE:
4282  return R == APFloat::cmpLessThan || R == APFloat::cmpGreaterThan;
4283  case FCmpInst::FCMP_ULT:
4284  return R == APFloat::cmpUnordered || R == APFloat::cmpLessThan;
4285  case FCmpInst::FCMP_OLT:
4286  return R == APFloat::cmpLessThan;
4287  case FCmpInst::FCMP_UGT:
4288  return R == APFloat::cmpUnordered || R == APFloat::cmpGreaterThan;
4289  case FCmpInst::FCMP_OGT:
4290  return R == APFloat::cmpGreaterThan;
4291  case FCmpInst::FCMP_ULE:
4292  return R != APFloat::cmpGreaterThan;
4293  case FCmpInst::FCMP_OLE:
4294  return R == APFloat::cmpLessThan || R == APFloat::cmpEqual;
4295  case FCmpInst::FCMP_UGE:
4296  return R != APFloat::cmpLessThan;
4297  case FCmpInst::FCMP_OGE:
4298  return R == APFloat::cmpGreaterThan || R == APFloat::cmpEqual;
4299  }
4300 }
4301 
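// Example sketch: with a NaN operand the comparison is unordered, so every
// ordered predicate is false and every unordered one is true:
//
//   APFloat NaN = APFloat::getNaN(APFloat::IEEEsingle());
//   APFloat One(1.0f);
//   FCmpInst::compare(NaN, One, FCmpInst::FCMP_OLT); // false
//   FCmpInst::compare(NaN, One, FCmpInst::FCMP_ULT); // true
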
4304  "Call only with non-equality predicates!");
4305 
4306  if (isSigned(pred))
4307  return getUnsignedPredicate(pred);
4308  if (isUnsigned(pred))
4309  return getSignedPredicate(pred);
4310 
4311  llvm_unreachable("Unknown predicate!");
4312 }
4313 
4314 bool CmpInst::isOrdered(Predicate predicate) {
4315  switch (predicate) {
4316  default: return false;
4317  case FCmpInst::FCMP_OEQ: case FCmpInst::FCMP_ONE: case FCmpInst::FCMP_OGT:
4318  case FCmpInst::FCMP_OLT: case FCmpInst::FCMP_OGE: case FCmpInst::FCMP_OLE:
4319  case FCmpInst::FCMP_ORD: return true;
4320  }
4321 }
4322 
4323 bool CmpInst::isUnordered(Predicate predicate) {
4324  switch (predicate) {
4325  default: return false;
4326  case FCmpInst::FCMP_UEQ: case FCmpInst::FCMP_UNE: case FCmpInst::FCMP_UGT:
4327  case FCmpInst::FCMP_ULT: case FCmpInst::FCMP_UGE: case FCmpInst::FCMP_ULE:
4328  case FCmpInst::FCMP_UNO: return true;
4329  }
4330 }
4331 
4332 bool CmpInst::isTrueWhenEqual(Predicate predicate) {
4333  switch(predicate) {
4334  default: return false;
4335  case ICMP_EQ: case ICMP_UGE: case ICMP_ULE: case ICMP_SGE: case ICMP_SLE:
4336  case FCMP_TRUE: case FCMP_UEQ: case FCMP_UGE: case FCMP_ULE: return true;
4337  }
4338 }
4339 
4340 bool CmpInst::isFalseWhenEqual(Predicate predicate) {
4341  switch(predicate) {
4342  case ICMP_NE: case ICMP_UGT: case ICMP_ULT: case ICMP_SGT: case ICMP_SLT:
4343  case FCMP_FALSE: case FCMP_ONE: case FCMP_OGT: case FCMP_OLT: return true;
4344  default: return false;
4345  }
4346 }
4347 
4348 bool CmpInst::isImpliedTrueByMatchingCmp(Predicate Pred1, Predicate Pred2) {
4349  // If the predicates match, then we know the first condition implies the
4350  // second is true.
4351  if (Pred1 == Pred2)
4352  return true;
4353 
4354  switch (Pred1) {
4355  default:
4356  break;
4357  case ICMP_EQ:
4358  // A == B implies A >=u B, A <=u B, A >=s B, and A <=s B are true.
4359  return Pred2 == ICMP_UGE || Pred2 == ICMP_ULE || Pred2 == ICMP_SGE ||
4360  Pred2 == ICMP_SLE;
4361  case ICMP_UGT: // A >u B implies A != B and A >=u B are true.
4362  return Pred2 == ICMP_NE || Pred2 == ICMP_UGE;
4363  case ICMP_ULT: // A <u B implies A != B and A <=u B are true.
4364  return Pred2 == ICMP_NE || Pred2 == ICMP_ULE;
4365  case ICMP_SGT: // A >s B implies A != B and A >=s B are true.
4366  return Pred2 == ICMP_NE || Pred2 == ICMP_SGE;
4367  case ICMP_SLT: // A <s B implies A != B and A <=s B are true.
4368  return Pred2 == ICMP_NE || Pred2 == ICMP_SLE;
4369  }
4370  return false;
4371 }
4372 
4373 bool CmpInst::isImpliedFalseByMatchingCmp(Predicate Pred1, Predicate Pred2) {
4374  return isImpliedTrueByMatchingCmp(Pred1, getInversePredicate(Pred2));
4375 }
4376 
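// Example sketch, reading off the table in isImpliedTrueByMatchingCmp for a
// shared pair of operands:
//
//   CmpInst::isImpliedTrueByMatchingCmp(CmpInst::ICMP_UGT,
//                                       CmpInst::ICMP_NE);  // true
//   CmpInst::isImpliedFalseByMatchingCmp(CmpInst::ICMP_EQ,
//                                        CmpInst::ICMP_NE); // true
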
4377 //===----------------------------------------------------------------------===//
4378 // SwitchInst Implementation
4379 //===----------------------------------------------------------------------===//
4380 
4381 void SwitchInst::init(Value *Value, BasicBlock *Default, unsigned NumReserved) {
4382  assert(Value && Default && NumReserved);
4383  ReservedSpace = NumReserved;
4384  setNumHungOffUseOperands(2);
4385  allocHungoffUses(ReservedSpace);
4386 
4387  Op<0>() = Value;
4388  Op<1>() = Default;
4389 }
4390 
4391 /// SwitchInst ctor - Create a new switch instruction, specifying a value to
4392 /// switch on and a default destination. The number of additional cases can
4393 /// be specified here to make memory allocation more efficient. This
4394