//===- Instructions.cpp - Implement the LLVM instructions ----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements all of the non-inline methods for the LLVM instruction
// classes.
//
//===----------------------------------------------------------------------===//

#include "llvm/IR/Instructions.h"
#include "LLVMContextImpl.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Twine.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/ProfDataUtils.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/ModRef.h"
#include "llvm/Support/TypeSize.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <optional>
#include <vector>

using namespace llvm;

static cl::opt<bool> DisableI2pP2iOpt(
    "disable-i2p-p2i-opt", cl::init(false),
    cl::desc("Disables inttoptr/ptrtoint roundtrip optimization"));
55
56//===----------------------------------------------------------------------===//
57// AllocaInst Class
58//===----------------------------------------------------------------------===//
59
60std::optional<TypeSize>
62 TypeSize Size = DL.getTypeAllocSize(getAllocatedType());
63 if (isArrayAllocation()) {
64 auto *C = dyn_cast<ConstantInt>(getArraySize());
65 if (!C)
66 return std::nullopt;
67 assert(!Size.isScalable() && "Array elements cannot have a scalable size");
68 Size *= C->getZExtValue();
69 }
70 return Size;
71}
72
73std::optional<TypeSize>
75 std::optional<TypeSize> Size = getAllocationSize(DL);
76 if (Size)
77 return *Size * 8;
78 return std::nullopt;
79}
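
// Editor's note: an illustrative (hypothetical) use of the two queries above,
// not part of the original file. `AI` is an AllocaInst and `DL` the owning
// module's DataLayout; the result is std::nullopt whenever the array size is
// not a ConstantInt.
//
//   if (std::optional<TypeSize> Size = AI->getAllocationSize(DL))
//     if (!Size->isScalable() && Size->getFixedValue() <= 64)
//       ; // small allocation with a statically known size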

//===----------------------------------------------------------------------===//
// SelectInst Class
//===----------------------------------------------------------------------===//

/// areInvalidOperands - Return a string if the specified operands are invalid
/// for a select operation, otherwise return null.
const char *SelectInst::areInvalidOperands(Value *Op0, Value *Op1, Value *Op2) {
  if (Op1->getType() != Op2->getType())
    return "both values to select must have same type";

  if (Op1->getType()->isTokenTy())
    return "select values cannot have token type";

  if (VectorType *VT = dyn_cast<VectorType>(Op0->getType())) {
    // Vector select.
    if (VT->getElementType() != Type::getInt1Ty(Op0->getContext()))
      return "vector select condition element type must be i1";
    VectorType *ET = dyn_cast<VectorType>(Op1->getType());
    if (!ET)
      return "selected values for vector select must be vectors";
    if (ET->getElementCount() != VT->getElementCount())
      return "vector select requires selected vectors to have "
             "the same vector length as select condition";
  } else if (Op0->getType() != Type::getInt1Ty(Op0->getContext())) {
    return "select condition must be i1 or <n x i1>";
  }
  return nullptr;
}
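
// Editor's note: a sketch (not from this file) of how a caller might use
// areInvalidOperands before building a select; `C`, `X`, `Y`, and `InsertPt`
// are hypothetical.
//
//   if (const char *Err = SelectInst::areInvalidOperands(C, X, Y))
//     report_fatal_error(Twine("invalid select operands: ") + Err);
//   Value *Sel = SelectInst::Create(C, X, Y, "sel", InsertPt);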

//===----------------------------------------------------------------------===//
// PHINode Class
//===----------------------------------------------------------------------===//

PHINode::PHINode(const PHINode &PN)
    : Instruction(PN.getType(), Instruction::PHI, nullptr, PN.getNumOperands()),
      ReservedSpace(PN.getNumOperands()) {
  allocHungoffUses(PN.getNumOperands());
  std::copy(PN.op_begin(), PN.op_end(), op_begin());
  copyIncomingBlocks(make_range(PN.block_begin(), PN.block_end()));
  SubclassOptionalData = PN.SubclassOptionalData;
}

// removeIncomingValue - Remove an incoming value. This is useful if a
// predecessor basic block is deleted.
Value *PHINode::removeIncomingValue(unsigned Idx, bool DeletePHIIfEmpty) {
  Value *Removed = getIncomingValue(Idx);

  // Move everything after this operand down.
  //
  // FIXME: we could just swap with the end of the list, then erase. However,
  // clients might not expect this to happen. The code as it is thrashes the
  // use/def lists, which is kinda lame.
  std::copy(op_begin() + Idx + 1, op_end(), op_begin() + Idx);
  copyIncomingBlocks(drop_begin(blocks(), Idx + 1), Idx);

  // Nuke the last value.
  Op<-1>().set(nullptr);
  setNumHungOffUseOperands(getNumOperands() - 1);

  // If the PHI node is dead, because it has zero entries, nuke it now.
  if (getNumOperands() == 0 && DeletePHIIfEmpty) {
    // If anyone is using this PHI, make them use a dummy value instead...
    replaceAllUsesWith(PoisonValue::get(getType()));
    eraseFromParent();
  }
  return Removed;
}
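
// Editor's note: typical (sketched) client usage when a predecessor `Pred` of
// block `Succ` is being deleted; the early-inc range protects the iteration
// against a phi erasing itself via DeletePHIIfEmpty.
//
//   for (PHINode &PN : make_early_inc_range(Succ->phis()))
//     PN.removeIncomingValue(Pred, /*DeletePHIIfEmpty=*/true);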

void PHINode::removeIncomingValueIf(function_ref<bool(unsigned)> Predicate,
                                    bool DeletePHIIfEmpty) {
  SmallDenseSet<unsigned> RemoveIndices;
  for (unsigned Idx = 0; Idx < getNumIncomingValues(); ++Idx)
    if (Predicate(Idx))
      RemoveIndices.insert(Idx);

  if (RemoveIndices.empty())
    return;

  // Remove operands.
  auto NewOpEnd = remove_if(operands(), [&](Use &U) {
    return RemoveIndices.contains(U.getOperandNo());
  });
  for (Use &U : make_range(NewOpEnd, op_end()))
    U.set(nullptr);

  // Remove incoming blocks.
  (void)std::remove_if(const_cast<block_iterator>(block_begin()),
                       const_cast<block_iterator>(block_end()),
                       [&](BasicBlock *&BB) {
                         return RemoveIndices.contains(&BB - block_begin());
                       });

  setNumHungOffUseOperands(getNumOperands() - RemoveIndices.size());

  // If the PHI node is dead, because it has zero entries, nuke it now.
  if (getNumOperands() == 0 && DeletePHIIfEmpty) {
    // If anyone is using this PHI, make them use a dummy value instead...
    replaceAllUsesWith(PoisonValue::get(getType()));
    eraseFromParent();
  }
}

/// growOperands - grow operands - This grows the operand list in response
/// to a push_back style of operation. This grows the number of ops by 1.5
/// times.
///
void PHINode::growOperands() {
  unsigned e = getNumOperands();
  unsigned NumOps = e + e / 2;
  if (NumOps < 2) NumOps = 2; // 2 op PHI nodes are VERY common.

  ReservedSpace = NumOps;
  growHungoffUses(ReservedSpace, /* IsPhi */ true);
}

/// hasConstantValue - If the specified PHI node always merges together the same
/// value, return the value, otherwise return null.
Value *PHINode::hasConstantValue() const {
  // Exploit the fact that phi nodes always have at least one entry.
  Value *ConstantValue = getIncomingValue(0);
  for (unsigned i = 1, e = getNumIncomingValues(); i != e; ++i)
    if (getIncomingValue(i) != ConstantValue && getIncomingValue(i) != this) {
      if (ConstantValue != this)
        return nullptr; // Incoming values not all the same.
      // The case where the first value is this PHI.
      ConstantValue = getIncomingValue(i);
    }
  if (ConstantValue == this)
    return UndefValue::get(getType());
  return ConstantValue;
}
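
// Editor's note: an illustrative fold built on hasConstantValue. Since every
// incoming edge carries the same value `V`, `V` is available wherever the phi
// is, so the phi can simply be replaced (sketch):
//
//   if (Value *V = PN->hasConstantValue()) {
//     PN->replaceAllUsesWith(V);
//     PN->eraseFromParent();
//   }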

/// hasConstantOrUndefValue - Whether the specified PHI node always merges
/// together the same value, assuming that undefs result in the same value as
/// non-undefs.
/// Unlike \ref hasConstantValue, this does not return a value because the
/// unique non-undef incoming value need not dominate the PHI node.
bool PHINode::hasConstantOrUndefValue() const {
  Value *ConstantValue = nullptr;
  for (unsigned i = 0, e = getNumIncomingValues(); i != e; ++i) {
    Value *Incoming = getIncomingValue(i);
    if (Incoming != this && !isa<UndefValue>(Incoming)) {
      if (ConstantValue && ConstantValue != Incoming)
        return false;
      ConstantValue = Incoming;
    }
  }
  return true;
}

//===----------------------------------------------------------------------===//
// LandingPadInst Implementation
//===----------------------------------------------------------------------===//

LandingPadInst::LandingPadInst(Type *RetTy, unsigned NumReservedValues,
                               const Twine &NameStr,
                               BasicBlock::iterator InsertBefore)
    : Instruction(RetTy, Instruction::LandingPad, nullptr, 0, InsertBefore) {
  init(NumReservedValues, NameStr);
}

LandingPadInst::LandingPadInst(Type *RetTy, unsigned NumReservedValues,
                               const Twine &NameStr, Instruction *InsertBefore)
    : Instruction(RetTy, Instruction::LandingPad, nullptr, 0, InsertBefore) {
  init(NumReservedValues, NameStr);
}

LandingPadInst::LandingPadInst(Type *RetTy, unsigned NumReservedValues,
                               const Twine &NameStr, BasicBlock *InsertAtEnd)
    : Instruction(RetTy, Instruction::LandingPad, nullptr, 0, InsertAtEnd) {
  init(NumReservedValues, NameStr);
}

LandingPadInst::LandingPadInst(const LandingPadInst &LP)
    : Instruction(LP.getType(), Instruction::LandingPad, nullptr,
                  LP.getNumOperands()),
      ReservedSpace(LP.getNumOperands()) {
  allocHungoffUses(LP.getNumOperands());
  Use *OL = getOperandList();
  const Use *InOL = LP.getOperandList();
  for (unsigned I = 0, E = ReservedSpace; I != E; ++I)
    OL[I] = InOL[I];

  setCleanup(LP.isCleanup());
}

LandingPadInst *LandingPadInst::Create(Type *RetTy, unsigned NumReservedClauses,
                                       const Twine &NameStr,
                                       Instruction *InsertBefore) {
  return new LandingPadInst(RetTy, NumReservedClauses, NameStr, InsertBefore);
}

LandingPadInst *LandingPadInst::Create(Type *RetTy, unsigned NumReservedClauses,
                                       const Twine &NameStr,
                                       BasicBlock *InsertAtEnd) {
  return new LandingPadInst(RetTy, NumReservedClauses, NameStr, InsertAtEnd);
}

void LandingPadInst::init(unsigned NumReservedValues, const Twine &NameStr) {
  ReservedSpace = NumReservedValues;
  setNumHungOffUseOperands(0);
  allocHungoffUses(ReservedSpace);
  setName(NameStr);
  setCleanup(false);
}

/// growOperands - grow operands - This grows the operand list in response to a
/// push_back style of operation. This grows the number of ops by 2 times.
void LandingPadInst::growOperands(unsigned Size) {
  unsigned e = getNumOperands();
  if (ReservedSpace >= e + Size) return;
  ReservedSpace = (std::max(e, 1U) + Size / 2) * 2;
  growHungoffUses(ReservedSpace);
}

void LandingPadInst::addClause(Constant *Val) {
  unsigned OpNo = getNumOperands();
  growOperands(1);
  assert(OpNo < ReservedSpace && "Growing didn't work!");
  setNumHungOffUseOperands(getNumOperands() + 1);
  getOperandList()[OpNo] = Val;
}
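
// Editor's note: a sketched use of the clause API above; `ExnTy` is the
// personality's landingpad result type (e.g. { ptr, i32 }) and `TypeInfo` a
// type-info global, both hypothetical here.
//
//   LandingPadInst *LP =
//       LandingPadInst::Create(ExnTy, /*NumReservedClauses=*/1, "lpad", BB);
//   LP->addClause(TypeInfo); // catch clause for one exception type
//   LP->setCleanup(true);    // additionally run cleanup actions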

//===----------------------------------------------------------------------===//
// CallBase Implementation
//===----------------------------------------------------------------------===//

CallBase *CallBase::Create(CallBase *CB, ArrayRef<OperandBundleDef> Bundles,
                           BasicBlock::iterator InsertPt) {
  switch (CB->getOpcode()) {
  case Instruction::Call:
    return CallInst::Create(cast<CallInst>(CB), Bundles, InsertPt);
  case Instruction::Invoke:
    return InvokeInst::Create(cast<InvokeInst>(CB), Bundles, InsertPt);
  case Instruction::CallBr:
    return CallBrInst::Create(cast<CallBrInst>(CB), Bundles, InsertPt);
  default:
    llvm_unreachable("Unknown CallBase sub-class!");
  }
}

CallBase *CallBase::Create(CallBase *CB, ArrayRef<OperandBundleDef> Bundles,
                           Instruction *InsertPt) {
  switch (CB->getOpcode()) {
  case Instruction::Call:
    return CallInst::Create(cast<CallInst>(CB), Bundles, InsertPt);
  case Instruction::Invoke:
    return InvokeInst::Create(cast<InvokeInst>(CB), Bundles, InsertPt);
  case Instruction::CallBr:
    return CallBrInst::Create(cast<CallBrInst>(CB), Bundles, InsertPt);
  default:
    llvm_unreachable("Unknown CallBase sub-class!");
  }
}

CallBase *CallBase::Create(CallBase *CI, OperandBundleDef OpB,
                           Instruction *InsertPt) {
  SmallVector<OperandBundleDef, 2> OpDefs;
  for (unsigned i = 0, e = CI->getNumOperandBundles(); i < e; ++i) {
    auto ChildOB = CI->getOperandBundleAt(i);
    if (ChildOB.getTagName() != OpB.getTag())
      OpDefs.emplace_back(ChildOB);
  }
  OpDefs.emplace_back(OpB);
  return CallBase::Create(CI, OpDefs, InsertPt);
}
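
// Editor's note: because the single-bundle overload above drops any existing
// bundle with the same tag before appending, re-attaching a bundle is one
// call. A sketch, with `DeoptArgs` a hypothetical SmallVector<Value *>:
//
//   CallBase *NewCB =
//       CallBase::Create(CB, OperandBundleDef("deopt", DeoptArgs), CB);
//
// The original call is left in place; callers usually RAUW and erase it.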

Function *CallBase::getCaller() { return getParent()->getParent(); }

unsigned CallBase::getNumSubclassExtraOperandsDynamic() const {
  assert(getOpcode() == Instruction::CallBr && "Unexpected opcode!");
  return cast<CallBrInst>(this)->getNumIndirectDests() + 1;
}

bool CallBase::isIndirectCall() const {
  const Value *V = getCalledOperand();
  if (isa<Function>(V) || isa<Constant>(V))
    return false;
  return !isInlineAsm();
}

/// Tests if this call site must be tail call optimized. Only a CallInst can
/// be tail call optimized.
bool CallBase::isMustTailCall() const {
  if (auto *CI = dyn_cast<CallInst>(this))
    return CI->isMustTailCall();
  return false;
}

/// Tests if this call site is marked as a tail call.
bool CallBase::isTailCall() const {
  if (auto *CI = dyn_cast<CallInst>(this))
    return CI->isTailCall();
  return false;
}

Intrinsic::ID CallBase::getIntrinsicID() const {
  if (auto *F = getCalledFunction())
    return F->getIntrinsicID();
  return Intrinsic::not_intrinsic;
}

FPClassTest CallBase::getRetNoFPClass() const {
  FPClassTest Mask = Attrs.getRetNoFPClass();

  if (const Function *F = getCalledFunction())
    Mask |= F->getAttributes().getRetNoFPClass();
  return Mask;
}

FPClassTest CallBase::getParamNoFPClass(unsigned i) const {
  FPClassTest Mask = Attrs.getParamNoFPClass(i);

  if (const Function *F = getCalledFunction())
    Mask |= F->getAttributes().getParamNoFPClass(i);
  return Mask;
}

std::optional<ConstantRange> CallBase::getRange() const {
  const Attribute RangeAttr = getRetAttr(llvm::Attribute::Range);
  if (RangeAttr.isValid())
    return RangeAttr.getRange();
  return std::nullopt;
}

bool CallBase::isReturnNonNull() const {
  if (hasRetAttr(Attribute::NonNull))
    return true;

  if (getRetDereferenceableBytes() > 0 &&
      !NullPointerIsDefined(getCaller(), getType()->getPointerAddressSpace()))
    return true;

  return false;
}

Value *CallBase::getArgOperandWithAttribute(Attribute::AttrKind Kind) const {
  unsigned Index;

  if (Attrs.hasAttrSomewhere(Kind, &Index))
    return getArgOperand(Index - AttributeList::FirstArgIndex);
  if (const Function *F = getCalledFunction())
    if (F->getAttributes().hasAttrSomewhere(Kind, &Index))
      return getArgOperand(Index - AttributeList::FirstArgIndex);

  return nullptr;
}

/// Determine whether the argument or parameter has the given attribute.
bool CallBase::paramHasAttr(unsigned ArgNo, Attribute::AttrKind Kind) const {
  assert(ArgNo < arg_size() && "Param index out of bounds!");

  if (Attrs.hasParamAttr(ArgNo, Kind))
    return true;

  const Function *F = getCalledFunction();
  if (!F)
    return false;

  if (!F->getAttributes().hasParamAttr(ArgNo, Kind))
    return false;

  // Take into account mod/ref by operand bundles.
  switch (Kind) {
  case Attribute::ReadNone:
    return !hasReadingOperandBundles() && !hasClobberingOperandBundles();
  case Attribute::ReadOnly:
    return !hasClobberingOperandBundles();
  case Attribute::WriteOnly:
    return !hasReadingOperandBundles();
  default:
    return true;
  }
}
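
// Editor's note: paramHasAttr is the query to use when a pass wants
// "attribute present on the call site or on the callee", e.g. (sketch):
//
//   if (CB->paramHasAttr(ArgNo, Attribute::NonNull))
//     ; // the ArgNo'th argument is known non-null at this call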

bool CallBase::hasFnAttrOnCalledFunction(Attribute::AttrKind Kind) const {
  if (auto *F = dyn_cast<Function>(getCalledOperand()))
    return F->getAttributes().hasFnAttr(Kind);

  return false;
}

bool CallBase::hasFnAttrOnCalledFunction(StringRef Kind) const {
  if (auto *F = dyn_cast<Function>(getCalledOperand()))
    return F->getAttributes().hasFnAttr(Kind);

  return false;
}

template <typename AK>
Attribute CallBase::getFnAttrOnCalledFunction(AK Kind) const {
  if constexpr (std::is_same_v<AK, Attribute::AttrKind>) {
    // getMemoryEffects() correctly combines memory effects from the call-site,
    // operand bundles and function.
    assert(Kind != Attribute::Memory && "Use getMemoryEffects() instead");
  }

  if (auto *F = dyn_cast<Function>(getCalledOperand()))
    return F->getAttributes().getFnAttr(Kind);

  return Attribute();
}

template Attribute
CallBase::getFnAttrOnCalledFunction(Attribute::AttrKind Kind) const;
template Attribute CallBase::getFnAttrOnCalledFunction(StringRef Kind) const;

template <typename AK>
Attribute CallBase::getParamAttrOnCalledFunction(unsigned ArgNo,
                                                 AK Kind) const {
  Value *V = getCalledOperand();

  if (auto *F = dyn_cast<Function>(V))
    return F->getAttributes().getParamAttr(ArgNo, Kind);

  return Attribute();
}
template Attribute
CallBase::getParamAttrOnCalledFunction(unsigned ArgNo,
                                       Attribute::AttrKind Kind) const;
template Attribute CallBase::getParamAttrOnCalledFunction(unsigned ArgNo,
                                                          StringRef Kind) const;

void CallBase::getOperandBundlesAsDefs(
    SmallVectorImpl<OperandBundleDef> &Defs) const {
  for (unsigned i = 0, e = getNumOperandBundles(); i != e; ++i)
    Defs.emplace_back(getOperandBundleAt(i));
}

CallBase::op_iterator
CallBase::populateBundleOperandInfos(ArrayRef<OperandBundleDef> Bundles,
                                     const unsigned BeginIndex) {
  auto It = op_begin() + BeginIndex;
  for (auto &B : Bundles)
    It = std::copy(B.input_begin(), B.input_end(), It);

  auto *ContextImpl = getContext().pImpl;
  auto BI = Bundles.begin();
  unsigned CurrentIndex = BeginIndex;

  for (auto &BOI : bundle_op_infos()) {
    assert(BI != Bundles.end() && "Incorrect allocation?");

    BOI.Tag = ContextImpl->getOrInsertBundleTag(BI->getTag());
    BOI.Begin = CurrentIndex;
    BOI.End = CurrentIndex + BI->input_size();
    CurrentIndex = BOI.End;
    BI++;
  }

  assert(BI == Bundles.end() && "Incorrect allocation?");

  return It;
}
CallBase::BundleOpInfo &CallBase::getBundleOpInfoForOperand(unsigned OpIdx) {
  /// When there aren't many bundles, we do a simple linear search.
  /// Otherwise, fall back to a binary search that exploits the fact that
  /// bundles usually have a similar number of arguments, for faster
  /// convergence.
  if (bundle_op_info_end() - bundle_op_info_begin() < 8) {
    for (auto &BOI : bundle_op_infos())
      if (BOI.Begin <= OpIdx && OpIdx < BOI.End)
        return BOI;

    llvm_unreachable("Did not find operand bundle for operand!");
  }

  assert(OpIdx >= arg_size() && "the Idx is not in the operand bundles");
  assert(bundle_op_info_end() - bundle_op_info_begin() > 0 &&
         OpIdx < std::prev(bundle_op_info_end())->End &&
         "The Idx isn't in the operand bundle");

  /// We need a decimal number below, and to prevent using floating point
  /// numbers we use an integral value multiplied by this constant.
  constexpr unsigned NumberScaling = 1024;

  bundle_op_iterator Begin = bundle_op_info_begin();
  bundle_op_iterator End = bundle_op_info_end();
  bundle_op_iterator Current = Begin;

  while (Begin != End) {
    unsigned ScaledOperandPerBundle =
        NumberScaling * (std::prev(End)->End - Begin->Begin) / (End - Begin);
    Current = Begin + (((OpIdx - Begin->Begin) * NumberScaling) /
                       ScaledOperandPerBundle);
    if (Current >= End)
      Current = std::prev(End);
    assert(Current < End && Current >= Begin &&
           "the operand bundle doesn't cover every value in the range");
    if (OpIdx >= Current->Begin && OpIdx < Current->End)
      break;
    if (OpIdx >= Current->End)
      Begin = Current + 1;
    else
      End = Current;
  }

  assert(OpIdx >= Current->Begin && OpIdx < Current->End &&
         "the operand bundle doesn't cover every value in the range");
  return *Current;
}
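
// Editor's worked example for the search above: with bundles covering operand
// ranges [10,14), [14,18), [18,22) and OpIdx = 19, ScaledOperandPerBundle is
// 1024 * (22 - 10) / 3 = 4096, so the probe lands at
// Begin + (19 - 10) * 1024 / 4096 = Begin + 2, whose range [18,22) already
// contains OpIdx. Uniformly sized bundles converge in one probe; skewed sizes
// degrade gracefully into the ordinary [Begin, End) narrowing of a binary
// search.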

CallBase *CallBase::addOperandBundle(CallBase *CB, uint32_t ID,
                                     OperandBundleDef OB,
                                     BasicBlock::iterator InsertPt) {
  if (CB->getOperandBundle(ID))
    return CB;

  SmallVector<OperandBundleDef, 1> Bundles;
  CB->getOperandBundlesAsDefs(Bundles);
  Bundles.push_back(OB);
  return Create(CB, Bundles, InsertPt);
}
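
// Editor's note: a sketched use of addOperandBundle; `TypeId` is a
// hypothetical i32 constant for a kcfi type hash.
//
//   SmallVector<Value *, 1> BundleArgs = {TypeId};
//   CB = CallBase::addOperandBundle(CB, LLVMContext::OB_kcfi,
//                                   OperandBundleDef("kcfi", BundleArgs),
//                                   CB->getIterator());
//
// If the call already carries a bundle with this ID it is returned unchanged;
// otherwise a new call is materialized at the insertion point.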

CallBase *CallBase::addOperandBundle(CallBase *CB, uint32_t ID,
                                     OperandBundleDef OB,
                                     Instruction *InsertPt) {
  if (CB->getOperandBundle(ID))
    return CB;

  SmallVector<OperandBundleDef, 1> Bundles;
  CB->getOperandBundlesAsDefs(Bundles);
  Bundles.push_back(OB);
  return Create(CB, Bundles, InsertPt);
}

CallBase *CallBase::removeOperandBundle(CallBase *CB, uint32_t ID,
                                        BasicBlock::iterator InsertPt) {
  SmallVector<OperandBundleDef, 1> Bundles;
  bool CreateNew = false;

  for (unsigned I = 0, E = CB->getNumOperandBundles(); I != E; ++I) {
    auto Bundle = CB->getOperandBundleAt(I);
    if (Bundle.getTagID() == ID) {
      CreateNew = true;
      continue;
    }
    Bundles.emplace_back(Bundle);
  }

  return CreateNew ? Create(CB, Bundles, InsertPt) : CB;
}

CallBase *CallBase::removeOperandBundle(CallBase *CB, uint32_t ID,
                                        Instruction *InsertPt) {
  SmallVector<OperandBundleDef, 1> Bundles;
  bool CreateNew = false;

  for (unsigned I = 0, E = CB->getNumOperandBundles(); I != E; ++I) {
    auto Bundle = CB->getOperandBundleAt(I);
    if (Bundle.getTagID() == ID) {
      CreateNew = true;
      continue;
    }
    Bundles.emplace_back(Bundle);
  }

  return CreateNew ? Create(CB, Bundles, InsertPt) : CB;
}

bool CallBase::hasReadingOperandBundles() const {
  // Implementation note: this is a conservative implementation of operand
  // bundle semantics, where *any* non-assume operand bundle (other than
  // ptrauth) forces a callsite to be at least readonly.
  return hasOperandBundlesOtherThan(
             {LLVMContext::OB_ptrauth, LLVMContext::OB_kcfi}) &&
         getIntrinsicID() != Intrinsic::assume;
}

bool CallBase::hasClobberingOperandBundles() const {
  return hasOperandBundlesOtherThan(
             {LLVMContext::OB_deopt, LLVMContext::OB_funclet,
              LLVMContext::OB_ptrauth, LLVMContext::OB_kcfi}) &&
         getIntrinsicID() != Intrinsic::assume;
}

MemoryEffects CallBase::getMemoryEffects() const {
  MemoryEffects ME = getAttributes().getMemoryEffects();
  if (auto *Fn = dyn_cast<Function>(getCalledOperand())) {
    MemoryEffects FnME = Fn->getMemoryEffects();
    if (hasOperandBundles()) {
      // TODO: Add a method to get memory effects for operand bundles instead.
      if (hasReadingOperandBundles())
        FnME |= MemoryEffects::readOnly();
      if (hasClobberingOperandBundles())
        FnME |= MemoryEffects::writeOnly();
    }
    ME &= FnME;
  }
  return ME;
}
void CallBase::setMemoryEffects(MemoryEffects ME) {
  addFnAttr(Attribute::getWithMemoryEffects(getContext(), ME));
}
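
// Editor's note: a sketch of querying and then tightening a call's effects;
// the intersection with argMemOnly() mirrors how the setters below compose.
//
//   MemoryEffects ME = CB->getMemoryEffects();
//   if (ME.onlyReadsMemory())
//     ; // safe to treat the call as read-only
//   CB->setMemoryEffects(ME & MemoryEffects::argMemOnly());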

/// Determine if the function does not access memory.
bool CallBase::doesNotAccessMemory() const {
  return getMemoryEffects().doesNotAccessMemory();
}
void CallBase::setDoesNotAccessMemory() {
  setMemoryEffects(MemoryEffects::none());
}

/// Determine if the function does not access or only reads memory.
bool CallBase::onlyReadsMemory() const {
  return getMemoryEffects().onlyReadsMemory();
}
void CallBase::setOnlyReadsMemory() {
  setMemoryEffects(getMemoryEffects() & MemoryEffects::readOnly());
}

/// Determine if the function does not access or only writes memory.
bool CallBase::onlyWritesMemory() const {
  return getMemoryEffects().onlyWritesMemory();
}
void CallBase::setOnlyWritesMemory() {
  setMemoryEffects(getMemoryEffects() & MemoryEffects::writeOnly());
}

/// Determine if the call can access memory only using pointers based
/// on its arguments.
bool CallBase::onlyAccessesArgMemory() const {
  return getMemoryEffects().onlyAccessesArgPointees();
}
void CallBase::setOnlyAccessesArgMemory() {
  setMemoryEffects(getMemoryEffects() & MemoryEffects::argMemOnly());
}

/// Determine if the function may only access memory that is
/// inaccessible from the IR.
bool CallBase::onlyAccessesInaccessibleMemory() const {
  return getMemoryEffects().onlyAccessesInaccessibleMem();
}
void CallBase::setOnlyAccessesInaccessibleMemory() {
  setMemoryEffects(getMemoryEffects() & MemoryEffects::inaccessibleMemOnly());
}

/// Determine if the function may only access memory that is
/// either inaccessible from the IR or pointed to by its arguments.
bool CallBase::onlyAccessesInaccessibleMemOrArgMem() const {
  return getMemoryEffects().onlyAccessesInaccessibleOrArgMem();
}
void CallBase::setOnlyAccessesInaccessibleMemOrArgMem() {
  setMemoryEffects(getMemoryEffects() &
                   MemoryEffects::inaccessibleOrArgMemOnly());
}

//===----------------------------------------------------------------------===//
// CallInst Implementation
//===----------------------------------------------------------------------===//

void CallInst::init(FunctionType *FTy, Value *Func, ArrayRef<Value *> Args,
                    ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr) {
  this->FTy = FTy;
  assert(getNumOperands() == Args.size() + CountBundleInputs(Bundles) + 1 &&
         "NumOperands not set up?");

#ifndef NDEBUG
  assert((Args.size() == FTy->getNumParams() ||
          (FTy->isVarArg() && Args.size() > FTy->getNumParams())) &&
         "Calling a function with bad signature!");

  for (unsigned i = 0; i != Args.size(); ++i)
    assert((i >= FTy->getNumParams() ||
            FTy->getParamType(i) == Args[i]->getType()) &&
           "Calling a function with a bad signature!");
#endif

  // Set operands in order of their index to match use-list-order
  // prediction.
  llvm::copy(Args, op_begin());
  setCalledOperand(Func);

  auto It = populateBundleOperandInfos(Bundles, Args.size());
  (void)It;
  assert(It + 1 == op_end() && "Should add up!");

  setName(NameStr);
}

void CallInst::init(FunctionType *FTy, Value *Func, const Twine &NameStr) {
  this->FTy = FTy;
  assert(getNumOperands() == 1 && "NumOperands not set up?");
  setCalledOperand(Func);

  assert(FTy->getNumParams() == 0 && "Calling a function with bad signature");

  setName(NameStr);
}

CallInst::CallInst(FunctionType *Ty, Value *Func, const Twine &Name,
                   BasicBlock::iterator InsertBefore)
    : CallBase(Ty->getReturnType(), Instruction::Call,
               OperandTraits<CallBase>::op_end(this) - 1, 1, InsertBefore) {
  init(Ty, Func, Name);
}

CallInst::CallInst(FunctionType *Ty, Value *Func, const Twine &Name,
                   Instruction *InsertBefore)
    : CallBase(Ty->getReturnType(), Instruction::Call,
               OperandTraits<CallBase>::op_end(this) - 1, 1, InsertBefore) {
  init(Ty, Func, Name);
}

CallInst::CallInst(FunctionType *Ty, Value *Func, const Twine &Name,
                   BasicBlock *InsertAtEnd)
    : CallBase(Ty->getReturnType(), Instruction::Call,
               OperandTraits<CallBase>::op_end(this) - 1, 1, InsertAtEnd) {
  init(Ty, Func, Name);
}

CallInst::CallInst(const CallInst &CI)
    : CallBase(CI.Attrs, CI.FTy, CI.getType(), Instruction::Call,
               OperandTraits<CallBase>::op_end(this) - CI.getNumOperands(),
               CI.getNumOperands()) {
  setTailCallKind(CI.getTailCallKind());
  setCallingConv(CI.getCallingConv());
  std::copy(CI.op_begin(), CI.op_end(), op_begin());
  std::copy(CI.bundle_op_info_begin(), CI.bundle_op_info_end(),
            bundle_op_info_begin());
  SubclassOptionalData = CI.SubclassOptionalData;
}

CallInst *CallInst::Create(CallInst *CI, ArrayRef<OperandBundleDef> OpB,
                           BasicBlock::iterator InsertPt) {
  std::vector<Value *> Args(CI->arg_begin(), CI->arg_end());

  auto *NewCI = CallInst::Create(CI->getFunctionType(), CI->getCalledOperand(),
                                 Args, OpB, CI->getName(), InsertPt);
  NewCI->setTailCallKind(CI->getTailCallKind());
  NewCI->setCallingConv(CI->getCallingConv());
  NewCI->SubclassOptionalData = CI->SubclassOptionalData;
  NewCI->setAttributes(CI->getAttributes());
  NewCI->setDebugLoc(CI->getDebugLoc());
  return NewCI;
}

CallInst *CallInst::Create(CallInst *CI, ArrayRef<OperandBundleDef> OpB,
                           Instruction *InsertPt) {
  std::vector<Value *> Args(CI->arg_begin(), CI->arg_end());

  auto *NewCI = CallInst::Create(CI->getFunctionType(), CI->getCalledOperand(),
                                 Args, OpB, CI->getName(), InsertPt);
  NewCI->setTailCallKind(CI->getTailCallKind());
  NewCI->setCallingConv(CI->getCallingConv());
  NewCI->SubclassOptionalData = CI->SubclassOptionalData;
  NewCI->setAttributes(CI->getAttributes());
  NewCI->setDebugLoc(CI->getDebugLoc());
  return NewCI;
}

// Update the profile weight for a call instruction by scaling it using the
// ratio S/T. The "branch_weights" metadata on a call instruction is
// reinterpreted to represent the call count.
void CallInst::updateProfWeight(uint64_t S, uint64_t T) {
  if (T == 0) {
    LLVM_DEBUG(dbgs() << "Attempting to update profile weights will result in "
                         "div by 0. Ignoring. Likely the function "
                      << getParent()->getParent()->getName()
                      << " has 0 entry count, and contains call instructions "
                         "with non-zero prof info.");
    return;
  }
  scaleProfData(*this, S, T);
}
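
// Editor's worked example: if profiling recorded T = 100 executions of the
// enclosing function but inlining leaves this copy with S = 25, then
// updateProfWeight(25, 100) scales the call's "branch_weights" count to a
// quarter, keeping the call count consistent with the new entry count.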

//===----------------------------------------------------------------------===//
// InvokeInst Implementation
//===----------------------------------------------------------------------===//

void InvokeInst::init(FunctionType *FTy, Value *Fn, BasicBlock *IfNormal,
                      BasicBlock *IfException, ArrayRef<Value *> Args,
                      ArrayRef<OperandBundleDef> Bundles,
                      const Twine &NameStr) {
  this->FTy = FTy;

  assert((int)getNumOperands() ==
             ComputeNumOperands(Args.size(), CountBundleInputs(Bundles)) &&
         "NumOperands not set up?");

#ifndef NDEBUG
  assert(((Args.size() == FTy->getNumParams()) ||
          (FTy->isVarArg() && Args.size() > FTy->getNumParams())) &&
         "Invoking a function with bad signature");

  for (unsigned i = 0, e = Args.size(); i != e; i++)
    assert((i >= FTy->getNumParams() ||
            FTy->getParamType(i) == Args[i]->getType()) &&
           "Invoking a function with a bad signature!");
#endif

  // Set operands in order of their index to match use-list-order
  // prediction.
  llvm::copy(Args, op_begin());
  setNormalDest(IfNormal);
  setUnwindDest(IfException);
  setCalledOperand(Fn);

  auto It = populateBundleOperandInfos(Bundles, Args.size());
  (void)It;
  assert(It + 3 == op_end() && "Should add up!");

  setName(NameStr);
}

InvokeInst::InvokeInst(const InvokeInst &II)
    : CallBase(II.Attrs, II.FTy, II.getType(), Instruction::Invoke,
               OperandTraits<CallBase>::op_end(this) - II.getNumOperands(),
               II.getNumOperands()) {
  setCallingConv(II.getCallingConv());
  std::copy(II.op_begin(), II.op_end(), op_begin());
  std::copy(II.bundle_op_info_begin(), II.bundle_op_info_end(),
            bundle_op_info_begin());
  SubclassOptionalData = II.SubclassOptionalData;
}

InvokeInst *InvokeInst::Create(InvokeInst *II, ArrayRef<OperandBundleDef> OpB,
                               BasicBlock::iterator InsertPt) {
  std::vector<Value *> Args(II->arg_begin(), II->arg_end());

  auto *NewII = InvokeInst::Create(
      II->getFunctionType(), II->getCalledOperand(), II->getNormalDest(),
      II->getUnwindDest(), Args, OpB, II->getName(), InsertPt);
  NewII->setCallingConv(II->getCallingConv());
  NewII->SubclassOptionalData = II->SubclassOptionalData;
  NewII->setAttributes(II->getAttributes());
  NewII->setDebugLoc(II->getDebugLoc());
  return NewII;
}

InvokeInst *InvokeInst::Create(InvokeInst *II, ArrayRef<OperandBundleDef> OpB,
                               Instruction *InsertPt) {
  std::vector<Value *> Args(II->arg_begin(), II->arg_end());

  auto *NewII = InvokeInst::Create(
      II->getFunctionType(), II->getCalledOperand(), II->getNormalDest(),
      II->getUnwindDest(), Args, OpB, II->getName(), InsertPt);
  NewII->setCallingConv(II->getCallingConv());
  NewII->SubclassOptionalData = II->SubclassOptionalData;
  NewII->setAttributes(II->getAttributes());
  NewII->setDebugLoc(II->getDebugLoc());
  return NewII;
}

LandingPadInst *InvokeInst::getLandingPadInst() const {
  return cast<LandingPadInst>(getUnwindDest()->getFirstNonPHI());
}

void InvokeInst::updateProfWeight(uint64_t S, uint64_t T) {
  if (T == 0) {
    LLVM_DEBUG(dbgs() << "Attempting to update profile weights will result in "
                         "div by 0. Ignoring. Likely the function "
                      << getParent()->getParent()->getName()
                      << " has 0 entry count, and contains call instructions "
                         "with non-zero prof info.");
    return;
  }
  scaleProfData(*this, S, T);
}

//===----------------------------------------------------------------------===//
// CallBrInst Implementation
//===----------------------------------------------------------------------===//

void CallBrInst::init(FunctionType *FTy, Value *Fn, BasicBlock *Fallthrough,
                      ArrayRef<BasicBlock *> IndirectDests,
                      ArrayRef<Value *> Args,
                      ArrayRef<OperandBundleDef> Bundles,
                      const Twine &NameStr) {
  this->FTy = FTy;

  assert((int)getNumOperands() ==
             ComputeNumOperands(Args.size(), IndirectDests.size(),
                                CountBundleInputs(Bundles)) &&
         "NumOperands not set up?");

#ifndef NDEBUG
  assert(((Args.size() == FTy->getNumParams()) ||
          (FTy->isVarArg() && Args.size() > FTy->getNumParams())) &&
         "Calling a function with bad signature");

  for (unsigned i = 0, e = Args.size(); i != e; i++)
    assert((i >= FTy->getNumParams() ||
            FTy->getParamType(i) == Args[i]->getType()) &&
           "Calling a function with a bad signature!");
#endif

  // Set operands in order of their index to match use-list-order
  // prediction.
  std::copy(Args.begin(), Args.end(), op_begin());
  NumIndirectDests = IndirectDests.size();
  setDefaultDest(Fallthrough);
  for (unsigned i = 0; i != NumIndirectDests; ++i)
    setIndirectDest(i, IndirectDests[i]);
  setCalledOperand(Fn);

  auto It = populateBundleOperandInfos(Bundles, Args.size());
  (void)It;
  assert(It + 2 + IndirectDests.size() == op_end() && "Should add up!");

  setName(NameStr);
}

CallBrInst::CallBrInst(const CallBrInst &CBI)
    : CallBase(CBI.Attrs, CBI.FTy, CBI.getType(), Instruction::CallBr,
               OperandTraits<CallBase>::op_end(this) - CBI.getNumOperands(),
               CBI.getNumOperands()) {
  setCallingConv(CBI.getCallingConv());
  std::copy(CBI.op_begin(), CBI.op_end(), op_begin());
  std::copy(CBI.bundle_op_info_begin(), CBI.bundle_op_info_end(),
            bundle_op_info_begin());
  SubclassOptionalData = CBI.SubclassOptionalData;
  NumIndirectDests = CBI.NumIndirectDests;
}

CallBrInst *CallBrInst::Create(CallBrInst *CBI, ArrayRef<OperandBundleDef> OpB,
                               BasicBlock::iterator InsertPt) {
  std::vector<Value *> Args(CBI->arg_begin(), CBI->arg_end());

  auto *NewCBI = CallBrInst::Create(
      CBI->getFunctionType(), CBI->getCalledOperand(), CBI->getDefaultDest(),
      CBI->getIndirectDests(), Args, OpB, CBI->getName(), InsertPt);
  NewCBI->setCallingConv(CBI->getCallingConv());
  NewCBI->SubclassOptionalData = CBI->SubclassOptionalData;
  NewCBI->setAttributes(CBI->getAttributes());
  NewCBI->setDebugLoc(CBI->getDebugLoc());
  NewCBI->NumIndirectDests = CBI->NumIndirectDests;
  return NewCBI;
}

CallBrInst *CallBrInst::Create(CallBrInst *CBI, ArrayRef<OperandBundleDef> OpB,
                               Instruction *InsertPt) {
  std::vector<Value *> Args(CBI->arg_begin(), CBI->arg_end());

  auto *NewCBI = CallBrInst::Create(
      CBI->getFunctionType(), CBI->getCalledOperand(), CBI->getDefaultDest(),
      CBI->getIndirectDests(), Args, OpB, CBI->getName(), InsertPt);
  NewCBI->setCallingConv(CBI->getCallingConv());
  NewCBI->SubclassOptionalData = CBI->SubclassOptionalData;
  NewCBI->setAttributes(CBI->getAttributes());
  NewCBI->setDebugLoc(CBI->getDebugLoc());
  NewCBI->NumIndirectDests = CBI->NumIndirectDests;
  return NewCBI;
}

//===----------------------------------------------------------------------===//
// ReturnInst Implementation
//===----------------------------------------------------------------------===//

ReturnInst::ReturnInst(const ReturnInst &RI)
    : Instruction(Type::getVoidTy(RI.getContext()), Instruction::Ret,
                  OperandTraits<ReturnInst>::op_end(this) - RI.getNumOperands(),
                  RI.getNumOperands()) {
  if (RI.getNumOperands())
    Op<0>() = RI.Op<0>();
  SubclassOptionalData = RI.SubclassOptionalData;
}

ReturnInst::ReturnInst(LLVMContext &C, Value *retVal,
                       BasicBlock::iterator InsertBefore)
    : Instruction(Type::getVoidTy(C), Instruction::Ret,
                  OperandTraits<ReturnInst>::op_end(this) - !!retVal, !!retVal,
                  InsertBefore) {
  if (retVal)
    Op<0>() = retVal;
}

ReturnInst::ReturnInst(LLVMContext &C, Value *retVal,
                       Instruction *InsertBefore)
    : Instruction(Type::getVoidTy(C), Instruction::Ret,
                  OperandTraits<ReturnInst>::op_end(this) - !!retVal, !!retVal,
                  InsertBefore) {
  if (retVal)
    Op<0>() = retVal;
}

ReturnInst::ReturnInst(LLVMContext &C, Value *retVal, BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(C), Instruction::Ret,
                  OperandTraits<ReturnInst>::op_end(this) - !!retVal, !!retVal,
                  InsertAtEnd) {
  if (retVal)
    Op<0>() = retVal;
}

ReturnInst::ReturnInst(LLVMContext &Context, BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(Context), Instruction::Ret,
                  OperandTraits<ReturnInst>::op_end(this), 0, InsertAtEnd) {}

//===----------------------------------------------------------------------===//
// ResumeInst Implementation
//===----------------------------------------------------------------------===//

ResumeInst::ResumeInst(const ResumeInst &RI)
    : Instruction(Type::getVoidTy(RI.getContext()), Instruction::Resume,
                  OperandTraits<ResumeInst>::op_begin(this), 1) {
  Op<0>() = RI.Op<0>();
}

ResumeInst::ResumeInst(Value *Exn, BasicBlock::iterator InsertBefore)
    : Instruction(Type::getVoidTy(Exn->getContext()), Instruction::Resume,
                  OperandTraits<ResumeInst>::op_begin(this), 1, InsertBefore) {
  Op<0>() = Exn;
}

ResumeInst::ResumeInst(Value *Exn, Instruction *InsertBefore)
    : Instruction(Type::getVoidTy(Exn->getContext()), Instruction::Resume,
                  OperandTraits<ResumeInst>::op_begin(this), 1, InsertBefore) {
  Op<0>() = Exn;
}

ResumeInst::ResumeInst(Value *Exn, BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(Exn->getContext()), Instruction::Resume,
                  OperandTraits<ResumeInst>::op_begin(this), 1, InsertAtEnd) {
  Op<0>() = Exn;
}

//===----------------------------------------------------------------------===//
// CleanupReturnInst Implementation
//===----------------------------------------------------------------------===//

CleanupReturnInst::CleanupReturnInst(const CleanupReturnInst &CRI)
    : Instruction(CRI.getType(), Instruction::CleanupRet,
                  OperandTraits<CleanupReturnInst>::op_end(this) -
                      CRI.getNumOperands(),
                  CRI.getNumOperands()) {
  setSubclassData<Instruction::OpaqueField>(
      CRI.getSubclassData<Instruction::OpaqueField>());
  Op<0>() = CRI.Op<0>();
  if (CRI.hasUnwindDest())
    Op<1>() = CRI.Op<1>();
}

void CleanupReturnInst::init(Value *CleanupPad, BasicBlock *UnwindBB) {
  if (UnwindBB)
    setSubclassData<UnwindDestField>(true);

  Op<0>() = CleanupPad;
  if (UnwindBB)
    Op<1>() = UnwindBB;
}

CleanupReturnInst::CleanupReturnInst(Value *CleanupPad, BasicBlock *UnwindBB,
                                     unsigned Values,
                                     BasicBlock::iterator InsertBefore)
    : Instruction(Type::getVoidTy(CleanupPad->getContext()),
                  Instruction::CleanupRet,
                  OperandTraits<CleanupReturnInst>::op_end(this) - Values,
                  Values, InsertBefore) {
  init(CleanupPad, UnwindBB);
}

CleanupReturnInst::CleanupReturnInst(Value *CleanupPad, BasicBlock *UnwindBB,
                                     unsigned Values, Instruction *InsertBefore)
    : Instruction(Type::getVoidTy(CleanupPad->getContext()),
                  Instruction::CleanupRet,
                  OperandTraits<CleanupReturnInst>::op_end(this) - Values,
                  Values, InsertBefore) {
  init(CleanupPad, UnwindBB);
}

CleanupReturnInst::CleanupReturnInst(Value *CleanupPad, BasicBlock *UnwindBB,
                                     unsigned Values, BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(CleanupPad->getContext()),
                  Instruction::CleanupRet,
                  OperandTraits<CleanupReturnInst>::op_end(this) - Values,
                  Values, InsertAtEnd) {
  init(CleanupPad, UnwindBB);
}

//===----------------------------------------------------------------------===//
// CatchReturnInst Implementation
//===----------------------------------------------------------------------===//
void CatchReturnInst::init(Value *CatchPad, BasicBlock *BB) {
  Op<0>() = CatchPad;
  Op<1>() = BB;
}

CatchReturnInst::CatchReturnInst(const CatchReturnInst &CRI)
    : Instruction(Type::getVoidTy(CRI.getContext()), Instruction::CatchRet,
                  OperandTraits<CatchReturnInst>::op_begin(this), 2) {
  Op<0>() = CRI.Op<0>();
  Op<1>() = CRI.Op<1>();
}

CatchReturnInst::CatchReturnInst(Value *CatchPad, BasicBlock *BB,
                                 BasicBlock::iterator InsertBefore)
    : Instruction(Type::getVoidTy(BB->getContext()), Instruction::CatchRet,
                  OperandTraits<CatchReturnInst>::op_begin(this), 2,
                  InsertBefore) {
  init(CatchPad, BB);
}

CatchReturnInst::CatchReturnInst(Value *CatchPad, BasicBlock *BB,
                                 Instruction *InsertBefore)
    : Instruction(Type::getVoidTy(BB->getContext()), Instruction::CatchRet,
                  OperandTraits<CatchReturnInst>::op_begin(this), 2,
                  InsertBefore) {
  init(CatchPad, BB);
}

CatchReturnInst::CatchReturnInst(Value *CatchPad, BasicBlock *BB,
                                 BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(BB->getContext()), Instruction::CatchRet,
                  OperandTraits<CatchReturnInst>::op_begin(this), 2,
                  InsertAtEnd) {
  init(CatchPad, BB);
}

//===----------------------------------------------------------------------===//
// CatchSwitchInst Implementation
//===----------------------------------------------------------------------===//

CatchSwitchInst::CatchSwitchInst(Value *ParentPad, BasicBlock *UnwindDest,
                                 unsigned NumReservedValues,
                                 const Twine &NameStr,
                                 BasicBlock::iterator InsertBefore)
    : Instruction(ParentPad->getType(), Instruction::CatchSwitch, nullptr, 0,
                  InsertBefore) {
  if (UnwindDest)
    ++NumReservedValues;
  init(ParentPad, UnwindDest, NumReservedValues + 1);
  setName(NameStr);
}

CatchSwitchInst::CatchSwitchInst(Value *ParentPad, BasicBlock *UnwindDest,
                                 unsigned NumReservedValues,
                                 const Twine &NameStr,
                                 Instruction *InsertBefore)
    : Instruction(ParentPad->getType(), Instruction::CatchSwitch, nullptr, 0,
                  InsertBefore) {
  if (UnwindDest)
    ++NumReservedValues;
  init(ParentPad, UnwindDest, NumReservedValues + 1);
  setName(NameStr);
}

CatchSwitchInst::CatchSwitchInst(Value *ParentPad, BasicBlock *UnwindDest,
                                 unsigned NumReservedValues,
                                 const Twine &NameStr, BasicBlock *InsertAtEnd)
    : Instruction(ParentPad->getType(), Instruction::CatchSwitch, nullptr, 0,
                  InsertAtEnd) {
  if (UnwindDest)
    ++NumReservedValues;
  init(ParentPad, UnwindDest, NumReservedValues + 1);
  setName(NameStr);
}

CatchSwitchInst::CatchSwitchInst(const CatchSwitchInst &CSI)
    : Instruction(CSI.getType(), Instruction::CatchSwitch, nullptr,
                  CSI.getNumOperands()) {
  init(CSI.getParentPad(), CSI.getUnwindDest(), CSI.getNumOperands());
  setNumHungOffUseOperands(ReservedSpace);
  Use *OL = getOperandList();
  const Use *InOL = CSI.getOperandList();
  for (unsigned I = 1, E = ReservedSpace; I != E; ++I)
    OL[I] = InOL[I];
}

void CatchSwitchInst::init(Value *ParentPad, BasicBlock *UnwindDest,
                           unsigned NumReservedValues) {
  assert(ParentPad && NumReservedValues);

  ReservedSpace = NumReservedValues;
  setNumHungOffUseOperands(UnwindDest ? 2 : 1);
  allocHungoffUses(ReservedSpace);

  Op<0>() = ParentPad;
  if (UnwindDest) {
    setSubclassData<UnwindDestField>(true);
    setUnwindDest(UnwindDest);
  }
}

/// growOperands - grow operands - This grows the operand list in response to a
/// push_back style of operation. This grows the number of ops by 2 times.
void CatchSwitchInst::growOperands(unsigned Size) {
  unsigned NumOperands = getNumOperands();
  assert(NumOperands >= 1);
  if (ReservedSpace >= NumOperands + Size)
    return;
  ReservedSpace = (NumOperands + Size / 2) * 2;
  growHungoffUses(ReservedSpace);
}

void CatchSwitchInst::addHandler(BasicBlock *Handler) {
  unsigned OpNo = getNumOperands();
  growOperands(1);
  assert(OpNo < ReservedSpace && "Growing didn't work!");
  setNumHungOffUseOperands(getNumOperands() + 1);
  getOperandList()[OpNo] = Handler;
}

void CatchSwitchInst::removeHandler(handler_iterator HI) {
  // Move all subsequent handlers up one.
  Use *EndDst = op_end() - 1;
  for (Use *CurDst = HI.getCurrent(); CurDst != EndDst; ++CurDst)
    *CurDst = *(CurDst + 1);
  // Null out the last handler use.
  *EndDst = nullptr;

  setNumHungOffUseOperands(getNumOperands() - 1);
}

//===----------------------------------------------------------------------===//
// FuncletPadInst Implementation
//===----------------------------------------------------------------------===//
void FuncletPadInst::init(Value *ParentPad, ArrayRef<Value *> Args,
                          const Twine &NameStr) {
  assert(getNumOperands() == 1 + Args.size() && "NumOperands not set up?");
  llvm::copy(Args, op_begin());
  setParentPad(ParentPad);
  setName(NameStr);
}

FuncletPadInst::FuncletPadInst(const FuncletPadInst &FPI)
    : Instruction(FPI.getType(), FPI.getOpcode(),
                  OperandTraits<FuncletPadInst>::op_end(this) -
                      FPI.getNumOperands(),
                  FPI.getNumOperands()) {
  std::copy(FPI.op_begin(), FPI.op_end(), op_begin());
  setParentPad(FPI.getParentPad());
}

FuncletPadInst::FuncletPadInst(Instruction::FuncletPadOps Op, Value *ParentPad,
                               ArrayRef<Value *> Args, unsigned Values,
                               const Twine &NameStr,
                               BasicBlock::iterator InsertBefore)
    : Instruction(ParentPad->getType(), Op,
                  OperandTraits<FuncletPadInst>::op_end(this) - Values, Values,
                  InsertBefore) {
  init(ParentPad, Args, NameStr);
}

FuncletPadInst::FuncletPadInst(Instruction::FuncletPadOps Op, Value *ParentPad,
                               ArrayRef<Value *> Args, unsigned Values,
                               const Twine &NameStr, Instruction *InsertBefore)
    : Instruction(ParentPad->getType(), Op,
                  OperandTraits<FuncletPadInst>::op_end(this) - Values, Values,
                  InsertBefore) {
  init(ParentPad, Args, NameStr);
}

FuncletPadInst::FuncletPadInst(Instruction::FuncletPadOps Op, Value *ParentPad,
                               ArrayRef<Value *> Args, unsigned Values,
                               const Twine &NameStr, BasicBlock *InsertAtEnd)
    : Instruction(ParentPad->getType(), Op,
                  OperandTraits<FuncletPadInst>::op_end(this) - Values, Values,
                  InsertAtEnd) {
  init(ParentPad, Args, NameStr);
}

//===----------------------------------------------------------------------===//
// UnreachableInst Implementation
//===----------------------------------------------------------------------===//

UnreachableInst::UnreachableInst(LLVMContext &Context,
                                 BasicBlock::iterator InsertBefore)
    : Instruction(Type::getVoidTy(Context), Instruction::Unreachable, nullptr,
                  0, InsertBefore) {}
UnreachableInst::UnreachableInst(LLVMContext &Context,
                                 Instruction *InsertBefore)
    : Instruction(Type::getVoidTy(Context), Instruction::Unreachable, nullptr,
                  0, InsertBefore) {}
UnreachableInst::UnreachableInst(LLVMContext &Context, BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(Context), Instruction::Unreachable, nullptr,
                  0, InsertAtEnd) {}

//===----------------------------------------------------------------------===//
// BranchInst Implementation
//===----------------------------------------------------------------------===//

void BranchInst::AssertOK() {
  if (isConditional())
    assert(getCondition()->getType()->isIntegerTy(1) &&
           "May only branch on boolean predicates!");
}

BranchInst::BranchInst(BasicBlock *IfTrue, BasicBlock::iterator InsertBefore)
    : Instruction(Type::getVoidTy(IfTrue->getContext()), Instruction::Br,
                  OperandTraits<BranchInst>::op_end(this) - 1, 1,
                  InsertBefore) {
  assert(IfTrue && "Branch destination may not be null!");
  Op<-1>() = IfTrue;
}

BranchInst::BranchInst(BasicBlock *IfTrue, Instruction *InsertBefore)
    : Instruction(Type::getVoidTy(IfTrue->getContext()), Instruction::Br,
                  OperandTraits<BranchInst>::op_end(this) - 1, 1,
                  InsertBefore) {
  assert(IfTrue && "Branch destination may not be null!");
  Op<-1>() = IfTrue;
}

BranchInst::BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond,
                       BasicBlock::iterator InsertBefore)
    : Instruction(Type::getVoidTy(IfTrue->getContext()), Instruction::Br,
                  OperandTraits<BranchInst>::op_end(this) - 3, 3,
                  InsertBefore) {
  // Assign in order of operand index to make use-list order predictable.
  Op<-3>() = Cond;
  Op<-2>() = IfFalse;
  Op<-1>() = IfTrue;
#ifndef NDEBUG
  AssertOK();
#endif
}

BranchInst::BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond,
                       Instruction *InsertBefore)
    : Instruction(Type::getVoidTy(IfTrue->getContext()), Instruction::Br,
                  OperandTraits<BranchInst>::op_end(this) - 3, 3,
                  InsertBefore) {
  // Assign in order of operand index to make use-list order predictable.
  Op<-3>() = Cond;
  Op<-2>() = IfFalse;
  Op<-1>() = IfTrue;
#ifndef NDEBUG
  AssertOK();
#endif
}

BranchInst::BranchInst(BasicBlock *IfTrue, BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(IfTrue->getContext()), Instruction::Br,
                  OperandTraits<BranchInst>::op_end(this) - 1, 1, InsertAtEnd) {
  assert(IfTrue && "Branch destination may not be null!");
  Op<-1>() = IfTrue;
}

BranchInst::BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond,
                       BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(IfTrue->getContext()), Instruction::Br,
                  OperandTraits<BranchInst>::op_end(this) - 3, 3, InsertAtEnd) {
  // Assign in order of operand index to make use-list order predictable.
  Op<-3>() = Cond;
  Op<-2>() = IfFalse;
  Op<-1>() = IfTrue;
#ifndef NDEBUG
  AssertOK();
#endif
}
BranchInst::BranchInst(const BranchInst &BI)
    : Instruction(Type::getVoidTy(BI.getContext()), Instruction::Br,
                  OperandTraits<BranchInst>::op_end(this) - BI.getNumOperands(),
                  BI.getNumOperands()) {
  // Assign in order of operand index to make use-list order predictable.
  if (BI.getNumOperands() != 1) {
    assert(BI.getNumOperands() == 3 && "BR can have 1 or 3 operands!");
    Op<-3>() = BI.Op<-3>();
    Op<-2>() = BI.Op<-2>();
  }
  Op<-1>() = BI.Op<-1>();
  SubclassOptionalData = BI.SubclassOptionalData;
}

void BranchInst::swapSuccessors() {
  assert(isConditional() &&
         "Cannot swap successors of an unconditional branch");
  Op<-1>().swap(Op<-2>());

  // Update profile metadata if present and it matches our structural
  // expectations.
  swapProfMetadata();
}
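
// Editor's note: a sketched canonicalization that inverts a conditional
// branch; `Builder` is a hypothetical IRBuilder positioned at `BI`. Swapping
// the successors preserves semantics, and swapSuccessors keeps any !prof
// weights in sync.
//
//   BI->setCondition(Builder.CreateNot(BI->getCondition()));
//   BI->swapSuccessors();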

//===----------------------------------------------------------------------===//
// AllocaInst Implementation
//===----------------------------------------------------------------------===//

static Value *getAISize(LLVMContext &Context, Value *Amt) {
  if (!Amt)
    Amt = ConstantInt::get(Type::getInt32Ty(Context), 1);
  else {
    assert(!isa<BasicBlock>(Amt) &&
           "Passed basic block into allocation size parameter! Use other ctor");
    assert(Amt->getType()->isIntegerTy() &&
           "Allocation array size is not an integer!");
  }
  return Amt;
}

static Align computeAllocaDefaultAlign(Type *Ty, BasicBlock *BB) {
  assert(BB && "Insertion BB cannot be null when alignment not provided!");
  assert(BB->getParent() &&
         "BB must be in a Function when alignment not provided!");
  const DataLayout &DL = BB->getModule()->getDataLayout();
  return DL.getPrefTypeAlign(Ty);
}

static Align computeAllocaDefaultAlign(Type *Ty, BasicBlock::iterator It) {
  return computeAllocaDefaultAlign(Ty, It->getParent());
}

static Align computeAllocaDefaultAlign(Type *Ty, Instruction *I) {
  assert(I && "Insertion position cannot be null when alignment not provided!");
  return computeAllocaDefaultAlign(Ty, I->getParent());
}

AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, const Twine &Name,
                       BasicBlock::iterator InsertBefore)
    : AllocaInst(Ty, AddrSpace, /*ArraySize=*/nullptr, Name, InsertBefore) {}

AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, const Twine &Name,
                       Instruction *InsertBefore)
    : AllocaInst(Ty, AddrSpace, /*ArraySize=*/nullptr, Name, InsertBefore) {}

AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, const Twine &Name,
                       BasicBlock *InsertAtEnd)
    : AllocaInst(Ty, AddrSpace, /*ArraySize=*/nullptr, Name, InsertAtEnd) {}

AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
                       const Twine &Name, BasicBlock::iterator InsertBefore)
    : AllocaInst(Ty, AddrSpace, ArraySize,
                 computeAllocaDefaultAlign(Ty, InsertBefore), Name,
                 InsertBefore) {}

AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
                       const Twine &Name, Instruction *InsertBefore)
    : AllocaInst(Ty, AddrSpace, ArraySize,
                 computeAllocaDefaultAlign(Ty, InsertBefore), Name,
                 InsertBefore) {}

AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
                       const Twine &Name, BasicBlock *InsertAtEnd)
    : AllocaInst(Ty, AddrSpace, ArraySize,
                 computeAllocaDefaultAlign(Ty, InsertAtEnd), Name,
                 InsertAtEnd) {}

AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
                       Align Align, const Twine &Name,
                       BasicBlock::iterator InsertBefore)
    : UnaryInstruction(PointerType::get(Ty, AddrSpace), Alloca,
                       getAISize(Ty->getContext(), ArraySize), InsertBefore),
      AllocatedType(Ty) {
  setAlignment(Align);
  assert(!Ty->isVoidTy() && "Cannot allocate void!");
  setName(Name);
}

AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
                       Align Align, const Twine &Name,
                       Instruction *InsertBefore)
    : UnaryInstruction(PointerType::get(Ty, AddrSpace), Alloca,
                       getAISize(Ty->getContext(), ArraySize), InsertBefore),
      AllocatedType(Ty) {
  setAlignment(Align);
  assert(!Ty->isVoidTy() && "Cannot allocate void!");
  setName(Name);
}

AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
                       Align Align, const Twine &Name, BasicBlock *InsertAtEnd)
    : UnaryInstruction(PointerType::get(Ty, AddrSpace), Alloca,
                       getAISize(Ty->getContext(), ArraySize), InsertAtEnd),
      AllocatedType(Ty) {
  setAlignment(Align);
  assert(!Ty->isVoidTy() && "Cannot allocate void!");
  setName(Name);
}

bool AllocaInst::isArrayAllocation() const {
  if (ConstantInt *CI = dyn_cast<ConstantInt>(getOperand(0)))
    return !CI->isOne();
  return true;
}

/// isStaticAlloca - Return true if this alloca is in the entry block of the
/// function and is a constant size. If so, the code generator will fold it
/// into the prolog/epilog code, so it is basically free.
bool AllocaInst::isStaticAlloca() const {
  // Must be constant size.
  if (!isa<ConstantInt>(getArraySize())) return false;

  // Must be in the entry block.
  const BasicBlock *Parent = getParent();
  return Parent->isEntryBlock() && !isUsedWithInAlloca();
}
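
// Editor's note: passes commonly gate stack-slot promotion on this predicate,
// e.g. (sketch, `Worklist` hypothetical):
//
//   for (Instruction &I : F.getEntryBlock())
//     if (auto *AI = dyn_cast<AllocaInst>(&I))
//       if (AI->isStaticAlloca())
//         Worklist.push_back(AI);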

//===----------------------------------------------------------------------===//
// LoadInst Implementation
//===----------------------------------------------------------------------===//

void LoadInst::AssertOK() {
  assert(getOperand(0)->getType()->isPointerTy() &&
         "Ptr must have pointer type.");
}

static Align computeLoadStoreDefaultAlign(Type *Ty, BasicBlock *BB) {
  assert(BB && "Insertion BB cannot be null when alignment not provided!");
  assert(BB->getParent() &&
         "BB must be in a Function when alignment not provided!");
  const DataLayout &DL = BB->getModule()->getDataLayout();
  return DL.getABITypeAlign(Ty);
}

static Align computeLoadStoreDefaultAlign(Type *Ty, BasicBlock::iterator It) {
  return computeLoadStoreDefaultAlign(Ty, It->getParent());
}

static Align computeLoadStoreDefaultAlign(Type *Ty, Instruction *I) {
  assert(I && "Insertion position cannot be null when alignment not provided!");
  return computeLoadStoreDefaultAlign(Ty, I->getParent());
}

LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name,
                   BasicBlock::iterator InsertBef)
    : LoadInst(Ty, Ptr, Name, /*isVolatile=*/false, InsertBef) {}

LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name,
                   Instruction *InsertBef)
    : LoadInst(Ty, Ptr, Name, /*isVolatile=*/false, InsertBef) {}

LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name,
                   BasicBlock *InsertAE)
    : LoadInst(Ty, Ptr, Name, /*isVolatile=*/false, InsertAE) {}

LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
                   BasicBlock::iterator InsertBef)
    : LoadInst(Ty, Ptr, Name, isVolatile,
               computeLoadStoreDefaultAlign(Ty, InsertBef), InsertBef) {}

LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
                   Instruction *InsertBef)
    : LoadInst(Ty, Ptr, Name, isVolatile,
               computeLoadStoreDefaultAlign(Ty, InsertBef), InsertBef) {}

LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
                   BasicBlock *InsertAE)
    : LoadInst(Ty, Ptr, Name, isVolatile,
               computeLoadStoreDefaultAlign(Ty, InsertAE), InsertAE) {}

LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
                   Align Align, BasicBlock::iterator InsertBef)
    : LoadInst(Ty, Ptr, Name, isVolatile, Align, AtomicOrdering::NotAtomic,
               SyncScope::System, InsertBef) {}

LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
                   Align Align, Instruction *InsertBef)
    : LoadInst(Ty, Ptr, Name, isVolatile, Align, AtomicOrdering::NotAtomic,
               SyncScope::System, InsertBef) {}

LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
                   Align Align, BasicBlock *InsertAE)
    : LoadInst(Ty, Ptr, Name, isVolatile, Align, AtomicOrdering::NotAtomic,
               SyncScope::System, InsertAE) {}

LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
                   Align Align, AtomicOrdering Order, SyncScope::ID SSID,
                   BasicBlock::iterator InsertBef)
    : UnaryInstruction(Ty, Load, Ptr, InsertBef) {
  setVolatile(isVolatile);
  setAlignment(Align);
  setAtomic(Order, SSID);
  AssertOK();
  setName(Name);
}

LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
                   Align Align, AtomicOrdering Order, SyncScope::ID SSID,
                   Instruction *InsertBef)
    : UnaryInstruction(Ty, Load, Ptr, InsertBef) {
  setVolatile(isVolatile);
  setAlignment(Align);
  setAtomic(Order, SSID);
  AssertOK();
  setName(Name);
}

LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
                   Align Align, AtomicOrdering Order, SyncScope::ID SSID,
                   BasicBlock *InsertAE)
    : UnaryInstruction(Ty, Load, Ptr, InsertAE) {
  setVolatile(isVolatile);
  setAlignment(Align);
  setAtomic(Order, SSID);
  AssertOK();
  setName(Name);
}

//===----------------------------------------------------------------------===//
// StoreInst Implementation
//===----------------------------------------------------------------------===//

void StoreInst::AssertOK() {
  assert(getOperand(0) && getOperand(1) && "Both operands must be non-null!");
  assert(getOperand(1)->getType()->isPointerTy() &&
         "Ptr must have pointer type!");
}

StoreInst::StoreInst(Value *val, Value *addr, Instruction *InsertBefore)
    : StoreInst(val, addr, /*isVolatile=*/false, InsertBefore) {}

StoreInst::StoreInst(Value *val, Value *addr, BasicBlock *InsertAtEnd)
    : StoreInst(val, addr, /*isVolatile=*/false, InsertAtEnd) {}

StoreInst::StoreInst(Value *val, Value *addr, BasicBlock::iterator InsertBefore)
    : StoreInst(val, addr, /*isVolatile=*/false, InsertBefore) {}

StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile,
                     Instruction *InsertBefore)
    : StoreInst(val, addr, isVolatile,
                computeLoadStoreDefaultAlign(val->getType(), InsertBefore),
                InsertBefore) {}

StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile,
                     BasicBlock *InsertAtEnd)
    : StoreInst(val, addr, isVolatile,
                computeLoadStoreDefaultAlign(val->getType(), InsertAtEnd),
                InsertAtEnd) {}

StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile,
                     BasicBlock::iterator InsertBefore)
    : StoreInst(val, addr, isVolatile,
                computeLoadStoreDefaultAlign(val->getType(), &*InsertBefore),
                InsertBefore) {}

StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, Align Align,
                     Instruction *InsertBefore)
    : StoreInst(val, addr, isVolatile, Align, AtomicOrdering::NotAtomic,
                SyncScope::System, InsertBefore) {}

StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, Align Align,
                     BasicBlock *InsertAtEnd)
    : StoreInst(val, addr, isVolatile, Align, AtomicOrdering::NotAtomic,
                SyncScope::System, InsertAtEnd) {}

StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, Align Align,
                     BasicBlock::iterator InsertBefore)
    : StoreInst(val, addr, isVolatile, Align, AtomicOrdering::NotAtomic,
                SyncScope::System, InsertBefore) {}

StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, Align Align,
                     AtomicOrdering Order, SyncScope::ID SSID,
                     Instruction *InsertBefore)
    : Instruction(Type::getVoidTy(val->getContext()), Store,
                  OperandTraits<StoreInst>::op_begin(this),
                  OperandTraits<StoreInst>::operands(this), InsertBefore) {
  Op<0>() = val;
  Op<1>() = addr;
  setVolatile(isVolatile);
  setAlignment(Align);
  setAtomic(Order, SSID);
  AssertOK();
}

StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, Align Align,
                     AtomicOrdering Order, SyncScope::ID SSID,
                     BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(val->getContext()), Store,
                  OperandTraits<StoreInst>::op_begin(this),
                  OperandTraits<StoreInst>::operands(this), InsertAtEnd) {
  Op<0>() = val;
  Op<1>() = addr;
  setVolatile(isVolatile);
  setAlignment(Align);
  setAtomic(Order, SSID);
  AssertOK();
}

StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, Align Align,
                     AtomicOrdering Order, SyncScope::ID SSID,
                     BasicBlock::iterator InsertBefore)
    : Instruction(Type::getVoidTy(val->getContext()), Store,
                  OperandTraits<StoreInst>::op_begin(this),
                  OperandTraits<StoreInst>::operands(this)) {
  Op<0>() = val;
  Op<1>() = addr;
  setVolatile(isVolatile);
  setAlignment(Align);
  setAtomic(Order, SSID);
  insertBefore(*InsertBefore->getParent(), InsertBefore);
  AssertOK();
}
1757//===----------------------------------------------------------------------===//
1758// AtomicCmpXchgInst Implementation
1759//===----------------------------------------------------------------------===//
1760
1761void AtomicCmpXchgInst::Init(Value *Ptr, Value *Cmp, Value *NewVal,
1762 Align Alignment, AtomicOrdering SuccessOrdering,
1763 AtomicOrdering FailureOrdering,
1764 SyncScope::ID SSID) {
1765 Op<0>() = Ptr;
1766 Op<1>() = Cmp;
1767 Op<2>() = NewVal;
1768 setSuccessOrdering(SuccessOrdering);
1769 setFailureOrdering(FailureOrdering);
1770 setSyncScopeID(SSID);
1771 setAlignment(Alignment);
1772
1773 assert(getOperand(0) && getOperand(1) && getOperand(2) &&
1774 "All operands must be non-null!");
1776 "Ptr must have pointer type!");
1777 assert(getOperand(1)->getType() == getOperand(2)->getType() &&
1778 "Cmp type and NewVal type must be same!");
1779}
1780
1781AtomicCmpXchgInst::AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal,
1782 Align Alignment,
1783 AtomicOrdering SuccessOrdering,
1784 AtomicOrdering FailureOrdering,
1785 SyncScope::ID SSID,
1786 BasicBlock::iterator InsertBefore)
1787 : Instruction(
1788 StructType::get(Cmp->getType(), Type::getInt1Ty(Cmp->getContext())),
1789 AtomicCmpXchg, OperandTraits<AtomicCmpXchgInst>::op_begin(this),
1790 OperandTraits<AtomicCmpXchgInst>::operands(this), InsertBefore) {
1791 Init(Ptr, Cmp, NewVal, Alignment, SuccessOrdering, FailureOrdering, SSID);
1792}
1793
1794AtomicCmpXchgInst::AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal,
1795 Align Alignment,
1796 AtomicOrdering SuccessOrdering,
1797 AtomicOrdering FailureOrdering,
1798 SyncScope::ID SSID,
1799 Instruction *InsertBefore)
1800 : Instruction(
1801 StructType::get(Cmp->getType(), Type::getInt1Ty(Cmp->getContext())),
1802 AtomicCmpXchg, OperandTraits<AtomicCmpXchgInst>::op_begin(this),
1803 OperandTraits<AtomicCmpXchgInst>::operands(this), InsertBefore) {
1804 Init(Ptr, Cmp, NewVal, Alignment, SuccessOrdering, FailureOrdering, SSID);
1805}
1806
1807AtomicCmpXchgInst::AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal,
1808 Align Alignment,
1809 AtomicOrdering SuccessOrdering,
1810 AtomicOrdering FailureOrdering,
1811 SyncScope::ID SSID,
1812 BasicBlock *InsertAtEnd)
1813 : Instruction(
1814 StructType::get(Cmp->getType(), Type::getInt1Ty(Cmp->getContext())),
1815 AtomicCmpXchg, OperandTraits<AtomicCmpXchgInst>::op_begin(this),
1816 OperandTraits<AtomicCmpXchgInst>::operands(this), InsertAtEnd) {
1817 Init(Ptr, Cmp, NewVal, Alignment, SuccessOrdering, FailureOrdering, SSID);
1818}
1819
1820//===----------------------------------------------------------------------===//
1821// AtomicRMWInst Implementation
1822//===----------------------------------------------------------------------===//
1823
1824void AtomicRMWInst::Init(BinOp Operation, Value *Ptr, Value *Val,
1825 Align Alignment, AtomicOrdering Ordering,
1826 SyncScope::ID SSID) {
1827 assert(Ordering != AtomicOrdering::NotAtomic &&
1828 "atomicrmw instructions can only be atomic.");
1829 assert(Ordering != AtomicOrdering::Unordered &&
1830 "atomicrmw instructions cannot be unordered.");
1831 Op<0>() = Ptr;
1832 Op<1>() = Val;
1833 setOperation(Operation);
1834 setOrdering(Ordering);
1835 setSyncScopeID(SSID);
1836 setAlignment(Alignment);
1837
1838 assert(getOperand(0) && getOperand(1) &&
1839 "All operands must be non-null!");
1841 "Ptr must have pointer type!");
1842 assert(Ordering != AtomicOrdering::NotAtomic &&
1843 "AtomicRMW instructions must be atomic!");
1844}
1845
1846AtomicRMWInst::AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val,
1847 Align Alignment, AtomicOrdering Ordering,
1848 SyncScope::ID SSID,
1849 BasicBlock::iterator InsertBefore)
1850 : Instruction(Val->getType(), AtomicRMW,
1851 OperandTraits<AtomicRMWInst>::op_begin(this),
1852 OperandTraits<AtomicRMWInst>::operands(this), InsertBefore) {
1853 Init(Operation, Ptr, Val, Alignment, Ordering, SSID);
1854}
1855
1856AtomicRMWInst::AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val,
1857 Align Alignment, AtomicOrdering Ordering,
1858 SyncScope::ID SSID, Instruction *InsertBefore)
1859 : Instruction(Val->getType(), AtomicRMW,
1860 OperandTraits<AtomicRMWInst>::op_begin(this),
1861 OperandTraits<AtomicRMWInst>::operands(this), InsertBefore) {
1862 Init(Operation, Ptr, Val, Alignment, Ordering, SSID);
1863}
1864
1865AtomicRMWInst::AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val,
1866 Align Alignment, AtomicOrdering Ordering,
1867 SyncScope::ID SSID, BasicBlock *InsertAtEnd)
1868 : Instruction(Val->getType(), AtomicRMW,
1869 OperandTraits<AtomicRMWInst>::op_begin(this),
1870 OperandTraits<AtomicRMWInst>::operands(this), InsertAtEnd) {
1871 Init(Operation, Ptr, Val, Alignment, Ordering, SSID);
1872}
1873
1874StringRef AtomicRMWInst::getOperationName(BinOp Op) {
1875 switch (Op) {
1876 case AtomicRMWInst::Xchg:
1877 return "xchg";
1878 case AtomicRMWInst::Add:
1879 return "add";
1880 case AtomicRMWInst::Sub:
1881 return "sub";
1882 case AtomicRMWInst::And:
1883 return "and";
1884 case AtomicRMWInst::Nand:
1885 return "nand";
1886 case AtomicRMWInst::Or:
1887 return "or";
1888 case AtomicRMWInst::Xor:
1889 return "xor";
1890 case AtomicRMWInst::Max:
1891 return "max";
1892 case AtomicRMWInst::Min:
1893 return "min";
1894 case AtomicRMWInst::UMax:
1895 return "umax";
1896 case AtomicRMWInst::UMin:
1897 return "umin";
1898 case AtomicRMWInst::FAdd:
1899 return "fadd";
1900 case AtomicRMWInst::FSub:
1901 return "fsub";
1902 case AtomicRMWInst::FMax:
1903 return "fmax";
1904 case AtomicRMWInst::FMin:
1905 return "fmin";
1906 case AtomicRMWInst::UIncWrap:
1907 return "uinc_wrap";
1908 case AtomicRMWInst::UDecWrap:
1909 return "udec_wrap";
1910 case AtomicRMWInst::BAD_BINOP:
1911 return "<invalid operation>";
1912 }
1913
1914 llvm_unreachable("invalid atomicrmw operation");
1915}
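// Illustrative note (not part of the upstream source): these names are the
// operation keywords used in textual IR, e.g. an Xchg read-modify-write is
// printed as:
//   %old = atomicrmw xchg ptr %p, i32 %v seq_cst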
1916
1917//===----------------------------------------------------------------------===//
1918// FenceInst Implementation
1919//===----------------------------------------------------------------------===//
1920
1921FenceInst::FenceInst(LLVMContext &C, AtomicOrdering Ordering,
1922 SyncScope::ID SSID, BasicBlock::iterator InsertBefore)
1923 : Instruction(Type::getVoidTy(C), Fence, nullptr, 0, InsertBefore) {
1924 setOrdering(Ordering);
1925 setSyncScopeID(SSID);
1926}
1927
1928FenceInst::FenceInst(LLVMContext &C, AtomicOrdering Ordering,
1929 SyncScope::ID SSID,
1930 Instruction *InsertBefore)
1931 : Instruction(Type::getVoidTy(C), Fence, nullptr, 0, InsertBefore) {
1932 setOrdering(Ordering);
1933 setSyncScopeID(SSID);
1934}
1935
1936FenceInst::FenceInst(LLVMContext &C, AtomicOrdering Ordering,
1937 SyncScope::ID SSID,
1938 BasicBlock *InsertAtEnd)
1939 : Instruction(Type::getVoidTy(C), Fence, nullptr, 0, InsertAtEnd) {
1940 setOrdering(Ordering);
1941 setSyncScopeID(SSID);
1942}
1943
1944//===----------------------------------------------------------------------===//
1945// GetElementPtrInst Implementation
1946//===----------------------------------------------------------------------===//
1947
1948void GetElementPtrInst::init(Value *Ptr, ArrayRef<Value *> IdxList,
1949 const Twine &Name) {
1950 assert(getNumOperands() == 1 + IdxList.size() &&
1951 "NumOperands not initialized?");
1952 Op<0>() = Ptr;
1953 llvm::copy(IdxList, op_begin() + 1);
1954 setName(Name);
1955}
1956
1957GetElementPtrInst::GetElementPtrInst(const GetElementPtrInst &GEPI)
1958 : Instruction(GEPI.getType(), GetElementPtr,
1959 OperandTraits<GetElementPtrInst>::op_end(this) -
1960 GEPI.getNumOperands(),
1961 GEPI.getNumOperands()),
1962 SourceElementType(GEPI.SourceElementType),
1963 ResultElementType(GEPI.ResultElementType) {
1964 std::copy(GEPI.op_begin(), GEPI.op_end(), op_begin());
1965 SubclassOptionalData = GEPI.SubclassOptionalData;
1966}
1967
1968Type *GetElementPtrInst::getTypeAtIndex(Type *Ty, Value *Idx) {
1969 if (auto *Struct = dyn_cast<StructType>(Ty)) {
1970 if (!Struct->indexValid(Idx))
1971 return nullptr;
1972 return Struct->getTypeAtIndex(Idx);
1973 }
1974 if (!Idx->getType()->isIntOrIntVectorTy())
1975 return nullptr;
1976 if (auto *Array = dyn_cast<ArrayType>(Ty))
1977 return Array->getElementType();
1978 if (auto *Vector = dyn_cast<VectorType>(Ty))
1979 return Vector->getElementType();
1980 return nullptr;
1981}
1982
1983Type *GetElementPtrInst::getTypeAtIndex(Type *Ty, uint64_t Idx) {
1984 if (auto *Struct = dyn_cast<StructType>(Ty)) {
1985 if (Idx >= Struct->getNumElements())
1986 return nullptr;
1987 return Struct->getElementType(Idx);
1988 }
1989 if (auto *Array = dyn_cast<ArrayType>(Ty))
1990 return Array->getElementType();
1991 if (auto *Vector = dyn_cast<VectorType>(Ty))
1992 return Vector->getElementType();
1993 return nullptr;
1994}
1995
1996template <typename IndexTy>
1997static Type *getIndexedTypeInternal(Type *Ty, ArrayRef<IndexTy> IdxList) {
1998 if (IdxList.empty())
1999 return Ty;
2000 for (IndexTy V : IdxList.slice(1)) {
2001 Ty = GetElementPtrInst::getTypeAtIndex(Ty, V);
2002 if (!Ty)
2003 return Ty;
2004 }
2005 return Ty;
2006}
2007
2008Type *GetElementPtrInst::getIndexedType(Type *Ty, ArrayRef<Value *> IdxList) {
2009 return getIndexedTypeInternal(Ty, IdxList);
2010}
2011
2012Type *GetElementPtrInst::getIndexedType(Type *Ty,
2013 ArrayRef<Constant *> IdxList) {
2014 return getIndexedTypeInternal(Ty, IdxList);
2015}
2016
2017Type *GetElementPtrInst::getIndexedType(Type *Ty, ArrayRef<uint64_t> IdxList) {
2018 return getIndexedTypeInternal(Ty, IdxList);
2019}
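// Illustrative example (not part of the upstream source): for
// Ty = {i32, [4 x float]} and indices (0, 1, 2), the first index steps over
// the pointer operand, index 1 selects [4 x float] within the struct, and
// index 2 selects float, so getIndexedType returns float.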
2020
2021/// hasAllZeroIndices - Return true if all of the indices of this GEP are
2022/// zeros. If so, the result pointer and the first operand have the same
2023/// value, just potentially different types.
2024bool GetElementPtrInst::hasAllZeroIndices() const {
2025 for (unsigned i = 1, e = getNumOperands(); i != e; ++i) {
2026 if (ConstantInt *CI = dyn_cast<ConstantInt>(getOperand(i))) {
2027 if (!CI->isZero()) return false;
2028 } else {
2029 return false;
2030 }
2031 }
2032 return true;
2033}
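// Illustrative example (not part of the upstream source): the GEP
//   %q = getelementptr {i64, i64}, ptr %p, i32 0, i32 0
// has all-zero indices, so %q and %p denote the same address.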
2034
2035/// hasAllConstantIndices - Return true if all of the indices of this GEP are
2036/// constant integers. If so, the result pointer and the first operand have
2037/// a constant offset between them.
2038bool GetElementPtrInst::hasAllConstantIndices() const {
2039 for (unsigned i = 1, e = getNumOperands(); i != e; ++i) {
2040 if (!isa<ConstantInt>(getOperand(i)))
2041 return false;
2042 }
2043 return true;
2044}
2045
2046void GetElementPtrInst::setIsInBounds(bool B) {
2047 cast<GEPOperator>(this)->setIsInBounds(B);
2048}
2049
2050bool GetElementPtrInst::isInBounds() const {
2051 return cast<GEPOperator>(this)->isInBounds();
2052}
2053
2054bool GetElementPtrInst::accumulateConstantOffset(const DataLayout &DL,
2055 APInt &Offset) const {
2056 // Delegate to the generic GEPOperator implementation.
2057 return cast<GEPOperator>(this)->accumulateConstantOffset(DL, Offset);
2058}
2059
2060bool GetElementPtrInst::collectOffset(
2061 const DataLayout &DL, unsigned BitWidth,
2062 MapVector<Value *, APInt> &VariableOffsets,
2063 APInt &ConstantOffset) const {
2064 // Delegate to the generic GEPOperator implementation.
2065 return cast<GEPOperator>(this)->collectOffset(DL, BitWidth, VariableOffsets,
2066 ConstantOffset);
2067}
2068
2069//===----------------------------------------------------------------------===//
2070// ExtractElementInst Implementation
2071//===----------------------------------------------------------------------===//
2072
2073ExtractElementInst::ExtractElementInst(Value *Val, Value *Index,
2074 const Twine &Name,
2075 BasicBlock::iterator InsertBef)
2076 : Instruction(
2077 cast<VectorType>(Val->getType())->getElementType(), ExtractElement,
2078 OperandTraits<ExtractElementInst>::op_begin(this), 2, InsertBef) {
2079 assert(isValidOperands(Val, Index) &&
2080 "Invalid extractelement instruction operands!");
2081 Op<0>() = Val;
2082 Op<1>() = Index;
2083 setName(Name);
2084}
2085
2086ExtractElementInst::ExtractElementInst(Value *Val, Value *Index,
2087 const Twine &Name,
2088 Instruction *InsertBef)
2089 : Instruction(cast<VectorType>(Val->getType())->getElementType(),
2090 ExtractElement,
2091 OperandTraits<ExtractElementInst>::op_begin(this),
2092 2, InsertBef) {
2093 assert(isValidOperands(Val, Index) &&
2094 "Invalid extractelement instruction operands!");
2095 Op<0>() = Val;
2096 Op<1>() = Index;
2097 setName(Name);
2098}
2099
2100ExtractElementInst::ExtractElementInst(Value *Val, Value *Index,
2101 const Twine &Name,
2102 BasicBlock *InsertAE)
2103 : Instruction(cast<VectorType>(Val->getType())->getElementType(),
2104 ExtractElement,
2105 OperandTraits<ExtractElementInst>::op_begin(this),
2106 2, InsertAE) {
2107 assert(isValidOperands(Val, Index) &&
2108 "Invalid extractelement instruction operands!");
2109
2110 Op<0>() = Val;
2111 Op<1>() = Index;
2112 setName(Name);
2113}
2114
2115bool ExtractElementInst::isValidOperands(const Value *Val, const Value *Index) {
2116 if (!Val->getType()->isVectorTy() || !Index->getType()->isIntegerTy())
2117 return false;
2118 return true;
2119}
2120
2121//===----------------------------------------------------------------------===//
2122// InsertElementInst Implementation
2123//===----------------------------------------------------------------------===//
2124
2125InsertElementInst::InsertElementInst(Value *Vec, Value *Elt, Value *Index,
2126 const Twine &Name,
2127 BasicBlock::iterator InsertBef)
2128 : Instruction(Vec->getType(), InsertElement,
2129 OperandTraits<InsertElementInst>::op_begin(this), 3,
2130 InsertBef) {
2131 assert(isValidOperands(Vec, Elt, Index) &&
2132 "Invalid insertelement instruction operands!");
2133 Op<0>() = Vec;
2134 Op<1>() = Elt;
2135 Op<2>() = Index;
2136 setName(Name);
2137}
2138
2139InsertElementInst::InsertElementInst(Value *Vec, Value *Elt, Value *Index,
2140 const Twine &Name,
2141 Instruction *InsertBef)
2142 : Instruction(Vec->getType(), InsertElement,
2143 OperandTraits<InsertElementInst>::op_begin(this),
2144 3, InsertBef) {
2145 assert(isValidOperands(Vec, Elt, Index) &&
2146 "Invalid insertelement instruction operands!");
2147 Op<0>() = Vec;
2148 Op<1>() = Elt;
2149 Op<2>() = Index;
2150 setName(Name);
2151}
2152
2153InsertElementInst::InsertElementInst(Value *Vec, Value *Elt, Value *Index,
2154 const Twine &Name,
2155 BasicBlock *InsertAE)
2156 : Instruction(Vec->getType(), InsertElement,
2157 OperandTraits<InsertElementInst>::op_begin(this),
2158 3, InsertAE) {
2159 assert(isValidOperands(Vec, Elt, Index) &&
2160 "Invalid insertelement instruction operands!");
2161
2162 Op<0>() = Vec;
2163 Op<1>() = Elt;
2164 Op<2>() = Index;
2165 setName(Name);
2166}
2167
2168bool InsertElementInst::isValidOperands(const Value *Vec, const Value *Elt,
2169 const Value *Index) {
2170 if (!Vec->getType()->isVectorTy())
2171 return false; // First operand of insertelement must be vector type.
2172
2173 if (Elt->getType() != cast<VectorType>(Vec->getType())->getElementType())
2174 return false; // Second operand of insertelement must be vector element type.
2175
2176 if (!Index->getType()->isIntegerTy())
2177 return false; // Third operand of insertelement must be an integer.
2178 return true;
2179}
2180
2181//===----------------------------------------------------------------------===//
2182// ShuffleVectorInst Implementation
2183//===----------------------------------------------------------------------===//
2184
2185static Value *createPlaceholderForShuffleVector(Value *V) {
2186 assert(V && "Cannot create placeholder of nullptr V");
2187 return PoisonValue::get(V->getType());
2188}
2189
2190ShuffleVectorInst::ShuffleVectorInst(Value *V1, Value *Mask,
2191 BasicBlock::iterator InsertBefore)
2192 : ShuffleVectorInst(V1, createPlaceholderForShuffleVector(V1), Mask, "",
2193 InsertBefore) {}
2194
2195ShuffleVectorInst::ShuffleVectorInst(Value *V1, Value *Mask,
2196 Instruction *InsertBefore)
2197 : ShuffleVectorInst(V1, createPlaceholderForShuffleVector(V1), Mask, "",
2198 InsertBefore) {}
2199
2200ShuffleVectorInst::ShuffleVectorInst(Value *V1, Value *Mask,
2201 BasicBlock *InsertAtEnd)
2202 : ShuffleVectorInst(V1, createPlaceholderForShuffleVector(V1), Mask, "",
2203 InsertAtEnd) {}
2204
2205ShuffleVectorInst::ShuffleVectorInst(Value *V1, ArrayRef<int> Mask,
2206 const Twine &Name,
2207 BasicBlock::iterator InsertBefore)
2208 : ShuffleVectorInst(V1, createPlaceholderForShuffleVector(V1), Mask, Name,
2209 InsertBefore) {}
2210
2211ShuffleVectorInst::ShuffleVectorInst(Value *V1, ArrayRef<int> Mask,
2212 const Twine &Name,
2213 Instruction *InsertBefore)
2214 : ShuffleVectorInst(V1, createPlaceholderForShuffleVector(V1), Mask, Name,
2215 InsertBefore) {}
2216
2217ShuffleVectorInst::ShuffleVectorInst(Value *V1, ArrayRef<int> Mask,
2218 const Twine &Name, BasicBlock *InsertAtEnd)
2219 : ShuffleVectorInst(V1, createPlaceholderForShuffleVector(V1), Mask, Name,
2220 InsertAtEnd) {}
2221
2222ShuffleVectorInst::ShuffleVectorInst(Value *V1, Value *V2, Value *Mask,
2223 const Twine &Name,
2224 BasicBlock::iterator InsertBefore)
2225 : Instruction(
2226 VectorType::get(cast<VectorType>(V1->getType())->getElementType(),
2227 cast<VectorType>(Mask->getType())->getElementCount()),
2228 ShuffleVector, OperandTraits<ShuffleVectorInst>::op_begin(this),
2229 OperandTraits<ShuffleVectorInst>::operands(this), InsertBefore) {
2230 assert(isValidOperands(V1, V2, Mask) &&
2231 "Invalid shuffle vector instruction operands!");
2232
2233 Op<0>() = V1;
2234 Op<1>() = V2;
2235 SmallVector<int, 16> MaskArr;
2236 getShuffleMask(cast<Constant>(Mask), MaskArr);
2237 setShuffleMask(MaskArr);
2238 setName(Name);
2239}
2240
2241ShuffleVectorInst::ShuffleVectorInst(Value *V1, Value *V2, Value *Mask,
2242 const Twine &Name,
2243 Instruction *InsertBefore)
2244 : Instruction(
2245 VectorType::get(cast<VectorType>(V1->getType())->getElementType(),
2246 cast<VectorType>(Mask->getType())->getElementCount()),
2247 ShuffleVector, OperandTraits<ShuffleVectorInst>::op_begin(this),
2248 OperandTraits<ShuffleVectorInst>::operands(this), InsertBefore) {
2249 assert(isValidOperands(V1, V2, Mask) &&
2250 "Invalid shuffle vector instruction operands!");
2251
2252 Op<0>() = V1;
2253 Op<1>() = V2;
2254 SmallVector<int, 16> MaskArr;
2255 getShuffleMask(cast<Constant>(Mask), MaskArr);
2256 setShuffleMask(MaskArr);
2257 setName(Name);
2258}
2259
2260ShuffleVectorInst::ShuffleVectorInst(Value *V1, Value *V2, Value *Mask,
2261 const Twine &Name, BasicBlock *InsertAtEnd)
2262 : Instruction(
2263 VectorType::get(cast<VectorType>(V1->getType())->getElementType(),
2264 cast<VectorType>(Mask->getType())->getElementCount()),
2265 ShuffleVector, OperandTraits<ShuffleVectorInst>::op_begin(this),
2266 OperandTraits<ShuffleVectorInst>::operands(this), InsertAtEnd) {
2267 assert(isValidOperands(V1, V2, Mask) &&
2268 "Invalid shuffle vector instruction operands!");
2269
2270 Op<0>() = V1;
2271 Op<1>() = V2;
2272 SmallVector<int, 16> MaskArr;
2273 getShuffleMask(cast<Constant>(Mask), MaskArr);
2274 setShuffleMask(MaskArr);
2275 setName(Name);
2276}
2277
2278ShuffleVectorInst::ShuffleVectorInst(Value *V1, Value *V2, ArrayRef<int> Mask,
2279 const Twine &Name,
2280 BasicBlock::iterator InsertBefore)
2281 : Instruction(
2282 VectorType::get(cast<VectorType>(V1->getType())->getElementType(),
2283 Mask.size(), isa<ScalableVectorType>(V1->getType())),
2284 ShuffleVector, OperandTraits<ShuffleVectorInst>::op_begin(this),
2285 OperandTraits<ShuffleVectorInst>::operands(this), InsertBefore) {
2286 assert(isValidOperands(V1, V2, Mask) &&
2287 "Invalid shuffle vector instruction operands!");
2288 Op<0>() = V1;
2289 Op<1>() = V2;
2290 setShuffleMask(Mask);
2291 setName(Name);
2292}
2293
2294ShuffleVectorInst::ShuffleVectorInst(Value *V1, Value *V2, ArrayRef<int> Mask,
2295 const Twine &Name,
2296 Instruction *InsertBefore)
2297 : Instruction(
2298 VectorType::get(cast<VectorType>(V1->getType())->getElementType(),
2299 Mask.size(), isa<ScalableVectorType>(V1->getType())),
2300 ShuffleVector, OperandTraits<ShuffleVectorInst>::op_begin(this),
2301 OperandTraits<ShuffleVectorInst>::operands(this), InsertBefore) {
2302 assert(isValidOperands(V1, V2, Mask) &&
2303 "Invalid shuffle vector instruction operands!");
2304 Op<0>() = V1;
2305 Op<1>() = V2;
2306 setShuffleMask(Mask);
2307 setName(Name);
2308}
2309
2310ShuffleVectorInst::ShuffleVectorInst(Value *V1, Value *V2, ArrayRef<int> Mask,
2311 const Twine &Name, BasicBlock *InsertAtEnd)
2312 : Instruction(
2313 VectorType::get(cast<VectorType>(V1->getType())->getElementType(),
2314 Mask.size(), isa<ScalableVectorType>(V1->getType())),
2315 ShuffleVector, OperandTraits<ShuffleVectorInst>::op_begin(this),
2316 OperandTraits<ShuffleVectorInst>::operands(this), InsertAtEnd) {
2317 assert(isValidOperands(V1, V2, Mask) &&
2318 "Invalid shuffle vector instruction operands!");
2319
2320 Op<0>() = V1;
2321 Op<1>() = V2;
2322 setShuffleMask(Mask);
2323 setName(Name);
2324}
2325
2326void ShuffleVectorInst::commute() {
2327 int NumOpElts = cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
2328 int NumMaskElts = ShuffleMask.size();
2329 SmallVector<int, 16> NewMask(NumMaskElts);
2330 for (int i = 0; i != NumMaskElts; ++i) {
2331 int MaskElt = getMaskValue(i);
2332 if (MaskElt == PoisonMaskElem) {
2333 NewMask[i] = PoisonMaskElem;
2334 continue;
2335 }
2336 assert(MaskElt >= 0 && MaskElt < 2 * NumOpElts && "Out-of-range mask");
2337 MaskElt = (MaskElt < NumOpElts) ? MaskElt + NumOpElts : MaskElt - NumOpElts;
2338 NewMask[i] = MaskElt;
2339 }
2340 setShuffleMask(NewMask);
2341 Op<0>().swap(Op<1>());
2342}
2343
2344bool ShuffleVectorInst::isValidOperands(const Value *V1, const Value *V2,
2345 ArrayRef<int> Mask) {
2346 // V1 and V2 must be vectors of the same type.
2347 if (!isa<VectorType>(V1->getType()) || V1->getType() != V2->getType())
2348 return false;
2349
2350 // Make sure the mask elements make sense.
2351 int V1Size =
2352 cast<VectorType>(V1->getType())->getElementCount().getKnownMinValue();
2353 for (int Elem : Mask)
2354 if (Elem != PoisonMaskElem && Elem >= V1Size * 2)
2355 return false;
2356
2357 if (isa<ScalableVectorType>(V1->getType()))
2358 if ((Mask[0] != 0 && Mask[0] != PoisonMaskElem) || !all_equal(Mask))
2359 return false;
2360
2361 return true;
2362}
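// Illustrative note (not part of the upstream source): for scalable vectors
// only splats of element 0 (all-zero mask) and fully-undef masks are
// expressible, e.g. <0, 0, 0, 0> is accepted but <1, 0, 0, 0> is rejected.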
2363
2364bool ShuffleVectorInst::isValidOperands(const Value *V1, const Value *V2,
2365 const Value *Mask) {
2366 // V1 and V2 must be vectors of the same type.
2367 if (!V1->getType()->isVectorTy() || V1->getType() != V2->getType())
2368 return false;
2369
2370 // Mask must be vector of i32, and must be the same kind of vector as the
2371 // input vectors
2372 auto *MaskTy = dyn_cast<VectorType>(Mask->getType());
2373 if (!MaskTy || !MaskTy->getElementType()->isIntegerTy(32) ||
2374 isa<ScalableVectorType>(MaskTy) != isa<ScalableVectorType>(V1->getType()))
2375 return false;
2376
2377 // Check to see if Mask is valid.
2378 if (isa<UndefValue>(Mask) || isa<ConstantAggregateZero>(Mask))
2379 return true;
2380
2381 if (const auto *MV = dyn_cast<ConstantVector>(Mask)) {
2382 unsigned V1Size = cast<FixedVectorType>(V1->getType())->getNumElements();
2383 for (Value *Op : MV->operands()) {
2384 if (auto *CI = dyn_cast<ConstantInt>(Op)) {
2385 if (CI->uge(V1Size*2))
2386 return false;
2387 } else if (!isa<UndefValue>(Op)) {
2388 return false;
2389 }
2390 }
2391 return true;
2392 }
2393
2394 if (const auto *CDS = dyn_cast<ConstantDataSequential>(Mask)) {
2395 unsigned V1Size = cast<FixedVectorType>(V1->getType())->getNumElements();
2396 for (unsigned i = 0, e = cast<FixedVectorType>(MaskTy)->getNumElements();
2397 i != e; ++i)
2398 if (CDS->getElementAsInteger(i) >= V1Size*2)
2399 return false;
2400 return true;
2401 }
2402
2403 return false;
2404}
2405
2406void ShuffleVectorInst::getShuffleMask(const Constant *Mask,
2407 SmallVectorImpl<int> &Result) {
2408 ElementCount EC = cast<VectorType>(Mask->getType())->getElementCount();
2409
2410 if (isa<ConstantAggregateZero>(Mask)) {
2411 Result.resize(EC.getKnownMinValue(), 0);
2412 return;
2413 }
2414
2415 Result.reserve(EC.getKnownMinValue());
2416
2417 if (EC.isScalable()) {
2418 assert((isa<ConstantAggregateZero>(Mask) || isa<UndefValue>(Mask)) &&
2419 "Scalable vector shuffle mask must be undef or zeroinitializer");
2420 int MaskVal = isa<UndefValue>(Mask) ? -1 : 0;
2421 for (unsigned I = 0; I < EC.getKnownMinValue(); ++I)
2422 Result.emplace_back(MaskVal);
2423 return;
2424 }
2425
2426 unsigned NumElts = EC.getKnownMinValue();
2427
2428 if (auto *CDS = dyn_cast<ConstantDataSequential>(Mask)) {
2429 for (unsigned i = 0; i != NumElts; ++i)
2430 Result.push_back(CDS->getElementAsInteger(i));
2431 return;
2432 }
2433 for (unsigned i = 0; i != NumElts; ++i) {
2434 Constant *C = Mask->getAggregateElement(i);
2435 Result.push_back(isa<UndefValue>(C) ? -1 :
2436 cast<ConstantInt>(C)->getZExtValue());
2437 }
2438}
2439
2440void ShuffleVectorInst::setShuffleMask(ArrayRef<int> Mask) {
2441 ShuffleMask.assign(Mask.begin(), Mask.end());
2442 ShuffleMaskForBitcode = convertShuffleMaskForBitcode(Mask, getType());
2443}
2444
2445Constant *ShuffleVectorInst::convertShuffleMaskForBitcode(ArrayRef<int> Mask,
2446 Type *ResultTy) {
2447 Type *Int32Ty = Type::getInt32Ty(ResultTy->getContext());
2448 if (isa<ScalableVectorType>(ResultTy)) {
2449 assert(all_equal(Mask) && "Unexpected shuffle");
2450 Type *VecTy = VectorType::get(Int32Ty, Mask.size(), true);
2451 if (Mask[0] == 0)
2452 return Constant::getNullValue(VecTy);
2453 return UndefValue::get(VecTy);
2454 }
2455 SmallVector<Constant *, 16> MaskConst;
2456 for (int Elem : Mask) {
2457 if (Elem == PoisonMaskElem)
2458 MaskConst.push_back(PoisonValue::get(Int32Ty));
2459 else
2460 MaskConst.push_back(ConstantInt::get(Int32Ty, Elem));
2461 }
2462 return ConstantVector::get(MaskConst);
2463}
2464
2465static bool isSingleSourceMaskImpl(ArrayRef<int> Mask, int NumOpElts) {
2466 assert(!Mask.empty() && "Shuffle mask must contain elements");
2467 bool UsesLHS = false;
2468 bool UsesRHS = false;
2469 for (int I : Mask) {
2470 if (I == -1)
2471 continue;
2472 assert(I >= 0 && I < (NumOpElts * 2) &&
2473 "Out-of-bounds shuffle mask element");
2474 UsesLHS |= (I < NumOpElts);
2475 UsesRHS |= (I >= NumOpElts);
2476 if (UsesLHS && UsesRHS)
2477 return false;
2478 }
2479 // Allow for degenerate case: completely undef mask means neither source is used.
2480 return UsesLHS || UsesRHS;
2481}
2482
2483bool ShuffleVectorInst::isSingleSourceMask(ArrayRef<int> Mask, int NumSrcElts) {
2484 // We don't have vector operand size information, so assume operands are the
2485 // same size as the mask.
2486 return isSingleSourceMaskImpl(Mask, NumSrcElts);
2487}
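// Illustrative example (not part of the upstream source): with NumSrcElts = 4,
// <2, 0, 1, 3> (only the first operand) and <6, 4, 5, 7> (only the second)
// are single-source, while <0, 5, 2, 7> mixes both operands and is not.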
2488
2489static bool isIdentityMaskImpl(ArrayRef<int> Mask, int NumOpElts) {
2490 if (!isSingleSourceMaskImpl(Mask, NumOpElts))
2491 return false;
2492 for (int i = 0, NumMaskElts = Mask.size(); i < NumMaskElts; ++i) {
2493 if (Mask[i] == -1)
2494 continue;
2495 if (Mask[i] != i && Mask[i] != (NumOpElts + i))
2496 return false;
2497 }
2498 return true;
2499}
2500
2501bool ShuffleVectorInst::isIdentityMask(ArrayRef<int> Mask, int NumSrcElts) {
2502 if (Mask.size() != static_cast<unsigned>(NumSrcElts))
2503 return false;
2504 // We don't have vector operand size information, so assume operands are the
2505 // same size as the mask.
2506 return isIdentityMaskImpl(Mask, NumSrcElts);
2507}
2508
2509bool ShuffleVectorInst::isReverseMask(ArrayRef<int> Mask, int NumSrcElts) {
2510 if (Mask.size() != static_cast<unsigned>(NumSrcElts))
2511 return false;
2512 if (!isSingleSourceMask(Mask, NumSrcElts))
2513 return false;
2514
2515 // The number of elements in the mask must be at least 2.
2516 if (NumSrcElts < 2)
2517 return false;
2518
2519 for (int I = 0, E = Mask.size(); I < E; ++I) {
2520 if (Mask[I] == -1)
2521 continue;
2522 if (Mask[I] != (NumSrcElts - 1 - I) &&
2523 Mask[I] != (NumSrcElts + NumSrcElts - 1 - I))
2524 return false;
2525 }
2526 return true;
2527}
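// Illustrative example (not part of the upstream source): with NumSrcElts = 4,
// <3, 2, 1, 0> (reversing the first operand) and <7, 6, 5, 4> (reversing the
// second) both qualify; undef (-1) elements are ignored.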
2528
2529bool ShuffleVectorInst::isZeroEltSplatMask(ArrayRef<int> Mask, int NumSrcElts) {
2530 if (Mask.size() != static_cast<unsigned>(NumSrcElts))
2531 return false;
2532 if (!isSingleSourceMask(Mask, NumSrcElts))
2533 return false;
2534 for (int I = 0, E = Mask.size(); I < E; ++I) {
2535 if (Mask[I] == -1)
2536 continue;
2537 if (Mask[I] != 0 && Mask[I] != NumSrcElts)
2538 return false;
2539 }
2540 return true;
2541}
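// Illustrative example (not part of the upstream source): with NumSrcElts = 4,
// <0, 0, 0, 0> splats element 0 of the first operand and <4, 4, 4, 4> splats
// element 0 of the second operand.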
2542
2543bool ShuffleVectorInst::isSelectMask(ArrayRef<int> Mask, int NumSrcElts) {
2544 if (Mask.size() != static_cast<unsigned>(NumSrcElts))
2545 return false;
2546 // Select is differentiated from identity. It requires using both sources.
2547 if (isSingleSourceMask(Mask, NumSrcElts))
2548 return false;
2549 for (int I = 0, E = Mask.size(); I < E; ++I) {
2550 if (Mask[I] == -1)
2551 continue;
2552 if (Mask[I] != I && Mask[I] != (NumSrcElts + I))
2553 return false;
2554 }
2555 return true;
2556}
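// Illustrative example (not part of the upstream source): with NumSrcElts = 4,
// <0, 5, 2, 7> takes lanes 0 and 2 from the first operand and lanes 1 and 3
// from the second, like a per-lane select between the two inputs.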
2557
2558bool ShuffleVectorInst::isTransposeMask(ArrayRef<int> Mask, int NumSrcElts) {
2559 // Example masks that will return true:
2560 // v1 = <a, b, c, d>
2561 // v2 = <e, f, g, h>
2562 // trn1 = shufflevector v1, v2 <0, 4, 2, 6> = <a, e, c, g>
2563 // trn2 = shufflevector v1, v2 <1, 5, 3, 7> = <b, f, d, h>
2564
2565 if (Mask.size() != static_cast<unsigned>(NumSrcElts))
2566 return false;
2567 // 1. The number of elements in the mask must be a power-of-2 and at least 2.
2568 int Sz = Mask.size();
2569 if (Sz < 2 || !isPowerOf2_32(Sz))
2570 return false;
2571
2572 // 2. The first element of the mask must be either a 0 or a 1.
2573 if (Mask[0] != 0 && Mask[0] != 1)
2574 return false;
2575
2576 // 3. The difference between the first 2 elements must be equal to the
2577 // number of elements in the mask.
2578 if ((Mask[1] - Mask[0]) != NumSrcElts)
2579 return false;
2580
2581 // 4. The difference between consecutive even-numbered and odd-numbered
2582 // elements must be equal to 2.
2583 for (int I = 2; I < Sz; ++I) {
2584 int MaskEltVal = Mask[I];
2585 if (MaskEltVal == -1)
2586 return false;
2587 int MaskEltPrevVal = Mask[I - 2];
2588 if (MaskEltVal - MaskEltPrevVal != 2)
2589 return false;
2590 }
2591 return true;
2592}
2593
2594bool ShuffleVectorInst::isSpliceMask(ArrayRef<int> Mask, int NumSrcElts,
2595 int &Index) {
2596 if (Mask.size() != static_cast<unsigned>(NumSrcElts))
2597 return false;
2598 // Example: shufflevector <4 x n> A, <4 x n> B, <1,2,3,4>
2599 int StartIndex = -1;
2600 for (int I = 0, E = Mask.size(); I != E; ++I) {
2601 int MaskEltVal = Mask[I];
2602 if (MaskEltVal == -1)
2603 continue;
2604
2605 if (StartIndex == -1) {
2606 // Don't support a StartIndex that begins in the second input, or if the
2607 // first non-undef index would access below the StartIndex.
2608 if (MaskEltVal < I || NumSrcElts <= (MaskEltVal - I))
2609 return false;
2610
2611 StartIndex = MaskEltVal - I;
2612 continue;
2613 }
2614
2615 // Splice is sequential starting from StartIndex.
2616 if (MaskEltVal != (StartIndex + I))
2617 return false;
2618 }
2619
2620 if (StartIndex == -1)
2621 return false;
2622
2623 // NOTE: This accepts StartIndex == 0 (COPY).
2624 Index = StartIndex;
2625 return true;
2626}
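// Illustrative example (not part of the upstream source): with NumSrcElts = 4,
// <1, 2, 3, 4> concatenates the two operands and extracts 4 consecutive
// elements starting at position 1, so Index is set to 1.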
2627
2628bool ShuffleVectorInst::isExtractSubvectorMask(ArrayRef<int> Mask,
2629 int NumSrcElts, int &Index) {
2630 // Must extract from a single source.
2631 if (!isSingleSourceMaskImpl(Mask, NumSrcElts))
2632 return false;
2633
2634 // Must be smaller (else this is an Identity shuffle).
2635 if (NumSrcElts <= (int)Mask.size())
2636 return false;
2637
2638 // Find start of extraction, accounting that we may start with an UNDEF.
2639 int SubIndex = -1;
2640 for (int i = 0, e = Mask.size(); i != e; ++i) {
2641 int M = Mask[i];
2642 if (M < 0)
2643 continue;
2644 int Offset = (M % NumSrcElts) - i;
2645 if (0 <= SubIndex && SubIndex != Offset)
2646 return false;
2647 SubIndex = Offset;
2648 }
2649
2650 if (0 <= SubIndex && SubIndex + (int)Mask.size() <= NumSrcElts) {
2651 Index = SubIndex;
2652 return true;
2653 }
2654 return false;
2655}
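// Illustrative example (not part of the upstream source): with NumSrcElts = 8,
// the 4-element mask <2, 3, 4, 5> extracts the subvector starting at element 2
// of the wider source, so Index is set to 2.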
2656
2657bool ShuffleVectorInst::isInsertSubvectorMask(ArrayRef<int> Mask,
2658 int NumSrcElts, int &NumSubElts,
2659 int &Index) {
2660 int NumMaskElts = Mask.size();
2661
2662 // Don't try to match if we're shuffling to a smaller size.
2663 if (NumMaskElts < NumSrcElts)
2664 return false;
2665
2666 // TODO: We don't recognize self-insertion/widening.
2667 if (isSingleSourceMaskImpl(Mask, NumSrcElts))
2668 return false;
2669
2670 // Determine which mask elements are attributed to which source.
2671 APInt UndefElts = APInt::getZero(NumMaskElts);
2672 APInt Src0Elts = APInt::getZero(NumMaskElts);
2673 APInt Src1Elts = APInt::getZero(NumMaskElts);
2674 bool Src0Identity = true;
2675 bool Src1Identity = true;
2676
2677 for (int i = 0; i != NumMaskElts; ++i) {
2678 int M = Mask[i];
2679 if (M < 0) {
2680 UndefElts.setBit(i);
2681 continue;
2682 }
2683 if (M < NumSrcElts) {
2684 Src0Elts.setBit(i);
2685 Src0Identity &= (M == i);
2686 continue;
2687 }
2688 Src1Elts.setBit(i);
2689 Src1Identity &= (M == (i + NumSrcElts));
2690 }
2691 assert((Src0Elts | Src1Elts | UndefElts).isAllOnes() &&
2692 "unknown shuffle elements");
2693 assert(!Src0Elts.isZero() && !Src1Elts.isZero() &&
2694 "2-source shuffle not found");
2695
2696 // Determine lo/hi span ranges.
2697 // TODO: How should we handle undefs at the start of subvector insertions?
2698 int Src0Lo = Src0Elts.countr_zero();
2699 int Src1Lo = Src1Elts.countr_zero();
2700 int Src0Hi = NumMaskElts - Src0Elts.countl_zero();
2701 int Src1Hi = NumMaskElts - Src1Elts.countl_zero();
2702
2703 // If src0 is in place, see if the src1 elements are in place within their
2704 // own span.
2705 if (Src0Identity) {
2706 int NumSub1Elts = Src1Hi - Src1Lo;
2707 ArrayRef<int> Sub1Mask = Mask.slice(Src1Lo, NumSub1Elts);
2708 if (isIdentityMaskImpl(Sub1Mask, NumSrcElts)) {
2709 NumSubElts = NumSub1Elts;
2710 Index = Src1Lo;
2711 return true;
2712 }
2713 }
2714
2715 // If src1 is in place, see if the src0 elements are in place within their
2716 // own span.
2717 if (Src1Identity) {
2718 int NumSub0Elts = Src0Hi - Src0Lo;
2719 ArrayRef<int> Sub0Mask = Mask.slice(Src0Lo, NumSub0Elts);
2720 if (isIdentityMaskImpl(Sub0Mask, NumSrcElts)) {
2721 NumSubElts = NumSub0Elts;
2722 Index = Src0Lo;
2723 return true;
2724 }
2725 }
2726
2727 return false;
2728}
2729
2730bool ShuffleVectorInst::isIdentityWithPadding() const {
2731 // FIXME: Not currently possible to express a shuffle mask for a scalable
2732 // vector for this case.
2733 if (isa<ScalableVectorType>(getType()))
2734 return false;
2735
2736 int NumOpElts = cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
2737 int NumMaskElts = cast<FixedVectorType>(getType())->getNumElements();
2738 if (NumMaskElts <= NumOpElts)
2739 return false;
2740
2741 // The first part of the mask must choose elements from exactly 1 source op.
2742 ArrayRef<int> Mask = getShuffleMask().slice(0, NumOpElts);
2743 if (!isIdentityMaskImpl(Mask, NumOpElts))
2744 return false;
2745
2746 // All extending must be with undef elements.
2747 for (int i = NumOpElts; i < NumMaskElts; ++i)
2748 if (Mask[i] != -1)
2749 return false;
2750
2751 return true;
2752}
2753
2754bool ShuffleVectorInst::isIdentityWithExtract() const {
2755 // FIXME: Not currently possible to express a shuffle mask for a scalable
2756 // vector for this case.
2757 if (isa<ScalableVectorType>(getType()))
2758 return false;
2759
2760 int NumOpElts = cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
2761 int NumMaskElts = cast<FixedVectorType>(getType())->getNumElements();
2762 if (NumMaskElts >= NumOpElts)
2763 return false;
2764
2765 return isIdentityMaskImpl(getShuffleMask(), NumOpElts);
2766}
2767
2768bool ShuffleVectorInst::isConcat() const {
2769 // Vector concatenation is differentiated from identity with padding.
2770 if (isa<UndefValue>(Op<0>()) || isa<UndefValue>(Op<1>()))
2771 return false;
2772
2773 // FIXME: Not currently possible to express a shuffle mask for a scalable
2774 // vector for this case.
2775 if (isa<ScalableVectorType>(getType()))
2776 return false;
2777
2778 int NumOpElts = cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
2779 int NumMaskElts = cast<FixedVectorType>(getType())->getNumElements();
2780 if (NumMaskElts != NumOpElts * 2)
2781 return false;
2782
2783 // Use the mask length rather than the operands' vector lengths here. We
2784 // already know that the shuffle returns a vector twice as long as the inputs,
2785 // and neither of the inputs are undef vectors. If the mask picks consecutive
2786 // elements from both inputs, then this is a concatenation of the inputs.
2787 return isIdentityMaskImpl(getShuffleMask(), NumMaskElts);
2788}
2789
2790static bool isReplicationMaskWithParams(ArrayRef<int> Mask,
2791 int ReplicationFactor, int VF) {
2792 assert(Mask.size() == (unsigned)ReplicationFactor * VF &&
2793 "Unexpected mask size.");
2794
2795 for (int CurrElt : seq(VF)) {
2796 ArrayRef<int> CurrSubMask = Mask.take_front(ReplicationFactor);
2797 assert(CurrSubMask.size() == (unsigned)ReplicationFactor &&
2798 "Run out of mask?");
2799 Mask = Mask.drop_front(ReplicationFactor);
2800 if (!all_of(CurrSubMask, [CurrElt](int MaskElt) {
2801 return MaskElt == PoisonMaskElem || MaskElt == CurrElt;
2802 }))
2803 return false;
2804 }
2805 assert(Mask.empty() && "Did not consume the whole mask?");
2806
2807 return true;
2808}
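// Illustrative example (not part of the upstream source): with
// ReplicationFactor = 3 and VF = 2, the mask <0, 0, 0, 1, 1, 1> replicates
// each of the 2 source elements 3 times; undef elements may stand in for any
// of the expected values.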
2809
2810bool ShuffleVectorInst::isReplicationMask(ArrayRef<int> Mask,
2811 int &ReplicationFactor, int &VF) {
2812 // undef-less case is trivial.
2813 if (!llvm::is_contained(Mask, PoisonMaskElem)) {
2814 ReplicationFactor =
2815 Mask.take_while([](int MaskElt) { return MaskElt == 0; }).size();
2816 if (ReplicationFactor == 0 || Mask.size() % ReplicationFactor != 0)
2817 return false;
2818 VF = Mask.size() / ReplicationFactor;
2819 return isReplicationMaskWithParams(Mask, ReplicationFactor, VF);
2820 }
2821
2822 // However, if the mask contains undef's, we have to enumerate possible tuples
2823 // and pick one. There are bounds on replication factor: [1, mask size]
2824 // (where RF=1 is an identity shuffle, RF=mask size is a broadcast shuffle)
2825 // Additionally, mask size is a replication factor multiplied by vector size,
2826 // which further significantly reduces the search space.
2827
2828 // Before doing that, let's perform basic correctness checking first.
2829 int Largest = -1;
2830 for (int MaskElt : Mask) {
2831 if (MaskElt == PoisonMaskElem)
2832 continue;
2833 // Elements must be in non-decreasing order.
2834 if (MaskElt < Largest)
2835 return false;
2836 Largest = std::max(Largest, MaskElt);
2837 }
2838
2839 // Prefer larger replication factor if all else equal.
2840 for (int PossibleReplicationFactor :
2841 reverse(seq_inclusive<unsigned>(1, Mask.size()))) {
2842 if (Mask.size() % PossibleReplicationFactor != 0)
2843 continue;
2844 int PossibleVF = Mask.size() / PossibleReplicationFactor;
2845 if (!isReplicationMaskWithParams(Mask, PossibleReplicationFactor,
2846 PossibleVF))
2847 continue;
2848 ReplicationFactor = PossibleReplicationFactor;
2849 VF = PossibleVF;
2850 return true;
2851 }
2852
2853 return false;
2854}
2855
2856bool ShuffleVectorInst::isReplicationMask(int &ReplicationFactor,
2857 int &VF) const {
2858 // Not possible to express a shuffle mask for a scalable vector for this
2859 // case.
2860 if (isa<ScalableVectorType>(getType()))
2861 return false;
2862
2863 VF = cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
2864 if (ShuffleMask.size() % VF != 0)
2865 return false;
2866 ReplicationFactor = ShuffleMask.size() / VF;
2867
2868 return isReplicationMaskWithParams(ShuffleMask, ReplicationFactor, VF);
2869}
2870
2871bool ShuffleVectorInst::isOneUseSingleSourceMask(ArrayRef<int> Mask, int VF) {
2872 if (VF <= 0 || Mask.size() < static_cast<unsigned>(VF) ||
2873 Mask.size() % VF != 0)
2874 return false;
2875 for (unsigned K = 0, Sz = Mask.size(); K < Sz; K += VF) {
2876 ArrayRef<int> SubMask = Mask.slice(K, VF);
2877 if (all_of(SubMask, [](int Idx) { return Idx == PoisonMaskElem; }))
2878 continue;
2879 SmallBitVector Used(VF, false);
2880 for (int Idx : SubMask) {
2881 if (Idx != PoisonMaskElem && Idx < VF)
2882 Used.set(Idx);
2883 }
2884 if (!Used.all())
2885 return false;
2886 }
2887 return true;
2888}
2889
2890/// Return true if this shuffle mask is a one-use, single-source permutation.
2891bool ShuffleVectorInst::isOneUseSingleSourceMask(int VF) const {
2892 // Not possible to express a shuffle mask for a scalable vector for this
2893 // case.
2894 if (isa<ScalableVectorType>(getType()))
2895 return false;
2896 if (!isSingleSourceMask(ShuffleMask, VF))
2897 return false;
2898
2899 return isOneUseSingleSourceMask(ShuffleMask, VF);
2900}
2901
2902bool ShuffleVectorInst::isInterleave(unsigned Factor) {
2903 FixedVectorType *OpTy = dyn_cast<FixedVectorType>(getOperand(0)->getType());
2904 // shuffle_vector can only interleave fixed length vectors - for scalable
2905 // vectors, see the @llvm.vector.interleave2 intrinsic
2906 if (!OpTy)
2907 return false;
2908 unsigned OpNumElts = OpTy->getNumElements();
2909
2910 return isInterleaveMask(ShuffleMask, Factor, OpNumElts * 2);
2911}
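// Illustrative example (not part of the upstream source): for two 4-element
// operands, the factor-2 interleave mask is <0, 4, 1, 5, 2, 6, 3, 7>, i.e.
// lanes alternate between the first and second source vector.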
2912
2913bool ShuffleVectorInst::isInterleaveMask(
2914 ArrayRef<int> Mask, unsigned Factor, unsigned NumInputElts,
2915 SmallVectorImpl<unsigned> &StartIndexes) {
2916 unsigned NumElts = Mask.size();
2917 if (NumElts % Factor)
2918 return false;
2919
2920 unsigned LaneLen = NumElts / Factor;
2921 if (!isPowerOf2_32(LaneLen))
2922 return false;
2923
2924 StartIndexes.resize(Factor);
2925
2926 // Check whether each element matches the general interleaved rule.
2927 // Ignore undef elements, as long as the defined elements match the rule.
2928 // Outer loop processes all factors (x, y, z in the above example)
2929 unsigned I = 0, J;
2930 for (; I < Factor; I++) {
2931 unsigned SavedLaneValue;
2932 unsigned SavedNoUndefs = 0;
2933
2934 // Inner loop processes consecutive accesses (x, x+1... in the example)
2935 for (J = 0; J < LaneLen - 1; J++) {
2936 // Lane computes x's position in the Mask
2937 unsigned Lane = J * Factor + I;
2938 unsigned NextLane = Lane + Factor;
2939 int LaneValue = Mask[Lane];
2940 int NextLaneValue = Mask[NextLane];
2941
2942 // If both are defined, values must be sequential
2943 if (LaneValue >= 0 && NextLaneValue >= 0 &&
2944 LaneValue + 1 != NextLaneValue)
2945 break;
2946
2947 // If the next value is undef, save the current one as reference
2948 if (LaneValue >= 0 && NextLaneValue < 0) {
2949 SavedLaneValue = LaneValue;
2950 SavedNoUndefs = 1;
2951 }
2952
2953 // Undefs are allowed, but defined elements must still be consecutive:
2954 // i.e.: x,..., undef,..., x + 2,..., undef,..., undef,..., x + 5, ....
2955 // Verify this by storing the last non-undef followed by an undef
2956 // Check that following non-undef masks are incremented with the
2957 // corresponding distance.
2958 if (SavedNoUndefs > 0 && LaneValue < 0) {
2959 SavedNoUndefs++;
2960 if (NextLaneValue >= 0 &&
2961 SavedLaneValue + SavedNoUndefs != (unsigned)NextLaneValue)
2962 break;
2963 }
2964 }
2965
2966 if (J < LaneLen - 1)
2967 return false;
2968
2969 int StartMask = 0;
2970 if (Mask[I] >= 0) {
2971 // Check that the start of the I range (J=0) is greater than 0
2972 StartMask = Mask[I];
2973 } else if (Mask[(LaneLen - 1) * Factor + I] >= 0) {
2974 // StartMask defined by the last value in lane
2975 StartMask = Mask[(LaneLen - 1) * Factor + I] - J;
2976 } else if (SavedNoUndefs > 0) {
2977 // StartMask defined by some non-zero value in the j loop
2978 StartMask = SavedLaneValue - (LaneLen - 1 - SavedNoUndefs);
2979 }
2980 // else StartMask remains set to 0, i.e. all elements are undefs
2981
2982 if (StartMask < 0)
2983 return false;
2984 // We must stay within the vectors; this case can happen with undefs.
2985 if (StartMask + LaneLen > NumInputElts)
2986 return false;
2987
2988 StartIndexes[I] = StartMask;
2989 }
2990
2991 return true;
2992}
2993
2994/// Check if the mask is a DE-interleave mask of the given factor
2995/// \p Factor like:
2996/// <Index, Index+Factor, ..., Index+(NumElts-1)*Factor>
2997bool ShuffleVectorInst::isDeInterleaveMaskOfFactor(ArrayRef<int> Mask,
2998 unsigned Factor,
2999 unsigned &Index) {
3000 // Check all potential start indices from 0 to (Factor - 1).
3001 for (unsigned Idx = 0; Idx < Factor; Idx++) {
3002 unsigned I = 0;
3003
3004 // Check that elements are in ascending order by Factor. Ignore undef
3005 // elements.
3006 for (; I < Mask.size(); I++)
3007 if (Mask[I] >= 0 && static_cast<unsigned>(Mask[I]) != Idx + I * Factor)
3008 break;
3009
3010 if (I == Mask.size()) {
3011 Index = Idx;
3012 return true;
3013 }
3014 }
3015
3016 return false;
3017}
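// Illustrative example (not part of the upstream source): with Factor = 2,
// the mask <0, 2, 4, 6> picks the even lanes (Index = 0) and <1, 3, 5, 7>
// picks the odd lanes (Index = 1).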
3018
3019/// Try to lower a vector shuffle as a bit rotation.
3020///
3021/// Look for a repeated rotation pattern in each sub group.
3022/// Returns an element-wise left bit rotation amount or -1 if failed.
3023static int matchShuffleAsBitRotate(ArrayRef<int> Mask, int NumSubElts) {
3024 int NumElts = Mask.size();
3025 assert((NumElts % NumSubElts) == 0 && "Illegal shuffle mask");
3026
3027 int RotateAmt = -1;
3028 for (int i = 0; i != NumElts; i += NumSubElts) {
3029 for (int j = 0; j != NumSubElts; ++j) {
3030 int M = Mask[i + j];
3031 if (M < 0)
3032 continue;
3033 if (M < i || M >= i + NumSubElts)
3034 return -1;
3035 int Offset = (NumSubElts - (M - (i + j))) % NumSubElts;
3036 if (0 <= RotateAmt && Offset != RotateAmt)
3037 return -1;
3038 RotateAmt = Offset;
3039 }
3040 }
3041 return RotateAmt;
3042}
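// Illustrative example (not part of the upstream source): for i8 elements
// grouped as NumSubElts = 2, the mask <1, 0, 3, 2> swaps the bytes of each
// 16-bit lane, i.e. an element-wise left rotate amount of 1 (8 bits).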
3043
3044bool ShuffleVectorInst::isBitRotateMask(
3045 ArrayRef<int> Mask, unsigned EltSizeInBits, unsigned MinSubElts,
3046 unsigned MaxSubElts, unsigned &NumSubElts, unsigned &RotateAmt) {
3047 for (NumSubElts = MinSubElts; NumSubElts <= MaxSubElts; NumSubElts *= 2) {
3048 int EltRotateAmt = matchShuffleAsBitRotate(Mask, NumSubElts);
3049 if (EltRotateAmt < 0)
3050 continue;
3051 RotateAmt = EltRotateAmt * EltSizeInBits;
3052 return true;
3053 }
3054
3055 return false;
3056}
3057
3058//===----------------------------------------------------------------------===//
3059// InsertValueInst Class
3060//===----------------------------------------------------------------------===//
3061
3062void InsertValueInst::init(Value *Agg, Value *Val, ArrayRef<unsigned> Idxs,
3063 const Twine &Name) {
3064 assert(getNumOperands() == 2 && "NumOperands not initialized?");
3065
3066 // There's no fundamental reason why we require at least one index
3067 // (other than weirdness with &*IdxBegin being invalid; see
3068 // getelementptr's init routine for example). But there's no
3069 // present need to support it.
3070 assert(!Idxs.empty() && "InsertValueInst must have at least one index");
3071
3072 assert(ExtractValueInst::getIndexedType(Agg->getType(), Idxs) ==
3073 Val->getType() && "Inserted value must match indexed type!");
3074 Op<0>() = Agg;
3075 Op<1>() = Val;
3076
3077 Indices.append(Idxs.begin(), Idxs.end());
3078 setName(Name);
3079}
3080
3081InsertValueInst::InsertValueInst(const InsertValueInst &IVI)
3082 : Instruction(IVI.getType(), InsertValue,
3083 OperandTraits<InsertValueInst>::op_begin(this), 2),
3084 Indices(IVI.Indices) {
3085 Op<0>() = IVI.getOperand(0);
3086 Op<1>() = IVI.getOperand(1);
3087 SubclassOptionalData = IVI.SubclassOptionalData;
3088}
3089
3090//===----------------------------------------------------------------------===//
3091// ExtractValueInst Class
3092//===----------------------------------------------------------------------===//
3093
3094void ExtractValueInst::init(ArrayRef<unsigned> Idxs, const Twine &Name) {
3095 assert(getNumOperands() == 1 && "NumOperands not initialized?");
3096
3097 // There's no fundamental reason why we require at least one index.
3098 // But there's no present need to support it.
3099 assert(!Idxs.empty() && "ExtractValueInst must have at least one index");
3100
3101 Indices.append(Idxs.begin(), Idxs.end());
3102 setName(Name);
3103}
3104
3105ExtractValueInst::ExtractValueInst(const ExtractValueInst &EVI)
3106 : UnaryInstruction(EVI.getType(), ExtractValue, EVI.getOperand(0)),
3107 Indices(EVI.Indices) {
3108 SubclassOptionalData = EVI.SubclassOptionalData;
3109}
3110
3111// getIndexedType - Returns the type of the element that would be extracted
3112// with an extractvalue instruction with the specified parameters.
3113//
3114// A null type is returned if the indices are invalid for the specified
3115// pointer type.
3116//
3117Type *ExtractValueInst::getIndexedType(Type *Agg,
3118 ArrayRef<unsigned> Idxs) {
3119 for (unsigned Index : Idxs) {
3120 // We can't use CompositeType::indexValid(Index) here.
3121 // indexValid() always returns true for arrays because getelementptr allows
3122 // out-of-bounds indices. Since we don't allow those for extractvalue and
3123 // insertvalue we need to check array indexing manually.
3124 // Since the only other types we can index into are struct types it's just
3125 // as easy to check those manually as well.
3126 if (ArrayType *AT = dyn_cast<ArrayType>(Agg)) {
3127 if (Index >= AT->getNumElements())
3128 return nullptr;
3129 Agg = AT->getElementType();
3130 } else if (StructType *ST = dyn_cast<StructType>(Agg)) {
3131 if (Index >= ST->getNumElements())
3132 return nullptr;
3133 Agg = ST->getElementType(Index);
3134 } else {
3135 // Not a valid type to index into.
3136 return nullptr;
3137 }
3138 }
3139 return const_cast<Type*>(Agg);
3140}
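// Illustrative example (not part of the upstream source): for
// Agg = {i32, {float, [2 x i8]}}, the index list (1, 1) first selects the
// nested struct and then its second member, so the result type is [2 x i8].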
3141
3142//===----------------------------------------------------------------------===//
3143// UnaryOperator Class
3144//===----------------------------------------------------------------------===//
3145
3146UnaryOperator::UnaryOperator(UnaryOps iType, Value *S, Type *Ty,
3147 const Twine &Name,
3148 BasicBlock::iterator InsertBefore)
3149 : UnaryInstruction(Ty, iType, S, InsertBefore) {
3150 Op<0>() = S;
3151 setName(Name);
3152 AssertOK();
3153}
3154
3155UnaryOperator::UnaryOperator(UnaryOps iType, Value *S,
3156 Type *Ty, const Twine &Name,
3157 Instruction *InsertBefore)
3158 : UnaryInstruction(Ty, iType, S, InsertBefore) {
3159 Op<0>() = S;
3160 setName(Name);
3161 AssertOK();
3162}
3163
3164UnaryOperator::UnaryOperator(UnaryOps iType, Value *S,
3165 Type *Ty, const Twine &Name,
3166 BasicBlock *InsertAtEnd)
3167 : UnaryInstruction(Ty, iType, S, InsertAtEnd) {
3168 Op<0>() = S;
3169 setName(Name);
3170 AssertOK();
3171}
3172
3173UnaryOperator *UnaryOperator::Create(UnaryOps Op, Value *S, const Twine &Name,
3174 BasicBlock::iterator InsertBefore) {
3175 return new UnaryOperator(Op, S, S->getType(), Name, InsertBefore);
3176}
3177
3178UnaryOperator *UnaryOperator::Create(UnaryOps Op, Value *S,
3179 const Twine &Name,
3180 Instruction *InsertBefore) {
3181 return new UnaryOperator(Op, S, S->getType(), Name, InsertBefore);
3182}
3183
3184UnaryOperator *UnaryOperator::Create(UnaryOps Op, Value *S,
3185 const Twine &Name,
3186 BasicBlock *InsertAtEnd) {
3187 UnaryOperator *Res = Create(Op, S, Name);
3188 Res->insertInto(InsertAtEnd, InsertAtEnd->end());
3189 return Res;
3190}
3191
3192void UnaryOperator::AssertOK() {
3193 Value *LHS = getOperand(0);
3194 (void)LHS; // Silence warnings.
3195#ifndef NDEBUG
3196 switch (getOpcode()) {
3197 case FNeg:
3198 assert(getType() == LHS->getType() &&
3199 "Unary operation should return same type as operand!");
3200 assert(getType()->isFPOrFPVectorTy() &&
3201 "Tried to create a floating-point operation on a "
3202 "non-floating-point type!");
3203 break;
3204 default: llvm_unreachable("Invalid opcode provided");
3205 }
3206#endif
3207}
3208
3209//===----------------------------------------------------------------------===//
3210// BinaryOperator Class
3211//===----------------------------------------------------------------------===//
3212
3213BinaryOperator::BinaryOperator(BinaryOps iType, Value *S1, Value *S2, Type *Ty,
3214 const Twine &Name,
3215 BasicBlock::iterator InsertBefore)
3216 : Instruction(Ty, iType, OperandTraits<BinaryOperator>::op_begin(this),
3217 OperandTraits<BinaryOperator>::operands(this), InsertBefore) {
3218 Op<0>() = S1;
3219 Op<1>() = S2;
3220 setName(Name);
3221 AssertOK();
3222}
3223
3224BinaryOperator::BinaryOperator(BinaryOps iType, Value *S1, Value *S2,
3225 Type *Ty, const Twine &Name,
3226 Instruction *InsertBefore)
3227 : Instruction(Ty, iType,
3228 OperandTraits<BinaryOperator>::op_begin(this),
3229 OperandTraits<BinaryOperator>::operands(this),
3230 InsertBefore) {
3231 Op<0>() = S1;
3232 Op<1>() = S2;
3233 setName(Name);
3234 AssertOK();
3235}
3236
3237BinaryOperator::BinaryOperator(BinaryOps iType, Value *S1, Value *S2,
3238 Type *Ty, const Twine &Name,
3239 BasicBlock *InsertAtEnd)
3240 : Instruction(Ty, iType,
3241 OperandTraits<BinaryOperator>::op_begin(this),
3242 OperandTraits<BinaryOperator>::operands(this),
3243 InsertAtEnd) {
3244 Op<0>() = S1;
3245 Op<1>() = S2;
3246 setName(Name);
3247 AssertOK();
3248}
3249
3250void BinaryOperator::AssertOK() {
3251 Value *LHS = getOperand(0), *RHS = getOperand(1);
3252 (void)LHS; (void)RHS; // Silence warnings.
3253 assert(LHS->getType() == RHS->getType() &&
3254 "Binary operator operand types must match!");
3255#ifndef NDEBUG
3256 switch (getOpcode()) {
3257 case Add: case Sub:
3258 case Mul:
3259 assert(getType() == LHS->getType() &&
3260 "Arithmetic operation should return same type as operands!");
3261 assert(getType()->isIntOrIntVectorTy() &&
3262 "Tried to create an integer operation on a non-integer type!");
3263 break;
3264 case FAdd: case FSub:
3265 case FMul:
3266 assert(getType() == LHS->getType() &&
3267 "Arithmetic operation should return same type as operands!");
3268 assert(getType()->isFPOrFPVectorTy() &&
3269 "Tried to create a floating-point operation on a "
3270 "non-floating-point type!");
3271 break;
3272 case UDiv:
3273 case SDiv:
3274 assert(getType() == LHS->getType() &&
3275 "Arithmetic operation should return same type as operands!");
3276 assert(getType()->isIntOrIntVectorTy() &&
3277 "Incorrect operand type (not integer) for S/UDIV");
3278 break;
3279 case FDiv:
3280 assert(getType() == LHS->getType() &&
3281 "Arithmetic operation should return same type as operands!");
3282 assert(getType()->isFPOrFPVectorTy() &&
3283 "Incorrect operand type (not floating point) for FDIV");
3284 break;
3285 case URem:
3286 case SRem:
3287 assert(getType() == LHS->getType() &&
3288 "Arithmetic operation should return same type as operands!");
3289 assert(getType()->isIntOrIntVectorTy() &&
3290 "Incorrect operand type (not integer) for S/UREM");
3291 break;
3292 case FRem:
3293 assert(getType() == LHS->getType() &&
3294 "Arithmetic operation should return same type as operands!");
3295 assert(getType()->isFPOrFPVectorTy() &&
3296 "Incorrect operand type (not floating point) for FREM");
3297 break;
3298 case Shl:
3299 case LShr:
3300 case AShr:
3301 assert(getType() == LHS->getType() &&
3302 "Shift operation should return same type as operands!");
3303 assert(getType()->isIntOrIntVectorTy() &&
3304 "Tried to create a shift operation on a non-integral type!");
3305 break;
3306 case And: case Or:
3307 case Xor:
3308 assert(getType() == LHS->getType() &&
3309 "Logical operation should return same type as operands!");
3310 assert(getType()->isIntOrIntVectorTy() &&
3311 "Tried to create a logical operation on a non-integral type!");
3312 break;
3313 default: llvm_unreachable("Invalid opcode provided");
3314 }
3315#endif
3316}
3317
3318BinaryOperator *BinaryOperator::Create(BinaryOps Op, Value *S1, Value *S2,
3319 const Twine &Name,
3320 BasicBlock::iterator InsertBefore) {
3321 assert(S1->getType() == S2->getType() &&
3322 "Cannot create binary operator with two operands of differing type!");
3323 return new BinaryOperator(Op, S1, S2, S1->getType(), Name, InsertBefore);
3324}
3325
3326BinaryOperator *BinaryOperator::Create(BinaryOps Op, Value *S1, Value *S2,
3327 const Twine &Name,
3328 Instruction *InsertBefore) {
3329 assert(S1->getType() == S2->getType() &&
3330 "Cannot create binary operator with two operands of differing type!");
3331 return new BinaryOperator(Op, S1, S2, S1->getType(), Name, InsertBefore);
3332}
3333
3334BinaryOperator *BinaryOperator::Create(BinaryOps Op, Value *S1, Value *S2,
3335 const Twine &Name,
3336 BasicBlock *InsertAtEnd) {
3337 BinaryOperator *Res = Create(Op, S1, S2, Name);
3338 Res->insertInto(InsertAtEnd, InsertAtEnd->end());
3339 return Res;
3340}
3341
3342BinaryOperator *BinaryOperator::CreateNeg(Value *Op, const Twine &Name,
3343 BasicBlock::iterator InsertBefore) {
3344 Value *Zero = ConstantInt::get(Op->getType(), 0);
3345 return new BinaryOperator(Instruction::Sub, Zero, Op, Op->getType(), Name,
3346 InsertBefore);
3347}
3348
3349BinaryOperator *BinaryOperator::CreateNeg(Value *Op, const Twine &Name,
3350 BasicBlock *InsertAtEnd) {
3351 Value *Zero = ConstantInt::get(Op->getType(), 0);
3352 return new BinaryOperator(Instruction::Sub,
3353 Zero, Op,
3354 Op->getType(), Name, InsertAtEnd);
3355}
3356
3357BinaryOperator *BinaryOperator::CreateNSWNeg(Value *Op, const Twine &Name,
3358 Instruction *InsertBefore) {
3359 Value *Zero = ConstantInt::get(Op->getType(), 0);
3360 return BinaryOperator::CreateNSWSub(Zero, Op, Name, InsertBefore);
3361}
3362
3363BinaryOperator *BinaryOperator::CreateNSWNeg(Value *Op, const Twine &Name,
3364 BasicBlock *InsertAtEnd) {
3365 Value *Zero = ConstantInt::get(Op->getType(), 0);
3366 return BinaryOperator::CreateNSWSub(Zero, Op, Name, InsertAtEnd);
3367}
3368
3369BinaryOperator *BinaryOperator::CreateNot(Value *Op, const Twine &Name,
3370 BasicBlock::iterator InsertBefore) {
3371 Constant *C = Constant::getAllOnesValue(Op->getType());
3372 return new BinaryOperator(Instruction::Xor, Op, C,
3373 Op->getType(), Name, InsertBefore);
3374}
3375
3376BinaryOperator *BinaryOperator::CreateNot(Value *Op, const Twine &Name,
3377 Instruction *InsertBefore) {
3378 Constant *C = Constant::getAllOnesValue(Op->getType());
3379 return new BinaryOperator(Instruction::Xor, Op, C,
3380 Op->getType(), Name, InsertBefore);
3381}
3382
3383BinaryOperator *BinaryOperator::CreateNot(Value *Op, const Twine &Name,
3384 BasicBlock *InsertAtEnd) {
3385 Constant *AllOnes = Constant::getAllOnesValue(Op->getType());
3386 return new BinaryOperator(Instruction::Xor, Op, AllOnes,
3387 Op->getType(), Name, InsertAtEnd);
3388}
3389
3390// Exchange the two operands of this instruction. This is safe to use on any
3391// binary instruction and does not modify the semantics of the instruction.
3392// If the operation is not commutative, the operands are left unchanged and
3393// true is returned to signal failure.
3394bool BinaryOperator::swapOperands() {
3395 if (!isCommutative())
3396 return true; // Can't commute operands
3397 Op<0>().swap(Op<1>());
3398 return false;
3399}
3400
3401//===----------------------------------------------------------------------===//
3402// FPMathOperator Class
3403//===----------------------------------------------------------------------===//
3404
3405float FPMathOperator::getFPAccuracy() const {
3406 const MDNode *MD =
3407 cast<Instruction>(this)->getMetadata(LLVMContext::MD_fpmath);
3408 if (!MD)
3409 return 0.0;
3410 ConstantFP *Accuracy = mdconst::extract<ConstantFP>(MD->getOperand(0));
3411 return Accuracy->getValueAPF().convertToFloat();
3412}
3413
3414//===----------------------------------------------------------------------===//
3415// CastInst Class
3416//===----------------------------------------------------------------------===//
3417
3418// Just determine if this cast only deals with integral->integral conversion.
3419bool CastInst::isIntegerCast() const {
3420 switch (getOpcode()) {
3421 default: return false;
3422 case Instruction::ZExt:
3423 case Instruction::SExt:
3424 case Instruction::Trunc:
3425 return true;
3426 case Instruction::BitCast:
3427 return getOperand(0)->getType()->isIntegerTy() &&
3428 getType()->isIntegerTy();
3429 }
3430}
3431
3432/// This function determines if the CastInst does not require any bits to be
3433/// changed in order to effect the cast. Essentially, it identifies cases where
3434/// no code gen is necessary for the cast, hence the name no-op cast. For
3435/// example, the following are all no-op casts:
3436/// # bitcast i32* %x to i8*
3437/// # bitcast <2 x i32> %x to <4 x i16>
3438/// # ptrtoint i32* %x to i32 ; on 32-bit platforms only
3439/// Determine if the described cast is a no-op.
3440bool CastInst::isNoopCast(Instruction::CastOps Opcode,
3441 Type *SrcTy,
3442 Type *DestTy,
3443 const DataLayout &DL) {
3444 assert(castIsValid(Opcode, SrcTy, DestTy) && "method precondition");
3445 switch (Opcode) {
3446 default: llvm_unreachable("Invalid CastOp");
3447 case Instruction::Trunc:
3448 case Instruction::ZExt:
3449 case Instruction::SExt:
3450 case Instruction::FPTrunc:
3451 case Instruction::FPExt:
3452 case Instruction::UIToFP:
3453 case Instruction::SIToFP:
3454 case Instruction::FPToUI:
3455 case Instruction::FPToSI:
3456 case Instruction::AddrSpaceCast:
3457 // TODO: Target information may give a more accurate answer here.
3458 return false;
3459 case Instruction::BitCast:
3460 return true; // BitCast never modifies bits.
3461 case Instruction::PtrToInt:
3462 return DL.getIntPtrType(SrcTy)->getScalarSizeInBits() ==
3463 DestTy->getScalarSizeInBits();
3464 case Instruction::IntToPtr:
3465 return DL.getIntPtrType(DestTy)->getScalarSizeInBits() ==
3466 SrcTy->getScalarSizeInBits();
3467 }
3468}
3469
3470bool CastInst::isNoopCast(const DataLayout &DL) const {
3471 return isNoopCast(getOpcode(), getOperand(0)->getType(), getType(), DL);
3472}
3473
3474/// This function determines if a pair of casts can be eliminated and what
3475/// opcode should be used in the elimination. This assumes that there are two
3476/// instructions like this:
3477/// * %F = firstOpcode SrcTy %x to MidTy
3478/// * %S = secondOpcode MidTy %F to DstTy
3479/// The function returns a resultOpcode so these two casts can be replaced with:
3480/// * %Replacement = resultOpcode %SrcTy %x to DstTy
3481/// If no such cast is permitted, the function returns 0.
3482unsigned CastInst::isEliminableCastPair(
3483 Instruction::CastOps firstOp, Instruction::CastOps secondOp,
3484 Type *SrcTy, Type *MidTy, Type *DstTy, Type *SrcIntPtrTy, Type *MidIntPtrTy,
3485 Type *DstIntPtrTy) {
3486 // Define the 144 possibilities for these two cast instructions. The values
3487 // in this matrix determine what to do in a given situation and select the
3488 // case in the switch below. The rows correspond to firstOp, the columns
3489 // correspond to secondOp. In looking at the table below, keep in mind
3490 // the following cast properties:
3491 //
3492 // Size Compare Source Destination
3493 // Operator Src ? Size Type Sign Type Sign
3494 // -------- ------------ ------------------- ---------------------
3495 // TRUNC > Integer Any Integral Any
3496 // ZEXT < Integral Unsigned Integer Any
3497 // SEXT < Integral Signed Integer Any
3498 // FPTOUI n/a FloatPt n/a Integral Unsigned
3499 // FPTOSI n/a FloatPt n/a Integral Signed
3500 // UITOFP n/a Integral Unsigned FloatPt n/a
3501 // SITOFP n/a Integral Signed FloatPt n/a
3502 // FPTRUNC > FloatPt n/a FloatPt n/a
3503 // FPEXT < FloatPt n/a FloatPt n/a
3504 // PTRTOINT n/a Pointer n/a Integral Unsigned
3505 // INTTOPTR n/a Integral Unsigned Pointer n/a
3506 // BITCAST = FirstClass n/a FirstClass n/a
3507 // ADDRSPCST n/a Pointer n/a Pointer n/a
3508 //
3509 // NOTE: some transforms are safe, but we consider them to be non-profitable.
3510 // For example, we could merge "fptoui double to i32" + "zext i32 to i64",
3511 // into "fptoui double to i64", but this loses information about the range
3512 // of the produced value (we no longer know the top-part is all zeros).
3513 // Further, this conversion is often much more expensive on typical hardware,
3514 // and causes issues when building libgcc. We disallow fptosi+sext for the
3515 // same reason.
3516 const unsigned numCastOps =
3517 Instruction::CastOpsEnd - Instruction::CastOpsBegin;
3518 static const uint8_t CastResults[numCastOps][numCastOps] = {
3519 // T F F U S F F P I B A -+
3520 // R Z S P P I I T P 2 N T S |
3521 // U E E 2 2 2 2 R E I T C C +- secondOp
3522 // N X X U S F F N X N 2 V V |
3523 // C T T I I P P C T T P T T -+
3524 { 1, 0, 0,99,99, 0, 0,99,99,99, 0, 3, 0}, // Trunc -+
3525 { 8, 1, 9,99,99, 2,17,99,99,99, 2, 3, 0}, // ZExt |
3526 { 8, 0, 1,99,99, 0, 2,99,99,99, 0, 3, 0}, // SExt |
3527 { 0, 0, 0,99,99, 0, 0,99,99,99, 0, 3, 0}, // FPToUI |
3528 { 0, 0, 0,99,99, 0, 0,99,99,99, 0, 3, 0}, // FPToSI |
3529 { 99,99,99, 0, 0,99,99, 0, 0,99,99, 4, 0}, // UIToFP +- firstOp
3530 { 99,99,99, 0, 0,99,99, 0, 0,99,99, 4, 0}, // SIToFP |
3531 { 99,99,99, 0, 0,99,99, 0, 0,99,99, 4, 0}, // FPTrunc |
3532 { 99,99,99, 2, 2,99,99, 8, 2,99,99, 4, 0}, // FPExt |
3533 { 1, 0, 0,99,99, 0, 0,99,99,99, 7, 3, 0}, // PtrToInt |
3534 { 99,99,99,99,99,99,99,99,99,11,99,15, 0}, // IntToPtr |
3535 { 5, 5, 5, 0, 0, 5, 5, 0, 0,16, 5, 1,14}, // BitCast |
3536 { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,13,12}, // AddrSpaceCast -+
3537 };
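 // Illustrative reading of the table (not part of the upstream source): the
 // Trunc row meets the Trunc column at 1, so "trunc then trunc" is case 1
 // below and folds to a single trunc from the original source type.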
3538
3539 // TODO: This logic could be encoded into the table above and handled in the
3540 // switch below.
3541 // If either of the casts is a bitcast between a scalar and a vector,
3542 // disallow the merging. However, a pair of bitcasts is allowed.
3543 bool IsFirstBitcast = (firstOp == Instruction::BitCast);
3544 bool IsSecondBitcast = (secondOp == Instruction::BitCast);
3545 bool AreBothBitcasts = IsFirstBitcast && IsSecondBitcast;
3546
3547 // Check if any of the casts convert scalars <-> vectors.
3548 if ((IsFirstBitcast && isa<VectorType>(SrcTy) != isa<VectorType>(MidTy)) ||
3549 (IsSecondBitcast && isa<VectorType>(MidTy) != isa<VectorType>(DstTy)))
3550 if (!AreBothBitcasts)
3551 return 0;
3552
3553 int ElimCase = CastResults[firstOp-Instruction::CastOpsBegin]
3554 [secondOp-Instruction::CastOpsBegin];
3555 switch (ElimCase) {
3556 case 0:
3557 // Categorically disallowed.
3558 return 0;
3559 case 1:
3560 // Allowed, use first cast's opcode.
3561 return firstOp;
3562 case 2:
3563 // Allowed, use second cast's opcode.
3564 return secondOp;
3565 case 3:
3566 // No-op cast in second op implies firstOp as long as the DestTy
3567 // is integer and we are not converting between a vector and a
3568 // non-vector type.
3569 if (!SrcTy->isVectorTy() && DstTy->isIntegerTy())
3570 return firstOp;
3571 return 0;
3572 case 4:
3573 // No-op cast in second op implies firstOp as long as the DestTy
3574 // matches MidTy.
3575 if (DstTy == MidTy)
3576 return firstOp;
3577 return 0;
3578 case 5:
3579 // No-op cast in first op implies secondOp as long as the SrcTy
3580 // is an integer.
3581 if (SrcTy->isIntegerTy())
3582 return secondOp;
3583 return 0;
3584 case 7: {
3585 // Bail out if the inttoptr/ptrtoint roundtrip optimization is disabled.
3586 if (DisableI2pP2iOpt)
3587 return 0;
3588
3589 // Cannot simplify if address spaces are different!
3590 if (SrcTy->getPointerAddressSpace() != DstTy->getPointerAddressSpace())
3591 return 0;
3592
3593 unsigned MidSize = MidTy->getScalarSizeInBits();
3594 // We can still fold this without knowing the actual sizes as long as we
3595 // know that the intermediate pointer is the largest possible
3596 // pointer size.
3597 // FIXME: Is this always true?
3598 if (MidSize == 64)
3599 return Instruction::BitCast;
3600
3601 // ptrtoint, inttoptr -> bitcast (ptr -> ptr) if int size is >= ptr size.
3602 if (!SrcIntPtrTy || DstIntPtrTy != SrcIntPtrTy)
3603 return 0;
3604 unsigned PtrSize = SrcIntPtrTy->getScalarSizeInBits();
3605 if (MidSize >= PtrSize)
3606 return Instruction::BitCast;
3607 return 0;
3608 }
3609 case 8: {
3610 // ext, trunc -> bitcast, if the SrcTy and DstTy are the same
3611 // ext, trunc -> ext, if sizeof(SrcTy) < sizeof(DstTy)
3612 // ext, trunc -> trunc, if sizeof(SrcTy) > sizeof(DstTy)
3613 unsigned SrcSize = SrcTy->getScalarSizeInBits();
3614 unsigned DstSize = DstTy->getScalarSizeInBits();
3615 if (SrcTy == DstTy)
3616 return Instruction::BitCast;
3617 if (SrcSize < DstSize)
3618 return firstOp;
3619 if (SrcSize > DstSize)
3620 return secondOp;
3621 return 0;
3622 }
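 // An illustrative case 8 fold (hypothetical widths): "zext i16 %x to i64"
 // followed by "trunc i64 %F to i32" becomes "zext i16 %x to i32", since
 // SrcSize (16) < DstSize (32) selects the first opcode.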
3623 case 9:
3624 // zext, sext -> zext, because sext can't sign extend after zext
3625 return Instruction::ZExt;
3626 case 11: {
3627 // inttoptr, ptrtoint -> bitcast if SrcSize<=PtrSize and SrcSize==DstSize
3628 if (!MidIntPtrTy)
3629 return 0;
3630 unsigned PtrSize = MidIntPtrTy->getScalarSizeInBits();
3631 unsigned SrcSize = SrcTy->getScalarSizeInBits();
3632 unsigned DstSize = DstTy->getScalarSizeInBits();
3633 if (SrcSize <= PtrSize && SrcSize == DstSize)
3634 return Instruction::BitCast;
3635 return 0;
3636 }
3637 case 12:
3638 // addrspacecast, addrspacecast -> bitcast, if SrcAS == DstAS
3639 // addrspacecast, addrspacecast -> addrspacecast, if SrcAS != DstAS
3640 if (SrcTy->getPointerAddressSpace() != DstTy->getPointerAddressSpace())
3641 return Instruction::AddrSpaceCast;
3642 return Instruction::BitCast;
3643 case 13:
3644 // FIXME: this state can be merged with (1), but the following assert
3645 // is useful to check the correctness of the sequence due to the semantic
3646 // change of bitcast.
3647 assert(
3648 SrcTy->isPtrOrPtrVectorTy() &&
3649 MidTy->isPtrOrPtrVectorTy() &&
3650 DstTy->isPtrOrPtrVectorTy() &&
3651 SrcTy->getPointerAddressSpace() != MidTy->getPointerAddressSpace() &&
3652 MidTy->getPointerAddressSpace() == DstTy->getPointerAddressSpace() &&
3653 "Illegal addrspacecast, bitcast sequence!");
3654 // Allowed, use first cast's opcode
3655 return firstOp;
3656 case 14:
3657 // bitcast, addrspacecast -> addrspacecast
3658 return Instruction::AddrSpaceCast;
3659 case 15:
3660 // FIXME: this state can be merged with (1), but the following assert
3661 // is useful to check the correctness of the sequence due to the semantic
3662 // change of bitcast.
3663 assert(
3664 SrcTy->isIntOrIntVectorTy() &&
3665 MidTy->isPtrOrPtrVectorTy() &&
3666 DstTy->isPtrOrPtrVectorTy() &&
3667 MidTy->getPointerAddressSpace() == DstTy->getPointerAddressSpace() &&
3668 "Illegal inttoptr, bitcast sequence!");
3669 // Allowed, use first cast's opcode
3670 return firstOp;
3671 case 16:
3672 // FIXME: this state can be merged with (2), but the following assert
3673 // is useful to check the correctness of the sequence due to the semantic
3674 // change of bitcast.
3675 assert(
3676 SrcTy->isPtrOrPtrVectorTy() &&
3677 MidTy->isPtrOrPtrVectorTy() &&
3678 DstTy->isIntOrIntVectorTy() &&
3679 SrcTy->getPointerAddressSpace() == MidTy->getPointerAddressSpace() &&
3680 "Illegal bitcast, ptrtoint sequence!");
3681 // Allowed, use second cast's opcode
3682 return secondOp;
3683 case 17:
3684 // (sitofp (zext x)) -> (uitofp x)
3685 return Instruction::UIToFP;
3686 case 99:
3687 // Cast combination can't happen (error in input). This is for all cases
3688 // where the MidTy is not the same for the two cast instructions.
3689 llvm_unreachable("Invalid Cast Combination");
3690 default:
3691 llvm_unreachable("Error in CastResults table!!!");
3692 }
3693}
3694
3695CastInst *CastInst::Create(Instruction::CastOps op, Value *S, Type *Ty,
3696 const Twine &Name,
3697 BasicBlock::iterator InsertBefore) {
3698 assert(castIsValid(op, S, Ty) && "Invalid cast!");
3699 // Construct and return the appropriate CastInst subclass
3700 switch (op) {
3701 case Trunc: return new TruncInst (S, Ty, Name, InsertBefore);
3702 case ZExt: return new ZExtInst (S, Ty, Name, InsertBefore);
3703 case SExt: return new SExtInst (S, Ty, Name, InsertBefore);
3704 case FPTrunc: return new FPTruncInst (S, Ty, Name, InsertBefore);
3705 case FPExt: return new FPExtInst (S, Ty, Name, InsertBefore);
3706 case UIToFP: return new UIToFPInst (S, Ty, Name, InsertBefore);
3707 case SIToFP: return new SIToFPInst (S, Ty, Name, InsertBefore);
3708 case FPToUI: return new FPToUIInst (S, Ty, Name, InsertBefore);
3709 case FPToSI: return new FPToSIInst (S, Ty, Name, InsertBefore);
3710 case PtrToInt: return new PtrToIntInst (S, Ty, Name, InsertBefore);
3711 case IntToPtr: return new IntToPtrInst (S, Ty, Name, InsertBefore);
3712 case BitCast: return new BitCastInst (S, Ty, Name, InsertBefore);
3713 case AddrSpaceCast: return new AddrSpaceCastInst (S, Ty, Name, InsertBefore);
3714 default: llvm_unreachable("Invalid opcode provided");
3715 }
3716}
3717
3718CastInst *CastInst::Create(Instruction::CastOps op, Value *S, Type *Ty,
3719 const Twine &Name, Instruction *InsertBefore) {
3720 assert(castIsValid(op, S, Ty) && "Invalid cast!");
3721 // Construct and return the appropriate CastInst subclass
3722 switch (op) {
3723 case Trunc: return new TruncInst (S, Ty, Name, InsertBefore);
3724 case ZExt: return new ZExtInst (S, Ty, Name, InsertBefore);
3725 case SExt: return new SExtInst (S, Ty, Name, InsertBefore);
3726 case FPTrunc: return new FPTruncInst (S, Ty, Name, InsertBefore);
3727 case FPExt: return new FPExtInst (S, Ty, Name, InsertBefore);
3728 case UIToFP: return new UIToFPInst (S, Ty, Name, InsertBefore);
3729 case SIToFP: return new SIToFPInst (S, Ty, Name, InsertBefore);
3730 case FPToUI: return new FPToUIInst (S, Ty, Name, InsertBefore);
3731 case FPToSI: return new FPToSIInst (S, Ty, Name, InsertBefore);
3732 case PtrToInt: return new PtrToIntInst (S, Ty, Name, InsertBefore);
3733 case IntToPtr: return new IntToPtrInst (S, Ty, Name, InsertBefore);
3734 case BitCast: return new BitCastInst (S, Ty, Name, InsertBefore);
3735 case AddrSpaceCast: return new AddrSpaceCastInst (S, Ty, Name, InsertBefore);
3736 default: llvm_unreachable("Invalid opcode provided");
3737 }
3738}
3739
3740CastInst *CastInst::Create(Instruction::CastOps op, Value *S, Type *Ty,
3741 const Twine &Name, BasicBlock *InsertAtEnd) {
3742 assert(castIsValid(op, S, Ty) && "Invalid cast!");
3743 // Construct and return the appropriate CastInst subclass
3744 switch (op) {
3745 case Trunc: return new TruncInst (S, Ty, Name, InsertAtEnd);
3746 case ZExt: return new ZExtInst (S, Ty, Name, InsertAtEnd);
3747 case SExt: return new SExtInst (S, Ty, Name, InsertAtEnd);
3748 case FPTrunc: return new FPTruncInst (S, Ty, Name, InsertAtEnd);
3749 case FPExt: return new FPExtInst (S, Ty, Name, InsertAtEnd);
3750 case UIToFP: return new UIToFPInst (S, Ty, Name, InsertAtEnd);
3751 case SIToFP: return new SIToFPInst (S, Ty, Name, InsertAtEnd);
3752 case FPToUI: return new FPToUIInst (S, Ty, Name, InsertAtEnd);
3753 case FPToSI: return new FPToSIInst (S, Ty, Name, InsertAtEnd);
3754 case PtrToInt: return new PtrToIntInst (S, Ty, Name, InsertAtEnd);
3755 case IntToPtr: return new IntToPtrInst (S, Ty, Name, InsertAtEnd);
3756 case BitCast: return new BitCastInst (S, Ty, Name, InsertAtEnd);
3757 case AddrSpaceCast: return new AddrSpaceCastInst (S, Ty, Name, InsertAtEnd);
3758 default: llvm_unreachable("Invalid opcode provided");
3759 }
3760}
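// A minimal usage sketch for the factories above (V, BB, Ctx and the name are
// hypothetical, not taken from this file):
//   LLVMContext &Ctx = BB->getContext();
//   CastInst *Ext = CastInst::Create(Instruction::SExt, V,
//                                    Type::getInt64Ty(Ctx), "v.ext", BB);
// This dispatches to the SExtInst constructor and appends the cast to BB.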
3761
3762CastInst *CastInst::CreateZExtOrBitCast(Value *S, Type *Ty, const Twine &Name,
3763 BasicBlock::iterator InsertBefore) {
3764 if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
3765 return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
3766 return Create(Instruction::ZExt, S, Ty, Name, InsertBefore);
3767}
3768
3769CastInst *CastInst::CreateZExtOrBitCast(Value *S, Type *Ty,
3770 const Twine &Name,
3771 Instruction *InsertBefore) {
3772 if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
3773 return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
3774 return Create(Instruction::ZExt, S, Ty, Name, InsertBefore);
3775}
3776
3777CastInst *CastInst::CreateZExtOrBitCast(Value *S, Type *Ty,
3778 const Twine &Name,
3779 BasicBlock *InsertAtEnd) {
3780 if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
3781 return Create(Instruction::BitCast, S, Ty, Name, InsertAtEnd);
3782 return Create(Instruction::ZExt, S, Ty, Name, InsertAtEnd);
3783}
3784
3785CastInst *CastInst::CreateSExtOrBitCast(Value *S, Type *Ty, const Twine &Name,
3786 BasicBlock::iterator InsertBefore) {
3787 if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
3788 return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
3789 return Create(Instruction::SExt, S, Ty, Name, InsertBefore);
3790}
3791
3792CastInst *CastInst::CreateSExtOrBitCast(Value *S, Type *Ty,
3793 const Twine &Name,
3794 Instruction *InsertBefore) {
3795 if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
3796 return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
3797 return Create(Instruction::SExt, S, Ty, Name, InsertBefore);
3798}
3799
3800CastInst *CastInst::CreateSExtOrBitCast(Value *S, Type *Ty,
3801 const Twine &Name,
3802 BasicBlock *InsertAtEnd) {
3803 if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
3804 return Create(Instruction::BitCast, S, Ty, Name, InsertAtEnd);
3805 return Create(Instruction::SExt, S, Ty, Name, InsertAtEnd);
3806}
3807
3808CastInst *CastInst::CreateTruncOrBitCast(Value *S, Type *Ty, const Twine &Name,
3809 BasicBlock::iterator InsertBefore) {
3810 if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
3811 return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
3812 return Create(Instruction::Trunc, S, Ty, Name, InsertBefore);
3813}
3814
3815CastInst *CastInst::CreateTruncOrBitCast(Value *S, Type *Ty,
3816 const Twine &Name,
3817 Instruction *InsertBefore) {
3818 if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
3819 return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
3820 return Create(Instruction::Trunc, S, Ty, Name, InsertBefore);
3821}
3822
3823CastInst *CastInst::CreateTruncOrBitCast(Value *S, Type *Ty,
3824 const Twine &Name,
3825 BasicBlock *InsertAtEnd) {
3826 if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
3827 return Create(Instruction::BitCast, S, Ty, Name, InsertAtEnd);
3828 return Create(Instruction::Trunc, S, Ty, Name, InsertAtEnd);
3829}
3830
3831CastInst *CastInst::CreatePointerCast(Value *S, Type *Ty,
3832 const Twine &Name,
3833 BasicBlock *InsertAtEnd) {
3834 assert(S->getType()->isPtrOrPtrVectorTy() && "Invalid cast");
3835 assert((Ty->isIntOrIntVectorTy() || Ty->isPtrOrPtrVectorTy()) &&
3836 "Invalid cast");
3837 assert(Ty->isVectorTy() == S->getType()->isVectorTy() && "Invalid cast");
3838 assert((!Ty->isVectorTy() ||
3839 cast<VectorType>(Ty)->getElementCount() ==
3840 cast<VectorType>(S->getType())->getElementCount()) &&
3841 "Invalid cast");
3842
3843 if (Ty->isIntOrIntVectorTy())
3844 return Create(Instruction::PtrToInt, S, Ty, Name, InsertAtEnd);
3845
3846 return CreatePointerBitCastOrAddrSpaceCast(S, Ty, Name, InsertAtEnd);
3847}
3848
3849/// Create a BitCast or a PtrToInt cast instruction
3850CastInst *CastInst::CreatePointerCast(Value *S, Type *Ty, const Twine &Name,
3851 BasicBlock::iterator InsertBefore) {
3852 assert(S->getType()->isPtrOrPtrVectorTy() && "Invalid cast");
3853 assert((Ty->isIntOrIntVectorTy() || Ty->isPtrOrPtrVectorTy()) &&
3854 "Invalid cast");
3855 assert(Ty->isVectorTy() == S->getType()->isVectorTy() && "Invalid cast");
3856 assert((!Ty->isVectorTy() ||
3857 cast<VectorType>(Ty)->getElementCount() ==
3858 cast<VectorType>(S->getType())->getElementCount()) &&
3859 "Invalid cast");
3860
3861 if (Ty->isIntOrIntVectorTy())
3862 return Create(Instruction::PtrToInt, S, Ty, Name, InsertBefore);
3863
3864 return CreatePointerBitCastOrAddrSpaceCast(S, Ty, Name, InsertBefore);
3865}
3866
3867/// Create a BitCast or a PtrToInt cast instruction
3868CastInst *CastInst::CreatePointerCast(Value *S, Type *Ty, const Twine &Name,
3869 Instruction *InsertBefore) {
3870 assert(S->getType()->isPtrOrPtrVectorTy() && "Invalid cast");
3871 assert((Ty->isIntOrIntVectorTy() || Ty->isPtrOrPtrVectorTy()) &&
3872 "Invalid cast");
3873 assert(Ty->isVectorTy() == S->getType()->isVectorTy() && "Invalid cast");
3874 assert((!Ty->isVectorTy() ||
3875 cast<VectorType>(Ty)->getElementCount() ==
3876 cast<VectorType>(S->getType())->getElementCount()) &&
3877 "Invalid cast");
3878
3879 if (Ty->isIntOrIntVectorTy())
3880 return Create(Instruction::PtrToInt, S, Ty, Name, InsertBefore);
3881
3882 return CreatePointerBitCastOrAddrSpaceCast(S, Ty, Name, InsertBefore);
3883}
3884
3885CastInst *CastInst::CreatePointerBitCastOrAddrSpaceCast(
3886 Value *S, Type *Ty,
3887 const Twine &Name,
3888 BasicBlock *InsertAtEnd) {
3889 assert(S->getType()->isPtrOrPtrVectorTy() && "Invalid cast");
3890 assert(Ty->isPtrOrPtrVectorTy() && "Invalid cast");
3891
3892 if (S->getType()->getPointerAddressSpace() != Ty->getPointerAddressSpace())
3893 return Create(Instruction::AddrSpaceCast, S, Ty, Name, InsertAtEnd);
3894
3895 return Create(Instruction::BitCast, S, Ty, Name, InsertAtEnd);
3896}
3897
3898CastInst *CastInst::CreatePointerBitCastOrAddrSpaceCast(
3899 Value *S, Type *Ty, const Twine &Name, BasicBlock::iterator InsertBefore) {
3900 assert(S->getType()->isPtrOrPtrVectorTy() && "Invalid cast");
3901 assert(Ty->isPtrOrPtrVectorTy() && "Invalid cast");
3902
3903 if (S->getType()->getPointerAddressSpace() != Ty->getPointerAddressSpace())
3904 return Create(Instruction::AddrSpaceCast, S, Ty, Name, InsertBefore);
3905
3906 return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
3907}
3908
3909CastInst *CastInst::CreatePointerBitCastOrAddrSpaceCast(
3910 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore) {
3911 assert(S->getType()->isPtrOrPtrVectorTy() && "Invalid cast");
3912 assert(Ty->isPtrOrPtrVectorTy() && "Invalid cast");
3913
3914 if (S->getType()->getPointerAddressSpace() != Ty->getPointerAddressSpace())
3915 return Create(Instruction::AddrSpaceCast, S, Ty, Name, InsertBefore);
3916
3917 return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
3918}
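// For example (hypothetical address spaces): with S of type ptr addrspace(1)
// and Ty being ptr in address space 0, the address spaces differ and an
// addrspacecast is created; if they match, a bitcast is created instead.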
3919
3920CastInst *CastInst::CreateBitOrPtrCast(Value *S, Type *Ty,
3921 const Twine &Name,
3922 BasicBlock::iterator InsertBefore) {
3923 if (S->getType()->isPointerTy() && Ty->isIntegerTy())
3924 return Create(Instruction::PtrToInt, S, Ty, Name, InsertBefore);
3925 if (S->getType()->isIntegerTy() && Ty->isPointerTy())
3926 return Create(Instruction::IntToPtr, S, Ty, Name, InsertBefore);
3927
3928 return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
3929}
3930
3931CastInst *CastInst::CreateBitOrPtrCast(Value *S, Type *Ty,
3932 const Twine &Name,
3933 Instruction *InsertBefore) {
3934 if (S->getType()->isPointerTy() && Ty->isIntegerTy())
3935 return Create(Instruction::PtrToInt, S, Ty, Name, InsertBefore);
3936 if (S->getType()->isIntegerTy() && Ty->isPointerTy())
3937 return Create(Instruction::IntToPtr, S, Ty, Name, InsertBefore);
3938
3939 return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
3940}
3941
3942CastInst *CastInst::CreateIntegerCast(Value *C, Type *Ty, bool isSigned,
3943 const Twine &Name,
3944 BasicBlock::iterator InsertBefore) {
3945 assert(C->getType()->isIntOrIntVectorTy() && Ty->isIntOrIntVectorTy() &&
3946 "Invalid integer cast");
3947 unsigned SrcBits = C->getType()->getScalarSizeInBits();
3948 unsigned DstBits = Ty->getScalarSizeInBits();
3949 Instruction::CastOps opcode =
3950 (SrcBits == DstBits ? Instruction::BitCast :
3951 (SrcBits > DstBits ? Instruction::Trunc :
3952 (isSigned ? Instruction::SExt : Instruction::ZExt)));
3953 return Create(opcode, C, Ty, Name, InsertBefore);
3954}
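// For example (hypothetical operands), widening an i16 value V16 to i32 as a
// signed quantity:
//   CastInst *Widened = CastInst::CreateIntegerCast(
//       V16, Type::getInt32Ty(Ctx), /*isSigned=*/true, "widen", InsertPt);
// selects SExt because SrcBits (16) < DstBits (32); equal widths would yield
// a BitCast and narrowing would yield a Trunc.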
3955
3956CastInst *CastInst::CreateIntegerCast(Value *C, Type *Ty,
3957 bool isSigned, const Twine &Name,
3958 Instruction *InsertBefore) {
3959 assert(C->getType()->isIntOrIntVectorTy() && Ty->isIntOrIntVectorTy() &&
3960 "Invalid integer cast");
3961 unsigned SrcBits = C->getType()->getScalarSizeInBits();
3962 unsigned DstBits = Ty->getScalarSizeInBits();
3963 Instruction::CastOps opcode =
3964 (SrcBits == DstBits ? Instruction::BitCast :
3965 (SrcBits > DstBits ? Instruction::Trunc :
3966 (isSigned ? Instruction::SExt : Instruction::ZExt)));
3967 return Create(opcode, C, Ty, Name, InsertBefore);
3968}
3969
3970CastInst *CastInst::CreateIntegerCast(Value *C, Type *Ty,
3971 bool isSigned, const Twine &Name,
3972 BasicBlock *InsertAtEnd) {
3973 assert(C->getType()->isIntOrIntVectorTy() && Ty->isIntOrIntVectorTy() &&
3974 "Invalid cast");
3975 unsigned SrcBits = C->getType()->getScalarSizeInBits();
3976 unsigned DstBits = Ty->getScalarSizeInBits();
3977 Instruction::CastOps opcode =
3978 (SrcBits == DstBits ? Instruction::BitCast :
3979 (SrcBits > DstBits ? Instruction::Trunc :
3980 (isSigned ? Instruction::SExt : Instruction::ZExt)));
3981 return Create(opcode, C, Ty, Name, InsertAtEnd);
3982}
3983
3984CastInst *CastInst::CreateFPCast(Value *C, Type *Ty, const Twine &Name,
3985 BasicBlock::iterator InsertBefore) {
3986 assert(C->getType()->isFPOrFPVectorTy() && Ty->isFPOrFPVectorTy() &&
3987 "Invalid cast");
3988 unsigned SrcBits = C->getType()->getScalarSizeInBits();
3989 unsigned DstBits = Ty->getScalarSizeInBits();
3990 Instruction::CastOps opcode =
3991 (SrcBits == DstBits ? Instruction::BitCast :
3992 (SrcBits > DstBits ? Instruction::FPTrunc : Instruction::FPExt));
3993 return Create(opcode, C, Ty, Name, InsertBefore);
3994}
3995
3996CastInst *CastInst::CreateFPCast(Value *C, Type *Ty,
3997 const Twine &Name,
3998 Instruction *InsertBefore) {
3999 assert(C->getType()->isFPOrFPVectorTy() && Ty->isFPOrFPVectorTy() &&
4000 "Invalid cast");
4001 unsigned SrcBits = C->getType()->getScalarSizeInBits();
4002 unsigned DstBits = Ty->getScalarSizeInBits();
4003 assert((C->getType() == Ty || SrcBits != DstBits) && "Invalid cast");
4004 Instruction::CastOps opcode =
4005 (SrcBits == DstBits ? Instruction::BitCast :
4006 (SrcBits > DstBits ? Instruction::FPTrunc : Instruction::FPExt));
4007 return Create(opcode, C, Ty, Name, InsertBefore);
4008}
4009
4010CastInst *CastInst::CreateFPCast(Value *C, Type *Ty,
4011 const Twine &Name,
4012 BasicBlock *InsertAtEnd) {
4013 assert(C->getType()->isFPOrFPVectorTy() && Ty->isFPOrFPVectorTy() &&
4014 "Invalid cast");
4015 unsigned SrcBits = C->getType()->getScalarSizeInBits();
4016 unsigned DstBits = Ty->getScalarSizeInBits();
4017 Instruction::CastOps opcode =
4018 (SrcBits == DstBits ? Instruction::BitCast :
4019 (SrcBits > DstBits ? Instruction::FPTrunc : Instruction::FPExt));
4020 return Create(opcode, C, Ty, Name, InsertAtEnd);
4021}
4022
4023bool CastInst::isBitCastable(Type *SrcTy, Type *DestTy) {
4024 if (!SrcTy->isFirstClassType() || !DestTy->isFirstClassType())
4025 return false;
4026
4027 if (SrcTy == DestTy)
4028 return true;
4029
4030 if (VectorType *SrcVecTy = dyn_cast<VectorType>(SrcTy)) {
4031 if (VectorType *DestVecTy = dyn_cast<VectorType>(DestTy)) {
4032 if (SrcVecTy->getElementCount() == DestVecTy->getElementCount()) {
4033 // An element by element cast. Valid if casting the elements is valid.
4034 SrcTy = SrcVecTy->getElementType();
4035 DestTy = DestVecTy->getElementType();
4036 }
4037 }
4038 }
4039
4040 if (PointerType *DestPtrTy = dyn_cast<PointerType>(DestTy)) {
4041 if (PointerType *SrcPtrTy = dyn_cast<PointerType>(SrcTy)) {
4042 return SrcPtrTy->getAddressSpace() == DestPtrTy->getAddressSpace();
4043 }
4044 }
4045
4046 TypeSize SrcBits = SrcTy->getPrimitiveSizeInBits(); // 0 for ptr
4047 TypeSize DestBits = DestTy->getPrimitiveSizeInBits(); // 0 for ptr
4048
4049 // Could still have vectors of pointers if the number of elements doesn't
4050 // match.
4051 if (SrcBits.getKnownMinValue() == 0 || DestBits.getKnownMinValue() == 0)
4052 return false;
4053
4054 if (SrcBits != DestBits)
4055 return false;
4056
4057 if (DestTy->isX86_MMXTy() || SrcTy->isX86_MMXTy())
4058 return false;
4059
4060 return true;
4061}
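// Illustrative results: isBitCastable(<2 x i32>, i64) and
// isBitCastable(double, i64) both hold, since each side is 64 bits wide,
// while isBitCastable(ptr, i64) does not, because pointers report a
// primitive size of zero here.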
4062
4063bool CastInst::isBitOrNoopPointerCastable(Type *SrcTy, Type *DestTy,
4064 const DataLayout &DL) {
4065 // ptrtoint and inttoptr are not allowed on non-integral pointers
4066 if (auto *PtrTy = dyn_cast<PointerType>(SrcTy))
4067 if (auto *IntTy = dyn_cast<IntegerType>(DestTy))
4068 return (IntTy->getBitWidth() == DL.getPointerTypeSizeInBits(PtrTy) &&
4069 !DL.isNonIntegralPointerType(PtrTy));
4070 if (auto *PtrTy = dyn_cast<PointerType>(DestTy))
4071 if (auto *IntTy = dyn_cast<IntegerType>(SrcTy))
4072 return (IntTy->getBitWidth() == DL.getPointerTypeSizeInBits(PtrTy) &&
4073 !DL.isNonIntegralPointerType(PtrTy));
4074
4075 return isBitCastable(SrcTy, DestTy);
4076}
4077
4078// Provide a way to get a "cast" where the cast opcode is inferred from the
4079// types and size of the operand. This, basically, parallels the logic in the
4080// castIsValid function below. The following invariant should hold:
4081// castIsValid(getCastOpcode(Val, Ty), Val, Ty)
4082// must not assert in castIsValid. In other words, this produces a "correct"
4083// casting opcode for the arguments passed to it.
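// For example (hypothetical operands): an i32 source with SrcIsSigned == true
// and an i64 destination yields SExt, and castIsValid(SExt, <i32 value>, i64)
// indeed holds.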
4084Instruction::CastOps
4085CastInst::getCastOpcode(
4086 const Value *Src, bool SrcIsSigned, Type *DestTy, bool DestIsSigned) {
4087 Type *SrcTy = Src->getType();
4088
4089 assert(SrcTy->isFirstClassType() && DestTy->isFirstClassType() &&
4090 "Only first class types are castable!");
4091
4092 if (SrcTy == DestTy)
4093 return BitCast;
4094
4095 // FIXME: Check address space sizes here
4096 if (VectorType *SrcVecTy = dyn_cast<VectorType>(SrcTy))
4097 if (VectorType *DestVecTy = dyn_cast<VectorType>(DestTy))
4098 if (SrcVecTy->getElementCount() == DestVecTy->getElementCount()) {
4099 // An element by element cast. Find the appropriate opcode based on the
4100 // element types.
4101 SrcTy = SrcVecTy->getElementType();
4102 DestTy = DestVecTy->getElementType();
4103 }
4104
4105 // Get the bit sizes, we'll need these
4106 unsigned SrcBits = SrcTy->getPrimitiveSizeInBits(); // 0 for ptr
4107 unsigned DestBits = DestTy->getPrimitiveSizeInBits(); // 0 for ptr
4108
4109 // Run through the possibilities ...
4110 if (DestTy->isIntegerTy()) { // Casting to integral
4111 if (SrcTy->isIntegerTy()) { // Casting from integral
4112 if (DestBits < SrcBits)
4113 return Trunc; // int -> smaller int
4114 else if (DestBits > SrcBits) { // it's an extension
4115 if (SrcIsSigned)
4116 return SExt; // signed -> SEXT
4117 else
4118 return ZExt; // unsigned -> ZEXT
4119 } else {
4120 return BitCast; // Same size, No-op cast
4121 }
4122 } else if (SrcTy->isFloatingPointTy()) { // Casting from floating pt
4123 if (DestIsSigned)
4124 return FPToSI; // FP -> sint
4125 else
4126 return FPToUI; // FP -> uint
4127 } else if (SrcTy->isVectorTy()) {
4128 assert(DestBits == SrcBits &&
4129 "Casting vector to integer of different width");
4130 return BitCast; // Same size, no-op cast
4131 } else {
4132 assert(SrcTy->isPointerTy() &&
4133 "Casting from a value that is not first-class type");
4134 return PtrToInt; // ptr -> int
4135 }
4136 } else if (DestTy->isFloatingPointTy()) { // Casting to floating pt
4137 if (SrcTy->isIntegerTy()) { // Casting from integral
4138 if (SrcIsSigned)
4139 return SIToFP; // sint -> FP
4140 else
4141 return UIToFP; // uint -> FP
4142 } else if (SrcTy->isFloatingPointTy()) { // Casting from floating pt
4143 if (DestBits < SrcBits) {
4144 return FPTrunc; // FP -> smaller FP
4145 } else if (DestBits > SrcBits) {
4146 return FPExt; // FP -> larger FP
4147 } else {
4148 return BitCast; // same size, no-op cast
4149 }
4150 } else if (SrcTy->isVectorTy()) {
4151 assert(DestBits == SrcBits &&
4152 "Casting vector to floating point of different width");
4153 return BitCast; // same size, no-op cast
4154 }
4155 llvm_unreachable("Casting pointer or non-first class to float");
4156 } else if (DestTy->isVectorTy()) {
4157 assert(DestBits == SrcBits &&
4158 "Illegal cast to vector (wrong type or size)");
4159 return BitCast;
4160 } else if (DestTy->isPointerTy()) {
4161 if (SrcTy->isPointerTy()) {
4162 if (DestTy->getPointerAddressSpace() != SrcTy->getPointerAddressSpace())
4163 return AddrSpaceCast;
4164 return BitCast; // ptr -> ptr
4165 } else if (SrcTy->isIntegerTy()) {
4166 return IntToPtr; // int -> ptr
4167 }
4168 llvm_unreachable("Casting pointer to other than pointer or int");
4169 } else if (DestTy->isX86_MMXTy()) {
4170 if (SrcTy->isVectorTy()) {
4171 assert(DestBits == SrcBits && "Casting vector of wrong width to X86_MMX");
4172 return BitCast; // 64-bit vector to MMX
4173 }
4174 llvm_unreachable("Illegal cast to X86_MMX");
4175 }
4176 llvm_unreachable("Casting to type that is not first-class");
4177}
4178
4179//===----------------------------------------------------------------------===//
4180// CastInst SubClass Constructors
4181//===----------------------------------------------------------------------===//
4182
4183/// Check that the construction parameters for a CastInst are correct. This
4184/// could be broken out into the separate constructors, but it is useful to have
4185/// it in one place and to eliminate the redundant code for getting the sizes
4186/// of the types involved.
4187bool
4188CastInst::castIsValid(Instruction::CastOps op, Type *SrcTy, Type *DstTy) {
4189 if (!SrcTy->isFirstClassType() || !DstTy->isFirstClassType() ||
4190 SrcTy->isAggregateType() || DstTy->isAggregateType())
4191 return false;
4192
4193 // Get the size of the types in bits, and whether we are dealing
4194 // with vector types, we'll need this later.
4195 bool SrcIsVec = isa<VectorType>(SrcTy);
4196 bool DstIsVec = isa<VectorType>(DstTy);
4197 unsigned SrcScalarBitSize = SrcTy->getScalarSizeInBits();
4198 unsigned DstScalarBitSize = DstTy->getScalarSizeInBits();
4199
4200 // If these are vector types, get the lengths of the vectors (using zero for
4201 // scalar types means that checking that vector lengths match also checks that
4202 // scalars are not being converted to vectors or vectors to scalars).
4203 ElementCount SrcEC = SrcIsVec ? cast<VectorType>(SrcTy)->getElementCount()
4204 : ElementCount::getFixed(0);
4205 ElementCount DstEC = DstIsVec ? cast<VectorType>(DstTy)->getElementCount()
4206 : ElementCount::getFixed(0);
4207
4208 // Switch on the opcode provided
4209 switch (op) {
4210 default: return false; // This is an input error
4211 case Instruction::Trunc:
4212 return SrcTy->isIntOrIntVectorTy() && DstTy->isIntOrIntVectorTy() &&
4213 SrcEC == DstEC && SrcScalarBitSize > DstScalarBitSize;
4214 case Instruction::ZExt:
4215 return SrcTy->isIntOrIntVectorTy() && DstTy->isIntOrIntVectorTy() &&
4216 SrcEC == DstEC && SrcScalarBitSize < DstScalarBitSize;
4217 case Instruction::SExt:
4218 return SrcTy->isIntOrIntVectorTy() && DstTy->isIntOrIntVectorTy() &&
4219 SrcEC == DstEC && SrcScalarBitSize < DstScalarBitSize;
4220 case Instruction::FPTrunc:
4221 return SrcTy->isFPOrFPVectorTy() && DstTy->isFPOrFPVectorTy() &&
4222 SrcEC == DstEC && SrcScalarBitSize > DstScalarBitSize;
4223 case Instruction::FPExt:
4224 return SrcTy->isFPOrFPVectorTy() && DstTy->isFPOrFPVectorTy() &&
4225 SrcEC == DstEC && SrcScalarBitSize < DstScalarBitSize;
4226 case Instruction::UIToFP:
4227 case Instruction::SIToFP:
4228 return SrcTy->isIntOrIntVectorTy() && DstTy->isFPOrFPVectorTy() &&
4229 SrcEC == DstEC;
4230 case Instruction::FPToUI:
4231 case Instruction::FPToSI:
4232 return SrcTy->isFPOrFPVectorTy() && DstTy->isIntOrIntVectorTy() &&
4233 SrcEC == DstEC;
4234 case Instruction::PtrToInt:
4235 if (SrcEC != DstEC)
4236 return false;
4237 return SrcTy->isPtrOrPtrVectorTy() && DstTy->isIntOrIntVectorTy();
4238 case Instruction::IntToPtr:
4239 if (SrcEC != DstEC)
4240 return false;
4241 return SrcTy->isIntOrIntVectorTy() && DstTy->isPtrOrPtrVectorTy();
4242 case Instruction::BitCast: {
4243 PointerType *SrcPtrTy = dyn_cast<PointerType>(SrcTy->getScalarType());
4244 PointerType *DstPtrTy = dyn_cast<PointerType>(DstTy->getScalarType());
4245
4246 // BitCast implies a no-op cast of type only. No bits change.
4247 // However, you can't cast pointers to anything but pointers.
4248 if (!SrcPtrTy != !DstPtrTy)
4249 return false;
4250
4251 // For non-pointer cases, the cast is okay if the source and destination bit
4252 // widths are identical.
4253 if (!SrcPtrTy)
4254 return SrcTy->getPrimitiveSizeInBits() == DstTy->getPrimitiveSizeInBits();
4255
4256 // If both are pointers then the address spaces must match.
4257 if (SrcPtrTy->getAddressSpace() != DstPtrTy->getAddressSpace())
4258 return false;
4259
4260 // A vector of pointers must have the same number of elements.
4261 if (SrcIsVec && DstIsVec)
4262 return SrcEC == DstEC;
4263 if (SrcIsVec)
4264 return SrcEC == ElementCount::getFixed(1);
4265 if (DstIsVec)
4266 return DstEC == ElementCount::getFixed(1);
4267
4268 return true;
4269 }
4270 case Instruction::AddrSpaceCast: {
4271 PointerType *SrcPtrTy = dyn_cast<PointerType>(SrcTy->getScalarType());
4272 if (!SrcPtrTy)
4273 return false;
4274
4275 PointerType *DstPtrTy = dyn_cast<PointerType>(DstTy->getScalarType());
4276 if (!DstPtrTy)
4277 return false;
4278
4279 if (SrcPtrTy->getAddressSpace() == DstPtrTy->getAddressSpace())
4280 return false;
4281
4282 return SrcEC == DstEC;
4283 }
4284 }
4285}
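// Spot checks (illustrative): Trunc from i64 to i32 is valid, Trunc from i32
// to i64 fails the size check, and BitCast from <4 x i16> to i64 is valid
// because both types are 64 bits wide and neither is a pointer.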
4286
4287TruncInst::TruncInst(Value *S, Type *Ty, const Twine &Name,
4288 BasicBlock::iterator InsertBefore)
4289 : CastInst(Ty, Trunc, S, Name, InsertBefore) {
4290 assert(castIsValid(getOpcode(), S, Ty) && "Illegal Trunc");
4291}
4292
4293TruncInst::TruncInst(
4294 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
4295) : CastInst(Ty, Trunc, S, Name, InsertBefore) {
4296 assert(castIsValid(getOpcode(), S, Ty) && "Illegal Trunc");
4297}
4298
4299TruncInst::TruncInst(
4300 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
4301) : CastInst(Ty, Trunc, S, Name, InsertAtEnd) {
4302 assert(castIsValid(getOpcode(), S, Ty) && "Illegal Trunc");
4303}
4304
4305ZExtInst::ZExtInst(Value *S, Type *Ty, const Twine &Name,
4306 BasicBlock::iterator InsertBefore)
4307 : CastInst(Ty, ZExt, S, Name, InsertBefore) {
4308 assert(castIsValid(getOpcode(), S, Ty) && "Illegal ZExt");
4309}
4310
4311ZExtInst::ZExtInst(
4312 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
4313) : CastInst(Ty, ZExt, S, Name, InsertBefore) {
4314 assert(castIsValid(getOpcode(), S, Ty) && "Illegal ZExt");
4315}
4316
4317ZExtInst::ZExtInst(
4318 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
4319) : CastInst(Ty, ZExt, S, Name, InsertAtEnd) {
4320 assert(castIsValid(getOpcode(), S, Ty) && "Illegal ZExt");
4321}
4322
4323SExtInst::SExtInst(Value *S, Type *Ty, const Twine &Name,
4324 BasicBlock::iterator InsertBefore)
4325 : CastInst(Ty, SExt, S, Name, InsertBefore) {
4326 assert(castIsValid(getOpcode(), S, Ty) && "Illegal SExt");
4327}
4328
4329SExtInst::SExtInst(
4330 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
4331) : CastInst(Ty, SExt, S, Name, InsertBefore) {
4332 assert(castIsValid(getOpcode(), S, Ty) && "Illegal SExt");
4333}
4334
4335SExtInst::SExtInst(
4336 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
4337) : CastInst(Ty, SExt, S, Name, InsertAtEnd) {
4338 assert(castIsValid(getOpcode(), S, Ty) && "Illegal SExt");
4339}
4340
4341FPTruncInst::FPTruncInst(Value *S, Type *Ty, const Twine &Name,
4342 BasicBlock::iterator InsertBefore)
4343 : CastInst(Ty, FPTrunc, S, Name, InsertBefore) {
4344 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPTrunc");
4345}
4346
4347FPTruncInst::FPTruncInst(
4348 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
4349) : CastInst(Ty, FPTrunc, S, Name, InsertBefore) {
4350 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPTrunc");
4351}
4352
4353FPTruncInst::FPTruncInst(
4354 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
4355) : CastInst(Ty, FPTrunc, S, Name, InsertAtEnd) {
4356 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPTrunc");
4357}
4358
4359FPExtInst::FPExtInst(Value *S, Type *Ty, const Twine &Name,
4360 BasicBlock::iterator InsertBefore)
4361 : CastInst(Ty, FPExt, S, Name, InsertBefore) {
4362 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPExt");
4363}
4364
4365FPExtInst::FPExtInst(
4366 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
4367) : CastInst(Ty, FPExt, S, Name, InsertBefore) {
4368 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPExt");
4369}
4370
4371FPExtInst::FPExtInst(
4372 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
4373) : CastInst(Ty, FPExt, S, Name, InsertAtEnd) {
4374 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPExt");
4375}
4376
4377UIToFPInst::UIToFPInst(Value *S, Type *Ty, const Twine &Name,
4378 BasicBlock::iterator InsertBefore)
4379 : CastInst(Ty, UIToFP, S, Name, InsertBefore) {
4380 assert(castIsValid(getOpcode(), S, Ty) && "Illegal UIToFP");
4381}
4382
4383UIToFPInst::UIToFPInst(
4384 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
4385) : CastInst(Ty, UIToFP, S, Name, InsertBefore) {
4386 assert(castIsValid(getOpcode(), S, Ty) && "Illegal UIToFP");
4387}
4388
4389UIToFPInst::UIToFPInst(
4390 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
4391) : CastInst(Ty, UIToFP, S, Name, InsertAtEnd) {
4392 assert(castIsValid(getOpcode(), S, Ty) && "Illegal UIToFP");
4393}
4394
4395SIToFPInst::SIToFPInst(Value *S, Type *Ty, const Twine &Name,
4396 BasicBlock::iterator InsertBefore)
4397 : CastInst(Ty, SIToFP, S, Name, InsertBefore) {
4398 assert(castIsValid(getOpcode(), S, Ty) && "Illegal SIToFP");
4399}
4400