LLVM 19.0.0git
Instructions.cpp
Go to the documentation of this file.
1//===- Instructions.cpp - Implement the LLVM instructions -----------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file implements all of the non-inline methods for the LLVM instruction
10// classes.
11//
12//===----------------------------------------------------------------------===//
13
15#include "LLVMContextImpl.h"
18#include "llvm/ADT/Twine.h"
19#include "llvm/IR/Attributes.h"
20#include "llvm/IR/BasicBlock.h"
21#include "llvm/IR/Constant.h"
23#include "llvm/IR/Constants.h"
24#include "llvm/IR/DataLayout.h"
26#include "llvm/IR/Function.h"
27#include "llvm/IR/InstrTypes.h"
28#include "llvm/IR/Instruction.h"
29#include "llvm/IR/Intrinsics.h"
30#include "llvm/IR/LLVMContext.h"
31#include "llvm/IR/MDBuilder.h"
32#include "llvm/IR/Metadata.h"
33#include "llvm/IR/Module.h"
34#include "llvm/IR/Operator.h"
36#include "llvm/IR/Type.h"
37#include "llvm/IR/Value.h"
42#include "llvm/Support/ModRef.h"
44#include <algorithm>
45#include <cassert>
46#include <cstdint>
47#include <optional>
48#include <vector>
49
50using namespace llvm;
51
53 "disable-i2p-p2i-opt", cl::init(false),
54 cl::desc("Disables inttoptr/ptrtoint roundtrip optimization"));
55
56//===----------------------------------------------------------------------===//
57// AllocaInst Class
58//===----------------------------------------------------------------------===//
59
60std::optional<TypeSize>
62 TypeSize Size = DL.getTypeAllocSize(getAllocatedType());
63 if (isArrayAllocation()) {
64 auto *C = dyn_cast<ConstantInt>(getArraySize());
65 if (!C)
66 return std::nullopt;
67 assert(!Size.isScalable() && "Array elements cannot have a scalable size");
68 Size *= C->getZExtValue();
69 }
70 return Size;
71}
72
73std::optional<TypeSize>
75 std::optional<TypeSize> Size = getAllocationSize(DL);
76 if (Size)
77 return *Size * 8;
78 return std::nullopt;
79}
80
81//===----------------------------------------------------------------------===//
82// SelectInst Class
83//===----------------------------------------------------------------------===//
84
85/// areInvalidOperands - Return a string if the specified operands are invalid
86/// for a select operation, otherwise return null.
87const char *SelectInst::areInvalidOperands(Value *Op0, Value *Op1, Value *Op2) {
88 if (Op1->getType() != Op2->getType())
89 return "both values to select must have same type";
90
91 if (Op1->getType()->isTokenTy())
92 return "select values cannot have token type";
93
94 if (VectorType *VT = dyn_cast<VectorType>(Op0->getType())) {
95 // Vector select.
96 if (VT->getElementType() != Type::getInt1Ty(Op0->getContext()))
97 return "vector select condition element type must be i1";
98 VectorType *ET = dyn_cast<VectorType>(Op1->getType());
99 if (!ET)
100 return "selected values for vector select must be vectors";
101 if (ET->getElementCount() != VT->getElementCount())
102 return "vector select requires selected vectors to have "
103 "the same vector length as select condition";
104 } else if (Op0->getType() != Type::getInt1Ty(Op0->getContext())) {
105 return "select condition must be i1 or <n x i1>";
106 }
107 return nullptr;
108}
109
110//===----------------------------------------------------------------------===//
111// PHINode Class
112//===----------------------------------------------------------------------===//
113
// Copy constructor: clones the incoming values and incoming-block list of PN.
PHINode::PHINode(const PHINode &PN)
    : Instruction(PN.getType(), Instruction::PHI, nullptr, PN.getNumOperands()),
      ReservedSpace(PN.getNumOperands()) {
  // NOTE(review): two lines (original 117 and 120) were dropped by the
  // rendering this file was recovered from — presumably the hung-off use
  // allocation and the SubclassOptionalData copy; verify against upstream.
  std::copy(PN.op_begin(), PN.op_end(), op_begin());
  // Mirror the incoming-block list alongside the copied incoming values.
  copyIncomingBlocks(make_range(PN.block_begin(), PN.block_end()));
}
122
// removeIncomingValue - Remove an incoming value. This is useful if a
// predecessor basic block is deleted.
// Returns the removed incoming value; may delete the PHI itself when it
// becomes empty and DeletePHIIfEmpty is set.
Value *PHINode::removeIncomingValue(unsigned Idx, bool DeletePHIIfEmpty) {
  Value *Removed = getIncomingValue(Idx);

  // Move everything after this operand down.
  //
  // FIXME: we could just swap with the end of the list, then erase. However,
  // clients might not expect this to happen. The code as it is thrashes the
  // use/def lists, which is kinda lame.
  std::copy(op_begin() + Idx + 1, op_end(), op_begin() + Idx);
  // NOTE(review): original line 134 is missing here — the incoming-block
  // list must be shifted down in lock-step with the operands; verify.

  // Nuke the last value.
  Op<-1>().set(nullptr);
  // NOTE(review): original line 138 is missing — expected the operand-count
  // decrement matching the removed entry; verify against upstream.

  // If the PHI node is dead, because it has zero entries, nuke it now.
  if (getNumOperands() == 0 && DeletePHIIfEmpty) {
    // If anyone is using this PHI, make them use a dummy value instead...
    // NOTE(review): original lines 143-144 (replace-all-uses with a dummy
    // value and erase-from-parent) were dropped by the rendering.
  }
  return Removed;
}
148
// Remove every incoming entry whose index satisfies Predicate, compacting
// the operand and incoming-block lists in lock-step.
void PHINode::removeIncomingValueIf(function_ref<bool(unsigned)> Predicate,
                                    bool DeletePHIIfEmpty) {
  // First pass: collect the indices to remove so the compaction below can
  // test membership cheaply.
  SmallDenseSet<unsigned> RemoveIndices;
  for (unsigned Idx = 0; Idx < getNumIncomingValues(); ++Idx)
    if (Predicate(Idx))
      RemoveIndices.insert(Idx);

  if (RemoveIndices.empty())
    return;

  // Remove operands.
  auto NewOpEnd = remove_if(operands(), [&](Use &U) {
    return RemoveIndices.contains(U.getOperandNo());
  });
  // Clear the now-unused tail slots so they do not keep values alive.
  for (Use &U : make_range(NewOpEnd, op_end()))
    U.set(nullptr);

  // Remove incoming blocks.
  (void)std::remove_if(const_cast<block_iterator>(block_begin()),
                       const_cast<block_iterator>(block_end()), [&](BasicBlock *&BB) {
    return RemoveIndices.contains(&BB - block_begin());
  });

  setNumHungOffUseOperands(getNumOperands() - RemoveIndices.size());

  // If the PHI node is dead, because it has zero entries, nuke it now.
  if (getNumOperands() == 0 && DeletePHIIfEmpty) {
    // If anyone is using this PHI, make them use a dummy value instead...
    // NOTE(review): original lines 177-178 (replace-all-uses + erase) were
    // dropped by the rendering this file was recovered from.
  }
}
181
182/// growOperands - grow operands - This grows the operand list in response
183/// to a push_back style of operation. This grows the number of ops by 1.5
184/// times.
185///
186void PHINode::growOperands() {
187 unsigned e = getNumOperands();
188 unsigned NumOps = e + e / 2;
189 if (NumOps < 2) NumOps = 2; // 2 op PHI nodes are VERY common.
190
191 ReservedSpace = NumOps;
192 growHungoffUses(ReservedSpace, /* IsPhi */ true);
193}
194
195/// hasConstantValue - If the specified PHI node always merges together the same
196/// value, return the value, otherwise return null.
198 // Exploit the fact that phi nodes always have at least one entry.
199 Value *ConstantValue = getIncomingValue(0);
200 for (unsigned i = 1, e = getNumIncomingValues(); i != e; ++i)
201 if (getIncomingValue(i) != ConstantValue && getIncomingValue(i) != this) {
202 if (ConstantValue != this)
203 return nullptr; // Incoming values not all the same.
204 // The case where the first value is this PHI.
205 ConstantValue = getIncomingValue(i);
206 }
207 if (ConstantValue == this)
208 return UndefValue::get(getType());
209 return ConstantValue;
210}
211
212/// hasConstantOrUndefValue - Whether the specified PHI node always merges
213/// together the same value, assuming that undefs result in the same value as
214/// non-undefs.
215/// Unlike \ref hasConstantValue, this does not return a value because the
216/// unique non-undef incoming value need not dominate the PHI node.
218 Value *ConstantValue = nullptr;
219 for (unsigned i = 0, e = getNumIncomingValues(); i != e; ++i) {
221 if (Incoming != this && !isa<UndefValue>(Incoming)) {
222 if (ConstantValue && ConstantValue != Incoming)
223 return false;
224 ConstantValue = Incoming;
225 }
226 }
227 return true;
228}
229
230//===----------------------------------------------------------------------===//
231// LandingPadInst Implementation
232//===----------------------------------------------------------------------===//
233
234LandingPadInst::LandingPadInst(Type *RetTy, unsigned NumReservedValues,
235 const Twine &NameStr,
236 BasicBlock::iterator InsertBefore)
237 : Instruction(RetTy, Instruction::LandingPad, nullptr, 0, InsertBefore) {
238 init(NumReservedValues, NameStr);
239}
240
241LandingPadInst::LandingPadInst(Type *RetTy, unsigned NumReservedValues,
242 const Twine &NameStr, Instruction *InsertBefore)
243 : Instruction(RetTy, Instruction::LandingPad, nullptr, 0, InsertBefore) {
244 init(NumReservedValues, NameStr);
245}
246
247LandingPadInst::LandingPadInst(Type *RetTy, unsigned NumReservedValues,
248 const Twine &NameStr, BasicBlock *InsertAtEnd)
249 : Instruction(RetTy, Instruction::LandingPad, nullptr, 0, InsertAtEnd) {
250 init(NumReservedValues, NameStr);
251}
252
253LandingPadInst::LandingPadInst(const LandingPadInst &LP)
254 : Instruction(LP.getType(), Instruction::LandingPad, nullptr,
255 LP.getNumOperands()),
256 ReservedSpace(LP.getNumOperands()) {
258 Use *OL = getOperandList();
259 const Use *InOL = LP.getOperandList();
260 for (unsigned I = 0, E = ReservedSpace; I != E; ++I)
261 OL[I] = InOL[I];
262
263 setCleanup(LP.isCleanup());
264}
265
266LandingPadInst *LandingPadInst::Create(Type *RetTy, unsigned NumReservedClauses,
267 const Twine &NameStr,
268 Instruction *InsertBefore) {
269 return new LandingPadInst(RetTy, NumReservedClauses, NameStr, InsertBefore);
270}
271
272LandingPadInst *LandingPadInst::Create(Type *RetTy, unsigned NumReservedClauses,
273 const Twine &NameStr,
274 BasicBlock *InsertAtEnd) {
275 return new LandingPadInst(RetTy, NumReservedClauses, NameStr, InsertAtEnd);
276}
277
278void LandingPadInst::init(unsigned NumReservedValues, const Twine &NameStr) {
279 ReservedSpace = NumReservedValues;
281 allocHungoffUses(ReservedSpace);
282 setName(NameStr);
283 setCleanup(false);
284}
285
286/// growOperands - grow operands - This grows the operand list in response to a
287/// push_back style of operation. This grows the number of ops by 2 times.
288void LandingPadInst::growOperands(unsigned Size) {
289 unsigned e = getNumOperands();
290 if (ReservedSpace >= e + Size) return;
291 ReservedSpace = (std::max(e, 1U) + Size / 2) * 2;
292 growHungoffUses(ReservedSpace);
293}
294
296 unsigned OpNo = getNumOperands();
297 growOperands(1);
298 assert(OpNo < ReservedSpace && "Growing didn't work!");
300 getOperandList()[OpNo] = Val;
301}
302
303//===----------------------------------------------------------------------===//
304// CallBase Implementation
305//===----------------------------------------------------------------------===//
306
308 BasicBlock::iterator InsertPt) {
309 switch (CB->getOpcode()) {
310 case Instruction::Call:
311 return CallInst::Create(cast<CallInst>(CB), Bundles, InsertPt);
312 case Instruction::Invoke:
313 return InvokeInst::Create(cast<InvokeInst>(CB), Bundles, InsertPt);
314 case Instruction::CallBr:
315 return CallBrInst::Create(cast<CallBrInst>(CB), Bundles, InsertPt);
316 default:
317 llvm_unreachable("Unknown CallBase sub-class!");
318 }
319}
320
322 Instruction *InsertPt) {
323 switch (CB->getOpcode()) {
324 case Instruction::Call:
325 return CallInst::Create(cast<CallInst>(CB), Bundles, InsertPt);
326 case Instruction::Invoke:
327 return InvokeInst::Create(cast<InvokeInst>(CB), Bundles, InsertPt);
328 case Instruction::CallBr:
329 return CallBrInst::Create(cast<CallBrInst>(CB), Bundles, InsertPt);
330 default:
331 llvm_unreachable("Unknown CallBase sub-class!");
332 }
333}
334
336 Instruction *InsertPt) {
338 for (unsigned i = 0, e = CI->getNumOperandBundles(); i < e; ++i) {
339 auto ChildOB = CI->getOperandBundleAt(i);
340 if (ChildOB.getTagName() != OpB.getTag())
341 OpDefs.emplace_back(ChildOB);
342 }
343 OpDefs.emplace_back(OpB);
344 return CallBase::Create(CI, OpDefs, InsertPt);
345}
346
347
349
351 assert(getOpcode() == Instruction::CallBr && "Unexpected opcode!");
352 return cast<CallBrInst>(this)->getNumIndirectDests() + 1;
353}
354
356 const Value *V = getCalledOperand();
357 if (isa<Function>(V) || isa<Constant>(V))
358 return false;
359 return !isInlineAsm();
360}
361
362/// Tests if this call site must be tail call optimized. Only a CallInst can
363/// be tail call optimized.
365 if (auto *CI = dyn_cast<CallInst>(this))
366 return CI->isMustTailCall();
367 return false;
368}
369
370/// Tests if this call site is marked as a tail call.
372 if (auto *CI = dyn_cast<CallInst>(this))
373 return CI->isTailCall();
374 return false;
375}
376
378 if (auto *F = getCalledFunction())
379 return F->getIntrinsicID();
381}
382
385
386 if (const Function *F = getCalledFunction())
387 Mask |= F->getAttributes().getRetNoFPClass();
388 return Mask;
389}
390
393
394 if (const Function *F = getCalledFunction())
395 Mask |= F->getAttributes().getParamNoFPClass(i);
396 return Mask;
397}
398
399std::optional<ConstantRange> CallBase::getRange() const {
400 const Attribute RangeAttr = getRetAttr(llvm::Attribute::Range);
401 if (RangeAttr.isValid())
402 return RangeAttr.getRange();
403 return std::nullopt;
404}
405
407 if (hasRetAttr(Attribute::NonNull))
408 return true;
409
410 if (getRetDereferenceableBytes() > 0 &&
412 return true;
413
414 return false;
415}
416
418 unsigned Index;
419
420 if (Attrs.hasAttrSomewhere(Kind, &Index))
422 if (const Function *F = getCalledFunction())
423 if (F->getAttributes().hasAttrSomewhere(Kind, &Index))
425
426 return nullptr;
427}
428
429/// Determine whether the argument or parameter has the given attribute.
430bool CallBase::paramHasAttr(unsigned ArgNo, Attribute::AttrKind Kind) const {
431 assert(ArgNo < arg_size() && "Param index out of bounds!");
432
433 if (Attrs.hasParamAttr(ArgNo, Kind))
434 return true;
435
436 const Function *F = getCalledFunction();
437 if (!F)
438 return false;
439
440 if (!F->getAttributes().hasParamAttr(ArgNo, Kind))
441 return false;
442
443 // Take into account mod/ref by operand bundles.
444 switch (Kind) {
445 case Attribute::ReadNone:
447 case Attribute::ReadOnly:
449 case Attribute::WriteOnly:
450 return !hasReadingOperandBundles();
451 default:
452 return true;
453 }
454}
455
456bool CallBase::hasFnAttrOnCalledFunction(Attribute::AttrKind Kind) const {
457 Value *V = getCalledOperand();
458 if (auto *CE = dyn_cast<ConstantExpr>(V))
459 if (CE->getOpcode() == BitCast)
460 V = CE->getOperand(0);
461
462 if (auto *F = dyn_cast<Function>(V))
463 return F->getAttributes().hasFnAttr(Kind);
464
465 return false;
466}
467
468bool CallBase::hasFnAttrOnCalledFunction(StringRef Kind) const {
469 Value *V = getCalledOperand();
470 if (auto *CE = dyn_cast<ConstantExpr>(V))
471 if (CE->getOpcode() == BitCast)
472 V = CE->getOperand(0);
473
474 if (auto *F = dyn_cast<Function>(V))
475 return F->getAttributes().hasFnAttr(Kind);
476
477 return false;
478}
479
480template <typename AK>
481Attribute CallBase::getFnAttrOnCalledFunction(AK Kind) const {
482 if constexpr (std::is_same_v<AK, Attribute::AttrKind>) {
483 // getMemoryEffects() correctly combines memory effects from the call-site,
484 // operand bundles and function.
485 assert(Kind != Attribute::Memory && "Use getMemoryEffects() instead");
486 }
487
489 if (auto *CE = dyn_cast<ConstantExpr>(V))
490 if (CE->getOpcode() == BitCast)
491 V = CE->getOperand(0);
492
493 if (auto *F = dyn_cast<Function>(V))
494 return F->getAttributes().getFnAttr(Kind);
495
496 return Attribute();
497}
498
499template Attribute
500CallBase::getFnAttrOnCalledFunction(Attribute::AttrKind Kind) const;
501template Attribute CallBase::getFnAttrOnCalledFunction(StringRef Kind) const;
502
505 for (unsigned i = 0, e = getNumOperandBundles(); i != e; ++i)
507}
508
511 const unsigned BeginIndex) {
512 auto It = op_begin() + BeginIndex;
513 for (auto &B : Bundles)
514 It = std::copy(B.input_begin(), B.input_end(), It);
515
516 auto *ContextImpl = getContext().pImpl;
517 auto BI = Bundles.begin();
518 unsigned CurrentIndex = BeginIndex;
519
520 for (auto &BOI : bundle_op_infos()) {
521 assert(BI != Bundles.end() && "Incorrect allocation?");
522
523 BOI.Tag = ContextImpl->getOrInsertBundleTag(BI->getTag());
524 BOI.Begin = CurrentIndex;
525 BOI.End = CurrentIndex + BI->input_size();
526 CurrentIndex = BOI.End;
527 BI++;
528 }
529
530 assert(BI == Bundles.end() && "Incorrect allocation?");
531
532 return It;
533}
534
536 /// When there isn't many bundles, we do a simple linear search.
537 /// Else fallback to a binary-search that use the fact that bundles usually
538 /// have similar number of argument to get faster convergence.
540 for (auto &BOI : bundle_op_infos())
541 if (BOI.Begin <= OpIdx && OpIdx < BOI.End)
542 return BOI;
543
544 llvm_unreachable("Did not find operand bundle for operand!");
545 }
546
547 assert(OpIdx >= arg_size() && "the Idx is not in the operand bundles");
549 OpIdx < std::prev(bundle_op_info_end())->End &&
550 "The Idx isn't in the operand bundle");
551
552 /// We need a decimal number below and to prevent using floating point numbers
553 /// we use an intergal value multiplied by this constant.
554 constexpr unsigned NumberScaling = 1024;
555
558 bundle_op_iterator Current = Begin;
559
560 while (Begin != End) {
561 unsigned ScaledOperandPerBundle =
562 NumberScaling * (std::prev(End)->End - Begin->Begin) / (End - Begin);
563 Current = Begin + (((OpIdx - Begin->Begin) * NumberScaling) /
564 ScaledOperandPerBundle);
565 if (Current >= End)
566 Current = std::prev(End);
567 assert(Current < End && Current >= Begin &&
568 "the operand bundle doesn't cover every value in the range");
569 if (OpIdx >= Current->Begin && OpIdx < Current->End)
570 break;
571 if (OpIdx >= Current->End)
572 Begin = Current + 1;
573 else
574 End = Current;
575 }
576
577 assert(OpIdx >= Current->Begin && OpIdx < Current->End &&
578 "the operand bundle doesn't cover every value in the range");
579 return *Current;
580}
581
584 BasicBlock::iterator InsertPt) {
585 if (CB->getOperandBundle(ID))
586 return CB;
587
589 CB->getOperandBundlesAsDefs(Bundles);
590 Bundles.push_back(OB);
591 return Create(CB, Bundles, InsertPt);
592}
593
596 Instruction *InsertPt) {
597 if (CB->getOperandBundle(ID))
598 return CB;
599
601 CB->getOperandBundlesAsDefs(Bundles);
602 Bundles.push_back(OB);
603 return Create(CB, Bundles, InsertPt);
604}
605
607 BasicBlock::iterator InsertPt) {
609 bool CreateNew = false;
610
611 for (unsigned I = 0, E = CB->getNumOperandBundles(); I != E; ++I) {
612 auto Bundle = CB->getOperandBundleAt(I);
613 if (Bundle.getTagID() == ID) {
614 CreateNew = true;
615 continue;
616 }
617 Bundles.emplace_back(Bundle);
618 }
619
620 return CreateNew ? Create(CB, Bundles, InsertPt) : CB;
621}
622
624 Instruction *InsertPt) {
626 bool CreateNew = false;
627
628 for (unsigned I = 0, E = CB->getNumOperandBundles(); I != E; ++I) {
629 auto Bundle = CB->getOperandBundleAt(I);
630 if (Bundle.getTagID() == ID) {
631 CreateNew = true;
632 continue;
633 }
634 Bundles.emplace_back(Bundle);
635 }
636
637 return CreateNew ? Create(CB, Bundles, InsertPt) : CB;
638}
639
641 // Implementation note: this is a conservative implementation of operand
642 // bundle semantics, where *any* non-assume operand bundle (other than
643 // ptrauth) forces a callsite to be at least readonly.
646 getIntrinsicID() != Intrinsic::assume;
647}
648
653 getIntrinsicID() != Intrinsic::assume;
654}
655
658 if (auto *Fn = dyn_cast<Function>(getCalledOperand())) {
659 MemoryEffects FnME = Fn->getMemoryEffects();
660 if (hasOperandBundles()) {
661 // TODO: Add a method to get memory effects for operand bundles instead.
663 FnME |= MemoryEffects::readOnly();
665 FnME |= MemoryEffects::writeOnly();
666 }
667 ME &= FnME;
668 }
669 return ME;
670}
673}
674
675/// Determine if the function does not access memory.
678}
681}
682
683/// Determine if the function does not access or only reads memory.
686}
689}
690
691/// Determine if the function does not access or only writes memory.
694}
697}
698
699/// Determine if the call can access memmory only using pointers based
700/// on its arguments.
703}
706}
707
708/// Determine if the function may only access memory that is
709/// inaccessible from the IR.
712}
715}
716
717/// Determine if the function may only access memory that is
718/// either inaccessible from the IR or pointed to by its arguments.
721}
725}
726
727//===----------------------------------------------------------------------===//
728// CallInst Implementation
729//===----------------------------------------------------------------------===//
730
731void CallInst::init(FunctionType *FTy, Value *Func, ArrayRef<Value *> Args,
732 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr) {
733 this->FTy = FTy;
734 assert(getNumOperands() == Args.size() + CountBundleInputs(Bundles) + 1 &&
735 "NumOperands not set up?");
736
737#ifndef NDEBUG
738 assert((Args.size() == FTy->getNumParams() ||
739 (FTy->isVarArg() && Args.size() > FTy->getNumParams())) &&
740 "Calling a function with bad signature!");
741
742 for (unsigned i = 0; i != Args.size(); ++i)
743 assert((i >= FTy->getNumParams() ||
744 FTy->getParamType(i) == Args[i]->getType()) &&
745 "Calling a function with a bad signature!");
746#endif
747
748 // Set operands in order of their index to match use-list-order
749 // prediction.
750 llvm::copy(Args, op_begin());
751 setCalledOperand(Func);
752
753 auto It = populateBundleOperandInfos(Bundles, Args.size());
754 (void)It;
755 assert(It + 1 == op_end() && "Should add up!");
756
757 setName(NameStr);
758}
759
760void CallInst::init(FunctionType *FTy, Value *Func, const Twine &NameStr) {
761 this->FTy = FTy;
762 assert(getNumOperands() == 1 && "NumOperands not set up?");
763 setCalledOperand(Func);
764
765 assert(FTy->getNumParams() == 0 && "Calling a function with bad signature");
766
767 setName(NameStr);
768}
769
770CallInst::CallInst(FunctionType *Ty, Value *Func, const Twine &Name,
771 BasicBlock::iterator InsertBefore)
772 : CallBase(Ty->getReturnType(), Instruction::Call,
773 OperandTraits<CallBase>::op_end(this) - 1, 1, InsertBefore) {
774 init(Ty, Func, Name);
775}
776
777CallInst::CallInst(FunctionType *Ty, Value *Func, const Twine &Name,
778 Instruction *InsertBefore)
779 : CallBase(Ty->getReturnType(), Instruction::Call,
780 OperandTraits<CallBase>::op_end(this) - 1, 1, InsertBefore) {
781 init(Ty, Func, Name);
782}
783
784CallInst::CallInst(FunctionType *Ty, Value *Func, const Twine &Name,
785 BasicBlock *InsertAtEnd)
786 : CallBase(Ty->getReturnType(), Instruction::Call,
787 OperandTraits<CallBase>::op_end(this) - 1, 1, InsertAtEnd) {
788 init(Ty, Func, Name);
789}
790
791CallInst::CallInst(const CallInst &CI)
792 : CallBase(CI.Attrs, CI.FTy, CI.getType(), Instruction::Call,
793 OperandTraits<CallBase>::op_end(this) - CI.getNumOperands(),
794 CI.getNumOperands()) {
795 setTailCallKind(CI.getTailCallKind());
797
798 std::copy(CI.op_begin(), CI.op_end(), op_begin());
799 std::copy(CI.bundle_op_info_begin(), CI.bundle_op_info_end(),
802}
803
805 BasicBlock::iterator InsertPt) {
806 std::vector<Value *> Args(CI->arg_begin(), CI->arg_end());
807
808 auto *NewCI = CallInst::Create(CI->getFunctionType(), CI->getCalledOperand(),
809 Args, OpB, CI->getName(), InsertPt);
810 NewCI->setTailCallKind(CI->getTailCallKind());
811 NewCI->setCallingConv(CI->getCallingConv());
812 NewCI->SubclassOptionalData = CI->SubclassOptionalData;
813 NewCI->setAttributes(CI->getAttributes());
814 NewCI->setDebugLoc(CI->getDebugLoc());
815 return NewCI;
816}
817
819 Instruction *InsertPt) {
820 std::vector<Value *> Args(CI->arg_begin(), CI->arg_end());
821
822 auto *NewCI = CallInst::Create(CI->getFunctionType(), CI->getCalledOperand(),
823 Args, OpB, CI->getName(), InsertPt);
824 NewCI->setTailCallKind(CI->getTailCallKind());
825 NewCI->setCallingConv(CI->getCallingConv());
826 NewCI->SubclassOptionalData = CI->SubclassOptionalData;
827 NewCI->setAttributes(CI->getAttributes());
828 NewCI->setDebugLoc(CI->getDebugLoc());
829 return NewCI;
830}
831
832// Update profile weight for call instruction by scaling it using the ratio
833// of S/T. The meaning of "branch_weights" meta data for call instruction is
834// transfered to represent call count.
836 if (T == 0) {
837 LLVM_DEBUG(dbgs() << "Attempting to update profile weights will result in "
838 "div by 0. Ignoring. Likely the function "
839 << getParent()->getParent()->getName()
840 << " has 0 entry count, and contains call instructions "
841 "with non-zero prof info.");
842 return;
843 }
844 scaleProfData(*this, S, T);
845}
846
847//===----------------------------------------------------------------------===//
848// InvokeInst Implementation
849//===----------------------------------------------------------------------===//
850
851void InvokeInst::init(FunctionType *FTy, Value *Fn, BasicBlock *IfNormal,
852 BasicBlock *IfException, ArrayRef<Value *> Args,
854 const Twine &NameStr) {
855 this->FTy = FTy;
856
857 assert((int)getNumOperands() ==
858 ComputeNumOperands(Args.size(), CountBundleInputs(Bundles)) &&
859 "NumOperands not set up?");
860
861#ifndef NDEBUG
862 assert(((Args.size() == FTy->getNumParams()) ||
863 (FTy->isVarArg() && Args.size() > FTy->getNumParams())) &&
864 "Invoking a function with bad signature");
865
866 for (unsigned i = 0, e = Args.size(); i != e; i++)
867 assert((i >= FTy->getNumParams() ||
868 FTy->getParamType(i) == Args[i]->getType()) &&
869 "Invoking a function with a bad signature!");
870#endif
871
872 // Set operands in order of their index to match use-list-order
873 // prediction.
874 llvm::copy(Args, op_begin());
875 setNormalDest(IfNormal);
876 setUnwindDest(IfException);
878
879 auto It = populateBundleOperandInfos(Bundles, Args.size());
880 (void)It;
881 assert(It + 3 == op_end() && "Should add up!");
882
883 setName(NameStr);
884}
885
886InvokeInst::InvokeInst(const InvokeInst &II)
887 : CallBase(II.Attrs, II.FTy, II.getType(), Instruction::Invoke,
888 OperandTraits<CallBase>::op_end(this) - II.getNumOperands(),
889 II.getNumOperands()) {
891 std::copy(II.op_begin(), II.op_end(), op_begin());
892 std::copy(II.bundle_op_info_begin(), II.bundle_op_info_end(),
895}
896
898 BasicBlock::iterator InsertPt) {
899 std::vector<Value *> Args(II->arg_begin(), II->arg_end());
900
901 auto *NewII = InvokeInst::Create(
903 II->getUnwindDest(), Args, OpB, II->getName(), InsertPt);
904 NewII->setCallingConv(II->getCallingConv());
905 NewII->SubclassOptionalData = II->SubclassOptionalData;
906 NewII->setAttributes(II->getAttributes());
907 NewII->setDebugLoc(II->getDebugLoc());
908 return NewII;
909}
910
912 Instruction *InsertPt) {
913 std::vector<Value *> Args(II->arg_begin(), II->arg_end());
914
915 auto *NewII = InvokeInst::Create(
917 II->getUnwindDest(), Args, OpB, II->getName(), InsertPt);
918 NewII->setCallingConv(II->getCallingConv());
919 NewII->SubclassOptionalData = II->SubclassOptionalData;
920 NewII->setAttributes(II->getAttributes());
921 NewII->setDebugLoc(II->getDebugLoc());
922 return NewII;
923}
924
926 return cast<LandingPadInst>(getUnwindDest()->getFirstNonPHI());
927}
928
929//===----------------------------------------------------------------------===//
930// CallBrInst Implementation
931//===----------------------------------------------------------------------===//
932
933void CallBrInst::init(FunctionType *FTy, Value *Fn, BasicBlock *Fallthrough,
934 ArrayRef<BasicBlock *> IndirectDests,
937 const Twine &NameStr) {
938 this->FTy = FTy;
939
940 assert((int)getNumOperands() ==
941 ComputeNumOperands(Args.size(), IndirectDests.size(),
942 CountBundleInputs(Bundles)) &&
943 "NumOperands not set up?");
944
945#ifndef NDEBUG
946 assert(((Args.size() == FTy->getNumParams()) ||
947 (FTy->isVarArg() && Args.size() > FTy->getNumParams())) &&
948 "Calling a function with bad signature");
949
950 for (unsigned i = 0, e = Args.size(); i != e; i++)
951 assert((i >= FTy->getNumParams() ||
952 FTy->getParamType(i) == Args[i]->getType()) &&
953 "Calling a function with a bad signature!");
954#endif
955
956 // Set operands in order of their index to match use-list-order
957 // prediction.
958 std::copy(Args.begin(), Args.end(), op_begin());
959 NumIndirectDests = IndirectDests.size();
960 setDefaultDest(Fallthrough);
961 for (unsigned i = 0; i != NumIndirectDests; ++i)
962 setIndirectDest(i, IndirectDests[i]);
964
965 auto It = populateBundleOperandInfos(Bundles, Args.size());
966 (void)It;
967 assert(It + 2 + IndirectDests.size() == op_end() && "Should add up!");
968
969 setName(NameStr);
970}
971
972CallBrInst::CallBrInst(const CallBrInst &CBI)
973 : CallBase(CBI.Attrs, CBI.FTy, CBI.getType(), Instruction::CallBr,
974 OperandTraits<CallBase>::op_end(this) - CBI.getNumOperands(),
975 CBI.getNumOperands()) {
977 std::copy(CBI.op_begin(), CBI.op_end(), op_begin());
978 std::copy(CBI.bundle_op_info_begin(), CBI.bundle_op_info_end(),
981 NumIndirectDests = CBI.NumIndirectDests;
982}
983
985 BasicBlock::iterator InsertPt) {
986 std::vector<Value *> Args(CBI->arg_begin(), CBI->arg_end());
987
988 auto *NewCBI = CallBrInst::Create(
989 CBI->getFunctionType(), CBI->getCalledOperand(), CBI->getDefaultDest(),
990 CBI->getIndirectDests(), Args, OpB, CBI->getName(), InsertPt);
991 NewCBI->setCallingConv(CBI->getCallingConv());
992 NewCBI->SubclassOptionalData = CBI->SubclassOptionalData;
993 NewCBI->setAttributes(CBI->getAttributes());
994 NewCBI->setDebugLoc(CBI->getDebugLoc());
995 NewCBI->NumIndirectDests = CBI->NumIndirectDests;
996 return NewCBI;
997}
998
1000 Instruction *InsertPt) {
1001 std::vector<Value *> Args(CBI->arg_begin(), CBI->arg_end());
1002
1003 auto *NewCBI = CallBrInst::Create(
1004 CBI->getFunctionType(), CBI->getCalledOperand(), CBI->getDefaultDest(),
1005 CBI->getIndirectDests(), Args, OpB, CBI->getName(), InsertPt);
1006 NewCBI->setCallingConv(CBI->getCallingConv());
1007 NewCBI->SubclassOptionalData = CBI->SubclassOptionalData;
1008 NewCBI->setAttributes(CBI->getAttributes());
1009 NewCBI->setDebugLoc(CBI->getDebugLoc());
1010 NewCBI->NumIndirectDests = CBI->NumIndirectDests;
1011 return NewCBI;
1012}
1013
1014//===----------------------------------------------------------------------===//
1015// ReturnInst Implementation
1016//===----------------------------------------------------------------------===//
1017
1018ReturnInst::ReturnInst(const ReturnInst &RI)
1019 : Instruction(Type::getVoidTy(RI.getContext()), Instruction::Ret,
1020 OperandTraits<ReturnInst>::op_end(this) - RI.getNumOperands(),
1021 RI.getNumOperands()) {
1022 if (RI.getNumOperands())
1023 Op<0>() = RI.Op<0>();
1025}
1026
1027ReturnInst::ReturnInst(LLVMContext &C, Value *retVal,
1028 BasicBlock::iterator InsertBefore)
1029 : Instruction(Type::getVoidTy(C), Instruction::Ret,
1030 OperandTraits<ReturnInst>::op_end(this) - !!retVal, !!retVal,
1031 InsertBefore) {
1032 if (retVal)
1033 Op<0>() = retVal;
1034}
1035
1036ReturnInst::ReturnInst(LLVMContext &C, Value *retVal,
1037 Instruction *InsertBefore)
1038 : Instruction(Type::getVoidTy(C), Instruction::Ret,
1039 OperandTraits<ReturnInst>::op_end(this) - !!retVal, !!retVal,
1040 InsertBefore) {
1041 if (retVal)
1042 Op<0>() = retVal;
1043}
1044
1045ReturnInst::ReturnInst(LLVMContext &C, Value *retVal, BasicBlock *InsertAtEnd)
1046 : Instruction(Type::getVoidTy(C), Instruction::Ret,
1047 OperandTraits<ReturnInst>::op_end(this) - !!retVal, !!retVal,
1048 InsertAtEnd) {
1049 if (retVal)
1050 Op<0>() = retVal;
1051}
1052
1053ReturnInst::ReturnInst(LLVMContext &Context, BasicBlock *InsertAtEnd)
1054 : Instruction(Type::getVoidTy(Context), Instruction::Ret,
1055 OperandTraits<ReturnInst>::op_end(this), 0, InsertAtEnd) {}
1056
1057//===----------------------------------------------------------------------===//
1058// ResumeInst Implementation
1059//===----------------------------------------------------------------------===//
1060
// ResumeInst constructors: 'resume <exn>' -- always exactly one operand (the
// exception value) and a void result type.

// Copy constructor.
ResumeInst::ResumeInst(const ResumeInst &RI)
    : Instruction(Type::getVoidTy(RI.getContext()), Instruction::Resume,
                  OperandTraits<ResumeInst>::op_begin(this), 1) {
  Op<0>() = RI.Op<0>();
}

ResumeInst::ResumeInst(Value *Exn, BasicBlock::iterator InsertBefore)
    : Instruction(Type::getVoidTy(Exn->getContext()), Instruction::Resume,
                  OperandTraits<ResumeInst>::op_begin(this), 1, InsertBefore) {
  Op<0>() = Exn;
}

ResumeInst::ResumeInst(Value *Exn, Instruction *InsertBefore)
    : Instruction(Type::getVoidTy(Exn->getContext()), Instruction::Resume,
                  OperandTraits<ResumeInst>::op_begin(this), 1, InsertBefore) {
  Op<0>() = Exn;
}

ResumeInst::ResumeInst(Value *Exn, BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(Exn->getContext()), Instruction::Resume,
                  OperandTraits<ResumeInst>::op_begin(this), 1, InsertAtEnd) {
  Op<0>() = Exn;
}
1084
1085//===----------------------------------------------------------------------===//
1086// CleanupReturnInst Implementation
1087//===----------------------------------------------------------------------===//
1088
// Copy constructor. Copies the cleanuppad operand and, when present, the
// unwind-destination operand, plus CRI's subclass state.
// NOTE(review): the argument to setSubclassData (the value copied from CRI)
// is elided in this extracted view -- confirm against the original source.
CleanupReturnInst::CleanupReturnInst(const CleanupReturnInst &CRI)
    : Instruction(CRI.getType(), Instruction::CleanupRet,
                  OperandTraits<CleanupReturnInst>::op_end(this) -
                      CRI.getNumOperands(),
                  CRI.getNumOperands()) {
  setSubclassData<Instruction::OpaqueField>(
  Op<0>() = CRI.Op<0>();
  if (CRI.hasUnwindDest())
    Op<1>() = CRI.Op<1>();
}
1100
1101void CleanupReturnInst::init(Value *CleanupPad, BasicBlock *UnwindBB) {
1102 if (UnwindBB)
1103 setSubclassData<UnwindDestField>(true);
1104
1105 Op<0>() = CleanupPad;
1106 if (UnwindBB)
1107 Op<1>() = UnwindBB;
1108}
1109
// CleanupReturnInst constructors. Values is 1 or 2 (cleanuppad, plus the
// optional unwind destination); operand storage is co-allocated immediately
// before the instruction, and init() fills in the operands.

CleanupReturnInst::CleanupReturnInst(Value *CleanupPad, BasicBlock *UnwindBB,
                                     unsigned Values,
                                     BasicBlock::iterator InsertBefore)
    : Instruction(Type::getVoidTy(CleanupPad->getContext()),
                  Instruction::CleanupRet,
                  OperandTraits<CleanupReturnInst>::op_end(this) - Values,
                  Values, InsertBefore) {
  init(CleanupPad, UnwindBB);
}

CleanupReturnInst::CleanupReturnInst(Value *CleanupPad, BasicBlock *UnwindBB,
                                     unsigned Values, Instruction *InsertBefore)
    : Instruction(Type::getVoidTy(CleanupPad->getContext()),
                  Instruction::CleanupRet,
                  OperandTraits<CleanupReturnInst>::op_end(this) - Values,
                  Values, InsertBefore) {
  init(CleanupPad, UnwindBB);
}

CleanupReturnInst::CleanupReturnInst(Value *CleanupPad, BasicBlock *UnwindBB,
                                     unsigned Values, BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(CleanupPad->getContext()),
                  Instruction::CleanupRet,
                  OperandTraits<CleanupReturnInst>::op_end(this) - Values,
                  Values, InsertAtEnd) {
  init(CleanupPad, UnwindBB);
}
1137
1138//===----------------------------------------------------------------------===//
1139// CatchReturnInst Implementation
1140//===----------------------------------------------------------------------===//
// Wire up the two operands of a catchret: the catchpad being exited and the
// successor block control transfers to.
void CatchReturnInst::init(Value *CatchPad, BasicBlock *BB) {
  Op<0>() = CatchPad;
  Op<1>() = BB;
}
1145
// CatchReturnInst constructors: 'catchret from <pad> to <bb>' -- always
// exactly two operands and a void result.

// Copy constructor.
CatchReturnInst::CatchReturnInst(const CatchReturnInst &CRI)
    : Instruction(Type::getVoidTy(CRI.getContext()), Instruction::CatchRet,
                  OperandTraits<CatchReturnInst>::op_begin(this), 2) {
  Op<0>() = CRI.Op<0>();
  Op<1>() = CRI.Op<1>();
}

CatchReturnInst::CatchReturnInst(Value *CatchPad, BasicBlock *BB,
                                 BasicBlock::iterator InsertBefore)
    : Instruction(Type::getVoidTy(BB->getContext()), Instruction::CatchRet,
                  OperandTraits<CatchReturnInst>::op_begin(this), 2,
                  InsertBefore) {
  init(CatchPad, BB);
}

CatchReturnInst::CatchReturnInst(Value *CatchPad, BasicBlock *BB,
                                 Instruction *InsertBefore)
    : Instruction(Type::getVoidTy(BB->getContext()), Instruction::CatchRet,
                  OperandTraits<CatchReturnInst>::op_begin(this), 2,
                  InsertBefore) {
  init(CatchPad, BB);
}

CatchReturnInst::CatchReturnInst(Value *CatchPad, BasicBlock *BB,
                                 BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(BB->getContext()), Instruction::CatchRet,
                  OperandTraits<CatchReturnInst>::op_begin(this), 2,
                  InsertAtEnd) {
  init(CatchPad, BB);
}
1176
1177//===----------------------------------------------------------------------===//
1178// CatchSwitchInst Implementation
1179//===----------------------------------------------------------------------===//
1180
// CatchSwitchInst constructors. Operands are "hung off" (allocated separately
// from the instruction), so the Instruction base receives no inline operand
// storage (nullptr, 0). init() reserves NumReservedValues + 1 slots: +1 for
// the parent pad operand, and one more when an unwind destination is present.

CatchSwitchInst::CatchSwitchInst(Value *ParentPad, BasicBlock *UnwindDest,
                                 unsigned NumReservedValues,
                                 const Twine &NameStr,
                                 BasicBlock::iterator InsertBefore)
    : Instruction(ParentPad->getType(), Instruction::CatchSwitch, nullptr, 0,
                  InsertBefore) {
  if (UnwindDest)
    ++NumReservedValues;
  init(ParentPad, UnwindDest, NumReservedValues + 1);
  setName(NameStr);
}

CatchSwitchInst::CatchSwitchInst(Value *ParentPad, BasicBlock *UnwindDest,
                                 unsigned NumReservedValues,
                                 const Twine &NameStr,
                                 Instruction *InsertBefore)
    : Instruction(ParentPad->getType(), Instruction::CatchSwitch, nullptr, 0,
                  InsertBefore) {
  if (UnwindDest)
    ++NumReservedValues;
  init(ParentPad, UnwindDest, NumReservedValues + 1);
  setName(NameStr);
}

CatchSwitchInst::CatchSwitchInst(Value *ParentPad, BasicBlock *UnwindDest,
                                 unsigned NumReservedValues,
                                 const Twine &NameStr, BasicBlock *InsertAtEnd)
    : Instruction(ParentPad->getType(), Instruction::CatchSwitch, nullptr, 0,
                  InsertAtEnd) {
  if (UnwindDest)
    ++NumReservedValues;
  init(ParentPad, UnwindDest, NumReservedValues + 1);
  setName(NameStr);
}
1215
// Copy constructor. init() reserves the same operand space as CSI and sets
// operand 0 (the parent pad) and any unwind destination; the remaining
// operands (handlers) are then copied starting at index 1.
CatchSwitchInst::CatchSwitchInst(const CatchSwitchInst &CSI)
    : Instruction(CSI.getType(), Instruction::CatchSwitch, nullptr,
                  CSI.getNumOperands()) {
  init(CSI.getParentPad(), CSI.getUnwindDest(), CSI.getNumOperands());
  setNumHungOffUseOperands(ReservedSpace);
  Use *OL = getOperandList();
  const Use *InOL = CSI.getOperandList();
  for (unsigned I = 1, E = ReservedSpace; I != E; ++I)
    OL[I] = InOL[I];
}
1226
// Allocate the hung-off operand storage and set the always-present operands:
// operand 0 is the parent pad, operand 1 (optional) the unwind destination.
// Handler operands are appended later via addHandler().
void CatchSwitchInst::init(Value *ParentPad, BasicBlock *UnwindDest,
                           unsigned NumReservedValues) {
  assert(ParentPad && NumReservedValues);

  ReservedSpace = NumReservedValues;
  // Operand count starts at 1 (parent pad) or 2 (plus unwind dest); storage
  // must be allocated before any Op<> assignment below.
  setNumHungOffUseOperands(UnwindDest ? 2 : 1);
  allocHungoffUses(ReservedSpace);

  Op<0>() = ParentPad;
  if (UnwindDest) {
    setSubclassData<UnwindDestField>(true);
    setUnwindDest(UnwindDest);
  }
}
1241
1242/// growOperands - grow operands - This grows the operand list in response to a
1243/// push_back style of operation. This grows the number of ops by 2 times.
1244void CatchSwitchInst::growOperands(unsigned Size) {
1245 unsigned NumOperands = getNumOperands();
1246 assert(NumOperands >= 1);
1247 if (ReservedSpace >= NumOperands + Size)
1248 return;
1249 ReservedSpace = (NumOperands + Size / 2) * 2;
1250 growHungoffUses(ReservedSpace);
1251}
1252
  // NOTE(review): the signature line is elided in this extracted view; this is
  // the body of CatchSwitchInst::addHandler(BasicBlock *Handler), which
  // appends one handler operand, growing storage if needed.
  unsigned OpNo = getNumOperands();
  growOperands(1);
  assert(OpNo < ReservedSpace && "Growing didn't work!");
  // NOTE(review): the operand-count increment (setNumHungOffUseOperands)
  // appears elided here -- confirm against the original source.
  getOperandList()[OpNo] = Handler;
}
1260
  // NOTE(review): the signature line is elided in this extracted view; this is
  // the body of CatchSwitchInst::removeHandler, which erases the handler at
  // iterator HI by shifting later handlers down.
  // Move all subsequent handlers up one.
  Use *EndDst = op_end() - 1;
  for (Use *CurDst = HI.getCurrent(); CurDst != EndDst; ++CurDst)
    *CurDst = *(CurDst + 1);
  // Null out the last handler use.
  *EndDst = nullptr;

  // NOTE(review): the trailing operand-count decrement appears elided here.
}
1271
1272//===----------------------------------------------------------------------===//
1273// FuncletPadInst Implementation
1274//===----------------------------------------------------------------------===//
1275void FuncletPadInst::init(Value *ParentPad, ArrayRef<Value *> Args,
1276 const Twine &NameStr) {
1277 assert(getNumOperands() == 1 + Args.size() && "NumOperands not set up?");
1278 llvm::copy(Args, op_begin());
1279 setParentPad(ParentPad);
1280 setName(NameStr);
1281}
1282
// Copy constructor. Copies every operand (pad arguments plus parent pad) from
// FPI into the co-allocated operand storage.
// NOTE(review): a trailing statement appears elided from this view -- confirm
// against the original source.
FuncletPadInst::FuncletPadInst(const FuncletPadInst &FPI)
    : Instruction(FPI.getType(), FPI.getOpcode(),
                  OperandTraits<FuncletPadInst>::op_end(this) -
                      FPI.getNumOperands(),
                  FPI.getNumOperands()) {
  std::copy(FPI.op_begin(), FPI.op_end(), op_begin());
}
1291
// FuncletPadInst constructors (catchpad/cleanuppad, selected by Op). Values
// is 1 + Args.size(); operand storage is co-allocated before the instruction
// and filled in by init().

FuncletPadInst::FuncletPadInst(Instruction::FuncletPadOps Op, Value *ParentPad,
                               ArrayRef<Value *> Args, unsigned Values,
                               const Twine &NameStr,
                               BasicBlock::iterator InsertBefore)
    : Instruction(ParentPad->getType(), Op,
                  OperandTraits<FuncletPadInst>::op_end(this) - Values, Values,
                  InsertBefore) {
  init(ParentPad, Args, NameStr);
}

FuncletPadInst::FuncletPadInst(Instruction::FuncletPadOps Op, Value *ParentPad,
                               ArrayRef<Value *> Args, unsigned Values,
                               const Twine &NameStr, Instruction *InsertBefore)
    : Instruction(ParentPad->getType(), Op,
                  OperandTraits<FuncletPadInst>::op_end(this) - Values, Values,
                  InsertBefore) {
  init(ParentPad, Args, NameStr);
}

FuncletPadInst::FuncletPadInst(Instruction::FuncletPadOps Op, Value *ParentPad,
                               ArrayRef<Value *> Args, unsigned Values,
                               const Twine &NameStr, BasicBlock *InsertAtEnd)
    : Instruction(ParentPad->getType(), Op,
                  OperandTraits<FuncletPadInst>::op_end(this) - Values, Values,
                  InsertAtEnd) {
  init(ParentPad, Args, NameStr);
}
1319
1320//===----------------------------------------------------------------------===//
1321// UnreachableInst Implementation
1322//===----------------------------------------------------------------------===//
1323
// UnreachableInst constructors: a zero-operand terminator of void type.
// NOTE(review): each constructor's leading signature line (taking the
// LLVMContext) is elided in this extracted view; the three bodies below
// insert at an iterator, before an instruction, and at the end of a block.
                                 BasicBlock::iterator InsertBefore)
    : Instruction(Type::getVoidTy(Context), Instruction::Unreachable, nullptr,
                  0, InsertBefore) {}
                                 Instruction *InsertBefore)
    : Instruction(Type::getVoidTy(Context), Instruction::Unreachable, nullptr,
                  0, InsertBefore) {}
    : Instruction(Type::getVoidTy(Context), Instruction::Unreachable, nullptr,
                  0, InsertAtEnd) {}
1335
1336//===----------------------------------------------------------------------===//
1337// BranchInst Implementation
1338//===----------------------------------------------------------------------===//
1339
1340void BranchInst::AssertOK() {
1341 if (isConditional())
1342 assert(getCondition()->getType()->isIntegerTy(1) &&
1343 "May only branch on boolean predicates!");
1344}
1345
// BranchInst constructors. Unconditional branches have 1 operand (the
// destination); conditional branches have 3. Operands are addressed from the
// end of the co-allocated list: Op<-1> is the taken ('true') destination,
// Op<-2> the 'false' destination, Op<-3> the condition.

BranchInst::BranchInst(BasicBlock *IfTrue, BasicBlock::iterator InsertBefore)
    : Instruction(Type::getVoidTy(IfTrue->getContext()), Instruction::Br,
                  OperandTraits<BranchInst>::op_end(this) - 1, 1,
                  InsertBefore) {
  assert(IfTrue && "Branch destination may not be null!");
  Op<-1>() = IfTrue;
}

BranchInst::BranchInst(BasicBlock *IfTrue, Instruction *InsertBefore)
    : Instruction(Type::getVoidTy(IfTrue->getContext()), Instruction::Br,
                  OperandTraits<BranchInst>::op_end(this) - 1, 1,
                  InsertBefore) {
  assert(IfTrue && "Branch destination may not be null!");
  Op<-1>() = IfTrue;
}

BranchInst::BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond,
                       BasicBlock::iterator InsertBefore)
    : Instruction(Type::getVoidTy(IfTrue->getContext()), Instruction::Br,
                  OperandTraits<BranchInst>::op_end(this) - 3, 3,
                  InsertBefore) {
  // Assign in order of operand index to make use-list order predictable.
  Op<-3>() = Cond;
  Op<-2>() = IfFalse;
  Op<-1>() = IfTrue;
#ifndef NDEBUG
  AssertOK();
#endif
}

BranchInst::BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond,
                       Instruction *InsertBefore)
    : Instruction(Type::getVoidTy(IfTrue->getContext()), Instruction::Br,
                  OperandTraits<BranchInst>::op_end(this) - 3, 3,
                  InsertBefore) {
  // Assign in order of operand index to make use-list order predictable.
  Op<-3>() = Cond;
  Op<-2>() = IfFalse;
  Op<-1>() = IfTrue;
#ifndef NDEBUG
  AssertOK();
#endif
}

BranchInst::BranchInst(BasicBlock *IfTrue, BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(IfTrue->getContext()), Instruction::Br,
                  OperandTraits<BranchInst>::op_end(this) - 1, 1, InsertAtEnd) {
  assert(IfTrue && "Branch destination may not be null!");
  Op<-1>() = IfTrue;
}

BranchInst::BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond,
                       BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(IfTrue->getContext()), Instruction::Br,
                  OperandTraits<BranchInst>::op_end(this) - 3, 3, InsertAtEnd) {
  // Assign in order of operand index to make use-list order predictable.
  Op<-3>() = Cond;
  Op<-2>() = IfFalse;
  Op<-1>() = IfTrue;
#ifndef NDEBUG
  AssertOK();
#endif
}
1409
// Copy constructor. Copies 1 operand (unconditional) or 3 (conditional).
// NOTE(review): a trailing statement appears elided from this view -- confirm
// against the original source.
BranchInst::BranchInst(const BranchInst &BI)
    : Instruction(Type::getVoidTy(BI.getContext()), Instruction::Br,
                  OperandTraits<BranchInst>::op_end(this) - BI.getNumOperands(),
                  BI.getNumOperands()) {
  // Assign in order of operand index to make use-list order predictable.
  if (BI.getNumOperands() != 1) {
    assert(BI.getNumOperands() == 3 && "BR can have 1 or 3 operands!");
    Op<-3>() = BI.Op<-3>();
    Op<-2>() = BI.Op<-2>();
  }
  Op<-1>() = BI.Op<-1>();
}
1423
// NOTE(review): the signature and the head of the leading assert of
// BranchInst::swapSuccessors() are elided in this extracted view; only the
// assertion message and the operand swap remain visible.
         "Cannot swap successors of an unconditional branch");
  Op<-1>().swap(Op<-2>());

  // Update profile metadata if present and it matches our structural
  // expectations.
  // NOTE(review): the call that performs the metadata update appears elided
  // here -- confirm against the original source.
}
1433
1434//===----------------------------------------------------------------------===//
1435// AllocaInst Implementation
1436//===----------------------------------------------------------------------===//
1437
1438static Value *getAISize(LLVMContext &Context, Value *Amt) {
1439 if (!Amt)
1440 Amt = ConstantInt::get(Type::getInt32Ty(Context), 1);
1441 else {
1442 assert(!isa<BasicBlock>(Amt) &&
1443 "Passed basic block into allocation size parameter! Use other ctor");
1444 assert(Amt->getType()->isIntegerTy() &&
1445 "Allocation array size is not an integer!");
1446 }
1447 return Amt;
1448}
1449
// computeAllocaDefaultAlign overloads: derive the default alignment for an
// alloca of type Ty from the DataLayout's preferred alignment at the
// insertion point.
// NOTE(review): the three overload signatures are elided in this extracted
// view; the bodies below take a BasicBlock, an iterator, and an Instruction.
  assert(BB && "Insertion BB cannot be null when alignment not provided!");
  assert(BB->getParent() &&
         "BB must be in a Function when alignment not provided!");
  const DataLayout &DL = BB->getModule()->getDataLayout();
  return DL.getPrefTypeAlign(Ty);
}

  return computeAllocaDefaultAlign(Ty, It->getParent());
}

  assert(I && "Insertion position cannot be null when alignment not provided!");
  return computeAllocaDefaultAlign(Ty, I->getParent());
}
1466
// AllocaInst constructors. The thin overloads delegate, defaulting the array
// size to the implicit single element (see getAISize) and the alignment to
// the DataLayout preferred alignment at the insertion point. The full
// constructors produce a pointer in AddrSpace and cache the allocated type.

AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, const Twine &Name,
                       BasicBlock::iterator InsertBefore)
    : AllocaInst(Ty, AddrSpace, /*ArraySize=*/nullptr, Name, InsertBefore) {}

AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, const Twine &Name,
                       Instruction *InsertBefore)
    : AllocaInst(Ty, AddrSpace, /*ArraySize=*/nullptr, Name, InsertBefore) {}

AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, const Twine &Name,
                       BasicBlock *InsertAtEnd)
    : AllocaInst(Ty, AddrSpace, /*ArraySize=*/nullptr, Name, InsertAtEnd) {}

AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
                       const Twine &Name, BasicBlock::iterator InsertBefore)
    : AllocaInst(Ty, AddrSpace, ArraySize,
                 computeAllocaDefaultAlign(Ty, InsertBefore), Name,
                 InsertBefore) {}

AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
                       const Twine &Name, Instruction *InsertBefore)
    : AllocaInst(Ty, AddrSpace, ArraySize,
                 computeAllocaDefaultAlign(Ty, InsertBefore), Name,
                 InsertBefore) {}

AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
                       const Twine &Name, BasicBlock *InsertAtEnd)
    : AllocaInst(Ty, AddrSpace, ArraySize,
                 computeAllocaDefaultAlign(Ty, InsertAtEnd), Name,
                 InsertAtEnd) {}

// NOTE(review): the alignment-setter line appears elided from the three full
// constructors below -- confirm against the original source.
AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
                       Align Align, const Twine &Name,
                       BasicBlock::iterator InsertBefore)
    : UnaryInstruction(PointerType::get(Ty, AddrSpace), Alloca,
                       getAISize(Ty->getContext(), ArraySize), InsertBefore),
      AllocatedType(Ty) {
  assert(!Ty->isVoidTy() && "Cannot allocate void!");
  setName(Name);
}

AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
                       Align Align, const Twine &Name,
                       Instruction *InsertBefore)
    : UnaryInstruction(PointerType::get(Ty, AddrSpace), Alloca,
                       getAISize(Ty->getContext(), ArraySize), InsertBefore),
      AllocatedType(Ty) {
  assert(!Ty->isVoidTy() && "Cannot allocate void!");
  setName(Name);
}

AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
                       Align Align, const Twine &Name, BasicBlock *InsertAtEnd)
    : UnaryInstruction(PointerType::get(Ty, AddrSpace), Alloca,
                       getAISize(Ty->getContext(), ArraySize), InsertAtEnd),
      AllocatedType(Ty) {
  assert(!Ty->isVoidTy() && "Cannot allocate void!");
  setName(Name);
}
1528
1529
  // NOTE(review): the signature line is elided in this extracted view; this is
  // the body of AllocaInst::isArrayAllocation(): true unless the array-size
  // operand is provably the constant 1.
  if (ConstantInt *CI = dyn_cast<ConstantInt>(getOperand(0)))
    return !CI->isOne();
  // Non-constant sizes are conservatively treated as array allocations.
  return true;
}
1535
/// isStaticAlloca - Return true if this alloca is in the entry block of the
/// function and is a constant size. If so, the code generator will fold it
/// into the prolog/epilog code, so it is basically free.
/// NOTE(review): the definition's signature line is elided in this view.
  // Must be constant size.
  if (!isa<ConstantInt>(getArraySize())) return false;

  // Must be in the entry block.
  const BasicBlock *Parent = getParent();
  return Parent->isEntryBlock() && !isUsedWithInAlloca();
}
1547
1548//===----------------------------------------------------------------------===//
1549// LoadInst Implementation
1550//===----------------------------------------------------------------------===//
1551
// Sanity-check a load's pointer operand.
// NOTE(review): the assert's condition line is elided in this extracted view;
// only the message text remains visible.
void LoadInst::AssertOK() {
         "Ptr must have pointer type.");
}
1556
// computeLoadStoreDefaultAlign overloads: derive the default alignment for a
// load/store of type Ty from the DataLayout's ABI alignment at the insertion
// point.
// NOTE(review): the three overload signatures are elided in this extracted
// view; the bodies below take a BasicBlock, an iterator, and an Instruction.
  assert(BB && "Insertion BB cannot be null when alignment not provided!");
  assert(BB->getParent() &&
         "BB must be in a Function when alignment not provided!");
  const DataLayout &DL = BB->getModule()->getDataLayout();
  return DL.getABITypeAlign(Ty);
}

  return computeLoadStoreDefaultAlign(Ty, It->getParent());
}

  assert(I && "Insertion position cannot be null when alignment not provided!");
  return computeLoadStoreDefaultAlign(Ty, I->getParent());
}
1573
// LoadInst constructors. The thin overloads delegate, defaulting volatility
// to false, alignment to the DataLayout ABI alignment of the loaded type, and
// ordering to NotAtomic/System scope.
// NOTE(review): the first three overloads' leading signature lines are elided
// in this extracted view.
                   BasicBlock::iterator InsertBef)
    : LoadInst(Ty, Ptr, Name, /*isVolatile=*/false, InsertBef) {}

                   Instruction *InsertBef)
    : LoadInst(Ty, Ptr, Name, /*isVolatile=*/false, InsertBef) {}

                   BasicBlock *InsertAE)
    : LoadInst(Ty, Ptr, Name, /*isVolatile=*/false, InsertAE) {}

LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
                   BasicBlock::iterator InsertBef)
    : LoadInst(Ty, Ptr, Name, isVolatile,
               computeLoadStoreDefaultAlign(Ty, InsertBef), InsertBef) {}

LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
                   Instruction *InsertBef)
    : LoadInst(Ty, Ptr, Name, isVolatile,
               computeLoadStoreDefaultAlign(Ty, InsertBef), InsertBef) {}

LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
                   BasicBlock *InsertAE)
    : LoadInst(Ty, Ptr, Name, isVolatile,
               computeLoadStoreDefaultAlign(Ty, InsertAE), InsertAE) {}

LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
                   Align Align, BasicBlock::iterator InsertBef)
    : LoadInst(Ty, Ptr, Name, isVolatile, Align, AtomicOrdering::NotAtomic,
               SyncScope::System, InsertBef) {}

LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
                   Align Align, Instruction *InsertBef)
    : LoadInst(Ty, Ptr, Name, isVolatile, Align, AtomicOrdering::NotAtomic,
               SyncScope::System, InsertBef) {}

LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
                   Align Align, BasicBlock *InsertAE)
    : LoadInst(Ty, Ptr, Name, isVolatile, Align, AtomicOrdering::NotAtomic,
               SyncScope::System, InsertAE) {}

// NOTE(review): in the three full constructors below, the middle signature
// line (Align/Order/SSID parameters) and the volatility/alignment setter
// lines appear elided from this view -- confirm against the original source.
LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
                   BasicBlock::iterator InsertBef)
    : UnaryInstruction(Ty, Load, Ptr, InsertBef) {
  setAtomic(Order, SSID);
  AssertOK();
  setName(Name);
}

LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
                   Instruction *InsertBef)
    : UnaryInstruction(Ty, Load, Ptr, InsertBef) {
  setAtomic(Order, SSID);
  AssertOK();
  setName(Name);
}

LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
                   BasicBlock *InsertAE)
    : UnaryInstruction(Ty, Load, Ptr, InsertAE) {
  setAtomic(Order, SSID);
  AssertOK();
  setName(Name);
}
1648
1649//===----------------------------------------------------------------------===//
1650// StoreInst Implementation
1651//===----------------------------------------------------------------------===//
1652
// Sanity-check a store's operands: both non-null, address a pointer.
// NOTE(review): the condition line of the second assert is elided in this
// extracted view; only the message text remains visible.
void StoreInst::AssertOK() {
  assert(getOperand(0) && getOperand(1) && "Both operands must be non-null!");
         "Ptr must have pointer type!");
}
1658
// StoreInst constructors. Thin overloads delegate, defaulting volatility to
// false, alignment to the DataLayout ABI alignment of the stored type, and
// ordering to NotAtomic/System scope. The full constructors wire operand 0
// (the value) and operand 1 (the address).

StoreInst::StoreInst(Value *val, Value *addr, Instruction *InsertBefore)
    : StoreInst(val, addr, /*isVolatile=*/false, InsertBefore) {}

StoreInst::StoreInst(Value *val, Value *addr, BasicBlock *InsertAtEnd)
    : StoreInst(val, addr, /*isVolatile=*/false, InsertAtEnd) {}

// NOTE(review): this overload's signature line is elided in this view.
    : StoreInst(val, addr, /*isVolatile=*/false, InsertBefore) {}

StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile,
                     Instruction *InsertBefore)
    : StoreInst(val, addr, isVolatile,
                computeLoadStoreDefaultAlign(val->getType(), InsertBefore),
                InsertBefore) {}

StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile,
                     BasicBlock *InsertAtEnd)
    : StoreInst(val, addr, isVolatile,
                computeLoadStoreDefaultAlign(val->getType(), InsertAtEnd),
                InsertAtEnd) {}

StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile,
                     BasicBlock::iterator InsertBefore)
    : StoreInst(val, addr, isVolatile,
                computeLoadStoreDefaultAlign(val->getType(), &*InsertBefore),
                InsertBefore) {}

StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, Align Align,
                     Instruction *InsertBefore)
    : StoreInst(val, addr, isVolatile, Align, AtomicOrdering::NotAtomic,
                SyncScope::System, InsertBefore) {}

StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, Align Align,
                     BasicBlock *InsertAtEnd)
    : StoreInst(val, addr, isVolatile, Align, AtomicOrdering::NotAtomic,
                SyncScope::System, InsertAtEnd) {}

StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, Align Align,
                     BasicBlock::iterator InsertBefore)
    : StoreInst(val, addr, isVolatile, Align, AtomicOrdering::NotAtomic,
                SyncScope::System, InsertBefore) {}

// NOTE(review): the volatility/alignment setter lines appear elided from the
// three full constructors below -- confirm against the original source.
StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, Align Align,
                     AtomicOrdering Order, SyncScope::ID SSID,
                     Instruction *InsertBefore)
    : Instruction(Type::getVoidTy(val->getContext()), Store,
                  OperandTraits<StoreInst>::op_begin(this),
                  OperandTraits<StoreInst>::operands(this), InsertBefore) {
  Op<0>() = val;
  Op<1>() = addr;
  setAtomic(Order, SSID);
  AssertOK();
}

StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, Align Align,
                     AtomicOrdering Order, SyncScope::ID SSID,
                     BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(val->getContext()), Store,
                  OperandTraits<StoreInst>::op_begin(this),
                  OperandTraits<StoreInst>::operands(this), InsertAtEnd) {
  Op<0>() = val;
  Op<1>() = addr;
  setAtomic(Order, SSID);
  AssertOK();
}

StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, Align Align,
                     AtomicOrdering Order, SyncScope::ID SSID,
                     BasicBlock::iterator InsertBefore)
    : Instruction(Type::getVoidTy(val->getContext()), Store,
                  OperandTraits<StoreInst>::op_begin(this),
                  OperandTraits<StoreInst>::operands(this)) {
  Op<0>() = val;
  Op<1>() = addr;
  setAtomic(Order, SSID);
  // Insert only after the instruction is fully initialized.
  insertBefore(*InsertBefore->getParent(), InsertBefore);
  AssertOK();
}
1743
1744//===----------------------------------------------------------------------===//
1745// AtomicCmpXchgInst Implementation
1746//===----------------------------------------------------------------------===//
1747
// Shared constructor body for cmpxchg: wires the three operands (pointer,
// expected value, replacement value) and records the orderings, sync scope,
// and alignment.
// NOTE(review): the condition line of the pointer-type assert is elided in
// this extracted view; only the message text remains visible.
void AtomicCmpXchgInst::Init(Value *Ptr, Value *Cmp, Value *NewVal,
                             Align Alignment, AtomicOrdering SuccessOrdering,
                             AtomicOrdering FailureOrdering,
                             SyncScope::ID SSID) {
  Op<0>() = Ptr;
  Op<1>() = Cmp;
  Op<2>() = NewVal;
  setSuccessOrdering(SuccessOrdering);
  setFailureOrdering(FailureOrdering);
  setSyncScopeID(SSID);
  setAlignment(Alignment);

  assert(getOperand(0) && getOperand(1) && getOperand(2) &&
         "All operands must be non-null!");
         "Ptr must have pointer type!");
  assert(getOperand(1)->getType() == getOperand(2)->getType() &&
         "Cmp type and NewVal type must be same!");
}
1767
// AtomicCmpXchgInst constructors. The result type is the pair {T, i1}: the
// loaded value and the success flag.
// NOTE(review): each constructor's leading signature line (the Ptr/Cmp/NewVal
// parameters) is elided in this extracted view.
                                     Align Alignment,
                                     AtomicOrdering SuccessOrdering,
                                     AtomicOrdering FailureOrdering,
                                     SyncScope::ID SSID,
                                     BasicBlock::iterator InsertBefore)
    : Instruction(
          StructType::get(Cmp->getType(), Type::getInt1Ty(Cmp->getContext())),
          AtomicCmpXchg, OperandTraits<AtomicCmpXchgInst>::op_begin(this),
          OperandTraits<AtomicCmpXchgInst>::operands(this), InsertBefore) {
  Init(Ptr, Cmp, NewVal, Alignment, SuccessOrdering, FailureOrdering, SSID);
}

                                     Align Alignment,
                                     AtomicOrdering SuccessOrdering,
                                     AtomicOrdering FailureOrdering,
                                     SyncScope::ID SSID,
                                     Instruction *InsertBefore)
    : Instruction(
          StructType::get(Cmp->getType(), Type::getInt1Ty(Cmp->getContext())),
          AtomicCmpXchg, OperandTraits<AtomicCmpXchgInst>::op_begin(this),
          OperandTraits<AtomicCmpXchgInst>::operands(this), InsertBefore) {
  Init(Ptr, Cmp, NewVal, Alignment, SuccessOrdering, FailureOrdering, SSID);
}

                                     Align Alignment,
                                     AtomicOrdering SuccessOrdering,
                                     AtomicOrdering FailureOrdering,
                                     SyncScope::ID SSID,
                                     BasicBlock *InsertAtEnd)
    : Instruction(
          StructType::get(Cmp->getType(), Type::getInt1Ty(Cmp->getContext())),
          AtomicCmpXchg, OperandTraits<AtomicCmpXchgInst>::op_begin(this),
          OperandTraits<AtomicCmpXchgInst>::operands(this), InsertAtEnd) {
  Init(Ptr, Cmp, NewVal, Alignment, SuccessOrdering, FailureOrdering, SSID);
}
1806
1807//===----------------------------------------------------------------------===//
1808// AtomicRMWInst Implementation
1809//===----------------------------------------------------------------------===//
1810
// Shared constructor body for atomicrmw: wires the pointer and value operands
// and records ordering, sync scope, and alignment.
// NOTE(review): the statement recording the BinOp operation and the condition
// line of the pointer-type assert appear elided in this extracted view --
// confirm against the original source.
void AtomicRMWInst::Init(BinOp Operation, Value *Ptr, Value *Val,
                         Align Alignment, AtomicOrdering Ordering,
                         SyncScope::ID SSID) {
  assert(Ordering != AtomicOrdering::NotAtomic &&
         "atomicrmw instructions can only be atomic.");
  assert(Ordering != AtomicOrdering::Unordered &&
         "atomicrmw instructions cannot be unordered.");
  Op<0>() = Ptr;
  Op<1>() = Val;
  setOrdering(Ordering);
  setSyncScopeID(SSID);
  setAlignment(Alignment);

  assert(getOperand(0) && getOperand(1) &&
         "All operands must be non-null!");
         "Ptr must have pointer type!");
  assert(Ordering != AtomicOrdering::NotAtomic &&
         "AtomicRMW instructions must be atomic!");
}
1832
// AtomicRMWInst constructors. The result type equals the value operand's
// type (the old memory contents are returned).
// NOTE(review): each constructor's leading signature line (the BinOp/Ptr/Val
// parameters) is elided in this extracted view.
                             Align Alignment, AtomicOrdering Ordering,
                             SyncScope::ID SSID,
                             BasicBlock::iterator InsertBefore)
    : Instruction(Val->getType(), AtomicRMW,
                  OperandTraits<AtomicRMWInst>::op_begin(this),
                  OperandTraits<AtomicRMWInst>::operands(this), InsertBefore) {
  Init(Operation, Ptr, Val, Alignment, Ordering, SSID);
}

                             Align Alignment, AtomicOrdering Ordering,
                             SyncScope::ID SSID, Instruction *InsertBefore)
    : Instruction(Val->getType(), AtomicRMW,
                  OperandTraits<AtomicRMWInst>::op_begin(this),
                  OperandTraits<AtomicRMWInst>::operands(this), InsertBefore) {
  Init(Operation, Ptr, Val, Alignment, Ordering, SSID);
}

                             Align Alignment, AtomicOrdering Ordering,
                             SyncScope::ID SSID, BasicBlock *InsertAtEnd)
    : Instruction(Val->getType(), AtomicRMW,
                  OperandTraits<AtomicRMWInst>::op_begin(this),
                  OperandTraits<AtomicRMWInst>::operands(this), InsertAtEnd) {
  Init(Operation, Ptr, Val, Alignment, Ordering, SSID);
}
1860
// Map an atomicrmw BinOp to its textual keyword as printed in IR.
// NOTE(review): the function signature and several 'case' labels are elided
// in this extracted view; each orphaned 'return' below corresponds to the
// matching AtomicRMWInst::BinOp case in the original source.
  switch (Op) {
    return "xchg";
  case AtomicRMWInst::Add:
    return "add";
  case AtomicRMWInst::Sub:
    return "sub";
  case AtomicRMWInst::And:
    return "and";
    return "nand";
  case AtomicRMWInst::Or:
    return "or";
  case AtomicRMWInst::Xor:
    return "xor";
  case AtomicRMWInst::Max:
    return "max";
  case AtomicRMWInst::Min:
    return "min";
    return "umax";
    return "umin";
    return "fadd";
    return "fsub";
    return "fmax";
    return "fmin";
    return "uinc_wrap";
    return "udec_wrap";
    return "<invalid operation>";
  }

  llvm_unreachable("invalid atomicrmw operation");
}
1903
1904//===----------------------------------------------------------------------===//
1905// FenceInst Implementation
1906//===----------------------------------------------------------------------===//
1907
// FenceInst constructors: no operands, void type; only the ordering and sync
// scope are recorded.
// NOTE(review): each constructor's leading signature line (the LLVMContext
// and AtomicOrdering parameters) is elided in this extracted view.
                     SyncScope::ID SSID, BasicBlock::iterator InsertBefore)
    : Instruction(Type::getVoidTy(C), Fence, nullptr, 0, InsertBefore) {
  setOrdering(Ordering);
  setSyncScopeID(SSID);
}

                     SyncScope::ID SSID,
                     Instruction *InsertBefore)
    : Instruction(Type::getVoidTy(C), Fence, nullptr, 0, InsertBefore) {
  setOrdering(Ordering);
  setSyncScopeID(SSID);
}

                     SyncScope::ID SSID,
                     BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(C), Fence, nullptr, 0, InsertAtEnd) {
  setOrdering(Ordering);
  setSyncScopeID(SSID);
}
1930
1931//===----------------------------------------------------------------------===//
1932// GetElementPtrInst Implementation
1933//===----------------------------------------------------------------------===//
1934
1935void GetElementPtrInst::init(Value *Ptr, ArrayRef<Value *> IdxList,
1936 const Twine &Name) {
1937 assert(getNumOperands() == 1 + IdxList.size() &&
1938 "NumOperands not initialized?");
1939 Op<0>() = Ptr;
1940 llvm::copy(IdxList, op_begin() + 1);
1941 setName(Name);
1942}
1943
// Copy constructor. Copies all operands plus the cached source and result
// element types.
// NOTE(review): a trailing statement appears elided from this view -- confirm
// against the original source.
GetElementPtrInst::GetElementPtrInst(const GetElementPtrInst &GEPI)
    : Instruction(GEPI.getType(), GetElementPtr,
                  OperandTraits<GetElementPtrInst>::op_end(this) -
                      GEPI.getNumOperands(),
                  GEPI.getNumOperands()),
      SourceElementType(GEPI.SourceElementType),
      ResultElementType(GEPI.ResultElementType) {
  std::copy(GEPI.op_begin(), GEPI.op_end(), op_begin());
}
1954
// getTypeAtIndex overloads: step one level into an aggregate or vector type,
// returning nullptr when the index is invalid for the type.
// NOTE(review): both overload signatures (taking a Value* index and an
// unsigned index) are elided in this extracted view.
  if (auto *Struct = dyn_cast<StructType>(Ty)) {
    // Struct fields have distinct types, so the index must be validated.
    if (!Struct->indexValid(Idx))
      return nullptr;
    return Struct->getTypeAtIndex(Idx);
  }
  // Non-struct aggregates require an integer (or integer-vector) index...
  if (!Idx->getType()->isIntOrIntVectorTy())
    return nullptr;
  // ...but the index value is irrelevant: every element has the same type.
  if (auto *Array = dyn_cast<ArrayType>(Ty))
    return Array->getElementType();
  if (auto *Vector = dyn_cast<VectorType>(Ty))
    return Vector->getElementType();
  return nullptr;
}

  if (auto *Struct = dyn_cast<StructType>(Ty)) {
    if (Idx >= Struct->getNumElements())
      return nullptr;
    return Struct->getElementType(Idx);
  }
  if (auto *Array = dyn_cast<ArrayType>(Ty))
    return Array->getElementType();
  if (auto *Vector = dyn_cast<VectorType>(Ty))
    return Vector->getElementType();
  return nullptr;
}
1982
// Walk Ty through all indices after the first (the first GEP index steps
// through the pointer operand and does not change the indexed type),
// returning nullptr as soon as an index is invalid.
// NOTE(review): the templated function's signature line and the loop's
// type-stepping statement are elided in this extracted view, as are the
// signatures of the three public getIndexedType wrappers below.
template <typename IndexTy>
  if (IdxList.empty())
    return Ty;
  for (IndexTy V : IdxList.slice(1)) {
    if (!Ty)
      return Ty;
  }
  return Ty;
}

  return getIndexedTypeInternal(Ty, IdxList);
}

                                        ArrayRef<Constant *> IdxList) {
  return getIndexedTypeInternal(Ty, IdxList);
}

  return getIndexedTypeInternal(Ty, IdxList);
}
2007
/// hasAllZeroIndices - Return true if all of the indices of this GEP are
/// zeros. If so, the result pointer and the first operand have the same
/// value, just potentially different types.
/// NOTE(review): the definition's signature line is elided in this view.
  for (unsigned i = 1, e = getNumOperands(); i != e; ++i) {
    if (ConstantInt *CI = dyn_cast<ConstantInt>(getOperand(i))) {
      if (!CI->isZero()) return false;
    } else {
      // A non-constant index cannot be proven zero.
      return false;
    }
  }
  return true;
}
2021
2022/// hasAllConstantIndices - Return true if all of the indices of this GEP are
2023/// constant integers. If so, the result pointer and the first operand have
2024/// a constant offset between them.
2026 for (unsigned i = 1, e = getNumOperands(); i != e; ++i) {
2027 if (!isa<ConstantInt>(getOperand(i)))
2028 return false;
2029 }
2030 return true;
2031}
2032
2034 cast<GEPOperator>(this)->setIsInBounds(B);
2035}
2036
2038 return cast<GEPOperator>(this)->isInBounds();
2039}
2040
2042 APInt &Offset) const {
2043 // Delegate to the generic GEPOperator implementation.
2044 return cast<GEPOperator>(this)->accumulateConstantOffset(DL, Offset);
2045}
2046
2048 const DataLayout &DL, unsigned BitWidth,
2049 MapVector<Value *, APInt> &VariableOffsets,
2050 APInt &ConstantOffset) const {
2051 // Delegate to the generic GEPOperator implementation.
2052 return cast<GEPOperator>(this)->collectOffset(DL, BitWidth, VariableOffsets,
2053 ConstantOffset);
2054}
2055
2056//===----------------------------------------------------------------------===//
2057// ExtractElementInst Implementation
2058//===----------------------------------------------------------------------===//
2059
// Construct an extractelement instruction whose result type is the element
// type of \p Val's vector type, inserted before the given iterator position.
ExtractElementInst::ExtractElementInst(Value *Val, Value *Index,
                                       const Twine &Name,
                                       BasicBlock::iterator InsertBef)
    : Instruction(
          cast<VectorType>(Val->getType())->getElementType(), ExtractElement,
          OperandTraits<ExtractElementInst>::op_begin(this), 2, InsertBef) {
  assert(isValidOperands(Val, Index) &&
         "Invalid extractelement instruction operands!");
  Op<0>() = Val;
  Op<1>() = Index;
  setName(Name);
}

// Same as above, but inserted immediately before an existing instruction.
ExtractElementInst::ExtractElementInst(Value *Val, Value *Index,
                                       const Twine &Name,
                                       Instruction *InsertBef)
  : Instruction(cast<VectorType>(Val->getType())->getElementType(),
                ExtractElement,
                OperandTraits<ExtractElementInst>::op_begin(this),
                2, InsertBef) {
  assert(isValidOperands(Val, Index) &&
         "Invalid extractelement instruction operands!");
  Op<0>() = Val;
  Op<1>() = Index;
  setName(Name);
}

// Same as above, but appended to the end of a basic block.
ExtractElementInst::ExtractElementInst(Value *Val, Value *Index,
                                       const Twine &Name,
                                       BasicBlock *InsertAE)
  : Instruction(cast<VectorType>(Val->getType())->getElementType(),
                ExtractElement,
                OperandTraits<ExtractElementInst>::op_begin(this),
                2, InsertAE) {
  assert(isValidOperands(Val, Index) &&
         "Invalid extractelement instruction operands!");

  Op<0>() = Val;
  Op<1>() = Index;
  setName(Name);
}
2101
2103 if (!Val->getType()->isVectorTy() || !Index->getType()->isIntegerTy())
2104 return false;
2105 return true;
2106}
2107
2108//===----------------------------------------------------------------------===//
2109// InsertElementInst Implementation
2110//===----------------------------------------------------------------------===//
2111
// Construct an insertelement instruction (result type equals \p Vec's type),
// inserted before the given iterator position.
InsertElementInst::InsertElementInst(Value *Vec, Value *Elt, Value *Index,
                                     const Twine &Name,
                                     BasicBlock::iterator InsertBef)
    : Instruction(Vec->getType(), InsertElement,
                  OperandTraits<InsertElementInst>::op_begin(this), 3,
                  InsertBef) {
  assert(isValidOperands(Vec, Elt, Index) &&
         "Invalid insertelement instruction operands!");
  Op<0>() = Vec;
  Op<1>() = Elt;
  Op<2>() = Index;
  setName(Name);
}

// Same as above, but inserted immediately before an existing instruction.
InsertElementInst::InsertElementInst(Value *Vec, Value *Elt, Value *Index,
                                     const Twine &Name,
                                     Instruction *InsertBef)
  : Instruction(Vec->getType(), InsertElement,
                OperandTraits<InsertElementInst>::op_begin(this),
                3, InsertBef) {
  assert(isValidOperands(Vec, Elt, Index) &&
         "Invalid insertelement instruction operands!");
  Op<0>() = Vec;
  Op<1>() = Elt;
  Op<2>() = Index;
  setName(Name);
}

// Same as above, but appended to the end of a basic block.
InsertElementInst::InsertElementInst(Value *Vec, Value *Elt, Value *Index,
                                     const Twine &Name,
                                     BasicBlock *InsertAE)
  : Instruction(Vec->getType(), InsertElement,
                OperandTraits<InsertElementInst>::op_begin(this),
                3, InsertAE) {
  assert(isValidOperands(Vec, Elt, Index) &&
         "Invalid insertelement instruction operands!");

  Op<0>() = Vec;
  Op<1>() = Elt;
  Op<2>() = Index;
  setName(Name);
}
2154
2156 const Value *Index) {
2157 if (!Vec->getType()->isVectorTy())
2158 return false; // First operand of insertelement must be vector type.
2159
2160 if (Elt->getType() != cast<VectorType>(Vec->getType())->getElementType())
2161 return false;// Second operand of insertelement must be vector element type.
2162
2163 if (!Index->getType()->isIntegerTy())
2164 return false; // Third operand of insertelement must be i32.
2165 return true;
2166}
2167
2168//===----------------------------------------------------------------------===//
2169// ShuffleVectorInst Implementation
2170//===----------------------------------------------------------------------===//
2171
2173 assert(V && "Cannot create placeholder of nullptr V");
2174 return PoisonValue::get(V->getType());
2175}
2176
2178 BasicBlock::iterator InsertBefore)
2180 InsertBefore) {}
2181
2183 Instruction *InsertBefore)
2185 InsertBefore) {}
2186
2188 BasicBlock *InsertAtEnd)
2190 InsertAtEnd) {}
2191
2193 const Twine &Name,
2194 BasicBlock::iterator InsertBefore)
2196 InsertBefore) {}
2197
2199 const Twine &Name,
2200 Instruction *InsertBefore)
2202 InsertBefore) {}
2203
2205 const Twine &Name, BasicBlock *InsertAtEnd)
2207 InsertAtEnd) {}
2208
2210 const Twine &Name,
2211 BasicBlock::iterator InsertBefore)
2212 : Instruction(
2213 VectorType::get(cast<VectorType>(V1->getType())->getElementType(),
2214 cast<VectorType>(Mask->getType())->getElementCount()),
2215 ShuffleVector, OperandTraits<ShuffleVectorInst>::op_begin(this),
2216 OperandTraits<ShuffleVectorInst>::operands(this), InsertBefore) {
2217 assert(isValidOperands(V1, V2, Mask) &&
2218 "Invalid shuffle vector instruction operands!");
2219
2220 Op<0>() = V1;
2221 Op<1>() = V2;
2222 SmallVector<int, 16> MaskArr;
2223 getShuffleMask(cast<Constant>(Mask), MaskArr);
2224 setShuffleMask(MaskArr);
2225 setName(Name);
2226}
2227
2229 const Twine &Name,
2230 Instruction *InsertBefore)
2231 : Instruction(
2232 VectorType::get(cast<VectorType>(V1->getType())->getElementType(),
2233 cast<VectorType>(Mask->getType())->getElementCount()),
2234 ShuffleVector, OperandTraits<ShuffleVectorInst>::op_begin(this),
2235 OperandTraits<ShuffleVectorInst>::operands(this), InsertBefore) {
2236 assert(isValidOperands(V1, V2, Mask) &&
2237 "Invalid shuffle vector instruction operands!");
2238
2239 Op<0>() = V1;
2240 Op<1>() = V2;
2241 SmallVector<int, 16> MaskArr;
2242 getShuffleMask(cast<Constant>(Mask), MaskArr);
2243 setShuffleMask(MaskArr);
2244 setName(Name);
2245}
2246
2248 const Twine &Name, BasicBlock *InsertAtEnd)
2249 : Instruction(
2250 VectorType::get(cast<VectorType>(V1->getType())->getElementType(),
2251 cast<VectorType>(Mask->getType())->getElementCount()),
2252 ShuffleVector, OperandTraits<ShuffleVectorInst>::op_begin(this),
2253 OperandTraits<ShuffleVectorInst>::operands(this), InsertAtEnd) {
2254 assert(isValidOperands(V1, V2, Mask) &&
2255 "Invalid shuffle vector instruction operands!");
2256
2257 Op<0>() = V1;
2258 Op<1>() = V2;
2259 SmallVector<int, 16> MaskArr;
2260 getShuffleMask(cast<Constant>(Mask), MaskArr);
2261 setShuffleMask(MaskArr);
2262 setName(Name);
2263}
2264
2266 const Twine &Name,
2267 BasicBlock::iterator InsertBefore)
2268 : Instruction(
2269 VectorType::get(cast<VectorType>(V1->getType())->getElementType(),
2270 Mask.size(), isa<ScalableVectorType>(V1->getType())),
2271 ShuffleVector, OperandTraits<ShuffleVectorInst>::op_begin(this),
2272 OperandTraits<ShuffleVectorInst>::operands(this), InsertBefore) {
2273 assert(isValidOperands(V1, V2, Mask) &&
2274 "Invalid shuffle vector instruction operands!");
2275 Op<0>() = V1;
2276 Op<1>() = V2;
2277 setShuffleMask(Mask);
2278 setName(Name);
2279}
2280
2282 const Twine &Name,
2283 Instruction *InsertBefore)
2284 : Instruction(
2285 VectorType::get(cast<VectorType>(V1->getType())->getElementType(),
2286 Mask.size(), isa<ScalableVectorType>(V1->getType())),
2287 ShuffleVector, OperandTraits<ShuffleVectorInst>::op_begin(this),
2288 OperandTraits<ShuffleVectorInst>::operands(this), InsertBefore) {
2289 assert(isValidOperands(V1, V2, Mask) &&
2290 "Invalid shuffle vector instruction operands!");
2291 Op<0>() = V1;
2292 Op<1>() = V2;
2293 setShuffleMask(Mask);
2294 setName(Name);
2295}
2296
2298 const Twine &Name, BasicBlock *InsertAtEnd)
2299 : Instruction(
2300 VectorType::get(cast<VectorType>(V1->getType())->getElementType(),
2301 Mask.size(), isa<ScalableVectorType>(V1->getType())),
2302 ShuffleVector, OperandTraits<ShuffleVectorInst>::op_begin(this),
2303 OperandTraits<ShuffleVectorInst>::operands(this), InsertAtEnd) {
2304 assert(isValidOperands(V1, V2, Mask) &&
2305 "Invalid shuffle vector instruction operands!");
2306
2307 Op<0>() = V1;
2308 Op<1>() = V2;
2309 setShuffleMask(Mask);
2310 setName(Name);
2311}
2312
2314 int NumOpElts = cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
2315 int NumMaskElts = ShuffleMask.size();
2316 SmallVector<int, 16> NewMask(NumMaskElts);
2317 for (int i = 0; i != NumMaskElts; ++i) {
2318 int MaskElt = getMaskValue(i);
2319 if (MaskElt == PoisonMaskElem) {
2320 NewMask[i] = PoisonMaskElem;
2321 continue;
2322 }
2323 assert(MaskElt >= 0 && MaskElt < 2 * NumOpElts && "Out-of-range mask");
2324 MaskElt = (MaskElt < NumOpElts) ? MaskElt + NumOpElts : MaskElt - NumOpElts;
2325 NewMask[i] = MaskElt;
2326 }
2327 setShuffleMask(NewMask);
2328 Op<0>().swap(Op<1>());
2329}
2330
2332 ArrayRef<int> Mask) {
2333 // V1 and V2 must be vectors of the same type.
2334 if (!isa<VectorType>(V1->getType()) || V1->getType() != V2->getType())
2335 return false;
2336
2337 // Make sure the mask elements make sense.
2338 int V1Size =
2339 cast<VectorType>(V1->getType())->getElementCount().getKnownMinValue();
2340 for (int Elem : Mask)
2341 if (Elem != PoisonMaskElem && Elem >= V1Size * 2)
2342 return false;
2343
2344 if (isa<ScalableVectorType>(V1->getType()))
2345 if ((Mask[0] != 0 && Mask[0] != PoisonMaskElem) || !all_equal(Mask))
2346 return false;
2347
2348 return true;
2349}
2350
2352 const Value *Mask) {
2353 // V1 and V2 must be vectors of the same type.
2354 if (!V1->getType()->isVectorTy() || V1->getType() != V2->getType())
2355 return false;
2356
2357 // Mask must be vector of i32, and must be the same kind of vector as the
2358 // input vectors
2359 auto *MaskTy = dyn_cast<VectorType>(Mask->getType());
2360 if (!MaskTy || !MaskTy->getElementType()->isIntegerTy(32) ||
2361 isa<ScalableVectorType>(MaskTy) != isa<ScalableVectorType>(V1->getType()))
2362 return false;
2363
2364 // Check to see if Mask is valid.
2365 if (isa<UndefValue>(Mask) || isa<ConstantAggregateZero>(Mask))
2366 return true;
2367
2368 if (const auto *MV = dyn_cast<ConstantVector>(Mask)) {
2369 unsigned V1Size = cast<FixedVectorType>(V1->getType())->getNumElements();
2370 for (Value *Op : MV->operands()) {
2371 if (auto *CI = dyn_cast<ConstantInt>(Op)) {
2372 if (CI->uge(V1Size*2))
2373 return false;
2374 } else if (!isa<UndefValue>(Op)) {
2375 return false;
2376 }
2377 }
2378 return true;
2379 }
2380
2381 if (const auto *CDS = dyn_cast<ConstantDataSequential>(Mask)) {
2382 unsigned V1Size = cast<FixedVectorType>(V1->getType())->getNumElements();
2383 for (unsigned i = 0, e = cast<FixedVectorType>(MaskTy)->getNumElements();
2384 i != e; ++i)
2385 if (CDS->getElementAsInteger(i) >= V1Size*2)
2386 return false;
2387 return true;
2388 }
2389
2390 return false;
2391}
2392
2394 SmallVectorImpl<int> &Result) {
2395 ElementCount EC = cast<VectorType>(Mask->getType())->getElementCount();
2396
2397 if (isa<ConstantAggregateZero>(Mask)) {
2398 Result.resize(EC.getKnownMinValue(), 0);
2399 return;
2400 }
2401
2402 Result.reserve(EC.getKnownMinValue());
2403
2404 if (EC.isScalable()) {
2405 assert((isa<ConstantAggregateZero>(Mask) || isa<UndefValue>(Mask)) &&
2406 "Scalable vector shuffle mask must be undef or zeroinitializer");
2407 int MaskVal = isa<UndefValue>(Mask) ? -1 : 0;
2408 for (unsigned I = 0; I < EC.getKnownMinValue(); ++I)
2409 Result.emplace_back(MaskVal);
2410 return;
2411 }
2412
2413 unsigned NumElts = EC.getKnownMinValue();
2414
2415 if (auto *CDS = dyn_cast<ConstantDataSequential>(Mask)) {
2416 for (unsigned i = 0; i != NumElts; ++i)
2417 Result.push_back(CDS->getElementAsInteger(i));
2418 return;
2419 }
2420 for (unsigned i = 0; i != NumElts; ++i) {
2421 Constant *C = Mask->getAggregateElement(i);
2422 Result.push_back(isa<UndefValue>(C) ? -1 :
2423 cast<ConstantInt>(C)->getZExtValue());
2424 }
2425}
2426
2428 ShuffleMask.assign(Mask.begin(), Mask.end());
2429 ShuffleMaskForBitcode = convertShuffleMaskForBitcode(Mask, getType());
2430}
2431
2433 Type *ResultTy) {
2434 Type *Int32Ty = Type::getInt32Ty(ResultTy->getContext());
2435 if (isa<ScalableVectorType>(ResultTy)) {
2436 assert(all_equal(Mask) && "Unexpected shuffle");
2437 Type *VecTy = VectorType::get(Int32Ty, Mask.size(), true);
2438 if (Mask[0] == 0)
2439 return Constant::getNullValue(VecTy);
2440 return UndefValue::get(VecTy);
2441 }
2443 for (int Elem : Mask) {
2444 if (Elem == PoisonMaskElem)
2446 else
2447 MaskConst.push_back(ConstantInt::get(Int32Ty, Elem));
2448 }
2449 return ConstantVector::get(MaskConst);
2450}
2451
2452static bool isSingleSourceMaskImpl(ArrayRef<int> Mask, int NumOpElts) {
2453 assert(!Mask.empty() && "Shuffle mask must contain elements");
2454 bool UsesLHS = false;
2455 bool UsesRHS = false;
2456 for (int I : Mask) {
2457 if (I == -1)
2458 continue;
2459 assert(I >= 0 && I < (NumOpElts * 2) &&
2460 "Out-of-bounds shuffle mask element");
2461 UsesLHS |= (I < NumOpElts);
2462 UsesRHS |= (I >= NumOpElts);
2463 if (UsesLHS && UsesRHS)
2464 return false;
2465 }
2466 // Allow for degenerate case: completely undef mask means neither source is used.
2467 return UsesLHS || UsesRHS;
2468}
2469
2471 // We don't have vector operand size information, so assume operands are the
2472 // same size as the mask.
2473 return isSingleSourceMaskImpl(Mask, NumSrcElts);
2474}
2475
2476static bool isIdentityMaskImpl(ArrayRef<int> Mask, int NumOpElts) {
2477 if (!isSingleSourceMaskImpl(Mask, NumOpElts))
2478 return false;
2479 for (int i = 0, NumMaskElts = Mask.size(); i < NumMaskElts; ++i) {
2480 if (Mask[i] == -1)
2481 continue;
2482 if (Mask[i] != i && Mask[i] != (NumOpElts + i))
2483 return false;
2484 }
2485 return true;
2486}
2487
2489 if (Mask.size() != static_cast<unsigned>(NumSrcElts))
2490 return false;
2491 // We don't have vector operand size information, so assume operands are the
2492 // same size as the mask.
2493 return isIdentityMaskImpl(Mask, NumSrcElts);
2494}
2495
2497 if (Mask.size() != static_cast<unsigned>(NumSrcElts))
2498 return false;
2499 if (!isSingleSourceMask(Mask, NumSrcElts))
2500 return false;
2501
2502 // The number of elements in the mask must be at least 2.
2503 if (NumSrcElts < 2)
2504 return false;
2505
2506 for (int I = 0, E = Mask.size(); I < E; ++I) {
2507 if (Mask[I] == -1)
2508 continue;
2509 if (Mask[I] != (NumSrcElts - 1 - I) &&
2510 Mask[I] != (NumSrcElts + NumSrcElts - 1 - I))
2511 return false;
2512 }
2513 return true;
2514}
2515
2517 if (Mask.size() != static_cast<unsigned>(NumSrcElts))
2518 return false;
2519 if (!isSingleSourceMask(Mask, NumSrcElts))
2520 return false;
2521 for (int I = 0, E = Mask.size(); I < E; ++I) {
2522 if (Mask[I] == -1)
2523 continue;
2524 if (Mask[I] != 0 && Mask[I] != NumSrcElts)
2525 return false;
2526 }
2527 return true;
2528}
2529
2531 if (Mask.size() != static_cast<unsigned>(NumSrcElts))
2532 return false;
2533 // Select is differentiated from identity. It requires using both sources.
2534 if (isSingleSourceMask(Mask, NumSrcElts))
2535 return false;
2536 for (int I = 0, E = Mask.size(); I < E; ++I) {
2537 if (Mask[I] == -1)
2538 continue;
2539 if (Mask[I] != I && Mask[I] != (NumSrcElts + I))
2540 return false;
2541 }
2542 return true;
2543}
2544
2546 // Example masks that will return true:
2547 // v1 = <a, b, c, d>
2548 // v2 = <e, f, g, h>
2549 // trn1 = shufflevector v1, v2 <0, 4, 2, 6> = <a, e, c, g>
2550 // trn2 = shufflevector v1, v2 <1, 5, 3, 7> = <b, f, d, h>
2551
2552 if (Mask.size() != static_cast<unsigned>(NumSrcElts))
2553 return false;
2554 // 1. The number of elements in the mask must be a power-of-2 and at least 2.
2555 int Sz = Mask.size();
2556 if (Sz < 2 || !isPowerOf2_32(Sz))
2557 return false;
2558
2559 // 2. The first element of the mask must be either a 0 or a 1.
2560 if (Mask[0] != 0 && Mask[0] != 1)
2561 return false;
2562
2563 // 3. The difference between the first 2 elements must be equal to the
2564 // number of elements in the mask.
2565 if ((Mask[1] - Mask[0]) != NumSrcElts)
2566 return false;
2567
2568 // 4. The difference between consecutive even-numbered and odd-numbered
2569 // elements must be equal to 2.
2570 for (int I = 2; I < Sz; ++I) {
2571 int MaskEltVal = Mask[I];
2572 if (MaskEltVal == -1)
2573 return false;
2574 int MaskEltPrevVal = Mask[I - 2];
2575 if (MaskEltVal - MaskEltPrevVal != 2)
2576 return false;
2577 }
2578 return true;
2579}
2580
2582 int &Index) {
2583 if (Mask.size() != static_cast<unsigned>(NumSrcElts))
2584 return false;
2585 // Example: shufflevector <4 x n> A, <4 x n> B, <1,2,3,4>
2586 int StartIndex = -1;
2587 for (int I = 0, E = Mask.size(); I != E; ++I) {
2588 int MaskEltVal = Mask[I];
2589 if (MaskEltVal == -1)
2590 continue;
2591
2592 if (StartIndex == -1) {
2593 // Don't support a StartIndex that begins in the second input, or if the
2594 // first non-undef index would access below the StartIndex.
2595 if (MaskEltVal < I || NumSrcElts <= (MaskEltVal - I))
2596 return false;
2597
2598 StartIndex = MaskEltVal - I;
2599 continue;
2600 }
2601
2602 // Splice is sequential starting from StartIndex.
2603 if (MaskEltVal != (StartIndex + I))
2604 return false;
2605 }
2606
2607 if (StartIndex == -1)
2608 return false;
2609
2610 // NOTE: This accepts StartIndex == 0 (COPY).
2611 Index = StartIndex;
2612 return true;
2613}
2614
2616 int NumSrcElts, int &Index) {
2617 // Must extract from a single source.
2618 if (!isSingleSourceMaskImpl(Mask, NumSrcElts))
2619 return false;
2620
2621 // Must be smaller (else this is an Identity shuffle).
2622 if (NumSrcElts <= (int)Mask.size())
2623 return false;
2624
2625 // Find start of extraction, accounting that we may start with an UNDEF.
2626 int SubIndex = -1;
2627 for (int i = 0, e = Mask.size(); i != e; ++i) {
2628 int M = Mask[i];
2629 if (M < 0)
2630 continue;
2631 int Offset = (M % NumSrcElts) - i;
2632 if (0 <= SubIndex && SubIndex != Offset)
2633 return false;
2634 SubIndex = Offset;
2635 }
2636
2637 if (0 <= SubIndex && SubIndex + (int)Mask.size() <= NumSrcElts) {
2638 Index = SubIndex;
2639 return true;
2640 }
2641 return false;
2642}
2643
2645 int NumSrcElts, int &NumSubElts,
2646 int &Index) {
2647 int NumMaskElts = Mask.size();
2648
2649 // Don't try to match if we're shuffling to a smaller size.
2650 if (NumMaskElts < NumSrcElts)
2651 return false;
2652
2653 // TODO: We don't recognize self-insertion/widening.
2654 if (isSingleSourceMaskImpl(Mask, NumSrcElts))
2655 return false;
2656
2657 // Determine which mask elements are attributed to which source.
2658 APInt UndefElts = APInt::getZero(NumMaskElts);
2659 APInt Src0Elts = APInt::getZero(NumMaskElts);
2660 APInt Src1Elts = APInt::getZero(NumMaskElts);
2661 bool Src0Identity = true;
2662 bool Src1Identity = true;
2663
2664 for (int i = 0; i != NumMaskElts; ++i) {
2665 int M = Mask[i];
2666 if (M < 0) {
2667 UndefElts.setBit(i);
2668 continue;
2669 }
2670 if (M < NumSrcElts) {
2671 Src0Elts.setBit(i);
2672 Src0Identity &= (M == i);
2673 continue;
2674 }
2675 Src1Elts.setBit(i);
2676 Src1Identity &= (M == (i + NumSrcElts));
2677 }
2678 assert((Src0Elts | Src1Elts | UndefElts).isAllOnes() &&
2679 "unknown shuffle elements");
2680 assert(!Src0Elts.isZero() && !Src1Elts.isZero() &&
2681 "2-source shuffle not found");
2682
2683 // Determine lo/hi span ranges.
2684 // TODO: How should we handle undefs at the start of subvector insertions?
2685 int Src0Lo = Src0Elts.countr_zero();
2686 int Src1Lo = Src1Elts.countr_zero();
2687 int Src0Hi = NumMaskElts - Src0Elts.countl_zero();
2688 int Src1Hi = NumMaskElts - Src1Elts.countl_zero();
2689
2690 // If src0 is in place, see if the src1 elements is inplace within its own
2691 // span.
2692 if (Src0Identity) {
2693 int NumSub1Elts = Src1Hi - Src1Lo;
2694 ArrayRef<int> Sub1Mask = Mask.slice(Src1Lo, NumSub1Elts);
2695 if (isIdentityMaskImpl(Sub1Mask, NumSrcElts)) {
2696 NumSubElts = NumSub1Elts;
2697 Index = Src1Lo;
2698 return true;
2699 }
2700 }
2701
2702 // If src1 is in place, see if the src0 elements is inplace within its own
2703 // span.
2704 if (Src1Identity) {
2705 int NumSub0Elts = Src0Hi - Src0Lo;
2706 ArrayRef<int> Sub0Mask = Mask.slice(Src0Lo, NumSub0Elts);
2707 if (isIdentityMaskImpl(Sub0Mask, NumSrcElts)) {
2708 NumSubElts = NumSub0Elts;
2709 Index = Src0Lo;
2710 return true;
2711 }
2712 }
2713
2714 return false;
2715}
2716
2718 // FIXME: Not currently possible to express a shuffle mask for a scalable
2719 // vector for this case.
2720 if (isa<ScalableVectorType>(getType()))
2721 return false;
2722
2723 int NumOpElts = cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
2724 int NumMaskElts = cast<FixedVectorType>(getType())->getNumElements();
2725 if (NumMaskElts <= NumOpElts)
2726 return false;
2727
2728 // The first part of the mask must choose elements from exactly 1 source op.
2730 if (!isIdentityMaskImpl(Mask, NumOpElts))
2731 return false;
2732
2733 // All extending must be with undef elements.
2734 for (int i = NumOpElts; i < NumMaskElts; ++i)
2735 if (Mask[i] != -1)
2736 return false;
2737
2738 return true;
2739}
2740
2742 // FIXME: Not currently possible to express a shuffle mask for a scalable
2743 // vector for this case.
2744 if (isa<ScalableVectorType>(getType()))
2745 return false;
2746
2747 int NumOpElts = cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
2748 int NumMaskElts = cast<FixedVectorType>(getType())->getNumElements();
2749 if (NumMaskElts >= NumOpElts)
2750 return false;
2751
2752 return isIdentityMaskImpl(getShuffleMask(), NumOpElts);
2753}
2754
2756 // Vector concatenation is differentiated from identity with padding.
2757 if (isa<UndefValue>(Op<0>()) || isa<UndefValue>(Op<1>()))
2758 return false;
2759
2760 // FIXME: Not currently possible to express a shuffle mask for a scalable
2761 // vector for this case.
2762 if (isa<ScalableVectorType>(getType()))
2763 return false;
2764
2765 int NumOpElts = cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
2766 int NumMaskElts = cast<FixedVectorType>(getType())->getNumElements();
2767 if (NumMaskElts != NumOpElts * 2)
2768 return false;
2769
2770 // Use the mask length rather than the operands' vector lengths here. We
2771 // already know that the shuffle returns a vector twice as long as the inputs,
2772 // and neither of the inputs are undef vectors. If the mask picks consecutive
2773 // elements from both inputs, then this is a concatenation of the inputs.
2774 return isIdentityMaskImpl(getShuffleMask(), NumMaskElts);
2775}
2776
2778 int ReplicationFactor, int VF) {
2779 assert(Mask.size() == (unsigned)ReplicationFactor * VF &&
2780 "Unexpected mask size.");
2781
2782 for (int CurrElt : seq(VF)) {
2783 ArrayRef<int> CurrSubMask = Mask.take_front(ReplicationFactor);
2784 assert(CurrSubMask.size() == (unsigned)ReplicationFactor &&
2785 "Run out of mask?");
2786 Mask = Mask.drop_front(ReplicationFactor);
2787 if (!all_of(CurrSubMask, [CurrElt](int MaskElt) {
2788 return MaskElt == PoisonMaskElem || MaskElt == CurrElt;
2789 }))
2790 return false;
2791 }
2792 assert(Mask.empty() && "Did not consume the whole mask?");
2793
2794 return true;
2795}
2796
2798 int &ReplicationFactor, int &VF) {
2799 // undef-less case is trivial.
2800 if (!llvm::is_contained(Mask, PoisonMaskElem)) {
2801 ReplicationFactor =
2802 Mask.take_while([](int MaskElt) { return MaskElt == 0; }).size();
2803 if (ReplicationFactor == 0 || Mask.size() % ReplicationFactor != 0)
2804 return false;
2805 VF = Mask.size() / ReplicationFactor;
2806 return isReplicationMaskWithParams(Mask, ReplicationFactor, VF);
2807 }
2808
2809 // However, if the mask contains undef's, we have to enumerate possible tuples
2810 // and pick one. There are bounds on replication factor: [1, mask size]
2811 // (where RF=1 is an identity shuffle, RF=mask size is a broadcast shuffle)
2812 // Additionally, mask size is a replication factor multiplied by vector size,
2813 // which further significantly reduces the search space.
2814
2815 // Before doing that, let's perform basic correctness checking first.
2816 int Largest = -1;
2817 for (int MaskElt : Mask) {
2818 if (MaskElt == PoisonMaskElem)
2819 continue;
2820 // Elements must be in non-decreasing order.
2821 if (MaskElt < Largest)
2822 return false;
2823 Largest = std::max(Largest, MaskElt);
2824 }
2825
2826 // Prefer larger replication factor if all else equal.
2827 for (int PossibleReplicationFactor :
2828 reverse(seq_inclusive<unsigned>(1, Mask.size()))) {
2829 if (Mask.size() % PossibleReplicationFactor != 0)
2830 continue;
2831 int PossibleVF = Mask.size() / PossibleReplicationFactor;
2832 if (!isReplicationMaskWithParams(Mask, PossibleReplicationFactor,
2833 PossibleVF))
2834 continue;
2835 ReplicationFactor = PossibleReplicationFactor;
2836 VF = PossibleVF;
2837 return true;
2838 }
2839
2840 return false;
2841}
2842
2843bool ShuffleVectorInst::isReplicationMask(int &ReplicationFactor,
2844 int &VF) const {
2845 // Not possible to express a shuffle mask for a scalable vector for this
2846 // case.
2847 if (isa<ScalableVectorType>(getType()))
2848 return false;
2849
2850 VF = cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
2851 if (ShuffleMask.size() % VF != 0)
2852 return false;
2853 ReplicationFactor = ShuffleMask.size() / VF;
2854
2855 return isReplicationMaskWithParams(ShuffleMask, ReplicationFactor, VF);
2856}
2857
2859 if (VF <= 0 || Mask.size() < static_cast<unsigned>(VF) ||
2860 Mask.size() % VF != 0)
2861 return false;
2862 for (unsigned K = 0, Sz = Mask.size(); K < Sz; K += VF) {
2863 ArrayRef<int> SubMask = Mask.slice(K, VF);
2864 if (all_of(SubMask, [](int Idx) { return Idx == PoisonMaskElem; }))
2865 continue;
2866 SmallBitVector Used(VF, false);
2867 for (int Idx : SubMask) {
2868 if (Idx != PoisonMaskElem && Idx < VF)
2869 Used.set(Idx);
2870 }
2871 if (!Used.all())
2872 return false;
2873 }
2874 return true;
2875}
2876
2877/// Return true if this shuffle mask is a replication mask.
2879 // Not possible to express a shuffle mask for a scalable vector for this
2880 // case.
2881 if (isa<ScalableVectorType>(getType()))
2882 return false;
2883 if (!isSingleSourceMask(ShuffleMask, VF))
2884 return false;
2885
2886 return isOneUseSingleSourceMask(ShuffleMask, VF);
2887}
2888
2889bool ShuffleVectorInst::isInterleave(unsigned Factor) {
2890 FixedVectorType *OpTy = dyn_cast<FixedVectorType>(getOperand(0)->getType());
2891 // shuffle_vector can only interleave fixed length vectors - for scalable
2892 // vectors, see the @llvm.experimental.vector.interleave2 intrinsic
2893 if (!OpTy)
2894 return false;
2895 unsigned OpNumElts = OpTy->getNumElements();
2896
2897 return isInterleaveMask(ShuffleMask, Factor, OpNumElts * 2);
2898}
2899
2901 ArrayRef<int> Mask, unsigned Factor, unsigned NumInputElts,
2902 SmallVectorImpl<unsigned> &StartIndexes) {
2903 unsigned NumElts = Mask.size();
2904 if (NumElts % Factor)
2905 return false;
2906
2907 unsigned LaneLen = NumElts / Factor;
2908 if (!isPowerOf2_32(LaneLen))
2909 return false;
2910
2911 StartIndexes.resize(Factor);
2912
2913 // Check whether each element matches the general interleaved rule.
2914 // Ignore undef elements, as long as the defined elements match the rule.
2915 // Outer loop processes all factors (x, y, z in the above example)
2916 unsigned I = 0, J;
2917 for (; I < Factor; I++) {
2918 unsigned SavedLaneValue;
2919 unsigned SavedNoUndefs = 0;
2920
2921 // Inner loop processes consecutive accesses (x, x+1... in the example)
2922 for (J = 0; J < LaneLen - 1; J++) {
2923 // Lane computes x's position in the Mask
2924 unsigned Lane = J * Factor + I;
2925 unsigned NextLane = Lane + Factor;
2926 int LaneValue = Mask[Lane];
2927 int NextLaneValue = Mask[NextLane];
2928
2929 // If both are defined, values must be sequential
2930 if (LaneValue >= 0 && NextLaneValue >= 0 &&
2931 LaneValue + 1 != NextLaneValue)
2932 break;
2933
2934 // If the next value is undef, save the current one as reference
2935 if (LaneValue >= 0 && NextLaneValue < 0) {
2936 SavedLaneValue = LaneValue;
2937 SavedNoUndefs = 1;
2938 }
2939
2940 // Undefs are allowed, but defined elements must still be consecutive:
2941 // i.e.: x,..., undef,..., x + 2,..., undef,..., undef,..., x + 5, ....
2942 // Verify this by storing the last non-undef followed by an undef
2943 // Check that following non-undef masks are incremented with the
2944 // corresponding distance.
2945 if (SavedNoUndefs > 0 && LaneValue < 0) {
2946 SavedNoUndefs++;
2947 if (NextLaneValue >= 0 &&
2948 SavedLaneValue + SavedNoUndefs != (unsigned)NextLaneValue)
2949 break;
2950 }
2951 }
2952
2953 if (J < LaneLen - 1)
2954 return false;
2955
2956 int StartMask = 0;
2957 if (Mask[I] >= 0) {
2958 // Check that the start of the I range (J=0) is greater than 0
2959 StartMask = Mask[I];
2960 } else if (Mask[(LaneLen - 1) * Factor + I] >= 0) {
2961 // StartMask defined by the last value in lane
2962 StartMask = Mask[(LaneLen - 1) * Factor + I] - J;
2963 } else if (SavedNoUndefs > 0) {
2964 // StartMask defined by some non-zero value in the j loop
2965 StartMask = SavedLaneValue - (LaneLen - 1 - SavedNoUndefs);
2966 }
2967 // else StartMask remains set to 0, i.e. all elements are undefs
2968
2969 if (StartMask < 0)
2970 return false;
2971 // We must stay within the vectors; This case can happen with undefs.
2972 if (StartMask + LaneLen > NumInputElts)
2973 return false;
2974
2975 StartIndexes[I] = StartMask;
2976 }
2977
2978 return true;
2979}
2980
2981/// Check if the mask is a DE-interleave mask of the given factor
2982/// \p Factor like:
2983/// <Index, Index+Factor, ..., Index+(NumElts-1)*Factor>
2985 unsigned Factor,
2986 unsigned &Index) {
2987 // Check all potential start indices from 0 to (Factor - 1).
2988 for (unsigned Idx = 0; Idx < Factor; Idx++) {
2989 unsigned I = 0;
2990
2991 // Check that elements are in ascending order by Factor. Ignore undef
2992 // elements.
2993 for (; I < Mask.size(); I++)
2994 if (Mask[I] >= 0 && static_cast<unsigned>(Mask[I]) != Idx + I * Factor)
2995 break;
2996
2997 if (I == Mask.size()) {
2998 Index = Idx;
2999 return true;
3000 }
3001 }
3002
3003 return false;
3004}
3005
3006/// Try to lower a vector shuffle as a bit rotation.
3007///
3008/// Look for a repeated rotation pattern in each sub group.
3009/// Returns an element-wise left bit rotation amount or -1 if failed.
3010static int matchShuffleAsBitRotate(ArrayRef<int> Mask, int NumSubElts) {
3011 int NumElts = Mask.size();
3012 assert((NumElts % NumSubElts) == 0 && "Illegal shuffle mask");
3013
3014 int RotateAmt = -1;
3015 for (int i = 0; i != NumElts; i += NumSubElts) {
3016 for (int j = 0; j != NumSubElts; ++j) {
3017 int M = Mask[i + j];
3018 if (M < 0)
3019 continue;
3020 if (M < i || M >= i + NumSubElts)
3021 return -1;
3022 int Offset = (NumSubElts - (M - (i + j))) % NumSubElts;
3023 if (0 <= RotateAmt && Offset != RotateAmt)
3024 return -1;
3025 RotateAmt = Offset;
3026 }
3027 }
3028 return RotateAmt;
3029}
3030
    ArrayRef<int> Mask, unsigned EltSizeInBits, unsigned MinSubElts,
    unsigned MaxSubElts, unsigned &NumSubElts, unsigned &RotateAmt) {
  // Try each power-of-two sub-element group size between MinSubElts and
  // MaxSubElts until one yields a consistent per-element rotation.
  for (NumSubElts = MinSubElts; NumSubElts <= MaxSubElts; NumSubElts *= 2) {
    int EltRotateAmt = matchShuffleAsBitRotate(Mask, NumSubElts);
    if (EltRotateAmt < 0)
      continue;
    // Scale the element rotation amount into a bit rotation amount.
    RotateAmt = EltRotateAmt * EltSizeInBits;
    return true;
  }

  return false;
}
3044
3045//===----------------------------------------------------------------------===//
3046// InsertValueInst Class
3047//===----------------------------------------------------------------------===//
3048
void InsertValueInst::init(Value *Agg, Value *Val, ArrayRef<unsigned> Idxs,
                           const Twine &Name) {
  assert(getNumOperands() == 2 && "NumOperands not initialized?");

  // There's no fundamental reason why we require at least one index
  // (other than weirdness with &*IdxBegin being invalid; see
  // getelementptr's init routine for example). But there's no
  // present need to support it.
  assert(!Idxs.empty() && "InsertValueInst must have at least one index");

         Val->getType() && "Inserted value must match indexed type!");
  Op<0>() = Agg;
  Op<1>() = Val;

  // The index path is stored on the instruction itself, not as operands.
  Indices.append(Idxs.begin(), Idxs.end());
  setName(Name);
}

// Copy constructor: clones both operands and the index list of \p IVI.
InsertValueInst::InsertValueInst(const InsertValueInst &IVI)
  : Instruction(IVI.getType(), InsertValue,
                OperandTraits<InsertValueInst>::op_begin(this), 2),
    Indices(IVI.Indices) {
  Op<0>() = IVI.getOperand(0);
  Op<1>() = IVI.getOperand(1);
}
3076
3077//===----------------------------------------------------------------------===//
3078// ExtractValueInst Class
3079//===----------------------------------------------------------------------===//
3080
3081void ExtractValueInst::init(ArrayRef<unsigned> Idxs, const Twine &Name) {
3082 assert(getNumOperands() == 1 && "NumOperands not initialized?");
3083
3084 // There's no fundamental reason why we require at least one index.
3085 // But there's no present need to support it.
3086 assert(!Idxs.empty() && "ExtractValueInst must have at least one index");
3087
3088 Indices.append(Idxs.begin(), Idxs.end());
3089 setName(Name);
3090}
3091
// Copy constructor: clones the aggregate operand and index list of \p EVI.
ExtractValueInst::ExtractValueInst(const ExtractValueInst &EVI)
  : UnaryInstruction(EVI.getType(), ExtractValue, EVI.getOperand(0)),
    Indices(EVI.Indices) {
}
3097
// getIndexedType - Returns the type of the element that would be extracted
// with an extractvalue instruction with the specified parameters.
//
// A null type is returned if the indices are invalid for the specified
// pointer type.
//
                                         ArrayRef<unsigned> Idxs) {
  for (unsigned Index : Idxs) {
    // We can't use CompositeType::indexValid(Index) here.
    // indexValid() always returns true for arrays because getelementptr allows
    // out-of-bounds indices. Since we don't allow those for extractvalue and
    // insertvalue we need to check array indexing manually.
    // Since the only other types we can index into are struct types it's just
    // as easy to check those manually as well.
    if (ArrayType *AT = dyn_cast<ArrayType>(Agg)) {
      if (Index >= AT->getNumElements())
        return nullptr;
      Agg = AT->getElementType();
    } else if (StructType *ST = dyn_cast<StructType>(Agg)) {
      if (Index >= ST->getNumElements())
        return nullptr;
      Agg = ST->getElementType(Index);
    } else {
      // Not a valid type to index into.
      return nullptr;
    }
  }
  // Every index was valid; Agg now names the indexed element type.
  return const_cast<Type*>(Agg);
}
3128
3129//===----------------------------------------------------------------------===//
3130// UnaryOperator Class
3131//===----------------------------------------------------------------------===//
3132
                             const Twine &Name,
                             BasicBlock::iterator InsertBefore)
  : UnaryInstruction(Ty, iType, S, InsertBefore) {
  Op<0>() = S;
  setName(Name);
  // Validate opcode/type invariants (debug builds only).
  AssertOK();
}

                             Type *Ty, const Twine &Name,
                             Instruction *InsertBefore)
  : UnaryInstruction(Ty, iType, S, InsertBefore) {
  Op<0>() = S;
  setName(Name);
  AssertOK();
}

                             Type *Ty, const Twine &Name,
                             BasicBlock *InsertAtEnd)
  : UnaryInstruction(Ty, iType, S, InsertAtEnd) {
  Op<0>() = S;
  setName(Name);
  AssertOK();
}

                                 BasicBlock::iterator InsertBefore) {
  // The result type of a unary operator always matches its operand's type.
  return new UnaryOperator(Op, S, S->getType(), Name, InsertBefore);
}

                                 const Twine &Name,
                                 Instruction *InsertBefore) {
  return new UnaryOperator(Op, S, S->getType(), Name, InsertBefore);
}

                                 const Twine &Name,
                                 BasicBlock *InsertAtEnd) {
  // Create the instruction unattached, then append it to InsertAtEnd.
  UnaryOperator *Res = Create(Op, S, Name);
  Res->insertInto(InsertAtEnd, InsertAtEnd->end());
  return Res;
}
3178
3179void UnaryOperator::AssertOK() {
3180 Value *LHS = getOperand(0);
3181 (void)LHS; // Silence warnings.
3182#ifndef NDEBUG
3183 switch (getOpcode()) {
3184 case FNeg:
3185 assert(getType() == LHS->getType() &&
3186 "Unary operation should return same type as operand!");
3187 assert(getType()->isFPOrFPVectorTy() &&
3188 "Tried to create a floating-point operation on a "
3189 "non-floating-point type!");
3190 break;
3191 default: llvm_unreachable("Invalid opcode provided");
3192 }
3193#endif
3194}
3195
3196//===----------------------------------------------------------------------===//
3197// BinaryOperator Class
3198//===----------------------------------------------------------------------===//
3199
                               const Twine &Name,
                               BasicBlock::iterator InsertBefore)
  : Instruction(Ty, iType, OperandTraits<BinaryOperator>::op_begin(this),
                OperandTraits<BinaryOperator>::operands(this), InsertBefore) {
  Op<0>() = S1;
  Op<1>() = S2;
  setName(Name);
  // Validate operand/result type invariants (mostly debug builds only).
  AssertOK();
}

                               Type *Ty, const Twine &Name,
                               Instruction *InsertBefore)
  : Instruction(Ty, iType,
                OperandTraits<BinaryOperator>::op_begin(this),
                OperandTraits<BinaryOperator>::operands(this),
                InsertBefore) {
  Op<0>() = S1;
  Op<1>() = S2;
  setName(Name);
  AssertOK();
}

                               Type *Ty, const Twine &Name,
                               BasicBlock *InsertAtEnd)
  : Instruction(Ty, iType,
                OperandTraits<BinaryOperator>::op_begin(this),
                OperandTraits<BinaryOperator>::operands(this),
                InsertAtEnd) {
  Op<0>() = S1;
  Op<1>() = S2;
  setName(Name);
  AssertOK();
}
3236
3237void BinaryOperator::AssertOK() {
3238 Value *LHS = getOperand(0), *RHS = getOperand(1);
3239 (void)LHS; (void)RHS; // Silence warnings.
3240 assert(LHS->getType() == RHS->getType() &&
3241 "Binary operator operand types must match!");
3242#ifndef NDEBUG
3243 switch (getOpcode()) {
3244 case Add: case Sub:
3245 case Mul:
3246 assert(getType() == LHS->getType() &&
3247 "Arithmetic operation should return same type as operands!");
3248 assert(getType()->isIntOrIntVectorTy() &&
3249 "Tried to create an integer operation on a non-integer type!");
3250 break;
3251 case FAdd: case FSub:
3252 case FMul:
3253 assert(getType() == LHS->getType() &&
3254 "Arithmetic operation should return same type as operands!");
3255 assert(getType()->isFPOrFPVectorTy() &&
3256 "Tried to create a floating-point operation on a "
3257 "non-floating-point type!");
3258 break;
3259 case UDiv:
3260 case SDiv:
3261 assert(getType() == LHS->getType() &&
3262 "Arithmetic operation should return same type as operands!");
3263 assert(getType()->isIntOrIntVectorTy() &&
3264 "Incorrect operand type (not integer) for S/UDIV");
3265 break;
3266 case FDiv:
3267 assert(getType() == LHS->getType() &&
3268 "Arithmetic operation should return same type as operands!");
3269 assert(getType()->isFPOrFPVectorTy() &&
3270 "Incorrect operand type (not floating point) for FDIV");
3271 break;
3272 case URem:
3273 case SRem:
3274 assert(getType() == LHS->getType() &&
3275 "Arithmetic operation should return same type as operands!");
3276 assert(getType()->isIntOrIntVectorTy() &&
3277 "Incorrect operand type (not integer) for S/UREM");
3278 break;
3279 case FRem:
3280 assert(getType() == LHS->getType() &&
3281 "Arithmetic operation should return same type as operands!");
3282 assert(getType()->isFPOrFPVectorTy() &&
3283 "Incorrect operand type (not floating point) for FREM");
3284 break;
3285 case Shl:
3286 case LShr:
3287 case AShr:
3288 assert(getType() == LHS->getType() &&
3289 "Shift operation should return same type as operands!");
3290 assert(getType()->isIntOrIntVectorTy() &&
3291 "Tried to create a shift operation on a non-integral type!");
3292 break;
3293 case And: case Or:
3294 case Xor:
3295 assert(getType() == LHS->getType() &&
3296 "Logical operation should return same type as operands!");
3297 assert(getType()->isIntOrIntVectorTy() &&
3298 "Tried to create a logical operation on a non-integral type!");
3299 break;
3300 default: llvm_unreachable("Invalid opcode provided");
3301 }
3302#endif
3303}
3304
                                       const Twine &Name,
                                       BasicBlock::iterator InsertBefore) {
  assert(S1->getType() == S2->getType() &&
         "Cannot create binary operator with two operands of differing type!");
  // The result type always equals the (shared) operand type.
  return new BinaryOperator(Op, S1, S2, S1->getType(), Name, InsertBefore);
}

                                       const Twine &Name,
                                       Instruction *InsertBefore) {
  assert(S1->getType() == S2->getType() &&
         "Cannot create binary operator with two operands of differing type!");
  return new BinaryOperator(Op, S1, S2, S1->getType(), Name, InsertBefore);
}

                                       const Twine &Name,
                                       BasicBlock *InsertAtEnd) {
  // Create the instruction unattached, then append it to InsertAtEnd.
  BinaryOperator *Res = Create(Op, S1, S2, Name);
  Res->insertInto(InsertAtEnd, InsertAtEnd->end());
  return Res;
}
3328
                                          BasicBlock::iterator InsertBefore) {
  // Integer negation is canonically represented as (sub 0, X).
  Value *Zero = ConstantInt::get(Op->getType(), 0);
  return new BinaryOperator(Instruction::Sub, Zero, Op, Op->getType(), Name,
                            InsertBefore);
}

                                          BasicBlock *InsertAtEnd) {
  Value *Zero = ConstantInt::get(Op->getType(), 0);
  return new BinaryOperator(Instruction::Sub,
                            Zero, Op,
                            Op->getType(), Name, InsertAtEnd);
}

                                             Instruction *InsertBefore) {
  // NSW variant: (sub nsw 0, X).
  Value *Zero = ConstantInt::get(Op->getType(), 0);
  return BinaryOperator::CreateNSWSub(Zero, Op, Name, InsertBefore);
}

                                             BasicBlock *InsertAtEnd) {
  Value *Zero = ConstantInt::get(Op->getType(), 0);
  return BinaryOperator::CreateNSWSub(Zero, Op, Name, InsertAtEnd);
}
3355
                                          BasicBlock::iterator InsertBefore) {
  // Bitwise not is canonically represented as (xor X, -1).
  Constant *C = Constant::getAllOnesValue(Op->getType());
  return new BinaryOperator(Instruction::Xor, Op, C,
                            Op->getType(), Name, InsertBefore);
}

                                          Instruction *InsertBefore) {
  Constant *C = Constant::getAllOnesValue(Op->getType());
  return new BinaryOperator(Instruction::Xor, Op, C,
                            Op->getType(), Name, InsertBefore);
}

                                          BasicBlock *InsertAtEnd) {
  return new BinaryOperator(Instruction::Xor, Op, AllOnes,
                            Op->getType(), Name, InsertAtEnd);
}
3376
// Exchange the two operands to this instruction. This instruction is safe to
// use on any binary instruction and does not modify the semantics of the
// instruction. If the instruction is order-dependent (SetLT f.e.), the opcode
// is changed.
  if (!isCommutative())
    return true; // Can't commute operands
  Op<0>().swap(Op<1>());
  // Returns false on success (operands were swapped).
  return false;
}
3387
3388//===----------------------------------------------------------------------===//
3389// FPMathOperator Class
3390//===----------------------------------------------------------------------===//
3391
  // Read the requested accuracy (in ULPs) from the !fpmath metadata attached
  // to this instruction; 0.0 is returned when no metadata is present.
  const MDNode *MD =
      cast<Instruction>(this)->getMetadata(LLVMContext::MD_fpmath);
  if (!MD)
    return 0.0;
  ConstantFP *Accuracy = mdconst::extract<ConstantFP>(MD->getOperand(0));
  return Accuracy->getValueAPF().convertToFloat();
}
3400
3401//===----------------------------------------------------------------------===//
3402// CastInst Class
3403//===----------------------------------------------------------------------===//
3404
// Just determine if this cast only deals with integral->integral conversion.
  switch (getOpcode()) {
  default: return false;
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::Trunc:
    return true;
  case Instruction::BitCast:
    // A bitcast counts only when both source and result types are integers.
    return getOperand(0)->getType()->isIntegerTy() &&
           getType()->isIntegerTy();
  }
}
3418
/// This function determines if the CastInst does not require any bits to be
/// changed in order to effect the cast. Essentially, it identifies cases where
/// no code gen is necessary for the cast, hence the name no-op cast. For
/// example, the following are all no-op casts:
/// # bitcast i32* %x to i8*
/// # bitcast <2 x i32> %x to <4 x i16>
/// # ptrtoint i32* %x to i32 ; on 32-bit platforms only
/// Determine if the described cast is a no-op.
                          Type *SrcTy,
                          Type *DestTy,
                          const DataLayout &DL) {
  assert(castIsValid(Opcode, SrcTy, DestTy) && "method precondition");
  switch (Opcode) {
    default: llvm_unreachable("Invalid CastOp");
    case Instruction::Trunc:
    case Instruction::ZExt:
    case Instruction::SExt:
    case Instruction::FPTrunc:
    case Instruction::FPExt:
    case Instruction::UIToFP:
    case Instruction::SIToFP:
    case Instruction::FPToUI:
    case Instruction::FPToSI:
    case Instruction::AddrSpaceCast:
      // TODO: Target informations may give a more accurate answer here.
      return false;
    case Instruction::BitCast:
      return true;  // BitCast never modifies bits.
    case Instruction::PtrToInt:
      // Ptr<->int conversions are no-ops only when the integer type has
      // exactly the pointer's width.
      return DL.getIntPtrType(SrcTy)->getScalarSizeInBits() ==
             DestTy->getScalarSizeInBits();
    case Instruction::IntToPtr:
      return DL.getIntPtrType(DestTy)->getScalarSizeInBits() ==
             SrcTy->getScalarSizeInBits();
  }
}
3456
  // Convenience overload: forwards to the static isNoopCast using this
  // cast's opcode and its operand/result types.
  return isNoopCast(getOpcode(), getOperand(0)->getType(), getType(), DL);
}
3460
/// This function determines if a pair of casts can be eliminated and what
/// opcode should be used in the elimination. This assumes that there are two
/// instructions like this:
/// * %F = firstOpcode SrcTy %x to MidTy
/// * %S = secondOpcode MidTy %F to DstTy
/// The function returns a resultOpcode so these two casts can be replaced with:
/// * %Replacement = resultOpcode %SrcTy %x to DstTy
/// If no such cast is permitted, the function returns 0.
    Type *SrcTy, Type *MidTy, Type *DstTy, Type *SrcIntPtrTy, Type *MidIntPtrTy,
    Type *DstIntPtrTy) {
  // Define the 144 possibilities for these two cast instructions. The values
  // in this matrix determine what to do in a given situation and select the
  // case in the switch below. The rows correspond to firstOp, the columns
  // correspond to secondOp. In looking at the table below, keep in mind
  // the following cast properties:
  //
  //           Size Compare     Source          Destination
  // Operator  Src ? Size       Type    Sign    Type    Sign
  // -------- ------------ ------------------- ---------------------
  // TRUNC        >        Integer     Any     Integral  Any
  // ZEXT         <        Integral  Unsigned  Integer   Any
  // SEXT         <        Integral   Signed   Integer   Any
  // FPTOUI      n/a       FloatPt     n/a     Integral  Unsigned
  // FPTOSI      n/a       FloatPt     n/a     Integral  Signed
  // UITOFP      n/a       Integral  Unsigned  FloatPt   n/a
  // SITOFP      n/a       Integral   Signed   FloatPt   n/a
  // FPTRUNC      >        FloatPt     n/a     FloatPt   n/a
  // FPEXT        <        FloatPt     n/a     FloatPt   n/a
  // PTRTOINT    n/a       Pointer     n/a     Integral  Unsigned
  // INTTOPTR    n/a       Integral  Unsigned  Pointer   n/a
  // BITCAST      =        FirstClass  n/a     FirstClass n/a
  // ADDRSPCST   n/a       Pointer     n/a     Pointer   n/a
  //
  // NOTE: some transforms are safe, but we consider them to be non-profitable.
  // For example, we could merge "fptoui double to i32" + "zext i32 to i64",
  // into "fptoui double to i64", but this loses information about the range
  // of the produced value (we no longer know the top-part is all zeros).
  // Further this conversion is often much more expensive for typical hardware,
  // and causes issues when building libgcc. We disallow fptosi+sext for the
  // same reason.
  const unsigned numCastOps =
    Instruction::CastOpsEnd - Instruction::CastOpsBegin;
  static const uint8_t CastResults[numCastOps][numCastOps] = {
    // T        F  F  U  S  F  F  P  I  B  A  -+
    // R  Z  S  P  P  I  I  T  P  2  N  T  S   |
    // U  E  E  2  2  2  2  R  E  I  T  C  C   +- secondOp
    // N  X  X  U  S  F  F  N  X  N  2  V  V   |
    // C  T  T  I  I  P  P  C  T  T  P  T  T  -+
    {  1, 0, 0,99,99, 0, 0,99,99,99, 0, 3, 0}, // Trunc          -+
    {  8, 1, 9,99,99, 2,17,99,99,99, 2, 3, 0}, // ZExt            |
    {  8, 0, 1,99,99, 0, 2,99,99,99, 0, 3, 0}, // SExt            |
    {  0, 0, 0,99,99, 0, 0,99,99,99, 0, 3, 0}, // FPToUI          |
    {  0, 0, 0,99,99, 0, 0,99,99,99, 0, 3, 0}, // FPToSI          |
    { 99,99,99, 0, 0,99,99, 0, 0,99,99, 4, 0}, // UIToFP          +- firstOp
    { 99,99,99, 0, 0,99,99, 0, 0,99,99, 4, 0}, // SIToFP          |
    { 99,99,99, 0, 0,99,99, 0, 0,99,99, 4, 0}, // FPTrunc         |
    { 99,99,99, 2, 2,99,99, 8, 2,99,99, 4, 0}, // FPExt           |
    {  1, 0, 0,99,99, 0, 0,99,99,99, 7, 3, 0}, // PtrToInt        |
    { 99,99,99,99,99,99,99,99,99,11,99,15, 0}, // IntToPtr        |
    {  5, 5, 5, 0, 0, 5, 5, 0, 0,16, 5, 1,14}, // BitCast         |
    {  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,13,12}, // AddrSpaceCast  -+
  };

  // TODO: This logic could be encoded into the table above and handled in the
  // switch below.
  // If either of the casts are a bitcast from scalar to vector, disallow the
  // merging. However, any pair of bitcasts are allowed.
  bool IsFirstBitcast  = (firstOp == Instruction::BitCast);
  bool IsSecondBitcast = (secondOp == Instruction::BitCast);
  bool AreBothBitcasts = IsFirstBitcast && IsSecondBitcast;

  // Check if any of the casts convert scalars <-> vectors.
  if ((IsFirstBitcast  && isa<VectorType>(SrcTy) != isa<VectorType>(MidTy)) ||
      (IsSecondBitcast && isa<VectorType>(MidTy) != isa<VectorType>(DstTy)))
    if (!AreBothBitcasts)
      return 0;

  // Look up how this particular firstOp/secondOp pair may be combined.
  int ElimCase = CastResults[firstOp-Instruction::CastOpsBegin]
                            [secondOp-Instruction::CastOpsBegin];
  switch (ElimCase) {
    case 0:
      // Categorically disallowed.
      return 0;
    case 1:
      // Allowed, use first cast's opcode.
      return firstOp;
    case 2:
      // Allowed, use second cast's opcode.
      return secondOp;
    case 3:
      // No-op cast in second op implies firstOp as long as the DestTy
      // is integer and we are not converting between a vector and a
      // non-vector type.
      if (!SrcTy->isVectorTy() && DstTy->isIntegerTy())
        return firstOp;
      return 0;
    case 4:
      // No-op cast in second op implies firstOp as long as the DestTy
      // matches MidTy.
      if (DstTy == MidTy)
        return firstOp;
      return 0;
    case 5:
      // No-op cast in first op implies secondOp as long as the SrcTy
      // is an integer.
      if (SrcTy->isIntegerTy())
        return secondOp;
      return 0;
    case 7: {
      // Disable inttoptr/ptrtoint optimization if enabled.
      if (DisableI2pP2iOpt)
        return 0;

      // Cannot simplify if address spaces are different!
      if (SrcTy->getPointerAddressSpace() != DstTy->getPointerAddressSpace())
        return 0;

      unsigned MidSize = MidTy->getScalarSizeInBits();
      // We can still fold this without knowing the actual sizes as long we
      // know that the intermediate pointer is the largest possible
      // pointer size.
      // FIXME: Is this always true?
      if (MidSize == 64)
        return Instruction::BitCast;

      // ptrtoint, inttoptr -> bitcast (ptr -> ptr) if int size is >= ptr size.
      if (!SrcIntPtrTy || DstIntPtrTy != SrcIntPtrTy)
        return 0;
      unsigned PtrSize = SrcIntPtrTy->getScalarSizeInBits();
      if (MidSize >= PtrSize)
        return Instruction::BitCast;
      return 0;
    }
    case 8: {
      // ext, trunc -> bitcast,    if the SrcTy and DstTy are the same
      // ext, trunc -> ext,        if sizeof(SrcTy) < sizeof(DstTy)
      // ext, trunc -> trunc,      if sizeof(SrcTy) > sizeof(DstTy)
      unsigned SrcSize = SrcTy->getScalarSizeInBits();
      unsigned DstSize = DstTy->getScalarSizeInBits();
      if (SrcTy == DstTy)
        return Instruction::BitCast;
      if (SrcSize < DstSize)
        return firstOp;
      if (SrcSize > DstSize)
        return secondOp;
      return 0;
    }
    case 9:
      // zext, sext -> zext, because sext can't sign extend after zext
      return Instruction::ZExt;
    case 11: {
      // inttoptr, ptrtoint -> bitcast if SrcSize<=PtrSize and SrcSize==DstSize
      if (!MidIntPtrTy)
        return 0;
      unsigned PtrSize = MidIntPtrTy->getScalarSizeInBits();
      unsigned SrcSize = SrcTy->getScalarSizeInBits();
      unsigned DstSize = DstTy->getScalarSizeInBits();
      if (SrcSize <= PtrSize && SrcSize == DstSize)
        return Instruction::BitCast;
      return 0;
    }
    case 12:
      // addrspacecast, addrspacecast -> bitcast,       if SrcAS == DstAS
      // addrspacecast, addrspacecast -> addrspacecast, if SrcAS != DstAS
      if (SrcTy->getPointerAddressSpace() != DstTy->getPointerAddressSpace())
        return Instruction::AddrSpaceCast;
      return Instruction::BitCast;
    case 13:
      // FIXME: this state can be merged with (1), but the following assert
      // is useful to check the correctness of the sequence due to semantic
      // change of bitcast.
      assert(
        SrcTy->isPtrOrPtrVectorTy() &&
        MidTy->isPtrOrPtrVectorTy() &&
        DstTy->isPtrOrPtrVectorTy() &&
        SrcTy->getPointerAddressSpace() != MidTy->getPointerAddressSpace() &&
        MidTy->getPointerAddressSpace() == DstTy->getPointerAddressSpace() &&
        "Illegal addrspacecast, bitcast sequence!");
      // Allowed, use first cast's opcode
      return firstOp;
    case 14:
      // bitcast, addrspacecast -> addrspacecast
      return Instruction::AddrSpaceCast;
    case 15:
      // FIXME: this state can be merged with (1), but the following assert
      // is useful to check the correctness of the sequence due to semantic
      // change of bitcast.
      assert(
        SrcTy->isIntOrIntVectorTy() &&
        MidTy->isPtrOrPtrVectorTy() &&
        DstTy->isPtrOrPtrVectorTy() &&
        MidTy->getPointerAddressSpace() == DstTy->getPointerAddressSpace() &&
        "Illegal inttoptr, bitcast sequence!");
      // Allowed, use first cast's opcode
      return firstOp;
    case 16:
      // FIXME: this state can be merged with (2), but the following assert
      // is useful to check the correctness of the sequence due to semantic
      // change of bitcast.
      assert(
        SrcTy->isPtrOrPtrVectorTy() &&
        MidTy->isPtrOrPtrVectorTy() &&
        DstTy->isIntOrIntVectorTy() &&
        SrcTy->getPointerAddressSpace() == MidTy->getPointerAddressSpace() &&
        "Illegal bitcast, ptrtoint sequence!");
      // Allowed, use second cast's opcode
      return secondOp;
    case 17:
      // (sitofp (zext x)) -> (uitofp x)
      return Instruction::UIToFP;
    case 99:
      // Cast combination can't happen (error in input). This is for all cases
      // where the MidTy is not the same for the two cast instructions.
      llvm_unreachable("Invalid Cast Combination");
    default:
      llvm_unreachable("Error in CastResults table!!!");
  }
}
3681
                           const Twine &Name,
                           BasicBlock::iterator InsertBefore) {
  assert(castIsValid(op, S, Ty) && "Invalid cast!");
  // Construct and return the appropriate CastInst subclass
  // (one concrete subclass per cast opcode).
  switch (op) {
  case Trunc: return new TruncInst (S, Ty, Name, InsertBefore);
  case ZExt: return new ZExtInst (S, Ty, Name, InsertBefore);
  case SExt: return new SExtInst (S, Ty, Name, InsertBefore);
  case FPTrunc: return new FPTruncInst (S, Ty, Name, InsertBefore);
  case FPExt: return new FPExtInst (S, Ty, Name, InsertBefore);
  case UIToFP: return new UIToFPInst (S, Ty, Name, InsertBefore);
  case SIToFP: return new SIToFPInst (S, Ty, Name, InsertBefore);
  case FPToUI: return new FPToUIInst (S, Ty, Name, InsertBefore);
  case FPToSI: return new FPToSIInst (S, Ty, Name, InsertBefore);
  case PtrToInt: return new PtrToIntInst (S, Ty, Name, InsertBefore);
  case IntToPtr: return new IntToPtrInst (S, Ty, Name, InsertBefore);
  case BitCast: return new BitCastInst (S, Ty, Name, InsertBefore);
  case AddrSpaceCast: return new AddrSpaceCastInst (S, Ty, Name, InsertBefore);
  default: llvm_unreachable("Invalid opcode provided");
  }
}

                           const Twine &Name, Instruction *InsertBefore) {
  assert(castIsValid(op, S, Ty) && "Invalid cast!");
  // Construct and return the appropriate CastInst subclass
  switch (op) {
  case Trunc: return new TruncInst (S, Ty, Name, InsertBefore);
  case ZExt: return new ZExtInst (S, Ty, Name, InsertBefore);
  case SExt: return new SExtInst (S, Ty, Name, InsertBefore);
  case FPTrunc: return new FPTruncInst (S, Ty, Name, InsertBefore);
  case FPExt: return new FPExtInst (S, Ty, Name, InsertBefore);
  case UIToFP: return new UIToFPInst (S, Ty, Name, InsertBefore);
  case SIToFP: return new SIToFPInst (S, Ty, Name, InsertBefore);
  case FPToUI: return new FPToUIInst (S, Ty, Name, InsertBefore);
  case FPToSI: return new FPToSIInst (S, Ty, Name, InsertBefore);
  case PtrToInt: return new PtrToIntInst (S, Ty, Name, InsertBefore);
  case IntToPtr: return new IntToPtrInst (S, Ty, Name, InsertBefore);
  case BitCast: return new BitCastInst (S, Ty, Name, InsertBefore);
  case AddrSpaceCast: return new AddrSpaceCastInst (S, Ty, Name, InsertBefore);
  default: llvm_unreachable("Invalid opcode provided");
  }
}

                           const Twine &Name, BasicBlock *InsertAtEnd) {
  assert(castIsValid(op, S, Ty) && "Invalid cast!");
  // Construct and return the appropriate CastInst subclass
  switch (op) {
  case Trunc: return new TruncInst (S, Ty, Name, InsertAtEnd);
  case ZExt: return new ZExtInst (S, Ty, Name, InsertAtEnd);
  case SExt: return new SExtInst (S, Ty, Name, InsertAtEnd);
  case FPTrunc: return new FPTruncInst (S, Ty, Name, InsertAtEnd);
  case FPExt: return new FPExtInst (S, Ty, Name, InsertAtEnd);
  case UIToFP: return new UIToFPInst (S, Ty, Name, InsertAtEnd);
  case SIToFP: return new SIToFPInst (S, Ty, Name, InsertAtEnd);
  case FPToUI: return new FPToUIInst (S, Ty, Name, InsertAtEnd);
  case FPToSI: return new FPToSIInst (S, Ty, Name, InsertAtEnd);
  case PtrToInt: return new PtrToIntInst (S, Ty, Name, InsertAtEnd);
  case IntToPtr: return new IntToPtrInst (S, Ty, Name, InsertAtEnd);
  case BitCast: return new BitCastInst (S, Ty, Name, InsertAtEnd);
  case AddrSpaceCast: return new AddrSpaceCastInst (S, Ty, Name, InsertAtEnd);
  default: llvm_unreachable("Invalid opcode provided");
  }
}
3748
                                        BasicBlock::iterator InsertBefore) {
  // Equal scalar widths: a bitcast suffices; otherwise zero-extend.
  if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
    return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
  return Create(Instruction::ZExt, S, Ty, Name, InsertBefore);
}

                                        const Twine &Name,
                                        Instruction *InsertBefore) {
  if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
    return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
  return Create(Instruction::ZExt, S, Ty, Name, InsertBefore);
}

                                        const Twine &Name,
                                        BasicBlock *InsertAtEnd) {
  if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
    return Create(Instruction::BitCast, S, Ty, Name, InsertAtEnd);
  return Create(Instruction::ZExt, S, Ty, Name, InsertAtEnd);
}

                                        BasicBlock::iterator InsertBefore) {
  // Equal scalar widths: a bitcast suffices; otherwise sign-extend.
  if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
    return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
  return Create(Instruction::SExt, S, Ty, Name, InsertBefore);
}

                                        const Twine &Name,
                                        Instruction *InsertBefore) {
  if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
    return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
  return Create(Instruction::SExt, S, Ty, Name, InsertBefore);
}

                                        const Twine &Name,
                                        BasicBlock *InsertAtEnd) {
  if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
    return Create(Instruction::BitCast, S, Ty, Name, InsertAtEnd);
  return Create(Instruction::SExt, S, Ty, Name, InsertAtEnd);
}

                                         BasicBlock::iterator InsertBefore) {
  // Equal scalar widths: a bitcast suffices; otherwise truncate.
  if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
    return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
  return Create(Instruction::Trunc, S, Ty, Name, InsertBefore);
}

                                         const Twine &Name,
                                         Instruction *InsertBefore) {
  if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
    return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
  return Create(Instruction::Trunc, S, Ty, Name, InsertBefore);
}

                                         const Twine &Name,
                                         BasicBlock *InsertAtEnd) {
  if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
    return Create(Instruction::BitCast, S, Ty, Name, InsertAtEnd);
  return Create(Instruction::Trunc, S, Ty, Name, InsertAtEnd);
}
3817
                                      const Twine &Name,
                                      BasicBlock *InsertAtEnd) {
  assert(S->getType()->isPtrOrPtrVectorTy() && "Invalid cast");
  assert((Ty->isIntOrIntVectorTy() || Ty->isPtrOrPtrVectorTy()) &&
         "Invalid cast");
  assert(Ty->isVectorTy() == S->getType()->isVectorTy() && "Invalid cast");
  assert((!Ty->isVectorTy() ||
          cast<VectorType>(Ty)->getElementCount() ==
              cast<VectorType>(S->getType())->getElementCount()) &&
         "Invalid cast");

  // Pointer -> integer lowers to ptrtoint; pointer -> pointer is handled
  // below (bitcast or addrspacecast).
  if (Ty->isIntOrIntVectorTy())
    return Create(Instruction::PtrToInt, S, Ty, Name, InsertAtEnd);

  return CreatePointerBitCastOrAddrSpaceCast(S, Ty, Name, InsertAtEnd);
}

/// Create a BitCast or a PtrToInt cast instruction
                                      BasicBlock::iterator InsertBefore) {
  assert(S->getType()->isPtrOrPtrVectorTy() && "Invalid cast");
  assert((Ty->isIntOrIntVectorTy() || Ty->isPtrOrPtrVectorTy()) &&
         "Invalid cast");
  assert(Ty->isVectorTy() == S->getType()->isVectorTy() && "Invalid cast");
  assert((!Ty->isVectorTy() ||
          cast<VectorType>(Ty)->getElementCount() ==
              cast<VectorType>(S->getType())->getElementCount()) &&
         "Invalid cast");

  if (Ty->isIntOrIntVectorTy())
    return Create(Instruction::PtrToInt, S, Ty, Name, InsertBefore);

  return CreatePointerBitCastOrAddrSpaceCast(S, Ty, Name, InsertBefore);
}

/// Create a BitCast or a PtrToInt cast instruction
                                      Instruction *InsertBefore) {
  assert(S->getType()->isPtrOrPtrVectorTy() && "Invalid cast");
  assert((Ty->isIntOrIntVectorTy() || Ty->isPtrOrPtrVectorTy()) &&
         "Invalid cast");
  assert(Ty->isVectorTy() == S->getType()->isVectorTy() && "Invalid cast");
  assert((!Ty->isVectorTy() ||
          cast<VectorType>(Ty)->getElementCount() ==
              cast<VectorType>(S->getType())->getElementCount()) &&
         "Invalid cast");

  if (Ty->isIntOrIntVectorTy())
    return Create(Instruction::PtrToInt, S, Ty, Name, InsertBefore);

  return CreatePointerBitCastOrAddrSpaceCast(S, Ty, Name, InsertBefore);
}
3871
                                            Value *S, Type *Ty,
                                            const Twine &Name,
                                            BasicBlock *InsertAtEnd) {
  // Pointer -> pointer cast: emits addrspacecast when the source and
  // destination address spaces differ, a plain bitcast otherwise.
  assert(S->getType()->isPtrOrPtrVectorTy() && "Invalid cast");
  assert(Ty->isPtrOrPtrVectorTy() && "Invalid cast");

    return Create(Instruction::AddrSpaceCast, S, Ty, Name, InsertAtEnd);

  return Create(Instruction::BitCast, S, Ty, Name, InsertAtEnd);
}

    Value *S, Type *Ty, const Twine &Name, BasicBlock::iterator InsertBefore) {
  assert(S->getType()->isPtrOrPtrVectorTy() && "Invalid cast");
  assert(Ty->isPtrOrPtrVectorTy() && "Invalid cast");

    return Create(Instruction::AddrSpaceCast, S, Ty, Name, InsertBefore);

  return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
}

    Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore) {
  assert(S->getType()->isPtrOrPtrVectorTy() && "Invalid cast");
  assert(Ty->isPtrOrPtrVectorTy() && "Invalid cast");

    return Create(Instruction::AddrSpaceCast, S, Ty, Name, InsertBefore);

  return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
}
3906
3908 const Twine &Name,
3909 BasicBlock::iterator InsertBefore) {
3910 if (S->getType()->isPointerTy() && Ty->isIntegerTy())
3911 return Create(Instruction::PtrToInt, S, Ty, Name, InsertBefore);
3912 if (S->getType()->isIntegerTy() && Ty->isPointerTy())
3913 return Create(Instruction::IntToPtr, S, Ty, Name, InsertBefore);
3914
3915 return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
3916}
3917
3919 const Twine &Name,
3920 Instruction *InsertBefore) {
3921 if (S->getType()->isPointerTy() && Ty->isIntegerTy())
3922 return Create(Instruction::PtrToInt, S, Ty, Name, InsertBefore);
3923 if (S->getType()->isIntegerTy() && Ty->isPointerTy())
3924 return Create(Instruction::IntToPtr, S, Ty, Name, InsertBefore);
3925
3926 return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
3927}
3928
3930 const Twine &Name,
3931 BasicBlock::iterator InsertBefore) {
3932 assert(C->getType()->isIntOrIntVectorTy() && Ty->isIntOrIntVectorTy() &&
3933 "Invalid integer cast");
3934 unsigned SrcBits = C->getType()->getScalarSizeInBits();
3935 unsigned DstBits = Ty->getScalarSizeInBits();
3936 Instruction::CastOps opcode =
3937 (SrcBits == DstBits ? Instruction::BitCast :
3938 (SrcBits > DstBits ? Instruction::Trunc :
3939 (isSigned ? Instruction::SExt : Instruction::ZExt)));
3940 return Create(opcode, C, Ty, Name, InsertBefore);
3941}
3942
3944 bool isSigned, const Twine &Name,
3945 Instruction *InsertBefore) {
3946 assert(C->getType()->isIntOrIntVectorTy() && Ty->isIntOrIntVectorTy() &&
3947 "Invalid integer cast");
3948 unsigned SrcBits = C->getType()->getScalarSizeInBits();
3949 unsigned DstBits = Ty->getScalarSizeInBits();
3950 Instruction::CastOps opcode =
3951 (SrcBits == DstBits ? Instruction::BitCast :
3952 (SrcBits > DstBits ? Instruction::Trunc :
3953 (isSigned ? Instruction::SExt : Instruction::ZExt)));
3954 return Create(opcode, C, Ty, Name, InsertBefore);
3955}
3956
3958 bool isSigned, const Twine &Name,
3959 BasicBlock *InsertAtEnd) {
3960 assert(C->getType()->isIntOrIntVectorTy() && Ty->isIntOrIntVectorTy() &&
3961 "Invalid cast");
3962 unsigned SrcBits = C->getType()->getScalarSizeInBits();
3963 unsigned DstBits = Ty->getScalarSizeInBits();
3964 Instruction::CastOps opcode =
3965 (SrcBits == DstBits ? Instruction::BitCast :
3966 (SrcBits > DstBits ? Instruction::Trunc :
3967 (isSigned ? Instruction::SExt : Instruction::ZExt)));
3968 return Create(opcode, C, Ty, Name, InsertAtEnd);
3969}
3970
3972 BasicBlock::iterator InsertBefore) {
3973 assert(C->getType()->isFPOrFPVectorTy() && Ty->isFPOrFPVectorTy() &&
3974 "Invalid cast");
3975 unsigned SrcBits = C->getType()->getScalarSizeInBits();
3976 unsigned DstBits = Ty->getScalarSizeInBits();
3977 Instruction::CastOps opcode =
3978 (SrcBits == DstBits ? Instruction::BitCast :
3979 (SrcBits > DstBits ? Instruction::FPTrunc : Instruction::FPExt));
3980 return Create(opcode, C, Ty, Name, InsertBefore);
3981}
3982
3984 const Twine &Name,
3985 Instruction *InsertBefore) {
3986 assert(C->getType()->isFPOrFPVectorTy() && Ty->isFPOrFPVectorTy() &&
3987 "Invalid cast");
3988 unsigned SrcBits = C->getType()->getScalarSizeInBits();
3989 unsigned DstBits = Ty->getScalarSizeInBits();
3990 assert((C->getType() == Ty || SrcBits != DstBits) && "Invalid cast");
3991 Instruction::CastOps opcode =
3992 (SrcBits == DstBits ? Instruction::BitCast :
3993 (SrcBits > DstBits ? Instruction::FPTrunc : Instruction::FPExt));
3994 return Create(opcode, C, Ty, Name, InsertBefore);
3995}
3996
3998 const Twine &Name,
3999 BasicBlock *InsertAtEnd) {
4000 assert(C->getType()->isFPOrFPVectorTy() && Ty->isFPOrFPVectorTy() &&
4001 "Invalid cast");
4002 unsigned SrcBits = C->getType()->getScalarSizeInBits();
4003 unsigned DstBits = Ty->getScalarSizeInBits();
4004 Instruction::CastOps opcode =
4005 (SrcBits == DstBits ? Instruction::BitCast :
4006 (SrcBits > DstBits ? Instruction::FPTrunc : Instruction::FPExt));
4007 return Create(opcode, C, Ty, Name, InsertAtEnd);
4008}
4009
4010bool CastInst::isBitCastable(Type *SrcTy, Type *DestTy) {
4011 if (!SrcTy->isFirstClassType() || !DestTy->isFirstClassType())
4012 return false;
4013
4014 if (SrcTy == DestTy)
4015 return true;
4016
4017 if (VectorType *SrcVecTy = dyn_cast<VectorType>(SrcTy)) {
4018 if (VectorType *DestVecTy = dyn_cast<VectorType>(DestTy)) {
4019 if (SrcVecTy->getElementCount() == DestVecTy->getElementCount()) {
4020 // An element by element cast. Valid if casting the elements is valid.
4021 SrcTy = SrcVecTy->getElementType();
4022 DestTy = DestVecTy->getElementType();
4023 }
4024 }
4025 }
4026
4027 if (PointerType *DestPtrTy = dyn_cast<PointerType>(DestTy)) {
4028 if (PointerType *SrcPtrTy = dyn_cast<PointerType>(SrcTy)) {
4029 return SrcPtrTy->getAddressSpace() == DestPtrTy->getAddressSpace();
4030 }
4031 }
4032
4033 TypeSize SrcBits = SrcTy->getPrimitiveSizeInBits(); // 0 for ptr
4034 TypeSize DestBits = DestTy->getPrimitiveSizeInBits(); // 0 for ptr
4035
4036 // Could still have vectors of pointers if the number of elements doesn't
4037 // match
4038 if (SrcBits.getKnownMinValue() == 0 || DestBits.getKnownMinValue() == 0)
4039 return false;
4040
4041 if (SrcBits != DestBits)
4042 return false;
4043
4044 if (DestTy->isX86_MMXTy() || SrcTy->isX86_MMXTy())
4045 return false;
4046
4047 return true;
4048}
4049
4051 const DataLayout &DL) {
4052 // ptrtoint and inttoptr are not allowed on non-integral pointers
4053 if (auto *PtrTy = dyn_cast<PointerType>(SrcTy))
4054 if (auto *IntTy = dyn_cast<IntegerType>(DestTy))
4055 return (IntTy->getBitWidth() == DL.getPointerTypeSizeInBits(PtrTy) &&
4056 !DL.isNonIntegralPointerType(PtrTy));
4057 if (auto *PtrTy = dyn_cast<PointerType>(DestTy))
4058 if (auto *IntTy = dyn_cast<IntegerType>(SrcTy))
4059 return (IntTy->getBitWidth() == DL.getPointerTypeSizeInBits(PtrTy) &&
4060 !DL.isNonIntegralPointerType(PtrTy));
4061
4062 return isBitCastable(SrcTy, DestTy);
4063}
4064
4065// Provide a way to get a "cast" where the cast opcode is inferred from the
4066// types and size of the operand. This, basically, is a parallel of the
4067// logic in the castIsValid function below. This axiom should hold:
4068// castIsValid( getCastOpcode(Val, Ty), Val, Ty)
4069// should not assert in castIsValid. In other words, this produces a "correct"
4070// casting opcode for the arguments passed to it.
4073 const Value *Src, bool SrcIsSigned, Type *DestTy, bool DestIsSigned) {
4074 Type *SrcTy = Src->getType();
4075
4076 assert(SrcTy->isFirstClassType() && DestTy->isFirstClassType() &&
4077 "Only first class types are castable!");
4078
4079 if (SrcTy == DestTy)
4080 return BitCast;
4081
4082 // FIXME: Check address space sizes here
4083 if (VectorType *SrcVecTy = dyn_cast<VectorType>(SrcTy))
4084 if (VectorType *DestVecTy = dyn_cast<VectorType>(DestTy))
4085 if (SrcVecTy->getElementCount() == DestVecTy->getElementCount()) {
4086 // An element by element cast. Find the appropriate opcode based on the
4087 // element types.
4088 SrcTy = SrcVecTy->getElementType();
4089 DestTy = DestVecTy->getElementType();
4090 }
4091
4092 // Get the bit sizes, we'll need these
4093 unsigned SrcBits = SrcTy->getPrimitiveSizeInBits(); // 0 for ptr
4094 unsigned DestBits = DestTy->getPrimitiveSizeInBits(); // 0 for ptr
4095
4096 // Run through the possibilities ...
4097 if (DestTy->isIntegerTy()) { // Casting to integral
4098 if (SrcTy->isIntegerTy()) { // Casting from integral
4099 if (DestBits < SrcBits)
4100 return Trunc; // int -> smaller int
4101 else if (DestBits > SrcBits) { // its an extension
4102 if (SrcIsSigned)
4103 return SExt; // signed -> SEXT
4104 else
4105 return ZExt; // unsigned -> ZEXT
4106 } else {
4107 return BitCast; // Same size, No-op cast
4108 }
4109 } else if (SrcTy->isFloatingPointTy()) { // Casting from floating pt
4110 if (DestIsSigned)
4111 return FPToSI; // FP -> sint
4112 else
4113 return FPToUI; // FP -> uint
4114 } else if (SrcTy->isVectorTy()) {
4115 assert(DestBits == SrcBits &&
4116 "Casting vector to integer of different width");
4117 return BitCast; // Same size, no-op cast
4118 } else {
4119 assert(SrcTy->isPointerTy() &&
4120 "Casting from a value that is not first-class type");
4121 return PtrToInt; // ptr -> int
4122 }
4123 } else if (DestTy->isFloatingPointTy()) { // Casting to floating pt
4124 if (SrcTy->isIntegerTy()) { // Casting from integral
4125 if (SrcIsSigned)
4126 return SIToFP; // sint -> FP
4127 else
4128 return UIToFP; // uint -> FP
4129 } else if (SrcTy->isFloatingPointTy()) { // Casting from floating pt
4130 if (DestBits < SrcBits) {
4131 return FPTrunc; // FP -> smaller FP
4132 } else if (DestBits > SrcBits) {
4133 return FPExt; // FP -> larger FP
4134 } else {
4135 return BitCast; // same size, no-op cast
4136 }
4137 } else if (SrcTy->isVectorTy()) {
4138 assert(DestBits == SrcBits &&
4139 "Casting vector to floating point of different width");
4140 return BitCast; // same size, no-op cast
4141 }
4142 llvm_unreachable("Casting pointer or non-first class to float");
4143 } else if (DestTy->isVectorTy()) {
4144 assert(DestBits == SrcBits &&
4145 "Illegal cast to vector (wrong type or size)");
4146 return BitCast;
4147 } else if (DestTy->isPointerTy()) {
4148 if (SrcTy->isPointerTy()) {
4149 if (DestTy->getPointerAddressSpace() != SrcTy->getPointerAddressSpace())
4150 return AddrSpaceCast;
4151 return BitCast; // ptr -> ptr
4152 } else if (SrcTy->isIntegerTy()) {
4153 return IntToPtr; // int -> ptr
4154 }
4155 llvm_unreachable("Casting pointer to other than pointer or int");
4156 } else if (DestTy->isX86_MMXTy()) {
4157 if (SrcTy->isVectorTy()) {
4158 assert(DestBits == SrcBits && "Casting vector of wrong width to X86_MMX");
4159 return BitCast; // 64-bit vector to MMX
4160 }
4161 llvm_unreachable("Illegal cast to X86_MMX");
4162 }
4163 llvm_unreachable("Casting to type that is not first-class");
4164}
4165
4166//===----------------------------------------------------------------------===//
4167// CastInst SubClass Constructors
4168//===----------------------------------------------------------------------===//
4169
4170/// Check that the construction parameters for a CastInst are correct. This
4171/// could be broken out into the separate constructors but it is useful to have
4172/// it in one place and to eliminate the redundant code for getting the sizes
4173/// of the types involved.
4174bool
4176 if (!SrcTy->isFirstClassType() || !DstTy->isFirstClassType() ||
4177 SrcTy->isAggregateType() || DstTy->isAggregateType())
4178 return false;
4179
4180 // Get the size of the types in bits, and whether we are dealing
4181 // with vector types, we'll need this later.
4182 bool SrcIsVec = isa<VectorType>(SrcTy);
4183 bool DstIsVec = isa<VectorType>(DstTy);
4184 unsigned SrcScalarBitSize = SrcTy->getScalarSizeInBits();
4185 unsigned DstScalarBitSize = DstTy->getScalarSizeInBits();
4186
4187 // If these are vector types, get the lengths of the vectors (using zero for
4188 // scalar types means that checking that vector lengths match also checks that
4189 // scalars are not being converted to vectors or vectors to scalars).
4190 ElementCount SrcEC = SrcIsVec ? cast<VectorType>(SrcTy)->getElementCount()
4192 ElementCount DstEC = DstIsVec ? cast<VectorType>(DstTy)->getElementCount()
4194
4195 // Switch on the opcode provided
4196 switch (op) {
4197 default: return false; // This is an input error
4198 case Instruction::Trunc:
4199 return SrcTy->isIntOrIntVectorTy() && DstTy->isIntOrIntVectorTy() &&
4200 SrcEC == DstEC && SrcScalarBitSize > DstScalarBitSize;
4201 case Instruction::ZExt:
4202 return SrcTy->isIntOrIntVectorTy() && DstTy->isIntOrIntVectorTy() &&
4203 SrcEC == DstEC && SrcScalarBitSize < DstScalarBitSize;
4204 case Instruction::SExt:
4205 return SrcTy->isIntOrIntVectorTy() && DstTy->isIntOrIntVectorTy() &&
4206 SrcEC == DstEC && SrcScalarBitSize < DstScalarBitSize;
4207 case Instruction::FPTrunc:
4208 return SrcTy->isFPOrFPVectorTy() && DstTy->isFPOrFPVectorTy() &&
4209 SrcEC == DstEC && SrcScalarBitSize > DstScalarBitSize;
4210 case Instruction::FPExt:
4211 return SrcTy->isFPOrFPVectorTy() && DstTy->isFPOrFPVectorTy() &&
4212 SrcEC == DstEC && SrcScalarBitSize < DstScalarBitSize;
4213 case Instruction::UIToFP:
4214 case Instruction::SIToFP:
4215 return SrcTy->isIntOrIntVectorTy() && DstTy->isFPOrFPVectorTy() &&
4216 SrcEC == DstEC;
4217 case Instruction::FPToUI:
4218 case Instruction::FPToSI:
4219 return SrcTy->isFPOrFPVectorTy() && DstTy->isIntOrIntVectorTy() &&
4220 SrcEC == DstEC;
4221 case Instruction::PtrToInt:
4222 if (SrcEC != DstEC)
4223 return false;
4224 return SrcTy->isPtrOrPtrVectorTy() && DstTy->isIntOrIntVectorTy();
4225 case Instruction::IntToPtr:
4226 if (SrcEC != DstEC)
4227 return false;
4228 return SrcTy->isIntOrIntVectorTy() && DstTy->isPtrOrPtrVectorTy();
4229 case Instruction::BitCast: {
4230 PointerType *SrcPtrTy = dyn_cast<PointerType>(SrcTy->getScalarType());
4231 PointerType *DstPtrTy = dyn_cast<PointerType>(DstTy->getScalarType());
4232
4233 // BitCast implies a no-op cast of type only. No bits change.
4234 // However, you can't cast pointers to anything but pointers.
4235 if (!SrcPtrTy != !DstPtrTy)
4236 return false;
4237
4238 // For non-pointer cases, the cast is okay if the source and destination bit
4239 // widths are identical.
4240 if (!SrcPtrTy)
4241 return SrcTy->getPrimitiveSizeInBits() == DstTy->getPrimitiveSizeInBits();
4242
4243 // If both are pointers then the address spaces must match.
4244 if (SrcPtrTy->getAddressSpace() != DstPtrTy->getAddressSpace())
4245 return false;
4246
4247 // A vector of pointers must have the same number of elements.
4248 if (SrcIsVec && DstIsVec)
4249 return SrcEC == DstEC;
4250 if (SrcIsVec)
4251 return SrcEC == ElementCount::getFixed(1);
4252 if (DstIsVec)
4253 return DstEC == ElementCount::getFixed(1);
4254
4255 return true;
4256 }
4257 case Instruction::AddrSpaceCast: {
4258 PointerType *SrcPtrTy = dyn_cast<PointerType>(SrcTy->getScalarType());
4259 if (!SrcPtrTy)
4260 return false;
4261
4262 PointerType *DstPtrTy = dyn_cast<PointerType>(DstTy->getScalarType());
4263 if (!DstPtrTy)
4264 return false;
4265
4266 if (SrcPtrTy->getAddressSpace() == DstPtrTy->getAddressSpace())
4267 return false;
4268
4269 return SrcEC == DstEC;
4270 }
4271 }
4272}
4273
4275 BasicBlock::iterator InsertBefore)
4276 : CastInst(Ty, Trunc, S, Name, InsertBefore) {
4277 assert(castIsValid(getOpcode(), S, Ty) && "Illegal Trunc");
4278}
4279
4281 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
4282) : CastInst(Ty, Trunc, S, Name, InsertBefore) {
4283 assert(castIsValid(getOpcode(), S, Ty) && "Illegal Trunc");
4284}
4285
4287 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
4288) : CastInst(Ty, Trunc, S, Name, InsertAtEnd) {
4289 assert(castIsValid(getOpcode(), S, Ty) && "Illegal Trunc");
4290}
4291
4293 BasicBlock::iterator InsertBefore)
4294 : CastInst(Ty, ZExt, S, Name, InsertBefore) {
4295 assert(castIsValid(getOpcode(), S, Ty) && "Illegal ZExt");
4296}
4297
4299 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
4300) : CastInst(Ty, ZExt, S, Name, InsertBefore) {
4301 assert(castIsValid(getOpcode(), S, Ty) && "Illegal ZExt");
4302}
4303
4305 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
4306) : CastInst(Ty, ZExt, S, Name, InsertAtEnd) {
4307 assert(castIsValid(getOpcode(), S, Ty) && "Illegal ZExt");
4308}
4309
4311 BasicBlock::iterator InsertBefore)
4312 : CastInst(Ty, SExt, S, Name, InsertBefore) {
4313 assert(castIsValid(getOpcode(), S, Ty) && "Illegal SExt");
4314}
4315
4317 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
4318) : CastInst(Ty, SExt, S, Name, InsertBefore) {
4319 assert(castIsValid(getOpcode(), S, Ty) && "Illegal SExt");
4320}
4321
4323 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
4324) : CastInst(Ty, SExt, S, Name, InsertAtEnd) {
4325 assert(castIsValid(getOpcode(), S, Ty) && "Illegal SExt");
4326}
4327
4329 BasicBlock::iterator InsertBefore)
4330 : CastInst(Ty, FPTrunc, S, Name, InsertBefore) {
4331 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPTrunc");
4332}
4333
4335 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
4336) : CastInst(Ty, FPTrunc, S, Name, InsertBefore) {
4337 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPTrunc");
4338}
4339
4341 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
4342) : CastInst(Ty, FPTrunc, S, Name, InsertAtEnd) {
4343 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPTrunc");
4344}
4345
4347 BasicBlock::iterator InsertBefore)
4348 : CastInst(Ty, FPExt, S, Name, InsertBefore) {
4349 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPExt");
4350}
4351
4353 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
4354) : CastInst(Ty, FPExt, S, Name, InsertBefore) {
4355 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPExt");
4356}
4357
4359 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
4360) : CastInst(Ty, FPExt, S, Name, InsertAtEnd) {
4361 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPExt");
4362}
4363
4365 BasicBlock::iterator InsertBefore)
4366 : CastInst(Ty, UIToFP, S, Name, InsertBefore) {
4367 assert(castIsValid(getOpcode(), S, Ty) && "Illegal UIToFP");
4368}
4369
4371 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
4372) : CastInst(Ty, UIToFP, S, Name, InsertBefore) {
4373 assert(castIsValid(getOpcode(), S, Ty) && "Illegal UIToFP");
4374}
4375
4377 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
4378) : CastInst(Ty, UIToFP, S, Name, InsertAtEnd) {
4379 assert(castIsValid(getOpcode(), S, Ty) && "Illegal UIToFP");
4380}
4381
4383 BasicBlock::iterator InsertBefore)
4384 : CastInst(Ty, SIToFP, S, Name, InsertBefore) {
4385 assert(castIsValid(getOpcode(), S, Ty) && "Illegal SIToFP");
4386}
4387
4389 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
4390) : CastInst(Ty, SIToFP, S, Name, InsertBefore) {
4391 assert(castIsValid(getOpcode(), S, Ty) && "Illegal SIToFP");
4392}
4393
4395 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
4396) : CastInst(Ty, SIToFP, S, Name, InsertAtEnd) {
4397 assert(castIsValid(getOpcode(), S, Ty) && "Illegal SIToFP");
4398}
4399
4401 BasicBlock::iterator InsertBefore)
4402 : CastInst(Ty, FPToUI, S, Name, InsertBefore) {
4403 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPToUI");
4404}
4405
4407 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
4408) : CastInst(Ty, FPToUI, S, Name, InsertBefore) {
4409 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPToUI");
4410}
4411
4413 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
4414) : CastInst(Ty, FPToUI, S, Name, InsertAtEnd) {
4415 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPToUI");
4416}
4417
4419 BasicBlock::iterator InsertBefore)
4420 : CastInst(Ty, FPToSI, S, Name, InsertBefore) {
4421 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPToSI");
4422}
4423
4425 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
4426) : CastInst(Ty, FPToSI, S, Name, InsertBefore) {
4427 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPToSI");
4428}
4429
4431 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
4432) : CastInst(Ty, FPToSI, S, Name, InsertAtEnd) {
4433 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPToSI");
4434}
4435
4437 BasicBlock::iterator InsertBefore)
4438 : CastInst(Ty, PtrToInt, S, Name, InsertBefore) {
4439 assert(castIsValid(getOpcode(), S, Ty) && "Illegal PtrToInt");
4440}
4441
4443 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
4444) : CastInst(Ty, PtrToInt, S, Name, InsertBefore) {
4445 assert(castIsValid(getOpcode(), S, Ty) && "Illegal PtrToInt");
4446}
4447
4449 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
4450) : CastInst(Ty, PtrToInt, S, Name, InsertAtEnd) {
4451 assert(castIsValid(getOpcode(), S, Ty) && "Illegal PtrToInt");
4452}
4453
4455 BasicBlock::iterator InsertBefore)
4456 : CastInst(Ty, IntToPtr, S, Name, InsertBefore) {
4457 assert(castIsValid(getOpcode(), S, Ty) && "Illegal IntToPtr");
4458}
4459
4461 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
4462) : CastInst(Ty, IntToPtr, S, Name, InsertBefore) {
4463 assert(castIsValid(getOpcode(), S, Ty) && "Illegal IntToPtr");
4464}
4465
4467 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
4468) : CastInst(Ty, IntToPtr, S, Name, InsertAtEnd) {
4469 assert(castIsValid(getOpcode(), S, Ty) && "Illegal IntToPtr");
4470}
4471
4473 BasicBlock::iterator InsertBefore)
4474 : CastInst(Ty, BitCast, S, Name, InsertBefore) {
4475 assert(castIsValid(getOpcode(), S, Ty) && "Illegal BitCast");
4476}
4477
4479 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
4480) : CastInst(Ty, BitCast, S, Name, InsertBefore) {
4481 assert(castIsValid(getOpcode(), S, Ty) && "Illegal BitCast");
4482}
4483
4485 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
4486) : CastInst(Ty, BitCast, S, Name, InsertAtEnd) {
4487 assert(castIsValid(getOpcode(), S, Ty) && "Illegal BitCast");
4488}
4489
4491 BasicBlock::iterator InsertBefore)
4492 : CastInst(Ty, AddrSpaceCast, S, Name, InsertBefore) {
4493 assert(castIsValid(getOpcode(), S, Ty) && "Illegal AddrSpaceCast");
4494}
4495
4497 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
4498) : CastInst(Ty, AddrSpaceCast, S, Name, InsertBefore) {
4499 assert(castIsValid(getOpcode(), S, Ty) && "Illegal AddrSpaceCast");
4500}
4501
4503 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
4504) : CastInst(Ty, AddrSpaceCast, S, Name, InsertAtEnd) {
4505 assert(castIsValid(getOpcode(), S, Ty) && "Illegal AddrSpaceCast");
4506}
4507
4508//===----------------------------------------------------------------------===//
4509// CmpInst Classes
4510//===----------------------------------------------------------------------===//
4511
4513 Value *RHS, const Twine &Name,
4514 BasicBlock::iterator InsertBefore, Instruction *FlagsSource)
4515 : Instruction(ty, op, OperandTraits<CmpInst>::op_begin(this),
4516 OperandTraits<CmpInst>::operands(this), InsertBefore) {
4517 Op<0>() = LHS;
4518 Op<1>() = RHS;
4519 setPredicate((Predicate)predicate);
4520 setName(Name);
4521 if (FlagsSource)
4522 copyIRFlags(FlagsSource);
4523}
4524
4526 Value *RHS, const Twine &Name, Instruction *InsertBefore,
4527 Instruction *FlagsSource)
4528 : Instruction(ty, op,
4529 OperandTraits<CmpInst>::op_begin(this),
4530 OperandTraits<CmpInst>::operands(this),
4531 InsertBefore) {
4532 Op<0>() = LHS;
4533 Op<1>() = RHS;
4534 setPredicate((Predicate)predicate);
4535 setName(Name);
4536 if (FlagsSource)
4537 copyIRFlags(FlagsSource);
4538}
4539
4541 Value *RHS, const Twine &Name, BasicBlock *InsertAtEnd)
4542 : Instruction(ty, op,
4543 OperandTraits<CmpInst>::op_begin(this),
4544 OperandTraits<CmpInst>::operands(this),
4545 InsertAtEnd) {
4546 Op<0>() = LHS;
4547 Op<1>() = RHS;
4548 setPredicate((Predicate)predicate);
4549 setName(Name);
4550}
4551
4552CmpInst *
4554 const Twine &Name, BasicBlock::iterator InsertBefore) {
4555 if (Op == Instruction::ICmp) {
4556 return new ICmpInst(InsertBefore, CmpInst::Predicate(predicate),
4557 S1, S2, Name);
4558 }
4559
4560 return new FCmpInst(InsertBefore, CmpInst::Predicate(predicate),
4561 S1, S2, Name);
4562}
4563
4564CmpInst *
4566 const Twine &Name, Instruction *InsertBefore) {
4567 if (Op == Instruction::ICmp) {
4568 if (InsertBefore)
4569 return new ICmpInst(InsertBefore, CmpInst::Predicate(predicate),
4570 S1, S2, Name);
4571 else
4572 return new ICmpInst(CmpInst::Predicate(predicate),
4573 S1, S2, Name);
4574 }
4575
4576 if (InsertBefore)
4577 return new FCmpInst(InsertBefore, CmpInst::Predicate(predicate),
4578 S1, S2, Name);
4579 else
4580 return new FCmpInst(CmpInst::Predicate(predicate),
4581 S1, S2, Name);
4582}
4583
4584CmpInst *
4586 const Twine &Name, BasicBlock *InsertAtEnd) {
4587 if (Op == Instruction::ICmp) {
4588 return new ICmpInst(InsertAtEnd, CmpInst::Predicate(predicate),
4589 S1, S2, Name);
4590 }
4591 return new FCmpInst(InsertAtEnd, CmpInst::Predicate(predicate),
4592 S1, S2, Name);
4593}
4594
4596 Value *S2,
4597 const Instruction *FlagsSource,
4598 const Twine &Name,
4599 Instruction *InsertBefore) {
4600 CmpInst *Inst = Create(Op, Pred, S1, S2, Name, InsertBefore);
4601 Inst->copyIRFlags(FlagsSource);
4602 return Inst;
4603}
4604
4606 if (ICmpInst *IC = dyn_cast<ICmpInst>(this))
4607 IC->swapOperands();
4608 else
4609 cast<FCmpInst>(this)->swapOperands();
4610}
4611
4613 if (const ICmpInst *IC = dyn_cast<ICmpInst>(this))
4614 return IC->isCommutative();
4615 return cast<FCmpInst>(this)->isCommutative();
4616}
4617
4620 return ICmpInst::isEquality(P);
4622 return FCmpInst::isEquality(P);
4623 llvm_unreachable("Unsupported predicate kind");
4624}
4625
4627 switch (pred) {
4628 default: llvm_unreachable("Unknown cmp predicate!");
4629 case ICMP_EQ: return ICMP_NE;
4630 case ICMP_NE: return ICMP_EQ;
4631 case ICMP_UGT: return ICMP_ULE;
4632 case ICMP_ULT: return ICMP_UGE;
4633 case ICMP_UGE: return ICMP_ULT;
4634 case ICMP_ULE: return ICMP_UGT;
4635 case ICMP_SGT: return ICMP_SLE;
4636 case ICMP_SLT: return ICMP_SGE;
4637 case ICMP_SGE: return ICMP_SLT;
4638 case ICMP_SLE: return ICMP_SGT;
4639
4640 case FCMP_OEQ: return FCMP_UNE;
4641 case FCMP_ONE: return FCMP_UEQ;
4642 case FCMP_OGT: return FCMP_ULE;
4643 case FCMP_OLT: return FCMP_UGE;
4644 case FCMP_OGE: return FCMP_ULT;
4645 case FCMP_OLE: return FCMP_UGT;
4646 case FCMP_UEQ: return FCMP_ONE;
4647 case FCMP_UNE: return FCMP_OEQ;
4648 case FCMP_UGT: return FCMP_OLE;
4649 case FCMP_ULT: return FCMP_OGE;
4650 case FCMP_UGE: return FCMP_OLT;
4651 case FCMP_ULE: return FCMP_OGT;
4652 case FCMP_ORD: return FCMP_UNO;
4653 case FCMP_UNO: return FCMP_ORD;
4654 case FCMP_TRUE: return FCMP_FALSE;
4655 case FCMP_FALSE: return FCMP_TRUE;
4656 }
4657}
4658
4660 switch (Pred) {
4661 default: return "unknown";
4662 case FCmpInst::FCMP_FALSE: return "false";
4663 case FCmpInst::FCMP_OEQ: return "oeq";
4664 case FCmpInst::FCMP_OGT: return "ogt";
4665 case FCmpInst::FCMP_OGE: return "oge";
4666 case FCmpInst::FCMP_OLT: return "olt";
4667 case FCmpInst::FCMP_OLE: return "ole";
4668 case FCmpInst::FCMP_ONE: return "one";
4669 case FCmpInst::FCMP_ORD: return "ord";
4670 case FCmpInst::FCMP_UNO: return "uno";
4671 case FCmpInst::FCMP_UEQ: return "ueq";
4672 case FCmpInst::FCMP_UGT: return "ugt";
4673 case FCmpInst::FCMP_UGE: return "uge";
4674 case FCmpInst::FCMP_ULT: return "ult";
4675 case FCmpInst::FCMP_ULE: return "ule";
4676 case FCmpInst::FCMP_UNE: return "une";
4677 case FCmpInst::FCMP_TRUE: return "true";
4678 case ICmpInst::ICMP_EQ: return "eq";
4679 case ICmpInst::ICMP_NE: return "ne";
4680 case ICmpInst::ICMP_SGT: return "sgt";
4681 case ICmpInst::ICMP_SGE: return "sge";
4682 case ICmpInst::ICMP_SLT: return "slt";
4683 case ICmpInst::ICMP_SLE: return "sle";
4684 case ICmpInst::ICMP_UGT: return "ugt";
4685 case ICmpInst::ICMP_UGE: return "uge";
4686 case ICmpInst::ICMP_ULT: return "ult";
4687 case ICmpInst::ICMP_ULE: return "ule";
4688 }
4689}
4690
4693 return OS;
4694}
4695
4697 switch (pred) {
4698 default: llvm_unreachable("Unknown icmp predicate!");
4699 case ICMP_EQ: case ICMP_NE:
4700 case ICMP_SGT: case ICMP_SLT: case ICMP_SGE: case ICMP_SLE:
4701 return pred;
4702 case ICMP_UGT: return ICMP_SGT;
4703 case ICMP_ULT: return ICMP_SLT;
4704 case ICMP_UGE: return ICMP_SGE;
4705 case ICMP_ULE: return ICMP_SLE;
4706 }
4707}
4708
4710 switch (pred) {
4711 default: llvm_unreachable("Unknown icmp predicate!");
4712 case ICMP_EQ: case ICMP_NE:
4713 case ICMP_UGT: case ICMP_ULT: case ICMP_UGE: case ICMP_ULE:
4714 return pred;
4715 case ICMP_SGT: return ICMP_UGT;
4716 case ICMP_SLT: return ICMP_ULT;
4717 case ICMP_SGE: return ICMP_UGE;
4718 case ICMP_SLE: return ICMP_ULE;
4719 }
4720}
4721
4723 switch (pred) {
4724 default: llvm_unreachable("Unknown cmp predicate!");
4725 case ICMP_EQ: case ICMP_NE:
4726 return pred;
4727 case ICMP_SGT: return ICMP_SLT;
4728 case ICMP_SLT: return ICMP_SGT;
4729 case ICMP_SGE: return ICMP_SLE;
4730 case ICMP_SLE: return ICMP_SGE;
4731 case ICMP_UGT: return ICMP_ULT;
4732 case ICMP_ULT: return ICMP_UGT;
4733 case ICMP_UGE: return ICMP_ULE;
4734 case ICMP_ULE: return ICMP_UGE;
4735
4736 case FCMP_FALSE: case FCMP_TRUE:
4737 case FCMP_OEQ: case FCMP_ONE:
4738 case FCMP_UEQ: case FCMP_UNE:
4739 case FCMP_ORD: case FCMP_UNO:
4740 return pred;
4741 case FCMP_OGT: return FCMP_OLT;
4742 case FCMP_OLT: return FCMP_OGT;
4743 case FCMP_OGE: return FCMP_OLE;
4744 case FCMP_OLE: return FCMP_OGE;
4745 case FCMP_UGT: return FCMP_ULT;
4746 case FCMP_ULT: return FCMP_UGT;
4747 case FCMP_UGE: return FCMP_ULE;
4748 case FCMP_ULE: return FCMP_UGE;
4749 }
4750}
4751
4753 switch (pred) {
4754 case ICMP_SGE:
4755 case ICMP_SLE:
4756 case ICMP_UGE:
4757 case ICMP_ULE:
4758 case FCMP_OGE:
4759 case FCMP_OLE:
4760 case FCMP_UGE:
4761 case FCMP_ULE:
4762 return true;
4763 default:
4764 return false;
4765 }
4766}
4767
4769 switch (pred) {
4770 case ICMP_SGT:
4771 case ICMP_SLT:
4772 case ICMP_UGT:
4773 case ICMP_ULT:
4774 case FCMP_OGT:
4775 case FCMP_OLT:
4776 case FCMP_UGT:
4777 case FCMP_ULT:
4778 return true;
4779 default:
4780 return false;
4781 }
4782}
4783
4785 switch (pred) {
4786 case ICMP_SGE:
4787 return ICMP_SGT;
4788 case ICMP_SLE:
4789 return ICMP_SLT;
4790 case ICMP_UGE:
4791 return ICMP_UGT;
4792 case ICMP_ULE:
4793 return ICMP_ULT;
4794 case FCMP_OGE:
4795 return FCMP_OGT;
4796 case FCMP_OLE:
4797 return FCMP_OLT;
4798 case FCMP_UGE:
4799 return FCMP_UGT;
4800 case FCMP_ULE:
4801 return FCMP_ULT;
4802 default:
4803 return pred;
4804 }
4805}
4806
4808 switch (pred) {
4809 case ICMP_SGT:
4810 return ICMP_SGE;
4811 case ICMP_SLT:
4812 return ICMP_SLE;
4813 case ICMP_UGT:
4814 return ICMP_UGE;
4815 case ICMP_ULT:
4816 return ICMP_ULE;
4817 case FCMP_OGT:
4818 return FCMP_OGE;
4819 case FCMP_OLT:
4820 return FCMP_OLE;
4821 case FCMP_UGT:
4822 return FCMP_UGE;
4823 case FCMP_ULT:
4824 return FCMP_ULE;
4825 default:
4826 return pred;
4827 }
4828}
4829
4831 assert(CmpInst::isRelational(pred) && "Call only with relational predicate!");
4832
4836 return getStrictPredicate(pred);
4837
4838 llvm_unreachable("Unknown predicate!");
4839}
4840
4842 assert(CmpInst::isUnsigned(pred) && "Call only with unsigned predicates!");
4843
4844 switch (pred) {
4845 default:
4846 llvm_unreachable("Unknown predicate!");
4847 case CmpInst::ICMP_ULT:
4848 return CmpInst::ICMP_SLT;
4849 case CmpInst::ICMP_ULE:
4850 return CmpInst::ICMP_SLE;
4851 case CmpInst::ICMP_UGT:
4852 return CmpInst::ICMP_SGT;
4853 case CmpInst::ICMP_UGE:
4854 return CmpInst::ICMP_SGE;
4855 }
4856}
4857
4859 assert(CmpInst::isSigned(pred) && "Call only with signed predicates!");
4860
4861 switch (pred) {
4862 default:
4863 llvm_unreachable("Unknown predicate!");
4864 case CmpInst::ICMP_SLT:
4865 return CmpInst::ICMP_ULT;
4866 case CmpInst::ICMP_SLE:
4867 return CmpInst::ICMP_ULE;
4868 case CmpInst::ICMP_SGT:
4869 return CmpInst::ICMP_UGT;
4870 case CmpInst::ICMP_SGE:
4871 return CmpInst::ICMP_UGE;
4872 }
4873}
4874
4876 switch (predicate) {
4877 default: return false;
4879 case ICmpInst::ICMP_UGE: return true;
4880 }
4881}
4882
4884 switch (predicate) {
4885 default: return false;
4887 case ICmpInst::ICMP_SGE: return true;
4888 }
4889}
4890
4891bool ICmpInst::compare(const APInt &LHS, const APInt &RHS,
4892 ICmpInst::Predicate Pred) {
4893 assert(ICmpInst::isIntPredicate(Pred) && "Only for integer predicates!");
4894 switch (Pred) {
4896 return LHS.eq(RHS);
4898 return LHS.ne(RHS);
4900 return LHS.ugt(RHS);
4902 return LHS.uge(RHS);
4904 return LHS.ult(RHS);
4906 return LHS.ule(RHS);
4908 return LHS.sgt(RHS);
4910 return LHS.sge(RHS);
4912 return LHS.slt(RHS);
4914 return LHS.sle(RHS);
4915 default:
4916 llvm_unreachable("Unexpected non-integer predicate.");
4917 };
4918}
4919
4920bool FCmpInst::compare(const APFloat &LHS, const APFloat &RHS,
4921 FCmpInst::Predicate Pred) {
4922 APFloat::cmpResult R = LHS.compare(RHS);
4923 switch (Pred) {
4924 default:
4925 llvm_unreachable("Invalid FCmp Predicate");
4927 return false;
4929 return true;
4930 case FCmpInst::FCMP_UNO:
4931 return R == APFloat::cmpUnordered;
4932 case FCmpInst::FCMP_ORD:
4933 return R != APFloat::cmpUnordered;
4934 case FCmpInst::FCMP_UEQ:
4935 return R == APFloat::cmpUnordered || R == APFloat::cmpEqual;
4936 case FCmpInst::FCMP_OEQ:
4937 return R == APFloat::cmpEqual;
4938 case FCmpInst::FCMP_UNE:
4939 return R != APFloat::cmpEqual;
4940 case FCmpInst::FCMP_ONE:
4942 case FCmpInst::FCMP_ULT:
4943 return R == APFloat::cmpUnordered || R == APFloat::cmpLessThan;
4944 case FCmpInst::FCMP_OLT:
4945 return R == APFloat::cmpLessThan;
4946 case FCmpInst::FCMP_UGT:
4948 case FCmpInst::FCMP_OGT:
4949 return R == APFloat::cmpGreaterThan;
4950 case FCmpInst::FCMP_ULE:
4951 return R != APFloat::cmpGreaterThan;
4952 case FCmpInst::FCMP_OLE:
4953 return R == APFloat::cmpLessThan || R == APFloat::cmpEqual;
4954 case FCmpInst::FCMP_UGE:
4955 return R != APFloat::cmpLessThan;
4956 case FCmpInst::FCMP_OGE:
4957 return R == APFloat::cmpGreaterThan || R == APFloat::cmpEqual;
4958 }
4959}
4960
4963 "Call only with non-equality predicates!");
4964
4965 if (isSigned(pred))
4966 return getUnsignedPredicate(pred);
4967 if (isUnsigned(pred))
4968 return getSignedPredicate(pred);
4969
4970 llvm_unreachable("Unknown predicate!");
4971}
4972
4974 switch (predicate) {
4975 default: return false;
4978 case FCmpInst::FCMP_ORD: return true;
4979 }
4980}
4981
4983 switch (predicate) {
4984 default: return false;
4987 case FCmpInst::FCMP_UNO: return true;
4988 }
4989}
4990
4992 switch(predicate) {
4993 default: return false;
4994 case ICMP_EQ: case ICMP_UGE: case ICMP_ULE: case ICMP_SGE: case ICMP_SLE:
4995 case FCMP_TRUE: case FCMP_UEQ: case FCMP_UGE: case FCMP_ULE: return true;
4996 }
4997}
4998
5000 switch(predicate) {
5001 case ICMP_NE: case ICMP_UGT: case ICMP_ULT: case ICMP_SGT: case ICMP_SLT:
5002 case FCMP_FALSE: case FCMP_ONE: case FCMP_OGT: case FCMP_OLT: return true;
5003 default: return false;
5004 }
5005}
5006
5008 // If the predicates match, then we know the first condition implies the
5009 // second is true.
5010 if (Pred1 == Pred2)
5011 return true;
5012
5013 switch (Pred1) {
5014 default:
5015 break;
5016 case ICMP_EQ:
5017 // A == B implies A >=u B, A <=u B, A >=s B, and A <=s B are true.
5018 return Pred2 == ICMP_UGE || Pred2 == ICMP_ULE || Pred2 == ICMP_SGE ||
5019 Pred2 == ICMP_SLE;
5020 case ICMP_UGT: // A >u B implies A != B and A >=u B are true.
5021 return Pred2 == ICMP_NE || Pred2 == ICMP_UGE;
5022 case ICMP_ULT: // A <u B implies A != B and A <=u B are true.
5023 return Pred2 == ICMP_NE || Pred2 == ICMP_ULE;
5024 case ICMP_SGT: // A >s B implies A != B and A >=s B are true.
5025 return Pred2 == ICMP_NE || Pred2 == ICMP_SGE;
5026 case ICMP_SLT: // A <s B implies A != B and A <=s B are true.
5027 return Pred2 == ICMP_NE || Pred2 == ICMP_SLE;
5028 }
5029 return false;
5030}
5031
5033 return isImpliedTrueByMatchingCmp(Pred1, getInversePredicate(Pred2));
5034}
5035
5036//===----------------------------------------------------------------------===//
5037// SwitchInst Implementation
5038//===----------------------------------------------------------------------===//
5039
5040void SwitchInst::init(Value *Value, BasicBlock *Default, unsigned NumReserved) {
5041 assert(Value && Default && NumReserved);
5042 ReservedSpace = NumReserved;
5044 allocHungoffUses(ReservedSpace);
5045
5046 Op<0>() = Value;
5047 Op<1>() = Default;
5048}
5049
5050/// SwitchInst ctor - Create a new switch instruction, specifying a value to
5051/// switch on and a default destination. The number of additional cases can
5052/// be specified here to make memory allocation more efficient. This
5053/// constructor can also autoinsert before another instruction.
5054SwitchInst::SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases,
5055 BasicBlock::iterator InsertBefore)
5056 : Instruction(Type::getVoidTy(Value->getContext()), Instruction::Switch,
5057 nullptr, 0, InsertBefore) {
5058 init(Value, Default, 2 + NumCases * 2);
5059}
5060
5061/// SwitchInst ctor - Create a new switch instruction, specifying a value to
5062/// switch on and a default destination. The number of additional cases can
5063/// be specified here to make memory allocation more efficient. This
5064/// constructor can also autoinsert before another instruction.
5065SwitchInst::SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases,
5066 Instruction *InsertBefore)
5067 : Instruction(Type::getVoidTy(Value->getContext()), Instruction::Switch,
5068 nullptr, 0, InsertBefore) {
5069 init(Value, Default, 2+NumCases*2);
5070}
5071
5072/// SwitchInst ctor - Create a new switch instruction, specifying a value to
5073/// switch on and a default destination. The number of additional cases can
5074/// be specified here to make memory allocation more efficient. This
5075/// constructor also autoinserts at the end of the specified BasicBlock.
5076SwitchInst::SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases,
5077 BasicBlock *InsertAtEnd)
5078 : Instruction(Type::getVoidTy(Value->getContext()), Instruction::Switch,
5079 nullptr, 0, InsertAtEnd) {
5080 init(Value, Default, 2+NumCases*2);
5081}
5082
5083SwitchInst::SwitchInst(const SwitchInst &SI)
5084 : Instruction(SI.getType(), Instruction::Switch, nullptr, 0) {
5085 init(SI.getCondition(), SI.getDefaultDest(), SI.getNumOperands());
5086 setNumHungOffUseOperands(SI.getNumOperands());
5087 Use *OL = getOperandList();
5088 const Use *InOL = SI.getOperandList();
5089 for (unsigned i = 2, E = SI.getNumOperands(); i != E; i += 2) {
5090 OL[i] = InOL[i];
5091 OL[i+1] = InOL[i+1];
5092 }
5093 SubclassOptionalData = SI.SubclassOptionalData;
5094}
5095
5096/// addCase - Add an entry to the switch instruction...
5097///
5099 unsigned NewCaseIdx = getNumCases();
5100 unsigned OpNo = getNumOperands();
5101 if (OpNo+2 > ReservedSpace)
5102 growOperands(); // Get more space!
5103 // Initialize some new operands.
5104 assert(OpNo+1 < ReservedSpace && "Growing didn't work!");
5106 CaseHandle Case(this, NewCaseIdx);
5107 Case.setValue(OnVal);
5108 Case.setSuccessor(Dest);
5109}
5110
5111/// removeCase - This method removes the specified case and its successor
5112/// from the switch instruction.
5114 unsigned idx = I->getCaseIndex();
5115
5116 assert(2 + idx*2 < getNumOperands() && "Case index out of range!!!");
5117
5118 unsigned NumOps = getNumOperands();
5119 Use *OL = getOperandList();
5120
5121 // Overwrite this case with the end of the list.
5122 if (2 + (idx + 1) * 2 != NumOps) {
5123 OL[2 + idx * 2] = OL[NumOps - 2];
5124 OL[2 + idx * 2 + 1] = OL[NumOps - 1];
5125 }
5126
5127 // Nuke the last value.
5128 OL[NumOps-2].set(nullptr);
5129 OL[NumOps-2+1].set(nullptr);
5130 setNumHungOffUseOperands(NumOps-2);
5131
5132 return CaseIt(this, idx);
5133}
5134
5135/// growOperands - grow operands - This grows the operand list in response
5136/// to a push_back style of operation. This grows the number of ops by 3 times.
5137///
5138void SwitchInst::growOperands() {
5139 unsigned e = getNumOperands();
5140 unsigned NumOps = e*3;
5141
5142 ReservedSpace = NumOps;
5143 growHungoffUses(ReservedSpace);
5144}
5145
5147 assert(Changed && "called only if metadata has changed");
5148
5149 if (!Weights)
5150 return nullptr;
5151
5152 assert(SI.getNumSuccessors() == Weights->size() &&
5153 "num of prof branch_weights must accord with num of successors");
5154
5155 bool AllZeroes = all_of(*Weights, [](uint32_t W) { return W == 0; });
5156
5157 if (AllZeroes || Weights->size() < 2)
5158 return nullptr;
5159
5160 return MDBuilder(SI.getParent()->getContext()).createBranchWeights(*Weights);
5161}
5162
5164 MDNode *ProfileData = getBranchWeightMDNode(SI);
5165 if (!ProfileData)
5166 return;
5167
5168 if (ProfileData->getNumOperands() != SI.getNumSuccessors() + 1) {
5169 llvm_unreachable("number of prof branch_weights metadata operands does "
5170 "not correspond to number of succesors");
5171 }
5172
5174 if (!extractBranchWeights(ProfileData, Weights))
5175 return;
5176 this->Weights = std::move(Weights);
5177}
5178
5181 if (Weights) {
5182 assert(SI.getNumSuccessors() == Weights->size() &&
5183 "num of prof branch_weights must accord with num of successors");
5184 Changed = true;
5185 // Copy the last case to the place of the removed one and shrink.
5186 // This is tightly coupled with the way SwitchInst::removeCase() removes
5187 // the cases in SwitchInst::removeCase(CaseIt).
5188 (*Weights)[I->getCaseIndex() + 1] = Weights->back();
5189 Weights->pop_back();
5190 }
5191 return SI.removeCase(I);
5192}
5193
5195 ConstantInt *OnVal, BasicBlock *Dest,
5197 SI.addCase(OnVal, Dest);
5198
5199 if (!Weights && W && *W) {
5200 Changed = true;
5201 Weights = SmallVector<uint32_t, 8>(SI.getNumSuccessors(), 0);
5202 (*Weights)[SI.getNumSuccessors() - 1] = *W;
5203 } else if (Weights) {
5204 Changed = true;
5205 Weights->push_back(W.value_or(0));
5206 }
5207 if (Weights)
5208 assert(SI.getNumSuccessors() == Weights->size() &&
5209 "num of prof branch_weights must accord with num of successors");
5210}
5211
5214 // Instruction is erased. Mark as unchanged to not touch it in the destructor.
5215 Changed = false;
5216 if (Weights)
5217 Weights->resize(0);
5218 return SI.eraseFromParent();
5219}
5220
5223 if (!Weights)
5224 return std::nullopt;
5225 return (*Weights)[idx];
5226}
5227
5230 if (!W)
5231 return;
5232
5233 if (!Weights && *W)
5234 Weights = SmallVector<uint32_t, 8>(SI.getNumSuccessors(), 0);
5235
5236 if (Weights) {
5237 auto &OldW = (*Weights)[idx];
5238 if (*W != OldW) {
5239 Changed = true;
5240 OldW = *W;
5241 }
5242 }
5243}
5244
5247 unsigned idx) {
5248 if (MDNode *ProfileData = getBranchWeightMDNode(SI))
5249 if (ProfileData->getNumOperands() == SI.getNumSuccessors() + 1)
5250 return mdconst::extract<ConstantInt>(ProfileData->getOperand(idx + 1))
5251 ->getValue()
5252 .getZExtValue();
5253
5254 return std::nullopt;
5255}
5256
5257//===----------------------------------------------------------------------===//
5258// IndirectBrInst Implementation
5259//===----------------------------------------------------------------------===//
5260
5261void IndirectBrInst::init(Value *Address, unsigned NumDests) {
5262 assert(Address && Address->getType()->isPointerTy() &&
5263 "Address of indirectbr must be a pointer");
5264 ReservedSpace = 1+NumDests;
5266 allocHungoffUses(ReservedSpace);
5267
5268 Op<0>() = Address;
5269}
5270
5271
5272/// growOperands - grow operands - This grows the operand list in response
5273/// to a push_back style of operation. This grows the number of ops by 2 times.
5274///
5275void IndirectBrInst::growOperands() {
5276 unsigned e = getNumOperands();
5277 unsigned NumOps = e*2;
5278
5279 ReservedSpace = NumOps;
5280 growHungoffUses(ReservedSpace);
5281}
5282
5283IndirectBrInst::IndirectBrInst(Value *Address, unsigned NumCases,
5284 BasicBlock::iterator InsertBefore)
5285 : Instruction(Type::getVoidTy(Address->getContext()),
5286 Instruction::IndirectBr, nullptr, 0, InsertBefore) {
5287 init(Address, NumCases);
5288}
5289
5290IndirectBrInst::IndirectBrInst(Value *Address, unsigned NumCases,
5291 Instruction *InsertBefore)
5292 : Instruction(Type::getVoidTy(Address->getContext()),
5293 Instruction::IndirectBr, nullptr, 0, InsertBefore) {
5294 init(Address, NumCases);
5295}
5296
5297IndirectBrInst::IndirectBrInst(Value *Address, unsigned NumCases,
5298 BasicBlock *InsertAtEnd)
5299 : Instruction(Type::getVoidTy(Address->getContext()),
5300 Instruction::IndirectBr, nullptr, 0, InsertAtEnd) {
5301 init(Address, NumCases);
5302}
5303
5304IndirectBrInst::IndirectBrInst(const IndirectBrInst &IBI)
5305 : Instruction(Type::getVoidTy(IBI.getContext()), Instruction::IndirectBr,
5306 nullptr, IBI.getNumOperands()) {
5307 allocHungoffUses(IBI.getNumOperands());
5308 Use *OL = getOperandList();
5309 const Use *InOL = IBI.getOperandList();
5310 for (unsigned i = 0, E = IBI.getNumOperands(); i != E; ++i)
5311 OL[i] = InOL[i];
5312 SubclassOptionalData = IBI.SubclassOptionalData;
5313}
5314
5315/// addDestination - Add a destination.
5316///
5318 unsigned OpNo = getNumOperands();
5319 if (OpNo+1 > ReservedSpace)
5320 growOperands(); // Get more space!
5321 // Initialize some new operands.
5322 assert(OpNo < ReservedSpace && "Growing didn't work!");
5324 getOperandList()[OpNo] = DestBB;
5325}
5326
5327/// removeDestination - This method removes the specified successor from the
5328/// indirectbr instruction.
5330 assert(idx < getNumOperands()-1 && "Successor index out of range!");
5331
5332 unsigned NumOps = getNumOperands();
5333 Use *OL = getOperandList();
5334
5335 // Replace this value with the last one.
5336 OL[idx+1] = OL[NumOps-1];
5337
5338 // Nuke the last value.
5339 OL[NumOps-1].set(nullptr);
5340 setNumHungOffUseOperands(NumOps-1);
5341}
5342
5343//===----------------------------------------------------------------------===//
5344// FreezeInst Implementation
5345//===----------------------------------------------------------------------===//
5346
5348 BasicBlock::iterator InsertBefore)
5349 : UnaryInstruction(S->getType(), Freeze, S, InsertBefore) {
5350 setName(Name);
5351}
5352
5354 const Twine &Name, Instruction *InsertBefore)
5355 : UnaryInstruction(S->getType(), Freeze, S, InsertBefore) {
5356 setName(Name);
5357}
5358
5360 const Twine &Name, BasicBlock *InsertAtEnd)
5361 : UnaryInstruction(S->getType(), Freeze, S, InsertAtEnd) {
5362 setName(Name);
5363}
5364
5365//===----------------------------------------------------------------------===//
5366// cloneImpl() implementations
5367//===----------------------------------------------------------------------===//
5368
5369// Define these methods here so vtables don't get emitted into every translation
5370// unit that uses these classes.
5371
5373 return new (getNumOperands()) GetElementPtrInst(*this);
5374}
5375
5377 return Create(getOpcode(), Op<0>());
5378}
5379
5381 return Create(getOpcode(), Op<0>(), Op<1>());
5382}
5383
5385 return new FCmpInst(getPredicate(), Op<0>(), Op<1>());
5386}
5387
5389 return new ICmpInst(getPredicate(), Op<0>(), Op<1>());
5390}
5391
5393 return new ExtractValueInst(*this);
5394}
5395
5397 return new InsertValueInst(*this);
5398}
5399
5402 getOperand(0), getAlign());
5403 Result->setUsedWithInAlloca(isUsedWithInAlloca());
5404 Result->setSwiftError(isSwiftError());
5405 return Result;
5406}
5407
5409 return new LoadInst(getType(), getOperand(0), Twine(), isVolatile(),
5411}
5412
5414 return new StoreInst(getOperand(0), getOperand(1), isVolatile(), getAlign(),
5416}
5417
5422 Result->setVolatile(isVolatile());
5423 Result->setWeak(isWeak());
5424 return Result;
5425}
5426
5428 AtomicRMWInst *Result =
5431 Result->setVolatile(isVolatile());
5432 return Result;
5433}
5434
5436 return new FenceInst(getContext(), getOrdering(), getSyncScopeID());
5437}
5438
5440 return new TruncInst(getOperand(0), getType());
5441}
5442
5444 return new ZExtInst(getOperand(0), getType());
5445}
5446
5448 return new SExtInst(getOperand(0), getType());
5449}
5450
5452 return new FPTruncInst(getOperand(0), getType());
5453}
5454
5456 return new FPExtInst(getOperand(0), getType());
5457}
5458
5460 return new UIToFPInst(getOperand(0), getType());
5461}
5462
5464 return new SIToFPInst(getOperand(0), getType());
5465}
5466
5468 return new FPToUIInst(getOperand(0), getType());
5469}
5470
5472 return new FPToSIInst(getOperand(0), getType());
5473}
5474
5476 return new PtrToIntInst(getOperand(0), getType());
5477}
5478
5480 return new IntToPtrInst(getOperand(0), getType());
5481}
5482
5484 return new BitCastInst(getOperand(0), getType());
5485}
5486
5488 return new AddrSpaceCastInst(getOperand(0), getType());
5489}
5490
5492 if (hasOperandBundles()) {
5493 unsigned DescriptorBytes = getNumOperandBundles() * sizeof(BundleOpInfo);
5494 return new(getNumOperands(), DescriptorBytes) CallInst(*this);
5495 }
5496 return new(getNumOperands()) CallInst(*this);
5497}
5498
5501}
5502
5504 return new VAArgInst(getOperand(0), getType());
5505}
5506
5509}
5510
5513}
5514
5517}
5518
5519PHINode *PHINode::cloneImpl() const { return new PHINode(*this); }
5520
5522 return new LandingPadInst(*this);
5523}
5524
5526 return new(getNumOperands()) ReturnInst(*this);
5527}
5528
5530 return new(getNumOperands()) BranchInst(*this);
5531}
5532
5533SwitchInst *SwitchInst::cloneImpl() const { return new SwitchInst(*this); }
5534
5536 return new IndirectBrInst(*this);
5537}
5538
5540 if (hasOperandBundles()) {
5541 unsigned DescriptorBytes = getNumOperandBundles() * sizeof(BundleOpInfo);
5542 return new(getNumOperands(), DescriptorBytes) InvokeInst(*this);
5543 }
5544 return new(getNumOperands()) InvokeInst(*this);
5545}
5546
5548 if (hasOperandBundles()) {
5549 unsigned DescriptorBytes = getNumOperandBundles() * sizeof(BundleOpInfo);
5550 return new (getNumOperands(), DescriptorBytes) CallBrInst(*this);
5551 }
5552 return new (getNumOperands()) CallBrInst(*this);
5553}
5554
5555ResumeInst *ResumeInst::cloneImpl() const { return new (1) ResumeInst(*this); }
5556
5558 return new (getNumOperands()) CleanupReturnInst(*this);
5559}
5560
5562 return new (getNumOperands()) CatchReturnInst(*this);
5563}
5564
5566 return new CatchSwitchInst(*this);
5567}
5568
5570 return new (getNumOperands()) FuncletPadInst(*this);
5571}
5572
5575 return new UnreachableInst(Context);
5576}
5577
5579 return new FreezeInst(getOperand(0));
5580}
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
static const LLT S1
Rewrite undef for PHI
Atomic ordering constants.
This file contains the simple types necessary to represent the attributes associated with functions a...
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
This file contains the declarations for the subclasses of Constant, which represent the different fla...
return RetTy
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
#define LLVM_DEBUG(X)
Definition: Debug.h:101
std::string Name
uint64_t Size
bool End
Definition: ELF_riscv.cpp:480
static bool isSigned(unsigned int Opcode)
#define op(i)
hexagon gen pred
static Value * createPlaceholderForShuffleVector(Value *V)
static Align computeAllocaDefaultAlign(Type *Ty, BasicBlock *BB)
static cl::opt< bool > DisableI2pP2iOpt("disable-i2p-p2i-opt", cl::init(false), cl::desc("Disables inttoptr/ptrtoint roundtrip optimization"))
static int matchShuffleAsBitRotate(ArrayRef< int > Mask, int NumSubElts)
Try to lower a vector shuffle as a bit rotation.
static Type * getIndexedTypeInternal(Type *Ty, ArrayRef< IndexTy > IdxList)
static bool isReplicationMaskWithParams(ArrayRef< int > Mask, int ReplicationFactor, int VF)
static bool isIdentityMaskImpl(ArrayRef< int > Mask, int NumOpElts)
static bool isSingleSourceMaskImpl(ArrayRef< int > Mask, int NumOpElts)
static Value * getAISize(LLVMContext &Context, Value *Amt)
static Align computeLoadStoreDefaultAlign(Type *Ty, BasicBlock *BB)
#define F(x, y, z)
Definition: MD5.cpp:55
#define I(x, y, z)
Definition: MD5.cpp:58
This file contains the declarations for metadata subclasses.
Module.h This file contains the declarations for the Module class.
IntegerType * Int32Ty
LLVMContext & Context
#define P(N)
PowerPC Reduce CR logical Operation
This file contains the declarations for profiling metadata utility functions.
const SmallVectorImpl< MachineOperand > & Cond
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
raw_pwrite_stream & OS
This file implements the SmallBitVector class.
This file defines the SmallVector class.
static SymbolRef::Type getType(const Symbol *Sym)
Definition: TapiFile.cpp:40
@ Struct
static std::optional< unsigned > getOpcode(ArrayRef< VPValue * > Values)
Returns the opcode of Values or ~0 if they do not all agree.
Definition: VPlanSLP.cpp:191
Value * RHS
Value * LHS
float convertToFloat() const
Converts this APFloat to host float value.
Definition: APFloat.cpp:5268
Class for arbitrary precision integers.
Definition: APInt.h:76
void setBit(unsigned BitPosition)
Set the given bit to 1 whose position is given as "bitPosition".
Definition: APInt.h:1308
bool isZero() const
Determine if this value is zero, i.e. all bits are clear.
Definition: APInt.h:358
unsigned countr_zero() const
Count the number of trailing zero bits.
Definition: APInt.h:1589
unsigned countl_zero() const
The APInt version of std::countl_zero.
Definition: APInt.h:1548
static APInt getZero(unsigned numBits)
Get the '0' value for the specified bit-width.
Definition: APInt.h:178
This class represents a conversion between pointers from one address space to another.
AddrSpaceCastInst * cloneImpl() const
Clone an identical AddrSpaceCastInst.
AddrSpaceCastInst(Value *S, Type *Ty, const Twine &NameStr, BasicBlock::iterator InsertBefore)
Constructor with insert-before-instruction semantics.
an instruction to allocate memory on the stack
Definition: Instructions.h:59
std::optional< TypeSize > getAllocationSizeInBits(const DataLayout &DL) const
Get allocation size in bits.
bool isSwiftError() const
Return true if this alloca is used as a swifterror argument to a call.
Definition: Instructions.h:157
bool isStaticAlloca() const
Return true if this alloca is in the entry block of the function and is a constant size.
Align getAlign() const
Return the alignment of the memory that is being allocated by the instruction.
Definition: Instructions.h:132
AllocaInst * cloneImpl() const
AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize, const Twine &Name, BasicBlock::iterator InsertBefore)
Type * getAllocatedType() const
Return the type that is being allocated by the instruction.
Definition: Instructions.h:125
bool isUsedWithInAlloca() const
Return true if this alloca is used as an inalloca argument to a call.
Definition: Instructions.h:147
unsigned getAddressSpace() const
Return the address space for the allocation.
Definition: Instructions.h:112
std::optional< TypeSize > getAllocationSize(const DataLayout &DL) const
Get allocation size in bytes.
bool isArrayAllocation() const
Return true if there is an allocation size parameter to the allocation instruction that is not 1.
void setAlignment(Align Align)
Definition: Instructions.h:136
const Value * getArraySize() const
Get the number of elements allocated.
Definition: Instructions.h:103
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition: ArrayRef.h:41
iterator end() const
Definition: ArrayRef.h:154
size_t size() const
size - Get the array size.
Definition: ArrayRef.h:165
iterator begin() const
Definition: ArrayRef.h:153
bool empty() const
empty - Check if the array is empty.
Definition: ArrayRef.h:160
ArrayRef< T > slice(size_t N, size_t M) const
slice(n, m) - Chop off the first N elements of the array, and keep M elements in the array.
Definition: ArrayRef.h:195
Class to represent array types.
Definition: DerivedTypes.h:371
An instruction that atomically checks whether a specified value is in a memory location,...
Definition: Instructions.h:539
void setSyncScopeID(SyncScope::ID SSID)
Sets the synchronization scope ID of this cmpxchg instruction.
Definition: Instructions.h:669
bool isVolatile() const
Return true if this is a cmpxchg from a volatile memory location.
Definition: Instructions.h:599
void setFailureOrdering(AtomicOrdering Ordering)
Sets the failure ordering constraint of this cmpxchg instruction.
Definition: Instructions.h:643
AtomicOrdering getFailureOrdering() const
Returns the failure ordering constraint of this cmpxchg instruction.
Definition: Instructions.h:638
void setSuccessOrdering(AtomicOrdering Ordering)
Sets the success ordering constraint of this cmpxchg instruction.
Definition: Instructions.h:631
AtomicCmpXchgInst * cloneImpl() const
Align getAlign() const
Return the alignment of the memory that is being allocated by the instruction.
Definition: Instructions.h:588
bool isWeak() const
Return true if this cmpxchg may spuriously fail.
Definition: Instructions.h:606
void setAlignment(Align Align)
Definition: Instructions.h:592
AtomicOrdering getSuccessOrdering() const
Returns the success ordering constraint of this cmpxchg instruction.
Definition: Instructions.h:626
AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal, Align Alignment, AtomicOrdering SuccessOrdering, AtomicOrdering FailureOrdering, SyncScope::ID SSID, BasicBlock::iterator InsertBefore)
SyncScope::ID getSyncScopeID() const
Returns the synchronization scope ID of this cmpxchg instruction.
Definition: Instructions.h:664
an instruction that atomically reads a memory location, combines it with another value,...
Definition: Instructions.h:748
Align getAlign() const
Return the alignment of the memory that is being allocated by the instruction.
Definition: Instructions.h:867
AtomicRMWInst * cloneImpl() const
bool isVolatile() const
Return true if this is a RMW on a volatile memory location.
Definition: Instructions.h:877
BinOp
This enumeration lists the possible modifications atomicrmw can make.
Definition: Instructions.h:760
@ Add
*p = old + v
Definition: Instructions.h:764
@ FAdd
*p = old + v
Definition: Instructions.h:785
@ Min
*p = old <signed v ? old : v
Definition: Instructions.h:778
@ Or
*p = old | v
Definition: Instructions.h:772
@ Sub
*p = old - v
Definition: Instructions.h:766
@ And
*p = old & v
Definition: Instructions.h:768
@ Xor
*p = old ^ v
Definition: Instructions.h:774
@ FSub
*p = old - v
Definition: Instructions.h:788
@ UIncWrap
Increment one up to a maximum value.
Definition: Instructions.h:800
@ Max
*p = old >signed v ? old : v
Definition: Instructions.h:776
@ UMin
*p = old <unsigned v ? old : v
Definition: Instructions.h:782
@ FMin
*p = minnum(old, v) minnum matches the behavior of llvm.minnum.
Definition: Instructions.h:796
@ UMax
*p = old >unsigned v ? old : v
Definition: Instructions.h:780
@ FMax
*p = maxnum(old, v) maxnum matches the behavior of llvm.maxnum.
Definition: Instructions.h:792
@ UDecWrap
Decrement one until a minimum value or zero.
Definition: Instructions.h:804
@ Nand
*p = ~(old & v)
Definition: Instructions.h:770
void setSyncScopeID(SyncScope::ID SSID)
Sets the synchronization scope ID of this rmw instruction.
Definition: Instructions.h:906
void setOrdering(AtomicOrdering Ordering)
Sets the ordering constraint of this rmw instruction.
Definition: Instructions.h:892
AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val, Align Alignment, AtomicOrdering Ordering, SyncScope::ID SSID, BasicBlock::iterator InsertBefore)
void setOperation(BinOp Operation)
Definition: Instructions.h:861
BinOp getOperation() const
Definition: Instructions.h:845
SyncScope::ID getSyncScopeID() const
Returns the synchronization scope ID of this rmw instruction.
Definition: Instructions.h:901
void setAlignment(Align Align)
Definition: Instructions.h:871
static StringRef getOperationName(BinOp Op)
AtomicOrdering getOrdering() const
Returns the ordering constraint of this rmw instruction.
Definition: Instructions.h:887
bool hasAttrSomewhere(Attribute::AttrKind Kind, unsigned *Index=nullptr) const
Return true if the specified attribute is set for at least one parameter or for the return value.
FPClassTest getRetNoFPClass() const
Get the disallowed floating-point classes of the return value.
bool hasParamAttr(unsigned ArgNo, Attribute::AttrKind Kind) const
Return true if the attribute exists for the given argument.
Definition: Attributes.h:783
FPClassTest getParamNoFPClass(unsigned ArgNo) const
Get the disallowed floating-point classes of the argument value.
MemoryEffects getMemoryEffects() const
Returns memory effects of the function.
AttrKind
This enumeration lists the attributes that can be associated with parameters, function results,...
Definition: Attributes.h:85
ConstantRange getRange() const
Returns the value of the range attribute.
Definition: Attributes.cpp:447
static Attribute getWithMemoryEffects(LLVMContext &Context, MemoryEffects ME)
Definition: Attributes.cpp:241
bool isValid() const
Return true if the attribute is any kind of attribute.
Definition: Attributes.h:193
LLVM Basic Block Representation.
Definition: BasicBlock.h:60
iterator end()
Definition: BasicBlock.h:443
bool isEntryBlock() const
Return true if this is the entry block of the containing function.
Definition: BasicBlock.cpp:564
const Function * getParent() const
Return the enclosing method, or null if none.
Definition: BasicBlock.h:206
InstListType::iterator iterator
Instruction iterators...
Definition: BasicBlock.h:165
const Module * getModule() const
Return the module owning the function this basic block belongs to, or nullptr if the function does no...
Definition: BasicBlock.cpp:289
BinaryOperator(BinaryOps iType, Value *S1, Value *S2, Type *Ty, const Twine &Name, BasicBlock::iterator InsertBefore)
static BinaryOperator * Create(BinaryOps Op, Value *S1, Value *S2, const Twine &Name, BasicBlock::iterator InsertBefore)
Construct a binary instruction, given the opcode and the two operands.
static BinaryOperator * CreateNSWNeg(Value *Op, const Twine &Name, BasicBlock::iterator InsertBefore)
BinaryOps getOpcode() const
Definition: InstrTypes.h:513
static BinaryOperator * CreateNeg(Value *Op, const Twine &Name, BasicBlock::iterator InsertBefore)
Helper functions to construct and inspect unary operations (NEG and NOT) via binary operators SUB and...
bool swapOperands()
Exchange the two operands to this instruction.
static BinaryOperator * CreateNot(Value *Op, const Twine &Name, BasicBlock::iterator InsertBefore)
BinaryOperator * cloneImpl() const
This class represents a no-op cast from one type to another.
BitCastInst * cloneImpl() const
Clone an identical BitCastInst.
BitCastInst(Value *S, Type *Ty, const Twine &NameStr, BasicBlock::iterator InsertBefore)
Constructor with insert-before-instruction semantics.
Conditional or Unconditional Branch instruction.
void swapSuccessors()
Swap the successors of this branch instruction.
BranchInst * cloneImpl() const
bool isConditional() const
Value * getCondition() const
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
Definition: InstrTypes.h:1494
FPClassTest getParamNoFPClass(unsigned i) const
Extract a test mask for disallowed floating-point value classes for the parameter.
bool isInlineAsm() const
Check if this call is an inline asm statement.
Definition: InstrTypes.h:1809
BundleOpInfo & getBundleOpInfoForOperand(unsigned OpIdx)
Return the BundleOpInfo for the operand at index OpIdx.
Attribute getRetAttr(Attribute::AttrKind Kind) const
Return the attribute for the given attribute kind for the return value.
Definition: InstrTypes.h:1952
void setCallingConv(CallingConv::ID CC)
Definition: InstrTypes.h:1804
FPClassTest getRetNoFPClass() const
Extract a test mask for disallowed floating-point value classes for the return value.
bundle_op_iterator bundle_op_info_begin()
Return the start of the list of BundleOpInfo instances associated with this OperandBundleUser.
Definition: InstrTypes.h:2561
MemoryEffects getMemoryEffects() const
void addFnAttr(Attribute::AttrKind Kind)
Adds the attribute to the function.
Definition: InstrTypes.h:1851
bool doesNotAccessMemory() const
Determine if the call does not access memory.
void getOperandBundlesAsDefs(SmallVectorImpl< OperandBundleDef > &Defs) const
Return the list of operand bundles attached to this instruction as a vector of OperandBundleDefs.
void setOnlyAccessesArgMemory()
OperandBundleUse getOperandBundleAt(unsigned Index) const
Return the operand bundle at a specific index.
Definition: InstrTypes.h:2369
void setOnlyAccessesInaccessibleMemOrArgMem()
std::optional< OperandBundleUse > getOperandBundle(StringRef Name) const
Return an operand bundle by name, if present.
Definition: InstrTypes.h:2400
Function * getCalledFunction() const
Returns the function called, or null if this is an indirect function invocation or the function signa...
Definition: InstrTypes.h:1742
void setDoesNotAccessMemory()
bool hasRetAttr(Attribute::AttrKind Kind) const
Determine whether the return value has the given attribute.
Definition: InstrTypes.h:1945
bool onlyAccessesInaccessibleMemory() const
Determine if the function may only access memory that is inaccessible from the IR.
unsigned getNumOperandBundles() const
Return the number of operand bundles associated with this User.
Definition: InstrTypes.h:2313
CallingConv::ID getCallingConv() const
Definition: InstrTypes.h:1800
bundle_op_iterator bundle_op_info_end()
Return the end of the list of BundleOpInfo instances associated with this OperandBundleUser.
Definition: InstrTypes.h:2578
unsigned getNumSubclassExtraOperandsDynamic() const
Get the number of extra operands for instructions that don't have a fixed number of extra operands.
bool paramHasAttr(unsigned ArgNo, Attribute::AttrKind Kind) const
Determine whether the argument or parameter has the given attribute.
User::op_iterator arg_begin()
Return the iterator pointing to the beginning of the argument list.
Definition: InstrTypes.h:1662
bool isMustTailCall() const
Tests if this call site must be tail call optimized.
bool isIndirectCall() const
Return true if the callsite is an indirect call.
bool onlyReadsMemory() const
Determine if the call does not access or only reads memory.
iterator_range< bundle_op_iterator > bundle_op_infos()
Return the range [bundle_op_info_begin, bundle_op_info_end).
Definition: InstrTypes.h:2594
void setOnlyReadsMemory()
bool onlyAccessesInaccessibleMemOrArgMem() const
Determine if the function may only access memory that is either inaccessible from the IR or pointed t...
static CallBase * Create(CallBase *CB, ArrayRef< OperandBundleDef > Bundles, BasicBlock::iterator InsertPt)
Create a clone of CB with a different set of operand bundles and insert it before InsertPt.
static CallBase * removeOperandBundle(CallBase *CB, uint32_t ID, Instruction *InsertPt=nullptr)
Create a clone of CB with operand bundle ID removed.
Value * getCalledOperand() const
Definition: InstrTypes.h:1735
void setOnlyWritesMemory()
op_iterator populateBundleOperandInfos(ArrayRef< OperandBundleDef > Bundles, const unsigned BeginIndex)
Populate the BundleOpInfo instances and the Use& vector from Bundles.
AttributeList Attrs
parameter attributes for callable
Definition: InstrTypes.h:1508
bool hasOperandBundlesOtherThan(ArrayRef< uint32_t > IDs) const
Return true if this operand bundle user contains operand bundles with tags other than those specified...
Definition: InstrTypes.h:2474
std::optional< ConstantRange > getRange() const
If this return value has a range attribute, return the value range of the argument.
bool isReturnNonNull() const
Return true if the return value is known to be not null.
Value * getArgOperand(unsigned i) const
Definition: InstrTypes.h:1687
FunctionType * FTy
Definition: InstrTypes.h:1509
uint64_t getRetDereferenceableBytes() const
Extract the number of dereferenceable bytes for a call or parameter (0=unknown).
Definition: InstrTypes.h:2157
User::op_iterator arg_end()
Return the iterator pointing to the end of the argument list.
Definition: InstrTypes.h:1668
FunctionType * getFunctionType() const
Definition: InstrTypes.h:1600
Intrinsic::ID getIntrinsicID() const
Returns the intrinsic ID of the intrinsic called or Intrinsic::not_intrinsic if the called function i...
static unsigned CountBundleInputs(ArrayRef< OperandBundleDef > Bundles)
Return the total number of values used in Bundles.
Definition: InstrTypes.h:2624
Value * getArgOperandWithAttribute(Attribute::AttrKind Kind) const
If one of the arguments has the specified attribute, returns its operand value.
void setOnlyAccessesInaccessibleMemory()
bool onlyWritesMemory() const
Determine if the call does not access or only writes memory.
bool hasClobberingOperandBundles() const
Return true if this operand bundle user has operand bundles that may write to the heap.
void setCalledOperand(Value *V)
Definition: InstrTypes.h:1778
bool hasReadingOperandBundles() const
Return true if this operand bundle user has operand bundles that may read from the heap.
bool onlyAccessesArgMemory() const
Determine if the call can access memory only using pointers based on its arguments.
unsigned arg_size() const
Definition: InstrTypes.h:1685
AttributeList getAttributes() const
Return the parameter attributes for this call.
Definition: InstrTypes.h:1819
static CallBase * addOperandBundle(CallBase *CB, uint32_t ID, OperandBundleDef OB, Instruction *InsertPt=nullptr)
Create a clone of CB with operand bundle OB added.
void setMemoryEffects(MemoryEffects ME)
bool hasOperandBundles() const
Return true if this User has any operand bundles.
Definition: InstrTypes.h:2318
bool isTailCall() const
Tests if this call site is marked as a tail call.
Function * getCaller()
Helper to get the caller (the parent function).
CallBr instruction, tracking function calls that may not return control but instead transfer it to a ...
static CallBrInst * Create(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest, ArrayRef< BasicBlock * > IndirectDests, ArrayRef< Value * > Args, const Twine &NameStr, BasicBlock::iterator InsertBefore)
SmallVector< BasicBlock *, 16 > getIndirectDests() const
void setDefaultDest(BasicBlock *B)
void setIndirectDest(unsigned i, BasicBlock *B)
BasicBlock * getDefaultDest() const
CallBrInst * cloneImpl() const
This class represents a function call, abstracting a target machine's calling convention.
static CallInst * Create(FunctionType *Ty, Value *F, const Twine &NameStr, BasicBlock::iterator InsertBefore)
void updateProfWeight(uint64_t S, uint64_t T)
Updates profile metadata by scaling it by S / T.
TailCallKind getTailCallKind() const
CallInst * cloneImpl() const
This is the base class for all instructions that perform data casts.
Definition: InstrTypes.h:601
static Instruction::CastOps getCastOpcode(const Value *Val, bool SrcIsSigned, Type *Ty, bool DstIsSigned)
Returns the opcode necessary to cast Val into Ty using usual casting rules.
static CastInst * CreateFPCast(Value *S, Type *Ty, const Twine &Name, BasicBlock::iterator InsertBefore)
Create an FPExt, BitCast, or FPTrunc for fp -> fp casts.
Instruction::CastOps getOpcode() const
Return the opcode of this CastInst.
Definition: InstrTypes.h:930
static CastInst * CreateZExtOrBitCast(Value *S, Type *Ty, const Twine &Name, BasicBlock::iterator InsertBefore)
Create a ZExt or BitCast cast instruction.
static CastInst * Create(Instruction::CastOps, Value *S, Type *Ty, const Twine &Name, BasicBlock::iterator InsertBefore)
Provides a way to construct any of the CastInst subclasses using an opcode instead of the subclass's ...
static unsigned isEliminableCastPair(Instruction::CastOps firstOpcode, Instruction::CastOps secondOpcode, Type *SrcTy, Type *MidTy, Type *DstTy, Type *SrcIntPtrTy, Type *MidIntPtrTy, Type *DstIntPtrTy)
Determine how a pair of casts can be eliminated, if they can be at all.
static bool isBitOrNoopPointerCastable(Type *SrcTy, Type *DestTy, const DataLayout &DL)
Check whether a bitcast, inttoptr, or ptrtoint cast between these types is valid and a no-op.
static CastInst * CreatePointerBitCastOrAddrSpaceCast(Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd)
Create a BitCast or an AddrSpaceCast cast instruction.
static bool isBitCastable(Type *SrcTy, Type *DestTy)
Check whether a bitcast between these types is valid.
static CastInst * CreatePointerCast(Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd)
Create a BitCast, an AddrSpaceCast, or a PtrToInt cast instruction.
static bool isNoopCast(Instruction::CastOps Opcode, Type *SrcTy, Type *DstTy, const DataLayout &DL)
A no-op cast is one that can be effected without changing any bits.
static CastInst * CreateBitOrPointerCast(Value *S, Type *Ty, const Twine &Name, BasicBlock::iterator InsertBefore)
Create a BitCast, a PtrToInt, or an IntToPtr cast instruction.
static CastInst * CreateTruncOrBitCast(Value *S, Type *Ty, const Twine &Name, BasicBlock::iterator InsertBefore)
Create a Trunc or BitCast cast instruction.
static CastInst * CreateSExtOrBitCast(Value *S, Type *Ty, const Twine &Name, BasicBlock::iterator InsertBefore)
Create a SExt or BitCast cast instruction.
bool isIntegerCast() const
There are several places where we need to know if a cast instruction only deals with integer source a...
static CastInst * CreateIntegerCast(Value *S, Type *Ty, bool isSigned, const Twine &Name, BasicBlock::iterator InsertBefore)
Create a ZExt, BitCast, or Trunc for int -> int casts.
static bool castIsValid(Instruction::CastOps op, Type *SrcTy, Type *DstTy)
This method can be used to determine if a cast from SrcTy to DstTy using Opcode op is valid or not.
CatchReturnInst * cloneImpl() const
void setUnwindDest(BasicBlock *UnwindDest)
void addHandler(BasicBlock *Dest)
Add an entry to the switch instruction... Note: This action invalidates handler_end().
CatchSwitchInst * cloneImpl() const
Value * getParentPad() const
void setParentPad(Value *ParentPad)
BasicBlock * getUnwindDest() const
void removeHandler(handler_iterator HI)
bool hasUnwindDest() const
CleanupReturnInst * cloneImpl() const
This class is the base class for the comparison instructions.
Definition: InstrTypes.h:983
Predicate getStrictPredicate() const
For example, SGE -> SGT, SLE -> SLT, ULE -> ULT, UGE -> UGT.
Definition: InstrTypes.h:1198
bool isEquality() const
Determine if this is an equals/not equals predicate.
Definition: InstrTypes.h:1255
void setPredicate(Predicate P)
Set the predicate for this instruction to the specified value.
Definition: InstrTypes.h:1108
bool isFalseWhenEqual() const
This is just a convenience.
Definition: InstrTypes.h:1320
Predicate getSignedPredicate()
For example, ULT->SLT, ULE->SLE, UGT->SGT, UGE->SGE, SLT->Failed assert.
Definition: InstrTypes.h:1284
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
Definition: InstrTypes.h:993
@ FCMP_OEQ
0 0 0 1 True if ordered and equal
Definition: InstrTypes.h:996
@ FCMP_TRUE
1 1 1 1 Always true (always folded)
Definition: InstrTypes.h:1010
@ ICMP_SLT
signed less than
Definition: InstrTypes.h:1022
@ ICMP_SLE
signed less or equal
Definition: InstrTypes.h:1023
@ FCMP_OLT
0 1 0 0 True if ordered and less than
Definition: InstrTypes.h:999
@ FCMP_ULE
1 1 0 1 True if unordered, less than, or equal
Definition: InstrTypes.h:1008
@ FCMP_OGT
0 0 1 0 True if ordered and greater than
Definition: InstrTypes.h:997
@ FCMP_OGE
0 0 1 1 True if ordered and greater than or equal
Definition: InstrTypes.h:998
@ ICMP_UGE
unsigned greater or equal
Definition: InstrTypes.h:1017
@ ICMP_UGT
unsigned greater than
Definition: InstrTypes.h:1016
@ ICMP_SGT
signed greater than
Definition: InstrTypes.h:1020
@ FCMP_ULT
1 1 0 0 True if unordered or less than
Definition: InstrTypes.h:1007
@ FCMP_ONE
0 1 1 0 True if ordered and operands are unequal
Definition: InstrTypes.h:1001
@ FCMP_UEQ
1 0 0 1 True if unordered or equal
Definition: InstrTypes.h:1004
@ ICMP_ULT
unsigned less than
Definition: InstrTypes.h:1018
@ FCMP_UGT
1 0 1 0 True if unordered or greater than
Definition: InstrTypes.h:1005
@ FCMP_OLE
0 1 0 1 True if ordered and less than or equal
Definition: InstrTypes.h:1000
@ FCMP_ORD
0 1 1 1 True if ordered (no nans)
Definition: InstrTypes.h:1002
@ ICMP_EQ
equal
Definition: InstrTypes.h:1014
@ ICMP_NE
not equal
Definition: InstrTypes.h:1015
@ ICMP_SGE
signed greater or equal
Definition: InstrTypes.h:1021
@ FCMP_UNE
1 1 1 0 True if unordered or not equal
Definition: InstrTypes.h:1009
@ ICMP_ULE
unsigned less or equal
Definition: InstrTypes.h:1019
@ FCMP_UGE
1 0 1 1 True if unordered, greater than, or equal
Definition: InstrTypes.h:1006
@ FCMP_FALSE
0 0 0 0 Always false (always folded)
Definition: InstrTypes.h:995
@ FCMP_UNO
1 0 0 0 True if unordered: isnan(X) | isnan(Y)
Definition: InstrTypes.h:1003
bool isSigned() const
Definition: InstrTypes.h:1265
Predicate getSwappedPredicate() const
For example, EQ->EQ, SLE->SGE, ULT->UGT, OEQ->OEQ, ULE->UGE, OLT->OGT, etc.
Definition: InstrTypes.h:1167
bool isTrueWhenEqual() const
This is just a convenience.
Definition: InstrTypes.h:1314
Predicate getUnsignedPredicate()
For example, SLT->ULT, SLE->ULE, SGT->UGT, SGE->UGE, ULT->Failed assert.
Definition: InstrTypes.h:1296
Predicate getNonStrictPredicate() const
For example, SGT -> SGE, SLT -> SLE, ULT -> ULE, UGT -> UGE.
Definition: InstrTypes.h:1211
bool isNonStrictPredicate() const
Definition: InstrTypes.h:1192
bool isFPPredicate() const
Definition: InstrTypes.h:1122
static CmpInst * Create(OtherOps Op, Predicate Pred, Value *S1, Value *S2, const Twine &Name, BasicBlock::iterator InsertBefore)
Construct a compare instruction, given the opcode, the predicate and the two operands.
void swapOperands()
This is just a convenience that dispatches to the subclasses.
CmpInst(Type *ty, Instruction::OtherOps op, Predicate pred, Value *LHS, Value *RHS, const Twine &Name, BasicBlock::iterator InsertBefore, Instruction *FlagsSource=nullptr)
Predicate getInversePredicate() const
For example, EQ -> NE, UGT -> ULE, SLT -> SGE, OEQ -> UNE, UGT -> OLE, OLT -> UGE,...
Definition: InstrTypes.h:1129
static StringRef getPredicateName(Predicate P)
Predicate getPredicate() const
Return the predicate for this instruction.
Definition: InstrTypes.h:1105
static CmpInst * CreateWithCopiedFlags(OtherOps Op, Predicate Pred, Value *S1, Value *S2, const Instruction *FlagsSource, const Twine &Name="", Instruction *InsertBefore=nullptr)
Construct a compare instruction, given the opcode, the predicate, the two operands and the instructio...
bool isStrictPredicate() const
Definition: InstrTypes.h:1183
static bool isUnordered(Predicate predicate)
Determine if the predicate is an unordered operation.
Predicate getFlippedStrictnessPredicate() const
For predicate of kind "is X or equal to 0" returns the predicate "is X".
Definition: InstrTypes.h:1233
static bool isImpliedTrueByMatchingCmp(Predicate Pred1, Predicate Pred2)
Determine if Pred1 implies Pred2 is true when two compares have matching operands.
Predicate getFlippedSignednessPredicate()
For example, SLT->ULT, ULT->SLT, SLE->ULE, ULE->SLE, EQ->Failed assert.
Definition: InstrTypes.h:1308
bool isIntPredicate() const
Definition: InstrTypes.h:1123
static bool isOrdered(Predicate predicate)
Determine if the predicate is an ordered operation.
static bool isImpliedFalseByMatchingCmp(Predicate Pred1, Predicate Pred2)
Determine if Pred1 implies Pred2 is false when two compares have matching operands.
bool isUnsigned() const
Definition: InstrTypes.h:1271
bool isCommutative() const
This is just a convenience that dispatches to the subclasses.
bool isRelational() const
Return true if the predicate is relational (not EQ or NE).
Definition: InstrTypes.h:1261
ConstantFP - Floating Point Values [float, double].
Definition: Constants.h:268
const APFloat & getValueAPF() const
Definition: Constants.h:311
This is the shared class of boolean and integer constants.
Definition: Constants.h:80
static Constant * get(ArrayRef< Constant * > V)
Definition: Constants.cpp:1398
This is an important base class in LLVM.
Definition: Constant.h:41
static Constant * getAllOnesValue(Type *Ty)
Definition: Constants.cpp:417
static Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
Definition: Constants.cpp:370
This class represents an Operation in the Expression.
A parsed version of the target data layout string in and methods for querying it.
Definition: DataLayout.h:110
static constexpr ElementCount getFixed(ScalarTy MinVal)
Definition: TypeSize.h:296
This instruction extracts a single (scalar) element from a VectorType value.
ExtractElementInst * cloneImpl() const
static ExtractElementInst * Create(Value *Vec, Value *Idx, const Twine &NameStr, BasicBlock::iterator InsertBefore)
static bool isValidOperands(const Value *Vec, const Value *Idx)
Return true if an extractelement instruction can be formed with the specified operands.
This instruction extracts a struct member or array element value from an aggregate value.
static Type * getIndexedType(Type *Agg, ArrayRef< unsigned > Idxs)
Returns the type of the element that would be extracted with an extractvalue instruction with the spe...
ExtractValueInst * cloneImpl() const
This instruction compares its operands according to the predicate given to the constructor.
bool isEquality() const
static bool compare(const APFloat &LHS, const APFloat &RHS, FCmpInst::Predicate Pred)
Return result of LHS Pred RHS comparison.
FCmpInst * cloneImpl() const
Clone an identical FCmpInst.
This class represents an extension of floating point types.
FPExtInst * cloneImpl() const
Clone an identical FPExtInst.
FPExtInst(Value *S, Type *Ty, const Twine &NameStr, BasicBlock::iterator InsertBefore)
Constructor with insert-before-instruction semantics.
float getFPAccuracy() const
Get the maximum error permitted by this operation in ULPs.
This class represents a cast from floating point to signed integer.
FPToSIInst * cloneImpl() const
Clone an identical FPToSIInst.
FPToSIInst(Value *S, Type *Ty, const Twine &NameStr, BasicBlock::iterator InsertBefore)
Constructor with insert-before-instruction semantics.
This class represents a cast from floating point to unsigned integer.
FPToUIInst(Value *S, Type *Ty, const Twine &NameStr, BasicBlock::iterator InsertBefore)
Constructor with insert-before-instruction semantics.
FPToUIInst * cloneImpl() const
Clone an identical FPToUIInst.
This class represents a truncation of floating point types.
FPTruncInst(Value *S, Type *Ty, const Twine &NameStr, BasicBlock::iterator InsertBefore)
Constructor with insert-before-instruction semantics.
FPTruncInst * cloneImpl() const
Clone an identical FPTruncInst.
An instruction for ordering other memory operations.
Definition: Instructions.h:460
FenceInst(LLVMContext &C, AtomicOrdering Ordering, SyncScope::ID SSID, BasicBlock::iterator InsertBefore)
SyncScope::ID getSyncScopeID() const
Returns the synchronization scope ID of this fence instruction.
Definition: Instructions.h:498
void setSyncScopeID(SyncScope::ID SSID)
Sets the synchronization scope ID of this fence instruction.
Definition: Instructions.h:503
FenceInst * cloneImpl() const
void setOrdering(AtomicOrdering Ordering)
Sets the ordering constraint of this fence instruction.
Definition: Instructions.h:493
AtomicOrdering getOrdering() const
Returns the ordering constraint of this fence instruction.
Definition: Instructions.h:487
Class to represent fixed width SIMD vectors.
Definition: DerivedTypes.h:539
unsigned getNumElements() const
Definition: DerivedTypes.h:582
This class represents a freeze function that returns random concrete value if an operand is either a ...
FreezeInst(Value *S, const Twine &NameStr, BasicBlock::iterator InsertBefore)
FreezeInst * cloneImpl() const
Clone an identical FreezeInst.
void setParentPad(Value *ParentPad)
Definition: InstrTypes.h:2707
Value * getParentPad() const
Convenience accessors.
Definition: InstrTypes.h:2706
FuncletPadInst * cloneImpl() const
Class to represent function types.
Definition: DerivedTypes.h:103
unsigned getNumParams() const
Return the number of fixed parameters this function type requires.
Definition: DerivedTypes.h:142
Type * getParamType(unsigned i) const
Parameter type accessors.
Definition: DerivedTypes.h:135
bool isVarArg() const
Definition: DerivedTypes.h:123
an instruction for type-safe pointer arithmetic to access elements of arrays and structs
Definition: Instructions.h:973
bool isInBounds() const
Determine whether the GEP has the inbounds flag.
static Type * getTypeAtIndex(Type *Ty, Value *Idx)
Return the type of the element at the given index of an indexable type.
bool hasAllZeroIndices() const
Return true if all of the indices of this GEP are zeros.
bool hasAllConstantIndices() const
Return true if all of the indices of this GEP are constant integers.
bool collectOffset(const DataLayout &DL, unsigned BitWidth, MapVector< Value *, APInt > &VariableOffsets, APInt &ConstantOffset) const
void setIsInBounds(bool b=true)
Set or clear the inbounds flag on this GEP instruction.
static Type * getIndexedType(Type *Ty, ArrayRef< Value * > IdxList)
Returns the result type of a getelementptr with the given source element type and indexes.
bool accumulateConstantOffset(const DataLayout &DL, APInt &Offset) const
Accumulate the constant address offset of this GEP if possible.
GetElementPtrInst * cloneImpl() const
This instruction compares its operands according to the predicate given to the constructor.
static bool compare(const APInt &LHS, const APInt &RHS, ICmpInst::Predicate Pred)
Return result of LHS Pred RHS comparison.
ICmpInst * cloneImpl() const
Clone an identical ICmpInst.
Predicate getSignedPredicate() const
For example, EQ->EQ, SLE->SLE, UGT->SGT, etc.
bool isEquality() const
Return true if this predicate is either EQ or NE.
Predicate getUnsignedPredicate() const
For example, EQ->EQ, SLE->ULE, UGT->UGT, etc.
Indirect Branch Instruction.
void addDestination(BasicBlock *Dest)
Add a destination.
void removeDestination(unsigned i)
This method removes the specified successor from the indirectbr instruction.
IndirectBrInst * cloneImpl() const
This instruction inserts a single (scalar) element into a VectorType value.
InsertElementInst * cloneImpl() const
static bool isValidOperands(const Value *Vec, const Value *NewElt, const Value *Idx)
Return true if an insertelement instruction can be formed with the specified operands.
static InsertElementInst * Create(Value *Vec, Value *NewElt, Value *Idx, const Twine &NameStr, BasicBlock::iterator InsertBefore)
This instruction inserts a struct field or array element value into an aggregate value.
InsertValueInst * cloneImpl() const
BitfieldElement::Type getSubclassData() const
Definition: Instruction.h:1003
void copyIRFlags(const Value *V, bool IncludeWrapFlags=true)
Convenience method to copy supported exact, fast-math, and (optionally) wrapping flags from V to this...
void insertBefore(Instruction *InsertPos)
Insert an unlinked instruction into a basic block immediately before the specified instruction.
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
Definition: Instruction.h:454
bool isCommutative() const LLVM_READONLY
Return true if the instruction is commutative:
const BasicBlock * getParent() const
Definition: Instruction.h:152
InstListType::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
void swapProfMetadata()
If the instruction has "branch_weights" MD_prof metadata and the MDNode has three operands (including...
unsigned getOpcode() const
Returns a member of one of the enums like Instruction::Add.
Definition: Instruction.h:252
InstListType::iterator insertInto(BasicBlock *ParentBB, InstListType::iterator It)
Inserts an unlinked instruction into ParentBB at position It and returns the iterator of the inserted...
This class represents a cast from an integer to a pointer.
IntToPtrInst * cloneImpl() const
Clone an identical IntToPtrInst.
IntToPtrInst(Value *S, Type *Ty, const Twine &NameStr, BasicBlock::iterator InsertBefore)
Constructor with insert-before-instruction semantics.
Invoke instruction.
BasicBlock * getUnwindDest() const
void setNormalDest(BasicBlock *B)
InvokeInst * cloneImpl() const
LandingPadInst * getLandingPadInst() const
Get the landingpad instruction from the landing pad block (the unwind destination).
void setUnwindDest(BasicBlock *B)
static InvokeInst * Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal, BasicBlock *IfException, ArrayRef< Value * > Args, const Twine &NameStr, BasicBlock::iterator InsertBefore)
BasicBlock * getNormalDest() const
This is an important class for using LLVM in a threaded context.
Definition: LLVMContext.h:67
LLVMContextImpl *const pImpl
Definition: LLVMContext.h:69
The landingpad instruction holds all of the information necessary to generate correct exception handl...
bool isCleanup() const
Return 'true' if this landingpad instruction is a cleanup.
LandingPadInst * cloneImpl() const
void addClause(Constant *ClauseVal)
Add a catch or filter clause to the landing pad.
void setCleanup(bool V)
Indicate that this landingpad instruction is a cleanup.
static LandingPadInst * Create(Type *RetTy, unsigned NumReservedClauses, const Twine &NameStr, BasicBlock::iterator InsertBefore)
Constructors - NumReservedClauses is a hint for the number of incoming clauses that this landingpad w...
An instruction for reading from memory.
Definition: Instructions.h:184
void setAlignment(Align Align)
Definition: Instructions.h:240
bool isVolatile() const
Return true if this is a load from a volatile memory location.
Definition: Instructions.h:230
void setAtomic(AtomicOrdering Ordering, SyncScope::ID SSID=SyncScope::System)
Sets the ordering constraint and the synchronization scope ID of this load instruction.
Definition: Instructions.h:266
LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, BasicBlock::iterator InsertBefore)
LoadInst * cloneImpl() const
AtomicOrdering getOrdering() const
Returns the ordering constraint of this load instruction.
Definition: Instructions.h:245
void setVolatile(bool V)
Specify whether this is a volatile load or not.
Definition: Instructions.h:233
SyncScope::ID getSyncScopeID() const
Returns the synchronization scope ID of this load instruction.
Definition: Instructions.h:255
Align getAlign() const
Return the alignment of the access that is being performed.
Definition: Instructions.h:236
MDNode * createBranchWeights(uint32_t TrueWeight, uint32_t FalseWeight)
Return metadata containing two branch weights.
Definition: MDBuilder.cpp:37
Metadata node.
Definition: Metadata.h:1067
const MDOperand & getOperand(unsigned I) const
Definition: Metadata.h:1428
unsigned getNumOperands() const
Return number of MDNode operands.
Definition: Metadata.h:1434
This class implements a map that also provides access to all stored values in a deterministic order.
Definition: MapVector.h:36
static MemoryEffectsBase readOnly()
Create MemoryEffectsBase that can read any memory.
Definition: ModRef.h:122
bool onlyWritesMemory() const
Whether this function only (at most) writes memory.
Definition: ModRef.h:198
bool doesNotAccessMemory() const
Whether this function accesses no memory.
Definition: ModRef.h:192
static MemoryEffectsBase argMemOnly(ModRefInfo MR=ModRefInfo::ModRef)
Create MemoryEffectsBase that can only access argument memory.
Definition: ModRef.h:132
static MemoryEffectsBase inaccessibleMemOnly(ModRefInfo MR=ModRefInfo::ModRef)
Create MemoryEffectsBase that can only access inaccessible memory.
Definition: ModRef.h:138
bool onlyAccessesInaccessibleMem() const
Whether this function only (at most) accesses inaccessible memory.
Definition: ModRef.h:211
bool onlyAccessesArgPointees() const
Whether this function only (at most) accesses argument memory.
Definition: ModRef.h:201
bool onlyReadsMemory() const
Whether this function only (at most) reads memory.
Definition: ModRef.h:195
static MemoryEffectsBase writeOnly()
Create MemoryEffectsBase that can write any memory.
Definition: ModRef.h:127
static MemoryEffectsBase inaccessibleOrArgMemOnly(ModRefInfo MR=ModRefInfo::ModRef)
Create MemoryEffectsBase that can only access inaccessible or argument memory.
Definition: ModRef.h:145
static MemoryEffectsBase none()
Create MemoryEffectsBase that cannot read or write any memory.
Definition: ModRef.h:117
bool onlyAccessesInaccessibleOrArgMem() const
Whether this function only (at most) accesses argument and inaccessible memory.
Definition: ModRef.h:217
const DataLayout & getDataLayout() const
Get the data layout for the module's target platform.
Definition: Module.h:293
A container for an operand bundle being viewed as a set of values rather than a set of uses.
Definition: InstrTypes.h:1447
StringRef getTag() const
Definition: InstrTypes.h:1470
iterator_range< const_block_iterator > blocks() const
void allocHungoffUses(unsigned N)
const_block_iterator block_begin() const
void removeIncomingValueIf(function_ref< bool(unsigned)> Predicate, bool DeletePHIIfEmpty=true)
Remove all incoming values for which the predicate returns true.
Value * removeIncomingValue(unsigned Idx, bool DeletePHIIfEmpty=true)
Remove an incoming value.
bool hasConstantOrUndefValue() const
Whether the specified PHI node always merges together the same value, assuming undefs are equal to a ...
void copyIncomingBlocks(iterator_range< const_block_iterator > BBRange, uint32_t ToIdx=0)
Copies the basic blocks from BBRange to the incoming basic block list of this PHINode,...
const_block_iterator block_end() const
Value * getIncomingValue(unsigned i) const
Return incoming value number x.
Value * hasConstantValue() const
If the specified PHI node always merges together the same value, return the value,...
PHINode * cloneImpl() const
unsigned getNumIncomingValues() const
Return the number of incoming edges.
Class to represent pointers.
Definition: DerivedTypes.h:646
unsigned getAddressSpace() const
Return the address space of the Pointer type.
Definition: DerivedTypes.h:679
static PoisonValue * get(Type *T)
Static factory methods - Return an 'poison' object of the specified type.
Definition: Constants.cpp:1827
This class represents a cast from a pointer to an integer.
PtrToIntInst(Value *S, Type *Ty, const Twine &NameStr, BasicBlock::iterator InsertBefore)
Constructor with insert-before-instruction semantics.
PtrToIntInst * cloneImpl() const
Clone an identical PtrToIntInst.
Resume the propagation of an exception.
ResumeInst * cloneImpl() const
Return a value (possibly void), from a function.
ReturnInst * cloneImpl() const
This class represents a sign extension of integer types.
SExtInst(Value *S, Type *Ty, const Twine &NameStr, BasicBlock::iterator InsertBefore)
Constructor with insert-before-instruction semantics.
SExtInst * cloneImpl() const
Clone an identical SExtInst.
This class represents a cast from signed integer to floating point.
SIToFPInst * cloneImpl() const
Clone an identical SIToFPInst.
SIToFPInst(Value *S, Type *Ty, const Twine &NameStr, BasicBlock::iterator InsertBefore)
Constructor with insert-before-instruction semantics.
Class to represent scalable SIMD vectors.
Definition: DerivedTypes.h:586
This class represents the LLVM 'select' instruction.
SelectInst * cloneImpl() const
static const char * areInvalidOperands(Value *Cond, Value *True, Value *False)
Return a string if the specified operands are invalid for a select operation, otherwise return null.
static SelectInst * Create(Value *C, Value *S1, Value *S2, const Twine &NameStr, BasicBlock::iterator InsertBefore, Instruction *MDFrom=nullptr)
This instruction constructs a fixed permutation of two input vectors.
static bool isZeroEltSplatMask(ArrayRef< int > Mask, int NumSrcElts)
Return true if this shuffle mask chooses all elements with the same value as the first element of exa...
ArrayRef< int > getShuffleMask() const
static bool isSpliceMask(ArrayRef< int > Mask, int NumSrcElts, int &Index)
Return true if this shuffle mask is a splice mask, concatenating the two inputs together and then ext...
int getMaskValue(unsigned Elt) const
Return the shuffle mask value of this instruction for the given element index.
static bool isValidOperands(const Value *V1, const Value *V2, const Value *Mask)
Return true if a shufflevector instruction can be formed with the specified operands.
ShuffleVectorInst(Value *V1, Value *Mask, const Twine &NameStr, BasicBlock::iterator InsertBefore)
static bool isSelectMask(ArrayRef< int > Mask, int NumSrcElts)
Return true if this shuffle mask chooses elements from its source vectors without lane crossings.
static bool isBitRotateMask(ArrayRef< int > Mask, unsigned EltSizeInBits, unsigned MinSubElts, unsigned MaxSubElts, unsigned &NumSubElts, unsigned &RotateAmt)
Checks if the shuffle is a bit rotation of the first operand across multiple subelements,...
VectorType * getType() const
Overload to return most specific vector type.
bool isIdentityWithExtract() const
Return true if this shuffle extracts the first N elements of exactly one source vector.
static bool isOneUseSingleSourceMask(ArrayRef< int > Mask, int VF)
Return true if this shuffle mask represents "clustered" mask of size VF, i.e.
bool isIdentityWithPadding() const
Return true if this shuffle lengthens exactly one source vector with undefs in the high elements.
static bool isSingleSourceMask(ArrayRef< int > Mask, int NumSrcElts)
Return true if this shuffle mask chooses elements from exactly one source vector.
bool isConcat() const
Return true if this shuffle concatenates its 2 source vectors.
static bool isDeInterleaveMaskOfFactor(ArrayRef< int > Mask, unsigned Factor, unsigned &Index)
Check if the mask is a DE-interleave mask of the given factor Factor like: <Index,...
ShuffleVectorInst * cloneImpl() const
static bool isIdentityMask(ArrayRef< int > Mask, int NumSrcElts)
Return true if this shuffle mask chooses elements from exactly one source vector without lane crossin...
static bool isExtractSubvectorMask(ArrayRef< int > Mask, int NumSrcElts, int &Index)
Return true if this shuffle mask is an extract subvector mask.
void setShuffleMask(ArrayRef< int > Mask)
bool isInterleave(unsigned Factor)
Return if this shuffle interleaves its two input vectors together.
static bool isReverseMask(ArrayRef< int > Mask, int NumSrcElts)
Return true if this shuffle mask swaps the order of elements from exactly one source vector.
static bool isTransposeMask(ArrayRef< int > Mask, int NumSrcElts)
Return true if this shuffle mask is a transpose mask.
void commute()
Swap the operands and adjust the mask to preserve the semantics of the instruction.
static bool isInsertSubvectorMask(ArrayRef< int > Mask, int NumSrcElts, int &NumSubElts, int &Index)
Return true if this shuffle mask is an insert subvector mask.
static Constant * convertShuffleMaskForBitcode(ArrayRef< int > Mask, Type *ResultTy)
static bool isReplicationMask(ArrayRef< int > Mask, int &ReplicationFactor, int &VF)
Return true if this shuffle mask replicates each of the VF elements in a vector ReplicationFactor tim...
static bool isInterleaveMask(ArrayRef< int > Mask, unsigned Factor, unsigned NumInputElts, SmallVectorImpl< unsigned > &StartIndexes)
Return true if the mask interleaves one or more input vectors together.
This is a 'bitvector' (really, a variable-sized bit array), optimized for the case when the array is ...
Implements a dense probed hash-table based set with some number of buckets stored inline.
Definition: DenseSet.h:290
size_t size() const
Definition: SmallVector.h:91
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: SmallVector.h:586
void assign(size_type NumElts, ValueParamT Elt)
Definition: SmallVector.h:717
reference emplace_back(ArgTypes &&... Args)
Definition: SmallVector.h:950
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
Definition: SmallVector.h:696
void resize(size_type N)
Definition: SmallVector.h:651
void push_back(const T &Elt)
Definition: SmallVector.h:426
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1209
An instruction for storing to memory.
Definition: Instructions.h:317
AtomicOrdering getOrdering() const
Returns the ordering constraint of this store instruction.
Definition: Instructions.h:378
Align getAlign() const
Definition: Instructions.h:369
void setVolatile(bool V)
Specify whether this is a volatile store or not.
Definition: Instructions.h:364
void setAlignment(Align Align)
Definition: Instructions.h:373
StoreInst * cloneImpl() const
StoreInst(Value *Val, Value *Ptr, Instruction *InsertBefore)
SyncScope::ID getSyncScopeID() const
Returns the synchronization scope ID of this store instruction.
Definition: Instructions.h:389
bool isVolatile() const
Return true if this is a store to a volatile memory location.
Definition: Instructions.h:361
void setAtomic(AtomicOrdering Ordering, SyncScope::ID SSID=SyncScope::System)
Sets the ordering constraint and the synchronization scope ID of this store instruction.
Definition: Instructions.h:400
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:50
Class to represent struct types.
Definition: DerivedTypes.h:216
void setSuccessorWeight(unsigned idx, CaseWeightOpt W)
Instruction::InstListType::iterator eraseFromParent()
Delegate the call to the underlying SwitchInst::eraseFromParent() and mark this object to not touch t...
void addCase(ConstantInt *OnVal, BasicBlock *Dest, CaseWeightOpt W)
Delegate the call to the underlying SwitchInst::addCase() and set the specified branch weight for the...
CaseWeightOpt getSuccessorWeight(unsigned idx)
std::optional< uint32_t > CaseWeightOpt
SwitchInst::CaseIt removeCase(SwitchInst::CaseIt I)
Delegate the call to the underlying SwitchInst::removeCase() and remove correspondent branch weight.
void setValue(ConstantInt *V) const
Sets the new value for current case.
void setSuccessor(BasicBlock *S) const
Sets the new successor for current case.
Multiway switch.
SwitchInst * cloneImpl() const
void addCase(ConstantInt *OnVal, BasicBlock *Dest)
Add an entry to the switch instruction.
CaseIteratorImpl< CaseHandle > CaseIt
unsigned getNumCases() const
Return the number of 'cases' in this switch instruction, excluding the default case.
CaseIt removeCase(CaseIt I)
This method removes the specified case and its successor from the switch instruction.
This class represents a truncation of integer types.
TruncInst * cloneImpl() const
Clone an identical TruncInst.
TruncInst(Value *S, Type *Ty, const Twine &NameStr, BasicBlock::iterator InsertBefore)
Constructor with insert-before-instruction semantics.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition: Twine.h:81
The instances of the Type class are immutable: once they are created, they are never changed.
Definition: Type.h:45
bool isVectorTy() const
True if this is an instance of VectorType.
Definition: Type.h:265
bool isIntOrIntVectorTy() const
Return true if this is an integer type or a vector of integer types.
Definition: Type.h:234
bool isPointerTy() const
True if this is an instance of PointerType.
Definition: Type.h:255
static IntegerType * getInt1Ty(LLVMContext &C)
unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
bool isX86_MMXTy() const
Return true if this is X86 MMX.
Definition: Type.h:201
unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
bool isFirstClassType() const
Return true if the type is "first class", meaning it is a valid type for a Value.
Definition: Type.h:281
bool isAggregateType() const
Return true if the type is an aggregate type.
Definition: Type.h:295
LLVMContext & getContext() const
Return the LLVMContext in which this type was uniqued.
Definition: Type.h:129
bool isFloatingPointTy() const
Return true if this is one of the floating-point types.
Definition: Type.h:185
bool isPtrOrPtrVectorTy() const
Return true if this is a pointer type or a vector of pointer types.
Definition: Type.h:262
static IntegerType * getInt32Ty(LLVMContext &C)
bool isIntegerTy() const
True if this is an instance of IntegerType.
Definition: Type.h:228
bool isTokenTy() const
Return true if this is 'token'.
Definition: Type.h:225
bool isFPOrFPVectorTy() const
Return true if this is a FP type or a vector of FP.
Definition: Type.h:216
TypeSize getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
bool isVoidTy() const
Return true if this is 'void'.
Definition: Type.h:140
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
Definition: Type.h:348
This class represents a cast from unsigned integer to floating point.
UIToFPInst(Value *S, Type *Ty, const Twine &NameStr, BasicBlock::iterator InsertBefore)
Constructor with insert-before-instruction semantics.
UIToFPInst * cloneImpl() const
Clone an identical UIToFPInst.
UnaryOperator * cloneImpl() const
UnaryOperator(UnaryOps iType, Value *S, Type *Ty, const Twine &Name, BasicBlock::iterator InsertBefore)
static UnaryOperator * Create(UnaryOps Op, Value *S, const Twine &Name, BasicBlock::iterator InsertBefore)
Construct a unary instruction, given the opcode and an operand.
UnaryOps getOpcode() const
Definition: InstrTypes.h:205
static UndefValue * get(Type *T)
Static factory methods - Return an 'undef' object of the specified type.
Definition: Constants.cpp:1808
This function has undefined behavior.
UnreachableInst(LLVMContext &C, BasicBlock::iterator InsertBefore)
UnreachableInst * cloneImpl() const
A Use represents the edge between a Value definition and its users.
Definition: Use.h:43
void set(Value *Val)
Definition: Value.h:882
const Use * getOperandList() const
Definition: User.h:162
op_range operands()
Definition: User.h:242
void allocHungoffUses(unsigned N, bool IsPhi=false)
Allocate the array of Uses, followed by a pointer (with bottom bit set) to the User.
Definition: User.cpp:50
op_iterator op_begin()
Definition: User.h:234
void setNumHungOffUseOperands(unsigned NumOps)
Subclasses with hung off uses need to manage the operand count themselves.
Definition: User.h:215
Use & Op()
Definition: User.h:133
Value * getOperand(unsigned i) const
Definition: User.h:169
unsigned getNumOperands() const
Definition: User.h:191
op_iterator op_end()
Definition: User.h:236
void growHungoffUses(unsigned N, bool IsPhi=false)
Grow the number of hung off uses.
Definition: User.cpp:67
This class represents the va_arg llvm instruction, which returns an argument of the specified type gi...
VAArgInst * cloneImpl() const
LLVM Value Representation.
Definition: Value.h:74
Type * getType() const
All values are typed, get the type of this value.
Definition: Value.h:255
unsigned char SubclassOptionalData
Hold subclass data that can be dropped.
Definition: Value.h:84
void setName(const Twine &Name)
Change the name of the value.
Definition: Value.cpp:377
void replaceAllUsesWith(Value *V)
Change all uses of this to point to a new Value.
Definition: Value.cpp:534
LLVMContext & getContext() const
All values hold a context through their type.
Definition: Value.cpp:1074
StringRef getName() const
Return a constant reference to the value's name.
Definition: Value.cpp:309
Base class of all SIMD vector types.
Definition: DerivedTypes.h:403
ElementCount getElementCount() const
Return an ElementCount instance to represent the (possibly scalable) number of elements in the vector...
Definition: DerivedTypes.h:641
static VectorType * get(Type *ElementType, ElementCount EC)
This static method is the primary way to construct an VectorType.
Definition: Type.cpp:676
This class represents zero extension of integer types.
ZExtInst(Value *S, Type *Ty, const Twine &NameStr, BasicBlock::iterator InsertBefore)
Constructor with insert-before-instruction semantics.
ZExtInst * cloneImpl() const
Clone an identical ZExtInst.
std::pair< iterator, bool > insert(const ValueT &V)
Definition: DenseSet.h:206
size_type size() const
Definition: DenseSet.h:81
bool contains(const_arg_type_t< ValueT > V) const
Check if the set contains the given element.
Definition: DenseSet.h:185
constexpr ScalarTy getKnownMinValue() const
Returns the minimum value this quantity can represent.
Definition: TypeSize.h:168
An efficient, type-erasing, non-owning reference to a callable.
base_list_type::iterator iterator
Definition: ilist.h:121
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition: raw_ostream.h:52
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr char Attrs[]
Key for Kernel::Metadata::mAttrs.
@ C
The default llvm calling convention, compatible with C.
Definition: CallingConv.h:34
@ CE
Windows NT (Windows on ARM)
initializer< Ty > init(const Ty &Val)
Definition: CommandLine.h:450
@ Switch
The "resume-switch" lowering, where there are separate resume and destroy functions that are shared b...
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
Definition: STLExtras.h:329
@ Offset
Definition: DWP.cpp:456
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1722
auto size(R &&Range, std::enable_if_t< std::is_base_of< std::random_access_iterator_tag, typename std::iterator_traits< decltype(Range.begin())>::iterator_category >::value, void > *=nullptr)
Get the size of a range.
Definition: STLExtras.h:1680
unsigned getPointerAddressSpace(const Type *T)
Definition: SPIRVUtils.h:122
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
MDNode * getBranchWeightMDNode(const Instruction &I)
Get the branch weights metadata node.
auto reverse(ContainerTy &&C)
Definition: STLExtras.h:419
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
Definition: MathExtras.h:275
decltype(auto) get(const PointerIntPair< PointerTy, IntBits, IntType, PtrTraits, Info > &Pair)
FPClassTest
Floating-point class tests, supported by 'is_fpclass' intrinsic.
bool NullPointerIsDefined(const Function *F, unsigned AS=0)
Check whether null pointer dereferencing is considered undefined behavior for a given function or an ...
Definition: Function.cpp:2043
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:163
bool isPointerTy(const Type *T)
Definition: SPIRVUtils.h:116
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
Definition: Casting.h:548
constexpr int PoisonMaskElem
AtomicOrdering
Atomic ordering for LLVM's memory model.
auto remove_if(R &&Range, UnaryPredicate P)
Provide wrappers to std::remove_if which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1761
@ Or
Bitwise or logical OR of integers.
@ Mul
Product of integers.
@ Xor
Bitwise or logical XOR of integers.
@ FMul
Product of floats.
@ And
Bitwise or logical AND of integers.
@ Add
Sum of integers.
@ FAdd
Sum of floats.
raw_ostream & operator<<(raw_ostream &OS, const APFixedPoint &FX)
Definition: APFixedPoint.h:293
OutputIt copy(R &&Range, OutputIt Out)
Definition: STLExtras.h:1824
constexpr unsigned BitWidth
Definition: BitmaskEnum.h:191
bool extractBranchWeights(const MDNode *ProfileData, SmallVectorImpl< uint32_t > &Weights)
Extract branch weights from MD_prof metadata.
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition: Casting.h:565
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Definition: STLExtras.h:1879
bool all_equal(std::initializer_list< T > Values)
Returns true if all Values in the initializer lists are equal or the list is empty.
Definition: STLExtras.h:2039
auto seq(T Begin, T End)
Iterate over an integral type from Begin up to - but not including - End.
Definition: Sequence.h:305
@ Default
The result values are uniform if and only if all operands are uniform.
void scaleProfData(Instruction &I, uint64_t S, uint64_t T)
Scaling the profile data attached to 'I' using the ratio of S/T.
cmpResult
IEEE-754R 5.11: Floating Point Comparison Relations.
Definition: APFloat.h:220
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition: Alignment.h:39
Describes an element of a Bitfield.
Definition: Bitfields.h:223
Used to keep track of an operand bundle.
Definition: InstrTypes.h:2485
uint32_t End
The index in the Use& vector where operands for this operand bundle ends.
Definition: InstrTypes.h:2496
uint32_t Begin
The index in the Use& vector where operands for this operand bundle starts.
Definition: InstrTypes.h:2492
Incoming for lane mask phi as machine instruction, incoming register Reg and incoming block Block are...
Compile-time customization of User operands.
Definition: User.h:42