LLVM 19.0.0git
Instructions.cpp
1//===- Instructions.cpp - Implement the LLVM instructions -----------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file implements all of the non-inline methods for the LLVM instruction
10// classes.
11//
12//===----------------------------------------------------------------------===//
13
15#include "LLVMContextImpl.h"
18#include "llvm/ADT/Twine.h"
19#include "llvm/IR/Attributes.h"
20#include "llvm/IR/BasicBlock.h"
21#include "llvm/IR/Constant.h"
22#include "llvm/IR/Constants.h"
23#include "llvm/IR/DataLayout.h"
25#include "llvm/IR/Function.h"
26#include "llvm/IR/InstrTypes.h"
27#include "llvm/IR/Instruction.h"
28#include "llvm/IR/Intrinsics.h"
29#include "llvm/IR/LLVMContext.h"
30#include "llvm/IR/MDBuilder.h"
31#include "llvm/IR/Metadata.h"
32#include "llvm/IR/Module.h"
33#include "llvm/IR/Operator.h"
35#include "llvm/IR/Type.h"
36#include "llvm/IR/Value.h"
41#include "llvm/Support/ModRef.h"
43#include <algorithm>
44#include <cassert>
45#include <cstdint>
46#include <optional>
47#include <vector>
48
49using namespace llvm;
50
51static cl::opt<bool> DisableI2pP2iOpt(
52 "disable-i2p-p2i-opt", cl::init(false),
53 cl::desc("Disables inttoptr/ptrtoint roundtrip optimization"));
54
55//===----------------------------------------------------------------------===//
56// AllocaInst Class
57//===----------------------------------------------------------------------===//
58
59std::optional<TypeSize>
60AllocaInst::getAllocationSize(const DataLayout &DL) const {
61 TypeSize Size = DL.getTypeAllocSize(getAllocatedType());
62 if (isArrayAllocation()) {
63 auto *C = dyn_cast<ConstantInt>(getArraySize());
64 if (!C)
65 return std::nullopt;
66 assert(!Size.isScalable() && "Array elements cannot have a scalable size");
67 Size *= C->getZExtValue();
68 }
69 return Size;
70}
71
72std::optional<TypeSize>
73AllocaInst::getAllocationSizeInBits(const DataLayout &DL) const {
74 std::optional<TypeSize> Size = getAllocationSize(DL);
75 if (Size)
76 return *Size * 8;
77 return std::nullopt;
78}
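// Editorial note (not part of upstream Instructions.cpp): a minimal usage
// sketch for the two size queries above; `AI` (an AllocaInst *) is
// hypothetical and the DataLayout comes from its enclosing module.
//
//   const DataLayout &DL = AI->getModule()->getDataLayout();
//   if (std::optional<TypeSize> Size = AI->getAllocationSize(DL))
//     if (!Size->isScalable())
//       errs() << "alloca reserves " << Size->getFixedValue() << " bytes\n";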
79
80//===----------------------------------------------------------------------===//
81// SelectInst Class
82//===----------------------------------------------------------------------===//
83
84/// areInvalidOperands - Return a string if the specified operands are invalid
85/// for a select operation, otherwise return null.
86const char *SelectInst::areInvalidOperands(Value *Op0, Value *Op1, Value *Op2) {
87 if (Op1->getType() != Op2->getType())
88 return "both values to select must have same type";
89
90 if (Op1->getType()->isTokenTy())
91 return "select values cannot have token type";
92
93 if (VectorType *VT = dyn_cast<VectorType>(Op0->getType())) {
94 // Vector select.
95 if (VT->getElementType() != Type::getInt1Ty(Op0->getContext()))
96 return "vector select condition element type must be i1";
97 VectorType *ET = dyn_cast<VectorType>(Op1->getType());
98 if (!ET)
99 return "selected values for vector select must be vectors";
100 if (ET->getElementCount() != VT->getElementCount())
101 return "vector select requires selected vectors to have "
102 "the same vector length as select condition";
103 } else if (Op0->getType() != Type::getInt1Ty(Op0->getContext())) {
104 return "select condition must be i1 or <n x i1>";
105 }
106 return nullptr;
107}
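// Editorial note (not part of upstream Instructions.cpp): a minimal sketch of
// how a front end might use the check above before building a select; `Cond`,
// `TVal`, `FVal` and `Builder` are hypothetical.
//
//   if (const char *Err = SelectInst::areInvalidOperands(Cond, TVal, FVal))
//     report_fatal_error(Twine("cannot build select: ") + Err);
//   Value *Sel = Builder.CreateSelect(Cond, TVal, FVal);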
108
109//===----------------------------------------------------------------------===//
110// PHINode Class
111//===----------------------------------------------------------------------===//
112
113PHINode::PHINode(const PHINode &PN)
114 : Instruction(PN.getType(), Instruction::PHI, nullptr, PN.getNumOperands()),
115 ReservedSpace(PN.getNumOperands()) {
116 allocHungoffUses(PN.getNumOperands());
117 std::copy(PN.op_begin(), PN.op_end(), op_begin());
118 copyIncomingBlocks(make_range(PN.block_begin(), PN.block_end()));
119 SubclassOptionalData = PN.SubclassOptionalData;
120}
121
122// removeIncomingValue - Remove an incoming value. This is useful if a
123// predecessor basic block is deleted.
124Value *PHINode::removeIncomingValue(unsigned Idx, bool DeletePHIIfEmpty) {
125 Value *Removed = getIncomingValue(Idx);
126
127 // Move everything after this operand down.
128 //
129 // FIXME: we could just swap with the end of the list, then erase. However,
130 // clients might not expect this to happen. The code as it is thrashes the
131 // use/def lists, which is kinda lame.
132 std::copy(op_begin() + Idx + 1, op_end(), op_begin() + Idx);
134
135 // Nuke the last value.
136 Op<-1>().set(nullptr);
138
139 // If the PHI node is dead, because it has zero entries, nuke it now.
140 if (getNumOperands() == 0 && DeletePHIIfEmpty) {
141 // If anyone is using this PHI, make them use a dummy value instead...
144 }
145 return Removed;
146}
147
148void PHINode::removeIncomingValueIf(function_ref<bool(unsigned)> Predicate,
149 bool DeletePHIIfEmpty) {
150 SmallDenseSet<unsigned> RemoveIndices;
151 for (unsigned Idx = 0; Idx < getNumIncomingValues(); ++Idx)
152 if (Predicate(Idx))
153 RemoveIndices.insert(Idx);
154
155 if (RemoveIndices.empty())
156 return;
157
158 // Remove operands.
159 auto NewOpEnd = remove_if(operands(), [&](Use &U) {
160 return RemoveIndices.contains(U.getOperandNo());
161 });
162 for (Use &U : make_range(NewOpEnd, op_end()))
163 U.set(nullptr);
164
165 // Remove incoming blocks.
166 (void)std::remove_if(const_cast<block_iterator>(block_begin()),
167 const_cast<block_iterator>(block_end()), [&](BasicBlock *&BB) {
168 return RemoveIndices.contains(&BB - block_begin());
169 });
170
171 setNumHungOffUseOperands(getNumOperands() - RemoveIndices.size());
172
173 // If the PHI node is dead, because it has zero entries, nuke it now.
174 if (getNumOperands() == 0 && DeletePHIIfEmpty) {
175 // If anyone is using this PHI, make them use a dummy value instead...
178 }
179}
180
181/// growOperands - grow operands - This grows the operand list in response
182/// to a push_back style of operation. This grows the number of ops by 1.5
183/// times.
184///
185void PHINode::growOperands() {
186 unsigned e = getNumOperands();
187 unsigned NumOps = e + e / 2;
188 if (NumOps < 2) NumOps = 2; // 2 op PHI nodes are VERY common.
189
190 ReservedSpace = NumOps;
191 growHungoffUses(ReservedSpace, /* IsPhi */ true);
192}
193
194/// hasConstantValue - If the specified PHI node always merges together the same
195/// value, return the value, otherwise return null.
196Value *PHINode::hasConstantValue() const {
197 // Exploit the fact that phi nodes always have at least one entry.
198 Value *ConstantValue = getIncomingValue(0);
199 for (unsigned i = 1, e = getNumIncomingValues(); i != e; ++i)
200 if (getIncomingValue(i) != ConstantValue && getIncomingValue(i) != this) {
201 if (ConstantValue != this)
202 return nullptr; // Incoming values not all the same.
203 // The case where the first value is this PHI.
204 ConstantValue = getIncomingValue(i);
205 }
206 if (ConstantValue == this)
207 return UndefValue::get(getType());
208 return ConstantValue;
209}
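// Editorial note (not part of upstream Instructions.cpp): a sketch of the
// common folding pattern enabled by hasConstantValue(); `PN` is a
// hypothetical PHINode * and the replacement assumes the merged value is
// available where the PHI lives.
//
//   if (Value *V = PN->hasConstantValue()) {
//     PN->replaceAllUsesWith(V);
//     PN->eraseFromParent();
//   }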
210
211/// hasConstantOrUndefValue - Whether the specified PHI node always merges
212/// together the same value, assuming that undefs result in the same value as
213/// non-undefs.
214/// Unlike \ref hasConstantValue, this does not return a value because the
215/// unique non-undef incoming value need not dominate the PHI node.
216bool PHINode::hasConstantOrUndefValue() const {
217 Value *ConstantValue = nullptr;
218 for (unsigned i = 0, e = getNumIncomingValues(); i != e; ++i) {
219 Value *Incoming = getIncomingValue(i);
220 if (Incoming != this && !isa<UndefValue>(Incoming)) {
221 if (ConstantValue && ConstantValue != Incoming)
222 return false;
223 ConstantValue = Incoming;
224 }
225 }
226 return true;
227}
228
229//===----------------------------------------------------------------------===//
230// LandingPadInst Implementation
231//===----------------------------------------------------------------------===//
232
233LandingPadInst::LandingPadInst(Type *RetTy, unsigned NumReservedValues,
234 const Twine &NameStr,
235 BasicBlock::iterator InsertBefore)
236 : Instruction(RetTy, Instruction::LandingPad, nullptr, 0, InsertBefore) {
237 init(NumReservedValues, NameStr);
238}
239
240LandingPadInst::LandingPadInst(Type *RetTy, unsigned NumReservedValues,
241 const Twine &NameStr, Instruction *InsertBefore)
242 : Instruction(RetTy, Instruction::LandingPad, nullptr, 0, InsertBefore) {
243 init(NumReservedValues, NameStr);
244}
245
246LandingPadInst::LandingPadInst(Type *RetTy, unsigned NumReservedValues,
247 const Twine &NameStr, BasicBlock *InsertAtEnd)
248 : Instruction(RetTy, Instruction::LandingPad, nullptr, 0, InsertAtEnd) {
249 init(NumReservedValues, NameStr);
250}
251
252LandingPadInst::LandingPadInst(const LandingPadInst &LP)
253 : Instruction(LP.getType(), Instruction::LandingPad, nullptr,
254 LP.getNumOperands()),
255 ReservedSpace(LP.getNumOperands()) {
256 allocHungoffUses(LP.getNumOperands());
257 Use *OL = getOperandList();
258 const Use *InOL = LP.getOperandList();
259 for (unsigned I = 0, E = ReservedSpace; I != E; ++I)
260 OL[I] = InOL[I];
261
262 setCleanup(LP.isCleanup());
263}
264
265LandingPadInst *LandingPadInst::Create(Type *RetTy, unsigned NumReservedClauses,
266 const Twine &NameStr,
267 Instruction *InsertBefore) {
268 return new LandingPadInst(RetTy, NumReservedClauses, NameStr, InsertBefore);
269}
270
271LandingPadInst *LandingPadInst::Create(Type *RetTy, unsigned NumReservedClauses,
272 const Twine &NameStr,
273 BasicBlock *InsertAtEnd) {
274 return new LandingPadInst(RetTy, NumReservedClauses, NameStr, InsertAtEnd);
275}
276
277void LandingPadInst::init(unsigned NumReservedValues, const Twine &NameStr) {
278 ReservedSpace = NumReservedValues;
279 setNumHungOffUseOperands(0);
280 allocHungoffUses(ReservedSpace);
281 setName(NameStr);
282 setCleanup(false);
283}
284
285/// growOperands - grow operands - This grows the operand list in response to a
286/// push_back style of operation. This grows the number of ops by 2 times.
287void LandingPadInst::growOperands(unsigned Size) {
288 unsigned e = getNumOperands();
289 if (ReservedSpace >= e + Size) return;
290 ReservedSpace = (std::max(e, 1U) + Size / 2) * 2;
291 growHungoffUses(ReservedSpace);
292}
293
294void LandingPadInst::addClause(Constant *Val) {
295 unsigned OpNo = getNumOperands();
296 growOperands(1);
297 assert(OpNo < ReservedSpace && "Growing didn't work!");
298 setNumHungOffUseOperands(getNumOperands() + 1);
299 getOperandList()[OpNo] = Val;
300}
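// Editorial note (not part of upstream Instructions.cpp): a sketch of building
// a landingpad with the API above; `ExnTy`, `TypeInfo` (a Constant *) and
// `InsertBefore` are hypothetical.
//
//   LandingPadInst *LP = LandingPadInst::Create(
//       ExnTy, /*NumReservedClauses=*/1, "lpad", InsertBefore);
//   LP->addClause(TypeInfo); // catch clause for one type-info constant
//   LP->setCleanup(true);    // also run cleanup actions while unwinding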
301
302//===----------------------------------------------------------------------===//
303// CallBase Implementation
304//===----------------------------------------------------------------------===//
305
306CallBase *CallBase::Create(CallBase *CB, ArrayRef<OperandBundleDef> Bundles,
307 BasicBlock::iterator InsertPt) {
308 switch (CB->getOpcode()) {
309 case Instruction::Call:
310 return CallInst::Create(cast<CallInst>(CB), Bundles, InsertPt);
311 case Instruction::Invoke:
312 return InvokeInst::Create(cast<InvokeInst>(CB), Bundles, InsertPt);
313 case Instruction::CallBr:
314 return CallBrInst::Create(cast<CallBrInst>(CB), Bundles, InsertPt);
315 default:
316 llvm_unreachable("Unknown CallBase sub-class!");
317 }
318}
319
320CallBase *CallBase::Create(CallBase *CB, ArrayRef<OperandBundleDef> Bundles,
321 Instruction *InsertPt) {
322 switch (CB->getOpcode()) {
323 case Instruction::Call:
324 return CallInst::Create(cast<CallInst>(CB), Bundles, InsertPt);
325 case Instruction::Invoke:
326 return InvokeInst::Create(cast<InvokeInst>(CB), Bundles, InsertPt);
327 case Instruction::CallBr:
328 return CallBrInst::Create(cast<CallBrInst>(CB), Bundles, InsertPt);
329 default:
330 llvm_unreachable("Unknown CallBase sub-class!");
331 }
332}
333
334CallBase *CallBase::Create(CallBase *CI, OperandBundleDef OpB,
335 Instruction *InsertPt) {
336 SmallVector<OperandBundleDef, 2> OpDefs;
337 for (unsigned i = 0, e = CI->getNumOperandBundles(); i < e; ++i) {
338 auto ChildOB = CI->getOperandBundleAt(i);
339 if (ChildOB.getTagName() != OpB.getTag())
340 OpDefs.emplace_back(ChildOB);
341 }
342 OpDefs.emplace_back(OpB);
343 return CallBase::Create(CI, OpDefs, InsertPt);
344}
345
346
348
349unsigned CallBase::getNumSubclassExtraOperandsDynamic() const {
350 assert(getOpcode() == Instruction::CallBr && "Unexpected opcode!");
351 return cast<CallBrInst>(this)->getNumIndirectDests() + 1;
352}
353
354bool CallBase::isIndirectCall() const {
355 const Value *V = getCalledOperand();
356 if (isa<Function>(V) || isa<Constant>(V))
357 return false;
358 return !isInlineAsm();
359}
360
361/// Tests if this call site must be tail call optimized. Only a CallInst can
362/// be tail call optimized.
363bool CallBase::isMustTailCall() const {
364 if (auto *CI = dyn_cast<CallInst>(this))
365 return CI->isMustTailCall();
366 return false;
367}
368
369/// Tests if this call site is marked as a tail call.
370bool CallBase::isTailCall() const {
371 if (auto *CI = dyn_cast<CallInst>(this))
372 return CI->isTailCall();
373 return false;
374}
375
376Intrinsic::ID CallBase::getIntrinsicID() const {
377 if (auto *F = getCalledFunction())
378 return F->getIntrinsicID();
379 return Intrinsic::not_intrinsic;
380}
381
382FPClassTest CallBase::getRetNoFPClass() const {
383 FPClassTest Mask = Attrs.getRetNoFPClass();
384
385 if (const Function *F = getCalledFunction())
386 Mask |= F->getAttributes().getRetNoFPClass();
387 return Mask;
388}
389
390FPClassTest CallBase::getParamNoFPClass(unsigned i) const {
391 FPClassTest Mask = Attrs.getParamNoFPClass(i);
392
393 if (const Function *F = getCalledFunction())
394 Mask |= F->getAttributes().getParamNoFPClass(i);
395 return Mask;
396}
397
398bool CallBase::isReturnNonNull() const {
399 if (hasRetAttr(Attribute::NonNull))
400 return true;
401
402 if (getRetDereferenceableBytes() > 0 &&
403 !NullPointerIsDefined(getCaller(), getType()->getPointerAddressSpace()))
404 return true;
405
406 return false;
407}
408
409Value *CallBase::getArgOperandWithAttribute(Attribute::AttrKind Kind) const {
410 unsigned Index;
411
412 if (Attrs.hasAttrSomewhere(Kind, &Index))
413 return getArgOperand(Index - AttributeList::FirstArgIndex);
414 if (const Function *F = getCalledFunction())
415 if (F->getAttributes().hasAttrSomewhere(Kind, &Index))
416 return getArgOperand(Index - AttributeList::FirstArgIndex);
417
418 return nullptr;
419}
420
421/// Determine whether the argument or parameter has the given attribute.
422bool CallBase::paramHasAttr(unsigned ArgNo, Attribute::AttrKind Kind) const {
423 assert(ArgNo < arg_size() && "Param index out of bounds!");
424
425 if (Attrs.hasParamAttr(ArgNo, Kind))
426 return true;
427
428 const Function *F = getCalledFunction();
429 if (!F)
430 return false;
431
432 if (!F->getAttributes().hasParamAttr(ArgNo, Kind))
433 return false;
434
435 // Take into account mod/ref by operand bundles.
436 switch (Kind) {
437 case Attribute::ReadNone:
438 return !hasReadingOperandBundles() && !hasClobberingOperandBundles();
439 case Attribute::ReadOnly:
440 return !hasClobberingOperandBundles();
441 case Attribute::WriteOnly:
442 return !hasReadingOperandBundles();
443 default:
444 return true;
445 }
446}
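// Editorial note (not part of upstream Instructions.cpp): paramHasAttr()
// merges call-site attributes, callee attributes and operand-bundle effects,
// so clients query it rather than either attribute list directly; `CB` is a
// hypothetical CallBase *.
//
//   for (unsigned I = 0, E = CB->arg_size(); I != E; ++I)
//     if (CB->paramHasAttr(I, Attribute::NoCapture))
//       ; // argument I is known not to be captured by the callee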
447
448bool CallBase::hasFnAttrOnCalledFunction(Attribute::AttrKind Kind) const {
449 Value *V = getCalledOperand();
450 if (auto *CE = dyn_cast<ConstantExpr>(V))
451 if (CE->getOpcode() == BitCast)
452 V = CE->getOperand(0);
453
454 if (auto *F = dyn_cast<Function>(V))
455 return F->getAttributes().hasFnAttr(Kind);
456
457 return false;
458}
459
460bool CallBase::hasFnAttrOnCalledFunction(StringRef Kind) const {
461 Value *V = getCalledOperand();
462 if (auto *CE = dyn_cast<ConstantExpr>(V))
463 if (CE->getOpcode() == BitCast)
464 V = CE->getOperand(0);
465
466 if (auto *F = dyn_cast<Function>(V))
467 return F->getAttributes().hasFnAttr(Kind);
468
469 return false;
470}
471
472template <typename AK>
473Attribute CallBase::getFnAttrOnCalledFunction(AK Kind) const {
474 if constexpr (std::is_same_v<AK, Attribute::AttrKind>) {
475 // getMemoryEffects() correctly combines memory effects from the call-site,
476 // operand bundles and function.
477 assert(Kind != Attribute::Memory && "Use getMemoryEffects() instead");
478 }
479
480 Value *V = getCalledOperand();
481 if (auto *CE = dyn_cast<ConstantExpr>(V))
482 if (CE->getOpcode() == BitCast)
483 V = CE->getOperand(0);
484
485 if (auto *F = dyn_cast<Function>(V))
486 return F->getAttributes().getFnAttr(Kind);
487
488 return Attribute();
489}
490
491template Attribute
492CallBase::getFnAttrOnCalledFunction(Attribute::AttrKind Kind) const;
493template Attribute CallBase::getFnAttrOnCalledFunction(StringRef Kind) const;
494
495void CallBase::getOperandBundlesAsDefs(
496 SmallVectorImpl<OperandBundleDef> &Defs) const {
497 for (unsigned i = 0, e = getNumOperandBundles(); i != e; ++i)
498 Defs.emplace_back(getOperandBundleAt(i));
499}
500
501CallBase::op_iterator
502CallBase::populateBundleOperandInfos(ArrayRef<OperandBundleDef> Bundles,
503 const unsigned BeginIndex) {
504 auto It = op_begin() + BeginIndex;
505 for (auto &B : Bundles)
506 It = std::copy(B.input_begin(), B.input_end(), It);
507
508 auto *ContextImpl = getContext().pImpl;
509 auto BI = Bundles.begin();
510 unsigned CurrentIndex = BeginIndex;
511
512 for (auto &BOI : bundle_op_infos()) {
513 assert(BI != Bundles.end() && "Incorrect allocation?");
514
515 BOI.Tag = ContextImpl->getOrInsertBundleTag(BI->getTag());
516 BOI.Begin = CurrentIndex;
517 BOI.End = CurrentIndex + BI->input_size();
518 CurrentIndex = BOI.End;
519 BI++;
520 }
521
522 assert(BI == Bundles.end() && "Incorrect allocation?");
523
524 return It;
525}
526
527CallBase::BundleOpInfo &CallBase::getBundleOpInfoForOperand(unsigned OpIdx) {
528 /// When there aren't many bundles, we do a simple linear search.
529 /// Otherwise fall back to a binary search that uses the fact that bundles
530 /// usually have a similar number of arguments to get faster convergence.
532 for (auto &BOI : bundle_op_infos())
533 if (BOI.Begin <= OpIdx && OpIdx < BOI.End)
534 return BOI;
535
536 llvm_unreachable("Did not find operand bundle for operand!");
537 }
538
539 assert(OpIdx >= arg_size() && "the Idx is not in the operand bundles");
541 OpIdx < std::prev(bundle_op_info_end())->End &&
542 "The Idx isn't in the operand bundle");
543
544 /// We need a fractional value below; to avoid floating point arithmetic
545 /// we use an integral value multiplied by this scaling constant.
546 constexpr unsigned NumberScaling = 1024;
547
548 bundle_op_iterator Begin = bundle_op_info_begin();
549 bundle_op_iterator End = bundle_op_info_end();
550 bundle_op_iterator Current = Begin;
551
552 while (Begin != End) {
553 unsigned ScaledOperandPerBundle =
554 NumberScaling * (std::prev(End)->End - Begin->Begin) / (End - Begin);
555 Current = Begin + (((OpIdx - Begin->Begin) * NumberScaling) /
556 ScaledOperandPerBundle);
557 if (Current >= End)
558 Current = std::prev(End);
559 assert(Current < End && Current >= Begin &&
560 "the operand bundle doesn't cover every value in the range");
561 if (OpIdx >= Current->Begin && OpIdx < Current->End)
562 break;
563 if (OpIdx >= Current->End)
564 Begin = Current + 1;
565 else
566 End = Current;
567 }
568
569 assert(OpIdx >= Current->Begin && OpIdx < Current->End &&
570 "the operand bundle doesn't cover every value in the range");
571 return *Current;
572}
573
574CallBase *CallBase::addOperandBundle(CallBase *CB, uint32_t ID,
575 OperandBundleDef OB,
576 BasicBlock::iterator InsertPt) {
577 if (CB->getOperandBundle(ID))
578 return CB;
579
580 SmallVector<OperandBundleDef, 1> Bundles;
581 CB->getOperandBundlesAsDefs(Bundles);
582 Bundles.push_back(OB);
583 return Create(CB, Bundles, InsertPt);
584}
585
586CallBase *CallBase::addOperandBundle(CallBase *CB, uint32_t ID,
587 OperandBundleDef OB,
588 Instruction *InsertPt) {
589 if (CB->getOperandBundle(ID))
590 return CB;
591
592 SmallVector<OperandBundleDef, 1> Bundles;
593 CB->getOperandBundlesAsDefs(Bundles);
594 Bundles.push_back(OB);
595 return Create(CB, Bundles, InsertPt);
596}
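// Editorial note (not part of upstream Instructions.cpp): a sketch of
// attaching a "deopt" bundle with the helper above. Adding a bundle creates a
// replacement instruction, so the old call must be RAUW'd and erased; `CB`
// and `DeoptState` (an ArrayRef<Value *>) are hypothetical.
//
//   OperandBundleDef DeoptBundle("deopt", DeoptState);
//   CallBase *NewCB = CallBase::addOperandBundle(CB, LLVMContext::OB_deopt,
//                                                DeoptBundle, CB);
//   CB->replaceAllUsesWith(NewCB);
//   CB->eraseFromParent();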
597
598CallBase *CallBase::removeOperandBundle(CallBase *CB, uint32_t ID,
599 BasicBlock::iterator InsertPt) {
600 SmallVector<OperandBundleDef, 1> Bundles;
601 bool CreateNew = false;
602
603 for (unsigned I = 0, E = CB->getNumOperandBundles(); I != E; ++I) {
604 auto Bundle = CB->getOperandBundleAt(I);
605 if (Bundle.getTagID() == ID) {
606 CreateNew = true;
607 continue;
608 }
609 Bundles.emplace_back(Bundle);
610 }
611
612 return CreateNew ? Create(CB, Bundles, InsertPt) : CB;
613}
614
615CallBase *CallBase::removeOperandBundle(CallBase *CB, uint32_t ID,
616 Instruction *InsertPt) {
617 SmallVector<OperandBundleDef, 1> Bundles;
618 bool CreateNew = false;
619
620 for (unsigned I = 0, E = CB->getNumOperandBundles(); I != E; ++I) {
621 auto Bundle = CB->getOperandBundleAt(I);
622 if (Bundle.getTagID() == ID) {
623 CreateNew = true;
624 continue;
625 }
626 Bundles.emplace_back(Bundle);
627 }
628
629 return CreateNew ? Create(CB, Bundles, InsertPt) : CB;
630}
631
632bool CallBase::hasReadingOperandBundles() const {
633 // Implementation note: this is a conservative implementation of operand
634 // bundle semantics, where *any* non-assume operand bundle (other than
635 // ptrauth) forces a callsite to be at least readonly.
636 return hasOperandBundlesOtherThan(
637 {LLVMContext::OB_ptrauth, LLVMContext::OB_kcfi}) &&
638 getIntrinsicID() != Intrinsic::assume;
639}
640
641bool CallBase::hasClobberingOperandBundles() const {
642 return hasOperandBundlesOtherThan(
643 {LLVMContext::OB_deopt, LLVMContext::OB_funclet,
644 LLVMContext::OB_ptrauth, LLVMContext::OB_kcfi}) &&
645 getIntrinsicID() != Intrinsic::assume;
646}
647
648MemoryEffects CallBase::getMemoryEffects() const {
649 MemoryEffects ME = getAttributes().getMemoryEffects();
650 if (auto *Fn = dyn_cast<Function>(getCalledOperand())) {
651 MemoryEffects FnME = Fn->getMemoryEffects();
652 if (hasOperandBundles()) {
653 // TODO: Add a method to get memory effects for operand bundles instead.
654 if (hasReadingOperandBundles())
655 FnME |= MemoryEffects::readOnly();
656 if (hasClobberingOperandBundles())
657 FnME |= MemoryEffects::writeOnly();
658 }
659 ME &= FnME;
660 }
661 return ME;
662}
663void CallBase::setMemoryEffects(MemoryEffects ME) {
664 addFnAttr(Attribute::getWithMemoryEffects(getContext(), ME));
665}
666
667/// Determine if the function does not access memory.
668bool CallBase::doesNotAccessMemory() const {
669 return getMemoryEffects().doesNotAccessMemory();
670}
671void CallBase::setDoesNotAccessMemory() {
672 setMemoryEffects(MemoryEffects::none());
673}
674
675/// Determine if the function does not access or only reads memory.
676bool CallBase::onlyReadsMemory() const {
677 return getMemoryEffects().onlyReadsMemory();
678}
679void CallBase::setOnlyReadsMemory() {
680 setMemoryEffects(getMemoryEffects() & MemoryEffects::readOnly());
681}
682
683/// Determine if the function does not access or only writes memory.
684bool CallBase::onlyWritesMemory() const {
685 return getMemoryEffects().onlyWritesMemory();
686}
687void CallBase::setOnlyWritesMemory() {
688 setMemoryEffects(getMemoryEffects() & MemoryEffects::writeOnly());
689}
690
691/// Determine if the call can access memory only using pointers based
692/// on its arguments.
693bool CallBase::onlyAccessesArgMemory() const {
694 return getMemoryEffects().onlyAccessesArgPointees();
695}
696void CallBase::setOnlyAccessesArgMemory() {
697 setMemoryEffects(getMemoryEffects() & MemoryEffects::argMemOnly());
698}
699
700/// Determine if the function may only access memory that is
701/// inaccessible from the IR.
702bool CallBase::onlyAccessesInaccessibleMemory() const {
703 return getMemoryEffects().onlyAccessesInaccessibleMem();
704}
705void CallBase::setOnlyAccessesInaccessibleMemory() {
706 setMemoryEffects(getMemoryEffects() & MemoryEffects::inaccessibleMemOnly());
707}
708
709/// Determine if the function may only access memory that is
710/// either inaccessible from the IR or pointed to by its arguments.
711bool CallBase::onlyAccessesInaccessibleMemOrArgMem() const {
712 return getMemoryEffects().onlyAccessesInaccessibleOrArgMem();
713}
714void CallBase::setOnlyAccessesInaccessibleMemOrArgMem() {
715 setMemoryEffects(getMemoryEffects() &
716 MemoryEffects::inaccessibleOrArgMemOnly());
717}
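// Editorial note (not part of upstream Instructions.cpp): a sketch of how the
// combined call-site/callee view above is typically consumed; `CB` is a
// hypothetical CallBase *.
//
//   MemoryEffects ME = CB->getMemoryEffects();
//   if (ME.doesNotAccessMemory())
//     ; // the call can be reordered freely with respect to memory
//   else if (ME.onlyReadsMemory())
//     ; // no stores need to be preserved for this call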
718
719//===----------------------------------------------------------------------===//
720// CallInst Implementation
721//===----------------------------------------------------------------------===//
722
723void CallInst::init(FunctionType *FTy, Value *Func, ArrayRef<Value *> Args,
724 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr) {
725 this->FTy = FTy;
726 assert(getNumOperands() == Args.size() + CountBundleInputs(Bundles) + 1 &&
727 "NumOperands not set up?");
728
729#ifndef NDEBUG
730 assert((Args.size() == FTy->getNumParams() ||
731 (FTy->isVarArg() && Args.size() > FTy->getNumParams())) &&
732 "Calling a function with bad signature!");
733
734 for (unsigned i = 0; i != Args.size(); ++i)
735 assert((i >= FTy->getNumParams() ||
736 FTy->getParamType(i) == Args[i]->getType()) &&
737 "Calling a function with a bad signature!");
738#endif
739
740 // Set operands in order of their index to match use-list-order
741 // prediction.
742 llvm::copy(Args, op_begin());
743 setCalledOperand(Func);
744
745 auto It = populateBundleOperandInfos(Bundles, Args.size());
746 (void)It;
747 assert(It + 1 == op_end() && "Should add up!");
748
749 setName(NameStr);
750}
751
752void CallInst::init(FunctionType *FTy, Value *Func, const Twine &NameStr) {
753 this->FTy = FTy;
754 assert(getNumOperands() == 1 && "NumOperands not set up?");
755 setCalledOperand(Func);
756
757 assert(FTy->getNumParams() == 0 && "Calling a function with bad signature");
758
759 setName(NameStr);
760}
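// Editorial note (not part of upstream Instructions.cpp): a sketch of the
// public factory that funnels into the init() routines above; `Callee`
// (a Function *), `Arg0`, `Arg1` and `InsertBefore` are hypothetical.
//
//   SmallVector<Value *, 2> Args = {Arg0, Arg1};
//   CallInst *CI = CallInst::Create(Callee->getFunctionType(), Callee, Args,
//                                   "result", InsertBefore);
//   CI->setCallingConv(Callee->getCallingConv());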
761
762CallInst::CallInst(FunctionType *Ty, Value *Func, const Twine &Name,
763 BasicBlock::iterator InsertBefore)
764 : CallBase(Ty->getReturnType(), Instruction::Call,
765 OperandTraits<CallBase>::op_end(this) - 1, 1, InsertBefore) {
766 init(Ty, Func, Name);
767}
768
769CallInst::CallInst(FunctionType *Ty, Value *Func, const Twine &Name,
770 Instruction *InsertBefore)
771 : CallBase(Ty->getReturnType(), Instruction::Call,
772 OperandTraits<CallBase>::op_end(this) - 1, 1, InsertBefore) {
773 init(Ty, Func, Name);
774}
775
776CallInst::CallInst(FunctionType *Ty, Value *Func, const Twine &Name,
777 BasicBlock *InsertAtEnd)
778 : CallBase(Ty->getReturnType(), Instruction::Call,
779 OperandTraits<CallBase>::op_end(this) - 1, 1, InsertAtEnd) {
780 init(Ty, Func, Name);
781}
782
783CallInst::CallInst(const CallInst &CI)
784 : CallBase(CI.Attrs, CI.FTy, CI.getType(), Instruction::Call,
785 OperandTraits<CallBase>::op_end(this) - CI.getNumOperands(),
786 CI.getNumOperands()) {
787 setTailCallKind(CI.getTailCallKind());
788 setCallingConv(CI.getCallingConv());
789
790 std::copy(CI.op_begin(), CI.op_end(), op_begin());
791 std::copy(CI.bundle_op_info_begin(), CI.bundle_op_info_end(),
792 bundle_op_info_begin());
793 SubclassOptionalData = CI.SubclassOptionalData;
794}
795
796CallInst *CallInst::Create(CallInst *CI, ArrayRef<OperandBundleDef> OpB,
797 BasicBlock::iterator InsertPt) {
798 std::vector<Value *> Args(CI->arg_begin(), CI->arg_end());
799
800 auto *NewCI = CallInst::Create(CI->getFunctionType(), CI->getCalledOperand(),
801 Args, OpB, CI->getName(), InsertPt);
802 NewCI->setTailCallKind(CI->getTailCallKind());
803 NewCI->setCallingConv(CI->getCallingConv());
804 NewCI->SubclassOptionalData = CI->SubclassOptionalData;
805 NewCI->setAttributes(CI->getAttributes());
806 NewCI->setDebugLoc(CI->getDebugLoc());
807 return NewCI;
808}
809
810CallInst *CallInst::Create(CallInst *CI, ArrayRef<OperandBundleDef> OpB,
811 Instruction *InsertPt) {
812 std::vector<Value *> Args(CI->arg_begin(), CI->arg_end());
813
814 auto *NewCI = CallInst::Create(CI->getFunctionType(), CI->getCalledOperand(),
815 Args, OpB, CI->getName(), InsertPt);
816 NewCI->setTailCallKind(CI->getTailCallKind());
817 NewCI->setCallingConv(CI->getCallingConv());
818 NewCI->SubclassOptionalData = CI->SubclassOptionalData;
819 NewCI->setAttributes(CI->getAttributes());
820 NewCI->setDebugLoc(CI->getDebugLoc());
821 return NewCI;
822}
823
824// Update the profile weight for this call instruction by scaling it using the
825// ratio of S/T. The meaning of "branch_weights" metadata for a call
826// instruction is transferred to represent the call count.
827void CallInst::updateProfWeight(uint64_t S, uint64_t T) {
828 auto *ProfileData = getMetadata(LLVMContext::MD_prof);
829 if (ProfileData == nullptr)
830 return;
831
832 auto *ProfDataName = dyn_cast<MDString>(ProfileData->getOperand(0));
833 if (!ProfDataName || (!ProfDataName->getString().equals("branch_weights") &&
834 !ProfDataName->getString().equals("VP")))
835 return;
836
837 if (T == 0) {
838 LLVM_DEBUG(dbgs() << "Attempting to update profile weights will result in "
839 "div by 0. Ignoring. Likely the function "
840 << getParent()->getParent()->getName()
841 << " has 0 entry count, and contains call instructions "
842 "with non-zero prof info.");
843 return;
844 }
845
846 MDBuilder MDB(getContext());
847 SmallVector<Metadata *, 3> Vals;
848 Vals.push_back(ProfileData->getOperand(0));
849 APInt APS(128, S), APT(128, T);
850 if (ProfDataName->getString().equals("branch_weights") &&
851 ProfileData->getNumOperands() > 0) {
852 // Using APInt::div may be expensive, but most cases should fit 64 bits.
853 APInt Val(128, mdconst::dyn_extract<ConstantInt>(ProfileData->getOperand(1))
854 ->getValue()
855 .getZExtValue());
856 Val *= APS;
857 Vals.push_back(MDB.createConstant(
858 ConstantInt::get(Type::getInt32Ty(getContext()),
859 Val.udiv(APT).getLimitedValue(UINT32_MAX))));
860 } else if (ProfDataName->getString().equals("VP"))
861 for (unsigned i = 1; i < ProfileData->getNumOperands(); i += 2) {
862 // The first value is the key of the value profile, which will not change.
863 Vals.push_back(ProfileData->getOperand(i));
864 uint64_t Count =
865 mdconst::dyn_extract<ConstantInt>(ProfileData->getOperand(i + 1))
866 ->getValue()
867 .getZExtValue();
868 // Don't scale the magic number.
869 if (Count == NOMORE_ICP_MAGICNUM) {
870 Vals.push_back(ProfileData->getOperand(i + 1));
871 continue;
872 }
873 // Using APInt::div may be expensive, but most cases should fit 64 bits.
874 APInt Val(128, Count);
875 Val *= APS;
876 Vals.push_back(MDB.createConstant(
877 ConstantInt::get(Type::getInt64Ty(getContext()),
878 Val.udiv(APT).getLimitedValue())));
879 }
880 setMetadata(LLVMContext::MD_prof, MDNode::get(getContext(), Vals));
881}
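// Editorial worked example (not part of upstream Instructions.cpp): with
// S = 1 and T = 4, a call annotated !prof !{!"branch_weights", i32 1000}
// is rescaled to 1000 * 1 / 4 = 250, i.e. !{!"branch_weights", i32 250};
// "VP" value-profile counts are scaled the same way, except for the
// NOMORE_ICP_MAGICNUM marker, which is kept as-is.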
882
883//===----------------------------------------------------------------------===//
884// InvokeInst Implementation
885//===----------------------------------------------------------------------===//
886
887void InvokeInst::init(FunctionType *FTy, Value *Fn, BasicBlock *IfNormal,
888 BasicBlock *IfException, ArrayRef<Value *> Args,
889 ArrayRef<OperandBundleDef> Bundles,
890 const Twine &NameStr) {
891 this->FTy = FTy;
892
893 assert((int)getNumOperands() ==
894 ComputeNumOperands(Args.size(), CountBundleInputs(Bundles)) &&
895 "NumOperands not set up?");
896
897#ifndef NDEBUG
898 assert(((Args.size() == FTy->getNumParams()) ||
899 (FTy->isVarArg() && Args.size() > FTy->getNumParams())) &&
900 "Invoking a function with bad signature");
901
902 for (unsigned i = 0, e = Args.size(); i != e; i++)
903 assert((i >= FTy->getNumParams() ||
904 FTy->getParamType(i) == Args[i]->getType()) &&
905 "Invoking a function with a bad signature!");
906#endif
907
908 // Set operands in order of their index to match use-list-order
909 // prediction.
910 llvm::copy(Args, op_begin());
911 setNormalDest(IfNormal);
912 setUnwindDest(IfException);
913 setCalledOperand(Fn);
914
915 auto It = populateBundleOperandInfos(Bundles, Args.size());
916 (void)It;
917 assert(It + 3 == op_end() && "Should add up!");
918
919 setName(NameStr);
920}
921
922InvokeInst::InvokeInst(const InvokeInst &II)
923 : CallBase(II.Attrs, II.FTy, II.getType(), Instruction::Invoke,
924 OperandTraits<CallBase>::op_end(this) - II.getNumOperands(),
925 II.getNumOperands()) {
926 setCallingConv(II.getCallingConv());
927 std::copy(II.op_begin(), II.op_end(), op_begin());
928 std::copy(II.bundle_op_info_begin(), II.bundle_op_info_end(),
929 bundle_op_info_begin());
930 SubclassOptionalData = II.SubclassOptionalData;
931}
932
933InvokeInst *InvokeInst::Create(InvokeInst *II, ArrayRef<OperandBundleDef> OpB,
934 BasicBlock::iterator InsertPt) {
935 std::vector<Value *> Args(II->arg_begin(), II->arg_end());
936
937 auto *NewII = InvokeInst::Create(
938 II->getFunctionType(), II->getCalledOperand(), II->getNormalDest(),
939 II->getUnwindDest(), Args, OpB, II->getName(), InsertPt);
940 NewII->setCallingConv(II->getCallingConv());
941 NewII->SubclassOptionalData = II->SubclassOptionalData;
942 NewII->setAttributes(II->getAttributes());
943 NewII->setDebugLoc(II->getDebugLoc());
944 return NewII;
945}
946
947InvokeInst *InvokeInst::Create(InvokeInst *II, ArrayRef<OperandBundleDef> OpB,
948 Instruction *InsertPt) {
949 std::vector<Value *> Args(II->arg_begin(), II->arg_end());
950
951 auto *NewII = InvokeInst::Create(
952 II->getFunctionType(), II->getCalledOperand(), II->getNormalDest(),
953 II->getUnwindDest(), Args, OpB, II->getName(), InsertPt);
954 NewII->setCallingConv(II->getCallingConv());
955 NewII->SubclassOptionalData = II->SubclassOptionalData;
956 NewII->setAttributes(II->getAttributes());
957 NewII->setDebugLoc(II->getDebugLoc());
958 return NewII;
959}
960
961LandingPadInst *InvokeInst::getLandingPadInst() const {
962 return cast<LandingPadInst>(getUnwindDest()->getFirstNonPHI());
963}
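// Editorial note (not part of upstream Instructions.cpp): a sketch of building
// an invoke whose unwind edge targets a landingpad block; `Callee`,
// `NormalBB`, `UnwindBB`, `Args` and `InsertBefore` are hypothetical.
//
//   InvokeInst *II =
//       InvokeInst::Create(Callee->getFunctionType(), Callee, NormalBB,
//                          UnwindBB, Args, "invoke.res", InsertBefore);
//   LandingPadInst *LP = II->getLandingPadInst(); // first non-PHI of UnwindBB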
964
965//===----------------------------------------------------------------------===//
966// CallBrInst Implementation
967//===----------------------------------------------------------------------===//
968
969void CallBrInst::init(FunctionType *FTy, Value *Fn, BasicBlock *Fallthrough,
970 ArrayRef<BasicBlock *> IndirectDests,
971 ArrayRef<Value *> Args,
972 ArrayRef<OperandBundleDef> Bundles,
973 const Twine &NameStr) {
974 this->FTy = FTy;
975
976 assert((int)getNumOperands() ==
977 ComputeNumOperands(Args.size(), IndirectDests.size(),
978 CountBundleInputs(Bundles)) &&
979 "NumOperands not set up?");
980
981#ifndef NDEBUG
982 assert(((Args.size() == FTy->getNumParams()) ||
983 (FTy->isVarArg() && Args.size() > FTy->getNumParams())) &&
984 "Calling a function with bad signature");
985
986 for (unsigned i = 0, e = Args.size(); i != e; i++)
987 assert((i >= FTy->getNumParams() ||
988 FTy->getParamType(i) == Args[i]->getType()) &&
989 "Calling a function with a bad signature!");
990#endif
991
992 // Set operands in order of their index to match use-list-order
993 // prediction.
994 std::copy(Args.begin(), Args.end(), op_begin());
995 NumIndirectDests = IndirectDests.size();
996 setDefaultDest(Fallthrough);
997 for (unsigned i = 0; i != NumIndirectDests; ++i)
998 setIndirectDest(i, IndirectDests[i]);
999 setCalledOperand(Fn);
1000
1001 auto It = populateBundleOperandInfos(Bundles, Args.size());
1002 (void)It;
1003 assert(It + 2 + IndirectDests.size() == op_end() && "Should add up!");
1004
1005 setName(NameStr);
1006}
1007
1008CallBrInst::CallBrInst(const CallBrInst &CBI)
1009 : CallBase(CBI.Attrs, CBI.FTy, CBI.getType(), Instruction::CallBr,
1010 OperandTraits<CallBase>::op_end(this) - CBI.getNumOperands(),
1011 CBI.getNumOperands()) {
1012 setCallingConv(CBI.getCallingConv());
1013 std::copy(CBI.op_begin(), CBI.op_end(), op_begin());
1014 std::copy(CBI.bundle_op_info_begin(), CBI.bundle_op_info_end(),
1015 bundle_op_info_begin());
1016 SubclassOptionalData = CBI.SubclassOptionalData;
1017 NumIndirectDests = CBI.NumIndirectDests;
1018}
1019
1020CallBrInst *CallBrInst::Create(CallBrInst *CBI, ArrayRef<OperandBundleDef> OpB,
1021 BasicBlock::iterator InsertPt) {
1022 std::vector<Value *> Args(CBI->arg_begin(), CBI->arg_end());
1023
1024 auto *NewCBI = CallBrInst::Create(
1025 CBI->getFunctionType(), CBI->getCalledOperand(), CBI->getDefaultDest(),
1026 CBI->getIndirectDests(), Args, OpB, CBI->getName(), InsertPt);
1027 NewCBI->setCallingConv(CBI->getCallingConv());
1028 NewCBI->SubclassOptionalData = CBI->SubclassOptionalData;
1029 NewCBI->setAttributes(CBI->getAttributes());
1030 NewCBI->setDebugLoc(CBI->getDebugLoc());
1031 NewCBI->NumIndirectDests = CBI->NumIndirectDests;
1032 return NewCBI;
1033}
1034
1035CallBrInst *CallBrInst::Create(CallBrInst *CBI, ArrayRef<OperandBundleDef> OpB,
1036 Instruction *InsertPt) {
1037 std::vector<Value *> Args(CBI->arg_begin(), CBI->arg_end());
1038
1039 auto *NewCBI = CallBrInst::Create(
1040 CBI->getFunctionType(), CBI->getCalledOperand(), CBI->getDefaultDest(),
1041 CBI->getIndirectDests(), Args, OpB, CBI->getName(), InsertPt);
1042 NewCBI->setCallingConv(CBI->getCallingConv());
1043 NewCBI->SubclassOptionalData = CBI->SubclassOptionalData;
1044 NewCBI->setAttributes(CBI->getAttributes());
1045 NewCBI->setDebugLoc(CBI->getDebugLoc());
1046 NewCBI->NumIndirectDests = CBI->NumIndirectDests;
1047 return NewCBI;
1048}
1049
1050//===----------------------------------------------------------------------===//
1051// ReturnInst Implementation
1052//===----------------------------------------------------------------------===//
1053
1054ReturnInst::ReturnInst(const ReturnInst &RI)
1055 : Instruction(Type::getVoidTy(RI.getContext()), Instruction::Ret,
1056 OperandTraits<ReturnInst>::op_end(this) - RI.getNumOperands(),
1057 RI.getNumOperands()) {
1058 if (RI.getNumOperands())
1059 Op<0>() = RI.Op<0>();
1060 SubclassOptionalData = RI.SubclassOptionalData;
1061}
1062
1063ReturnInst::ReturnInst(LLVMContext &C, Value *retVal,
1064 BasicBlock::iterator InsertBefore)
1065 : Instruction(Type::getVoidTy(C), Instruction::Ret,
1066 OperandTraits<ReturnInst>::op_end(this) - !!retVal, !!retVal,
1067 InsertBefore) {
1068 if (retVal)
1069 Op<0>() = retVal;
1070}
1071
1072ReturnInst::ReturnInst(LLVMContext &C, Value *retVal,
1073 Instruction *InsertBefore)
1074 : Instruction(Type::getVoidTy(C), Instruction::Ret,
1075 OperandTraits<ReturnInst>::op_end(this) - !!retVal, !!retVal,
1076 InsertBefore) {
1077 if (retVal)
1078 Op<0>() = retVal;
1079}
1080
1081ReturnInst::ReturnInst(LLVMContext &C, Value *retVal, BasicBlock *InsertAtEnd)
1082 : Instruction(Type::getVoidTy(C), Instruction::Ret,
1083 OperandTraits<ReturnInst>::op_end(this) - !!retVal, !!retVal,
1084 InsertAtEnd) {
1085 if (retVal)
1086 Op<0>() = retVal;
1087}
1088
1089ReturnInst::ReturnInst(LLVMContext &Context, BasicBlock *InsertAtEnd)
1090 : Instruction(Type::getVoidTy(Context), Instruction::Ret,
1091 OperandTraits<ReturnInst>::op_end(this), 0, InsertAtEnd) {}
1092
1093//===----------------------------------------------------------------------===//
1094// ResumeInst Implementation
1095//===----------------------------------------------------------------------===//
1096
1097ResumeInst::ResumeInst(const ResumeInst &RI)
1098 : Instruction(Type::getVoidTy(RI.getContext()), Instruction::Resume,
1099 OperandTraits<ResumeInst>::op_begin(this), 1) {
1100 Op<0>() = RI.Op<0>();
1101}
1102
1103ResumeInst::ResumeInst(Value *Exn, BasicBlock::iterator InsertBefore)
1104 : Instruction(Type::getVoidTy(Exn->getContext()), Instruction::Resume,
1105 OperandTraits<ResumeInst>::op_begin(this), 1, InsertBefore) {
1106 Op<0>() = Exn;
1107}
1108
1109ResumeInst::ResumeInst(Value *Exn, Instruction *InsertBefore)
1110 : Instruction(Type::getVoidTy(Exn->getContext()), Instruction::Resume,
1111 OperandTraits<ResumeInst>::op_begin(this), 1, InsertBefore) {
1112 Op<0>() = Exn;
1113}
1114
1115ResumeInst::ResumeInst(Value *Exn, BasicBlock *InsertAtEnd)
1116 : Instruction(Type::getVoidTy(Exn->getContext()), Instruction::Resume,
1117 OperandTraits<ResumeInst>::op_begin(this), 1, InsertAtEnd) {
1118 Op<0>() = Exn;
1119}
1120
1121//===----------------------------------------------------------------------===//
1122// CleanupReturnInst Implementation
1123//===----------------------------------------------------------------------===//
1124
1125CleanupReturnInst::CleanupReturnInst(const CleanupReturnInst &CRI)
1126 : Instruction(CRI.getType(), Instruction::CleanupRet,
1127 OperandTraits<CleanupReturnInst>::op_end(this) -
1128 CRI.getNumOperands(),
1129 CRI.getNumOperands()) {
1130 setSubclassData<Instruction::OpaqueField>(
1131 CRI.getSubclassData<Instruction::OpaqueField>());
1132 Op<0>() = CRI.Op<0>();
1133 if (CRI.hasUnwindDest())
1134 Op<1>() = CRI.Op<1>();
1135}
1136
1137void CleanupReturnInst::init(Value *CleanupPad, BasicBlock *UnwindBB) {
1138 if (UnwindBB)
1139 setSubclassData<UnwindDestField>(true);
1140
1141 Op<0>() = CleanupPad;
1142 if (UnwindBB)
1143 Op<1>() = UnwindBB;
1144}
1145
1146CleanupReturnInst::CleanupReturnInst(Value *CleanupPad, BasicBlock *UnwindBB,
1147 unsigned Values,
1148 BasicBlock::iterator InsertBefore)
1149 : Instruction(Type::getVoidTy(CleanupPad->getContext()),
1150 Instruction::CleanupRet,
1151 OperandTraits<CleanupReturnInst>::op_end(this) - Values,
1152 Values, InsertBefore) {
1153 init(CleanupPad, UnwindBB);
1154}
1155
1156CleanupReturnInst::CleanupReturnInst(Value *CleanupPad, BasicBlock *UnwindBB,
1157 unsigned Values, Instruction *InsertBefore)
1158 : Instruction(Type::getVoidTy(CleanupPad->getContext()),
1159 Instruction::CleanupRet,
1160 OperandTraits<CleanupReturnInst>::op_end(this) - Values,
1161 Values, InsertBefore) {
1162 init(CleanupPad, UnwindBB);
1163}
1164
1165CleanupReturnInst::CleanupReturnInst(Value *CleanupPad, BasicBlock *UnwindBB,
1166 unsigned Values, BasicBlock *InsertAtEnd)
1167 : Instruction(Type::getVoidTy(CleanupPad->getContext()),
1168 Instruction::CleanupRet,
1169 OperandTraits<CleanupReturnInst>::op_end(this) - Values,
1170 Values, InsertAtEnd) {
1171 init(CleanupPad, UnwindBB);
1172}
1173
1174//===----------------------------------------------------------------------===//
1175// CatchReturnInst Implementation
1176//===----------------------------------------------------------------------===//
1177void CatchReturnInst::init(Value *CatchPad, BasicBlock *BB) {
1178 Op<0>() = CatchPad;
1179 Op<1>() = BB;
1180}
1181
1182CatchReturnInst::CatchReturnInst(const CatchReturnInst &CRI)
1183 : Instruction(Type::getVoidTy(CRI.getContext()), Instruction::CatchRet,
1184 OperandTraits<CatchReturnInst>::op_begin(this), 2) {
1185 Op<0>() = CRI.Op<0>();
1186 Op<1>() = CRI.Op<1>();
1187}
1188
1189CatchReturnInst::CatchReturnInst(Value *CatchPad, BasicBlock *BB,
1190 BasicBlock::iterator InsertBefore)
1191 : Instruction(Type::getVoidTy(BB->getContext()), Instruction::CatchRet,
1192 OperandTraits<CatchReturnInst>::op_begin(this), 2,
1193 InsertBefore) {
1194 init(CatchPad, BB);
1195}
1196
1197CatchReturnInst::CatchReturnInst(Value *CatchPad, BasicBlock *BB,
1198 Instruction *InsertBefore)
1199 : Instruction(Type::getVoidTy(BB->getContext()), Instruction::CatchRet,
1200 OperandTraits<CatchReturnInst>::op_begin(this), 2,
1201 InsertBefore) {
1202 init(CatchPad, BB);
1203}
1204
1205CatchReturnInst::CatchReturnInst(Value *CatchPad, BasicBlock *BB,
1206 BasicBlock *InsertAtEnd)
1207 : Instruction(Type::getVoidTy(BB->getContext()), Instruction::CatchRet,
1208 OperandTraits<CatchReturnInst>::op_begin(this), 2,
1209 InsertAtEnd) {
1210 init(CatchPad, BB);
1211}
1212
1213//===----------------------------------------------------------------------===//
1214// CatchSwitchInst Implementation
1215//===----------------------------------------------------------------------===//
1216
1217CatchSwitchInst::CatchSwitchInst(Value *ParentPad, BasicBlock *UnwindDest,
1218 unsigned NumReservedValues,
1219 const Twine &NameStr,
1220 BasicBlock::iterator InsertBefore)
1221 : Instruction(ParentPad->getType(), Instruction::CatchSwitch, nullptr, 0,
1222 InsertBefore) {
1223 if (UnwindDest)
1224 ++NumReservedValues;
1225 init(ParentPad, UnwindDest, NumReservedValues + 1);
1226 setName(NameStr);
1227}
1228
1229CatchSwitchInst::CatchSwitchInst(Value *ParentPad, BasicBlock *UnwindDest,
1230 unsigned NumReservedValues,
1231 const Twine &NameStr,
1232 Instruction *InsertBefore)
1233 : Instruction(ParentPad->getType(), Instruction::CatchSwitch, nullptr, 0,
1234 InsertBefore) {
1235 if (UnwindDest)
1236 ++NumReservedValues;
1237 init(ParentPad, UnwindDest, NumReservedValues + 1);
1238 setName(NameStr);
1239}
1240
1241CatchSwitchInst::CatchSwitchInst(Value *ParentPad, BasicBlock *UnwindDest,
1242 unsigned NumReservedValues,
1243 const Twine &NameStr, BasicBlock *InsertAtEnd)
1244 : Instruction(ParentPad->getType(), Instruction::CatchSwitch, nullptr, 0,
1245 InsertAtEnd) {
1246 if (UnwindDest)
1247 ++NumReservedValues;
1248 init(ParentPad, UnwindDest, NumReservedValues + 1);
1249 setName(NameStr);
1250}
1251
1252CatchSwitchInst::CatchSwitchInst(const CatchSwitchInst &CSI)
1253 : Instruction(CSI.getType(), Instruction::CatchSwitch, nullptr,
1254 CSI.getNumOperands()) {
1255 init(CSI.getParentPad(), CSI.getUnwindDest(), CSI.getNumOperands());
1256 setNumHungOffUseOperands(ReservedSpace);
1257 Use *OL = getOperandList();
1258 const Use *InOL = CSI.getOperandList();
1259 for (unsigned I = 1, E = ReservedSpace; I != E; ++I)
1260 OL[I] = InOL[I];
1261}
1262
1263void CatchSwitchInst::init(Value *ParentPad, BasicBlock *UnwindDest,
1264 unsigned NumReservedValues) {
1265 assert(ParentPad && NumReservedValues);
1266
1267 ReservedSpace = NumReservedValues;
1268 setNumHungOffUseOperands(UnwindDest ? 2 : 1);
1269 allocHungoffUses(ReservedSpace);
1270
1271 Op<0>() = ParentPad;
1272 if (UnwindDest) {
1273 setSubclassData<UnwindDestField>(true);
1274 setUnwindDest(UnwindDest);
1275 }
1276}
1277
1278/// growOperands - grow operands - This grows the operand list in response to a
1279/// push_back style of operation. This grows the number of ops by 2 times.
1280void CatchSwitchInst::growOperands(unsigned Size) {
1281 unsigned NumOperands = getNumOperands();
1282 assert(NumOperands >= 1);
1283 if (ReservedSpace >= NumOperands + Size)
1284 return;
1285 ReservedSpace = (NumOperands + Size / 2) * 2;
1286 growHungoffUses(ReservedSpace);
1287}
1288
1289void CatchSwitchInst::addHandler(BasicBlock *Handler) {
1290 unsigned OpNo = getNumOperands();
1291 growOperands(1);
1292 assert(OpNo < ReservedSpace && "Growing didn't work!");
1293 setNumHungOffUseOperands(getNumOperands() + 1);
1294 getOperandList()[OpNo] = Handler;
1295}
1296
1297void CatchSwitchInst::removeHandler(handler_iterator HI) {
1298 // Move all subsequent handlers up one.
1299 Use *EndDst = op_end() - 1;
1300 for (Use *CurDst = HI.getCurrent(); CurDst != EndDst; ++CurDst)
1301 *CurDst = *(CurDst + 1);
1302 // Null out the last handler use.
1303 *EndDst = nullptr;
1304
1305 setNumHungOffUseOperands(getNumOperands() - 1);
1306}
1307
1308//===----------------------------------------------------------------------===//
1309// FuncletPadInst Implementation
1310//===----------------------------------------------------------------------===//
1311void FuncletPadInst::init(Value *ParentPad, ArrayRef<Value *> Args,
1312 const Twine &NameStr) {
1313 assert(getNumOperands() == 1 + Args.size() && "NumOperands not set up?");
1314 llvm::copy(Args, op_begin());
1315 setParentPad(ParentPad);
1316 setName(NameStr);
1317}
1318
1319FuncletPadInst::FuncletPadInst(const FuncletPadInst &FPI)
1320 : Instruction(FPI.getType(), FPI.getOpcode(),
1321 OperandTraits<FuncletPadInst>::op_end(this) -
1322 FPI.getNumOperands(),
1323 FPI.getNumOperands()) {
1324 std::copy(FPI.op_begin(), FPI.op_end(), op_begin());
1325 setParentPad(FPI.getParentPad());
1326}
1327
1328FuncletPadInst::FuncletPadInst(Instruction::FuncletPadOps Op, Value *ParentPad,
1329 ArrayRef<Value *> Args, unsigned Values,
1330 const Twine &NameStr,
1331 BasicBlock::iterator InsertBefore)
1332 : Instruction(ParentPad->getType(), Op,
1333 OperandTraits<FuncletPadInst>::op_end(this) - Values, Values,
1334 InsertBefore) {
1335 init(ParentPad, Args, NameStr);
1336}
1337
1338FuncletPadInst::FuncletPadInst(Instruction::FuncletPadOps Op, Value *ParentPad,
1339 ArrayRef<Value *> Args, unsigned Values,
1340 const Twine &NameStr, Instruction *InsertBefore)
1341 : Instruction(ParentPad->getType(), Op,
1342 OperandTraits<FuncletPadInst>::op_end(this) - Values, Values,
1343 InsertBefore) {
1344 init(ParentPad, Args, NameStr);
1345}
1346
1347FuncletPadInst::FuncletPadInst(Instruction::FuncletPadOps Op, Value *ParentPad,
1348 ArrayRef<Value *> Args, unsigned Values,
1349 const Twine &NameStr, BasicBlock *InsertAtEnd)
1350 : Instruction(ParentPad->getType(), Op,
1351 OperandTraits<FuncletPadInst>::op_end(this) - Values, Values,
1352 InsertAtEnd) {
1353 init(ParentPad, Args, NameStr);
1354}
1355
1356//===----------------------------------------------------------------------===//
1357// UnreachableInst Implementation
1358//===----------------------------------------------------------------------===//
1359
1360UnreachableInst::UnreachableInst(LLVMContext &Context,
1361 BasicBlock::iterator InsertBefore)
1362 : Instruction(Type::getVoidTy(Context), Instruction::Unreachable, nullptr,
1363 0, InsertBefore) {}
1364UnreachableInst::UnreachableInst(LLVMContext &Context,
1365 Instruction *InsertBefore)
1366 : Instruction(Type::getVoidTy(Context), Instruction::Unreachable, nullptr,
1367 0, InsertBefore) {}
1368UnreachableInst::UnreachableInst(LLVMContext &Context, BasicBlock *InsertAtEnd)
1369 : Instruction(Type::getVoidTy(Context), Instruction::Unreachable, nullptr,
1370 0, InsertAtEnd) {}
1371
1372//===----------------------------------------------------------------------===//
1373// BranchInst Implementation
1374//===----------------------------------------------------------------------===//
1375
1376void BranchInst::AssertOK() {
1377 if (isConditional())
1378 assert(getCondition()->getType()->isIntegerTy(1) &&
1379 "May only branch on boolean predicates!");
1380}
1381
1382BranchInst::BranchInst(BasicBlock *IfTrue, BasicBlock::iterator InsertBefore)
1383 : Instruction(Type::getVoidTy(IfTrue->getContext()), Instruction::Br,
1384 OperandTraits<BranchInst>::op_end(this) - 1, 1,
1385 InsertBefore) {
1386 assert(IfTrue && "Branch destination may not be null!");
1387 Op<-1>() = IfTrue;
1388}
1389
1390BranchInst::BranchInst(BasicBlock *IfTrue, Instruction *InsertBefore)
1391 : Instruction(Type::getVoidTy(IfTrue->getContext()), Instruction::Br,
1392 OperandTraits<BranchInst>::op_end(this) - 1, 1,
1393 InsertBefore) {
1394 assert(IfTrue && "Branch destination may not be null!");
1395 Op<-1>() = IfTrue;
1396}
1397
1398BranchInst::BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond,
1399 BasicBlock::iterator InsertBefore)
1400 : Instruction(Type::getVoidTy(IfTrue->getContext()), Instruction::Br,
1401 OperandTraits<BranchInst>::op_end(this) - 3, 3,
1402 InsertBefore) {
1403 // Assign in order of operand index to make use-list order predictable.
1404 Op<-3>() = Cond;
1405 Op<-2>() = IfFalse;
1406 Op<-1>() = IfTrue;
1407#ifndef NDEBUG
1408 AssertOK();
1409#endif
1410}
1411
1412BranchInst::BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond,
1413 Instruction *InsertBefore)
1414 : Instruction(Type::getVoidTy(IfTrue->getContext()), Instruction::Br,
1415 OperandTraits<BranchInst>::op_end(this) - 3, 3,
1416 InsertBefore) {
1417 // Assign in order of operand index to make use-list order predictable.
1418 Op<-3>() = Cond;
1419 Op<-2>() = IfFalse;
1420 Op<-1>() = IfTrue;
1421#ifndef NDEBUG
1422 AssertOK();
1423#endif
1424}
1425
1426BranchInst::BranchInst(BasicBlock *IfTrue, BasicBlock *InsertAtEnd)
1427 : Instruction(Type::getVoidTy(IfTrue->getContext()), Instruction::Br,
1428 OperandTraits<BranchInst>::op_end(this) - 1, 1, InsertAtEnd) {
1429 assert(IfTrue && "Branch destination may not be null!");
1430 Op<-1>() = IfTrue;
1431}
1432
1433BranchInst::BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond,
1434 BasicBlock *InsertAtEnd)
1435 : Instruction(Type::getVoidTy(IfTrue->getContext()), Instruction::Br,
1436 OperandTraits<BranchInst>::op_end(this) - 3, 3, InsertAtEnd) {
1437 // Assign in order of operand index to make use-list order predictable.
1438 Op<-3>() = Cond;
1439 Op<-2>() = IfFalse;
1440 Op<-1>() = IfTrue;
1441#ifndef NDEBUG
1442 AssertOK();
1443#endif
1444}
1445
1446BranchInst::BranchInst(const BranchInst &BI)
1447 : Instruction(Type::getVoidTy(BI.getContext()), Instruction::Br,
1448 OperandTraits<BranchInst>::op_end(this) - BI.getNumOperands(),
1449 BI.getNumOperands()) {
1450 // Assign in order of operand index to make use-list order predictable.
1451 if (BI.getNumOperands() != 1) {
1452 assert(BI.getNumOperands() == 3 && "BR can have 1 or 3 operands!");
1453 Op<-3>() = BI.Op<-3>();
1454 Op<-2>() = BI.Op<-2>();
1455 }
1456 Op<-1>() = BI.Op<-1>();
1457 SubclassOptionalData = BI.SubclassOptionalData;
1458}
1459
1460void BranchInst::swapSuccessors() {
1461 assert(isConditional() &&
1462 "Cannot swap successors of an unconditional branch");
1463 Op<-1>().swap(Op<-2>());
1464
1465 // Update profile metadata if present and it matches our structural
1466 // expectations.
1467 swapProfMetadata();
1468}
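// Editorial note (not part of upstream Instructions.cpp): a sketch of creating
// a conditional branch and swapping its successors; `Cond`, `TrueBB`,
// `FalseBB` and `BB` (the block being terminated) are hypothetical.
//
//   BranchInst *Br = BranchInst::Create(TrueBB, FalseBB, Cond, BB);
//   Br->swapSuccessors(); // also re-orders any branch_weights metadata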
1469
1470//===----------------------------------------------------------------------===//
1471// AllocaInst Implementation
1472//===----------------------------------------------------------------------===//
1473
1474static Value *getAISize(LLVMContext &Context, Value *Amt) {
1475 if (!Amt)
1476 Amt = ConstantInt::get(Type::getInt32Ty(Context), 1);
1477 else {
1478 assert(!isa<BasicBlock>(Amt) &&
1479 "Passed basic block into allocation size parameter! Use other ctor");
1480 assert(Amt->getType()->isIntegerTy() &&
1481 "Allocation array size is not an integer!");
1482 }
1483 return Amt;
1484}
1485
1486static Align computeAllocaDefaultAlign(Type *Ty, BasicBlock *BB) {
1487 assert(BB && "Insertion BB cannot be null when alignment not provided!");
1488 assert(BB->getParent() &&
1489 "BB must be in a Function when alignment not provided!");
1490 const DataLayout &DL = BB->getModule()->getDataLayout();
1491 return DL.getPrefTypeAlign(Ty);
1492}
1493
1494static Align computeAllocaDefaultAlign(Type *Ty, BasicBlock::iterator It) {
1495 return computeAllocaDefaultAlign(Ty, It->getParent());
1496}
1497
1498static Align computeAllocaDefaultAlign(Type *Ty, Instruction *I) {
1499 assert(I && "Insertion position cannot be null when alignment not provided!");
1500 return computeAllocaDefaultAlign(Ty, I->getParent());
1501}
1502
1503AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, const Twine &Name,
1504 BasicBlock::iterator InsertBefore)
1505 : AllocaInst(Ty, AddrSpace, /*ArraySize=*/nullptr, Name, InsertBefore) {}
1506
1507AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, const Twine &Name,
1508 Instruction *InsertBefore)
1509 : AllocaInst(Ty, AddrSpace, /*ArraySize=*/nullptr, Name, InsertBefore) {}
1510
1511AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, const Twine &Name,
1512 BasicBlock *InsertAtEnd)
1513 : AllocaInst(Ty, AddrSpace, /*ArraySize=*/nullptr, Name, InsertAtEnd) {}
1514
1515AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
1516 const Twine &Name, BasicBlock::iterator InsertBefore)
1517 : AllocaInst(Ty, AddrSpace, ArraySize,
1518 computeAllocaDefaultAlign(Ty, InsertBefore), Name,
1519 InsertBefore) {}
1520
1521AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
1522 const Twine &Name, Instruction *InsertBefore)
1523 : AllocaInst(Ty, AddrSpace, ArraySize,
1524 computeAllocaDefaultAlign(Ty, InsertBefore), Name,
1525 InsertBefore) {}
1526
1527AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
1528 const Twine &Name, BasicBlock *InsertAtEnd)
1529 : AllocaInst(Ty, AddrSpace, ArraySize,
1530 computeAllocaDefaultAlign(Ty, InsertAtEnd), Name,
1531 InsertAtEnd) {}
1532
1533AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
1534 Align Align, const Twine &Name,
1535 BasicBlock::iterator InsertBefore)
1536 : UnaryInstruction(PointerType::get(Ty, AddrSpace), Alloca,
1537 getAISize(Ty->getContext(), ArraySize), InsertBefore),
1538 AllocatedType(Ty) {
1539 setAlignment(Align);
1540 assert(!Ty->isVoidTy() && "Cannot allocate void!");
1541 setName(Name);
1542}
1543
1544AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
1545 Align Align, const Twine &Name,
1546 Instruction *InsertBefore)
1547 : UnaryInstruction(PointerType::get(Ty, AddrSpace), Alloca,
1548 getAISize(Ty->getContext(), ArraySize), InsertBefore),
1549 AllocatedType(Ty) {
1550 setAlignment(Align);
1551 assert(!Ty->isVoidTy() && "Cannot allocate void!");
1552 setName(Name);
1553}
1554
1555AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
1556 Align Align, const Twine &Name, BasicBlock *InsertAtEnd)
1557 : UnaryInstruction(PointerType::get(Ty, AddrSpace), Alloca,
1558 getAISize(Ty->getContext(), ArraySize), InsertAtEnd),
1559 AllocatedType(Ty) {
1560 setAlignment(Align);
1561 assert(!Ty->isVoidTy() && "Cannot allocate void!");
1562 setName(Name);
1563}
1564
1565
1566bool AllocaInst::isArrayAllocation() const {
1567 if (ConstantInt *CI = dyn_cast<ConstantInt>(getOperand(0)))
1568 return !CI->isOne();
1569 return true;
1570}
1571
1572/// isStaticAlloca - Return true if this alloca is in the entry block of the
1573/// function and is a constant size. If so, the code generator will fold it
1574/// into the prolog/epilog code, so it is basically free.
1575bool AllocaInst::isStaticAlloca() const {
1576 // Must be constant size.
1577 if (!isa<ConstantInt>(getArraySize())) return false;
1578
1579 // Must be in the entry block.
1580 const BasicBlock *Parent = getParent();
1581 return Parent->isEntryBlock() && !isUsedWithInAlloca();
1582}
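// Editorial note (not part of upstream Instructions.cpp): a sketch of how the
// two predicates above are typically used; `AI` is a hypothetical AllocaInst *.
//
//   if (AI->isStaticAlloca())
//     ; // constant size, entry block, not inalloca: folded into the frame
//   else if (AI->isArrayAllocation())
//     ; // the array-size operand is not the constant 1, so size is dynamic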
1583
1584//===----------------------------------------------------------------------===//
1585// LoadInst Implementation
1586//===----------------------------------------------------------------------===//
1587
1588void LoadInst::AssertOK() {
1589 assert(getOperand(0)->getType()->isPointerTy() &&
1590 "Ptr must have pointer type.");
1591}
1592
1593static Align computeLoadStoreDefaultAlign(Type *Ty, BasicBlock *BB) {
1594 assert(BB && "Insertion BB cannot be null when alignment not provided!");
1595 assert(BB->getParent() &&
1596 "BB must be in a Function when alignment not provided!");
1597 const DataLayout &DL = BB->getModule()->getDataLayout();
1598 return DL.getABITypeAlign(Ty);
1599}
1600
1601static Align computeLoadStoreDefaultAlign(Type *Ty, BasicBlock::iterator It) {
1602 return computeLoadStoreDefaultAlign(Ty, It->getParent());
1603}
1604
1605static Align computeLoadStoreDefaultAlign(Type *Ty, Instruction *I) {
1606 assert(I && "Insertion position cannot be null when alignment not provided!");
1607 return computeLoadStoreDefaultAlign(Ty, I->getParent());
1608}
1609
1610LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name,
1611 BasicBlock::iterator InsertBef)
1612 : LoadInst(Ty, Ptr, Name, /*isVolatile=*/false, InsertBef) {}
1613
1614LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name,
1615 Instruction *InsertBef)
1616 : LoadInst(Ty, Ptr, Name, /*isVolatile=*/false, InsertBef) {}
1617
1618LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name,
1619 BasicBlock *InsertAE)
1620 : LoadInst(Ty, Ptr, Name, /*isVolatile=*/false, InsertAE) {}
1621
1622LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
1623 BasicBlock::iterator InsertBef)
1624 : LoadInst(Ty, Ptr, Name, isVolatile,
1625 computeLoadStoreDefaultAlign(Ty, InsertBef), InsertBef) {}
1626
1627LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
1628 Instruction *InsertBef)
1629 : LoadInst(Ty, Ptr, Name, isVolatile,
1630 computeLoadStoreDefaultAlign(Ty, InsertBef), InsertBef) {}
1631
1632LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
1633 BasicBlock *InsertAE)
1634 : LoadInst(Ty, Ptr, Name, isVolatile,
1635 computeLoadStoreDefaultAlign(Ty, InsertAE), InsertAE) {}
1636
1637LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
1638 Align Align, BasicBlock::iterator InsertBef)
1639 : LoadInst(Ty, Ptr, Name, isVolatile, Align, AtomicOrdering::NotAtomic,
1640 SyncScope::System, InsertBef) {}
1641
1642LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
1643 Align Align, Instruction *InsertBef)
1644 : LoadInst(Ty, Ptr, Name, isVolatile, Align, AtomicOrdering::NotAtomic,
1645 SyncScope::System, InsertBef) {}
1646
1647LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
1648 Align Align, BasicBlock *InsertAE)
1649 : LoadInst(Ty, Ptr, Name, isVolatile, Align, AtomicOrdering::NotAtomic,
1650 SyncScope::System, InsertAE) {}
1651
1652LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
1653 Align Align, AtomicOrdering Order, SyncScope::ID SSID,
1654 BasicBlock::iterator InsertBef)
1655 : UnaryInstruction(Ty, Load, Ptr, InsertBef) {
1656 setVolatile(isVolatile);
1657 setAlignment(Align);
1658 setAtomic(Order, SSID);
1659 AssertOK();
1660 setName(Name);
1661}
1662
1663LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
1664 Align Align, AtomicOrdering Order, SyncScope::ID SSID,
1665 Instruction *InsertBef)
1666 : UnaryInstruction(Ty, Load, Ptr, InsertBef) {
1667 setVolatile(isVolatile);
1668 setAlignment(Align);
1669 setAtomic(Order, SSID);
1670 AssertOK();
1671 setName(Name);
1672}
1673
1674LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
1675 Align Align, AtomicOrdering Order, SyncScope::ID SSID,
1676 BasicBlock *InsertAE)
1677 : UnaryInstruction(Ty, Load, Ptr, InsertAE) {
1678 setVolatile(isVolatile);
1679 setAlignment(Align);
1680 setAtomic(Order, SSID);
1681 AssertOK();
1682 setName(Name);
1683}
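// Editorial note (not part of upstream Instructions.cpp): a sketch of creating
// an acquire load through the atomic constructor above; `Int32Ty`, `Ptr` and
// `InsertBefore` are hypothetical.
//
//   LoadInst *LI = new LoadInst(Int32Ty, Ptr, "val", /*isVolatile=*/false,
//                               Align(4), AtomicOrdering::Acquire,
//                               SyncScope::System, InsertBefore);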
1684
1685//===----------------------------------------------------------------------===//
1686// StoreInst Implementation
1687//===----------------------------------------------------------------------===//
1688
1689void StoreInst::AssertOK() {
1690 assert(getOperand(0) && getOperand(1) && "Both operands must be non-null!");
1691 assert(getOperand(1)->getType()->isPointerTy() &&
1692 "Ptr must have pointer type!");
1693}
1694
1695StoreInst::StoreInst(Value *val, Value *addr, Instruction *InsertBefore)
1696 : StoreInst(val, addr, /*isVolatile=*/false, InsertBefore) {}
1697
1698StoreInst::StoreInst(Value *val, Value *addr, BasicBlock *InsertAtEnd)
1699 : StoreInst(val, addr, /*isVolatile=*/false, InsertAtEnd) {}
1700
1701StoreInst::StoreInst(Value *val, Value *addr, BasicBlock::iterator InsertBefore)
1702 : StoreInst(val, addr, /*isVolatile=*/false, InsertBefore) {}
1703
1704StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile,
1705 Instruction *InsertBefore)
1706 : StoreInst(val, addr, isVolatile,
1707 computeLoadStoreDefaultAlign(val->getType(), InsertBefore),
1708 InsertBefore) {}
1709
1710StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile,
1711 BasicBlock *InsertAtEnd)
1712 : StoreInst(val, addr, isVolatile,
1713 computeLoadStoreDefaultAlign(val->getType(), InsertAtEnd),
1714 InsertAtEnd) {}
1715
1716StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile,
1717 BasicBlock::iterator InsertBefore)
1718 : StoreInst(val, addr, isVolatile,
1719 computeLoadStoreDefaultAlign(val->getType(), &*InsertBefore),
1720 InsertBefore) {}
1721
1722StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, Align Align,
1723 Instruction *InsertBefore)
1724 : StoreInst(val, addr, isVolatile, Align, AtomicOrdering::NotAtomic,
1725 SyncScope::System, InsertBefore) {}
1726
1727StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, Align Align,
1728 BasicBlock *InsertAtEnd)
1729 : StoreInst(val, addr, isVolatile, Align, AtomicOrdering::NotAtomic,
1730 SyncScope::System, InsertAtEnd) {}
1731
1732StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, Align Align,
1733 BasicBlock::iterator InsertBefore)
1734 : StoreInst(val, addr, isVolatile, Align, AtomicOrdering::NotAtomic,
1735 SyncScope::System, InsertBefore) {}
1736
1737StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, Align Align,
1738 AtomicOrdering Order, SyncScope::ID SSID,
1739 Instruction *InsertBefore)
1740 : Instruction(Type::getVoidTy(val->getContext()), Store,
1741 OperandTraits<StoreInst>::op_begin(this),
1742 OperandTraits<StoreInst>::operands(this), InsertBefore) {
1743 Op<0>() = val;
1744 Op<1>() = addr;
1745 setVolatile(isVolatile);
1746 setAlignment(Align);
1747 setAtomic(Order, SSID);
1748 AssertOK();
1749}
1750
1751StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, Align Align,
1752 AtomicOrdering Order, SyncScope::ID SSID,
1753 BasicBlock *InsertAtEnd)
1754 : Instruction(Type::getVoidTy(val->getContext()), Store,
1755 OperandTraits<StoreInst>::op_begin(this),
1756 OperandTraits<StoreInst>::operands(this), InsertAtEnd) {
1757 Op<0>() = val;
1758 Op<1>() = addr;
1759 setVolatile(isVolatile);
1760 setAlignment(Align);
1761 setAtomic(Order, SSID);
1762 AssertOK();
1763}
1764
1765StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, Align Align,
1766 AtomicOrdering Order, SyncScope::ID SSID,
1767 BasicBlock::iterator InsertBefore)
1768 : Instruction(Type::getVoidTy(val->getContext()), Store,
1769 OperandTraits<StoreInst>::op_begin(this),
1770 OperandTraits<StoreInst>::operands(this)) {
1771 Op<0>() = val;
1772 Op<1>() = addr;
1773 setVolatile(isVolatile);
1774 setAlignment(Align);
1775 setAtomic(Order, SSID);
1776 insertBefore(*InsertBefore->getParent(), InsertBefore);
1777 AssertOK();
1778}
1779
1780//===----------------------------------------------------------------------===//
1781// AtomicCmpXchgInst Implementation
1782//===----------------------------------------------------------------------===//
1783
1784void AtomicCmpXchgInst::Init(Value *Ptr, Value *Cmp, Value *NewVal,
1785 Align Alignment, AtomicOrdering SuccessOrdering,
1786 AtomicOrdering FailureOrdering,
1787 SyncScope::ID SSID) {
1788 Op<0>() = Ptr;
1789 Op<1>() = Cmp;
1790 Op<2>() = NewVal;
1791 setSuccessOrdering(SuccessOrdering);
1792 setFailureOrdering(FailureOrdering);
1793 setSyncScopeID(SSID);
1794 setAlignment(Alignment);
1795
1796 assert(getOperand(0) && getOperand(1) && getOperand(2) &&
1797 "All operands must be non-null!");
1798 assert(getOperand(0)->getType()->isPointerTy() &&
1799 "Ptr must have pointer type!");
1800 assert(getOperand(1)->getType() == getOperand(2)->getType() &&
1801 "Cmp type and NewVal type must be same!");
1802}
1803
1804AtomicCmpXchgInst::AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal,
1805 Align Alignment,
1806 AtomicOrdering SuccessOrdering,
1807 AtomicOrdering FailureOrdering,
1808 SyncScope::ID SSID,
1809 BasicBlock::iterator InsertBefore)
1810 : Instruction(
1811 StructType::get(Cmp->getType(), Type::getInt1Ty(Cmp->getContext())),
1812 AtomicCmpXchg, OperandTraits<AtomicCmpXchgInst>::op_begin(this),
1813 OperandTraits<AtomicCmpXchgInst>::operands(this), InsertBefore) {
1814 Init(Ptr, Cmp, NewVal, Alignment, SuccessOrdering, FailureOrdering, SSID);
1815}
1816
1817AtomicCmpXchgInst::AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal,
1818 Align Alignment,
1819 AtomicOrdering SuccessOrdering,
1820 AtomicOrdering FailureOrdering,
1821 SyncScope::ID SSID,
1822 Instruction *InsertBefore)
1823 : Instruction(
1824 StructType::get(Cmp->getType(), Type::getInt1Ty(Cmp->getContext())),
1825 AtomicCmpXchg, OperandTraits<AtomicCmpXchgInst>::op_begin(this),
1826 OperandTraits<AtomicCmpXchgInst>::operands(this), InsertBefore) {
1827 Init(Ptr, Cmp, NewVal, Alignment, SuccessOrdering, FailureOrdering, SSID);
1828}
1829
1830AtomicCmpXchgInst::AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal,
1831 Align Alignment,
1832 AtomicOrdering SuccessOrdering,
1833 AtomicOrdering FailureOrdering,
1834 SyncScope::ID SSID,
1835 BasicBlock *InsertAtEnd)
1836 : Instruction(
1837 StructType::get(Cmp->getType(), Type::getInt1Ty(Cmp->getContext())),
1838 AtomicCmpXchg, OperandTraits<AtomicCmpXchgInst>::op_begin(this),
1839 OperandTraits<AtomicCmpXchgInst>::operands(this), InsertAtEnd) {
1840 Init(Ptr, Cmp, NewVal, Alignment, SuccessOrdering, FailureOrdering, SSID);
1841}
1842
1843//===----------------------------------------------------------------------===//
1844// AtomicRMWInst Implementation
1845//===----------------------------------------------------------------------===//
1846
1847void AtomicRMWInst::Init(BinOp Operation, Value *Ptr, Value *Val,
1848 Align Alignment, AtomicOrdering Ordering,
1849 SyncScope::ID SSID) {
1850 assert(Ordering != AtomicOrdering::NotAtomic &&
1851 "atomicrmw instructions can only be atomic.");
1852 assert(Ordering != AtomicOrdering::Unordered &&
1853 "atomicrmw instructions cannot be unordered.");
1854 Op<0>() = Ptr;
1855 Op<1>() = Val;
1856 setOperation(Operation);
1857 setOrdering(Ordering);
1858 setSyncScopeID(SSID);
1859 setAlignment(Alignment);
1860
1861 assert(getOperand(0) && getOperand(1) &&
1862 "All operands must be non-null!");
1863 assert(getOperand(0)->getType()->isPointerTy() &&
1864 "Ptr must have pointer type!");
1865 assert(Ordering != AtomicOrdering::NotAtomic &&
1866 "AtomicRMW instructions must be atomic!");
1867}
1868
1869AtomicRMWInst::AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val,
1870 Align Alignment, AtomicOrdering Ordering,
1871 SyncScope::ID SSID,
1872 BasicBlock::iterator InsertBefore)
1873 : Instruction(Val->getType(), AtomicRMW,
1874 OperandTraits<AtomicRMWInst>::op_begin(this),
1875 OperandTraits<AtomicRMWInst>::operands(this), InsertBefore) {
1876 Init(Operation, Ptr, Val, Alignment, Ordering, SSID);
1877}
1878
1879AtomicRMWInst::AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val,
1880 Align Alignment, AtomicOrdering Ordering,
1881 SyncScope::ID SSID, Instruction *InsertBefore)
1882 : Instruction(Val->getType(), AtomicRMW,
1883 OperandTraits<AtomicRMWInst>::op_begin(this),
1884 OperandTraits<AtomicRMWInst>::operands(this), InsertBefore) {
1885 Init(Operation, Ptr, Val, Alignment, Ordering, SSID);
1886}
1887
1888AtomicRMWInst::AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val,
1889 Align Alignment, AtomicOrdering Ordering,
1890 SyncScope::ID SSID, BasicBlock *InsertAtEnd)
1891 : Instruction(Val->getType(), AtomicRMW,
1892 OperandTraits<AtomicRMWInst>::op_begin(this),
1893 OperandTraits<AtomicRMWInst>::operands(this), InsertAtEnd) {
1894 Init(Operation, Ptr, Val, Alignment, Ordering, SSID);
1895}
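// Illustrative sketch (editor addition, not part of the upstream file):
// building an atomicrmw add through the constructor above. Ptr, Val and
// InsertBefore are assumed to be supplied by the caller, with Val an i32.
static AtomicRMWInst *exampleCreateAtomicAdd(Value *Ptr, Value *Val,
                                             Instruction *InsertBefore) {
  return new AtomicRMWInst(AtomicRMWInst::Add, Ptr, Val, Align(4),
                           AtomicOrdering::SequentiallyConsistent,
                           SyncScope::System, InsertBefore);
}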
1896
1897StringRef AtomicRMWInst::getOperationName(BinOp Op) {
1898 switch (Op) {
1899 case AtomicRMWInst::Xchg:
1900 return "xchg";
1901 case AtomicRMWInst::Add:
1902 return "add";
1903 case AtomicRMWInst::Sub:
1904 return "sub";
1905 case AtomicRMWInst::And:
1906 return "and";
1907 case AtomicRMWInst::Nand:
1908 return "nand";
1909 case AtomicRMWInst::Or:
1910 return "or";
1911 case AtomicRMWInst::Xor:
1912 return "xor";
1913 case AtomicRMWInst::Max:
1914 return "max";
1915 case AtomicRMWInst::Min:
1916 return "min";
1917 case AtomicRMWInst::UMax:
1918 return "umax";
1919 case AtomicRMWInst::UMin:
1920 return "umin";
1921 case AtomicRMWInst::FAdd:
1922 return "fadd";
1923 case AtomicRMWInst::FSub:
1924 return "fsub";
1925 case AtomicRMWInst::FMax:
1926 return "fmax";
1927 case AtomicRMWInst::FMin:
1928 return "fmin";
1929 case AtomicRMWInst::UIncWrap:
1930 return "uinc_wrap";
1931 case AtomicRMWInst::UDecWrap:
1932 return "udec_wrap";
1933 case AtomicRMWInst::BAD_BINOP:
1934 return "<invalid operation>";
1935 }
1936
1937 llvm_unreachable("invalid atomicrmw operation");
1938}
1939
1940//===----------------------------------------------------------------------===//
1941// FenceInst Implementation
1942//===----------------------------------------------------------------------===//
1943
1944FenceInst::FenceInst(LLVMContext &C, AtomicOrdering Ordering,
1945 SyncScope::ID SSID, BasicBlock::iterator InsertBefore)
1946 : Instruction(Type::getVoidTy(C), Fence, nullptr, 0, InsertBefore) {
1947 setOrdering(Ordering);
1948 setSyncScopeID(SSID);
1949}
1950
1951FenceInst::FenceInst(LLVMContext &C, AtomicOrdering Ordering,
1952 SyncScope::ID SSID,
1953 Instruction *InsertBefore)
1954 : Instruction(Type::getVoidTy(C), Fence, nullptr, 0, InsertBefore) {
1955 setOrdering(Ordering);
1956 setSyncScopeID(SSID);
1957}
1958
1959FenceInst::FenceInst(LLVMContext &C, AtomicOrdering Ordering,
1960 SyncScope::ID SSID,
1961 BasicBlock *InsertAtEnd)
1962 : Instruction(Type::getVoidTy(C), Fence, nullptr, 0, InsertAtEnd) {
1963 setOrdering(Ordering);
1964 setSyncScopeID(SSID);
1965}
1966
1967//===----------------------------------------------------------------------===//
1968// GetElementPtrInst Implementation
1969//===----------------------------------------------------------------------===//
1970
1971void GetElementPtrInst::init(Value *Ptr, ArrayRef<Value *> IdxList,
1972 const Twine &Name) {
1973 assert(getNumOperands() == 1 + IdxList.size() &&
1974 "NumOperands not initialized?");
1975 Op<0>() = Ptr;
1976 llvm::copy(IdxList, op_begin() + 1);
1977 setName(Name);
1978}
1979
1980GetElementPtrInst::GetElementPtrInst(const GetElementPtrInst &GEPI)
1981 : Instruction(GEPI.getType(), GetElementPtr,
1982 OperandTraits<GetElementPtrInst>::op_end(this) -
1983 GEPI.getNumOperands(),
1984 GEPI.getNumOperands()),
1985 SourceElementType(GEPI.SourceElementType),
1986 ResultElementType(GEPI.ResultElementType) {
1987 std::copy(GEPI.op_begin(), GEPI.op_end(), op_begin());
1988 SubclassOptionalData = GEPI.SubclassOptionalData;
1989}
1990
1991Type *GetElementPtrInst::getTypeAtIndex(Type *Ty, Value *Idx) {
1992 if (auto *Struct = dyn_cast<StructType>(Ty)) {
1993 if (!Struct->indexValid(Idx))
1994 return nullptr;
1995 return Struct->getTypeAtIndex(Idx);
1996 }
1997 if (!Idx->getType()->isIntOrIntVectorTy())
1998 return nullptr;
1999 if (auto *Array = dyn_cast<ArrayType>(Ty))
2000 return Array->getElementType();
2001 if (auto *Vector = dyn_cast<VectorType>(Ty))
2002 return Vector->getElementType();
2003 return nullptr;
2004}
2005
2006Type *GetElementPtrInst::getTypeAtIndex(Type *Ty, uint64_t Idx) {
2007 if (auto *Struct = dyn_cast<StructType>(Ty)) {
2008 if (Idx >= Struct->getNumElements())
2009 return nullptr;
2010 return Struct->getElementType(Idx);
2011 }
2012 if (auto *Array = dyn_cast<ArrayType>(Ty))
2013 return Array->getElementType();
2014 if (auto *Vector = dyn_cast<VectorType>(Ty))
2015 return Vector->getElementType();
2016 return nullptr;
2017}
2018
2019template <typename IndexTy>
2020static Type *getIndexedTypeInternal(Type *Ty, ArrayRef<IndexTy> IdxList) {
2021 if (IdxList.empty())
2022 return Ty;
2023 for (IndexTy V : IdxList.slice(1)) {
2024 Ty = GetElementPtrInst::getTypeAtIndex(Ty, V);
2025 if (!Ty)
2026 return Ty;
2027 }
2028 return Ty;
2029}
2030
2031Type *GetElementPtrInst::getIndexedType(Type *Ty, ArrayRef<Value *> IdxList) {
2032 return getIndexedTypeInternal(Ty, IdxList);
2033}
2034
2035Type *GetElementPtrInst::getIndexedType(Type *Ty,
2036 ArrayRef<Constant *> IdxList) {
2037 return getIndexedTypeInternal(Ty, IdxList);
2038}
2039
2040Type *GetElementPtrInst::getIndexedType(Type *Ty, ArrayRef<uint64_t> IdxList) {
2041 return getIndexedTypeInternal(Ty, IdxList);
2042}
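// Illustrative sketch (editor addition, not part of the upstream file): what
// the getIndexedType() overloads above compute. For the hypothetical type
// { i32, [4 x float] } and indices {0, 1, 2}, the first index only steps over
// the pointer, the second selects the array field, and the third selects an
// array element, so the result is float.
static Type *exampleIndexedType(LLVMContext &Ctx) {
  Type *Struct = StructType::get(Type::getInt32Ty(Ctx),
                                 ArrayType::get(Type::getFloatTy(Ctx), 4));
  uint64_t Idxs[] = {0, 1, 2};
  return GetElementPtrInst::getIndexedType(Struct, Idxs); // float
}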
2043
2044/// hasAllZeroIndices - Return true if all of the indices of this GEP are
2045/// zeros. If so, the result pointer and the first operand have the same
2046/// value, just potentially different types.
2047bool GetElementPtrInst::hasAllZeroIndices() const {
2048 for (unsigned i = 1, e = getNumOperands(); i != e; ++i) {
2049 if (ConstantInt *CI = dyn_cast<ConstantInt>(getOperand(i))) {
2050 if (!CI->isZero()) return false;
2051 } else {
2052 return false;
2053 }
2054 }
2055 return true;
2056}
2057
2058/// hasAllConstantIndices - Return true if all of the indices of this GEP are
2059/// constant integers. If so, the result pointer and the first operand have
2060/// a constant offset between them.
2061bool GetElementPtrInst::hasAllConstantIndices() const {
2062 for (unsigned i = 1, e = getNumOperands(); i != e; ++i) {
2063 if (!isa<ConstantInt>(getOperand(i)))
2064 return false;
2065 }
2066 return true;
2067}
2068
2069void GetElementPtrInst::setIsInBounds(bool B) {
2070 cast<GEPOperator>(this)->setIsInBounds(B);
2071}
2072
2073bool GetElementPtrInst::isInBounds() const {
2074 return cast<GEPOperator>(this)->isInBounds();
2075}
2076
2077bool GetElementPtrInst::accumulateConstantOffset(const DataLayout &DL,
2078 APInt &Offset) const {
2079 // Delegate to the generic GEPOperator implementation.
2080 return cast<GEPOperator>(this)->accumulateConstantOffset(DL, Offset);
2081}
2082
2083bool GetElementPtrInst::collectOffset(
2084 const DataLayout &DL, unsigned BitWidth,
2085 MapVector<Value *, APInt> &VariableOffsets,
2086 APInt &ConstantOffset) const {
2087 // Delegate to the generic GEPOperator implementation.
2088 return cast<GEPOperator>(this)->collectOffset(DL, BitWidth, VariableOffsets,
2089 ConstantOffset);
2090}
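// Illustrative sketch (editor addition, not part of the upstream file):
// typical use of accumulateConstantOffset() above. GEP and DL are assumed to
// come from the caller.
static std::optional<int64_t> exampleGEPByteOffset(const GetElementPtrInst *GEP,
                                                   const DataLayout &DL) {
  unsigned IdxWidth = DL.getIndexSizeInBits(GEP->getPointerAddressSpace());
  APInt Offset(IdxWidth, 0);
  if (GEP->accumulateConstantOffset(DL, Offset))
    return Offset.getSExtValue();
  return std::nullopt; // Some index was not a constant integer.
}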
2091
2092//===----------------------------------------------------------------------===//
2093// ExtractElementInst Implementation
2094//===----------------------------------------------------------------------===//
2095
2096ExtractElementInst::ExtractElementInst(Value *Val, Value *Index,
2097 const Twine &Name,
2098 BasicBlock::iterator InsertBef)
2099 : Instruction(
2100 cast<VectorType>(Val->getType())->getElementType(), ExtractElement,
2101 OperandTraits<ExtractElementInst>::op_begin(this), 2, InsertBef) {
2102 assert(isValidOperands(Val, Index) &&
2103 "Invalid extractelement instruction operands!");
2104 Op<0>() = Val;
2105 Op<1>() = Index;
2106 setName(Name);
2107}
2108
2109ExtractElementInst::ExtractElementInst(Value *Val, Value *Index,
2110 const Twine &Name,
2111 Instruction *InsertBef)
2112 : Instruction(cast<VectorType>(Val->getType())->getElementType(),
2113 ExtractElement,
2114 OperandTraits<ExtractElementInst>::op_begin(this),
2115 2, InsertBef) {
2116 assert(isValidOperands(Val, Index) &&
2117 "Invalid extractelement instruction operands!");
2118 Op<0>() = Val;
2119 Op<1>() = Index;
2120 setName(Name);
2121}
2122
2123ExtractElementInst::ExtractElementInst(Value *Val, Value *Index,
2124 const Twine &Name,
2125 BasicBlock *InsertAE)
2126 : Instruction(cast<VectorType>(Val->getType())->getElementType(),
2127 ExtractElement,
2128 OperandTraits<ExtractElementInst>::op_begin(this),
2129 2, InsertAE) {
2130 assert(isValidOperands(Val, Index) &&
2131 "Invalid extractelement instruction operands!");
2132
2133 Op<0>() = Val;
2134 Op<1>() = Index;
2135 setName(Name);
2136}
2137
2138bool ExtractElementInst::isValidOperands(const Value *Val, const Value *Index) {
2139 if (!Val->getType()->isVectorTy() || !Index->getType()->isIntegerTy())
2140 return false;
2141 return true;
2142}
2143
2144//===----------------------------------------------------------------------===//
2145// InsertElementInst Implementation
2146//===----------------------------------------------------------------------===//
2147
2148InsertElementInst::InsertElementInst(Value *Vec, Value *Elt, Value *Index,
2149 const Twine &Name,
2150 BasicBlock::iterator InsertBef)
2151 : Instruction(Vec->getType(), InsertElement,
2152 OperandTraits<InsertElementInst>::op_begin(this), 3,
2153 InsertBef) {
2154 assert(isValidOperands(Vec, Elt, Index) &&
2155 "Invalid insertelement instruction operands!");
2156 Op<0>() = Vec;
2157 Op<1>() = Elt;
2158 Op<2>() = Index;
2159 setName(Name);
2160}
2161
2162InsertElementInst::InsertElementInst(Value *Vec, Value *Elt, Value *Index,
2163 const Twine &Name,
2164 Instruction *InsertBef)
2165 : Instruction(Vec->getType(), InsertElement,
2166 OperandTraits<InsertElementInst>::op_begin(this),
2167 3, InsertBef) {
2168 assert(isValidOperands(Vec, Elt, Index) &&
2169 "Invalid insertelement instruction operands!");
2170 Op<0>() = Vec;
2171 Op<1>() = Elt;
2172 Op<2>() = Index;
2173 setName(Name);
2174}
2175
2176InsertElementInst::InsertElementInst(Value *Vec, Value *Elt, Value *Index,
2177 const Twine &Name,
2178 BasicBlock *InsertAE)
2179 : Instruction(Vec->getType(), InsertElement,
2180 OperandTraits<InsertElementInst>::op_begin(this),
2181 3, InsertAE) {
2182 assert(isValidOperands(Vec, Elt, Index) &&
2183 "Invalid insertelement instruction operands!");
2184
2185 Op<0>() = Vec;
2186 Op<1>() = Elt;
2187 Op<2>() = Index;
2188 setName(Name);
2189}
2190
2191bool InsertElementInst::isValidOperands(const Value *Vec, const Value *Elt,
2192 const Value *Index) {
2193 if (!Vec->getType()->isVectorTy())
2194 return false; // First operand of insertelement must be vector type.
2195
2196 if (Elt->getType() != cast<VectorType>(Vec->getType())->getElementType())
2197 return false;// Second operand of insertelement must be vector element type.
2198
2199 if (!Index->getType()->isIntegerTy())
2200 return false; // Third operand of insertelement must be an integer type.
2201 return true;
2202}
2203
2204//===----------------------------------------------------------------------===//
2205// ShuffleVectorInst Implementation
2206//===----------------------------------------------------------------------===//
2207
2208static Value *createPlaceholderForShuffleVector(Value *V) {
2209 assert(V && "Cannot create placeholder of nullptr V");
2210 return PoisonValue::get(V->getType());
2211}
2212
2213ShuffleVectorInst::ShuffleVectorInst(Value *V1, Value *Mask, const Twine &Name,
2214 BasicBlock::iterator InsertBefore)
2215 : ShuffleVectorInst(V1, createPlaceholderForShuffleVector(V1), Mask, Name,
2216 InsertBefore) {}
2217
2218ShuffleVectorInst::ShuffleVectorInst(Value *V1, Value *Mask, const Twine &Name,
2219 Instruction *InsertBefore)
2220 : ShuffleVectorInst(V1, createPlaceholderForShuffleVector(V1), Mask, Name,
2221 InsertBefore) {}
2222
2223ShuffleVectorInst::ShuffleVectorInst(Value *V1, Value *Mask, const Twine &Name,
2224 BasicBlock *InsertAtEnd)
2225 : ShuffleVectorInst(V1, createPlaceholderForShuffleVector(V1), Mask, Name,
2226 InsertAtEnd) {}
2227
2228ShuffleVectorInst::ShuffleVectorInst(Value *V1, ArrayRef<int> Mask,
2229 const Twine &Name,
2230 BasicBlock::iterator InsertBefore)
2231 : ShuffleVectorInst(V1, createPlaceholderForShuffleVector(V1), Mask, Name,
2232 InsertBefore) {}
2233
2234ShuffleVectorInst::ShuffleVectorInst(Value *V1, ArrayRef<int> Mask,
2235 const Twine &Name,
2236 Instruction *InsertBefore)
2237 : ShuffleVectorInst(V1, createPlaceholderForShuffleVector(V1), Mask, Name,
2238 InsertBefore) {}
2239
2240ShuffleVectorInst::ShuffleVectorInst(Value *V1, ArrayRef<int> Mask,
2241 const Twine &Name, BasicBlock *InsertAtEnd)
2242 : ShuffleVectorInst(V1, createPlaceholderForShuffleVector(V1), Mask, Name,
2243 InsertAtEnd) {}
2244
2245ShuffleVectorInst::ShuffleVectorInst(Value *V1, Value *V2, Value *Mask,
2246 const Twine &Name,
2247 BasicBlock::iterator InsertBefore)
2248 : Instruction(
2249 VectorType::get(cast<VectorType>(V1->getType())->getElementType(),
2250 cast<VectorType>(Mask->getType())->getElementCount()),
2251 ShuffleVector, OperandTraits<ShuffleVectorInst>::op_begin(this),
2252 OperandTraits<ShuffleVectorInst>::operands(this), InsertBefore) {
2253 assert(isValidOperands(V1, V2, Mask) &&
2254 "Invalid shuffle vector instruction operands!");
2255
2256 Op<0>() = V1;
2257 Op<1>() = V2;
2258 SmallVector<int, 16> MaskArr;
2259 getShuffleMask(cast<Constant>(Mask), MaskArr);
2260 setShuffleMask(MaskArr);
2261 setName(Name);
2262}
2263
2264ShuffleVectorInst::ShuffleVectorInst(Value *V1, Value *V2, Value *Mask,
2265 const Twine &Name,
2266 Instruction *InsertBefore)
2267 : Instruction(
2268 VectorType::get(cast<VectorType>(V1->getType())->getElementType(),
2269 cast<VectorType>(Mask->getType())->getElementCount()),
2270 ShuffleVector, OperandTraits<ShuffleVectorInst>::op_begin(this),
2271 OperandTraits<ShuffleVectorInst>::operands(this), InsertBefore) {
2272 assert(isValidOperands(V1, V2, Mask) &&
2273 "Invalid shuffle vector instruction operands!");
2274
2275 Op<0>() = V1;
2276 Op<1>() = V2;
2277 SmallVector<int, 16> MaskArr;
2278 getShuffleMask(cast<Constant>(Mask), MaskArr);
2279 setShuffleMask(MaskArr);
2280 setName(Name);
2281}
2282
2283ShuffleVectorInst::ShuffleVectorInst(Value *V1, Value *V2, Value *Mask,
2284 const Twine &Name, BasicBlock *InsertAtEnd)
2285 : Instruction(
2286 VectorType::get(cast<VectorType>(V1->getType())->getElementType(),
2287 cast<VectorType>(Mask->getType())->getElementCount()),
2288 ShuffleVector, OperandTraits<ShuffleVectorInst>::op_begin(this),
2289 OperandTraits<ShuffleVectorInst>::operands(this), InsertAtEnd) {
2290 assert(isValidOperands(V1, V2, Mask) &&
2291 "Invalid shuffle vector instruction operands!");
2292
2293 Op<0>() = V1;
2294 Op<1>() = V2;
2295 SmallVector<int, 16> MaskArr;
2296 getShuffleMask(cast<Constant>(Mask), MaskArr);
2297 setShuffleMask(MaskArr);
2298 setName(Name);
2299}
2300
2301ShuffleVectorInst::ShuffleVectorInst(Value *V1, Value *V2, ArrayRef<int> Mask,
2302 const Twine &Name,
2303 BasicBlock::iterator InsertBefore)
2304 : Instruction(
2305 VectorType::get(cast<VectorType>(V1->getType())->getElementType(),
2306 Mask.size(), isa<ScalableVectorType>(V1->getType())),
2307 ShuffleVector, OperandTraits<ShuffleVectorInst>::op_begin(this),
2308 OperandTraits<ShuffleVectorInst>::operands(this), InsertBefore) {
2309 assert(isValidOperands(V1, V2, Mask) &&
2310 "Invalid shuffle vector instruction operands!");
2311 Op<0>() = V1;
2312 Op<1>() = V2;
2313 setShuffleMask(Mask);
2314 setName(Name);
2315}
2316
2317ShuffleVectorInst::ShuffleVectorInst(Value *V1, Value *V2, ArrayRef<int> Mask,
2318 const Twine &Name,
2319 Instruction *InsertBefore)
2320 : Instruction(
2321 VectorType::get(cast<VectorType>(V1->getType())->getElementType(),
2322 Mask.size(), isa<ScalableVectorType>(V1->getType())),
2323 ShuffleVector, OperandTraits<ShuffleVectorInst>::op_begin(this),
2324 OperandTraits<ShuffleVectorInst>::operands(this), InsertBefore) {
2325 assert(isValidOperands(V1, V2, Mask) &&
2326 "Invalid shuffle vector instruction operands!");
2327 Op<0>() = V1;
2328 Op<1>() = V2;
2329 setShuffleMask(Mask);
2330 setName(Name);
2331}
2332
2333ShuffleVectorInst::ShuffleVectorInst(Value *V1, Value *V2, ArrayRef<int> Mask,
2334 const Twine &Name, BasicBlock *InsertAtEnd)
2335 : Instruction(
2336 VectorType::get(cast<VectorType>(V1->getType())->getElementType(),
2337 Mask.size(), isa<ScalableVectorType>(V1->getType())),
2338 ShuffleVector, OperandTraits<ShuffleVectorInst>::op_begin(this),
2339 OperandTraits<ShuffleVectorInst>::operands(this), InsertAtEnd) {
2340 assert(isValidOperands(V1, V2, Mask) &&
2341 "Invalid shuffle vector instruction operands!");
2342
2343 Op<0>() = V1;
2344 Op<1>() = V2;
2345 setShuffleMask(Mask);
2346 setName(Name);
2347}
2348
2349void ShuffleVectorInst::commute() {
2350 int NumOpElts = cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
2351 int NumMaskElts = ShuffleMask.size();
2352 SmallVector<int, 16> NewMask(NumMaskElts);
2353 for (int i = 0; i != NumMaskElts; ++i) {
2354 int MaskElt = getMaskValue(i);
2355 if (MaskElt == PoisonMaskElem) {
2356 NewMask[i] = PoisonMaskElem;
2357 continue;
2358 }
2359 assert(MaskElt >= 0 && MaskElt < 2 * NumOpElts && "Out-of-range mask");
2360 MaskElt = (MaskElt < NumOpElts) ? MaskElt + NumOpElts : MaskElt - NumOpElts;
2361 NewMask[i] = MaskElt;
2362 }
2363 setShuffleMask(NewMask);
2364 Op<0>().swap(Op<1>());
2365}
2366
2367bool ShuffleVectorInst::isValidOperands(const Value *V1, const Value *V2,
2368 ArrayRef<int> Mask) {
2369 // V1 and V2 must be vectors of the same type.
2370 if (!isa<VectorType>(V1->getType()) || V1->getType() != V2->getType())
2371 return false;
2372
2373 // Make sure the mask elements make sense.
2374 int V1Size =
2375 cast<VectorType>(V1->getType())->getElementCount().getKnownMinValue();
2376 for (int Elem : Mask)
2377 if (Elem != PoisonMaskElem && Elem >= V1Size * 2)
2378 return false;
2379
2380 if (isa<ScalableVectorType>(V1->getType()))
2381 if ((Mask[0] != 0 && Mask[0] != PoisonMaskElem) || !all_equal(Mask))
2382 return false;
2383
2384 return true;
2385}
2386
2387bool ShuffleVectorInst::isValidOperands(const Value *V1, const Value *V2,
2388 const Value *Mask) {
2389 // V1 and V2 must be vectors of the same type.
2390 if (!V1->getType()->isVectorTy() || V1->getType() != V2->getType())
2391 return false;
2392
2393 // Mask must be vector of i32, and must be the same kind of vector as the
2394 // input vectors
2395 auto *MaskTy = dyn_cast<VectorType>(Mask->getType());
2396 if (!MaskTy || !MaskTy->getElementType()->isIntegerTy(32) ||
2397 isa<ScalableVectorType>(MaskTy) != isa<ScalableVectorType>(V1->getType()))
2398 return false;
2399
2400 // Check to see if Mask is valid.
2401 if (isa<UndefValue>(Mask) || isa<ConstantAggregateZero>(Mask))
2402 return true;
2403
2404 if (const auto *MV = dyn_cast<ConstantVector>(Mask)) {
2405 unsigned V1Size = cast<FixedVectorType>(V1->getType())->getNumElements();
2406 for (Value *Op : MV->operands()) {
2407 if (auto *CI = dyn_cast<ConstantInt>(Op)) {
2408 if (CI->uge(V1Size*2))
2409 return false;
2410 } else if (!isa<UndefValue>(Op)) {
2411 return false;
2412 }
2413 }
2414 return true;
2415 }
2416
2417 if (const auto *CDS = dyn_cast<ConstantDataSequential>(Mask)) {
2418 unsigned V1Size = cast<FixedVectorType>(V1->getType())->getNumElements();
2419 for (unsigned i = 0, e = cast<FixedVectorType>(MaskTy)->getNumElements();
2420 i != e; ++i)
2421 if (CDS->getElementAsInteger(i) >= V1Size*2)
2422 return false;
2423 return true;
2424 }
2425
2426 return false;
2427}
2428
2429void ShuffleVectorInst::getShuffleMask(const Constant *Mask,
2430 SmallVectorImpl<int> &Result) {
2431 ElementCount EC = cast<VectorType>(Mask->getType())->getElementCount();
2432
2433 if (isa<ConstantAggregateZero>(Mask)) {
2434 Result.resize(EC.getKnownMinValue(), 0);
2435 return;
2436 }
2437
2438 Result.reserve(EC.getKnownMinValue());
2439
2440 if (EC.isScalable()) {
2441 assert((isa<ConstantAggregateZero>(Mask) || isa<UndefValue>(Mask)) &&
2442 "Scalable vector shuffle mask must be undef or zeroinitializer");
2443 int MaskVal = isa<UndefValue>(Mask) ? -1 : 0;
2444 for (unsigned I = 0; I < EC.getKnownMinValue(); ++I)
2445 Result.emplace_back(MaskVal);
2446 return;
2447 }
2448
2449 unsigned NumElts = EC.getKnownMinValue();
2450
2451 if (auto *CDS = dyn_cast<ConstantDataSequential>(Mask)) {
2452 for (unsigned i = 0; i != NumElts; ++i)
2453 Result.push_back(CDS->getElementAsInteger(i));
2454 return;
2455 }
2456 for (unsigned i = 0; i != NumElts; ++i) {
2457 Constant *C = Mask->getAggregateElement(i);
2458 Result.push_back(isa<UndefValue>(C) ? -1 :
2459 cast<ConstantInt>(C)->getZExtValue());
2460 }
2461}
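// Illustrative sketch (editor addition, not part of the upstream file):
// decoding a constant shuffle mask with the static getShuffleMask() above.
// Undef/poison lanes come back as -1; e.g. the IR mask
// <4 x i32> <i32 0, i32 4, i32 poison, i32 5> decodes to {0, 4, -1, 5}.
static void exampleDecodeShuffleMask(const Constant *MaskConstant,
                                     SmallVectorImpl<int> &Elts) {
  ShuffleVectorInst::getShuffleMask(MaskConstant, Elts);
}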
2462
2463void ShuffleVectorInst::setShuffleMask(ArrayRef<int> Mask) {
2464 ShuffleMask.assign(Mask.begin(), Mask.end());
2465 ShuffleMaskForBitcode = convertShuffleMaskForBitcode(Mask, getType());
2466}
2467
2468Constant *ShuffleVectorInst::convertShuffleMaskForBitcode(ArrayRef<int> Mask,
2469 Type *ResultTy) {
2470 Type *Int32Ty = Type::getInt32Ty(ResultTy->getContext());
2471 if (isa<ScalableVectorType>(ResultTy)) {
2472 assert(all_equal(Mask) && "Unexpected shuffle");
2473 Type *VecTy = VectorType::get(Int32Ty, Mask.size(), true);
2474 if (Mask[0] == 0)
2475 return Constant::getNullValue(VecTy);
2476 return UndefValue::get(VecTy);
2477 }
2478 SmallVector<Constant *, 16> MaskConst;
2479 for (int Elem : Mask) {
2480 if (Elem == PoisonMaskElem)
2481 MaskConst.push_back(PoisonValue::get(Int32Ty));
2482 else
2483 MaskConst.push_back(ConstantInt::get(Int32Ty, Elem));
2484 }
2485 return ConstantVector::get(MaskConst);
2486}
2487
2488static bool isSingleSourceMaskImpl(ArrayRef<int> Mask, int NumOpElts) {
2489 assert(!Mask.empty() && "Shuffle mask must contain elements");
2490 bool UsesLHS = false;
2491 bool UsesRHS = false;
2492 for (int I : Mask) {
2493 if (I == -1)
2494 continue;
2495 assert(I >= 0 && I < (NumOpElts * 2) &&
2496 "Out-of-bounds shuffle mask element");
2497 UsesLHS |= (I < NumOpElts);
2498 UsesRHS |= (I >= NumOpElts);
2499 if (UsesLHS && UsesRHS)
2500 return false;
2501 }
2502 // Allow for degenerate case: completely undef mask means neither source is used.
2503 return UsesLHS || UsesRHS;
2504}
2505
2506bool ShuffleVectorInst::isSingleSourceMask(ArrayRef<int> Mask, int NumSrcElts) {
2507 // We don't have vector operand size information, so assume operands are the
2508 // same size as the mask.
2509 return isSingleSourceMaskImpl(Mask, NumSrcElts);
2510}
2511
2512static bool isIdentityMaskImpl(ArrayRef<int> Mask, int NumOpElts) {
2513 if (!isSingleSourceMaskImpl(Mask, NumOpElts))
2514 return false;
2515 for (int i = 0, NumMaskElts = Mask.size(); i < NumMaskElts; ++i) {
2516 if (Mask[i] == -1)
2517 continue;
2518 if (Mask[i] != i && Mask[i] != (NumOpElts + i))
2519 return false;
2520 }
2521 return true;
2522}
2523
2524bool ShuffleVectorInst::isIdentityMask(ArrayRef<int> Mask, int NumSrcElts) {
2525 if (Mask.size() != static_cast<unsigned>(NumSrcElts))
2526 return false;
2527 // We don't have vector operand size information, so assume operands are the
2528 // same size as the mask.
2529 return isIdentityMaskImpl(Mask, NumSrcElts);
2530}
2531
2532bool ShuffleVectorInst::isReverseMask(ArrayRef<int> Mask, int NumSrcElts) {
2533 if (Mask.size() != static_cast<unsigned>(NumSrcElts))
2534 return false;
2535 if (!isSingleSourceMask(Mask, NumSrcElts))
2536 return false;
2537
2538 // The number of elements in the mask must be at least 2.
2539 if (NumSrcElts < 2)
2540 return false;
2541
2542 for (int I = 0, E = Mask.size(); I < E; ++I) {
2543 if (Mask[I] == -1)
2544 continue;
2545 if (Mask[I] != (NumSrcElts - 1 - I) &&
2546 Mask[I] != (NumSrcElts + NumSrcElts - 1 - I))
2547 return false;
2548 }
2549 return true;
2550}
2551
2552bool ShuffleVectorInst::isZeroEltSplatMask(ArrayRef<int> Mask, int NumSrcElts) {
2553 if (Mask.size() != static_cast<unsigned>(NumSrcElts))
2554 return false;
2555 if (!isSingleSourceMask(Mask, NumSrcElts))
2556 return false;
2557 for (int I = 0, E = Mask.size(); I < E; ++I) {
2558 if (Mask[I] == -1)
2559 continue;
2560 if (Mask[I] != 0 && Mask[I] != NumSrcElts)
2561 return false;
2562 }
2563 return true;
2564}
2565
2566bool ShuffleVectorInst::isSelectMask(ArrayRef<int> Mask, int NumSrcElts) {
2567 if (Mask.size() != static_cast<unsigned>(NumSrcElts))
2568 return false;
2569 // Select is differentiated from identity. It requires using both sources.
2570 if (isSingleSourceMask(Mask, NumSrcElts))
2571 return false;
2572 for (int I = 0, E = Mask.size(); I < E; ++I) {
2573 if (Mask[I] == -1)
2574 continue;
2575 if (Mask[I] != I && Mask[I] != (NumSrcElts + I))
2576 return false;
2577 }
2578 return true;
2579}
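// Illustrative sketch (editor addition, not part of the upstream file): how
// the static mask predicates above classify a few concrete masks over two
// 4-element sources (NumSrcElts == 4).
static void exampleClassifyMasks() {
  int Identity[] = {0, 1, 2, 3}; // picks source 0 unchanged
  int Reverse[] = {3, 2, 1, 0};  // reverses source 0
  int Select[] = {0, 5, 2, 7};   // lane-wise choice between both sources
  assert(ShuffleVectorInst::isIdentityMask(Identity, 4));
  assert(ShuffleVectorInst::isReverseMask(Reverse, 4));
  assert(ShuffleVectorInst::isSelectMask(Select, 4));
  (void)Identity; (void)Reverse; (void)Select;
}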
2580
2581bool ShuffleVectorInst::isTransposeMask(ArrayRef<int> Mask, int NumSrcElts) {
2582 // Example masks that will return true:
2583 // v1 = <a, b, c, d>
2584 // v2 = <e, f, g, h>
2585 // trn1 = shufflevector v1, v2 <0, 4, 2, 6> = <a, e, c, g>
2586 // trn2 = shufflevector v1, v2 <1, 5, 3, 7> = <b, f, d, h>
2587
2588 if (Mask.size() != static_cast<unsigned>(NumSrcElts))
2589 return false;
2590 // 1. The number of elements in the mask must be a power-of-2 and at least 2.
2591 int Sz = Mask.size();
2592 if (Sz < 2 || !isPowerOf2_32(Sz))
2593 return false;
2594
2595 // 2. The first element of the mask must be either a 0 or a 1.
2596 if (Mask[0] != 0 && Mask[0] != 1)
2597 return false;
2598
2599 // 3. The difference between the first 2 elements must be equal to the
2600 // number of elements in the mask.
2601 if ((Mask[1] - Mask[0]) != NumSrcElts)
2602 return false;
2603
2604 // 4. The difference between consecutive even-numbered and odd-numbered
2605 // elements must be equal to 2.
2606 for (int I = 2; I < Sz; ++I) {
2607 int MaskEltVal = Mask[I];
2608 if (MaskEltVal == -1)
2609 return false;
2610 int MaskEltPrevVal = Mask[I - 2];
2611 if (MaskEltVal - MaskEltPrevVal != 2)
2612 return false;
2613 }
2614 return true;
2615}
2616
2617bool ShuffleVectorInst::isSpliceMask(ArrayRef<int> Mask, int NumSrcElts,
2618 int &Index) {
2619 if (Mask.size() != static_cast<unsigned>(NumSrcElts))
2620 return false;
2621 // Example: shufflevector <4 x n> A, <4 x n> B, <1,2,3,4>
2622 int StartIndex = -1;
2623 for (int I = 0, E = Mask.size(); I != E; ++I) {
2624 int MaskEltVal = Mask[I];
2625 if (MaskEltVal == -1)
2626 continue;
2627
2628 if (StartIndex == -1) {
2629 // Don't support a StartIndex that begins in the second input, or if the
2630 // first non-undef index would access below the StartIndex.
2631 if (MaskEltVal < I || NumSrcElts <= (MaskEltVal - I))
2632 return false;
2633
2634 StartIndex = MaskEltVal - I;
2635 continue;
2636 }
2637
2638 // Splice is sequential starting from StartIndex.
2639 if (MaskEltVal != (StartIndex + I))
2640 return false;
2641 }
2642
2643 if (StartIndex == -1)
2644 return false;
2645
2646 // NOTE: This accepts StartIndex == 0 (COPY).
2647 Index = StartIndex;
2648 return true;
2649}
2650
2651bool ShuffleVectorInst::isExtractSubvectorMask(ArrayRef<int> Mask,
2652 int NumSrcElts, int &Index) {
2653 // Must extract from a single source.
2654 if (!isSingleSourceMaskImpl(Mask, NumSrcElts))
2655 return false;
2656
2657 // Must be smaller (else this is an Identity shuffle).
2658 if (NumSrcElts <= (int)Mask.size())
2659 return false;
2660
2661 // Find start of extraction, accounting that we may start with an UNDEF.
2662 int SubIndex = -1;
2663 for (int i = 0, e = Mask.size(); i != e; ++i) {
2664 int M = Mask[i];
2665 if (M < 0)
2666 continue;
2667 int Offset = (M % NumSrcElts) - i;
2668 if (0 <= SubIndex && SubIndex != Offset)
2669 return false;
2670 SubIndex = Offset;
2671 }
2672
2673 if (0 <= SubIndex && SubIndex + (int)Mask.size() <= NumSrcElts) {
2674 Index = SubIndex;
2675 return true;
2676 }
2677 return false;
2678}
2679
2680bool ShuffleVectorInst::isInsertSubvectorMask(ArrayRef<int> Mask,
2681 int NumSrcElts, int &NumSubElts,
2682 int &Index) {
2683 int NumMaskElts = Mask.size();
2684
2685 // Don't try to match if we're shuffling to a smaller size.
2686 if (NumMaskElts < NumSrcElts)
2687 return false;
2688
2689 // TODO: We don't recognize self-insertion/widening.
2690 if (isSingleSourceMaskImpl(Mask, NumSrcElts))
2691 return false;
2692
2693 // Determine which mask elements are attributed to which source.
2694 APInt UndefElts = APInt::getZero(NumMaskElts);
2695 APInt Src0Elts = APInt::getZero(NumMaskElts);
2696 APInt Src1Elts = APInt::getZero(NumMaskElts);
2697 bool Src0Identity = true;
2698 bool Src1Identity = true;
2699
2700 for (int i = 0; i != NumMaskElts; ++i) {
2701 int M = Mask[i];
2702 if (M < 0) {
2703 UndefElts.setBit(i);
2704 continue;
2705 }
2706 if (M < NumSrcElts) {
2707 Src0Elts.setBit(i);
2708 Src0Identity &= (M == i);
2709 continue;
2710 }
2711 Src1Elts.setBit(i);
2712 Src1Identity &= (M == (i + NumSrcElts));
2713 }
2714 assert((Src0Elts | Src1Elts | UndefElts).isAllOnes() &&
2715 "unknown shuffle elements");
2716 assert(!Src0Elts.isZero() && !Src1Elts.isZero() &&
2717 "2-source shuffle not found");
2718
2719 // Determine lo/hi span ranges.
2720 // TODO: How should we handle undefs at the start of subvector insertions?
2721 int Src0Lo = Src0Elts.countr_zero();
2722 int Src1Lo = Src1Elts.countr_zero();
2723 int Src0Hi = NumMaskElts - Src0Elts.countl_zero();
2724 int Src1Hi = NumMaskElts - Src1Elts.countl_zero();
2725
2726 // If src0 is in place, see if the src1 elements are in place within their
2727 // own span.
2728 if (Src0Identity) {
2729 int NumSub1Elts = Src1Hi - Src1Lo;
2730 ArrayRef<int> Sub1Mask = Mask.slice(Src1Lo, NumSub1Elts);
2731 if (isIdentityMaskImpl(Sub1Mask, NumSrcElts)) {
2732 NumSubElts = NumSub1Elts;
2733 Index = Src1Lo;
2734 return true;
2735 }
2736 }
2737
2738 // If src1 is in place, see if the src0 elements are in place within their
2739 // own span.
2740 if (Src1Identity) {
2741 int NumSub0Elts = Src0Hi - Src0Lo;
2742 ArrayRef<int> Sub0Mask = Mask.slice(Src0Lo, NumSub0Elts);
2743 if (isIdentityMaskImpl(Sub0Mask, NumSrcElts)) {
2744 NumSubElts = NumSub0Elts;
2745 Index = Src0Lo;
2746 return true;
2747 }
2748 }
2749
2750 return false;
2751}
2752
2753bool ShuffleVectorInst::isIdentityWithPadding() const {
2754 // FIXME: Not currently possible to express a shuffle mask for a scalable
2755 // vector for this case.
2756 if (isa<ScalableVectorType>(getType()))
2757 return false;
2758
2759 int NumOpElts = cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
2760 int NumMaskElts = cast<FixedVectorType>(getType())->getNumElements();
2761 if (NumMaskElts <= NumOpElts)
2762 return false;
2763
2764 // The first part of the mask must choose elements from exactly 1 source op.
2765 ArrayRef<int> Mask = getShuffleMask();
2766 if (!isIdentityMaskImpl(Mask, NumOpElts))
2767 return false;
2768
2769 // All extending must be with undef elements.
2770 for (int i = NumOpElts; i < NumMaskElts; ++i)
2771 if (Mask[i] != -1)
2772 return false;
2773
2774 return true;
2775}
2776
2777bool ShuffleVectorInst::isIdentityWithExtract() const {
2778 // FIXME: Not currently possible to express a shuffle mask for a scalable
2779 // vector for this case.
2780 if (isa<ScalableVectorType>(getType()))
2781 return false;
2782
2783 int NumOpElts = cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
2784 int NumMaskElts = cast<FixedVectorType>(getType())->getNumElements();
2785 if (NumMaskElts >= NumOpElts)
2786 return false;
2787
2788 return isIdentityMaskImpl(getShuffleMask(), NumOpElts);
2789}
2790
2791bool ShuffleVectorInst::isConcat() const {
2792 // Vector concatenation is differentiated from identity with padding.
2793 if (isa<UndefValue>(Op<0>()) || isa<UndefValue>(Op<1>()))
2794 return false;
2795
2796 // FIXME: Not currently possible to express a shuffle mask for a scalable
2797 // vector for this case.
2798 if (isa<ScalableVectorType>(getType()))
2799 return false;
2800
2801 int NumOpElts = cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
2802 int NumMaskElts = cast<FixedVectorType>(getType())->getNumElements();
2803 if (NumMaskElts != NumOpElts * 2)
2804 return false;
2805
2806 // Use the mask length rather than the operands' vector lengths here. We
2807 // already know that the shuffle returns a vector twice as long as the inputs,
2808 // and neither of the inputs are undef vectors. If the mask picks consecutive
2809 // elements from both inputs, then this is a concatenation of the inputs.
2810 return isIdentityMaskImpl(getShuffleMask(), NumMaskElts);
2811}
2812
2813bool ShuffleVectorInst::isReplicationMaskWithParams(ArrayRef<int> Mask,
2814 int ReplicationFactor, int VF) {
2815 assert(Mask.size() == (unsigned)ReplicationFactor * VF &&
2816 "Unexpected mask size.");
2817
2818 for (int CurrElt : seq(VF)) {
2819 ArrayRef<int> CurrSubMask = Mask.take_front(ReplicationFactor);
2820 assert(CurrSubMask.size() == (unsigned)ReplicationFactor &&
2821 "Run out of mask?");
2822 Mask = Mask.drop_front(ReplicationFactor);
2823 if (!all_of(CurrSubMask, [CurrElt](int MaskElt) {
2824 return MaskElt == PoisonMaskElem || MaskElt == CurrElt;
2825 }))
2826 return false;
2827 }
2828 assert(Mask.empty() && "Did not consume the whole mask?");
2829
2830 return true;
2831}
2832
2833bool ShuffleVectorInst::isReplicationMask(ArrayRef<int> Mask,
2834 int &ReplicationFactor, int &VF) {
2835 // undef-less case is trivial.
2836 if (!llvm::is_contained(Mask, PoisonMaskElem)) {
2837 ReplicationFactor =
2838 Mask.take_while([](int MaskElt) { return MaskElt == 0; }).size();
2839 if (ReplicationFactor == 0 || Mask.size() % ReplicationFactor != 0)
2840 return false;
2841 VF = Mask.size() / ReplicationFactor;
2842 return isReplicationMaskWithParams(Mask, ReplicationFactor, VF);
2843 }
2844
2845 // However, if the mask contains undef's, we have to enumerate possible tuples
2846 // and pick one. There are bounds on replication factor: [1, mask size]
2847 // (where RF=1 is an identity shuffle, RF=mask size is a broadcast shuffle)
2848 // Additionally, mask size is a replication factor multiplied by vector size,
2849 // which further significantly reduces the search space.
2850
2851 // Before doing that, let's perform basic correctness checking first.
2852 int Largest = -1;
2853 for (int MaskElt : Mask) {
2854 if (MaskElt == PoisonMaskElem)
2855 continue;
2856 // Elements must be in non-decreasing order.
2857 if (MaskElt < Largest)
2858 return false;
2859 Largest = std::max(Largest, MaskElt);
2860 }
2861
2862 // Prefer larger replication factor if all else equal.
2863 for (int PossibleReplicationFactor :
2864 reverse(seq_inclusive<unsigned>(1, Mask.size()))) {
2865 if (Mask.size() % PossibleReplicationFactor != 0)
2866 continue;
2867 int PossibleVF = Mask.size() / PossibleReplicationFactor;
2868 if (!isReplicationMaskWithParams(Mask, PossibleReplicationFactor,
2869 PossibleVF))
2870 continue;
2871 ReplicationFactor = PossibleReplicationFactor;
2872 VF = PossibleVF;
2873 return true;
2874 }
2875
2876 return false;
2877}
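// Illustrative sketch (editor addition, not part of the upstream file): the
// mask <0,0,0,1,1,1,2,2,2> repeats each of VF = 3 source elements
// ReplicationFactor = 3 times, so the query above succeeds with (3, 3).
static bool exampleReplicationMask() {
  int Mask[] = {0, 0, 0, 1, 1, 1, 2, 2, 2};
  int ReplicationFactor, VF;
  return ShuffleVectorInst::isReplicationMask(Mask, ReplicationFactor, VF) &&
         ReplicationFactor == 3 && VF == 3;
}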
2878
2879bool ShuffleVectorInst::isReplicationMask(int &ReplicationFactor,
2880 int &VF) const {
2881 // Not possible to express a shuffle mask for a scalable vector for this
2882 // case.
2883 if (isa<ScalableVectorType>(getType()))
2884 return false;
2885
2886 VF = cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
2887 if (ShuffleMask.size() % VF != 0)
2888 return false;
2889 ReplicationFactor = ShuffleMask.size() / VF;
2890
2891 return isReplicationMaskWithParams(ShuffleMask, ReplicationFactor, VF);
2892}
2893
2894bool ShuffleVectorInst::isOneUseSingleSourceMask(ArrayRef<int> Mask, int VF) {
2895 if (VF <= 0 || Mask.size() < static_cast<unsigned>(VF) ||
2896 Mask.size() % VF != 0)
2897 return false;
2898 for (unsigned K = 0, Sz = Mask.size(); K < Sz; K += VF) {
2899 ArrayRef<int> SubMask = Mask.slice(K, VF);
2900 if (all_of(SubMask, [](int Idx) { return Idx == PoisonMaskElem; }))
2901 continue;
2902 SmallBitVector Used(VF, false);
2903 for (int Idx : SubMask) {
2904 if (Idx != PoisonMaskElem && Idx < VF)
2905 Used.set(Idx);
2906 }
2907 if (!Used.all())
2908 return false;
2909 }
2910 return true;
2911}
2912
2913/// Return true if this shuffle mask is a replication mask.
2914bool ShuffleVectorInst::isOneUseSingleSourceMask(int VF) const {
2915 // Not possible to express a shuffle mask for a scalable vector for this
2916 // case.
2917 if (isa<ScalableVectorType>(getType()))
2918 return false;
2919 if (!isSingleSourceMask(ShuffleMask, VF))
2920 return false;
2921
2922 return isOneUseSingleSourceMask(ShuffleMask, VF);
2923}
2924
2925bool ShuffleVectorInst::isInterleave(unsigned Factor) {
2926 FixedVectorType *OpTy = dyn_cast<FixedVectorType>(getOperand(0)->getType());
2927 // shuffle_vector can only interleave fixed length vectors - for scalable
2928 // vectors, see the @llvm.experimental.vector.interleave2 intrinsic
2929 if (!OpTy)
2930 return false;
2931 unsigned OpNumElts = OpTy->getNumElements();
2932
2933 return isInterleaveMask(ShuffleMask, Factor, OpNumElts * 2);
2934}
2935
2936bool ShuffleVectorInst::isInterleaveMask(
2937 ArrayRef<int> Mask, unsigned Factor, unsigned NumInputElts,
2938 SmallVectorImpl<unsigned> &StartIndexes) {
2939 unsigned NumElts = Mask.size();
2940 if (NumElts % Factor)
2941 return false;
2942
2943 unsigned LaneLen = NumElts / Factor;
2944 if (!isPowerOf2_32(LaneLen))
2945 return false;
2946
2947 StartIndexes.resize(Factor);
2948
2949 // Check whether each element matches the general interleaved rule.
2950 // Ignore undef elements, as long as the defined elements match the rule.
2951 // Outer loop processes all factors (x, y, z in the above example)
2952 unsigned I = 0, J;
2953 for (; I < Factor; I++) {
2954 unsigned SavedLaneValue;
2955 unsigned SavedNoUndefs = 0;
2956
2957 // Inner loop processes consecutive accesses (x, x+1... in the example)
2958 for (J = 0; J < LaneLen - 1; J++) {
2959 // Lane computes x's position in the Mask
2960 unsigned Lane = J * Factor + I;
2961 unsigned NextLane = Lane + Factor;
2962 int LaneValue = Mask[Lane];
2963 int NextLaneValue = Mask[NextLane];
2964
2965 // If both are defined, values must be sequential
2966 if (LaneValue >= 0 && NextLaneValue >= 0 &&
2967 LaneValue + 1 != NextLaneValue)
2968 break;
2969
2970 // If the next value is undef, save the current one as reference
2971 if (LaneValue >= 0 && NextLaneValue < 0) {
2972 SavedLaneValue = LaneValue;
2973 SavedNoUndefs = 1;
2974 }
2975
2976 // Undefs are allowed, but defined elements must still be consecutive:
2977 // i.e.: x,..., undef,..., x + 2,..., undef,..., undef,..., x + 5, ....
2978 // Verify this by storing the last non-undef followed by an undef
2979 // Check that following non-undef masks are incremented with the
2980 // corresponding distance.
2981 if (SavedNoUndefs > 0 && LaneValue < 0) {
2982 SavedNoUndefs++;
2983 if (NextLaneValue >= 0 &&
2984 SavedLaneValue + SavedNoUndefs != (unsigned)NextLaneValue)
2985 break;
2986 }
2987 }
2988
2989 if (J < LaneLen - 1)
2990 return false;
2991
2992 int StartMask = 0;
2993 if (Mask[I] >= 0) {
2994 // Check that the start of the I range (J=0) is greater than 0
2995 StartMask = Mask[I];
2996 } else if (Mask[(LaneLen - 1) * Factor + I] >= 0) {
2997 // StartMask defined by the last value in lane
2998 StartMask = Mask[(LaneLen - 1) * Factor + I] - J;
2999 } else if (SavedNoUndefs > 0) {
3000 // StartMask defined by some non-zero value in the j loop
3001 StartMask = SavedLaneValue - (LaneLen - 1 - SavedNoUndefs);
3002 }
3003 // else StartMask remains set to 0, i.e. all elements are undefs
3004
3005 if (StartMask < 0)
3006 return false;
3007 // We must stay within the vectors; This case can happen with undefs.
3008 if (StartMask + LaneLen > NumInputElts)
3009 return false;
3010
3011 StartIndexes[I] = StartMask;
3012 }
3013
3014 return true;
3015}
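// Illustrative sketch (editor addition, not part of the upstream file): the
// mask <0,4,1,5,2,6,3,7> interleaves two 4-element operands with Factor = 2;
// the query above reports start indexes {0, 4} for the two lanes.
static bool exampleInterleaveMask() {
  int Mask[] = {0, 4, 1, 5, 2, 6, 3, 7};
  SmallVector<unsigned, 4> StartIndexes;
  return ShuffleVectorInst::isInterleaveMask(Mask, /*Factor=*/2,
                                             /*NumInputElts=*/8, StartIndexes);
}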
3016
3017/// Try to lower a vector shuffle as a bit rotation.
3018///
3019/// Look for a repeated rotation pattern in each sub group.
3020/// Returns an element-wise left bit rotation amount or -1 if failed.
3021static int matchShuffleAsBitRotate(ArrayRef<int> Mask, int NumSubElts) {
3022 int NumElts = Mask.size();
3023 assert((NumElts % NumSubElts) == 0 && "Illegal shuffle mask");
3024
3025 int RotateAmt = -1;
3026 for (int i = 0; i != NumElts; i += NumSubElts) {
3027 for (int j = 0; j != NumSubElts; ++j) {
3028 int M = Mask[i + j];
3029 if (M < 0)
3030 continue;
3031 if (M < i || M >= i + NumSubElts)
3032 return -1;
3033 int Offset = (NumSubElts - (M - (i + j))) % NumSubElts;
3034 if (0 <= RotateAmt && Offset != RotateAmt)
3035 return -1;
3036 RotateAmt = Offset;
3037 }
3038 }
3039 return RotateAmt;
3040}
3041
3042bool ShuffleVectorInst::isBitRotateMask(
3043 ArrayRef<int> Mask, unsigned EltSizeInBits, unsigned MinSubElts,
3044 unsigned MaxSubElts, unsigned &NumSubElts, unsigned &RotateAmt) {
3045 for (NumSubElts = MinSubElts; NumSubElts <= MaxSubElts; NumSubElts *= 2) {
3046 int EltRotateAmt = matchShuffleAsBitRotate(Mask, NumSubElts);
3047 if (EltRotateAmt < 0)
3048 continue;
3049 RotateAmt = EltRotateAmt * EltSizeInBits;
3050 return true;
3051 }
3052
3053 return false;
3054}
3055
3056//===----------------------------------------------------------------------===//
3057// InsertValueInst Class
3058//===----------------------------------------------------------------------===//
3059
3060void InsertValueInst::init(Value *Agg, Value *Val, ArrayRef<unsigned> Idxs,
3061 const Twine &Name) {
3062 assert(getNumOperands() == 2 && "NumOperands not initialized?");
3063
3064 // There's no fundamental reason why we require at least one index
3065 // (other than weirdness with &*IdxBegin being invalid; see
3066 // getelementptr's init routine for example). But there's no
3067 // present need to support it.
3068 assert(!Idxs.empty() && "InsertValueInst must have at least one index");
3069
3070 assert(ExtractValueInst::getIndexedType(Agg->getType(), Idxs) ==
3071 Val->getType() && "Inserted value must match indexed type!");
3072 Op<0>() = Agg;
3073 Op<1>() = Val;
3074
3075 Indices.append(Idxs.begin(), Idxs.end());
3076 setName(Name);
3077}
3078
3079InsertValueInst::InsertValueInst(const InsertValueInst &IVI)
3080 : Instruction(IVI.getType(), InsertValue,
3081 OperandTraits<InsertValueInst>::op_begin(this), 2),
3082 Indices(IVI.Indices) {
3083 Op<0>() = IVI.getOperand(0);
3084 Op<1>() = IVI.getOperand(1);
3085 SubclassOptionalData = IVI.SubclassOptionalData;
3086}
3087
3088//===----------------------------------------------------------------------===//
3089// ExtractValueInst Class
3090//===----------------------------------------------------------------------===//
3091
3092void ExtractValueInst::init(ArrayRef<unsigned> Idxs, const Twine &Name) {
3093 assert(getNumOperands() == 1 && "NumOperands not initialized?");
3094
3095 // There's no fundamental reason why we require at least one index.
3096 // But there's no present need to support it.
3097 assert(!Idxs.empty() && "ExtractValueInst must have at least one index");
3098
3099 Indices.append(Idxs.begin(), Idxs.end());
3100 setName(Name);
3101}
3102
3103ExtractValueInst::ExtractValueInst(const ExtractValueInst &EVI)
3104 : UnaryInstruction(EVI.getType(), ExtractValue, EVI.getOperand(0)),
3105 Indices(EVI.Indices) {
3106 SubclassOptionalData = EVI.SubclassOptionalData;
3107}
3108
3109// getIndexedType - Returns the type of the element that would be extracted
3110// with an extractvalue instruction with the specified parameters.
3111//
3112// A null type is returned if the indices are invalid for the specified
3113// pointer type.
3114//
3115Type *ExtractValueInst::getIndexedType(Type *Agg,
3116 ArrayRef<unsigned> Idxs) {
3117 for (unsigned Index : Idxs) {
3118 // We can't use CompositeType::indexValid(Index) here.
3119 // indexValid() always returns true for arrays because getelementptr allows
3120 // out-of-bounds indices. Since we don't allow those for extractvalue and
3121 // insertvalue we need to check array indexing manually.
3122 // Since the only other types we can index into are struct types it's just
3123 // as easy to check those manually as well.
3124 if (ArrayType *AT = dyn_cast<ArrayType>(Agg)) {
3125 if (Index >= AT->getNumElements())
3126 return nullptr;
3127 Agg = AT->getElementType();
3128 } else if (StructType *ST = dyn_cast<StructType>(Agg)) {
3129 if (Index >= ST->getNumElements())
3130 return nullptr;
3131 Agg = ST->getElementType(Index);
3132 } else {
3133 // Not a valid type to index into.
3134 return nullptr;
3135 }
3136 }
3137 return const_cast<Type*>(Agg);
3138}
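// Illustrative sketch (editor addition, not part of the upstream file): for
// the hypothetical aggregate { i32, [2 x i64] }, the indices {1, 0} select
// the array field and then its first element, so getIndexedType() above
// returns i64; an out-of-range index such as {1, 2} would return null.
static Type *exampleExtractValueType(LLVMContext &Ctx) {
  Type *Agg = StructType::get(Type::getInt32Ty(Ctx),
                              ArrayType::get(Type::getInt64Ty(Ctx), 2));
  unsigned Idxs[] = {1, 0};
  return ExtractValueInst::getIndexedType(Agg, Idxs); // i64
}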
3139
3140//===----------------------------------------------------------------------===//
3141// UnaryOperator Class
3142//===----------------------------------------------------------------------===//
3143
3144UnaryOperator::UnaryOperator(UnaryOps iType, Value *S, Type *Ty,
3145 const Twine &Name,
3146 BasicBlock::iterator InsertBefore)
3147 : UnaryInstruction(Ty, iType, S, InsertBefore) {
3148 Op<0>() = S;
3149 setName(Name);
3150 AssertOK();
3151}
3152
3153UnaryOperator::UnaryOperator(UnaryOps iType, Value *S,
3154 Type *Ty, const Twine &Name,
3155 Instruction *InsertBefore)
3156 : UnaryInstruction(Ty, iType, S, InsertBefore) {
3157 Op<0>() = S;
3158 setName(Name);
3159 AssertOK();
3160}
3161
3162UnaryOperator::UnaryOperator(UnaryOps iType, Value *S,
3163 Type *Ty, const Twine &Name,
3164 BasicBlock *InsertAtEnd)
3165 : UnaryInstruction(Ty, iType, S, InsertAtEnd) {
3166 Op<0>() = S;
3167 setName(Name);
3168 AssertOK();
3169}
3170
3171UnaryOperator *UnaryOperator::Create(UnaryOps Op, Value *S, const Twine &Name,
3172 BasicBlock::iterator InsertBefore) {
3173 return new UnaryOperator(Op, S, S->getType(), Name, InsertBefore);
3174}
3175
3176UnaryOperator *UnaryOperator::Create(UnaryOps Op, Value *S,
3177 const Twine &Name,
3178 Instruction *InsertBefore) {
3179 return new UnaryOperator(Op, S, S->getType(), Name, InsertBefore);
3180}
3181
3182UnaryOperator *UnaryOperator::Create(UnaryOps Op, Value *S,
3183 const Twine &Name,
3184 BasicBlock *InsertAtEnd) {
3185 UnaryOperator *Res = Create(Op, S, Name);
3186 Res->insertInto(InsertAtEnd, InsertAtEnd->end());
3187 return Res;
3188}
3189
3190void UnaryOperator::AssertOK() {
3191 Value *LHS = getOperand(0);
3192 (void)LHS; // Silence warnings.
3193#ifndef NDEBUG
3194 switch (getOpcode()) {
3195 case FNeg:
3196 assert(getType() == LHS->getType() &&
3197 "Unary operation should return same type as operand!");
3198 assert(getType()->isFPOrFPVectorTy() &&
3199 "Tried to create a floating-point operation on a "
3200 "non-floating-point type!");
3201 break;
3202 default: llvm_unreachable("Invalid opcode provided");
3203 }
3204#endif
3205}
3206
3207//===----------------------------------------------------------------------===//
3208// BinaryOperator Class
3209//===----------------------------------------------------------------------===//
3210
3211BinaryOperator::BinaryOperator(BinaryOps iType, Value *S1, Value *S2, Type *Ty,
3212 const Twine &Name,
3213 BasicBlock::iterator InsertBefore)
3214 : Instruction(Ty, iType, OperandTraits<BinaryOperator>::op_begin(this),
3215 OperandTraits<BinaryOperator>::operands(this), InsertBefore) {
3216 Op<0>() = S1;
3217 Op<1>() = S2;
3218 setName(Name);
3219 AssertOK();
3220}
3221
3222BinaryOperator::BinaryOperator(BinaryOps iType, Value *S1, Value *S2,
3223 Type *Ty, const Twine &Name,
3224 Instruction *InsertBefore)
3225 : Instruction(Ty, iType,
3226 OperandTraits<BinaryOperator>::op_begin(this),
3227 OperandTraits<BinaryOperator>::operands(this),
3228 InsertBefore) {
3229 Op<0>() = S1;
3230 Op<1>() = S2;
3231 setName(Name);
3232 AssertOK();
3233}
3234
3235BinaryOperator::BinaryOperator(BinaryOps iType, Value *S1, Value *S2,
3236 Type *Ty, const Twine &Name,
3237 BasicBlock *InsertAtEnd)
3238 : Instruction(Ty, iType,
3239 OperandTraits<BinaryOperator>::op_begin(this),
3240 OperandTraits<BinaryOperator>::operands(this),
3241 InsertAtEnd) {
3242 Op<0>() = S1;
3243 Op<1>() = S2;
3244 setName(Name);
3245 AssertOK();
3246}
3247
3248void BinaryOperator::AssertOK() {
3249 Value *LHS = getOperand(0), *RHS = getOperand(1);
3250 (void)LHS; (void)RHS; // Silence warnings.
3251 assert(LHS->getType() == RHS->getType() &&
3252 "Binary operator operand types must match!");
3253#ifndef NDEBUG
3254 switch (getOpcode()) {
3255 case Add: case Sub:
3256 case Mul:
3257 assert(getType() == LHS->getType() &&
3258 "Arithmetic operation should return same type as operands!");
3259 assert(getType()->isIntOrIntVectorTy() &&
3260 "Tried to create an integer operation on a non-integer type!");
3261 break;
3262 case FAdd: case FSub:
3263 case FMul:
3264 assert(getType() == LHS->getType() &&
3265 "Arithmetic operation should return same type as operands!");
3266 assert(getType()->isFPOrFPVectorTy() &&
3267 "Tried to create a floating-point operation on a "
3268 "non-floating-point type!");
3269 break;
3270 case UDiv:
3271 case SDiv:
3272 assert(getType() == LHS->getType() &&
3273 "Arithmetic operation should return same type as operands!");
3274 assert(getType()->isIntOrIntVectorTy() &&
3275 "Incorrect operand type (not integer) for S/UDIV");
3276 break;
3277 case FDiv:
3278 assert(getType() == LHS->getType() &&
3279 "Arithmetic operation should return same type as operands!");
3280 assert(getType()->isFPOrFPVectorTy() &&
3281 "Incorrect operand type (not floating point) for FDIV");
3282 break;
3283 case URem:
3284 case SRem:
3285 assert(getType() == LHS->getType() &&
3286 "Arithmetic operation should return same type as operands!");
3287 assert(getType()->isIntOrIntVectorTy() &&
3288 "Incorrect operand type (not integer) for S/UREM");
3289 break;
3290 case FRem:
3291 assert(getType() == LHS->getType() &&
3292 "Arithmetic operation should return same type as operands!");
3293 assert(getType()->isFPOrFPVectorTy() &&
3294 "Incorrect operand type (not floating point) for FREM");
3295 break;
3296 case Shl:
3297 case LShr:
3298 case AShr:
3299 assert(getType() == LHS->getType() &&
3300 "Shift operation should return same type as operands!");
3301 assert(getType()->isIntOrIntVectorTy() &&
3302 "Tried to create a shift operation on a non-integral type!");
3303 break;
3304 case And: case Or:
3305 case Xor:
3306 assert(getType() == LHS->getType() &&
3307 "Logical operation should return same type as operands!");
3308 assert(getType()->isIntOrIntVectorTy() &&
3309 "Tried to create a logical operation on a non-integral type!");
3310 break;
3311 default: llvm_unreachable("Invalid opcode provided");
3312 }
3313#endif
3314}
3315
3316BinaryOperator *BinaryOperator::Create(BinaryOps Op, Value *S1, Value *S2,
3317 const Twine &Name,
3318 BasicBlock::iterator InsertBefore) {
3319 assert(S1->getType() == S2->getType() &&
3320 "Cannot create binary operator with two operands of differing type!");
3321 return new BinaryOperator(Op, S1, S2, S1->getType(), Name, InsertBefore);
3322}
3323
3324BinaryOperator *BinaryOperator::Create(BinaryOps Op, Value *S1, Value *S2,
3325 const Twine &Name,
3326 Instruction *InsertBefore) {
3327 assert(S1->getType() == S2->getType() &&
3328 "Cannot create binary operator with two operands of differing type!");
3329 return new BinaryOperator(Op, S1, S2, S1->getType(), Name, InsertBefore);
3330}
3331
3332BinaryOperator *BinaryOperator::Create(BinaryOps Op, Value *S1, Value *S2,
3333 const Twine &Name,
3334 BasicBlock *InsertAtEnd) {
3335 BinaryOperator *Res = Create(Op, S1, S2, Name);
3336 Res->insertInto(InsertAtEnd, InsertAtEnd->end());
3337 return Res;
3338}
3339
3340BinaryOperator *BinaryOperator::CreateNeg(Value *Op, const Twine &Name,
3341 BasicBlock::iterator InsertBefore) {
3342 Value *Zero = ConstantInt::get(Op->getType(), 0);
3343 return new BinaryOperator(Instruction::Sub, Zero, Op, Op->getType(), Name,
3344 InsertBefore);
3345}
3346
3347BinaryOperator *BinaryOperator::CreateNeg(Value *Op, const Twine &Name,
3348 BasicBlock *InsertAtEnd) {
3349 Value *Zero = ConstantInt::get(Op->getType(), 0);
3350 return new BinaryOperator(Instruction::Sub,
3351 Zero, Op,
3352 Op->getType(), Name, InsertAtEnd);
3353}
3354
3355BinaryOperator *BinaryOperator::CreateNSWNeg(Value *Op, const Twine &Name,
3356 Instruction *InsertBefore) {
3357 Value *Zero = ConstantInt::get(Op->getType(), 0);
3358 return BinaryOperator::CreateNSWSub(Zero, Op, Name, InsertBefore);
3359}
3360
3361BinaryOperator *BinaryOperator::CreateNSWNeg(Value *Op, const Twine &Name,
3362 BasicBlock *InsertAtEnd) {
3363 Value *Zero = ConstantInt::get(Op->getType(), 0);
3364 return BinaryOperator::CreateNSWSub(Zero, Op, Name, InsertAtEnd);
3365}
3366
3367BinaryOperator *BinaryOperator::CreateNUWNeg(Value *Op, const Twine &Name,
3368 Instruction *InsertBefore) {
3369 Value *Zero = ConstantInt::get(Op->getType(), 0);
3370 return BinaryOperator::CreateNUWSub(Zero, Op, Name, InsertBefore);
3371}
3372
3373BinaryOperator *BinaryOperator::CreateNUWNeg(Value *Op, const Twine &Name,
3374 BasicBlock *InsertAtEnd) {
3375 Value *Zero = ConstantInt::get(Op->getType(), 0);
3376 return BinaryOperator::CreateNUWSub(Zero, Op, Name, InsertAtEnd);
3377}
3378
3379BinaryOperator *BinaryOperator::CreateNot(Value *Op, const Twine &Name,
3380 BasicBlock::iterator InsertBefore) {
3381 Constant *C = Constant::getAllOnesValue(Op->getType());
3382 return new BinaryOperator(Instruction::Xor, Op, C,
3383 Op->getType(), Name, InsertBefore);
3384}
3385
3386BinaryOperator *BinaryOperator::CreateNot(Value *Op, const Twine &Name,
3387 Instruction *InsertBefore) {
3388 Constant *C = Constant::getAllOnesValue(Op->getType());
3389 return new BinaryOperator(Instruction::Xor, Op, C,
3390 Op->getType(), Name, InsertBefore);
3391}
3392
3393BinaryOperator *BinaryOperator::CreateNot(Value *Op, const Twine &Name,
3394 BasicBlock *InsertAtEnd) {
3395 Constant *AllOnes = Constant::getAllOnesValue(Op->getType());
3396 return new BinaryOperator(Instruction::Xor, Op, AllOnes,
3397 Op->getType(), Name, InsertAtEnd);
3398}
3399
3400// Exchange the two operands to this instruction. This instruction is safe to
3401// use on any binary instruction and does not modify the semantics of the
3402// instruction. If the operation is not commutative, the operands are left
3403// unchanged and true is returned to signal failure.
3404bool BinaryOperator::swapOperands() {
3405 if (!isCommutative())
3406 return true; // Can't commute operands
3407 Op<0>().swap(Op<1>());
3408 return false;
3409}
3410
3411//===----------------------------------------------------------------------===//
3412// FPMathOperator Class
3413//===----------------------------------------------------------------------===//
3414
3415float FPMathOperator::getFPAccuracy() const {
3416 const MDNode *MD =
3417 cast<Instruction>(this)->getMetadata(LLVMContext::MD_fpmath);
3418 if (!MD)
3419 return 0.0;
3420 ConstantFP *Accuracy = mdconst::extract<ConstantFP>(MD->getOperand(0));
3421 return Accuracy->getValueAPF().convertToFloat();
3422}
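// Illustrative sketch (editor addition, not part of the upstream file):
// attaching the !fpmath metadata that getFPAccuracy() above reads back.
// I is assumed to be an existing floating-point instruction.
static void exampleSetFPAccuracy(Instruction *I) {
  MDBuilder MDB(I->getContext());
  I->setMetadata(LLVMContext::MD_fpmath, MDB.createFPMath(2.5f));
  // cast<FPMathOperator>(I)->getFPAccuracy() now returns 2.5.
}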
3423
3424//===----------------------------------------------------------------------===//
3425// CastInst Class
3426//===----------------------------------------------------------------------===//
3427
3428// Just determine if this cast only deals with integral->integral conversion.
3429bool CastInst::isIntegerCast() const {
3430 switch (getOpcode()) {
3431 default: return false;
3432 case Instruction::ZExt:
3433 case Instruction::SExt:
3434 case Instruction::Trunc:
3435 return true;
3436 case Instruction::BitCast:
3437 return getOperand(0)->getType()->isIntegerTy() &&
3438 getType()->isIntegerTy();
3439 }
3440}
3441
3442/// This function determines if the CastInst does not require any bits to be
3443/// changed in order to effect the cast. Essentially, it identifies cases where
3444/// no code gen is necessary for the cast, hence the name no-op cast. For
3445/// example, the following are all no-op casts:
3446/// # bitcast i32* %x to i8*
3447/// # bitcast <2 x i32> %x to <4 x i16>
3448/// # ptrtoint i32* %x to i32 ; on 32-bit platforms only
3449/// Determine if the described cast is a no-op.
3450bool CastInst::isNoopCast(Instruction::CastOps Opcode,
3451 Type *SrcTy,
3452 Type *DestTy,
3453 const DataLayout &DL) {
3454 assert(castIsValid(Opcode, SrcTy, DestTy) && "method precondition");
3455 switch (Opcode) {
3456 default: llvm_unreachable("Invalid CastOp");
3457 case Instruction::Trunc:
3458 case Instruction::ZExt:
3459 case Instruction::SExt:
3460 case Instruction::FPTrunc:
3461 case Instruction::FPExt:
3462 case Instruction::UIToFP:
3463 case Instruction::SIToFP:
3464 case Instruction::FPToUI:
3465 case Instruction::FPToSI:
3466 case Instruction::AddrSpaceCast:
3467 // TODO: Target information may give a more accurate answer here.
3468 return false;
3469 case Instruction::BitCast:
3470 return true; // BitCast never modifies bits.
3471 case Instruction::PtrToInt:
3472 return DL.getIntPtrType(SrcTy)->getScalarSizeInBits() ==
3473 DestTy->getScalarSizeInBits();
3474 case Instruction::IntToPtr:
3475 return DL.getIntPtrType(DestTy)->getScalarSizeInBits() ==
3476 SrcTy->getScalarSizeInBits();
3477 }
3478}
3479
3480bool CastInst::isNoopCast(const DataLayout &DL) const {
3481 return isNoopCast(getOpcode(), getOperand(0)->getType(), getType(), DL);
3482}
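// Illustrative sketch (editor addition, not part of the upstream file): on a
// target whose pointers are 64 bits wide, ptrtoint to i64 changes no bits, so
// the static isNoopCast() above returns true for it.
static bool examplePtrToIntIsNoop(LLVMContext &Ctx, const DataLayout &DL) {
  return CastInst::isNoopCast(Instruction::PtrToInt,
                              PointerType::get(Ctx, /*AddressSpace=*/0),
                              Type::getInt64Ty(Ctx), DL);
}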
3483
3484/// This function determines if a pair of casts can be eliminated and what
3485/// opcode should be used in the elimination. This assumes that there are two
3486/// instructions like this:
3487/// * %F = firstOpcode SrcTy %x to MidTy
3488/// * %S = secondOpcode MidTy %F to DstTy
3489/// The function returns a resultOpcode so these two casts can be replaced with:
3490/// * %Replacement = resultOpcode %SrcTy %x to DstTy
3491/// If no such cast is permitted, the function returns 0.
3492unsigned CastInst::isEliminableCastPair(
3493 Instruction::CastOps firstOp, Instruction::CastOps secondOp,
3494 Type *SrcTy, Type *MidTy, Type *DstTy, Type *SrcIntPtrTy, Type *MidIntPtrTy,
3495 Type *DstIntPtrTy) {
3496 // Define the 169 possibilities for these two cast instructions. The values
3497 // in this matrix determine what to do in a given situation and select the
3498 // case in the switch below. The rows correspond to firstOp, the columns
3499 // correspond to secondOp. In looking at the table below, keep in mind
3500 // the following cast properties:
3501 //
3502 // Size Compare Source Destination
3503 // Operator Src ? Size Type Sign Type Sign
3504 // -------- ------------ ------------------- ---------------------
3505 // TRUNC > Integer Any Integral Any
3506 // ZEXT < Integral Unsigned Integer Any
3507 // SEXT < Integral Signed Integer Any
3508 // FPTOUI n/a FloatPt n/a Integral Unsigned
3509 // FPTOSI n/a FloatPt n/a Integral Signed
3510 // UITOFP n/a Integral Unsigned FloatPt n/a
3511 // SITOFP n/a Integral Signed FloatPt n/a
3512 // FPTRUNC > FloatPt n/a FloatPt n/a
3513 // FPEXT < FloatPt n/a FloatPt n/a
3514 // PTRTOINT n/a Pointer n/a Integral Unsigned
3515 // INTTOPTR n/a Integral Unsigned Pointer n/a
3516 // BITCAST = FirstClass n/a FirstClass n/a
3517 // ADDRSPCST n/a Pointer n/a Pointer n/a
3518 //
3519 // NOTE: some transforms are safe, but we consider them to be non-profitable.
3520 // For example, we could merge "fptoui double to i32" + "zext i32 to i64",
3521 // into "fptoui double to i64", but this loses information about the range
3522 // of the produced value (we no longer know the top-part is all zeros).
3523 // Further this conversion is often much more expensive for typical hardware,
3524 // and causes issues when building libgcc. We disallow fptosi+sext for the
3525 // same reason.
3526 const unsigned numCastOps =
3527 Instruction::CastOpsEnd - Instruction::CastOpsBegin;
3528 static const uint8_t CastResults[numCastOps][numCastOps] = {
3529 // T F F U S F F P I B A -+
3530 // R Z S P P I I T P 2 N T S |
3531 // U E E 2 2 2 2 R E I T C C +- secondOp
3532 // N X X U S F F N X N 2 V V |
3533 // C T T I I P P C T T P T T -+
3534 { 1, 0, 0,99,99, 0, 0,99,99,99, 0, 3, 0}, // Trunc -+
3535 { 8, 1, 9,99,99, 2,17,99,99,99, 2, 3, 0}, // ZExt |
3536 { 8, 0, 1,99,99, 0, 2,99,99,99, 0, 3, 0}, // SExt |
3537 { 0, 0, 0,99,99, 0, 0,99,99,99, 0, 3, 0}, // FPToUI |
3538 { 0, 0, 0,99,99, 0, 0,99,99,99, 0, 3, 0}, // FPToSI |
3539 { 99,99,99, 0, 0,99,99, 0, 0,99,99, 4, 0}, // UIToFP +- firstOp
3540 { 99,99,99, 0, 0,99,99, 0, 0,99,99, 4, 0}, // SIToFP |
3541 { 99,99,99, 0, 0,99,99, 0, 0,99,99, 4, 0}, // FPTrunc |
3542 { 99,99,99, 2, 2,99,99, 8, 2,99,99, 4, 0}, // FPExt |
3543 { 1, 0, 0,99,99, 0, 0,99,99,99, 7, 3, 0}, // PtrToInt |
3544 { 99,99,99,99,99,99,99,99,99,11,99,15, 0}, // IntToPtr |
3545 { 5, 5, 5, 0, 0, 5, 5, 0, 0,16, 5, 1,14}, // BitCast |
3546 { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,13,12}, // AddrSpaceCast -+
3547 };
3548
3549 // TODO: This logic could be encoded into the table above and handled in the
3550 // switch below.
3551 // If either of the casts is a bitcast between a scalar and a vector type,
3552 // disallow the merging. However, a pair of bitcasts is still allowed.
3553 bool IsFirstBitcast = (firstOp == Instruction::BitCast);
3554 bool IsSecondBitcast = (secondOp == Instruction::BitCast);
3555 bool AreBothBitcasts = IsFirstBitcast && IsSecondBitcast;
3556
3557 // Check if any of the casts convert scalars <-> vectors.
3558 if ((IsFirstBitcast && isa<VectorType>(SrcTy) != isa<VectorType>(MidTy)) ||
3559 (IsSecondBitcast && isa<VectorType>(MidTy) != isa<VectorType>(DstTy)))
3560 if (!AreBothBitcasts)
3561 return 0;
3562
3563 int ElimCase = CastResults[firstOp-Instruction::CastOpsBegin]
3564 [secondOp-Instruction::CastOpsBegin];
3565 switch (ElimCase) {
3566 case 0:
3567 // Categorically disallowed.
3568 return 0;
3569 case 1:
3570 // Allowed, use first cast's opcode.
3571 return firstOp;
3572 case 2:
3573 // Allowed, use second cast's opcode.
3574 return secondOp;
3575 case 3:
3576 // No-op cast in second op implies firstOp as long as the DestTy
3577 // is integer and we are not converting between a vector and a
3578 // non-vector type.
3579 if (!SrcTy->isVectorTy() && DstTy->isIntegerTy())
3580 return firstOp;
3581 return 0;
3582 case 4:
3583 // No-op cast in second op implies firstOp as long as the DestTy
3584 // matches MidTy.
3585 if (DstTy == MidTy)
3586 return firstOp;
3587 return 0;
3588 case 5:
3589 // No-op cast in first op implies secondOp as long as the SrcTy
3590 // is an integer.
3591 if (SrcTy->isIntegerTy())
3592 return secondOp;
3593 return 0;
3594 case 7: {
3595 // Bail out if the inttoptr/ptrtoint roundtrip optimization is disabled.
3596 if (DisableI2pP2iOpt)
3597 return 0;
3598
3599 // Cannot simplify if address spaces are different!
3600 if (SrcTy->getPointerAddressSpace() != DstTy->getPointerAddressSpace())
3601 return 0;
3602
3603 unsigned MidSize = MidTy->getScalarSizeInBits();
3604 // We can still fold this without knowing the actual sizes as long as we
3605 // know that the intermediate pointer is the largest possible
3606 // pointer size.
3607 // FIXME: Is this always true?
3608 if (MidSize == 64)
3609 return Instruction::BitCast;
3610
3611 // ptrtoint, inttoptr -> bitcast (ptr -> ptr) if int size is >= ptr size.
3612 if (!SrcIntPtrTy || DstIntPtrTy != SrcIntPtrTy)
3613 return 0;
3614 unsigned PtrSize = SrcIntPtrTy->getScalarSizeInBits();
3615 if (MidSize >= PtrSize)
3616 return Instruction::BitCast;
3617 return 0;
3618 }
3619 case 8: {
3620 // ext, trunc -> bitcast, if the SrcTy and DstTy are the same
3621 // ext, trunc -> ext, if sizeof(SrcTy) < sizeof(DstTy)
3622 // ext, trunc -> trunc, if sizeof(SrcTy) > sizeof(DstTy)
3623 unsigned SrcSize = SrcTy->getScalarSizeInBits();
3624 unsigned DstSize = DstTy->getScalarSizeInBits();
3625 if (SrcTy == DstTy)
3626 return Instruction::BitCast;
3627 if (SrcSize < DstSize)
3628 return firstOp;
3629 if (SrcSize > DstSize)
3630 return secondOp;
3631 return 0;
3632 }
3633 case 9:
3634 // zext, sext -> zext, because sext can't sign extend after zext
3635 return Instruction::ZExt;
3636 case 11: {
3637 // inttoptr, ptrtoint -> bitcast if SrcSize<=PtrSize and SrcSize==DstSize
3638 if (!MidIntPtrTy)
3639 return 0;
3640 unsigned PtrSize = MidIntPtrTy->getScalarSizeInBits();
3641 unsigned SrcSize = SrcTy->getScalarSizeInBits();
3642 unsigned DstSize = DstTy->getScalarSizeInBits();
3643 if (SrcSize <= PtrSize && SrcSize == DstSize)
3644 return Instruction::BitCast;
3645 return 0;
3646 }
3647 case 12:
3648 // addrspacecast, addrspacecast -> bitcast, if SrcAS == DstAS
3649 // addrspacecast, addrspacecast -> addrspacecast, if SrcAS != DstAS
3650 if (SrcTy->getPointerAddressSpace() != DstTy->getPointerAddressSpace())
3651 return Instruction::AddrSpaceCast;
3652 return Instruction::BitCast;
3653 case 13:
3654 // FIXME: this state can be merged with (1), but the following assert
3655 // is useful to check the correctness of the sequence due to the semantic
3656 // change of bitcast.
3657 assert(
3658 SrcTy->isPtrOrPtrVectorTy() &&
3659 MidTy->isPtrOrPtrVectorTy() &&
3660 DstTy->isPtrOrPtrVectorTy() &&
3661 SrcTy->getPointerAddressSpace() != MidTy->getPointerAddressSpace() &&
3662 MidTy->getPointerAddressSpace() == DstTy->getPointerAddressSpace() &&
3663 "Illegal addrspacecast, bitcast sequence!");
3664 // Allowed, use first cast's opcode
3665 return firstOp;
3666 case 14:
3667 // bitcast, addrspacecast -> addrspacecast
3668 return Instruction::AddrSpaceCast;
3669 case 15:
3670 // FIXME: this state can be merged with (1), but the following assert
3671 // is useful to check the correctness of the sequence due to the semantic
3672 // change of bitcast.
3673 assert(
3674 SrcTy->isIntOrIntVectorTy() &&
3675 MidTy->isPtrOrPtrVectorTy() &&
3676 DstTy->isPtrOrPtrVectorTy() &&
3677 MidTy->getPointerAddressSpace() == DstTy->getPointerAddressSpace() &&
3678 "Illegal inttoptr, bitcast sequence!");
3679 // Allowed, use first cast's opcode
3680 return firstOp;
3681 case 16:
3682 // FIXME: this state can be merged with (2), but the following assert
3683 // is useful to check the correctness of the sequence due to the semantic
3684 // change of bitcast.
3685 assert(
3686 SrcTy->isPtrOrPtrVectorTy() &&
3687 MidTy->isPtrOrPtrVectorTy() &&
3688 DstTy->isIntOrIntVectorTy() &&
3689 SrcTy->getPointerAddressSpace() == MidTy->getPointerAddressSpace() &&
3690 "Illegal bitcast, ptrtoint sequence!");
3691 // Allowed, use second cast's opcode
3692 return secondOp;
3693 case 17:
3694 // (sitofp (zext x)) -> (uitofp x)
3695 return Instruction::UIToFP;
3696 case 99:
3697 // Cast combination can't happen (error in input). This is for all cases
3698 // where the MidTy is not the same for the two cast instructions.
3699 llvm_unreachable("Invalid Cast Combination");
3700 default:
3701 llvm_unreachable("Error in CastResults table!!!");
3702 }
3703}
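// Editorial usage sketch (not part of the LLVM sources): asking whether
// "zext i16 -> i32" followed by "zext i32 -> i64" can be replaced by a single
// cast. The IntPtrTy arguments only matter for the ptr<->int rows of the table,
// so passing nullptr here is an illustrative simplification.
//
//   LLVMContext Ctx;
//   Type *I16 = Type::getInt16Ty(Ctx);
//   Type *I32 = Type::getInt32Ty(Ctx);
//   Type *I64 = Type::getInt64Ty(Ctx);
//   unsigned NewOp = CastInst::isEliminableCastPair(
//       Instruction::ZExt, Instruction::ZExt, I16, I32, I64,
//       /*SrcIntPtrTy=*/nullptr, /*MidIntPtrTy=*/nullptr, /*DstIntPtrTy=*/nullptr);
//   // Table case 1: the pair folds to the first opcode, i.e. a single ZExt.
//   // NewOp == Instruction::ZExt; a result of 0 would mean "do not fold".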
3704
3706 const Twine &Name,
3707 BasicBlock::iterator InsertBefore) {
3708 assert(castIsValid(op, S, Ty) && "Invalid cast!");
3709 // Construct and return the appropriate CastInst subclass
3710 switch (op) {
3711 case Trunc: return new TruncInst (S, Ty, Name, InsertBefore);
3712 case ZExt: return new ZExtInst (S, Ty, Name, InsertBefore);
3713 case SExt: return new SExtInst (S, Ty, Name, InsertBefore);
3714 case FPTrunc: return new FPTruncInst (S, Ty, Name, InsertBefore);
3715 case FPExt: return new FPExtInst (S, Ty, Name, InsertBefore);
3716 case UIToFP: return new UIToFPInst (S, Ty, Name, InsertBefore);
3717 case SIToFP: return new SIToFPInst (S, Ty, Name, InsertBefore);
3718 case FPToUI: return new FPToUIInst (S, Ty, Name, InsertBefore);
3719 case FPToSI: return new FPToSIInst (S, Ty, Name, InsertBefore);
3720 case PtrToInt: return new PtrToIntInst (S, Ty, Name, InsertBefore);
3721 case IntToPtr: return new IntToPtrInst (S, Ty, Name, InsertBefore);
3722 case BitCast: return new BitCastInst (S, Ty, Name, InsertBefore);
3723 case AddrSpaceCast: return new AddrSpaceCastInst (S, Ty, Name, InsertBefore);
3724 default: llvm_unreachable("Invalid opcode provided");
3725 }
3726}
3727
3729 const Twine &Name, Instruction *InsertBefore) {
3730 assert(castIsValid(op, S, Ty) && "Invalid cast!");
3731 // Construct and return the appropriate CastInst subclass
3732 switch (op) {
3733 case Trunc: return new TruncInst (S, Ty, Name, InsertBefore);
3734 case ZExt: return new ZExtInst (S, Ty, Name, InsertBefore);
3735 case SExt: return new SExtInst (S, Ty, Name, InsertBefore);
3736 case FPTrunc: return new FPTruncInst (S, Ty, Name, InsertBefore);
3737 case FPExt: return new FPExtInst (S, Ty, Name, InsertBefore);
3738 case UIToFP: return new UIToFPInst (S, Ty, Name, InsertBefore);
3739 case SIToFP: return new SIToFPInst (S, Ty, Name, InsertBefore);
3740 case FPToUI: return new FPToUIInst (S, Ty, Name, InsertBefore);
3741 case FPToSI: return new FPToSIInst (S, Ty, Name, InsertBefore);
3742 case PtrToInt: return new PtrToIntInst (S, Ty, Name, InsertBefore);
3743 case IntToPtr: return new IntToPtrInst (S, Ty, Name, InsertBefore);
3744 case BitCast: return new BitCastInst (S, Ty, Name, InsertBefore);
3745 case AddrSpaceCast: return new AddrSpaceCastInst (S, Ty, Name, InsertBefore);
3746 default: llvm_unreachable("Invalid opcode provided");
3747 }
3748}
3749
3751 const Twine &Name, BasicBlock *InsertAtEnd) {
3752 assert(castIsValid(op, S, Ty) && "Invalid cast!");
3753 // Construct and return the appropriate CastInst subclass
3754 switch (op) {
3755 case Trunc: return new TruncInst (S, Ty, Name, InsertAtEnd);
3756 case ZExt: return new ZExtInst (S, Ty, Name, InsertAtEnd);
3757 case SExt: return new SExtInst (S, Ty, Name, InsertAtEnd);
3758 case FPTrunc: return new FPTruncInst (S, Ty, Name, InsertAtEnd);
3759 case FPExt: return new FPExtInst (S, Ty, Name, InsertAtEnd);
3760 case UIToFP: return new UIToFPInst (S, Ty, Name, InsertAtEnd);
3761 case SIToFP: return new SIToFPInst (S, Ty, Name, InsertAtEnd);
3762 case FPToUI: return new FPToUIInst (S, Ty, Name, InsertAtEnd);
3763 case FPToSI: return new FPToSIInst (S, Ty, Name, InsertAtEnd);
3764 case PtrToInt: return new PtrToIntInst (S, Ty, Name, InsertAtEnd);
3765 case IntToPtr: return new IntToPtrInst (S, Ty, Name, InsertAtEnd);
3766 case BitCast: return new BitCastInst (S, Ty, Name, InsertAtEnd);
3767 case AddrSpaceCast: return new AddrSpaceCastInst (S, Ty, Name, InsertAtEnd);
3768 default: llvm_unreachable("Invalid opcode provided");
3769 }
3770}
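// Editorial usage sketch (not part of the LLVM sources): creating a cast with
// an explicit opcode. `V` (an i32 value) and `InsertPt` (an existing
// instruction used as the insertion point) are assumed to exist.
//
//   Type *I64 = Type::getInt64Ty(V->getContext());
//   CastInst *Widened =
//       CastInst::Create(Instruction::SExt, V, I64, "widened", InsertPt);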
3771
3773 BasicBlock::iterator InsertBefore) {
3774 if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
3775 return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
3776 return Create(Instruction::ZExt, S, Ty, Name, InsertBefore);
3777}
3778
3780 const Twine &Name,
3781 Instruction *InsertBefore) {
3782 if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
3783 return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
3784 return Create(Instruction::ZExt, S, Ty, Name, InsertBefore);
3785}
3786
3788 const Twine &Name,
3789 BasicBlock *InsertAtEnd) {
3790 if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
3791 return Create(Instruction::BitCast, S, Ty, Name, InsertAtEnd);
3792 return Create(Instruction::ZExt, S, Ty, Name, InsertAtEnd);
3793}
3794
3796 BasicBlock::iterator InsertBefore) {
3797 if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
3798 return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
3799 return Create(Instruction::SExt, S, Ty, Name, InsertBefore);
3800}
3801
3803 const Twine &Name,
3804 Instruction *InsertBefore) {
3805 if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
3806 return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
3807 return Create(Instruction::SExt, S, Ty, Name, InsertBefore);
3808}
3809
3811 const Twine &Name,
3812 BasicBlock *InsertAtEnd) {
3813 if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
3814 return Create(Instruction::BitCast, S, Ty, Name, InsertAtEnd);
3815 return Create(Instruction::SExt, S, Ty, Name, InsertAtEnd);
3816}
3817
3819 BasicBlock::iterator InsertBefore) {
3820 if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
3821 return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
3822 return Create(Instruction::Trunc, S, Ty, Name, InsertBefore);
3823}
3824
3826 const Twine &Name,
3827 Instruction *InsertBefore) {
3828 if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
3829 return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
3830 return Create(Instruction::Trunc, S, Ty, Name, InsertBefore);
3831}
3832
3834 const Twine &Name,
3835 BasicBlock *InsertAtEnd) {
3836 if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
3837 return Create(Instruction::BitCast, S, Ty, Name, InsertAtEnd);
3838 return Create(Instruction::Trunc, S, Ty, Name, InsertAtEnd);
3839}
3840
3842 const Twine &Name,
3843 BasicBlock *InsertAtEnd) {
3844 assert(S->getType()->isPtrOrPtrVectorTy() && "Invalid cast");
3845 assert((Ty->isIntOrIntVectorTy() || Ty->isPtrOrPtrVectorTy()) &&
3846 "Invalid cast");
3847 assert(Ty->isVectorTy() == S->getType()->isVectorTy() && "Invalid cast");
3848 assert((!Ty->isVectorTy() ||
3849 cast<VectorType>(Ty)->getElementCount() ==
3850 cast<VectorType>(S->getType())->getElementCount()) &&
3851 "Invalid cast");
3852
3853 if (Ty->isIntOrIntVectorTy())
3854 return Create(Instruction::PtrToInt, S, Ty, Name, InsertAtEnd);
3855
3856 return CreatePointerBitCastOrAddrSpaceCast(S, Ty, Name, InsertAtEnd);
3857}
3858
3859/// Create a BitCast or a PtrToInt cast instruction
3861 BasicBlock::iterator InsertBefore) {
3862 assert(S->getType()->isPtrOrPtrVectorTy() && "Invalid cast");
3863 assert((Ty->isIntOrIntVectorTy() || Ty->isPtrOrPtrVectorTy()) &&
3864 "Invalid cast");
3865 assert(Ty->isVectorTy() == S->getType()->isVectorTy() && "Invalid cast");
3866 assert((!Ty->isVectorTy() ||
3867 cast<VectorType>(Ty)->getElementCount() ==
3868 cast<VectorType>(S->getType())->getElementCount()) &&
3869 "Invalid cast");
3870
3871 if (Ty->isIntOrIntVectorTy())
3872 return Create(Instruction::PtrToInt, S, Ty, Name, InsertBefore);
3873
3874 return CreatePointerBitCastOrAddrSpaceCast(S, Ty, Name, InsertBefore);
3875}
3876
3877/// Create a BitCast or a PtrToInt cast instruction
3879 Instruction *InsertBefore) {
3880 assert(S->getType()->isPtrOrPtrVectorTy() && "Invalid cast");
3881 assert((Ty->isIntOrIntVectorTy() || Ty->isPtrOrPtrVectorTy()) &&
3882 "Invalid cast");
3883 assert(Ty->isVectorTy() == S->getType()->isVectorTy() && "Invalid cast");
3884 assert((!Ty->isVectorTy() ||
3885 cast<VectorType>(Ty)->getElementCount() ==
3886 cast<VectorType>(S->getType())->getElementCount()) &&
3887 "Invalid cast");
3888
3889 if (Ty->isIntOrIntVectorTy())
3890 return Create(Instruction::PtrToInt, S, Ty, Name, InsertBefore);
3891
3892 return CreatePointerBitCastOrAddrSpaceCast(S, Ty, Name, InsertBefore);
3893}
3894
3896 Value *S, Type *Ty,
3897 const Twine &Name,
3898 BasicBlock *InsertAtEnd) {
3899 assert(S->getType()->isPtrOrPtrVectorTy() && "Invalid cast");
3900 assert(Ty->isPtrOrPtrVectorTy() && "Invalid cast");
3901
3902 if (S->getType()->getPointerAddressSpace() != Ty->getPointerAddressSpace())
3903 return Create(Instruction::AddrSpaceCast, S, Ty, Name, InsertAtEnd);
3904
3905 return Create(Instruction::BitCast, S, Ty, Name, InsertAtEnd);
3906}
3907
3909 Value *S, Type *Ty, const Twine &Name, BasicBlock::iterator InsertBefore) {
3910 assert(S->getType()->isPtrOrPtrVectorTy() && "Invalid cast");
3911 assert(Ty->isPtrOrPtrVectorTy() && "Invalid cast");
3912
3913 if (S->getType()->getPointerAddressSpace() != Ty->getPointerAddressSpace())
3914 return Create(Instruction::AddrSpaceCast, S, Ty, Name, InsertBefore);
3915
3916 return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
3917}
3918
3920 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore) {
3921 assert(S->getType()->isPtrOrPtrVectorTy() && "Invalid cast");
3922 assert(Ty->isPtrOrPtrVectorTy() && "Invalid cast");
3923
3924 if (S->getType()->getPointerAddressSpace() != Ty->getPointerAddressSpace())
3925 return Create(Instruction::AddrSpaceCast, S, Ty, Name, InsertBefore);
3926
3927 return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
3928}
3929
3931 const Twine &Name,
3932 BasicBlock::iterator InsertBefore) {
3933 if (S->getType()->isPointerTy() && Ty->isIntegerTy())
3934 return Create(Instruction::PtrToInt, S, Ty, Name, InsertBefore);
3935 if (S->getType()->isIntegerTy() && Ty->isPointerTy())
3936 return Create(Instruction::IntToPtr, S, Ty, Name, InsertBefore);
3937
3938 return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
3939}
3940
3942 const Twine &Name,
3943 Instruction *InsertBefore) {
3944 if (S->getType()->isPointerTy() && Ty->isIntegerTy())
3945 return Create(Instruction::PtrToInt, S, Ty, Name, InsertBefore);
3946 if (S->getType()->isIntegerTy() && Ty->isPointerTy())
3947 return Create(Instruction::IntToPtr, S, Ty, Name, InsertBefore);
3948
3949 return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
3950}
3951
3953 const Twine &Name,
3954 BasicBlock::iterator InsertBefore) {
3955 assert(C->getType()->isIntOrIntVectorTy() && Ty->isIntOrIntVectorTy() &&
3956 "Invalid integer cast");
3957 unsigned SrcBits = C->getType()->getScalarSizeInBits();
3958 unsigned DstBits = Ty->getScalarSizeInBits();
3959 Instruction::CastOps opcode =
3960 (SrcBits == DstBits ? Instruction::BitCast :
3961 (SrcBits > DstBits ? Instruction::Trunc :
3962 (isSigned ? Instruction::SExt : Instruction::ZExt)));
3963 return Create(opcode, C, Ty, Name, InsertBefore);
3964}
3965
3967 bool isSigned, const Twine &Name,
3968 Instruction *InsertBefore) {
3969 assert(C->getType()->isIntOrIntVectorTy() && Ty->isIntOrIntVectorTy() &&
3970 "Invalid integer cast");
3971 unsigned SrcBits = C->getType()->getScalarSizeInBits();
3972 unsigned DstBits = Ty->getScalarSizeInBits();
3973 Instruction::CastOps opcode =
3974 (SrcBits == DstBits ? Instruction::BitCast :
3975 (SrcBits > DstBits ? Instruction::Trunc :
3976 (isSigned ? Instruction::SExt : Instruction::ZExt)));
3977 return Create(opcode, C, Ty, Name, InsertBefore);
3978}
3979
3981 bool isSigned, const Twine &Name,
3982 BasicBlock *InsertAtEnd) {
3983 assert(C->getType()->isIntOrIntVectorTy() && Ty->isIntOrIntVectorTy() &&
3984 "Invalid cast");
3985 unsigned SrcBits = C->getType()->getScalarSizeInBits();
3986 unsigned DstBits = Ty->getScalarSizeInBits();
3987 Instruction::CastOps opcode =
3988 (SrcBits == DstBits ? Instruction::BitCast :
3989 (SrcBits > DstBits ? Instruction::Trunc :
3990 (isSigned ? Instruction::SExt : Instruction::ZExt)));
3991 return Create(opcode, C, Ty, Name, InsertAtEnd);
3992}
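// Editorial usage sketch (not part of the LLVM sources): CreateIntegerCast
// picks Trunc, ZExt, SExt, or BitCast from the relative widths and the
// requested signedness. `V16` (an i16 value), `V64` (an i64 value), `I16`,
// `I64`, and `InsertPt` are assumed to exist.
//
//   // i16 -> i64, signed: emits `sext i16 %v to i64`.
//   CastInst *S = CastInst::CreateIntegerCast(V16, I64, /*isSigned=*/true,
//                                             "widen", InsertPt);
//   // i64 -> i16, either signedness: emits `trunc i64 %v to i16`.
//   CastInst *T = CastInst::CreateIntegerCast(V64, I16, /*isSigned=*/false,
//                                             "narrow", InsertPt);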
3993
3995 BasicBlock::iterator InsertBefore) {
3996 assert(C->getType()->isFPOrFPVectorTy() && Ty->isFPOrFPVectorTy() &&
3997 "Invalid cast");
3998 unsigned SrcBits = C->getType()->getScalarSizeInBits();
3999 unsigned DstBits = Ty->getScalarSizeInBits();
4000 Instruction::CastOps opcode =
4001 (SrcBits == DstBits ? Instruction::BitCast :
4002 (SrcBits > DstBits ? Instruction::FPTrunc : Instruction::FPExt));
4003 return Create(opcode, C, Ty, Name, InsertBefore);
4004}
4005
4007 const Twine &Name,
4008 Instruction *InsertBefore) {
4009 assert(C->getType()->isFPOrFPVectorTy() && Ty->isFPOrFPVectorTy() &&
4010 "Invalid cast");
4011 unsigned SrcBits = C->getType()->getScalarSizeInBits();
4012 unsigned DstBits = Ty->getScalarSizeInBits();
4013 assert((C->getType() == Ty || SrcBits != DstBits) && "Invalid cast");
4014 Instruction::CastOps opcode =
4015 (SrcBits == DstBits ? Instruction::BitCast :
4016 (SrcBits > DstBits ? Instruction::FPTrunc : Instruction::FPExt));
4017 return Create(opcode, C, Ty, Name, InsertBefore);
4018}
4019
4021 const Twine &Name,
4022 BasicBlock *InsertAtEnd) {
4023 assert(C->getType()->isFPOrFPVectorTy() && Ty->isFPOrFPVectorTy() &&
4024 "Invalid cast");
4025 unsigned SrcBits = C->getType()->getScalarSizeInBits();
4026 unsigned DstBits = Ty->getScalarSizeInBits();
4027 Instruction::CastOps opcode =
4028 (SrcBits == DstBits ? Instruction::BitCast :
4029 (SrcBits > DstBits ? Instruction::FPTrunc : Instruction::FPExt));
4030 return Create(opcode, C, Ty, Name, InsertAtEnd);
4031}
4032
4033bool CastInst::isBitCastable(Type *SrcTy, Type *DestTy) {
4034 if (!SrcTy->isFirstClassType() || !DestTy->isFirstClassType())
4035 return false;
4036
4037 if (SrcTy == DestTy)
4038 return true;
4039
4040 if (VectorType *SrcVecTy = dyn_cast<VectorType>(SrcTy)) {
4041 if (VectorType *DestVecTy = dyn_cast<VectorType>(DestTy)) {
4042 if (SrcVecTy->getElementCount() == DestVecTy->getElementCount()) {
4043 // An element by element cast. Valid if casting the elements is valid.
4044 SrcTy = SrcVecTy->getElementType();
4045 DestTy = DestVecTy->getElementType();
4046 }
4047 }
4048 }
4049
4050 if (PointerType *DestPtrTy = dyn_cast<PointerType>(DestTy)) {
4051 if (PointerType *SrcPtrTy = dyn_cast<PointerType>(SrcTy)) {
4052 return SrcPtrTy->getAddressSpace() == DestPtrTy->getAddressSpace();
4053 }
4054 }
4055
4056 TypeSize SrcBits = SrcTy->getPrimitiveSizeInBits(); // 0 for ptr
4057 TypeSize DestBits = DestTy->getPrimitiveSizeInBits(); // 0 for ptr
4058
4059 // Could still have vectors of pointers if the number of elements doesn't
4060 // match
4061 if (SrcBits.getKnownMinValue() == 0 || DestBits.getKnownMinValue() == 0)
4062 return false;
4063
4064 if (SrcBits != DestBits)
4065 return false;
4066
4067 if (DestTy->isX86_MMXTy() || SrcTy->isX86_MMXTy())
4068 return false;
4069
4070 return true;
4071}
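// Editorial usage sketch (not part of the LLVM sources): bitcasts are only
// legal between first-class types of the same total width.
//
//   LLVMContext Ctx;
//   Type *V4I32 = FixedVectorType::get(Type::getInt32Ty(Ctx), 4);  // 128 bits
//   Type *V2I64 = FixedVectorType::get(Type::getInt64Ty(Ctx), 2);  // 128 bits
//   bool OK  = CastInst::isBitCastable(V4I32, V2I64);                 // true
//   bool Bad = CastInst::isBitCastable(V4I32, Type::getInt64Ty(Ctx)); // false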
4072
4073bool CastInst::isBitOrNoopCastable(Type *SrcTy, Type *DestTy,
4074 const DataLayout &DL) {
4075 // ptrtoint and inttoptr are not allowed on non-integral pointers
4076 if (auto *PtrTy = dyn_cast<PointerType>(SrcTy))
4077 if (auto *IntTy = dyn_cast<IntegerType>(DestTy))
4078 return (IntTy->getBitWidth() == DL.getPointerTypeSizeInBits(PtrTy) &&
4079 !DL.isNonIntegralPointerType(PtrTy));
4080 if (auto *PtrTy = dyn_cast<PointerType>(DestTy))
4081 if (auto *IntTy = dyn_cast<IntegerType>(SrcTy))
4082 return (IntTy->getBitWidth() == DL.getPointerTypeSizeInBits(PtrTy) &&
4083 !DL.isNonIntegralPointerType(PtrTy));
4084
4085 return isBitCastable(SrcTy, DestTy);
4086}
4087
4088// Provide a way to get a "cast" where the cast opcode is inferred from the
4089// types and size of the operand. This parallels the logic in the
4090// castIsValid function below. The following invariant should hold:
4091// castIsValid(getCastOpcode(Val, Ty), Val, Ty)
4092// must not assert. In other words, this produces a "correct"
4093// casting opcode for the arguments passed to it.
4094Instruction::CastOps
4095CastInst::getCastOpcode(
4096 const Value *Src, bool SrcIsSigned, Type *DestTy, bool DestIsSigned) {
4097 Type *SrcTy = Src->getType();
4098
4099 assert(SrcTy->isFirstClassType() && DestTy->isFirstClassType() &&
4100 "Only first class types are castable!");
4101
4102 if (SrcTy == DestTy)
4103 return BitCast;
4104
4105 // FIXME: Check address space sizes here
4106 if (VectorType *SrcVecTy = dyn_cast<VectorType>(SrcTy))
4107 if (VectorType *DestVecTy = dyn_cast<VectorType>(DestTy))
4108 if (SrcVecTy->getElementCount() == DestVecTy->getElementCount()) {
4109 // An element by element cast. Find the appropriate opcode based on the
4110 // element types.
4111 SrcTy = SrcVecTy->getElementType();
4112 DestTy = DestVecTy->getElementType();
4113 }
4114
4115 // Get the bit sizes; we'll need these.
4116 unsigned SrcBits = SrcTy->getPrimitiveSizeInBits(); // 0 for ptr
4117 unsigned DestBits = DestTy->getPrimitiveSizeInBits(); // 0 for ptr
4118
4119 // Run through the possibilities ...
4120 if (DestTy->isIntegerTy()) { // Casting to integral
4121 if (SrcTy->isIntegerTy()) { // Casting from integral
4122 if (DestBits < SrcBits)
4123 return Trunc; // int -> smaller int
4124 else if (DestBits > SrcBits) { // it's an extension
4125 if (SrcIsSigned)
4126 return SExt; // signed -> SEXT
4127 else
4128 return ZExt; // unsigned -> ZEXT
4129 } else {
4130 return BitCast; // Same size, No-op cast
4131 }
4132 } else if (SrcTy->isFloatingPointTy()) { // Casting from floating pt
4133 if (DestIsSigned)
4134 return FPToSI; // FP -> sint
4135 else
4136 return FPToUI; // FP -> uint
4137 } else if (SrcTy->isVectorTy()) {
4138 assert(DestBits == SrcBits &&
4139 "Casting vector to integer of different width");
4140 return BitCast; // Same size, no-op cast
4141 } else {
4142 assert(SrcTy->isPointerTy() &&
4143 "Casting from a value that is not first-class type");
4144 return PtrToInt; // ptr -> int
4145 }
4146 } else if (DestTy->isFloatingPointTy()) { // Casting to floating pt
4147 if (SrcTy->isIntegerTy()) { // Casting from integral
4148 if (SrcIsSigned)
4149 return SIToFP; // sint -> FP
4150 else
4151 return UIToFP; // uint -> FP
4152 } else if (SrcTy->isFloatingPointTy()) { // Casting from floating pt
4153 if (DestBits < SrcBits) {
4154 return FPTrunc; // FP -> smaller FP
4155 } else if (DestBits > SrcBits) {
4156 return FPExt; // FP -> larger FP
4157 } else {
4158 return BitCast; // same size, no-op cast
4159 }
4160 } else if (SrcTy->isVectorTy()) {
4161 assert(DestBits == SrcBits &&
4162 "Casting vector to floating point of different width");
4163 return BitCast; // same size, no-op cast
4164 }
4165 llvm_unreachable("Casting pointer or non-first class to float");
4166 } else if (DestTy->isVectorTy()) {
4167 assert(DestBits == SrcBits &&
4168 "Illegal cast to vector (wrong type or size)");
4169 return BitCast;
4170 } else if (DestTy->isPointerTy()) {
4171 if (SrcTy->isPointerTy()) {
4172 if (DestTy->getPointerAddressSpace() != SrcTy->getPointerAddressSpace())
4173 return AddrSpaceCast;
4174 return BitCast; // ptr -> ptr
4175 } else if (SrcTy->isIntegerTy()) {
4176 return IntToPtr; // int -> ptr
4177 }
4178 llvm_unreachable("Casting pointer to other than pointer or int");
4179 } else if (DestTy->isX86_MMXTy()) {
4180 if (SrcTy->isVectorTy()) {
4181 assert(DestBits == SrcBits && "Casting vector of wrong width to X86_MMX");
4182 return BitCast; // 64-bit vector to MMX
4183 }
4184 llvm_unreachable("Illegal cast to X86_MMX");
4185 }
4186 llvm_unreachable("Casting to type that is not first-class");
4187}
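// Editorial usage sketch (not part of the LLVM sources): letting getCastOpcode
// infer the opcode and then materializing the cast it chose. `V32` (an i32
// value) and `InsertPt` are assumed to exist.
//
//   Type *DblTy = Type::getDoubleTy(V32->getContext());
//   Instruction::CastOps Op = CastInst::getCastOpcode(
//       V32, /*SrcIsSigned=*/true, DblTy, /*DestIsSigned=*/true); // SIToFP
//   CastInst *AsFP = CastInst::Create(Op, V32, DblTy, "as.fp", InsertPt);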
4188
4189//===----------------------------------------------------------------------===//
4190// CastInst SubClass Constructors
4191//===----------------------------------------------------------------------===//
4192
4193/// Check that the construction parameters for a CastInst are correct. This
4194/// could be broken out into the separate constructors but it is useful to have
4195/// it in one place and to eliminate the redundant code for getting the sizes
4196/// of the types involved.
4197bool
4198CastInst::castIsValid(Instruction::CastOps op, Type *SrcTy, Type *DstTy) {
4199 if (!SrcTy->isFirstClassType() || !DstTy->isFirstClassType() ||
4200 SrcTy->isAggregateType() || DstTy->isAggregateType())
4201 return false;
4202
4203 // Get the size of the types in bits, and whether we are dealing
4204 // with vector types, we'll need this later.
4205 bool SrcIsVec = isa<VectorType>(SrcTy);
4206 bool DstIsVec = isa<VectorType>(DstTy);
4207 unsigned SrcScalarBitSize = SrcTy->getScalarSizeInBits();
4208 unsigned DstScalarBitSize = DstTy->getScalarSizeInBits();
4209
4210 // If these are vector types, get the lengths of the vectors (using zero for
4211 // scalar types means that checking that vector lengths match also checks that
4212 // scalars are not being converted to vectors or vectors to scalars).
4213 ElementCount SrcEC = SrcIsVec ? cast<VectorType>(SrcTy)->getElementCount()
4214 : ElementCount::getFixed(0);
4215 ElementCount DstEC = DstIsVec ? cast<VectorType>(DstTy)->getElementCount()
4216 : ElementCount::getFixed(0);
4217
4218 // Switch on the opcode provided
4219 switch (op) {
4220 default: return false; // This is an input error
4221 case Instruction::Trunc:
4222 return SrcTy->isIntOrIntVectorTy() && DstTy->isIntOrIntVectorTy() &&
4223 SrcEC == DstEC && SrcScalarBitSize > DstScalarBitSize;
4224 case Instruction::ZExt:
4225 return SrcTy->isIntOrIntVectorTy() && DstTy->isIntOrIntVectorTy() &&
4226 SrcEC == DstEC && SrcScalarBitSize < DstScalarBitSize;
4227 case Instruction::SExt:
4228 return SrcTy->isIntOrIntVectorTy() && DstTy->isIntOrIntVectorTy() &&
4229 SrcEC == DstEC && SrcScalarBitSize < DstScalarBitSize;
4230 case Instruction::FPTrunc:
4231 return SrcTy->isFPOrFPVectorTy() && DstTy->isFPOrFPVectorTy() &&
4232 SrcEC == DstEC && SrcScalarBitSize > DstScalarBitSize;
4233 case Instruction::FPExt:
4234 return SrcTy->isFPOrFPVectorTy() && DstTy->isFPOrFPVectorTy() &&
4235 SrcEC == DstEC && SrcScalarBitSize < DstScalarBitSize;
4236 case Instruction::UIToFP:
4237 case Instruction::SIToFP:
4238 return SrcTy->isIntOrIntVectorTy() && DstTy->isFPOrFPVectorTy() &&
4239 SrcEC == DstEC;
4240 case Instruction::FPToUI:
4241 case Instruction::FPToSI:
4242 return SrcTy->isFPOrFPVectorTy() && DstTy->isIntOrIntVectorTy() &&
4243 SrcEC == DstEC;
4244 case Instruction::PtrToInt:
4245 if (SrcEC != DstEC)
4246 return false;
4247 return SrcTy->isPtrOrPtrVectorTy() && DstTy->isIntOrIntVectorTy();
4248 case Instruction::IntToPtr:
4249 if (SrcEC != DstEC)
4250 return false;
4251 return SrcTy->isIntOrIntVectorTy() && DstTy->isPtrOrPtrVectorTy();
4252 case Instruction::BitCast: {
4253 PointerType *SrcPtrTy = dyn_cast<PointerType>(SrcTy->getScalarType());
4254 PointerType *DstPtrTy = dyn_cast<PointerType>(DstTy->getScalarType());
4255
4256 // BitCast implies a no-op cast of type only. No bits change.
4257 // However, you can't cast pointers to anything but pointers.
4258 if (!SrcPtrTy != !DstPtrTy)
4259 return false;
4260
4261 // For non-pointer cases, the cast is okay if the source and destination bit
4262 // widths are identical.
4263 if (!SrcPtrTy)
4264 return SrcTy->getPrimitiveSizeInBits() == DstTy->getPrimitiveSizeInBits();
4265
4266 // If both are pointers then the address spaces must match.
4267 if (SrcPtrTy->getAddressSpace() != DstPtrTy->getAddressSpace())
4268 return false;
4269
4270 // A vector of pointers must have the same number of elements.
4271 if (SrcIsVec && DstIsVec)
4272 return SrcEC == DstEC;
4273 if (SrcIsVec)
4274 return SrcEC == ElementCount::getFixed(1);
4275 if (DstIsVec)
4276 return DstEC == ElementCount::getFixed(1);
4277
4278 return true;
4279 }
4280 case Instruction::AddrSpaceCast: {
4281 PointerType *SrcPtrTy = dyn_cast<PointerType>(SrcTy->getScalarType());
4282 if (!SrcPtrTy)
4283 return false;
4284
4285 PointerType *DstPtrTy = dyn_cast<PointerType>(DstTy->getScalarType());
4286 if (!DstPtrTy)
4287 return false;
4288
4289 if (SrcPtrTy->getAddressSpace() == DstPtrTy->getAddressSpace())
4290 return false;
4291
4292 return SrcEC == DstEC;
4293 }
4294 }
4295}
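// Editorial usage sketch (not part of the LLVM sources): castIsValid as a
// front-line legality check before constructing a cast.
//
//   LLVMContext Ctx;
//   Type *I32 = Type::getInt32Ty(Ctx);
//   Type *I64 = Type::getInt64Ty(Ctx);
//   bool Widen  = CastInst::castIsValid(Instruction::ZExt, I32, I64); // true
//   bool Narrow = CastInst::castIsValid(Instruction::ZExt, I64, I32); // false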
4296
4298 BasicBlock::iterator InsertBefore)
4299 : CastInst(Ty, Trunc, S, Name, InsertBefore) {
4300 assert(castIsValid(getOpcode(), S, Ty) && "Illegal Trunc");
4301}
4302
4304 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
4305) : CastInst(Ty, Trunc, S, Name, InsertBefore) {
4306 assert(castIsValid(getOpcode(), S, Ty) && "Illegal Trunc");
4307}
4308
4310 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
4311) : CastInst(Ty, Trunc, S, Name, InsertAtEnd) {
4312 assert(castIsValid(getOpcode(), S, Ty) && "Illegal Trunc");
4313}
4314
4316 BasicBlock::iterator InsertBefore)
4317 : CastInst(Ty, ZExt, S, Name, InsertBefore) {
4318 assert(castIsValid(getOpcode(), S, Ty) && "Illegal ZExt");
4319}
4320
4322 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
4323) : CastInst(Ty, ZExt, S, Name, InsertBefore) {
4324 assert(castIsValid(getOpcode(), S, Ty) && "Illegal ZExt");
4325}
4326
4328 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
4329) : CastInst(Ty, ZExt, S, Name, InsertAtEnd) {
4330 assert(castIsValid(getOpcode(), S, Ty) && "Illegal ZExt");
4331}
4332
4334 BasicBlock::iterator InsertBefore)
4335 : CastInst(Ty, SExt, S, Name, InsertBefore) {
4336 assert(castIsValid(getOpcode(), S, Ty) && "Illegal SExt");
4337}
4338
4340 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
4341) : CastInst(Ty, SExt, S, Name, InsertBefore) {
4342 assert(castIsValid(getOpcode(), S, Ty) && "Illegal SExt");
4343}
4344
4346 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
4347) : CastInst(Ty, SExt, S, Name, InsertAtEnd) {
4348 assert(castIsValid(getOpcode(), S, Ty) && "Illegal SExt");
4349}
4350
4352 BasicBlock::iterator InsertBefore)
4353 : CastInst(Ty, FPTrunc, S, Name, InsertBefore) {
4354 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPTrunc");
4355}
4356
4358 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
4359) : CastInst(Ty, FPTrunc, S, Name, InsertBefore) {
4360 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPTrunc");
4361}
4362
4364 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
4365) : CastInst(Ty, FPTrunc, S, Name, InsertAtEnd) {
4366 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPTrunc");
4367}
4368
4370 BasicBlock::iterator InsertBefore)
4371 : CastInst(Ty, FPExt, S, Name, InsertBefore) {
4372 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPExt");
4373}
4374
4376 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
4377) : CastInst(Ty, FPExt, S, Name, InsertBefore) {
4378 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPExt");
4379}
4380
4382 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
4383) : CastInst(Ty, FPExt, S, Name, InsertAtEnd) {
4384 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPExt");
4385}
4386
4388 BasicBlock::iterator InsertBefore)
4389 : CastInst(Ty, UIToFP, S, Name, InsertBefore) {
4390 assert(castIsValid(getOpcode(), S, Ty) && "Illegal UIToFP");
4391}
4392
4394 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
4395) : CastInst(Ty, UIToFP, S, Name, InsertBefore) {
4396 assert(castIsValid(getOpcode(), S, Ty) && "Illegal UIToFP");
4397}
4398
4400 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
4401) : CastInst(Ty, UIToFP, S, Name, InsertAtEnd) {
4402 assert(castIsValid(getOpcode(), S, Ty) && "Illegal UIToFP");
4403}
4404
4406 BasicBlock::iterator InsertBefore)
4407 : CastInst(Ty, SIToFP, S, Name, InsertBefore) {
4408 assert(castIsValid(getOpcode(), S, Ty) && "Illegal SIToFP");
4409}
4410
4412 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
4413) : CastInst(Ty, SIToFP, S, Name, InsertBefore) {
4414 assert(castIsValid(getOpcode(), S, Ty) && "Illegal SIToFP");
4415}
4416
4418 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
4419) : CastInst(Ty, SIToFP, S, Name, InsertAtEnd) {
4420 assert(castIsValid(getOpcode(), S, Ty) && "Illegal SIToFP");
4421}
4422
4424 BasicBlock::iterator InsertBefore)
4425 : CastInst(Ty, FPToUI, S, Name, InsertBefore) {
4426 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPToUI");
4427}
4428
4430 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
4431) : CastInst(Ty, FPToUI, S, Name, InsertBefore) {
4432 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPToUI");
4433}
4434
4436 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
4437) : CastInst(Ty, FPToUI, S, Name, InsertAtEnd) {
4438 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPToUI");
4439}
4440
4442 BasicBlock::iterator InsertBefore)
4443 : CastInst(Ty, FPToSI, S, Name, InsertBefore) {
4444 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPToSI");
4445}
4446
4448 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
4449) : CastInst(Ty, FPToSI, S, Name, InsertBefore) {
4450 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPToSI");
4451}
4452
4454 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
4455) : CastInst(Ty, FPToSI, S, Name, InsertAtEnd) {
4456 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPToSI");
4457}
4458
4460 BasicBlock::iterator InsertBefore)
4461 : CastInst(Ty, PtrToInt, S, Name, InsertBefore) {
4462 assert(castIsValid(getOpcode(), S, Ty) && "Illegal PtrToInt");
4463}
4464
4466 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
4467) : CastInst(Ty, PtrToInt, S, Name, InsertBefore) {
4468 assert(castIsValid(getOpcode(), S, Ty) && "Illegal PtrToInt");
4469}
4470
4472 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
4473) : CastInst(Ty, PtrToInt, S, Name, InsertAtEnd) {
4474 assert(castIsValid(getOpcode(), S, Ty) && "Illegal PtrToInt");
4475}
4476
4478 BasicBlock::iterator InsertBefore)
4479 : CastInst(Ty, IntToPtr, S, Name, InsertBefore) {
4480 assert(castIsValid(getOpcode(), S, Ty) && "Illegal IntToPtr");
4481}
4482
4484 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
4485) : CastInst(Ty, IntToPtr, S, Name, InsertBefore) {
4486 assert(castIsValid(getOpcode(), S, Ty) && "Illegal IntToPtr");
4487}
4488
4490 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
4491) : CastInst(Ty, IntToPtr, S, Name, InsertAtEnd) {
4492 assert(castIsValid(getOpcode(), S, Ty) && "Illegal IntToPtr");
4493}
4494
4496 BasicBlock::iterator InsertBefore)
4497 : CastInst(Ty, BitCast, S, Name, InsertBefore) {
4498 assert(castIsValid(getOpcode(), S, Ty) && "Illegal BitCast");
4499}
4500
4502 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
4503) : CastInst(Ty, BitCast, S, Name, InsertBefore) {
4504 assert(castIsValid(getOpcode(), S, Ty) && "Illegal BitCast");
4505}
4506
4508 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
4509) : CastInst(Ty, BitCast, S, Name, InsertAtEnd) {
4510 assert(castIsValid(getOpcode(), S, Ty) && "Illegal BitCast");
4511}
4512
4514 BasicBlock::iterator InsertBefore)
4515 : CastInst(Ty, AddrSpaceCast, S, Name, InsertBefore) {
4516 assert(castIsValid(getOpcode(), S, Ty) && "Illegal AddrSpaceCast");
4517}
4518
4520 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
4521) : CastInst(Ty, AddrSpaceCast, S, Name, InsertBefore) {
4522 assert(castIsValid(getOpcode(), S, Ty) && "Illegal AddrSpaceCast");
4523}
4524
4526 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
4527) : CastInst(Ty, AddrSpaceCast, S, Name, InsertAtEnd) {
4528 assert(castIsValid(getOpcode(), S, Ty) && "Illegal AddrSpaceCast");
4529}
4530
4531//===----------------------------------------------------------------------===//
4532// CmpInst Classes
4533//===----------------------------------------------------------------------===//
4534
4536 Value *RHS, const Twine &Name,
4537 BasicBlock::iterator InsertBefore, Instruction *FlagsSource)
4538 : Instruction(ty, op, OperandTraits<CmpInst>::op_begin(this),
4539 OperandTraits<CmpInst>::operands(this), InsertBefore) {
4540 Op<0>() = LHS;
4541 Op<1>() = RHS;
4542 setPredicate((Predicate)predicate);
4543 setName(Name);
4544 if (FlagsSource)
4545 copyIRFlags(FlagsSource);
4546}
4547
4549 Value *RHS, const Twine &Name, Instruction *InsertBefore,
4550 Instruction *FlagsSource)
4551 : Instruction(ty, op,
4552 OperandTraits<CmpInst>::op_begin(this),
4553 OperandTraits<CmpInst>::operands(this),
4554 InsertBefore) {
4555 Op<0>() = LHS;
4556 Op<1>() = RHS;
4557 setPredicate((Predicate)predicate);
4558 setName(Name);
4559 if (FlagsSource)
4560 copyIRFlags(FlagsSource);
4561}
4562
4564 Value *RHS, const Twine &Name, BasicBlock *InsertAtEnd)
4565 : Instruction(ty, op,
4566 OperandTraits<CmpInst>::op_begin(this),
4567 OperandTraits<CmpInst>::operands(this),
4568 InsertAtEnd) {
4569 Op<0>() = LHS;
4570 Op<1>() = RHS;
4571 setPredicate((Predicate)predicate);
4572 setName(Name);
4573}
4574
4575CmpInst *
4576CmpInst::Create(OtherOps Op, Predicate predicate, Value *S1, Value *S2,
4577 const Twine &Name, BasicBlock::iterator InsertBefore) {
4578 if (Op == Instruction::ICmp) {
4579 return new ICmpInst(InsertBefore, CmpInst::Predicate(predicate),
4580 S1, S2, Name);
4581 }
4582
4583 return new FCmpInst(InsertBefore, CmpInst::Predicate(predicate),
4584 S1, S2, Name);
4585}
4586
4587CmpInst *
4589 const Twine &Name, Instruction *InsertBefore) {
4590 if (Op == Instruction::ICmp) {
4591 if (InsertBefore)
4592 return new ICmpInst(InsertBefore, CmpInst::Predicate(predicate),
4593 S1, S2, Name);
4594 else
4595 return new ICmpInst(CmpInst::Predicate(predicate),
4596 S1, S2, Name);
4597 }
4598
4599 if (InsertBefore)
4600 return new FCmpInst(InsertBefore, CmpInst::Predicate(predicate),
4601 S1, S2, Name);
4602 else
4603 return new FCmpInst(CmpInst::Predicate(predicate),
4604 S1, S2, Name);
4605}
4606
4607CmpInst *
4609 const Twine &Name, BasicBlock *InsertAtEnd) {
4610 if (Op == Instruction::ICmp) {
4611 return new ICmpInst(InsertAtEnd, CmpInst::Predicate(predicate),
4612 S1, S2, Name);
4613 }
4614 return new FCmpInst(InsertAtEnd, CmpInst::Predicate(predicate),
4615 S1, S2, Name);
4616}
4617
4619 if (ICmpInst *IC = dyn_cast<ICmpInst>(this))
4620 IC->swapOperands();
4621 else
4622 cast<FCmpInst>(this)->swapOperands();
4623}
4624
4626 if (const ICmpInst *IC = dyn_cast<ICmpInst>(this))
4627 return IC->isCommutative();
4628 return cast<FCmpInst>(this)->isCommutative();
4629}
4630
4631bool CmpInst::isEquality(Predicate P) {
4632 if (ICmpInst::isIntPredicate(P))
4633 return ICmpInst::isEquality(P);
4634 if (FCmpInst::isFPPredicate(P))
4635 return FCmpInst::isEquality(P);
4636 llvm_unreachable("Unsupported predicate kind");
4637}
4638
4640 switch (pred) {
4641 default: llvm_unreachable("Unknown cmp predicate!");
4642 case ICMP_EQ: return ICMP_NE;
4643 case ICMP_NE: return ICMP_EQ;
4644 case ICMP_UGT: return ICMP_ULE;
4645 case ICMP_ULT: return ICMP_UGE;
4646 case ICMP_UGE: return ICMP_ULT;
4647 case ICMP_ULE: return ICMP_UGT;
4648 case ICMP_SGT: return ICMP_SLE;
4649 case ICMP_SLT: return ICMP_SGE;
4650 case ICMP_SGE: return ICMP_SLT;
4651 case ICMP_SLE: return ICMP_SGT;
4652
4653 case FCMP_OEQ: return FCMP_UNE;
4654 case FCMP_ONE: return FCMP_UEQ;
4655 case FCMP_OGT: return FCMP_ULE;
4656 case FCMP_OLT: return FCMP_UGE;
4657 case FCMP_OGE: return FCMP_ULT;
4658 case FCMP_OLE: return FCMP_UGT;
4659 case FCMP_UEQ: return FCMP_ONE;
4660 case FCMP_UNE: return FCMP_OEQ;
4661 case FCMP_UGT: return FCMP_OLE;
4662 case FCMP_ULT: return FCMP_OGE;
4663 case FCMP_UGE: return FCMP_OLT;
4664 case FCMP_ULE: return FCMP_OGT;
4665 case FCMP_ORD: return FCMP_UNO;
4666 case FCMP_UNO: return FCMP_ORD;
4667 case FCMP_TRUE: return FCMP_FALSE;
4668 case FCMP_FALSE: return FCMP_TRUE;
4669 }
4670}
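// Editorial usage sketch (not part of the LLVM sources): the inverse predicate
// negates the comparison, e.g. when rewriting `!(a <s b)`.
//
//   CmpInst::Predicate P    = CmpInst::ICMP_SLT;
//   CmpInst::Predicate NotP = CmpInst::getInversePredicate(P);
//   // NotP is CmpInst::ICMP_SGE, because !(a <s b) is a >=s b.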
4671
4673 switch (Pred) {
4674 default: return "unknown";
4675 case FCmpInst::FCMP_FALSE: return "false";
4676 case FCmpInst::FCMP_OEQ: return "oeq";
4677 case FCmpInst::FCMP_OGT: return "ogt";
4678 case FCmpInst::FCMP_OGE: return "oge";
4679 case FCmpInst::FCMP_OLT: return "olt";
4680 case FCmpInst::FCMP_OLE: return "ole";
4681 case FCmpInst::FCMP_ONE: return "one";
4682 case FCmpInst::FCMP_ORD: return "ord";
4683 case FCmpInst::FCMP_UNO: return "uno";
4684 case FCmpInst::FCMP_UEQ: return "ueq";
4685 case FCmpInst::FCMP_UGT: return "ugt";
4686 case FCmpInst::FCMP_UGE: return "uge";
4687 case FCmpInst::FCMP_ULT: return "ult";
4688 case FCmpInst::FCMP_ULE: return "ule";
4689 case FCmpInst::FCMP_UNE: return "une";
4690 case FCmpInst::FCMP_TRUE: return "true";
4691 case ICmpInst::ICMP_EQ: return "eq";
4692 case ICmpInst::ICMP_NE: return "ne";
4693 case ICmpInst::ICMP_SGT: return "sgt";
4694 case ICmpInst::ICMP_SGE: return "sge";
4695 case ICmpInst::ICMP_SLT: return "slt";
4696 case ICmpInst::ICMP_SLE: return "sle";
4697 case ICmpInst::ICMP_UGT: return "ugt";
4698 case ICmpInst::ICMP_UGE: return "uge";
4699 case ICmpInst::ICMP_ULT: return "ult";
4700 case ICmpInst::ICMP_ULE: return "ule";
4701 }
4702}
4703
4706 return OS;
4707}
4708
4710 switch (pred) {
4711 default: llvm_unreachable("Unknown icmp predicate!");
4712 case ICMP_EQ: case ICMP_NE:
4713 case ICMP_SGT: case ICMP_SLT: case ICMP_SGE: case ICMP_SLE:
4714 return pred;
4715 case ICMP_UGT: return ICMP_SGT;
4716 case ICMP_ULT: return ICMP_SLT;
4717 case ICMP_UGE: return ICMP_SGE;
4718 case ICMP_ULE: return ICMP_SLE;
4719 }
4720}
4721
4723 switch (pred) {
4724 default: llvm_unreachable("Unknown icmp predicate!");
4725 case ICMP_EQ: case ICMP_NE:
4726 case ICMP_UGT: case ICMP_ULT: case ICMP_UGE: case ICMP_ULE:
4727 return pred;
4728 case ICMP_SGT: return ICMP_UGT;
4729 case ICMP_SLT: return ICMP_ULT;
4730 case ICMP_SGE: return ICMP_UGE;
4731 case ICMP_SLE: return ICMP_ULE;
4732 }
4733}
4734
4736 switch (pred) {
4737 default: llvm_unreachable("Unknown cmp predicate!");
4738 case ICMP_EQ: case ICMP_NE:
4739 return pred;
4740 case ICMP_SGT: return ICMP_SLT;
4741 case ICMP_SLT: return ICMP_SGT;
4742 case ICMP_SGE: return ICMP_SLE;
4743 case ICMP_SLE: return ICMP_SGE;
4744 case ICMP_UGT: return ICMP_ULT;
4745 case ICMP_ULT: return ICMP_UGT;
4746 case ICMP_UGE: return ICMP_ULE;
4747 case ICMP_ULE: return ICMP_UGE;
4748
4749 case FCMP_FALSE: case FCMP_TRUE:
4750 case FCMP_OEQ: case FCMP_ONE:
4751 case FCMP_UEQ: case FCMP_UNE:
4752 case FCMP_ORD: case FCMP_UNO:
4753 return pred;
4754 case FCMP_OGT: return FCMP_OLT;
4755 case FCMP_OLT: return FCMP_OGT;
4756 case FCMP_OGE: return FCMP_OLE;
4757 case FCMP_OLE: return FCMP_OGE;
4758 case FCMP_UGT: return FCMP_ULT;
4759 case FCMP_ULT: return FCMP_UGT;
4760 case FCMP_UGE: return FCMP_ULE;
4761 case FCMP_ULE: return FCMP_UGE;
4762 }
4763}
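// Editorial usage sketch (not part of the LLVM sources): the swapped predicate
// is the one to use after exchanging the operands, e.g. `icmp ugt %a, %b` is
// equivalent to `icmp ult %b, %a`.
//
//   CmpInst::Predicate P = CmpInst::getSwappedPredicate(CmpInst::ICMP_UGT);
//   // P is CmpInst::ICMP_ULT; likewise FCMP_OLE swaps to FCMP_OGE.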
4764
4766 switch (pred) {
4767 case ICMP_SGE:
4768 case ICMP_SLE:
4769 case ICMP_UGE:
4770 case ICMP_ULE:
4771 case FCMP_OGE:
4772 case FCMP_OLE:
4773 case FCMP_UGE:
4774 case FCMP_ULE:
4775 return true;
4776 default:
4777 return false;
4778 }
4779}
4780
4782 switch (pred) {
4783 case ICMP_SGT:
4784 case ICMP_SLT:
4785 case ICMP_UGT:
4786 case ICMP_ULT:
4787 case FCMP_OGT:
4788 case FCMP_OLT:
4789 case FCMP_UGT:
4790 case FCMP_ULT:
4791 return true;
4792 default:
4793 return false;
4794 }
4795}
4796
4798 switch (pred) {
4799 case ICMP_SGE:
4800 return ICMP_SGT;
4801 case ICMP_SLE:
4802 return ICMP_SLT;
4803 case ICMP_UGE:
4804 return ICMP_UGT;
4805 case ICMP_ULE:
4806 return ICMP_ULT;
4807 case FCMP_OGE:
4808 return FCMP_OGT;
4809 case FCMP_OLE:
4810 return FCMP_OLT;
4811 case FCMP_UGE:
4812 return FCMP_UGT;
4813 case FCMP_ULE:
4814 return FCMP_ULT;
4815 default:
4816 return pred;
4817 }
4818}
4819
4821 switch (pred) {
4822 case ICMP_SGT:
4823 return ICMP_SGE;
4824 case ICMP_SLT:
4825 return ICMP_SLE;
4826 case ICMP_UGT:
4827 return ICMP_UGE;
4828 case ICMP_ULT:
4829 return ICMP_ULE;
4830 case FCMP_OGT:
4831 return FCMP_OGE;
4832 case FCMP_OLT:
4833 return FCMP_OLE;
4834 case FCMP_UGT:
4835 return FCMP_UGE;
4836 case FCMP_ULT:
4837 return FCMP_ULE;
4838 default:
4839 return pred;
4840 }
4841}
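// Editorial usage sketch (not part of the LLVM sources): converting between
// strict and non-strict forms of the same ordering.
//
//   CmpInst::Predicate Strict    = CmpInst::getStrictPredicate(CmpInst::ICMP_ULE);
//   CmpInst::Predicate NonStrict = CmpInst::getNonStrictPredicate(CmpInst::ICMP_ULT);
//   // Strict is ICMP_ULT and NonStrict is ICMP_ULE; predicates without a
//   // strict/non-strict counterpart (e.g. ICMP_EQ) are returned unchanged.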
4842
4843CmpInst::Predicate CmpInst::getFlippedStrictnessPredicate(Predicate pred) {
4844 assert(CmpInst::isRelational(pred) && "Call only with relational predicate!");
4845
4846 if (isStrictPredicate(pred))
4847 return getNonStrictPredicate(pred);
4848 if (isNonStrictPredicate(pred))
4849 return getStrictPredicate(pred);
4850
4851 llvm_unreachable("Unknown predicate!");
4852}
4853
4855 assert(CmpInst::isUnsigned(pred) && "Call only with unsigned predicates!");
4856
4857 switch (pred) {
4858 default:
4859 llvm_unreachable("Unknown predicate!");
4860 case CmpInst::ICMP_ULT:
4861 return CmpInst::ICMP_SLT;
4862 case CmpInst::ICMP_ULE:
4863 return CmpInst::ICMP_SLE;
4864 case CmpInst::ICMP_UGT:
4865 return CmpInst::ICMP_SGT;
4866 case CmpInst::ICMP_UGE:
4867 return CmpInst::ICMP_SGE;
4868 }
4869}
4870
4872 assert(CmpInst::isSigned(pred) && "Call only with signed predicates!");
4873
4874 switch (pred) {
4875 default:
4876 llvm_unreachable("Unknown predicate!");
4877 case CmpInst::ICMP_SLT:
4878 return CmpInst::ICMP_ULT;
4879 case CmpInst::ICMP_SLE:
4880 return CmpInst::ICMP_ULE;
4881 case CmpInst::ICMP_SGT:
4882 return CmpInst::ICMP_UGT;
4883 case CmpInst::ICMP_SGE:
4884 return CmpInst::ICMP_UGE;
4885 }
4886}
4887
4889 switch (predicate) {
4890 default: return false;
4892 case ICmpInst::ICMP_UGE: return true;
4893 }
4894}
4895
4897 switch (predicate) {
4898 default: return false;
4900 case ICmpInst::ICMP_SGE: return true;
4901 }
4902}
4903
4904bool ICmpInst::compare(const APInt &LHS, const APInt &RHS,
4905 ICmpInst::Predicate Pred) {
4906 assert(ICmpInst::isIntPredicate(Pred) && "Only for integer predicates!");
4907 switch (Pred) {
4908 case ICmpInst::ICMP_EQ:
4909 return LHS.eq(RHS);
4910 case ICmpInst::ICMP_NE:
4911 return LHS.ne(RHS);
4912 case ICmpInst::ICMP_UGT:
4913 return LHS.ugt(RHS);
4914 case ICmpInst::ICMP_UGE:
4915 return LHS.uge(RHS);
4916 case ICmpInst::ICMP_ULT:
4917 return LHS.ult(RHS);
4918 case ICmpInst::ICMP_ULE:
4919 return LHS.ule(RHS);
4920 case ICmpInst::ICMP_SGT:
4921 return LHS.sgt(RHS);
4922 case ICmpInst::ICMP_SGE:
4923 return LHS.sge(RHS);
4924 case ICmpInst::ICMP_SLT:
4925 return LHS.slt(RHS);
4926 case ICmpInst::ICMP_SLE:
4927 return LHS.sle(RHS);
4928 default:
4929 llvm_unreachable("Unexpected non-integer predicate.");
4930 };
4931}
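// Editorial usage sketch (not part of the LLVM sources): constant-folding an
// integer comparison on APInt values.
//
//   APInt A(/*numBits=*/8, /*val=*/200);   // 200 unsigned, -56 when signed
//   APInt B(/*numBits=*/8, /*val=*/100);
//   bool U = ICmpInst::compare(A, B, ICmpInst::ICMP_UGT);  // true:  200 >u 100
//   bool S = ICmpInst::compare(A, B, ICmpInst::ICMP_SGT);  // false: -56 >s 100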
4932
4933bool FCmpInst::compare(const APFloat &LHS, const APFloat &RHS,
4934 FCmpInst::Predicate Pred) {
4935 APFloat::cmpResult R = LHS.compare(RHS);
4936 switch (Pred) {
4937 default:
4938 llvm_unreachable("Invalid FCmp Predicate");
4939 case FCmpInst::FCMP_FALSE:
4940 return false;
4941 case FCmpInst::FCMP_TRUE:
4942 return true;
4943 case FCmpInst::FCMP_UNO:
4944 return R == APFloat::cmpUnordered;
4945 case FCmpInst::FCMP_ORD:
4946 return R != APFloat::cmpUnordered;
4947 case FCmpInst::FCMP_UEQ:
4948 return R == APFloat::cmpUnordered || R == APFloat::cmpEqual;
4949 case FCmpInst::FCMP_OEQ:
4950 return R == APFloat::cmpEqual;
4951 case FCmpInst::FCMP_UNE:
4952 return R != APFloat::cmpEqual;
4953 case FCmpInst::FCMP_ONE:
4954 return R == APFloat::cmpLessThan || R == APFloat::cmpGreaterThan;
4955 case FCmpInst::FCMP_ULT:
4956 return R == APFloat::cmpUnordered || R == APFloat::cmpLessThan;
4957 case FCmpInst::FCMP_OLT:
4958 return R == APFloat::cmpLessThan;
4959 case FCmpInst::FCMP_UGT:
4960 return R == APFloat::cmpUnordered || R == APFloat::cmpGreaterThan;
4961 case FCmpInst::FCMP_OGT:
4962 return R == APFloat::cmpGreaterThan;
4963 case FCmpInst::FCMP_ULE:
4964 return R != APFloat::cmpGreaterThan;
4965 case FCmpInst::FCMP_OLE:
4966 return R == APFloat::cmpLessThan || R == APFloat::cmpEqual;
4967 case FCmpInst::FCMP_UGE:
4968 return R != APFloat::cmpLessThan;
4969 case FCmpInst::FCMP_OGE:
4970 return R == APFloat::cmpGreaterThan || R == APFloat::cmpEqual;
4971 }
4972}
4973
4976 "Call only with non-equality predicates!");
4977
4978 if (isSigned(pred))
4979 return getUnsignedPredicate(pred);
4980 if (isUnsigned(pred))
4981 return getSignedPredicate(pred);
4982
4983 llvm_unreachable("Unknown predicate!");
4984}
4985
4987 switch (predicate) {
4988 default: return false;
4991 case FCmpInst::FCMP_ORD: return true;
4992 }
4993}
4994
4996 switch (predicate) {
4997 default: return false;
5000 case FCmpInst::FCMP_UNO: return true;
5001 }
5002}
5003
5005 switch(predicate) {
5006 default: return false;
5007 case ICMP_EQ: case ICMP_UGE: case ICMP_ULE: case ICMP_SGE: case ICMP_SLE:
5008 case FCMP_TRUE: case FCMP_UEQ: case FCMP_UGE: case FCMP_ULE: return true;
5009 }
5010}
5011
5013 switch(predicate) {
5014 case ICMP_NE: case ICMP_UGT: case ICMP_ULT: case ICMP_SGT: case ICMP_SLT:
5015 case FCMP_FALSE: case FCMP_ONE: case FCMP_OGT: case FCMP_OLT: return true;
5016 default: return false;
5017 }
5018}
5019
5020bool ICmpInst::isImpliedTrueByMatchingCmp(Predicate Pred1, Predicate Pred2) {
5021 // If the predicates match, then we know the first condition implies the
5022 // second is true.
5023 if (Pred1 == Pred2)
5024 return true;
5025
5026 switch (Pred1) {
5027 default:
5028 break;
5029 case ICMP_EQ:
5030 // A == B implies A >=u B, A <=u B, A >=s B, and A <=s B are true.
5031 return Pred2 == ICMP_UGE || Pred2 == ICMP_ULE || Pred2 == ICMP_SGE ||
5032 Pred2 == ICMP_SLE;
5033 case ICMP_UGT: // A >u B implies A != B and A >=u B are true.
5034 return Pred2 == ICMP_NE || Pred2 == ICMP_UGE;
5035 case ICMP_ULT: // A <u B implies A != B and A <=u B are true.
5036 return Pred2 == ICMP_NE || Pred2 == ICMP_ULE;
5037 case ICMP_SGT: // A >s B implies A != B and A >=s B are true.
5038 return Pred2 == ICMP_NE || Pred2 == ICMP_SGE;
5039 case ICMP_SLT: // A <s B implies A != B and A <=s B are true.
5040 return Pred2 == ICMP_NE || Pred2 == ICMP_SLE;
5041 }
5042 return false;
5043}
5044
5045bool ICmpInst::isImpliedFalseByMatchingCmp(Predicate Pred1, Predicate Pred2) {
5046 return isImpliedTrueByMatchingCmp(Pred1, getInversePredicate(Pred2));
5047}
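// Editorial usage sketch (not part of the LLVM sources), using the ICmpInst
// helpers defined above: implication between two predicates applied to the
// same operand pair (A pred1 B implies A pred2 B).
//
//   // A == B implies A <=u B is true ...
//   bool T = ICmpInst::isImpliedTrueByMatchingCmp(ICmpInst::ICMP_EQ,
//                                                 ICmpInst::ICMP_ULE);
//   // ... and implies A <u B is false.
//   bool F = ICmpInst::isImpliedFalseByMatchingCmp(ICmpInst::ICMP_EQ,
//                                                  ICmpInst::ICMP_ULT);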
5048
5049//===----------------------------------------------------------------------===//
5050// SwitchInst Implementation
5051//===----------------------------------------------------------------------===//
5052
5053void SwitchInst::init(Value *Value, BasicBlock *Default, unsigned NumReserved) {
5054 assert(Value && Default && NumReserved);
5055 ReservedSpace = NumReserved;
5056 setNumHungOffUseOperands(2);
5057 allocHungoffUses(ReservedSpace);
5058
5059 Op<0>() = Value;
5060 Op<1>() = Default;
5061}
5062
5063/// SwitchInst ctor - Create a new switch instruction, specifying a value to
5064/// switch on and a default destination. The number of additional cases can
5065/// be specified here to make memory allocation more efficient. This
5066/// constructor can also autoinsert before another instruction.
5067SwitchInst::SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases,
5068 BasicBlock::iterator InsertBefore)
5069 : Instruction(Type::getVoidTy(Value->getContext()), Instruction::Switch,
5070 nullptr, 0, InsertBefore) {
5071 init(Value, Default, 2 + NumCases * 2);
5072}
5073
5074/// SwitchInst ctor - Create a new switch instruction, specifying a value to
5075/// switch on and a default destination. The number of additional cases can
5076/// be specified here to make memory allocation more efficient. This
5077/// constructor can also autoinsert before another instruction.
5078SwitchInst::SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases,
5079 Instruction *InsertBefore)
5080 : Instruction(Type::getVoidTy(Value->getContext()), Instruction::Switch,
5081 nullptr, 0, InsertBefore) {
5082 init(Value, Default, 2+NumCases*2);
5083}
5084
5085/// SwitchInst ctor - Create a new switch instruction, specifying a value to
5086/// switch on and a default destination. The number of additional cases can
5087/// be specified here to make memory allocation more efficient. This
5088/// constructor also autoinserts at the end of the specified BasicBlock.
5089SwitchInst::SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases,
5090 BasicBlock *InsertAtEnd)
5091 : Instruction(Type::getVoidTy(Value->getContext()), Instruction::Switch,
5092 nullptr, 0, InsertAtEnd) {
5093 init(Value, Default, 2+NumCases*2);
5094}
5095
5096SwitchInst::SwitchInst(const SwitchInst &SI)
5097 : Instruction(SI.getType(), Instruction::Switch, nullptr, 0) {
5098 init(SI.getCondition(), SI.getDefaultDest(), SI.getNumOperands());
5099 setNumHungOffUseOperands(SI.getNumOperands());
5100 Use *OL = getOperandList();
5101 const Use *InOL = SI.getOperandList();
5102 for (unsigned i = 2, E = SI.getNumOperands(); i != E; i += 2) {
5103 OL[i] = InOL[i];
5104 OL[i+1] = InOL[i+1];
5105 }
5106 SubclassOptionalData = SI.SubclassOptionalData;
5107}
5108
5109/// addCase - Add an entry to the switch instruction...
5110///
5112 unsigned NewCaseIdx = getNumCases();
5113 unsigned OpNo = getNumOperands();
5114 if (OpNo+2 > ReservedSpace)
5115 growOperands(); // Get more space!
5116 // Initialize some new operands.
5117 assert(OpNo+1 < ReservedSpace && "Growing didn't work!");
5118 setNumHungOffUseOperands(OpNo+2);
5119 CaseHandle Case(this, NewCaseIdx);
5120 Case.setValue(OnVal);
5121 Case.setSuccessor(Dest);
5122}
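// Editorial usage sketch (not part of the LLVM sources): building a small
// switch with the InsertAtEnd form shown above. `Ctx`, `F` (the enclosing
// Function), `BB` (the block being filled), and `Cond` (an i32 value) are
// assumed to exist.
//
//   BasicBlock *DefaultBB = BasicBlock::Create(Ctx, "default", F);
//   BasicBlock *CaseBB    = BasicBlock::Create(Ctx, "case1", F);
//   SwitchInst *SI = SwitchInst::Create(Cond, DefaultBB, /*NumCases=*/1, BB);
//   SI->addCase(ConstantInt::get(IntegerType::get(Ctx, 32), 1), CaseBB);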
5123
5124/// removeCase - This method removes the specified case and its successor
5125/// from the switch instruction.
5127 unsigned idx = I->getCaseIndex();
5128
5129 assert(2 + idx*2 < getNumOperands() && "Case index out of range!!!");
5130
5131 unsigned NumOps = getNumOperands();
5132 Use *OL = getOperandList();
5133
5134 // Overwrite this case with the end of the list.
5135 if (2 + (idx + 1) * 2 != NumOps) {
5136 OL[2 + idx * 2] = OL[NumOps - 2];
5137 OL[2 + idx * 2 + 1] = OL[NumOps - 1];
5138 }
5139
5140 // Nuke the last value.
5141 OL[NumOps-2].set(nullptr);
5142 OL[NumOps-2+1].set(nullptr);
5143 setNumHungOffUseOperands(NumOps-2);
5144
5145 return CaseIt(this, idx);
5146}
5147
5148/// growOperands - This grows the operand list in response to a push_back
5149/// style of operation. It grows the number of operands to three times the
5150/// current size.
5151void SwitchInst::growOperands() {
5152 unsigned e = getNumOperands();
5153 unsigned NumOps = e*3;
5154
5155 ReservedSpace = NumOps;
5156 growHungoffUses(ReservedSpace);
5157}
5158
5160 assert(Changed && "called only if metadata has changed");
5161
5162 if (!Weights)
5163 return nullptr;
5164
5165 assert(SI.getNumSuccessors() == Weights->size() &&
5166 "num of prof branch_weights must accord with num of successors");
5167
5168 bool AllZeroes = all_of(*Weights, [](uint32_t W) { return W == 0; });
5169
5170 if (AllZeroes || Weights->size() < 2)
5171 return nullptr;
5172
5173 return MDBuilder(SI.getParent()->getContext()).createBranchWeights(*Weights);
5174}
5175
5177 MDNode *ProfileData = getBranchWeightMDNode(SI);
5178 if (!ProfileData)
5179 return;
5180
5181 if (ProfileData->getNumOperands() != SI.getNumSuccessors() + 1) {
5182 llvm_unreachable("number of prof branch_weights metadata operands does "
5183 "not correspond to number of successors");
5184 }
5185
5186 SmallVector<uint32_t, 8> Weights;
5187 if (!extractBranchWeights(ProfileData, Weights))
5188 return;
5189 this->Weights = std::move(Weights);
5190}
5191
5194 if (Weights) {
5195 assert(SI.getNumSuccessors() == Weights->size() &&
5196 "num of prof branch_weights must accord with num of successors");
5197 Changed = true;
5198 // Copy the last case to the place of the removed one and shrink.
5199 // This is tightly coupled with the way the cases are removed in
5200 // SwitchInst::removeCase(CaseIt) above.
5201 (*Weights)[I->getCaseIndex() + 1] = Weights->back();
5202 Weights->pop_back();
5203 }
5204 return SI.removeCase(I);
5205}
5206
5208 ConstantInt *OnVal, BasicBlock *Dest,
5210 SI.addCase(OnVal, Dest);
5211
5212 if (!Weights && W && *W) {
5213 Changed = true;
5214 Weights = SmallVector<uint32_t, 8>(SI.getNumSuccessors(), 0);
5215 (*Weights)[SI.getNumSuccessors() - 1] = *W;
5216 } else if (Weights) {
5217 Changed = true;
5218 Weights->push_back(W.value_or(0));
5219 }
5220 if (Weights)
5221 assert(SI.getNumSuccessors() == Weights->size() &&
5222 "num of prof branch_weights must accord with num of successors");
5223}
5224
5227 // Instruction is erased. Mark as unchanged to not touch it in the destructor.
5228 Changed = false;
5229 if (Weights)
5230 Weights->resize(0);
5231 return SI.eraseFromParent();
5232}
5233
5236 if (!Weights)
5237 return std::nullopt;
5238 return (*Weights)[idx];
5239}
5240
5241void SwitchInstProfUpdateWrapper::setSuccessorWeight(
5242 unsigned idx, SwitchInstProfUpdateWrapper::CaseWeightOpt W) {
5243 if (!W)
5244 return;
5245
5246 if (!Weights && *W)
5247 Weights = SmallVector<uint32_t, 8>(SI.getNumSuccessors(), 0);
5248
5249 if (Weights) {
5250 auto &OldW = (*Weights)[idx];
5251 if (*W != OldW) {
5252 Changed = true;
5253 OldW = *W;
5254 }
5255 }
5256}
5257
5258SwitchInstProfUpdateWrapper::CaseWeightOpt
5259SwitchInstProfUpdateWrapper::getSuccessorWeight(const SwitchInst &SI,
5260 unsigned idx) {
5261 if (MDNode *ProfileData = getBranchWeightMDNode(SI))
5262 if (ProfileData->getNumOperands() == SI.getNumSuccessors() + 1)
5263 return mdconst::extract<ConstantInt>(ProfileData->getOperand(idx + 1))
5264 ->getValue()
5265 .getZExtValue();
5266
5267 return std::nullopt;
5268}
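// Editor's sketch (illustrative, not part of Instructions.cpp): the
// SwitchInstProfUpdateWrapper defined above keeps !prof branch_weights in sync
// while a switch is edited. A transform might use it roughly like this; the
// helper name and the caller-supplied case iterator are assumptions.
#include "llvm/IR/Instructions.h"

using namespace llvm;

static void removeCaseKeepingWeights(SwitchInst &SI, SwitchInst::CaseIt I) {
  // The wrapper reads any existing branch_weights metadata in its constructor.
  SwitchInstProfUpdateWrapper SIW(SI);

  // Removing through the wrapper also drops the matching weight, mirroring the
  // swap-with-last trick used by SwitchInst::removeCase().
  SIW.removeCase(I);

  // On destruction the wrapper writes updated branch_weights metadata back to
  // the instruction, but only if something actually changed.
}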
5269
5270//===----------------------------------------------------------------------===//
5271// IndirectBrInst Implementation
5272//===----------------------------------------------------------------------===//
5273
5274void IndirectBrInst::init(Value *Address, unsigned NumDests) {
5275 assert(Address && Address->getType()->isPointerTy() &&
5276 "Address of indirectbr must be a pointer");
5277 ReservedSpace = 1+NumDests;
5278 setNumHungOffUseOperands(1);
5279 allocHungoffUses(ReservedSpace);
5280
5281 Op<0>() = Address;
5282}
5283
5284
5285/// growOperands - This grows the operand list in response to a push_back
5286/// style of operation. This grows the number of ops by 2 times.
5287///
5288void IndirectBrInst::growOperands() {
5289 unsigned e = getNumOperands();
5290 unsigned NumOps = e*2;
5291
5292 ReservedSpace = NumOps;
5293 growHungoffUses(ReservedSpace);
5294}
5295
5296IndirectBrInst::IndirectBrInst(Value *Address, unsigned NumCases,
5297 BasicBlock::iterator InsertBefore)
5298 : Instruction(Type::getVoidTy(Address->getContext()),
5299 Instruction::IndirectBr, nullptr, 0, InsertBefore) {
5300 init(Address, NumCases);
5301}
5302
5303IndirectBrInst::IndirectBrInst(Value *Address, unsigned NumCases,
5304 Instruction *InsertBefore)
5305 : Instruction(Type::getVoidTy(Address->getContext()),
5306 Instruction::IndirectBr, nullptr, 0, InsertBefore) {
5307 init(Address, NumCases);
5308}
5309
5310IndirectBrInst::IndirectBrInst(Value *Address, unsigned NumCases,
5311 BasicBlock *InsertAtEnd)
5312 : Instruction(Type::getVoidTy(Address->getContext()),
5313 Instruction::IndirectBr, nullptr, 0, InsertAtEnd) {
5314 init(Address, NumCases);
5315}
5316
5317IndirectBrInst::IndirectBrInst(const IndirectBrInst &IBI)
5318 : Instruction(Type::getVoidTy(IBI.getContext()), Instruction::IndirectBr,
5319 nullptr, IBI.getNumOperands()) {
5320 allocHungoffUses(IBI.getNumOperands());
5321 Use *OL = getOperandList();
5322 const Use *InOL = IBI.getOperandList();
5323 for (unsigned i = 0, E = IBI.getNumOperands(); i != E; ++i)
5324 OL[i] = InOL[i];
5325 SubclassOptionalData = IBI.SubclassOptionalData;
5326}
5327
5328/// addDestination - Add a destination.
5329///
5330void IndirectBrInst::addDestination(BasicBlock *DestBB) {
5331 unsigned OpNo = getNumOperands();
5332 if (OpNo+1 > ReservedSpace)
5333 growOperands(); // Get more space!
5334 // Initialize some new operands.
5335 assert(OpNo < ReservedSpace && "Growing didn't work!");
5336 setNumHungOffUseOperands(OpNo+1);
5337 getOperandList()[OpNo] = DestBB;
5338}
5339
5340/// removeDestination - This method removes the specified successor from the
5341/// indirectbr instruction.
5342void IndirectBrInst::removeDestination(unsigned idx) {
5343 assert(idx < getNumOperands()-1 && "Successor index out of range!");
5344
5345 unsigned NumOps = getNumOperands();
5346 Use *OL = getOperandList();
5347
5348 // Replace this value with the last one.
5349 OL[idx+1] = OL[NumOps-1];
5350
5351 // Nuke the last value.
5352 OL[NumOps-1].set(nullptr);
5353 setNumHungOffUseOperands(NumOps-1);
5354}
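// Editor's sketch (illustrative, not part of Instructions.cpp): building an
// indirectbr with the addDestination() machinery above, e.g. when lowering a
// computed goto. The helper name and parameters are assumptions; TargetAddr
// would typically be derived from llvm::BlockAddress constants.
#include "llvm/ADT/ArrayRef.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instructions.h"

using namespace llvm;

static void buildIndirectBr(Function &F, Value *TargetAddr,
                            ArrayRef<BasicBlock *> Targets) {
  BasicBlock *Entry = &F.getEntryBlock();

  // Reserve room for the expected number of destinations; growOperands()
  // doubles the hung-off use list if more are added later.
  IndirectBrInst *IBI =
      IndirectBrInst::Create(TargetAddr, Targets.size(), Entry);
  for (BasicBlock *BB : Targets)
    IBI->addDestination(BB);
}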
5355
5356//===----------------------------------------------------------------------===//
5357// FreezeInst Implementation
5358//===----------------------------------------------------------------------===//
5359
5360FreezeInst::FreezeInst(Value *S, const Twine &Name,
5361 BasicBlock::iterator InsertBefore)
5362 : UnaryInstruction(S->getType(), Freeze, S, InsertBefore) {
5363 setName(Name);
5364}
5365
5366FreezeInst::FreezeInst(Value *S,
5367 const Twine &Name, Instruction *InsertBefore)
5368 : UnaryInstruction(S->getType(), Freeze, S, InsertBefore) {
5369 setName(Name);
5370}
5371
5372FreezeInst::FreezeInst(Value *S,
5373 const Twine &Name, BasicBlock *InsertAtEnd)
5374 : UnaryInstruction(S->getType(), Freeze, S, InsertAtEnd) {
5375 setName(Name);
5376}
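// Editor's sketch (illustrative, not part of Instructions.cpp): freeze
// instructions are usually created through IRBuilder rather than the raw
// constructors above; the helper name is an assumption.
#include "llvm/IR/IRBuilder.h"

using namespace llvm;

static Value *freezeOperand(IRBuilder<> &B, Value *MaybeUndefOrPoison) {
  // CreateFreeze() ends up in one of the FreezeInst constructors shown above.
  return B.CreateFreeze(MaybeUndefOrPoison, "frozen");
}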
5377
5378//===----------------------------------------------------------------------===//
5379// cloneImpl() implementations
5380//===----------------------------------------------------------------------===//
5381
5382// Define these methods here so vtables don't get emitted into every translation
5383// unit that uses these classes.
5384
5385GetElementPtrInst *GetElementPtrInst::cloneImpl() const {
5386 return new (getNumOperands()) GetElementPtrInst(*this);
5387}
5388
5389UnaryOperator *UnaryOperator::cloneImpl() const {
5390 return Create(getOpcode(), Op<0>());
5391}
5392
5393BinaryOperator *BinaryOperator::cloneImpl() const {
5394 return Create(getOpcode(), Op<0>(), Op<1>());
5395}
5396
5397FCmpInst *FCmpInst::cloneImpl() const {
5398 return new FCmpInst(getPredicate(), Op<0>(), Op<1>());
5399}
5400
5401ICmpInst *ICmpInst::cloneImpl() const {
5402 return new ICmpInst(getPredicate(), Op<0>(), Op<1>());
5403}
5404
5405ExtractValueInst *ExtractValueInst::cloneImpl() const {
5406 return new ExtractValueInst(*this);
5407}
5408
5409InsertValueInst *InsertValueInst::cloneImpl() const {
5410 return new InsertValueInst(*this);
5411}
5412
5413AllocaInst *AllocaInst::cloneImpl() const {
5414 AllocaInst *Result = new AllocaInst(getAllocatedType(), getAddressSpace(),
5415 getOperand(0), getAlign());
5416 Result->setUsedWithInAlloca(isUsedWithInAlloca());
5417 Result->setSwiftError(isSwiftError());
5418 return Result;
5419}
5420
5421LoadInst *LoadInst::cloneImpl() const {
5422 return new LoadInst(getType(), getOperand(0), Twine(), isVolatile(),
5423 getAlign(), getOrdering(), getSyncScopeID());
5424}
5425
5426StoreInst *StoreInst::cloneImpl() const {
5427 return new StoreInst(getOperand(0), getOperand(1), isVolatile(), getAlign(),
5428 getOrdering(), getSyncScopeID());
5429}
5430
5431AtomicCmpXchgInst *AtomicCmpXchgInst::cloneImpl() const {
5432 AtomicCmpXchgInst *Result = new AtomicCmpXchgInst(
5433 getOperand(0), getOperand(1), getOperand(2), getAlign(),
5434 getSuccessOrdering(), getFailureOrdering(), getSyncScopeID());
5435 Result->setVolatile(isVolatile());
5436 Result->setWeak(isWeak());
5437 return Result;
5438}
5439
5440AtomicRMWInst *AtomicRMWInst::cloneImpl() const {
5441 AtomicRMWInst *Result =
5442 new AtomicRMWInst(getOperation(), getOperand(0), getOperand(1),
5443 getAlign(), getOrdering(), getSyncScopeID());
5444 Result->setVolatile(isVolatile());
5445 return Result;
5446}
5447
5448FenceInst *FenceInst::cloneImpl() const {
5449 return new FenceInst(getContext(), getOrdering(), getSyncScopeID());
5450}
5451
5452TruncInst *TruncInst::cloneImpl() const {
5453 return new TruncInst(getOperand(0), getType());
5454}
5455
5456ZExtInst *ZExtInst::cloneImpl() const {
5457 return new ZExtInst(getOperand(0), getType());
5458}
5459
5460SExtInst *SExtInst::cloneImpl() const {
5461 return new SExtInst(getOperand(0), getType());
5462}
5463
5464FPTruncInst *FPTruncInst::cloneImpl() const {
5465 return new FPTruncInst(getOperand(0), getType());
5466}
5467
5468FPExtInst *FPExtInst::cloneImpl() const {
5469 return new FPExtInst(getOperand(0), getType());
5470}
5471
5472UIToFPInst *UIToFPInst::cloneImpl() const {
5473 return new UIToFPInst(getOperand(0), getType());
5474}
5475
5476SIToFPInst *SIToFPInst::cloneImpl() const {
5477 return new SIToFPInst(getOperand(0), getType());
5478}
5479
5480FPToUIInst *FPToUIInst::cloneImpl() const {
5481 return new FPToUIInst(getOperand(0), getType());
5482}
5483
5484FPToSIInst *FPToSIInst::cloneImpl() const {
5485 return new FPToSIInst(getOperand(0), getType());
5486}
5487
5488PtrToIntInst *PtrToIntInst::cloneImpl() const {
5489 return new PtrToIntInst(getOperand(0), getType());
5490}
5491
5492IntToPtrInst *IntToPtrInst::cloneImpl() const {
5493 return new IntToPtrInst(getOperand(0), getType());
5494}
5495
5496BitCastInst *BitCastInst::cloneImpl() const {
5497 return new BitCastInst(getOperand(0), getType());
5498}
5499
5500AddrSpaceCastInst *AddrSpaceCastInst::cloneImpl() const {
5501 return new AddrSpaceCastInst(getOperand(0), getType());
5502}
5503
5504CallInst *CallInst::cloneImpl() const {
5505 if (hasOperandBundles()) {
5506 unsigned DescriptorBytes = getNumOperandBundles() * sizeof(BundleOpInfo);
5507 return new(getNumOperands(), DescriptorBytes) CallInst(*this);
5508 }
5509 return new(getNumOperands()) CallInst(*this);
5510}
5511
5512SelectInst *SelectInst::cloneImpl() const {
5513 return SelectInst::Create(getOperand(0), getOperand(1), getOperand(2));
5514}
5515
5516VAArgInst *VAArgInst::cloneImpl() const {
5517 return new VAArgInst(getOperand(0), getType());
5518}
5519
5520ExtractElementInst *ExtractElementInst::cloneImpl() const {
5521 return ExtractElementInst::Create(getOperand(0), getOperand(1));
5522}
5523
5524InsertElementInst *InsertElementInst::cloneImpl() const {
5525 return InsertElementInst::Create(getOperand(0), getOperand(1), getOperand(2));
5526}
5527
5528ShuffleVectorInst *ShuffleVectorInst::cloneImpl() const {
5529 return new ShuffleVectorInst(getOperand(0), getOperand(1), getShuffleMask());
5530}
5531
5532PHINode *PHINode::cloneImpl() const { return new PHINode(*this); }
5533
5534LandingPadInst *LandingPadInst::cloneImpl() const {
5535 return new LandingPadInst(*this);
5536}
5537
5538ReturnInst *ReturnInst::cloneImpl() const {
5539 return new(getNumOperands()) ReturnInst(*this);
5540}
5541
5542BranchInst *BranchInst::cloneImpl() const {
5543 return new(getNumOperands()) BranchInst(*this);
5544}
5545
5546SwitchInst *SwitchInst::cloneImpl() const { return new SwitchInst(*this); }
5547
5548IndirectBrInst *IndirectBrInst::cloneImpl() const {
5549 return new IndirectBrInst(*this);
5550}
5551
5552InvokeInst *InvokeInst::cloneImpl() const {
5553 if (hasOperandBundles()) {
5554 unsigned DescriptorBytes = getNumOperandBundles() * sizeof(BundleOpInfo);
5555 return new(getNumOperands(), DescriptorBytes) InvokeInst(*this);
5556 }
5557 return new(getNumOperands()) InvokeInst(*this);
5558}
5559
5560CallBrInst *CallBrInst::cloneImpl() const {
5561 if (hasOperandBundles()) {
5562 unsigned DescriptorBytes = getNumOperandBundles() * sizeof(BundleOpInfo);
5563 return new (getNumOperands(), DescriptorBytes) CallBrInst(*this);
5564 }
5565 return new (getNumOperands()) CallBrInst(*this);
5566}
5567
5568ResumeInst *ResumeInst::cloneImpl() const { return new (1) ResumeInst(*this); }
5569
5570CleanupReturnInst *CleanupReturnInst::cloneImpl() const {
5571 return new (getNumOperands()) CleanupReturnInst(*this);
5572}
5573
5574CatchReturnInst *CatchReturnInst::cloneImpl() const {
5575 return new (getNumOperands()) CatchReturnInst(*this);
5576}
5577
5578CatchSwitchInst *CatchSwitchInst::cloneImpl() const {
5579 return new CatchSwitchInst(*this);
5580}
5581
5582FuncletPadInst *FuncletPadInst::cloneImpl() const {
5583 return new (getNumOperands()) FuncletPadInst(*this);
5584}
5585
5586UnreachableInst *UnreachableInst::cloneImpl() const {
5587 LLVMContext &Context = getContext();
5588 return new UnreachableInst(Context);
5589}
5590
5591FreezeInst *FreezeInst::cloneImpl() const {
5592 return new FreezeInst(getOperand(0));
5593}
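// Editor's sketch (illustrative, not part of Instructions.cpp): all of the
// cloneImpl() overrides above are reached through Instruction::clone(), for
// example when duplicating an instruction into another position. The helper
// name is an assumption.
#include "llvm/IR/Instruction.h"

using namespace llvm;

static Instruction *duplicateBefore(Instruction &I, Instruction &InsertPt) {
  // clone() dispatches to the matching cloneImpl(), then copies the optional
  // flags and metadata; the copy starts out unnamed and not inserted anywhere.
  Instruction *NewI = I.clone();
  NewI->insertBefore(&InsertPt);
  return NewI;
}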
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
static const LLT S1
Rewrite undef for PHI
Atomic ordering constants.
This file contains the simple types necessary to represent the attributes associated with functions a...
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
This file contains the declarations for the subclasses of Constant, which represent the different fla...
return RetTy
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
#define LLVM_DEBUG(X)
Definition: Debug.h:101
std::string Name
uint64_t Size
bool End
Definition: ELF_riscv.cpp:480
static bool isSigned(unsigned int Opcode)
#define op(i)
hexagon gen pred
static Value * createPlaceholderForShuffleVector(Value *V)
static Align computeAllocaDefaultAlign(Type *Ty, BasicBlock *BB)
static cl::opt< bool > DisableI2pP2iOpt("disable-i2p-p2i-opt", cl::init(false), cl::desc("Disables inttoptr/ptrtoint roundtrip optimization"))
static int matchShuffleAsBitRotate(ArrayRef< int > Mask, int NumSubElts)
Try to lower a vector shuffle as a bit rotation.
static Type * getIndexedTypeInternal(Type *Ty, ArrayRef< IndexTy > IdxList)
static bool isReplicationMaskWithParams(ArrayRef< int > Mask, int ReplicationFactor, int VF)
static bool isIdentityMaskImpl(ArrayRef< int > Mask, int NumOpElts)
static bool isSingleSourceMaskImpl(ArrayRef< int > Mask, int NumOpElts)
static Value * getAISize(LLVMContext &Context, Value *Amt)
static Align computeLoadStoreDefaultAlign(Type *Ty, BasicBlock *BB)
#define F(x, y, z)
Definition: MD5.cpp:55
#define I(x, y, z)
Definition: MD5.cpp:58
This file contains the declarations for metadata subclasses.
Module.h This file contains the declarations for the Module class.
IntegerType * Int32Ty
LLVMContext & Context
#define P(N)
PowerPC Reduce CR logical Operation
This file contains the declarations for profiling metadata utility functions.
const SmallVectorImpl< MachineOperand > & Cond
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
raw_pwrite_stream & OS
This file implements the SmallBitVector class.
This file defines the SmallVector class.
static SymbolRef::Type getType(const Symbol *Sym)
Definition: TapiFile.cpp:40
@ Struct
static std::optional< unsigned > getOpcode(ArrayRef< VPValue * > Values)
Returns the opcode of Values or ~0 if they do not all agree.
Definition: VPlanSLP.cpp:191
Value * RHS
Value * LHS
float convertToFloat() const
Converts this APFloat to host float value.
Definition: APFloat.cpp:5268
Class for arbitrary precision integers.
Definition: APInt.h:76
APInt udiv(const APInt &RHS) const
Unsigned division operation.
Definition: APInt.cpp:1579
void setBit(unsigned BitPosition)
Set the given bit to 1 whose position is given as "bitPosition".
Definition: APInt.h:1308
bool isZero() const
Determine if this value is zero, i.e. all bits are clear.
Definition: APInt.h:358
unsigned countr_zero() const
Count the number of trailing zero bits.
Definition: APInt.h:1589
unsigned countl_zero() const
The APInt version of std::countl_zero.
Definition: APInt.h:1548
uint64_t getLimitedValue(uint64_t Limit=UINT64_MAX) const
If this value is smaller than the specified limit, return it, otherwise return the limit value.
Definition: APInt.h:453
static APInt getZero(unsigned numBits)
Get the '0' value for the specified bit-width.
Definition: APInt.h:178
This class represents a conversion between pointers from one address space to another.
AddrSpaceCastInst * cloneImpl() const
Clone an identical AddrSpaceCastInst.
AddrSpaceCastInst(Value *S, Type *Ty, const Twine &NameStr, BasicBlock::iterator InsertBefore)
Constructor with insert-before-instruction semantics.
an instruction to allocate memory on the stack
Definition: Instructions.h:59
std::optional< TypeSize > getAllocationSizeInBits(const DataLayout &DL) const
Get allocation size in bits.
bool isSwiftError() const
Return true if this alloca is used as a swifterror argument to a call.
Definition: Instructions.h:157
bool isStaticAlloca() const
Return true if this alloca is in the entry block of the function and is a constant size.
Align getAlign() const
Return the alignment of the memory that is being allocated by the instruction.
Definition: Instructions.h:132
AllocaInst * cloneImpl() const
AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize, const Twine &Name, BasicBlock::iterator InsertBefore)
Type * getAllocatedType() const
Return the type that is being allocated by the instruction.
Definition: Instructions.h:125
bool isUsedWithInAlloca() const
Return true if this alloca is used as an inalloca argument to a call.
Definition: Instructions.h:147
unsigned getAddressSpace() const
Return the address space for the allocation.
Definition: Instructions.h:112
std::optional< TypeSize > getAllocationSize(const DataLayout &DL) const
Get allocation size in bytes.
bool isArrayAllocation() const
Return true if there is an allocation size parameter to the allocation instruction that is not 1.
void setAlignment(Align Align)
Definition: Instructions.h:136
const Value * getArraySize() const
Get the number of elements allocated.
Definition: Instructions.h:103
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition: ArrayRef.h:41
iterator end() const
Definition: ArrayRef.h:154
size_t size() const
size - Get the array size.
Definition: ArrayRef.h:165
iterator begin() const
Definition: ArrayRef.h:153
bool empty() const
empty - Check if the array is empty.
Definition: ArrayRef.h:160
ArrayRef< T > slice(size_t N, size_t M) const
slice(n, m) - Chop off the first N elements of the array, and keep M elements in the array.
Definition: ArrayRef.h:195
Class to represent array types.
Definition: DerivedTypes.h:371
An instruction that atomically checks whether a specified value is in a memory location,...
Definition: Instructions.h:539
void setSyncScopeID(SyncScope::ID SSID)
Sets the synchronization scope ID of this cmpxchg instruction.
Definition: Instructions.h:669
bool isVolatile() const
Return true if this is a cmpxchg from a volatile memory location.
Definition: Instructions.h:599
void setFailureOrdering(AtomicOrdering Ordering)
Sets the failure ordering constraint of this cmpxchg instruction.
Definition: Instructions.h:643
AtomicOrdering getFailureOrdering() const
Returns the failure ordering constraint of this cmpxchg instruction.
Definition: Instructions.h:638
void setSuccessOrdering(AtomicOrdering Ordering)
Sets the success ordering constraint of this cmpxchg instruction.
Definition: Instructions.h:631
AtomicCmpXchgInst * cloneImpl() const
Align getAlign() const
Return the alignment of the memory that is being allocated by the instruction.
Definition: Instructions.h:588
bool isWeak() const
Return true if this cmpxchg may spuriously fail.
Definition: Instructions.h:606
void setAlignment(Align Align)
Definition: Instructions.h:592
AtomicOrdering getSuccessOrdering() const
Returns the success ordering constraint of this cmpxchg instruction.
Definition: Instructions.h:626
AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal, Align Alignment, AtomicOrdering SuccessOrdering, AtomicOrdering FailureOrdering, SyncScope::ID SSID, BasicBlock::iterator InsertBefore)
SyncScope::ID getSyncScopeID() const
Returns the synchronization scope ID of this cmpxchg instruction.
Definition: Instructions.h:664
an instruction that atomically reads a memory location, combines it with another value,...
Definition: Instructions.h:748
Align getAlign() const
Return the alignment of the memory that is being allocated by the instruction.
Definition: Instructions.h:867
AtomicRMWInst * cloneImpl() const
bool isVolatile() const
Return true if this is a RMW on a volatile memory location.
Definition: Instructions.h:877
BinOp
This enumeration lists the possible modifications atomicrmw can make.
Definition: Instructions.h:760
@ Add
*p = old + v
Definition: Instructions.h:764
@ FAdd
*p = old + v
Definition: Instructions.h:785
@ Min
*p = old <signed v ? old : v
Definition: Instructions.h:778
@ Or
*p = old | v
Definition: Instructions.h:772
@ Sub
*p = old - v
Definition: Instructions.h:766
@ And
*p = old & v
Definition: Instructions.h:768
@ Xor
*p = old ^ v
Definition: Instructions.h:774
@ FSub
*p = old - v
Definition: Instructions.h:788
@ UIncWrap
Increment one up to a maximum value.
Definition: Instructions.h:800
@ Max
*p = old >signed v ? old : v
Definition: Instructions.h:776
@ UMin
*p = old <unsigned v ? old : v
Definition: Instructions.h:782
@ FMin
*p = minnum(old, v) minnum matches the behavior of llvm.minnum.
Definition: Instructions.h:796
@ UMax
*p = old >unsigned v ? old : v
Definition: Instructions.h:780
@ FMax
*p = maxnum(old, v) maxnum matches the behavior of llvm.maxnum.
Definition: Instructions.h:792
@ UDecWrap
Decrement one until a minimum value or zero.
Definition: Instructions.h:804
@ Nand
*p = ~(old & v)
Definition: Instructions.h:770
void setSyncScopeID(SyncScope::ID SSID)
Sets the synchronization scope ID of this rmw instruction.
Definition: Instructions.h:906
void setOrdering(AtomicOrdering Ordering)
Sets the ordering constraint of this rmw instruction.
Definition: Instructions.h:892
AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val, Align Alignment, AtomicOrdering Ordering, SyncScope::ID SSID, BasicBlock::iterator InsertBefore)
void setOperation(BinOp Operation)
Definition: Instructions.h:861
BinOp getOperation() const
Definition: Instructions.h:845
SyncScope::ID getSyncScopeID() const
Returns the synchronization scope ID of this rmw instruction.
Definition: Instructions.h:901
void setAlignment(Align Align)
Definition: Instructions.h:871
static StringRef getOperationName(BinOp Op)
AtomicOrdering getOrdering() const
Returns the ordering constraint of this rmw instruction.
Definition: Instructions.h:887
bool hasAttrSomewhere(Attribute::AttrKind Kind, unsigned *Index=nullptr) const
Return true if the specified attribute is set for at least one parameter or for the return value.
FPClassTest getRetNoFPClass() const
Get the disallowed floating-point classes of the return value.
bool hasParamAttr(unsigned ArgNo, Attribute::AttrKind Kind) const
Return true if the attribute exists for the given argument.
Definition: Attributes.h:783
FPClassTest getParamNoFPClass(unsigned ArgNo) const
Get the disallowed floating-point classes of the argument value.
MemoryEffects getMemoryEffects() const
Returns memory effects of the function.
AttrKind
This enumeration lists the attributes that can be associated with parameters, function results,...
Definition: Attributes.h:85
static Attribute getWithMemoryEffects(LLVMContext &Context, MemoryEffects ME)
Definition: Attributes.cpp:241
LLVM Basic Block Representation.
Definition: BasicBlock.h:60
iterator end()
Definition: BasicBlock.h:442
bool isEntryBlock() const
Return true if this is the entry block of the containing function.
Definition: BasicBlock.cpp:551
const Function * getParent() const
Return the enclosing method, or null if none.
Definition: BasicBlock.h:205
InstListType::iterator iterator
Instruction iterators...
Definition: BasicBlock.h:164
const Module * getModule() const
Return the module owning the function this basic block belongs to, or nullptr if the function does no...
Definition: BasicBlock.cpp:276
BinaryOperator(BinaryOps iType, Value *S1, Value *S2, Type *Ty, const Twine &Name, BasicBlock::iterator InsertBefore)
static BinaryOperator * Create(BinaryOps Op, Value *S1, Value *S2, const Twine &Name, BasicBlock::iterator InsertBefore)
Construct a binary instruction, given the opcode and the two operands.
static BinaryOperator * CreateNSWNeg(Value *Op, const Twine &Name, BasicBlock::iterator InsertBefore)
BinaryOps getOpcode() const
Definition: InstrTypes.h:491
static BinaryOperator * CreateNeg(Value *Op, const Twine &Name, BasicBlock::iterator InsertBefore)
Helper functions to construct and inspect unary operations (NEG and NOT) via binary operators SUB and...
bool swapOperands()
Exchange the two operands to this instruction.
static BinaryOperator * CreateNUWNeg(Value *Op, const Twine &Name, BasicBlock::iterator InsertBefore)
static BinaryOperator * CreateNot(Value *Op, const Twine &Name, BasicBlock::iterator InsertBefore)
BinaryOperator * cloneImpl() const
This class represents a no-op cast from one type to another.
BitCastInst * cloneImpl() const
Clone an identical BitCastInst.
BitCastInst(Value *S, Type *Ty, const Twine &NameStr, BasicBlock::iterator InsertBefore)
Constructor with insert-before-instruction semantics.
Conditional or Unconditional Branch instruction.
void swapSuccessors()
Swap the successors of this branch instruction.
BranchInst * cloneImpl() const
bool isConditional() const
Value * getCondition() const
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
Definition: InstrTypes.h:1455
FPClassTest getParamNoFPClass(unsigned i) const
Extract a test mask for disallowed floating-point value classes for the parameter.
bool isInlineAsm() const
Check if this call is an inline asm statement.
Definition: InstrTypes.h:1770
BundleOpInfo & getBundleOpInfoForOperand(unsigned OpIdx)
Return the BundleOpInfo for the operand at index OpIdx.
void setCallingConv(CallingConv::ID CC)
Definition: InstrTypes.h:1765
FPClassTest getRetNoFPClass() const
Extract a test mask for disallowed floating-point value classes for the return value.
bundle_op_iterator bundle_op_info_begin()
Return the start of the list of BundleOpInfo instances associated with this OperandBundleUser.
Definition: InstrTypes.h:2518
MemoryEffects getMemoryEffects() const
void addFnAttr(Attribute::AttrKind Kind)
Adds the attribute to the function.
Definition: InstrTypes.h:1812
bool doesNotAccessMemory() const
Determine if the call does not access memory.
void getOperandBundlesAsDefs(SmallVectorImpl< OperandBundleDef > &Defs) const
Return the list of operand bundles attached to this instruction as a vector of OperandBundleDefs.
void setOnlyAccessesArgMemory()
OperandBundleUse getOperandBundleAt(unsigned Index) const
Return the operand bundle at a specific index.
Definition: InstrTypes.h:2326
void setOnlyAccessesInaccessibleMemOrArgMem()
std::optional< OperandBundleUse > getOperandBundle(StringRef Name) const
Return an operand bundle by name, if present.
Definition: InstrTypes.h:2357
Function * getCalledFunction() const
Returns the function called, or null if this is an indirect function invocation or the function signa...
Definition: InstrTypes.h:1703
void setDoesNotAccessMemory()
bool hasRetAttr(Attribute::AttrKind Kind) const
Determine whether the return value has the given attribute.
Definition: InstrTypes.h:1906
bool onlyAccessesInaccessibleMemory() const
Determine if the function may only access memory that is inaccessible from the IR.
unsigned getNumOperandBundles() const
Return the number of operand bundles associated with this User.
Definition: InstrTypes.h:2270
CallingConv::ID getCallingConv() const
Definition: InstrTypes.h:1761
bundle_op_iterator bundle_op_info_end()
Return the end of the list of BundleOpInfo instances associated with this OperandBundleUser.
Definition: InstrTypes.h:2535
unsigned getNumSubclassExtraOperandsDynamic() const
Get the number of extra operands for instructions that don't have a fixed number of extra operands.
bool paramHasAttr(unsigned ArgNo, Attribute::AttrKind Kind) const
Determine whether the argument or parameter has the given attribute.
User::op_iterator arg_begin()
Return the iterator pointing to the beginning of the argument list.
Definition: InstrTypes.h:1623
bool isMustTailCall() const
Tests if this call site must be tail call optimized.
bool isIndirectCall() const
Return true if the callsite is an indirect call.
bool onlyReadsMemory() const
Determine if the call does not access or only reads memory.
iterator_range< bundle_op_iterator > bundle_op_infos()
Return the range [bundle_op_info_begin, bundle_op_info_end).
Definition: InstrTypes.h:2551
void setOnlyReadsMemory()
bool onlyAccessesInaccessibleMemOrArgMem() const
Determine if the function may only access memory that is either inaccessible from the IR or pointed t...
static CallBase * Create(CallBase *CB, ArrayRef< OperandBundleDef > Bundles, BasicBlock::iterator InsertPt)
Create a clone of CB with a different set of operand bundles and insert it before InsertPt.
static CallBase * removeOperandBundle(CallBase *CB, uint32_t ID, Instruction *InsertPt=nullptr)
Create a clone of CB with operand bundle ID removed.
Value * getCalledOperand() const
Definition: InstrTypes.h:1696
void setOnlyWritesMemory()
op_iterator populateBundleOperandInfos(ArrayRef< OperandBundleDef > Bundles, const unsigned BeginIndex)
Populate the BundleOpInfo instances and the Use& vector from Bundles.
AttributeList Attrs
parameter attributes for callable
Definition: InstrTypes.h:1469
bool hasOperandBundlesOtherThan(ArrayRef< uint32_t > IDs) const
Return true if this operand bundle user contains operand bundles with tags other than those specified...
Definition: InstrTypes.h:2431
bool isReturnNonNull() const
Return true if the return value is known to be not null.
Value * getArgOperand(unsigned i) const
Definition: InstrTypes.h:1648
FunctionType * FTy
Definition: InstrTypes.h:1470
uint64_t getRetDereferenceableBytes() const
Extract the number of dereferenceable bytes for a call or parameter (0=unknown).
Definition: InstrTypes.h:2118
User::op_iterator arg_end()
Return the iterator pointing to the end of the argument list.
Definition: InstrTypes.h:1629
FunctionType * getFunctionType() const
Definition: InstrTypes.h:1561
Intrinsic::ID getIntrinsicID() const
Returns the intrinsic ID of the intrinsic called or Intrinsic::not_intrinsic if the called function i...
static unsigned CountBundleInputs(ArrayRef< OperandBundleDef > Bundles)
Return the total number of values used in Bundles.
Definition: InstrTypes.h:2581
Value * getArgOperandWithAttribute(Attribute::AttrKind Kind) const
If one of the arguments has the specified attribute, returns its operand value.
void setOnlyAccessesInaccessibleMemory()
bool onlyWritesMemory() const
Determine if the call does not access or only writes memory.
bool hasClobberingOperandBundles() const
Return true if this operand bundle user has operand bundles that may write to the heap.
void setCalledOperand(Value *V)
Definition: InstrTypes.h:1739
bool hasReadingOperandBundles() const
Return true if this operand bundle user has operand bundles that may read from the heap.
bool onlyAccessesArgMemory() const
Determine if the call can access memmory only using pointers based on its arguments.
unsigned arg_size() const
Definition: InstrTypes.h:1646
AttributeList getAttributes() const
Return the parameter attributes for this call.
Definition: InstrTypes.h:1780
static CallBase * addOperandBundle(CallBase *CB, uint32_t ID, OperandBundleDef OB, Instruction *InsertPt=nullptr)
Create a clone of CB with operand bundle OB added.
void setMemoryEffects(MemoryEffects ME)
bool hasOperandBundles() const
Return true if this User has any operand bundles.
Definition: InstrTypes.h:2275
bool isTailCall() const
Tests if this call site is marked as a tail call.
Function * getCaller()
Helper to get the caller (the parent function).
CallBr instruction, tracking function calls that may not return control but instead transfer it to a ...
static CallBrInst * Create(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest, ArrayRef< BasicBlock * > IndirectDests, ArrayRef< Value * > Args, const Twine &NameStr, BasicBlock::iterator InsertBefore)
SmallVector< BasicBlock *, 16 > getIndirectDests() const
void setDefaultDest(BasicBlock *B)
void setIndirectDest(unsigned i, BasicBlock *B)
BasicBlock * getDefaultDest() const
CallBrInst * cloneImpl() const
This class represents a function call, abstracting a target machine's calling convention.
static CallInst * Create(FunctionType *Ty, Value *F, const Twine &NameStr, BasicBlock::iterator InsertBefore)
void updateProfWeight(uint64_t S, uint64_t T)
Updates profile metadata by scaling it by S / T.
TailCallKind getTailCallKind() const
CallInst * cloneImpl() const
This is the base class for all instructions that perform data casts.
Definition: InstrTypes.h:579
static Instruction::CastOps getCastOpcode(const Value *Val, bool SrcIsSigned, Type *Ty, bool DstIsSigned)
Returns the opcode necessary to cast Val into Ty using usual casting rules.
static CastInst * CreateFPCast(Value *S, Type *Ty, const Twine &Name, BasicBlock::iterator InsertBefore)
Create an FPExt, BitCast, or FPTrunc for fp -> fp casts.
Instruction::CastOps getOpcode() const
Return the opcode of this CastInst.
Definition: InstrTypes.h:908
static CastInst * CreateZExtOrBitCast(Value *S, Type *Ty, const Twine &Name, BasicBlock::iterator InsertBefore)
Create a ZExt or BitCast cast instruction.
static CastInst * Create(Instruction::CastOps, Value *S, Type *Ty, const Twine &Name, BasicBlock::iterator InsertBefore)
Provides a way to construct any of the CastInst subclasses using an opcode instead of the subclass's ...
static unsigned isEliminableCastPair(Instruction::CastOps firstOpcode, Instruction::CastOps secondOpcode, Type *SrcTy, Type *MidTy, Type *DstTy, Type *SrcIntPtrTy, Type *MidIntPtrTy, Type *DstIntPtrTy)
Determine how a pair of casts can be eliminated, if they can be at all.
static bool isBitOrNoopPointerCastable(Type *SrcTy, Type *DestTy, const DataLayout &DL)
Check whether a bitcast, inttoptr, or ptrtoint cast between these types is valid and a no-op.
static CastInst * CreatePointerBitCastOrAddrSpaceCast(Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd)
Create a BitCast or an AddrSpaceCast cast instruction.
static bool isBitCastable(Type *SrcTy, Type *DestTy)
Check whether a bitcast between these types is valid.
static CastInst * CreatePointerCast(Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd)
Create a BitCast AddrSpaceCast, or a PtrToInt cast instruction.
static bool isNoopCast(Instruction::CastOps Opcode, Type *SrcTy, Type *DstTy, const DataLayout &DL)
A no-op cast is one that can be effected without changing any bits.
static CastInst * CreateBitOrPointerCast(Value *S, Type *Ty, const Twine &Name, BasicBlock::iterator InsertBefore)
Create a BitCast, a PtrToInt, or an IntToPTr cast instruction.
static CastInst * CreateTruncOrBitCast(Value *S, Type *Ty, const Twine &Name, BasicBlock::iterator InsertBefore)
Create a Trunc or BitCast cast instruction.
static CastInst * CreateSExtOrBitCast(Value *S, Type *Ty, const Twine &Name, BasicBlock::iterator InsertBefore)
Create a SExt or BitCast cast instruction.
bool isIntegerCast() const
There are several places where we need to know if a cast instruction only deals with integer source a...
static CastInst * CreateIntegerCast(Value *S, Type *Ty, bool isSigned, const Twine &Name, BasicBlock::iterator InsertBefore)
Create a ZExt, BitCast, or Trunc for int -> int casts.
static bool castIsValid(Instruction::CastOps op, Type *SrcTy, Type *DstTy)
This method can be used to determine if a cast from SrcTy to DstTy using Opcode op is valid or not.
CatchReturnInst * cloneImpl() const
void setUnwindDest(BasicBlock *UnwindDest)
void addHandler(BasicBlock *Dest)
Add an entry to the switch instruction... Note: This action invalidates handler_end().
CatchSwitchInst * cloneImpl() const
Value * getParentPad() const
void setParentPad(Value *ParentPad)
BasicBlock * getUnwindDest() const
void removeHandler(handler_iterator HI)
bool hasUnwindDest() const
CleanupReturnInst * cloneImpl() const
This class is the base class for the comparison instructions.
Definition: InstrTypes.h:955
Predicate getStrictPredicate() const
For example, SGE -> SGT, SLE -> SLT, ULE -> ULT, UGE -> UGT.
Definition: InstrTypes.h:1159
bool isEquality() const
Determine if this is an equals/not equals predicate.
Definition: InstrTypes.h:1216
void setPredicate(Predicate P)
Set the predicate for this instruction to the specified value.
Definition: InstrTypes.h:1069
bool isFalseWhenEqual() const
This is just a convenience.
Definition: InstrTypes.h:1281
Predicate getSignedPredicate()
For example, ULT->SLT, ULE->SLE, UGT->SGT, UGE->SGE, SLT->Failed assert.
Definition: InstrTypes.h:1245
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
Definition: InstrTypes.h:965
@ FCMP_OEQ
0 0 0 1 True if ordered and equal
Definition: InstrTypes.h:968
@ FCMP_TRUE
1 1 1 1 Always true (always folded)
Definition: InstrTypes.h:982
@ ICMP_SLT
signed less than
Definition: InstrTypes.h:994
@ ICMP_SLE
signed less or equal
Definition: InstrTypes.h:995
@ FCMP_OLT
0 1 0 0 True if ordered and less than
Definition: InstrTypes.h:971
@ FCMP_ULE
1 1 0 1 True if unordered, less than, or equal
Definition: InstrTypes.h:980
@ FCMP_OGT
0 0 1 0 True if ordered and greater than
Definition: InstrTypes.h:969
@ FCMP_OGE
0 0 1 1 True if ordered and greater than or equal
Definition: InstrTypes.h:970
@ ICMP_UGE
unsigned greater or equal
Definition: InstrTypes.h:989
@ ICMP_UGT
unsigned greater than
Definition: InstrTypes.h:988
@ ICMP_SGT
signed greater than
Definition: InstrTypes.h:992
@ FCMP_ULT
1 1 0 0 True if unordered or less than
Definition: InstrTypes.h:979
@ FCMP_ONE
0 1 1 0 True if ordered and operands are unequal
Definition: InstrTypes.h:973
@ FCMP_UEQ
1 0 0 1 True if unordered or equal
Definition: InstrTypes.h:976
@ ICMP_ULT
unsigned less than
Definition: InstrTypes.h:990
@ FCMP_UGT
1 0 1 0 True if unordered or greater than
Definition: InstrTypes.h:977
@ FCMP_OLE
0 1 0 1 True if ordered and less than or equal
Definition: InstrTypes.h:972
@ FCMP_ORD
0 1 1 1 True if ordered (no nans)
Definition: InstrTypes.h:974
@ ICMP_EQ
equal
Definition: InstrTypes.h:986
@ ICMP_NE
not equal
Definition: InstrTypes.h:987
@ ICMP_SGE
signed greater or equal
Definition: InstrTypes.h:993
@ FCMP_UNE
1 1 1 0 True if unordered or not equal
Definition: InstrTypes.h:981
@ ICMP_ULE
unsigned less or equal
Definition: InstrTypes.h:991
@ FCMP_UGE
1 0 1 1 True if unordered, greater than, or equal
Definition: InstrTypes.h:978
@ FCMP_FALSE
0 0 0 0 Always false (always folded)
Definition: InstrTypes.h:967
@ FCMP_UNO
1 0 0 0 True if unordered: isnan(X) | isnan(Y)
Definition: InstrTypes.h:975
bool isSigned() const
Definition: InstrTypes.h:1226
Predicate getSwappedPredicate() const
For example, EQ->EQ, SLE->SGE, ULT->UGT, OEQ->OEQ, ULE->UGE, OLT->OGT, etc.
Definition: InstrTypes.h:1128
static CmpInst * Create(OtherOps Op, Predicate predicate, Value *S1, Value *S2, const Twine &Name, BasicBlock::iterator InsertBefore)
Construct a compare instruction, given the opcode, the predicate and the two operands.
bool isTrueWhenEqual() const
This is just a convenience.
Definition: InstrTypes.h:1275
Predicate getUnsignedPredicate()
For example, SLT->ULT, SLE->ULE, SGT->UGT, SGE->UGE, ULT->Failed assert.
Definition: InstrTypes.h:1257
Predicate getNonStrictPredicate() const
For example, SGT -> SGE, SLT -> SLE, ULT -> ULE, UGT -> UGE.
Definition: InstrTypes.h:1172
bool isNonStrictPredicate() const
Definition: InstrTypes.h:1153
bool isFPPredicate() const
Definition: InstrTypes.h:1083
void swapOperands()
This is just a convenience that dispatches to the subclasses.
CmpInst(Type *ty, Instruction::OtherOps op, Predicate pred, Value *LHS, Value *RHS, const Twine &Name, BasicBlock::iterator InsertBefore, Instruction *FlagsSource=nullptr)
Predicate getInversePredicate() const
For example, EQ -> NE, UGT -> ULE, SLT -> SGE, OEQ -> UNE, UGT -> OLE, OLT -> UGE,...
Definition: InstrTypes.h:1090
static StringRef getPredicateName(Predicate P)
Predicate getPredicate() const
Return the predicate for this instruction.
Definition: InstrTypes.h:1066
bool isStrictPredicate() const
Definition: InstrTypes.h:1144
static bool isUnordered(Predicate predicate)
Determine if the predicate is an unordered operation.
Predicate getFlippedStrictnessPredicate() const
For predicate of kind "is X or equal to 0" returns the predicate "is X".
Definition: InstrTypes.h:1194
static bool isImpliedTrueByMatchingCmp(Predicate Pred1, Predicate Pred2)
Determine if Pred1 implies Pred2 is true when two compares have matching operands.
Predicate getFlippedSignednessPredicate()
For example, SLT->ULT, ULT->SLT, SLE->ULE, ULE->SLE, EQ->Failed assert.
Definition: InstrTypes.h:1269
bool isIntPredicate() const
Definition: InstrTypes.h:1084
static bool isOrdered(Predicate predicate)
Determine if the predicate is an ordered operation.
static bool isImpliedFalseByMatchingCmp(Predicate Pred1, Predicate Pred2)
Determine if Pred1 implies Pred2 is false when two compares have matching operands.
bool isUnsigned() const
Definition: InstrTypes.h:1232
bool isCommutative() const
This is just a convenience that dispatches to the subclasses.
bool isRelational() const
Return true if the predicate is relational (not EQ or NE).
Definition: InstrTypes.h:1222
ConstantFP - Floating Point Values [float, double].
Definition: Constants.h:267
const APFloat & getValueAPF() const
Definition: Constants.h:310
This is the shared class of boolean and integer constants.
Definition: Constants.h:79
static Constant * get(ArrayRef< Constant * > V)
Definition: Constants.cpp:1398
This is an important base class in LLVM.
Definition: Constant.h:41
static Constant * getAllOnesValue(Type *Ty)
Definition: Constants.cpp:417
static Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
Definition: Constants.cpp:370
This class represents an Operation in the Expression.
A parsed version of the target data layout string in and methods for querying it.
Definition: DataLayout.h:110
static constexpr ElementCount getFixed(ScalarTy MinVal)
Definition: TypeSize.h:296
This instruction extracts a single (scalar) element from a VectorType value.
ExtractElementInst * cloneImpl() const
static ExtractElementInst * Create(Value *Vec, Value *Idx, const Twine &NameStr, BasicBlock::iterator InsertBefore)
static bool isValidOperands(const Value *Vec, const Value *Idx)
Return true if an extractelement instruction can be formed with the specified operands.
This instruction extracts a struct member or array element value from an aggregate value.
static Type * getIndexedType(Type *Agg, ArrayRef< unsigned > Idxs)
Returns the type of the element that would be extracted with an extractvalue instruction with the spe...
ExtractValueInst * cloneImpl() const
This instruction compares its operands according to the predicate given to the constructor.
bool isEquality() const
static bool compare(const APFloat &LHS, const APFloat &RHS, FCmpInst::Predicate Pred)
Return result of LHS Pred RHS comparison.
FCmpInst * cloneImpl() const
Clone an identical FCmpInst.
This class represents an extension of floating point types.
FPExtInst * cloneImpl() const
Clone an identical FPExtInst.
FPExtInst(Value *S, Type *Ty, const Twine &NameStr, BasicBlock::iterator InsertBefore)
Constructor with insert-before-instruction semantics.
float getFPAccuracy() const
Get the maximum error permitted by this operation in ULPs.
This class represents a cast from floating point to signed integer.
FPToSIInst * cloneImpl() const
Clone an identical FPToSIInst.
FPToSIInst(Value *S, Type *Ty, const Twine &NameStr, BasicBlock::iterator InsertBefore)
Constructor with insert-before-instruction semantics.
This class represents a cast from floating point to unsigned integer.
FPToUIInst(Value *S, Type *Ty, const Twine &NameStr, BasicBlock::iterator InsertBefore)
Constructor with insert-before-instruction semantics.
FPToUIInst * cloneImpl() const
Clone an identical FPToUIInst.
This class represents a truncation of floating point types.
FPTruncInst(Value *S, Type *Ty, const Twine &NameStr, BasicBlock::iterator InsertBefore)
Constructor with insert-before-instruction semantics.
FPTruncInst * cloneImpl() const
Clone an identical FPTruncInst.
An instruction for ordering other memory operations.
Definition: Instructions.h:460
FenceInst(LLVMContext &C, AtomicOrdering Ordering, SyncScope::ID SSID, BasicBlock::iterator InsertBefore)
SyncScope::ID getSyncScopeID() const
Returns the synchronization scope ID of this fence instruction.
Definition: Instructions.h:498
void setSyncScopeID(SyncScope::ID SSID)
Sets the synchronization scope ID of this fence instruction.
Definition: Instructions.h:503
FenceInst * cloneImpl() const
void setOrdering(AtomicOrdering Ordering)
Sets the ordering constraint of this fence instruction.
Definition: Instructions.h:493
AtomicOrdering getOrdering() const
Returns the ordering constraint of this fence instruction.
Definition: Instructions.h:487
Class to represent fixed width SIMD vectors.
Definition: DerivedTypes.h:539
unsigned getNumElements() const
Definition: DerivedTypes.h:582
This class represents a freeze function that returns random concrete value if an operand is either a ...
FreezeInst(Value *S, const Twine &NameStr, BasicBlock::iterator InsertBefore)
FreezeInst * cloneImpl() const
Clone an identical FreezeInst.
void setParentPad(Value *ParentPad)
Definition: InstrTypes.h:2664
Value * getParentPad() const
Convenience accessors.
Definition: InstrTypes.h:2663
FuncletPadInst * cloneImpl() const
Class to represent function types.
Definition: DerivedTypes.h:103
unsigned getNumParams() const
Return the number of fixed parameters this function type requires.
Definition: DerivedTypes.h:142
Type * getParamType(unsigned i) const
Parameter type accessors.
Definition: DerivedTypes.h:135
bool isVarArg() const
Definition: DerivedTypes.h:123
an instruction for type-safe pointer arithmetic to access elements of arrays and structs
Definition: Instructions.h:973
bool isInBounds() const
Determine whether the GEP has the inbounds flag.
static Type * getTypeAtIndex(Type *Ty, Value *Idx)
Return the type of the element at the given index of an indexable type.
bool hasAllZeroIndices() const
Return true if all of the indices of this GEP are zeros.
bool hasAllConstantIndices() const
Return true if all of the indices of this GEP are constant integers.
bool collectOffset(const DataLayout &DL, unsigned BitWidth, MapVector< Value *, APInt > &VariableOffsets, APInt &ConstantOffset) const
void setIsInBounds(bool b=true)
Set or clear the inbounds flag on this GEP instruction.
static Type * getIndexedType(Type *Ty, ArrayRef< Value * > IdxList)
Returns the result type of a getelementptr with the given source element type and indexes.
bool accumulateConstantOffset(const DataLayout &DL, APInt &Offset) const
Accumulate the constant address offset of this GEP if possible.
GetElementPtrInst * cloneImpl() const
This instruction compares its operands according to the predicate given to the constructor.
static bool compare(const APInt &LHS, const APInt &RHS, ICmpInst::Predicate Pred)
Return result of LHS Pred RHS comparison.
ICmpInst * cloneImpl() const
Clone an identical ICmpInst.
Predicate getSignedPredicate() const
For example, EQ->EQ, SLE->SLE, UGT->SGT, etc.
bool isEquality() const
Return true if this predicate is either EQ or NE.
Predicate getUnsignedPredicate() const
For example, EQ->EQ, SLE->ULE, UGT->UGT, etc.
Indirect Branch Instruction.
void addDestination(BasicBlock *Dest)
Add a destination.
void removeDestination(unsigned i)
This method removes the specified successor from the indirectbr instruction.
IndirectBrInst * cloneImpl() const
This instruction inserts a single (scalar) element into a VectorType value.
InsertElementInst * cloneImpl() const
static bool isValidOperands(const Value *Vec, const Value *NewElt, const Value *Idx)
Return true if an insertelement instruction can be formed with the specified operands.
static InsertElementInst * Create(Value *Vec, Value *NewElt, Value *Idx, const Twine &NameStr, BasicBlock::iterator InsertBefore)
This instruction inserts a struct field of array element value into an aggregate value.
InsertValueInst * cloneImpl() const
BitfieldElement::Type getSubclassData() const
Definition: Instruction.h:992
void copyIRFlags(const Value *V, bool IncludeWrapFlags=true)
Convenience method to copy supported exact, fast-math, and (optionally) wrapping flags from V to this...
void insertBefore(Instruction *InsertPos)
Insert an unlinked instruction into a basic block immediately before the specified instruction.
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
Definition: Instruction.h:453
bool isCommutative() const LLVM_READONLY
Return true if the instruction is commutative:
const BasicBlock * getParent() const
Definition: Instruction.h:151
InstListType::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
MDNode * getMetadata(unsigned KindID) const
Get the metadata of given kind attached to this Instruction.
Definition: Instruction.h:358
void swapProfMetadata()
If the instruction has "branch_weights" MD_prof metadata and the MDNode has three operands (including...
void setMetadata(unsigned KindID, MDNode *Node)
Set the metadata of the specified kind to the specified node.
Definition: Metadata.cpp:1633
unsigned getOpcode() const
Returns a member of one of the enums like Instruction::Add.
Definition: Instruction.h:251
InstListType::iterator insertInto(BasicBlock *ParentBB, InstListType::iterator It)
Inserts an unlinked instruction into ParentBB at position It and returns the iterator of the inserted...
This class represents a cast from an integer to a pointer.
IntToPtrInst * cloneImpl() const
Clone an identical IntToPtrInst.
IntToPtrInst(Value *S, Type *Ty, const Twine &NameStr, BasicBlock::iterator InsertBefore)
Constructor with insert-before-instruction semantics.
Invoke instruction.
BasicBlock * getUnwindDest() const
void setNormalDest(BasicBlock *B)
InvokeInst * cloneImpl() const
LandingPadInst * getLandingPadInst() const
Get the landingpad instruction from the landing pad block (the unwind destination).
void setUnwindDest(BasicBlock *B)
static InvokeInst * Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal, BasicBlock *IfException, ArrayRef< Value * > Args, const Twine &NameStr, BasicBlock::iterator InsertBefore)
BasicBlock * getNormalDest() const
This is an important class for using LLVM in a threaded context.
Definition: LLVMContext.h:67
LLVMContextImpl *const pImpl
Definition: LLVMContext.h:69
The landingpad instruction holds all of the information necessary to generate correct exception handl...
bool isCleanup() const
Return 'true' if this landingpad instruction is a cleanup.
LandingPadInst * cloneImpl() const
void addClause(Constant *ClauseVal)
Add a catch or filter clause to the landing pad.
void setCleanup(bool V)
Indicate that this landingpad instruction is a cleanup.
static LandingPadInst * Create(Type *RetTy, unsigned NumReservedClauses, const Twine &NameStr, BasicBlock::iterator InsertBefore)
Constructors - NumReservedClauses is a hint for the number of incoming clauses that this landingpad w...
An instruction for reading from memory.
Definition: Instructions.h:184
void setAlignment(Align Align)
Definition: Instructions.h:240
bool isVolatile() const
Return true if this is a load from a volatile memory location.
Definition: Instructions.h:230
void setAtomic(AtomicOrdering Ordering, SyncScope::ID SSID=SyncScope::System)
Sets the ordering constraint and the synchronization scope ID of this load instruction.
Definition: Instructions.h:266
LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, BasicBlock::iterator InsertBefore)
LoadInst * cloneImpl() const
AtomicOrdering getOrdering() const
Returns the ordering constraint of this load instruction.
Definition: Instructions.h:245
void setVolatile(bool V)
Specify whether this is a volatile load or not.
Definition: Instructions.h:233
SyncScope::ID getSyncScopeID() const
Returns the synchronization scope ID of this load instruction.
Definition: Instructions.h:255
Align getAlign() const
Return the alignment of the access that is being performed.
Definition: Instructions.h:236
ConstantAsMetadata * createConstant(Constant *C)
Return the given constant as metadata.
Definition: MDBuilder.cpp:24
MDNode * createBranchWeights(uint32_t TrueWeight, uint32_t FalseWeight)
Return metadata containing two branch weights.
Definition: MDBuilder.cpp:37
Metadata node.
Definition: Metadata.h:1067
const MDOperand & getOperand(unsigned I) const
Definition: Metadata.h:1428
static MDTuple * get(LLVMContext &Context, ArrayRef< Metadata * > MDs)
Definition: Metadata.h:1541
unsigned getNumOperands() const
Return number of MDNode operands.
Definition: Metadata.h:1434
This class implements a map that also provides access to all stored values in a deterministic order.
Definition: MapVector.h:36
static MemoryEffectsBase readOnly()
Create MemoryEffectsBase that can read any memory.
Definition: ModRef.h:122
bool onlyWritesMemory() const
Whether this function only (at most) writes memory.
Definition: ModRef.h:198
bool doesNotAccessMemory() const
Whether this function accesses no memory.
Definition: ModRef.h:192
static MemoryEffectsBase argMemOnly(ModRefInfo MR=ModRefInfo::ModRef)
Create MemoryEffectsBase that can only access argument memory.
Definition: ModRef.h:132
static MemoryEffectsBase inaccessibleMemOnly(ModRefInfo MR=ModRefInfo::ModRef)
Create MemoryEffectsBase that can only access inaccessible memory.
Definition: ModRef.h:138
bool onlyAccessesInaccessibleMem() const
Whether this function only (at most) accesses inaccessible memory.
Definition: ModRef.h:211
bool onlyAccessesArgPointees() const
Whether this function only (at most) accesses argument memory.
Definition: ModRef.h:201
bool onlyReadsMemory() const
Whether this function only (at most) reads memory.
Definition: ModRef.h:195
static MemoryEffectsBase writeOnly()
Create MemoryEffectsBase that can write any memory.
Definition: ModRef.h:127
static MemoryEffectsBase inaccessibleOrArgMemOnly(ModRefInfo MR=ModRefInfo::ModRef)
Create MemoryEffectsBase that can only access inaccessible or argument memory.
Definition: ModRef.h:145
static MemoryEffectsBase none()
Create MemoryEffectsBase that cannot read or write any memory.
Definition: ModRef.h:117
bool onlyAccessesInaccessibleOrArgMem() const
Whether this function only (at most) accesses argument and inaccessible memory.
Definition: ModRef.h:217
const DataLayout & getDataLayout() const
Get the data layout for the module's target platform.
Definition: Module.h:287
A container for an operand bundle being viewed as a set of values rather than a set of uses.
Definition: InstrTypes.h:1408
StringRef getTag() const
Definition: InstrTypes.h:1431
iterator_range< const_block_iterator > blocks() const
void allocHungoffUses(unsigned N)
const_block_iterator block_begin() const
void removeIncomingValueIf(function_ref< bool(unsigned)> Predicate, bool DeletePHIIfEmpty=true)
Remove all incoming values for which the predicate returns true.
Value * removeIncomingValue(unsigned Idx, bool DeletePHIIfEmpty=true)
Remove an incoming value.
bool hasConstantOrUndefValue() const
Whether the specified PHI node always merges together the same value, assuming undefs are equal to a ...
void copyIncomingBlocks(iterator_range< const_block_iterator > BBRange, uint32_t ToIdx=0)
Copies the basic blocks from BBRange to the incoming basic block list of this PHINode,...
const_block_iterator block_end() const
Value * getIncomingValue(unsigned i) const
Return incoming value number x.
Value * hasConstantValue() const
If the specified PHI node always merges together the same value, return the value,...
PHINode * cloneImpl() const
unsigned getNumIncomingValues() const
Return the number of incoming edges.
Class to represent pointers.
Definition: DerivedTypes.h:646
unsigned getAddressSpace() const
Return the address space of the Pointer type.
Definition: DerivedTypes.h:679
static PoisonValue * get(Type *T)
Static factory methods - Return an 'poison' object of the specified type.
Definition: Constants.cpp:1827
This class represents a cast from a pointer to an integer.
PtrToIntInst(Value *S, Type *Ty, const Twine &NameStr, BasicBlock::iterator InsertBefore)
Constructor with insert-before-instruction semantics.
PtrToIntInst * cloneImpl() const
Clone an identical PtrToIntInst.
Resume the propagation of an exception.
ResumeInst * cloneImpl() const
Return a value (possibly void), from a function.
ReturnInst * cloneImpl() const
This class represents a sign extension of integer types.
SExtInst(Value *S, Type *Ty, const Twine &NameStr, BasicBlock::iterator InsertBefore)
Constructor with insert-before-instruction semantics.
SExtInst * cloneImpl() const
Clone an identical SExtInst.
This class represents a cast from signed integer to floating point.
SIToFPInst * cloneImpl() const
Clone an identical SIToFPInst.
SIToFPInst(Value *S, Type *Ty, const Twine &NameStr, BasicBlock::iterator InsertBefore)
Constructor with insert-before-instruction semantics.
Class to represent scalable SIMD vectors.
Definition: DerivedTypes.h:586
This class represents the LLVM 'select' instruction.
SelectInst * cloneImpl() const
static const char * areInvalidOperands(Value *Cond, Value *True, Value *False)
Return a string if the specified operands are invalid for a select operation, otherwise return null.
static SelectInst * Create(Value *C, Value *S1, Value *S2, const Twine &NameStr, BasicBlock::iterator InsertBefore, Instruction *MDFrom=nullptr)
This instruction constructs a fixed permutation of two input vectors.
static bool isZeroEltSplatMask(ArrayRef< int > Mask, int NumSrcElts)
Return true if this shuffle mask chooses all elements with the same value as the first element of exa...
ArrayRef< int > getShuffleMask() const
static bool isSpliceMask(ArrayRef< int > Mask, int NumSrcElts, int &Index)
Return true if this shuffle mask is a splice mask, concatenating the two inputs together and then ext...
int getMaskValue(unsigned Elt) const
Return the shuffle mask value of this instruction for the given element index.
static bool isValidOperands(const Value *V1, const Value *V2, const Value *Mask)
Return true if a shufflevector instruction can be formed with the specified operands.
ShuffleVectorInst(Value *V1, Value *Mask, const Twine &NameStr, BasicBlock::iterator InsertBefore)
static bool isSelectMask(ArrayRef< int > Mask, int NumSrcElts)
Return true if this shuffle mask chooses elements from its source vectors without lane crossings.
static bool isBitRotateMask(ArrayRef< int > Mask, unsigned EltSizeInBits, unsigned MinSubElts, unsigned MaxSubElts, unsigned &NumSubElts, unsigned &RotateAmt)
Checks if the shuffle is a bit rotation of the first operand across multiple subelements,...
VectorType * getType() const
Overload to return most specific vector type.
bool isIdentityWithExtract() const
Return true if this shuffle extracts the first N elements of exactly one source vector.
static bool isOneUseSingleSourceMask(ArrayRef< int > Mask, int VF)
Return true if this shuffle mask represents "clustered" mask of size VF, i.e.
bool isIdentityWithPadding() const
Return true if this shuffle lengthens exactly one source vector with undefs in the high elements.
static bool isSingleSourceMask(ArrayRef< int > Mask, int NumSrcElts)
Return true if this shuffle mask chooses elements from exactly one source vector.
bool isConcat() const
Return true if this shuffle concatenates its 2 source vectors.
ShuffleVectorInst * cloneImpl() const
static bool isIdentityMask(ArrayRef< int > Mask, int NumSrcElts)
Return true if this shuffle mask chooses elements from exactly one source vector without lane crossin...
static bool isExtractSubvectorMask(ArrayRef< int > Mask, int NumSrcElts, int &Index)
Return true if this shuffle mask is an extract subvector mask.
void setShuffleMask(ArrayRef< int > Mask)
bool isInterleave(unsigned Factor)
Return if this shuffle interleaves its two input vectors together.
static bool isReverseMask(ArrayRef< int > Mask, int NumSrcElts)
Return true if this shuffle mask swaps the order of elements from exactly one source vector.
static bool isTransposeMask(ArrayRef< int > Mask, int NumSrcElts)
Return true if this shuffle mask is a transpose mask.
void commute()
Swap the operands and adjust the mask to preserve the semantics of the instruction.
static bool isInsertSubvectorMask(ArrayRef< int > Mask, int NumSrcElts, int &NumSubElts, int &Index)
Return true if this shuffle mask is an insert subvector mask.
static Constant * convertShuffleMaskForBitcode(ArrayRef< int > Mask, Type *ResultTy)
static bool isReplicationMask(ArrayRef< int > Mask, int &ReplicationFactor, int &VF)
Return true if this shuffle mask replicates each of the VF elements in a vector ReplicationFactor tim...
static bool isInterleaveMask(ArrayRef< int > Mask, unsigned Factor, unsigned NumInputElts, SmallVectorImpl< unsigned > &StartIndexes)
Return true if the mask interleaves one or more input vectors together.
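A minimal sketch (not part of this file) of how the static mask predicates above can be queried on a raw mask; the helper name classifyMask and the sample masks are hypothetical.

#include "llvm/ADT/ArrayRef.h"
#include "llvm/IR/Instructions.h"

// Classify a shuffle mask using the static ShuffleVectorInst predicates.
static const char *classifyMask(llvm::ArrayRef<int> Mask, int NumSrcElts) {
  if (llvm::ShuffleVectorInst::isIdentityMask(Mask, NumSrcElts))
    return "identity";           // picks elements 0..N-1 of one source
  if (llvm::ShuffleVectorInst::isReverseMask(Mask, NumSrcElts))
    return "reverse";            // reverses the order of one source
  int Index;
  if (llvm::ShuffleVectorInst::isExtractSubvectorMask(Mask, NumSrcElts, Index))
    return "extract-subvector";  // contiguous slice starting at Index
  return "other";
}

For example, classifyMask({3, 2, 1, 0}, 4) would report a reverse mask.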
This is a 'bitvector' (really, a variable-sized bit array), optimized for the case when the array is small.
Implements a dense probed hash-table based set with some number of buckets stored inline.
Definition: DenseSet.h:290
size_t size() const
Definition: SmallVector.h:91
This class consists of common code factored out of the SmallVector class to reduce code duplication based on the SmallVector 'N' template parameter.
Definition: SmallVector.h:586
void assign(size_type NumElts, ValueParamT Elt)
Definition: SmallVector.h:717
reference emplace_back(ArgTypes &&... Args)
Definition: SmallVector.h:950
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
Definition: SmallVector.h:696
void resize(size_type N)
Definition: SmallVector.h:651
void push_back(const T &Elt)
Definition: SmallVector.h:426
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1209
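An illustrative, self-contained sketch of the SmallVector members listed above (push_back, emplace_back, append, resize, assign); the element values are arbitrary.

#include <iterator>
#include "llvm/ADT/SmallVector.h"

void smallVectorDemo() {
  llvm::SmallVector<int, 8> V;                 // inline storage for 8 elements
  V.push_back(1);                              // append one element
  V.emplace_back(2);                           // construct an element in place
  int More[] = {3, 4, 5};
  V.append(std::begin(More), std::end(More));  // append a range
  V.resize(10);                                // grow; new elements are value-initialized
  V.assign(4, 7);                              // replace contents with four copies of 7
}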
An instruction for storing to memory.
Definition: Instructions.h:317
AtomicOrdering getOrdering() const
Returns the ordering constraint of this store instruction.
Definition: Instructions.h:378
Align getAlign() const
Definition: Instructions.h:369
void setVolatile(bool V)
Specify whether this is a volatile store or not.
Definition: Instructions.h:364
void setAlignment(Align Align)
Definition: Instructions.h:373
StoreInst * cloneImpl() const
StoreInst(Value *Val, Value *Ptr, Instruction *InsertBefore)
SyncScope::ID getSyncScopeID() const
Returns the synchronization scope ID of this store instruction.
Definition: Instructions.h:389
bool isVolatile() const
Return true if this is a store to a volatile memory location.
Definition: Instructions.h:361
void setAtomic(AtomicOrdering Ordering, SyncScope::ID SSID=SyncScope::System)
Sets the ordering constraint and the synchronization scope ID of this store instruction.
Definition: Instructions.h:400
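A sketch of the StoreInst setters listed above, under the assumption that SI is an existing store whose operands permit an atomic form; the alignment and ordering values are chosen for illustration only.

#include "llvm/IR/Instructions.h"
#include "llvm/Support/Alignment.h"
#include "llvm/Support/AtomicOrdering.h"

void makeVolatileAtomicRelease(llvm::StoreInst &SI) {
  SI.setVolatile(true);                        // mark the store volatile
  SI.setAlignment(llvm::Align(8));             // 8-byte alignment
  SI.setAtomic(llvm::AtomicOrdering::Release); // SyncScope::System by default
}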
StringRef - Represent a constant reference to a string, i.e. a character array and a length, which need not be null terminated.
Definition: StringRef.h:50
Class to represent struct types.
Definition: DerivedTypes.h:216
void setSuccessorWeight(unsigned idx, CaseWeightOpt W)
Instruction::InstListType::iterator eraseFromParent()
Delegate the call to the underlying SwitchInst::eraseFromParent() and mark this object to not touch the underlying SwitchInst in the destructor.
void addCase(ConstantInt *OnVal, BasicBlock *Dest, CaseWeightOpt W)
Delegate the call to the underlying SwitchInst::addCase() and set the specified branch weight for the added case.
CaseWeightOpt getSuccessorWeight(unsigned idx)
std::optional< uint32_t > CaseWeightOpt
SwitchInst::CaseIt removeCase(SwitchInst::CaseIt I)
Delegate the call to the underlying SwitchInst::removeCase() and remove the corresponding branch weight.
void setValue(ConstantInt *V) const
Sets the new value for current case.
void setSuccessor(BasicBlock *S) const
Sets the new successor for current case.
Multiway switch.
SwitchInst * cloneImpl() const
void addCase(ConstantInt *OnVal, BasicBlock *Dest)
Add an entry to the switch instruction.
CaseIteratorImpl< CaseHandle > CaseIt
unsigned getNumCases() const
Return the number of 'cases' in this switch instruction, excluding the default case.
CaseIt removeCase(CaseIt I)
This method removes the specified case and its successor from the switch instruction.
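A hypothetical sketch of the SwitchInst case API above, assuming SI switches on an i32 condition and Dest is a block in the same function.

#include <cassert>
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Instructions.h"

void addTwoCases(llvm::SwitchInst &SI, llvm::BasicBlock *Dest) {
  llvm::IntegerType *I32 = llvm::Type::getInt32Ty(SI.getContext());
  SI.addCase(llvm::ConstantInt::get(I32, 1), Dest); // case 1 -> Dest
  SI.addCase(llvm::ConstantInt::get(I32, 2), Dest); // case 2 -> Dest
  assert(SI.getNumCases() >= 2 && "default case is not counted");
}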
This class represents a truncation of integer types.
TruncInst * cloneImpl() const
Clone an identical TruncInst.
TruncInst(Value *S, Type *Ty, const Twine &NameStr, BasicBlock::iterator InsertBefore)
Constructor with insert-before-instruction semantics.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary values as strings.
Definition: Twine.h:81
The instances of the Type class are immutable: once they are created, they are never changed.
Definition: Type.h:45
bool isVectorTy() const
True if this is an instance of VectorType.
Definition: Type.h:265
bool isIntOrIntVectorTy() const
Return true if this is an integer type or a vector of integer types.
Definition: Type.h:234
bool isPointerTy() const
True if this is an instance of PointerType.
Definition: Type.h:255
static IntegerType * getInt1Ty(LLVMContext &C)
unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
bool isX86_MMXTy() const
Return true if this is X86 MMX.
Definition: Type.h:201
unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
bool isFirstClassType() const
Return true if the type is "first class", meaning it is a valid type for a Value.
Definition: Type.h:281
bool isAggregateType() const
Return true if the type is an aggregate type.
Definition: Type.h:295
LLVMContext & getContext() const
Return the LLVMContext in which this type was uniqued.
Definition: Type.h:129
bool isFloatingPointTy() const
Return true if this is one of the floating-point types.
Definition: Type.h:185
bool isPtrOrPtrVectorTy() const
Return true if this is a pointer type or a vector of pointer types.
Definition: Type.h:262
static IntegerType * getInt32Ty(LLVMContext &C)
static IntegerType * getInt64Ty(LLVMContext &C)
bool isIntegerTy() const
True if this is an instance of IntegerType.
Definition: Type.h:228
bool isTokenTy() const
Return true if this is 'token'.
Definition: Type.h:225
bool isFPOrFPVectorTy() const
Return true if this is a FP type or a vector of FP.
Definition: Type.h:216
TypeSize getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
bool isVoidTy() const
Return true if this is 'void'.
Definition: Type.h:140
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
Definition: Type.h:348
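A small sketch combining the Type queries listed above; the predicate name isWideIntOrIntVector is made up for illustration.

#include "llvm/IR/Type.h"

// True for i64 and wider integers, whether scalar or vector.
bool isWideIntOrIntVector(const llvm::Type *Ty) {
  return Ty->isIntOrIntVectorTy() && Ty->getScalarSizeInBits() >= 64;
}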
This class represents a cast unsigned integer to floating point.
UIToFPInst(Value *S, Type *Ty, const Twine &NameStr, BasicBlock::iterator InsertBefore)
Constructor with insert-before-instruction semantics.
UIToFPInst * cloneImpl() const
Clone an identical UIToFPInst.
UnaryOperator * cloneImpl() const
UnaryOperator(UnaryOps iType, Value *S, Type *Ty, const Twine &Name, BasicBlock::iterator InsertBefore)
static UnaryOperator * Create(UnaryOps Op, Value *S, const Twine &Name, BasicBlock::iterator InsertBefore)
Construct a unary instruction, given the opcode and an operand.
UnaryOps getOpcode() const
Definition: InstrTypes.h:203
static UndefValue * get(Type *T)
Static factory methods - Return an 'undef' object of the specified type.
Definition: Constants.cpp:1808
This function has undefined behavior.
UnreachableInst(LLVMContext &C, BasicBlock::iterator InsertBefore)
UnreachableInst * cloneImpl() const
A Use represents the edge between a Value definition and its users.
Definition: Use.h:43
void set(Value *Val)
Definition: Value.h:882
const Use * getOperandList() const
Definition: User.h:162
op_range operands()
Definition: User.h:242
void allocHungoffUses(unsigned N, bool IsPhi=false)
Allocate the array of Uses, followed by a pointer (with bottom bit set) to the User.
Definition: User.cpp:50
op_iterator op_begin()
Definition: User.h:234
void setNumHungOffUseOperands(unsigned NumOps)
Subclasses with hung off uses need to manage the operand count themselves.
Definition: User.h:215
Use & Op()
Definition: User.h:133
Value * getOperand(unsigned i) const
Definition: User.h:169
unsigned getNumOperands() const
Definition: User.h:191
op_iterator op_end()
Definition: User.h:236
void growHungoffUses(unsigned N, bool IsPhi=false)
Grow the number of hung off uses.
Definition: User.cpp:67
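An illustrative walk over a User's operands with the accessors above; the helper function is hypothetical.

#include "llvm/IR/Constant.h"
#include "llvm/IR/User.h"
#include "llvm/Support/Casting.h"

// Count how many operands of U are constants.
unsigned countConstantOperands(const llvm::User &U) {
  unsigned N = 0;
  for (unsigned I = 0, E = U.getNumOperands(); I != E; ++I)
    if (llvm::isa<llvm::Constant>(U.getOperand(I)))
      ++N;
  return N;
}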
This class represents the va_arg llvm instruction, which returns an argument of the specified type given a va_list and increments it.
VAArgInst * cloneImpl() const
LLVM Value Representation.
Definition: Value.h:74
Type * getType() const
All values are typed, get the type of this value.
Definition: Value.h:255
unsigned char SubclassOptionalData
Hold subclass data that can be dropped.
Definition: Value.h:84
void setName(const Twine &Name)
Change the name of the value.
Definition: Value.cpp:377
void replaceAllUsesWith(Value *V)
Change all uses of this to point to a new Value.
Definition: Value.cpp:534
LLVMContext & getContext() const
All values hold a context through their type.
Definition: Value.cpp:1074
StringRef getName() const
Return a constant reference to the value's name.
Definition: Value.cpp:309
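A sketch of the common rename-and-replace pattern using the Value API above, assuming Old and New have the same type (replaceAllUsesWith requires it).

#include "llvm/ADT/Twine.h"
#include "llvm/IR/Value.h"

void renameAndReplace(llvm::Value &Old, llvm::Value &New) {
  New.setName(Old.getName());   // carry the old name over to the replacement
  Old.replaceAllUsesWith(&New); // redirect every use of Old to New
}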
Base class of all SIMD vector types.
Definition: DerivedTypes.h:403
ElementCount getElementCount() const
Return an ElementCount instance to represent the (possibly scalable) number of elements in the vector.
Definition: DerivedTypes.h:641
static VectorType * get(Type *ElementType, ElementCount EC)
This static method is the primary way to construct an VectorType.
Definition: Type.cpp:676
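A sketch of VectorType::get with an explicit ElementCount, building a fixed <4 x i32> and a scalable <vscale x 2 x i64>; Ctx is any LLVMContext.

#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Type.h"

void makeVectorTypes(llvm::LLVMContext &Ctx) {
  llvm::VectorType *Fixed =
      llvm::VectorType::get(llvm::Type::getInt32Ty(Ctx),
                            llvm::ElementCount::getFixed(4));
  llvm::VectorType *Scalable =
      llvm::VectorType::get(llvm::Type::getInt64Ty(Ctx),
                            llvm::ElementCount::getScalable(2));
  (void)Fixed;
  (void)Scalable;
}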
This class represents zero extension of integer types.
ZExtInst(Value *S, Type *Ty, const Twine &NameStr, BasicBlock::iterator InsertBefore)
Constructor with insert-before-instruction semantics.
ZExtInst * cloneImpl() const
Clone an identical ZExtInst.
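A sketch of the cast-instruction constructor form listed above (insert-before-iterator semantics); the function name and the value name "widened" are illustrative, and V is assumed to be an integer narrower than i64.

#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Type.h"

// Zero-extend an integer value to i64, inserting before InsertPt.
llvm::Value *widenToI64(llvm::Value *V, llvm::BasicBlock::iterator InsertPt) {
  llvm::Type *I64 = llvm::Type::getInt64Ty(V->getContext());
  return new llvm::ZExtInst(V, I64, "widened", InsertPt);
}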
std::pair< iterator, bool > insert(const ValueT &V)
Definition: DenseSet.h:206
size_type size() const
Definition: DenseSet.h:81
bool contains(const_arg_type_t< ValueT > V) const
Check if the set contains the given element.
Definition: DenseSet.h:185
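A self-contained sketch of the DenseSet members above (insert, contains, size).

#include "llvm/ADT/DenseSet.h"

bool denseSetDemo() {
  llvm::DenseSet<int> S;
  S.insert(1);
  bool Fresh = S.insert(2).second; // true: 2 was not present yet
  bool Dup = !S.insert(2).second;  // true: second insert of 2 is rejected
  return Fresh && Dup && S.contains(1) && S.size() == 2;
}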
constexpr ScalarTy getKnownMinValue() const
Returns the minimum value this quantity can represent.
Definition: TypeSize.h:168
An efficient, type-erasing, non-owning reference to a callable.
base_list_type::iterator iterator
Definition: ilist.h:121
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition: raw_ostream.h:52
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr char Attrs[]
Key for Kernel::Metadata::mAttrs.
@ C
The default llvm calling convention, compatible with C.
Definition: CallingConv.h:34
@ CE
Windows NT (Windows on ARM)
initializer< Ty > init(const Ty &Val)
Definition: CommandLine.h:450
@ Switch
The "resume-switch" lowering, where there are separate resume and destroy functions that are shared between individual suspend points.
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
Definition: STLExtras.h:329
@ Offset
Definition: DWP.cpp:456
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1731
auto size(R &&Range, std::enable_if_t< std::is_base_of< std::random_access_iterator_tag, typename std::iterator_traits< decltype(Range.begin())>::iterator_category >::value, void > *=nullptr)
Get the size of a range.
Definition: STLExtras.h:1689
unsigned getPointerAddressSpace(const Type *T)
Definition: SPIRVUtils.h:122
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
MDNode * getBranchWeightMDNode(const Instruction &I)
Get the branch weights metadata node.
auto reverse(ContainerTy &&C)
Definition: STLExtras.h:428
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
Definition: MathExtras.h:264
decltype(auto) get(const PointerIntPair< PointerTy, IntBits, IntType, PtrTraits, Info > &Pair)
FPClassTest
Floating-point class tests, supported by 'is_fpclass' intrinsic.
bool NullPointerIsDefined(const Function *F, unsigned AS=0)
Check whether null pointer dereferencing is considered undefined behavior for a given function or an address space.
Definition: Function.cpp:2014
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:163
bool isPointerTy(const Type *T)
Definition: SPIRVUtils.h:116
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type arguments.
Definition: Casting.h:548
constexpr int PoisonMaskElem
AtomicOrdering
Atomic ordering for LLVM's memory model.
auto remove_if(R &&Range, UnaryPredicate P)
Provide wrappers to std::remove_if which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1770
@ Or
Bitwise or logical OR of integers.
@ Mul
Product of integers.
@ Xor
Bitwise or logical XOR of integers.
@ FMul
Product of floats.
@ And
Bitwise or logical AND of integers.
@ Add
Sum of integers.
@ FAdd
Sum of floats.
raw_ostream & operator<<(raw_ostream &OS, const APFixedPoint &FX)
Definition: APFixedPoint.h:293
OutputIt copy(R &&Range, OutputIt Out)
Definition: STLExtras.h:1833
constexpr unsigned BitWidth
Definition: BitmaskEnum.h:191
bool extractBranchWeights(const MDNode *ProfileData, SmallVectorImpl< uint32_t > &Weights)
Extract branch weights from MD_prof metadata.
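A sketch combining getBranchWeightMDNode and extractBranchWeights from above to read !prof branch weights off an instruction; it returns false when no usable profile metadata is attached.

#include "llvm/ADT/SmallVector.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/ProfDataUtils.h"

bool readBranchWeights(const llvm::Instruction &I,
                       llvm::SmallVectorImpl<uint32_t> &Weights) {
  if (const llvm::MDNode *Prof = llvm::getBranchWeightMDNode(I))
    return llvm::extractBranchWeights(Prof, Weights);
  return false;
}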
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition: Casting.h:565
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Definition: STLExtras.h:1888
bool all_equal(std::initializer_list< T > Values)
Returns true if all Values in the initializer lists are equal or the list is empty.
Definition: STLExtras.h:2048
auto seq(T Begin, T End)
Iterate over an integral type from Begin up to - but not including - End.
Definition: Sequence.h:305
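A small sketch of the range helpers referenced above (all_of, is_contained, seq) applied to a plain container; the values are arbitrary.

#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/Sequence.h"
#include "llvm/ADT/SmallVector.h"

bool rangeHelpersDemo() {
  llvm::SmallVector<int, 4> V = {0, 1, 2, 3};
  bool AllSmall = llvm::all_of(V, [](int X) { return X < 10; });
  bool HasTwo = llvm::is_contained(V, 2);
  int Sum = 0;
  for (int I : llvm::seq<int>(0, 4)) // visits 0, 1, 2, 3 (End excluded)
    Sum += I;
  return AllSmall && HasTwo && Sum == 6;
}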
@ Default
The result values are uniform if and only if all operands are uniform.
const uint64_t NOMORE_ICP_MAGICNUM
Magic number in the value profile metadata showing a target has been promoted for the instruction and shouldn't be promoted again.
Definition: Metadata.h:57
cmpResult
IEEE-754R 5.11: Floating Point Comparison Relations.
Definition: APFloat.h:220
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition: Alignment.h:39
Describes an element of a Bitfield.
Definition: Bitfields.h:223
Used to keep track of an operand bundle.
Definition: InstrTypes.h:2442
uint32_t End
The index in the Use& vector where operands for this operand bundle end.
Definition: InstrTypes.h:2453
uint32_t Begin
The index in the Use& vector where operands for this operand bundle start.
Definition: InstrTypes.h:2449
Incoming for lane mask phi as machine instruction; incoming register Reg and incoming block Block are parameters of the phi instruction.
Compile-time customization of User operands.
Definition: User.h:42