LLVM 22.0.0git
Instructions.cpp
Go to the documentation of this file.
1//===- Instructions.cpp - Implement the LLVM instructions -----------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file implements all of the non-inline methods for the LLVM instruction
10// classes.
11//
12//===----------------------------------------------------------------------===//
13
15#include "LLVMContextImpl.h"
18#include "llvm/ADT/Twine.h"
19#include "llvm/IR/Attributes.h"
20#include "llvm/IR/BasicBlock.h"
21#include "llvm/IR/Constant.h"
23#include "llvm/IR/Constants.h"
24#include "llvm/IR/DataLayout.h"
26#include "llvm/IR/Function.h"
27#include "llvm/IR/InstrTypes.h"
28#include "llvm/IR/Instruction.h"
29#include "llvm/IR/Intrinsics.h"
30#include "llvm/IR/LLVMContext.h"
31#include "llvm/IR/MDBuilder.h"
32#include "llvm/IR/Metadata.h"
33#include "llvm/IR/Module.h"
34#include "llvm/IR/Operator.h"
37#include "llvm/IR/Type.h"
38#include "llvm/IR/Value.h"
46#include "llvm/Support/ModRef.h"
48#include <algorithm>
49#include <cassert>
50#include <cstdint>
51#include <optional>
52#include <vector>
53
54using namespace llvm;
55
57 "disable-i2p-p2i-opt", cl::init(false),
58 cl::desc("Disables inttoptr/ptrtoint roundtrip optimization"));
59
60//===----------------------------------------------------------------------===//
61// AllocaInst Class
62//===----------------------------------------------------------------------===//
63
64std::optional<TypeSize>
66 TypeSize Size = DL.getTypeAllocSize(getAllocatedType());
67 if (isArrayAllocation()) {
69 if (!C)
70 return std::nullopt;
71 assert(!Size.isScalable() && "Array elements cannot have a scalable size");
72 auto CheckedProd =
73 checkedMulUnsigned(Size.getKnownMinValue(), C->getZExtValue());
74 if (!CheckedProd)
75 return std::nullopt;
76 return TypeSize::getFixed(*CheckedProd);
77 }
78 return Size;
79}
80
81std::optional<TypeSize>
83 std::optional<TypeSize> Size = getAllocationSize(DL);
84 if (!Size)
85 return std::nullopt;
86 auto CheckedProd = checkedMulUnsigned(Size->getKnownMinValue(),
87 static_cast<TypeSize::ScalarTy>(8));
88 if (!CheckedProd)
89 return std::nullopt;
90 return TypeSize::get(*CheckedProd, Size->isScalable());
91}
92
93//===----------------------------------------------------------------------===//
94// SelectInst Class
95//===----------------------------------------------------------------------===//
96
97/// areInvalidOperands - Return a string if the specified operands are invalid
98/// for a select operation, otherwise return null.
99const char *SelectInst::areInvalidOperands(Value *Op0, Value *Op1, Value *Op2) {
100 if (Op1->getType() != Op2->getType())
101 return "both values to select must have same type";
102
103 if (Op1->getType()->isTokenTy())
104 return "select values cannot have token type";
105
106 if (VectorType *VT = dyn_cast<VectorType>(Op0->getType())) {
107 // Vector select.
108 if (VT->getElementType() != Type::getInt1Ty(Op0->getContext()))
109 return "vector select condition element type must be i1";
111 if (!ET)
112 return "selected values for vector select must be vectors";
113 if (ET->getElementCount() != VT->getElementCount())
114 return "vector select requires selected vectors to have "
115 "the same vector length as select condition";
116 } else if (Op0->getType() != Type::getInt1Ty(Op0->getContext())) {
117 return "select condition must be i1 or <n x i1>";
118 }
119 return nullptr;
120}
121
122//===----------------------------------------------------------------------===//
123// PHINode Class
124//===----------------------------------------------------------------------===//
125
126PHINode::PHINode(const PHINode &PN)
127 : Instruction(PN.getType(), Instruction::PHI, AllocMarker),
128 ReservedSpace(PN.getNumOperands()) {
131 std::copy(PN.op_begin(), PN.op_end(), op_begin());
132 copyIncomingBlocks(make_range(PN.block_begin(), PN.block_end()));
134}
135
136// removeIncomingValue - Remove an incoming value. This is useful if a
137// predecessor basic block is deleted.
138Value *PHINode::removeIncomingValue(unsigned Idx, bool DeletePHIIfEmpty) {
139 Value *Removed = getIncomingValue(Idx);
140 // Swap with the end of the list.
141 unsigned Last = getNumOperands() - 1;
142 if (Idx != Last) {
145 }
146
147 // Nuke the last value.
148 Op<-1>().set(nullptr);
150
151 // If the PHI node is dead, because it has zero entries, nuke it now.
152 if (getNumOperands() == 0 && DeletePHIIfEmpty) {
153 // If anyone is using this PHI, make them use a dummy value instead...
156 }
157 return Removed;
158}
159
160void PHINode::removeIncomingValueIf(function_ref<bool(unsigned)> Predicate,
161 bool DeletePHIIfEmpty) {
162 unsigned NumOps = getNumIncomingValues();
163 unsigned Idx = 0;
164 while (Idx < NumOps) {
165 if (Predicate(Idx)) {
166 unsigned LastIdx = NumOps - 1;
167 if (Idx != LastIdx) {
168 setIncomingValue(Idx, getIncomingValue(LastIdx));
169 setIncomingBlock(Idx, getIncomingBlock(LastIdx));
170 }
171 getOperandUse(LastIdx).set(nullptr);
172 NumOps--;
173 } else {
174 Idx++;
175 }
176 }
177
179
180 // If the PHI node is dead, because it has zero entries, nuke it now.
181 if (getNumOperands() == 0 && DeletePHIIfEmpty) {
182 // If anyone is using this PHI, make them use a dummy value instead...
185 }
186}
187
188/// growOperands - grow operands - This grows the operand list in response
189/// to a push_back style of operation. This grows the number of ops by 1.5
190/// times.
191///
192void PHINode::growOperands() {
193 unsigned e = getNumOperands();
194 unsigned NumOps = e + e / 2;
195 if (NumOps < 2) NumOps = 2; // 2 op PHI nodes are VERY common.
196
197 ReservedSpace = NumOps;
198 growHungoffUses(ReservedSpace, /*WithExtraValues=*/true);
199}
200
201/// hasConstantValue - If the specified PHI node always merges together the same
202/// value, return the value, otherwise return null.
204 // Exploit the fact that phi nodes always have at least one entry.
205 Value *ConstantValue = getIncomingValue(0);
206 for (unsigned i = 1, e = getNumIncomingValues(); i != e; ++i)
207 if (getIncomingValue(i) != ConstantValue && getIncomingValue(i) != this) {
208 if (ConstantValue != this)
209 return nullptr; // Incoming values not all the same.
210 // The case where the first value is this PHI.
211 ConstantValue = getIncomingValue(i);
212 }
213 if (ConstantValue == this)
214 return PoisonValue::get(getType());
215 return ConstantValue;
216}
217
218/// hasConstantOrUndefValue - Whether the specified PHI node always merges
219/// together the same value, assuming that undefs result in the same value as
220/// non-undefs.
221/// Unlike \ref hasConstantValue, this does not return a value because the
222/// unique non-undef incoming value need not dominate the PHI node.
224 Value *ConstantValue = nullptr;
225 for (unsigned i = 0, e = getNumIncomingValues(); i != e; ++i) {
227 if (Incoming != this && !isa<UndefValue>(Incoming)) {
228 if (ConstantValue && ConstantValue != Incoming)
229 return false;
230 ConstantValue = Incoming;
231 }
232 }
233 return true;
234}
235
236//===----------------------------------------------------------------------===//
237// LandingPadInst Implementation
238//===----------------------------------------------------------------------===//
239
240LandingPadInst::LandingPadInst(Type *RetTy, unsigned NumReservedValues,
241 const Twine &NameStr,
242 InsertPosition InsertBefore)
243 : Instruction(RetTy, Instruction::LandingPad, AllocMarker, InsertBefore) {
244 init(NumReservedValues, NameStr);
245}
246
247LandingPadInst::LandingPadInst(const LandingPadInst &LP)
248 : Instruction(LP.getType(), Instruction::LandingPad, AllocMarker),
249 ReservedSpace(LP.getNumOperands()) {
252 Use *OL = getOperandList();
253 const Use *InOL = LP.getOperandList();
254 for (unsigned I = 0, E = ReservedSpace; I != E; ++I)
255 OL[I] = InOL[I];
256
257 setCleanup(LP.isCleanup());
258}
259
260LandingPadInst *LandingPadInst::Create(Type *RetTy, unsigned NumReservedClauses,
261 const Twine &NameStr,
262 InsertPosition InsertBefore) {
263 return new LandingPadInst(RetTy, NumReservedClauses, NameStr, InsertBefore);
264}
265
266void LandingPadInst::init(unsigned NumReservedValues, const Twine &NameStr) {
267 ReservedSpace = NumReservedValues;
269 allocHungoffUses(ReservedSpace);
270 setName(NameStr);
271 setCleanup(false);
272}
273
274/// growOperands - grow operands - This grows the operand list in response to a
275/// push_back style of operation. This grows the number of ops by 2 times.
276void LandingPadInst::growOperands(unsigned Size) {
277 unsigned e = getNumOperands();
278 if (ReservedSpace >= e + Size) return;
279 ReservedSpace = (std::max(e, 1U) + Size / 2) * 2;
280 growHungoffUses(ReservedSpace);
281}
282
284 unsigned OpNo = getNumOperands();
285 growOperands(1);
286 assert(OpNo < ReservedSpace && "Growing didn't work!");
288 getOperandList()[OpNo] = Val;
289}
290
291//===----------------------------------------------------------------------===//
292// CallBase Implementation
293//===----------------------------------------------------------------------===//
294
296 InsertPosition InsertPt) {
297 switch (CB->getOpcode()) {
298 case Instruction::Call:
299 return CallInst::Create(cast<CallInst>(CB), Bundles, InsertPt);
300 case Instruction::Invoke:
301 return InvokeInst::Create(cast<InvokeInst>(CB), Bundles, InsertPt);
302 case Instruction::CallBr:
303 return CallBrInst::Create(cast<CallBrInst>(CB), Bundles, InsertPt);
304 default:
305 llvm_unreachable("Unknown CallBase sub-class!");
306 }
307}
308
310 InsertPosition InsertPt) {
312 for (unsigned i = 0, e = CI->getNumOperandBundles(); i < e; ++i) {
313 auto ChildOB = CI->getOperandBundleAt(i);
314 if (ChildOB.getTagName() != OpB.getTag())
315 OpDefs.emplace_back(ChildOB);
316 }
317 OpDefs.emplace_back(OpB);
318 return CallBase::Create(CI, OpDefs, InsertPt);
319}
320
322
324 assert(getOpcode() == Instruction::CallBr && "Unexpected opcode!");
325 return cast<CallBrInst>(this)->getNumIndirectDests() + 1;
326}
327
329 const Value *V = getCalledOperand();
330 if (isa<Function>(V) || isa<Constant>(V))
331 return false;
332 return !isInlineAsm();
333}
334
335/// Tests if this call site must be tail call optimized. Only a CallInst can
336/// be tail call optimized.
338 if (auto *CI = dyn_cast<CallInst>(this))
339 return CI->isMustTailCall();
340 return false;
341}
342
343/// Tests if this call site is marked as a tail call.
345 if (auto *CI = dyn_cast<CallInst>(this))
346 return CI->isTailCall();
347 return false;
348}
349
352 return F->getIntrinsicID();
354}
355
357 FPClassTest Mask = Attrs.getRetNoFPClass();
358
359 if (const Function *F = getCalledFunction())
360 Mask |= F->getAttributes().getRetNoFPClass();
361 return Mask;
362}
363
365 FPClassTest Mask = Attrs.getParamNoFPClass(i);
366
367 if (const Function *F = getCalledFunction())
368 Mask |= F->getAttributes().getParamNoFPClass(i);
369 return Mask;
370}
371
372std::optional<ConstantRange> CallBase::getRange() const {
373 Attribute CallAttr = Attrs.getRetAttr(Attribute::Range);
375 if (const Function *F = getCalledFunction())
376 FnAttr = F->getRetAttribute(Attribute::Range);
377
378 if (CallAttr.isValid() && FnAttr.isValid())
379 return CallAttr.getRange().intersectWith(FnAttr.getRange());
380 if (CallAttr.isValid())
381 return CallAttr.getRange();
382 if (FnAttr.isValid())
383 return FnAttr.getRange();
384 return std::nullopt;
385}
386
388 if (hasRetAttr(Attribute::NonNull))
389 return true;
390
391 if (getRetDereferenceableBytes() > 0 &&
393 return true;
394
395 return false;
396}
397
399 unsigned Index;
400
401 if (Attrs.hasAttrSomewhere(Kind, &Index))
402 return getArgOperand(Index - AttributeList::FirstArgIndex);
403 if (const Function *F = getCalledFunction())
404 if (F->getAttributes().hasAttrSomewhere(Kind, &Index))
405 return getArgOperand(Index - AttributeList::FirstArgIndex);
406
407 return nullptr;
408}
409
410/// Determine whether the argument or parameter has the given attribute.
411bool CallBase::paramHasAttr(unsigned ArgNo, Attribute::AttrKind Kind) const {
412 assert(ArgNo < arg_size() && "Param index out of bounds!");
413
414 if (Attrs.hasParamAttr(ArgNo, Kind))
415 return true;
416
417 const Function *F = getCalledFunction();
418 if (!F)
419 return false;
420
421 if (!F->getAttributes().hasParamAttr(ArgNo, Kind))
422 return false;
423
424 // Take into account mod/ref by operand bundles.
425 switch (Kind) {
426 case Attribute::ReadNone:
428 case Attribute::ReadOnly:
430 case Attribute::WriteOnly:
431 return !hasReadingOperandBundles();
432 default:
433 return true;
434 }
435}
436
438 bool AllowUndefOrPoison) const {
440 "Argument must be a pointer");
441 if (paramHasAttr(ArgNo, Attribute::NonNull) &&
442 (AllowUndefOrPoison || paramHasAttr(ArgNo, Attribute::NoUndef)))
443 return true;
444
445 if (paramHasAttr(ArgNo, Attribute::Dereferenceable) &&
447 getCaller(),
449 return true;
450
451 return false;
452}
453
454bool CallBase::hasFnAttrOnCalledFunction(Attribute::AttrKind Kind) const {
456 return F->getAttributes().hasFnAttr(Kind);
457
458 return false;
459}
460
461bool CallBase::hasFnAttrOnCalledFunction(StringRef Kind) const {
463 return F->getAttributes().hasFnAttr(Kind);
464
465 return false;
466}
467
468template <typename AK>
469Attribute CallBase::getFnAttrOnCalledFunction(AK Kind) const {
470 if constexpr (std::is_same_v<AK, Attribute::AttrKind>) {
471 // getMemoryEffects() correctly combines memory effects from the call-site,
472 // operand bundles and function.
473 assert(Kind != Attribute::Memory && "Use getMemoryEffects() instead");
474 }
475
477 return F->getAttributes().getFnAttr(Kind);
478
479 return Attribute();
480}
481
482template LLVM_ABI Attribute
483CallBase::getFnAttrOnCalledFunction(Attribute::AttrKind Kind) const;
484template LLVM_ABI Attribute
485CallBase::getFnAttrOnCalledFunction(StringRef Kind) const;
486
487template <typename AK>
488Attribute CallBase::getParamAttrOnCalledFunction(unsigned ArgNo,
489 AK Kind) const {
491
492 if (auto *F = dyn_cast<Function>(V))
493 return F->getAttributes().getParamAttr(ArgNo, Kind);
494
495 return Attribute();
496}
497template LLVM_ABI Attribute CallBase::getParamAttrOnCalledFunction(
498 unsigned ArgNo, Attribute::AttrKind Kind) const;
499template LLVM_ABI Attribute
500CallBase::getParamAttrOnCalledFunction(unsigned ArgNo, StringRef Kind) const;
501
504 for (unsigned i = 0, e = getNumOperandBundles(); i != e; ++i)
506}
507
510 const unsigned BeginIndex) {
511 auto It = op_begin() + BeginIndex;
512 for (auto &B : Bundles)
513 It = std::copy(B.input_begin(), B.input_end(), It);
514
515 auto *ContextImpl = getContext().pImpl;
516 auto BI = Bundles.begin();
517 unsigned CurrentIndex = BeginIndex;
518
519 for (auto &BOI : bundle_op_infos()) {
520 assert(BI != Bundles.end() && "Incorrect allocation?");
521
522 BOI.Tag = ContextImpl->getOrInsertBundleTag(BI->getTag());
523 BOI.Begin = CurrentIndex;
524 BOI.End = CurrentIndex + BI->input_size();
525 CurrentIndex = BOI.End;
526 BI++;
527 }
528
529 assert(BI == Bundles.end() && "Incorrect allocation?");
530
531 return It;
532}
533
535 /// When there isn't many bundles, we do a simple linear search.
536 /// Else fallback to a binary-search that use the fact that bundles usually
537 /// have similar number of argument to get faster convergence.
539 for (auto &BOI : bundle_op_infos())
540 if (BOI.Begin <= OpIdx && OpIdx < BOI.End)
541 return BOI;
542
543 llvm_unreachable("Did not find operand bundle for operand!");
544 }
545
546 assert(OpIdx >= arg_size() && "the Idx is not in the operand bundles");
548 OpIdx < std::prev(bundle_op_info_end())->End &&
549 "The Idx isn't in the operand bundle");
550
551 /// We need a decimal number below and to prevent using floating point numbers
552 /// we use an intergal value multiplied by this constant.
553 constexpr unsigned NumberScaling = 1024;
554
557 bundle_op_iterator Current = Begin;
558
559 while (Begin != End) {
560 unsigned ScaledOperandPerBundle =
561 NumberScaling * (std::prev(End)->End - Begin->Begin) / (End - Begin);
562 Current = Begin + (((OpIdx - Begin->Begin) * NumberScaling) /
563 ScaledOperandPerBundle);
564 if (Current >= End)
565 Current = std::prev(End);
566 assert(Current < End && Current >= Begin &&
567 "the operand bundle doesn't cover every value in the range");
568 if (OpIdx >= Current->Begin && OpIdx < Current->End)
569 break;
570 if (OpIdx >= Current->End)
571 Begin = Current + 1;
572 else
573 End = Current;
574 }
575
576 assert(OpIdx >= Current->Begin && OpIdx < Current->End &&
577 "the operand bundle doesn't cover every value in the range");
578 return *Current;
579}
580
583 InsertPosition InsertPt) {
584 if (CB->getOperandBundle(ID))
585 return CB;
586
588 CB->getOperandBundlesAsDefs(Bundles);
589 Bundles.push_back(OB);
590 return Create(CB, Bundles, InsertPt);
591}
592
594 InsertPosition InsertPt) {
596 bool CreateNew = false;
597
598 for (unsigned I = 0, E = CB->getNumOperandBundles(); I != E; ++I) {
599 auto Bundle = CB->getOperandBundleAt(I);
600 if (Bundle.getTagID() == ID) {
601 CreateNew = true;
602 continue;
603 }
604 Bundles.emplace_back(Bundle);
605 }
606
607 return CreateNew ? Create(CB, Bundles, InsertPt) : CB;
608}
609
611 // Implementation note: this is a conservative implementation of operand
612 // bundle semantics, where *any* non-assume operand bundle (other than
613 // ptrauth) forces a callsite to be at least readonly.
618 getIntrinsicID() != Intrinsic::assume;
619}
620
629
631 MemoryEffects ME = getAttributes().getMemoryEffects();
632 if (auto *Fn = dyn_cast<Function>(getCalledOperand())) {
633 MemoryEffects FnME = Fn->getMemoryEffects();
634 if (hasOperandBundles()) {
635 // TODO: Add a method to get memory effects for operand bundles instead.
637 FnME |= MemoryEffects::readOnly();
639 FnME |= MemoryEffects::writeOnly();
640 }
641 if (isVolatile()) {
642 // Volatile operations also access inaccessible memory.
644 }
645 ME &= FnME;
646 }
647 return ME;
648}
652
653/// Determine if the function does not access memory.
660
661/// Determine if the function does not access or only reads memory.
668
669/// Determine if the function does not access or only writes memory.
676
677/// Determine if the call can access memmory only using pointers based
678/// on its arguments.
685
686/// Determine if the function may only access memory that is
687/// inaccessible from the IR.
694
695/// Determine if the function may only access memory that is
696/// either inaccessible from the IR or pointed to by its arguments.
704
706 if (OpNo < arg_size()) {
707 // If the argument is passed byval, the callee does not have access to the
708 // original pointer and thus cannot capture it.
709 if (isByValArgument(OpNo))
710 return CaptureInfo::none();
711
713 if (auto *Fn = dyn_cast<Function>(getCalledOperand()))
714 CI &= Fn->getAttributes().getParamAttrs(OpNo).getCaptureInfo();
715 return CI;
716 }
717
718 // Bundles on assumes are captures(none).
719 if (getIntrinsicID() == Intrinsic::assume)
720 return CaptureInfo::none();
721
722 // deopt operand bundles are captures(none)
723 auto &BOI = getBundleOpInfoForOperand(OpNo);
724 auto OBU = operandBundleFromBundleOpInfo(BOI);
725 return OBU.isDeoptOperandBundle() ? CaptureInfo::none() : CaptureInfo::all();
726}
727
729 for (unsigned I = 0, E = arg_size(); I < E; ++I) {
731 continue;
732
734 if (auto *Fn = dyn_cast<Function>(getCalledOperand()))
735 CI &= Fn->getAttributes().getParamAttrs(I).getCaptureInfo();
737 return true;
738 }
739 return false;
740}
741
742//===----------------------------------------------------------------------===//
743// CallInst Implementation
744//===----------------------------------------------------------------------===//
745
746void CallInst::init(FunctionType *FTy, Value *Func, ArrayRef<Value *> Args,
747 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr) {
748 this->FTy = FTy;
749 assert(getNumOperands() == Args.size() + CountBundleInputs(Bundles) + 1 &&
750 "NumOperands not set up?");
751
752#ifndef NDEBUG
753 assert((Args.size() == FTy->getNumParams() ||
754 (FTy->isVarArg() && Args.size() > FTy->getNumParams())) &&
755 "Calling a function with bad signature!");
756
757 for (unsigned i = 0; i != Args.size(); ++i)
758 assert((i >= FTy->getNumParams() ||
759 FTy->getParamType(i) == Args[i]->getType()) &&
760 "Calling a function with a bad signature!");
761#endif
762
763 // Set operands in order of their index to match use-list-order
764 // prediction.
765 llvm::copy(Args, op_begin());
766 setCalledOperand(Func);
767
768 auto It = populateBundleOperandInfos(Bundles, Args.size());
769 (void)It;
770 assert(It + 1 == op_end() && "Should add up!");
771
772 setName(NameStr);
773}
774
775void CallInst::init(FunctionType *FTy, Value *Func, const Twine &NameStr) {
776 this->FTy = FTy;
777 assert(getNumOperands() == 1 && "NumOperands not set up?");
778 setCalledOperand(Func);
779
780 assert(FTy->getNumParams() == 0 && "Calling a function with bad signature");
781
782 setName(NameStr);
783}
784
785CallInst::CallInst(FunctionType *Ty, Value *Func, const Twine &Name,
786 AllocInfo AllocInfo, InsertPosition InsertBefore)
787 : CallBase(Ty->getReturnType(), Instruction::Call, AllocInfo,
788 InsertBefore) {
789 init(Ty, Func, Name);
790}
791
792CallInst::CallInst(const CallInst &CI, AllocInfo AllocInfo)
793 : CallBase(CI.Attrs, CI.FTy, CI.getType(), Instruction::Call, AllocInfo) {
795 "Wrong number of operands allocated");
796 setTailCallKind(CI.getTailCallKind());
798
799 std::copy(CI.op_begin(), CI.op_end(), op_begin());
800 std::copy(CI.bundle_op_info_begin(), CI.bundle_op_info_end(),
803}
804
806 InsertPosition InsertPt) {
807 std::vector<Value *> Args(CI->arg_begin(), CI->arg_end());
808
809 auto *NewCI = CallInst::Create(CI->getFunctionType(), CI->getCalledOperand(),
810 Args, OpB, CI->getName(), InsertPt);
811 NewCI->setTailCallKind(CI->getTailCallKind());
812 NewCI->setCallingConv(CI->getCallingConv());
813 NewCI->SubclassOptionalData = CI->SubclassOptionalData;
814 NewCI->setAttributes(CI->getAttributes());
815 NewCI->setDebugLoc(CI->getDebugLoc());
816 return NewCI;
817}
818
819// Update profile weight for call instruction by scaling it using the ratio
820// of S/T. The meaning of "branch_weights" meta data for call instruction is
821// transfered to represent call count.
823 if (T == 0) {
824 LLVM_DEBUG(dbgs() << "Attempting to update profile weights will result in "
825 "div by 0. Ignoring. Likely the function "
826 << getParent()->getParent()->getName()
827 << " has 0 entry count, and contains call instructions "
828 "with non-zero prof info.");
829 return;
830 }
831 scaleProfData(*this, S, T);
832}
833
834//===----------------------------------------------------------------------===//
835// InvokeInst Implementation
836//===----------------------------------------------------------------------===//
837
838void InvokeInst::init(FunctionType *FTy, Value *Fn, BasicBlock *IfNormal,
839 BasicBlock *IfException, ArrayRef<Value *> Args,
841 const Twine &NameStr) {
842 this->FTy = FTy;
843
845 ComputeNumOperands(Args.size(), CountBundleInputs(Bundles)) &&
846 "NumOperands not set up?");
847
848#ifndef NDEBUG
849 assert(((Args.size() == FTy->getNumParams()) ||
850 (FTy->isVarArg() && Args.size() > FTy->getNumParams())) &&
851 "Invoking a function with bad signature");
852
853 for (unsigned i = 0, e = Args.size(); i != e; i++)
854 assert((i >= FTy->getNumParams() ||
855 FTy->getParamType(i) == Args[i]->getType()) &&
856 "Invoking a function with a bad signature!");
857#endif
858
859 // Set operands in order of their index to match use-list-order
860 // prediction.
861 llvm::copy(Args, op_begin());
862 setNormalDest(IfNormal);
863 setUnwindDest(IfException);
865
866 auto It = populateBundleOperandInfos(Bundles, Args.size());
867 (void)It;
868 assert(It + 3 == op_end() && "Should add up!");
869
870 setName(NameStr);
871}
872
873InvokeInst::InvokeInst(const InvokeInst &II, AllocInfo AllocInfo)
874 : CallBase(II.Attrs, II.FTy, II.getType(), Instruction::Invoke, AllocInfo) {
875 assert(getNumOperands() == II.getNumOperands() &&
876 "Wrong number of operands allocated");
877 setCallingConv(II.getCallingConv());
878 std::copy(II.op_begin(), II.op_end(), op_begin());
879 std::copy(II.bundle_op_info_begin(), II.bundle_op_info_end(),
881 SubclassOptionalData = II.SubclassOptionalData;
882}
883
885 InsertPosition InsertPt) {
886 std::vector<Value *> Args(II->arg_begin(), II->arg_end());
887
888 auto *NewII = InvokeInst::Create(
889 II->getFunctionType(), II->getCalledOperand(), II->getNormalDest(),
890 II->getUnwindDest(), Args, OpB, II->getName(), InsertPt);
891 NewII->setCallingConv(II->getCallingConv());
892 NewII->SubclassOptionalData = II->SubclassOptionalData;
893 NewII->setAttributes(II->getAttributes());
894 NewII->setDebugLoc(II->getDebugLoc());
895 return NewII;
896}
897
899 return cast<LandingPadInst>(getUnwindDest()->getFirstNonPHIIt());
900}
901
903 if (T == 0) {
904 LLVM_DEBUG(dbgs() << "Attempting to update profile weights will result in "
905 "div by 0. Ignoring. Likely the function "
906 << getParent()->getParent()->getName()
907 << " has 0 entry count, and contains call instructions "
908 "with non-zero prof info.");
909 return;
910 }
911 scaleProfData(*this, S, T);
912}
913
914//===----------------------------------------------------------------------===//
915// CallBrInst Implementation
916//===----------------------------------------------------------------------===//
917
918void CallBrInst::init(FunctionType *FTy, Value *Fn, BasicBlock *Fallthrough,
919 ArrayRef<BasicBlock *> IndirectDests,
922 const Twine &NameStr) {
923 this->FTy = FTy;
924
925 assert(getNumOperands() == ComputeNumOperands(Args.size(),
926 IndirectDests.size(),
927 CountBundleInputs(Bundles)) &&
928 "NumOperands not set up?");
929
930#ifndef NDEBUG
931 assert(((Args.size() == FTy->getNumParams()) ||
932 (FTy->isVarArg() && Args.size() > FTy->getNumParams())) &&
933 "Calling a function with bad signature");
934
935 for (unsigned i = 0, e = Args.size(); i != e; i++)
936 assert((i >= FTy->getNumParams() ||
937 FTy->getParamType(i) == Args[i]->getType()) &&
938 "Calling a function with a bad signature!");
939#endif
940
941 // Set operands in order of their index to match use-list-order
942 // prediction.
943 llvm::copy(Args, op_begin());
944 NumIndirectDests = IndirectDests.size();
945 setDefaultDest(Fallthrough);
946 for (unsigned i = 0; i != NumIndirectDests; ++i)
947 setIndirectDest(i, IndirectDests[i]);
949
950 auto It = populateBundleOperandInfos(Bundles, Args.size());
951 (void)It;
952 assert(It + 2 + IndirectDests.size() == op_end() && "Should add up!");
953
954 setName(NameStr);
955}
956
957CallBrInst::CallBrInst(const CallBrInst &CBI, AllocInfo AllocInfo)
958 : CallBase(CBI.Attrs, CBI.FTy, CBI.getType(), Instruction::CallBr,
959 AllocInfo) {
961 "Wrong number of operands allocated");
963 std::copy(CBI.op_begin(), CBI.op_end(), op_begin());
964 std::copy(CBI.bundle_op_info_begin(), CBI.bundle_op_info_end(),
967 NumIndirectDests = CBI.NumIndirectDests;
968}
969
970CallBrInst *CallBrInst::Create(CallBrInst *CBI, ArrayRef<OperandBundleDef> OpB,
971 InsertPosition InsertPt) {
972 std::vector<Value *> Args(CBI->arg_begin(), CBI->arg_end());
973
974 auto *NewCBI = CallBrInst::Create(
975 CBI->getFunctionType(), CBI->getCalledOperand(), CBI->getDefaultDest(),
976 CBI->getIndirectDests(), Args, OpB, CBI->getName(), InsertPt);
977 NewCBI->setCallingConv(CBI->getCallingConv());
978 NewCBI->SubclassOptionalData = CBI->SubclassOptionalData;
979 NewCBI->setAttributes(CBI->getAttributes());
980 NewCBI->setDebugLoc(CBI->getDebugLoc());
981 NewCBI->NumIndirectDests = CBI->NumIndirectDests;
982 return NewCBI;
983}
984
985//===----------------------------------------------------------------------===//
986// ReturnInst Implementation
987//===----------------------------------------------------------------------===//
988
989ReturnInst::ReturnInst(const ReturnInst &RI, AllocInfo AllocInfo)
990 : Instruction(Type::getVoidTy(RI.getContext()), Instruction::Ret,
991 AllocInfo) {
993 "Wrong number of operands allocated");
994 if (RI.getNumOperands())
995 Op<0>() = RI.Op<0>();
997}
998
999ReturnInst::ReturnInst(LLVMContext &C, Value *retVal, AllocInfo AllocInfo,
1000 InsertPosition InsertBefore)
1001 : Instruction(Type::getVoidTy(C), Instruction::Ret, AllocInfo,
1002 InsertBefore) {
1003 if (retVal)
1004 Op<0>() = retVal;
1005}
1006
1007//===----------------------------------------------------------------------===//
1008// ResumeInst Implementation
1009//===----------------------------------------------------------------------===//
1010
1011ResumeInst::ResumeInst(const ResumeInst &RI)
1012 : Instruction(Type::getVoidTy(RI.getContext()), Instruction::Resume,
1013 AllocMarker) {
1014 Op<0>() = RI.Op<0>();
1015}
1016
1017ResumeInst::ResumeInst(Value *Exn, InsertPosition InsertBefore)
1018 : Instruction(Type::getVoidTy(Exn->getContext()), Instruction::Resume,
1019 AllocMarker, InsertBefore) {
1020 Op<0>() = Exn;
1021}
1022
1023//===----------------------------------------------------------------------===//
1024// CleanupReturnInst Implementation
1025//===----------------------------------------------------------------------===//
1026
1027CleanupReturnInst::CleanupReturnInst(const CleanupReturnInst &CRI,
1029 : Instruction(CRI.getType(), Instruction::CleanupRet, AllocInfo) {
1031 "Wrong number of operands allocated");
1032 setSubclassData<Instruction::OpaqueField>(
1034 Op<0>() = CRI.Op<0>();
1035 if (CRI.hasUnwindDest())
1036 Op<1>() = CRI.Op<1>();
1037}
1038
1039void CleanupReturnInst::init(Value *CleanupPad, BasicBlock *UnwindBB) {
1040 if (UnwindBB)
1041 setSubclassData<UnwindDestField>(true);
1042
1043 Op<0>() = CleanupPad;
1044 if (UnwindBB)
1045 Op<1>() = UnwindBB;
1046}
1047
1048CleanupReturnInst::CleanupReturnInst(Value *CleanupPad, BasicBlock *UnwindBB,
1050 InsertPosition InsertBefore)
1051 : Instruction(Type::getVoidTy(CleanupPad->getContext()),
1052 Instruction::CleanupRet, AllocInfo, InsertBefore) {
1053 init(CleanupPad, UnwindBB);
1054}
1055
1056//===----------------------------------------------------------------------===//
1057// CatchReturnInst Implementation
1058//===----------------------------------------------------------------------===//
1059void CatchReturnInst::init(Value *CatchPad, BasicBlock *BB) {
1060 Op<0>() = CatchPad;
1061 Op<1>() = BB;
1062}
1063
1064CatchReturnInst::CatchReturnInst(const CatchReturnInst &CRI)
1065 : Instruction(Type::getVoidTy(CRI.getContext()), Instruction::CatchRet,
1066 AllocMarker) {
1067 Op<0>() = CRI.Op<0>();
1068 Op<1>() = CRI.Op<1>();
1069}
1070
1071CatchReturnInst::CatchReturnInst(Value *CatchPad, BasicBlock *BB,
1072 InsertPosition InsertBefore)
1073 : Instruction(Type::getVoidTy(BB->getContext()), Instruction::CatchRet,
1074 AllocMarker, InsertBefore) {
1075 init(CatchPad, BB);
1076}
1077
1078//===----------------------------------------------------------------------===//
1079// CatchSwitchInst Implementation
1080//===----------------------------------------------------------------------===//
1081
// Construct a 'catchswitch'. Operands are hung off (variable count): the
// parent pad, the optional unwind destination, and the reserved handler slots.
CatchSwitchInst::CatchSwitchInst(Value *ParentPad, BasicBlock *UnwindDest,
                                 unsigned NumReservedValues,
                                 const Twine &NameStr,
                                 InsertPosition InsertBefore)
    : Instruction(ParentPad->getType(), Instruction::CatchSwitch, AllocMarker,
                  InsertBefore) {
  // Reserve an extra slot when an unwind destination is present; the +1 below
  // accounts for the ParentPad operand itself.
  if (UnwindDest)
    ++NumReservedValues;
  init(ParentPad, UnwindDest, NumReservedValues + 1);
  setName(NameStr);
}

// Copy constructor: re-runs init for the fixed operands, then copies the
// handler uses starting at index 1 (index 0 is the parent pad).
// NOTE(review): extraction dropped a line here (doxygen line 1096) — verify
// against upstream.
CatchSwitchInst::CatchSwitchInst(const CatchSwitchInst &CSI)
    : Instruction(CSI.getType(), Instruction::CatchSwitch, AllocMarker) {
  init(CSI.getParentPad(), CSI.getUnwindDest(), CSI.getNumOperands());
  setNumHungOffUseOperands(ReservedSpace);
  Use *OL = getOperandList();
  const Use *InOL = CSI.getOperandList();
  for (unsigned I = 1, E = ReservedSpace; I != E; ++I)
    OL[I] = InOL[I];
}

// Allocate the hung-off use list and install the fixed operands. With an
// unwind destination the instruction starts with 2 operands, otherwise 1.
void CatchSwitchInst::init(Value *ParentPad, BasicBlock *UnwindDest,
                           unsigned NumReservedValues) {
  assert(ParentPad && NumReservedValues);

  ReservedSpace = NumReservedValues;
  setNumHungOffUseOperands(UnwindDest ? 2 : 1);
  allocHungoffUses(ReservedSpace);

  Op<0>() = ParentPad;
  if (UnwindDest) {
    // NOTE(review): extraction dropped a line here (doxygen line 1115,
    // presumably the subclass-data flag recording the unwind dest) — verify.
    setUnwindDest(UnwindDest);
  }
}

/// growOperands - grow operands - This grows the operand list in response to a
/// push_back style of operation. This grows the number of ops by 2 times.
void CatchSwitchInst::growOperands(unsigned Size) {
  unsigned NumOperands = getNumOperands();
  assert(NumOperands >= 1);
  if (ReservedSpace >= NumOperands + Size)
    return;
  // NOTE(review): 'Size / 2' binds tighter than '+', so for Size == 1 this is
  // simply 2 * NumOperands; confirm that this precedence is intentional.
  ReservedSpace = (NumOperands + Size / 2) * 2;
  growHungoffUses(ReservedSpace);
}

// Append a handler block at the end of the operand list, growing if needed.
// NOTE(review): extraction dropped the signature line (doxygen line 1131) and
// an interior line (1135, presumably bumping the operand count) — verify.
  unsigned OpNo = getNumOperands();
  growOperands(1);
  assert(OpNo < ReservedSpace && "Growing didn't work!");
  getOperandList()[OpNo] = Handler;
}

// Remove the handler pointed at by HI, compacting the remaining handlers.
// NOTE(review): extraction dropped the signature line (doxygen line 1139) and
// a trailing line (1147, presumably shrinking the operand count) — verify.
  // Move all subsequent handlers up one.
  Use *EndDst = op_end() - 1;
  for (Use *CurDst = HI.getCurrent(); CurDst != EndDst; ++CurDst)
    *CurDst = *(CurDst + 1);
  // Null out the last handler use.
  *EndDst = nullptr;

}
1149
1150//===----------------------------------------------------------------------===//
1151// FuncletPadInst Implementation
1152//===----------------------------------------------------------------------===//
// Install the funclet pad's operands: the trailing slot holds the parent pad
// (via setParentPad) and the leading slots hold the argument values.
void FuncletPadInst::init(Value *ParentPad, ArrayRef<Value *> Args,
                          const Twine &NameStr) {
  assert(getNumOperands() == 1 + Args.size() && "NumOperands not set up?");
  llvm::copy(Args, op_begin());
  setParentPad(ParentPad);
  setName(NameStr);
}

// Copy constructor: copies all operands from FPI.
// NOTE(review): extraction dropped lines here (doxygen lines 1163 and 1166 —
// likely the first half of the operand-count assert and a setParentPad call);
// verify against upstream.
FuncletPadInst::FuncletPadInst(const FuncletPadInst &FPI, AllocInfo AllocInfo)
    : Instruction(FPI.getType(), FPI.getOpcode(), AllocInfo) {
         "Wrong number of operands allocated");
  std::copy(FPI.op_begin(), FPI.op_end(), op_begin());
}

// Construct a catchpad/cleanuppad (Op selects which) with the given parent
// pad and arguments.
// NOTE(review): extraction dropped a parameter line (doxygen line 1170,
// presumably the Args/AllocInfo parameters) — verify against upstream.
FuncletPadInst::FuncletPadInst(Instruction::FuncletPadOps Op, Value *ParentPad,
                               const Twine &NameStr,
                               InsertPosition InsertBefore)
    : Instruction(ParentPad->getType(), Op, AllocInfo, InsertBefore) {
  init(ParentPad, Args, NameStr);
}
1176
1177//===----------------------------------------------------------------------===//
1178// UnreachableInst Implementation
1179//===----------------------------------------------------------------------===//
1180
// 'unreachable' terminator: void-typed, no operands.
// NOTE(review): extraction dropped the signature's first line (doxygen line
// 1181, presumably "UnreachableInst::UnreachableInst(LLVMContext &Context,").
                                 InsertPosition InsertBefore)
    : Instruction(Type::getVoidTy(Context), Instruction::Unreachable,
                  AllocMarker, InsertBefore) {}
1185
1186//===----------------------------------------------------------------------===//
1187// BranchInst Implementation
1188//===----------------------------------------------------------------------===//
1189
1190void BranchInst::AssertOK() {
1191 if (isConditional())
1192 assert(getCondition()->getType()->isIntegerTy(1) &&
1193 "May only branch on boolean predicates!");
1194}
1195
// Unconditional branch: the single destination lives at Op<-1> (branch
// operands are addressed from the end of the operand list).
BranchInst::BranchInst(BasicBlock *IfTrue, AllocInfo AllocInfo,
                       InsertPosition InsertBefore)
    : Instruction(Type::getVoidTy(IfTrue->getContext()), Instruction::Br,
                  AllocInfo, InsertBefore) {
  assert(IfTrue && "Branch destination may not be null!");
  Op<-1>() = IfTrue;
}

// Conditional branch: operands are {Cond, IfFalse, IfTrue} at Op<-3..-1>.
BranchInst::BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond,
                       AllocInfo AllocInfo, InsertPosition InsertBefore)
    : Instruction(Type::getVoidTy(IfTrue->getContext()), Instruction::Br,
                  AllocInfo, InsertBefore) {
  // Assign in order of operand index to make use-list order predictable.
  Op<-3>() = Cond;
  Op<-2>() = IfFalse;
  Op<-1>() = IfTrue;
#ifndef NDEBUG
  AssertOK();
#endif
}

// Copy constructor: handles both the 1-operand (unconditional) and 3-operand
// (conditional) forms.
// NOTE(review): extraction dropped lines here (doxygen lines 1220 and 1229 —
// likely the first half of an operand-count assert and a trailing
// subclass-data copy); verify against upstream.
BranchInst::BranchInst(const BranchInst &BI, AllocInfo AllocInfo)
    : Instruction(Type::getVoidTy(BI.getContext()), Instruction::Br,
                  AllocInfo) {
         "Wrong number of operands allocated");
  // Assign in order of operand index to make use-list order predictable.
  if (BI.getNumOperands() != 1) {
    assert(BI.getNumOperands() == 3 && "BR can have 1 or 3 operands!");
    Op<-3>() = BI.Op<-3>();
    Op<-2>() = BI.Op<-2>();
  }
  Op<-1>() = BI.Op<-1>();
}

// Swap the true/false destinations of a conditional branch.
// NOTE(review): extraction dropped the signature/assert lines (doxygen lines
// 1232-1233) and a trailing line (1239, presumably the profile-metadata swap
// the comment below refers to) — verify against upstream.
    "Cannot swap successors of an unconditional branch");
  Op<-1>().swap(Op<-2>());

  // Update profile metadata if present and it matches our structural
  // expectations.
}
1241
1242//===----------------------------------------------------------------------===//
1243// AllocaInst Implementation
1244//===----------------------------------------------------------------------===//
1245
1246static Value *getAISize(LLVMContext &Context, Value *Amt) {
1247 if (!Amt)
1248 Amt = ConstantInt::get(Type::getInt32Ty(Context), 1);
1249 else {
1250 assert(!isa<BasicBlock>(Amt) &&
1251 "Passed basic block into allocation size parameter! Use other ctor");
1252 assert(Amt->getType()->isIntegerTy() &&
1253 "Allocation array size is not an integer!");
1254 }
1255 return Amt;
1256}
1257
// Pick the default alignment for an alloca of type Ty from the DataLayout of
// the function containing the insertion position (preferred alignment).
// NOTE(review): extraction dropped the signature line (doxygen line 1258).
  assert(Pos.isValid() &&
         "Insertion position cannot be null when alignment not provided!");
  BasicBlock *BB = Pos.getBasicBlock();
  assert(BB->getParent() &&
         "BB must be in a Function when alignment not provided!");
  const DataLayout &DL = BB->getDataLayout();
  return DL.getPrefTypeAlign(Ty);
}

// Convenience ctor: scalar alloca (no array size).
AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, const Twine &Name,
                       InsertPosition InsertBefore)
    : AllocaInst(Ty, AddrSpace, /*ArraySize=*/nullptr, Name, InsertBefore) {}

// Convenience ctor: derives the alignment from the insertion position's
// DataLayout.
AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
                       const Twine &Name, InsertPosition InsertBefore)
    : AllocaInst(Ty, AddrSpace, ArraySize,
                 computeAllocaDefaultAlign(Ty, InsertBefore), Name,
                 InsertBefore) {}

// Full ctor: result type is a pointer in AddrSpace; the single operand is the
// (normalized) array size.
// NOTE(review): extraction dropped a line in the body (doxygen line 1284,
// presumably setAlignment(Align)) — verify against upstream.
AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
                       Align Align, const Twine &Name,
                       InsertPosition InsertBefore)
    : UnaryInstruction(PointerType::get(Ty->getContext(), AddrSpace), Alloca,
                       getAISize(Ty->getContext(), ArraySize), InsertBefore),
      AllocatedType(Ty) {
  assert(!Ty->isVoidTy() && "Cannot allocate void!");
  setName(Name);
}

// True unless the array size is the constant 1.
// NOTE(review): extraction dropped the signature and the dyn_cast line
// (doxygen lines 1289-1290) — verify against upstream.
    return !CI->isOne();
  return true;
}

/// isStaticAlloca - Return true if this alloca is in the entry block of the
/// function and is a constant size. If so, the code generator will fold it
/// into the prolog/epilog code, so it is basically free.
// NOTE(review): extraction dropped the signature line (doxygen line 1298).
  // Must be constant size.
  if (!isa<ConstantInt>(getArraySize())) return false;

  // Must be in the entry block.
  const BasicBlock *Parent = getParent();
  return Parent->isEntryBlock() && !isUsedWithInAlloca();
}
1306
1307//===----------------------------------------------------------------------===//
1308// LoadInst Implementation
1309//===----------------------------------------------------------------------===//
1310
// Debug-only sanity check for a load's pointer operand.
// NOTE(review): extraction dropped the first assert line (doxygen line 1312).
void LoadInst::AssertOK() {
         "Ptr must have pointer type.");
}

// Default alignment for a load/store of Ty: the ABI alignment from the
// DataLayout of the function containing the insertion position.
// NOTE(review): extraction dropped the signature line (doxygen line 1316).
  assert(Pos.isValid() &&
         "Insertion position cannot be null when alignment not provided!");
  BasicBlock *BB = Pos.getBasicBlock();
  assert(BB->getParent() &&
         "BB must be in a Function when alignment not provided!");
  const DataLayout &DL = BB->getDataLayout();
  return DL.getABITypeAlign(Ty);
}

// Delegating ctors: each fills in defaults (non-volatile, ABI alignment,
// non-atomic, system scope) and forwards to the full constructor below.
LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name,
                   InsertPosition InsertBef)
    : LoadInst(Ty, Ptr, Name, /*isVolatile=*/false, InsertBef) {}

LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
                   InsertPosition InsertBef)
    : LoadInst(Ty, Ptr, Name, isVolatile,
               computeLoadStoreDefaultAlign(Ty, InsertBef), InsertBef) {}

LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
                   Align Align, InsertPosition InsertBef)
    : LoadInst(Ty, Ptr, Name, isVolatile, Align, AtomicOrdering::NotAtomic,
               SyncScope::System, InsertBef) {}

// Full ctor: single pointer operand; ordering/scope/volatility/alignment are
// recorded as instruction state, not operands.
// NOTE(review): extraction dropped lines here (doxygen line 1341, the
// Order/SSID parameter line, and 1344-1345, presumably
// setVolatile/setAlignment calls) — verify against upstream.
LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
                   InsertPosition InsertBef)
    : UnaryInstruction(Ty, Load, Ptr, InsertBef) {
  setAtomic(Order, SSID);
  AssertOK();
  setName(Name);
}
1350
1351//===----------------------------------------------------------------------===//
1352// StoreInst Implementation
1353//===----------------------------------------------------------------------===//
1354
// Debug-only sanity check for a store's operands.
// NOTE(review): extraction dropped the pointer-type assert's first line
// (doxygen line 1357).
void StoreInst::AssertOK() {
  assert(getOperand(0) && getOperand(1) && "Both operands must be non-null!");
         "Ptr must have pointer type!");
}

// Delegating ctors, mirroring the LoadInst overload set: fill in defaults
// (non-volatile, default alignment, non-atomic, system scope) and forward.
// NOTE(review): extraction dropped several signature lines in this overload
// chain (doxygen lines 1361, 1364, 1370, 1372, 1375) — verify upstream.
    : StoreInst(val, addr, /*isVolatile=*/false, InsertBefore) {}

                     InsertPosition InsertBefore)
    : StoreInst(val, addr, isVolatile,
                computeLoadStoreDefaultAlign(val->getType(), InsertBefore),
                InsertBefore) {}

                     InsertPosition InsertBefore)
                SyncScope::System, InsertBefore) {}

// Full ctor: void-typed instruction with two operands {value, pointer}.
// NOTE(review): extraction dropped lines 1382-1383 (presumably
// setVolatile/setAlignment) — verify against upstream.
                     AtomicOrdering Order, SyncScope::ID SSID,
                     InsertPosition InsertBefore)
    : Instruction(Type::getVoidTy(val->getContext()), Store, AllocMarker,
                  InsertBefore) {
  Op<0>() = val;
  Op<1>() = addr;
  setAtomic(Order, SSID);
  AssertOK();
}
1387
1388//===----------------------------------------------------------------------===//
1389// AtomicCmpXchgInst Implementation
1390//===----------------------------------------------------------------------===//
1391
// Install the three operands {ptr, expected, new} and the orderings, scope
// and alignment of a cmpxchg, then validate them in debug builds.
// NOTE(review): extraction dropped the first line of the pointer-type assert
// (doxygen line 1406).
void AtomicCmpXchgInst::Init(Value *Ptr, Value *Cmp, Value *NewVal,
                             Align Alignment, AtomicOrdering SuccessOrdering,
                             AtomicOrdering FailureOrdering,
                             SyncScope::ID SSID) {
  Op<0>() = Ptr;
  Op<1>() = Cmp;
  Op<2>() = NewVal;
  setSuccessOrdering(SuccessOrdering);
  setFailureOrdering(FailureOrdering);
  setSyncScopeID(SSID);
  setAlignment(Alignment);

  assert(getOperand(0) && getOperand(1) && getOperand(2) &&
         "All operands must be non-null!");
         "Ptr must have pointer type!");
  assert(getOperand(1)->getType() == getOperand(2)->getType() &&
         "Cmp type and NewVal type must be same!");
}

// Result type is {T, i1}: the loaded value plus the success flag.
// NOTE(review): extraction dropped the signature's first line (doxygen line
// 1412).
                                     Align Alignment,
                                     AtomicOrdering SuccessOrdering,
                                     AtomicOrdering FailureOrdering,
                                     SyncScope::ID SSID,
                                     InsertPosition InsertBefore)
    : Instruction(
          StructType::get(Cmp->getType(), Type::getInt1Ty(Cmp->getContext())),
          AtomicCmpXchg, AllocMarker, InsertBefore) {
  Init(Ptr, Cmp, NewVal, Alignment, SuccessOrdering, FailureOrdering, SSID);
}
1423
1424//===----------------------------------------------------------------------===//
1425// AtomicRMWInst Implementation
1426//===----------------------------------------------------------------------===//
1427
// Install the operands {ptr, value}, ordering, scope and alignment of an
// atomicrmw, then validate in debug builds.
// NOTE(review): extraction dropped lines here (doxygen line 1437, presumably
// setOperation(Operation), and 1443, the pointer-type assert's first line) —
// verify against upstream.
void AtomicRMWInst::Init(BinOp Operation, Value *Ptr, Value *Val,
                         Align Alignment, AtomicOrdering Ordering,
                         SyncScope::ID SSID) {
  assert(Ordering != AtomicOrdering::NotAtomic &&
         "atomicrmw instructions can only be atomic.");
  assert(Ordering != AtomicOrdering::Unordered &&
         "atomicrmw instructions cannot be unordered.");
  Op<0>() = Ptr;
  Op<1>() = Val;
  setOrdering(Ordering);
  setSyncScopeID(SSID);
  setAlignment(Alignment);

  assert(getOperand(0) && getOperand(1) && "All operands must be non-null!");
         "Ptr must have pointer type!");
  assert(Ordering != AtomicOrdering::NotAtomic &&
         "AtomicRMW instructions must be atomic!");
}

// Result type equals the value operand's type.
// NOTE(review): extraction dropped the signature's first line (doxygen line
// 1449).
                             Align Alignment, AtomicOrdering Ordering,
                             SyncScope::ID SSID, InsertPosition InsertBefore)
    : Instruction(Val->getType(), AtomicRMW, AllocMarker, InsertBefore) {
  Init(Operation, Ptr, Val, Alignment, Ordering, SSID);
}

// Textual name of an atomicrmw operation, used for printing/diagnostics.
// NOTE(review): extraction dropped the signature line (doxygen 1456) and most
// of the 'case AtomicRMWInst::...' labels (doxygen 1458, 1466, 1476-1500,
// even-numbered lines) that pair with the return statements below — verify
// against upstream before editing.
  switch (Op) {
    return "xchg";
  case AtomicRMWInst::Add:
    return "add";
  case AtomicRMWInst::Sub:
    return "sub";
  case AtomicRMWInst::And:
    return "and";
    return "nand";
  case AtomicRMWInst::Or:
    return "or";
  case AtomicRMWInst::Xor:
    return "xor";
  case AtomicRMWInst::Max:
    return "max";
  case AtomicRMWInst::Min:
    return "min";
    return "umax";
    return "umin";
    return "fadd";
    return "fsub";
    return "fmax";
    return "fmin";
    return "fmaximum";
    return "fminimum";
    return "uinc_wrap";
    return "udec_wrap";
    return "usub_cond";
    return "usub_sat";
    return "<invalid operation>";
  }

  llvm_unreachable("invalid atomicrmw operation");
}
1506
1507//===----------------------------------------------------------------------===//
1508// FenceInst Implementation
1509//===----------------------------------------------------------------------===//
1510
// 'fence' instruction: void-typed, no operands; ordering and scope are
// instruction state.
// NOTE(review): extraction dropped the signature's first line (doxygen line
// 1511, presumably "FenceInst::FenceInst(LLVMContext &C, AtomicOrdering
// Ordering,").
                     SyncScope::ID SSID, InsertPosition InsertBefore)
    : Instruction(Type::getVoidTy(C), Fence, AllocMarker, InsertBefore) {
  setOrdering(Ordering);
  setSyncScopeID(SSID);
}
1517
1518//===----------------------------------------------------------------------===//
1519// GetElementPtrInst Implementation
1520//===----------------------------------------------------------------------===//
1521
// Install a GEP's operands: the base pointer at Op<0> followed by the index
// list.
void GetElementPtrInst::init(Value *Ptr, ArrayRef<Value *> IdxList,
                             const Twine &Name) {
  assert(getNumOperands() == 1 + IdxList.size() &&
         "NumOperands not initialized?");
  Op<0>() = Ptr;
  llvm::copy(IdxList, op_begin() + 1);
  setName(Name);
}

// Copy constructor: copies operands and the cached source/result element
// types.
// NOTE(review): extraction dropped lines here (doxygen line 1532, presumably
// the AllocInfo parameter, and 1539, presumably copying the no-wrap flags) —
// verify against upstream.
GetElementPtrInst::GetElementPtrInst(const GetElementPtrInst &GEPI,
    : Instruction(GEPI.getType(), GetElementPtr, AllocInfo),
      SourceElementType(GEPI.SourceElementType),
      ResultElementType(GEPI.ResultElementType) {
  assert(getNumOperands() == GEPI.getNumOperands() &&
         "Wrong number of operands allocated");
  std::copy(GEPI.op_begin(), GEPI.op_end(), op_begin());
}

// Resolve one Value* index against an aggregate/vector type; returns null for
// an invalid index or a non-indexable type.
// NOTE(review): extraction dropped the signature line (doxygen line 1542).
  if (auto *Struct = dyn_cast<StructType>(Ty)) {
    if (!Struct->indexValid(Idx))
      return nullptr;
    return Struct->getTypeAtIndex(Idx);
  }
  if (!Idx->getType()->isIntOrIntVectorTy())
    return nullptr;
  if (auto *Array = dyn_cast<ArrayType>(Ty))
    return Array->getElementType();
  if (auto *Vector = dyn_cast<VectorType>(Ty))
    return Vector->getElementType();
  return nullptr;
}

// Same as above for a constant uint64_t index.
// NOTE(review): extraction dropped the signature line (doxygen line 1557).
  if (auto *Struct = dyn_cast<StructType>(Ty)) {
    if (Idx >= Struct->getNumElements())
      return nullptr;
    return Struct->getElementType(Idx);
  }
  if (auto *Array = dyn_cast<ArrayType>(Ty))
    return Array->getElementType();
  if (auto *Vector = dyn_cast<VectorType>(Ty))
    return Vector->getElementType();
  return nullptr;
}

// Walk the index list (skipping the first, pointer-level index) and step the
// type at each level.
// NOTE(review): extraction dropped the signature line (doxygen line 1571) and
// the loop body's type-stepping line (1575, presumably
// "Ty = getTypeAtIndex(Ty, V);") — as shown the loop is a no-op; verify
// against upstream.
template <typename IndexTy>
  if (IdxList.empty())
    return Ty;
  for (IndexTy V : IdxList.slice(1)) {
    if (!Ty)
      return Ty;
  }
  return Ty;
}


// Public getIndexedType overload for constant index lists.
// NOTE(review): extraction dropped the signature's first line (doxygen line
// 1586) and two sibling overloads entirely (doxygen lines 1582-1584 and
// 1591-1593) — verify against upstream.
                                        ArrayRef<Constant *> IdxList) {
  return getIndexedTypeInternal(Ty, IdxList);
}


/// hasAllZeroIndices - Return true if all of the indices of this GEP are
/// zeros. If so, the result pointer and the first operand have the same
/// value, just potentially different types.
// NOTE(review): extraction dropped the signature line (doxygen 1598) and the
// dyn_cast line opening the if/else (doxygen 1600) — verify against upstream.
  for (unsigned i = 1, e = getNumOperands(); i != e; ++i) {
      if (!CI->isZero()) return false;
    } else {
      return false;
    }
  }
  return true;
}

/// hasAllConstantIndices - Return true if all of the indices of this GEP are
/// constant integers. If so, the result pointer and the first operand have
/// a constant offset between them.
// NOTE(review): extraction dropped the signature line (doxygen 1612) and the
// isa<ConstantInt> check line (doxygen 1614) — verify against upstream.
  for (unsigned i = 1, e = getNumOperands(); i != e; ++i) {
      return false;
  }
  return true;
}


// Toggle the inbounds flag while preserving the other no-wrap flags.
// NOTE(review): extraction dropped setNoWrapFlags entirely (doxygen lines
// 1620-1622), this function's signature (1624), and the flag-setting branch
// body (1627) — verify against upstream.
  GEPNoWrapFlags NW = cast<GEPOperator>(this)->getNoWrapFlags();
  if (B)
  else
    NW = NW.withoutInBounds();
  setNoWrapFlags(NW);
}

// Thin accessors delegating to the GEPOperator view of this instruction.
// NOTE(review): extraction dropped each accessor's signature line (doxygen
// lines 1633, 1637, 1641, 1645, 1649, 1655) — verify against upstream.
  return cast<GEPOperator>(this)->getNoWrapFlags();
}

  return cast<GEPOperator>(this)->isInBounds();
}

  return cast<GEPOperator>(this)->hasNoUnsignedSignedWrap();
}

  return cast<GEPOperator>(this)->hasNoUnsignedWrap();
}

                                                 APInt &Offset) const {
  // Delegate to the generic GEPOperator implementation.
  return cast<GEPOperator>(this)->accumulateConstantOffset(DL, Offset);
}

    const DataLayout &DL, unsigned BitWidth,
    SmallMapVector<Value *, APInt, 4> &VariableOffsets,
    APInt &ConstantOffset) const {
  // Delegate to the generic GEPOperator implementation.
  return cast<GEPOperator>(this)->collectOffset(DL, BitWidth, VariableOffsets,
                                                ConstantOffset);
}
1663
1664//===----------------------------------------------------------------------===//
1665// ExtractElementInst Implementation
1666//===----------------------------------------------------------------------===//
1667
// 'extractelement': result type is the element type of the vector operand;
// operands are {vector, index}.
ExtractElementInst::ExtractElementInst(Value *Val, Value *Index,
                                       const Twine &Name,
                                       InsertPosition InsertBef)
    : Instruction(cast<VectorType>(Val->getType())->getElementType(),
                  ExtractElement, AllocMarker, InsertBef) {
  assert(isValidOperands(Val, Index) &&
         "Invalid extractelement instruction operands!");
  Op<0>() = Val;
  Op<1>() = Index;
  setName(Name);
}
1679
1680bool ExtractElementInst::isValidOperands(const Value *Val, const Value *Index) {
1681 if (!Val->getType()->isVectorTy() || !Index->getType()->isIntegerTy())
1682 return false;
1683 return true;
1684}
1685
1686//===----------------------------------------------------------------------===//
1687// InsertElementInst Implementation
1688//===----------------------------------------------------------------------===//
1689
// 'insertelement': result type equals the input vector type; operands are
// {vector, element, index}.
InsertElementInst::InsertElementInst(Value *Vec, Value *Elt, Value *Index,
                                     const Twine &Name,
                                     InsertPosition InsertBef)
    : Instruction(Vec->getType(), InsertElement, AllocMarker, InsertBef) {
  assert(isValidOperands(Vec, Elt, Index) &&
         "Invalid insertelement instruction operands!");
  Op<0>() = Vec;
  Op<1>() = Elt;
  Op<2>() = Index;
  setName(Name);
}

// Validate insertelement operands: vector, matching element type, integer
// index.
// NOTE(review): extraction dropped the signature's first line (doxygen line
// 1702).
                                    const Value *Index) {
  if (!Vec->getType()->isVectorTy())
    return false; // First operand of insertelement must be vector type.

  if (Elt->getType() != cast<VectorType>(Vec->getType())->getElementType())
    return false;// Second operand of insertelement must be vector element type.

  if (!Index->getType()->isIntegerTy())
    return false; // Third operand of insertelement must be i32.
  return true;
}
1714
1715//===----------------------------------------------------------------------===//
1716// ShuffleVectorInst Implementation
1717//===----------------------------------------------------------------------===//
1718
// Build a poison value of V's type, used as a placeholder second operand for
// the single-operand shufflevector constructors.
// NOTE(review): extraction dropped the signature line (doxygen line 1719).
  assert(V && "Cannot create placeholder of nullptr V");
  return PoisonValue::get(V->getType());
}

// Single-operand convenience ctors: delegate to the two-operand forms with a
// placeholder second vector.
// NOTE(review): extraction dropped several lines of these delegating ctors
// (doxygen lines 1724, 1726, 1728-1729, 1732) — verify against upstream.
                                     InsertPosition InsertBefore)
                        InsertBefore) {}

                                     const Twine &Name,
                                     InsertPosition InsertBefore)
                        InsertBefore) {}

// Ctor taking the mask as a constant Value: result element type comes from
// V1, element count from the mask.
// NOTE(review): extraction dropped the signature's first line (doxygen line
// 1735).
                                     const Twine &Name,
                                     InsertPosition InsertBefore)
    : Instruction(
          VectorType::get(cast<VectorType>(V1->getType())->getElementType(),
                          cast<VectorType>(Mask->getType())->getElementCount()),
          ShuffleVector, AllocMarker, InsertBefore) {
  assert(isValidOperands(V1, V2, Mask) &&
         "Invalid shuffle vector instruction operands!");

  Op<0>() = V1;
  Op<1>() = V2;
  SmallVector<int, 16> MaskArr;
  getShuffleMask(cast<Constant>(Mask), MaskArr);
  setShuffleMask(MaskArr);
  setName(Name);
}

// Ctor taking the mask as ArrayRef<int>; scalable-ness follows V1's type.
// NOTE(review): extraction dropped the signature's first line (doxygen line
// 1753).
                                     const Twine &Name,
                                     InsertPosition InsertBefore)
    : Instruction(
          VectorType::get(cast<VectorType>(V1->getType())->getElementType(),
                          Mask.size(), isa<ScalableVectorType>(V1->getType())),
          ShuffleVector, AllocMarker, InsertBefore) {
  assert(isValidOperands(V1, V2, Mask) &&
         "Invalid shuffle vector instruction operands!");
  Op<0>() = V1;
  Op<1>() = V2;
  setShuffleMask(Mask);
  setName(Name);
}

// Swap the two vector operands and remap every mask element to keep the
// shuffle's result unchanged (elements < NumOpElts move to the second source
// and vice versa).
// NOTE(review): extraction dropped the signature line (doxygen line 1768).
  int NumOpElts = cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
  int NumMaskElts = ShuffleMask.size();
  SmallVector<int, 16> NewMask(NumMaskElts);
  for (int i = 0; i != NumMaskElts; ++i) {
    int MaskElt = getMaskValue(i);
    if (MaskElt == PoisonMaskElem) {
      NewMask[i] = PoisonMaskElem;
      continue;
    }
    assert(MaskElt >= 0 && MaskElt < 2 * NumOpElts && "Out-of-range mask");
    MaskElt = (MaskElt < NumOpElts) ? MaskElt + NumOpElts : MaskElt - NumOpElts;
    NewMask[i] = MaskElt;
  }
  setShuffleMask(NewMask);
  Op<0>().swap(Op<1>());
}

// Validate (V1, V2, Mask) as shufflevector operands when the mask is given as
// ArrayRef<int>.
// NOTE(review): extraction dropped the signature's first line (doxygen line
// 1786) AND line 1799 — the latter almost certainly the
// "if (isa<ScalableVectorType>(...))" guard that restricts the splat check
// below to scalable vectors; as shown the check would apply unconditionally.
// Verify against upstream.
                                      ArrayRef<int> Mask) {
  // V1 and V2 must be vectors of the same type.
  if (!isa<VectorType>(V1->getType()) || V1->getType() != V2->getType())
    return false;

  // Make sure the mask elements make sense.
  int V1Size =
      cast<VectorType>(V1->getType())->getElementCount().getKnownMinValue();
  for (int Elem : Mask)
    if (Elem != PoisonMaskElem && Elem >= V1Size * 2)
      return false;

  if ((Mask[0] != 0 && Mask[0] != PoisonMaskElem) || !all_equal(Mask))
    return false;

  return true;
}
1805
// Validate (V1, V2, Mask) when the mask is a constant Value: it must be a
// vector of i32 of the same vector kind, with in-range or undef elements.
// NOTE(review): extraction dropped lines here (doxygen 1806, the signature's
// first line; 1816, the tail of the mask-kind check; and 1820, presumably the
// undef/zeroinitializer early-accept guarding the bare "return true" below).
// Verify against upstream.
                                      const Value *Mask) {
  // V1 and V2 must be vectors of the same type.
  if (!V1->getType()->isVectorTy() || V1->getType() != V2->getType())
    return false;

  // Mask must be vector of i32, and must be the same kind of vector as the
  // input vectors
  auto *MaskTy = dyn_cast<VectorType>(Mask->getType());
  if (!MaskTy || !MaskTy->getElementType()->isIntegerTy(32) ||
    return false;

  // Check to see if Mask is valid.
    return true;

  // NOTE: Through vector ConstantInt we have the potential to support more
  // than just zero splat masks but that requires a LangRef change.
  if (isa<ScalableVectorType>(MaskTy))
    return false;

  unsigned V1Size = cast<FixedVectorType>(V1->getType())->getNumElements();

  if (const auto *CI = dyn_cast<ConstantInt>(Mask))
    return !CI->uge(V1Size * 2);

  if (const auto *MV = dyn_cast<ConstantVector>(Mask)) {
    for (Value *Op : MV->operands()) {
      if (auto *CI = dyn_cast<ConstantInt>(Op)) {
        if (CI->uge(V1Size*2))
          return false;
      } else if (!isa<UndefValue>(Op)) {
        return false;
      }
    }
    return true;
  }

  if (const auto *CDS = dyn_cast<ConstantDataSequential>(Mask)) {
    for (unsigned i = 0, e = cast<FixedVectorType>(MaskTy)->getNumElements();
         i != e; ++i)
      if (CDS->getElementAsInteger(i) >= V1Size*2)
        return false;
    return true;
  }

  return false;
}

// Decode a constant mask Value into a vector of ints (-1 for undef lanes).
// Scalable masks must be undef or zeroinitializer and expand accordingly.
// NOTE(review): extraction dropped the signature's first line (doxygen line
// 1856).
                                   SmallVectorImpl<int> &Result) {
  ElementCount EC = cast<VectorType>(Mask->getType())->getElementCount();

  if (isa<ConstantAggregateZero>(Mask) || isa<UndefValue>(Mask)) {
    int MaskVal = isa<UndefValue>(Mask) ? -1 : 0;
    Result.append(EC.getKnownMinValue(), MaskVal);
    return;
  }

  assert(!EC.isScalable() &&
         "Scalable vector shuffle mask must be undef or zeroinitializer");

  unsigned NumElts = EC.getFixedValue();

  Result.reserve(NumElts);

  if (auto *CDS = dyn_cast<ConstantDataSequential>(Mask)) {
    for (unsigned i = 0; i != NumElts; ++i)
      Result.push_back(CDS->getElementAsInteger(i));
    return;
  }
  for (unsigned i = 0; i != NumElts; ++i) {
    Constant *C = Mask->getAggregateElement(i);
    Result.push_back(isa<UndefValue>(C) ? -1 :
                     cast<ConstantInt>(C)->getZExtValue());
  }
}

// Cache the integer mask and its constant (bitcode) representation.
// NOTE(review): extraction dropped the signature line (doxygen line 1885).
  ShuffleMask.assign(Mask.begin(), Mask.end());
  ShuffleMaskForBitcode = convertShuffleMaskForBitcode(Mask, getType());
}

// Re-encode an integer mask as a Constant for serialization. Scalable results
// must be a zero or poison splat; fixed results become a ConstantVector.
// NOTE(review): extraction dropped lines here (doxygen 1890, the signature's
// first line; 1900, presumably the MaskConst SmallVector declaration; and
// 1903, presumably the poison push_back in the then-branch). Verify upstream.
                                                          Type *ResultTy) {
  Type *Int32Ty = Type::getInt32Ty(ResultTy->getContext());
  if (isa<ScalableVectorType>(ResultTy)) {
    assert(all_equal(Mask) && "Unexpected shuffle");
    Type *VecTy = VectorType::get(Int32Ty, Mask.size(), true);
    if (Mask[0] == 0)
      return Constant::getNullValue(VecTy);
    return PoisonValue::get(VecTy);
  }
  for (int Elem : Mask) {
    if (Elem == PoisonMaskElem)
    else
      MaskConst.push_back(ConstantInt::get(Int32Ty, Elem));
  }
  return ConstantVector::get(MaskConst);
}
1909
1910static bool isSingleSourceMaskImpl(ArrayRef<int> Mask, int NumOpElts) {
1911 assert(!Mask.empty() && "Shuffle mask must contain elements");
1912 bool UsesLHS = false;
1913 bool UsesRHS = false;
1914 for (int I : Mask) {
1915 if (I == -1)
1916 continue;
1917 assert(I >= 0 && I < (NumOpElts * 2) &&
1918 "Out-of-bounds shuffle mask element");
1919 UsesLHS |= (I < NumOpElts);
1920 UsesRHS |= (I >= NumOpElts);
1921 if (UsesLHS && UsesRHS)
1922 return false;
1923 }
1924 // Allow for degenerate case: completely undef mask means neither source is used.
1925 return UsesLHS || UsesRHS;
1926}
1927
// Public wrapper over isSingleSourceMaskImpl.
// NOTE(review): extraction dropped the signature line (doxygen line 1928).
  // We don't have vector operand size information, so assume operands are the
  // same size as the mask.
  return isSingleSourceMaskImpl(Mask, NumSrcElts);
}
1933
1934static bool isIdentityMaskImpl(ArrayRef<int> Mask, int NumOpElts) {
1935 if (!isSingleSourceMaskImpl(Mask, NumOpElts))
1936 return false;
1937 for (int i = 0, NumMaskElts = Mask.size(); i < NumMaskElts; ++i) {
1938 if (Mask[i] == -1)
1939 continue;
1940 if (Mask[i] != i && Mask[i] != (NumOpElts + i))
1941 return false;
1942 }
1943 return true;
1944}
1945
// Mask-shape predicates. Each first checks that the mask length matches the
// source element count, then tests a specific structural pattern. Undef (-1)
// lanes are treated as wildcards throughout.
// NOTE(review): extraction dropped every predicate's signature line in this
// run (doxygen lines 1946, 1954, 1974, 1988, 2003, 2039); from their bodies
// these appear to be ShuffleVectorInst::isIdentityMask, isReverseMask,
// isZeroEltSplatMask, isSelectMask, isTransposeMask and isSpliceMask — verify
// against upstream.

// Identity: every defined lane stays in place within a single source.
  if (Mask.size() != static_cast<unsigned>(NumSrcElts))
    return false;
  // We don't have vector operand size information, so assume operands are the
  // same size as the mask.
  return isIdentityMaskImpl(Mask, NumSrcElts);
}

// Reverse: single-source and each defined lane I selects element
// NumSrcElts-1-I (from either operand).
  if (Mask.size() != static_cast<unsigned>(NumSrcElts))
    return false;
  if (!isSingleSourceMask(Mask, NumSrcElts))
    return false;

  // The number of elements in the mask must be at least 2.
  if (NumSrcElts < 2)
    return false;

  for (int I = 0, E = Mask.size(); I < E; ++I) {
    if (Mask[I] == -1)
      continue;
    if (Mask[I] != (NumSrcElts - 1 - I) &&
        Mask[I] != (NumSrcElts + NumSrcElts - 1 - I))
      return false;
  }
  return true;
}

// Zero-element splat: single-source and every defined lane selects element 0
// (of either operand).
  if (Mask.size() != static_cast<unsigned>(NumSrcElts))
    return false;
  if (!isSingleSourceMask(Mask, NumSrcElts))
    return false;
  for (int I = 0, E = Mask.size(); I < E; ++I) {
    if (Mask[I] == -1)
      continue;
    if (Mask[I] != 0 && Mask[I] != NumSrcElts)
      return false;
  }
  return true;
}

// Select: uses both sources, and each defined lane I takes element I from one
// of the two operands (a lane-wise select).
  if (Mask.size() != static_cast<unsigned>(NumSrcElts))
    return false;
  // Select is differentiated from identity. It requires using both sources.
  if (isSingleSourceMask(Mask, NumSrcElts))
    return false;
  for (int I = 0, E = Mask.size(); I < E; ++I) {
    if (Mask[I] == -1)
      continue;
    if (Mask[I] != I && Mask[I] != (NumSrcElts + I))
      return false;
  }
  return true;
}

  // Example masks that will return true:
  // v1 = <a, b, c, d>
  // v2 = <e, f, g, h>
  // trn1 = shufflevector v1, v2 <0, 4, 2, 6> = <a, e, c, g>
  // trn2 = shufflevector v1, v2 <1, 5, 3, 7> = <b, f, d, h>

  if (Mask.size() != static_cast<unsigned>(NumSrcElts))
    return false;
  // 1. The number of elements in the mask must be a power-of-2 and at least 2.
  int Sz = Mask.size();
  if (Sz < 2 || !isPowerOf2_32(Sz))
    return false;

  // 2. The first element of the mask must be either a 0 or a 1.
  if (Mask[0] != 0 && Mask[0] != 1)
    return false;

  // 3. The difference between the first 2 elements must be equal to the
  // number of elements in the mask.
  if ((Mask[1] - Mask[0]) != NumSrcElts)
    return false;

  // 4. The difference between consecutive even-numbered and odd-numbered
  // elements must be equal to 2.
  for (int I = 2; I < Sz; ++I) {
    int MaskEltVal = Mask[I];
    if (MaskEltVal == -1)
      return false;
    int MaskEltPrevVal = Mask[I - 2];
    if (MaskEltVal - MaskEltPrevVal != 2)
      return false;
  }
  return true;
}

// Splice: defined lanes form a run of consecutive elements starting at
// StartIndex, which must begin in the first source; on success Index is set.
                                  int &Index) {
  if (Mask.size() != static_cast<unsigned>(NumSrcElts))
    return false;
  // Example: shufflevector <4 x n> A, <4 x n> B, <1,2,3,4>
  int StartIndex = -1;
  for (int I = 0, E = Mask.size(); I != E; ++I) {
    int MaskEltVal = Mask[I];
    if (MaskEltVal == -1)
      continue;

    if (StartIndex == -1) {
      // Don't support a StartIndex that begins in the second input, or if the
      // first non-undef index would access below the StartIndex.
      if (MaskEltVal < I || NumSrcElts <= (MaskEltVal - I))
        return false;

      StartIndex = MaskEltVal - I;
      continue;
    }

    // Splice is sequential starting from StartIndex.
    if (MaskEltVal != (StartIndex + I))
      return false;
  }

  if (StartIndex == -1)
    return false;

  // NOTE: This accepts StartIndex == 0 (COPY).
  Index = StartIndex;
  return true;
}
2072
// Extract-subvector: single-source mask shorter than the source whose defined
// lanes read a contiguous run; on success Index is the run's start offset.
// NOTE(review): extraction dropped the signature's first line (doxygen line
// 2073) — verify against upstream.
                                               int NumSrcElts, int &Index) {
  // Must extract from a single source.
  if (!isSingleSourceMaskImpl(Mask, NumSrcElts))
    return false;

  // Must be smaller (else this is an Identity shuffle).
  if (NumSrcElts <= (int)Mask.size())
    return false;

  // Find start of extraction, accounting that we may start with an UNDEF.
  int SubIndex = -1;
  for (int i = 0, e = Mask.size(); i != e; ++i) {
    int M = Mask[i];
    if (M < 0)
      continue;
    int Offset = (M % NumSrcElts) - i;
    if (0 <= SubIndex && SubIndex != Offset)
      return false;
    SubIndex = Offset;
  }

  if (0 <= SubIndex && SubIndex + (int)Mask.size() <= NumSrcElts) {
    Index = SubIndex;
    return true;
  }
  return false;
}

// Insert-subvector: one source is in place (identity) and the other occupies
// a contiguous identity span within it; on success NumSubElts/Index describe
// the inserted span.
// NOTE(review): extraction dropped the signature's first line (doxygen line
// 2102) — verify against upstream.
                                              int NumSrcElts, int &NumSubElts,
                                              int &Index) {
  int NumMaskElts = Mask.size();

  // Don't try to match if we're shuffling to a smaller size.
  if (NumMaskElts < NumSrcElts)
    return false;

  // TODO: We don't recognize self-insertion/widening.
  if (isSingleSourceMaskImpl(Mask, NumSrcElts))
    return false;

  // Determine which mask elements are attributed to which source.
  APInt UndefElts = APInt::getZero(NumMaskElts);
  APInt Src0Elts = APInt::getZero(NumMaskElts);
  APInt Src1Elts = APInt::getZero(NumMaskElts);
  bool Src0Identity = true;
  bool Src1Identity = true;

  for (int i = 0; i != NumMaskElts; ++i) {
    int M = Mask[i];
    if (M < 0) {
      UndefElts.setBit(i);
      continue;
    }
    if (M < NumSrcElts) {
      Src0Elts.setBit(i);
      Src0Identity &= (M == i);
      continue;
    }
    Src1Elts.setBit(i);
    Src1Identity &= (M == (i + NumSrcElts));
  }
  assert((Src0Elts | Src1Elts | UndefElts).isAllOnes() &&
         "unknown shuffle elements");
  assert(!Src0Elts.isZero() && !Src1Elts.isZero() &&
         "2-source shuffle not found");

  // Determine lo/hi span ranges.
  // TODO: How should we handle undefs at the start of subvector insertions?
  int Src0Lo = Src0Elts.countr_zero();
  int Src1Lo = Src1Elts.countr_zero();
  int Src0Hi = NumMaskElts - Src0Elts.countl_zero();
  int Src1Hi = NumMaskElts - Src1Elts.countl_zero();

  // If src0 is in place, see if the src1 elements is inplace within its own
  // span.
  if (Src0Identity) {
    int NumSub1Elts = Src1Hi - Src1Lo;
    ArrayRef<int> Sub1Mask = Mask.slice(Src1Lo, NumSub1Elts);
    if (isIdentityMaskImpl(Sub1Mask, NumSrcElts)) {
      NumSubElts = NumSub1Elts;
      Index = Src1Lo;
      return true;
    }
  }

  // If src1 is in place, see if the src0 elements is inplace within its own
  // span.
  if (Src1Identity) {
    int NumSub0Elts = Src0Hi - Src0Lo;
    ArrayRef<int> Sub0Mask = Mask.slice(Src0Lo, NumSub0Elts);
    if (isIdentityMaskImpl(Sub0Mask, NumSrcElts)) {
      NumSubElts = NumSub0Elts;
      Index = Src0Lo;
      return true;
    }
  }

  return false;
}
2174
// ShuffleVectorInst::isIdentityWithPadding (signature line elided in this
// excerpt): returns true when the shuffle keeps operand elements in place and
// only widens the result vector, padding the extra lanes with undef.
2176 // FIXME: Not currently possible to express a shuffle mask for a scalable
2177 // vector for this case.
// NOTE(review): the guard condition on the elided line presumably rejects
// scalable vector types — confirm against upstream Instructions.cpp.
2179 return false;
2180
2181 int NumOpElts = cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
2182 int NumMaskElts = cast<FixedVectorType>(getType())->getNumElements();
// Padding requires the result to be strictly wider than the operands.
2183 if (NumMaskElts <= NumOpElts)
2184 return false;
2185
2186 // The first part of the mask must choose elements from exactly 1 source op.
// NOTE(review): the elided line presumably defines `Mask` (getShuffleMask()
// truncated to NumOpElts) — confirm against upstream.
2188 if (!isIdentityMaskImpl(Mask, NumOpElts))
2189 return false;
2190
2191 // All extending must be with undef elements.
2192 for (int i = NumOpElts; i < NumMaskElts; ++i)
2193 if (Mask[i] != -1)
2194 return false;
2195
2196 return true;
2197}
2198
// ShuffleVectorInst::isIdentityWithExtract (signature line elided in this
// excerpt): returns true when the mask selects a leading identity sub-run,
// i.e. the shuffle extracts a narrower prefix of the source vector.
2200 // FIXME: Not currently possible to express a shuffle mask for a scalable
2201 // vector for this case.
// NOTE(review): elided guard line presumably rejects scalable vectors.
2203 return false;
2204
2205 int NumOpElts = cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
2206 int NumMaskElts = cast<FixedVectorType>(getType())->getNumElements();
// Extraction requires the result to be strictly narrower than the operands.
2207 if (NumMaskElts >= NumOpElts)
2208 return false;
2209
2210 return isIdentityMaskImpl(getShuffleMask(), NumOpElts);
2211}
2212
// ShuffleVectorInst::isConcat (signature line elided in this excerpt):
// returns true when the shuffle concatenates its two operands end to end.
2214 // Vector concatenation is differentiated from identity with padding.
// NOTE(review): elided guard line presumably rejects undef/poison operands
// (so padding-with-undef is not misreported as a concat) — confirm upstream.
2216 return false;
2217
2218 // FIXME: Not currently possible to express a shuffle mask for a scalable
2219 // vector for this case.
// NOTE(review): elided guard line presumably rejects scalable vectors.
2221 return false;
2222
2223 int NumOpElts = cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
2224 int NumMaskElts = cast<FixedVectorType>(getType())->getNumElements();
// A concat of two equal-width operands is exactly twice as wide.
2225 if (NumMaskElts != NumOpElts * 2)
2226 return false;
2227
2228 // Use the mask length rather than the operands' vector lengths here. We
2229 // already know that the shuffle returns a vector twice as long as the inputs,
2230 // and neither of the inputs are undef vectors. If the mask picks consecutive
2231 // elements from both inputs, then this is a concatenation of the inputs.
2232 return isIdentityMaskImpl(getShuffleMask(), NumMaskElts)
2234
// isReplicationMaskWithParams (first signature line elided in this excerpt):
// verifies that Mask replicates each of VF source elements exactly
// ReplicationFactor times; PoisonMaskElem entries act as wildcards.
2236 int ReplicationFactor, int VF) {
2237 assert(Mask.size() == (unsigned)ReplicationFactor * VF &&
2238 "Unexpected mask size.");
2239
2240 for (int CurrElt : seq(VF)) {
// Each group of ReplicationFactor consecutive entries must all reference the
// current source element (or be poison).
2241 ArrayRef<int> CurrSubMask = Mask.take_front(ReplicationFactor);
2242 assert(CurrSubMask.size() == (unsigned)ReplicationFactor &&
2243 "Run out of mask?");
2244 Mask = Mask.drop_front(ReplicationFactor);
2245 if (!all_of(CurrSubMask, [CurrElt](int MaskElt) {
2246 return MaskElt == PoisonMaskElem || MaskElt == CurrElt;
2247 }))
2248 return false;
2249 }
2250 assert(Mask.empty() && "Did not consume the whole mask?");
2251
2252 return true;
2253}
2254
2256 int &ReplicationFactor, int &VF) {
2257 // undef-less case is trivial.
2258 if (!llvm::is_contained(Mask, PoisonMaskElem)) {
2259 ReplicationFactor =
2260 Mask.take_while([](int MaskElt) { return MaskElt == 0; }).size();
2261 if (ReplicationFactor == 0 || Mask.size() % ReplicationFactor != 0)
2262 return false;
2263 VF = Mask.size() / ReplicationFactor;
2264 return isReplicationMaskWithParams(Mask, ReplicationFactor, VF);
2265 }
2266
2267 // However, if the mask contains undef's, we have to enumerate possible tuples
2268 // and pick one. There are bounds on replication factor: [1, mask size]
2269 // (where RF=1 is an identity shuffle, RF=mask size is a broadcast shuffle)
2270 // Additionally, mask size is a replication factor multiplied by vector size,
2271 // which further significantly reduces the search space.
2272
2273 // Before doing that, let's perform basic correctness checking first.
2274 int Largest = -1;
2275 for (int MaskElt : Mask) {
2276 if (MaskElt == PoisonMaskElem)
2277 continue;
2278 // Elements must be in non-decreasing order.
2279 if (MaskElt < Largest)
2280 return false;
2281 Largest = std::max(Largest, MaskElt);
2282 }
2283
2284 // Prefer larger replication factor if all else equal.
2285 for (int PossibleReplicationFactor :
2286 reverse(seq_inclusive<unsigned>(1, Mask.size()))) {
2287 if (Mask.size() % PossibleReplicationFactor != 0)
2288 continue;
2289 int PossibleVF = Mask.size() / PossibleReplicationFactor;
2290 if (!isReplicationMaskWithParams(Mask, PossibleReplicationFactor,
2291 PossibleVF))
2292 continue;
2293 ReplicationFactor = PossibleReplicationFactor;
2294 VF = PossibleVF;
2295 return true;
2296 }
2297
2298 return false;
2299}
2300
// Member form: derives (ReplicationFactor, VF) from this shuffle's operand
// width and delegates to the parameterized checker.
2301 bool ShuffleVectorInst::isReplicationMask(int &ReplicationFactor,
2302 int &VF) const {
2303 // Not possible to express a shuffle mask for a scalable vector for this
2304 // case.
// NOTE(review): elided guard line presumably checks isa<ScalableVectorType>.
2306 return false;
2307
2308 VF = cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
2309 if (ShuffleMask.size() % VF != 0)
2310 return false;
2311 ReplicationFactor = ShuffleMask.size() / VF;
2312
2313 return isReplicationMaskWithParams(ShuffleMask, ReplicationFactor, VF);
2314}
2315
2317 if (VF <= 0 || Mask.size() < static_cast<unsigned>(VF) ||
2318 Mask.size() % VF != 0)
2319 return false;
2320 for (unsigned K = 0, Sz = Mask.size(); K < Sz; K += VF) {
2321 ArrayRef<int> SubMask = Mask.slice(K, VF);
2322 if (all_of(SubMask, [](int Idx) { return Idx == PoisonMaskElem; }))
2323 continue;
2324 SmallBitVector Used(VF, false);
2325 for (int Idx : SubMask) {
2326 if (Idx != PoisonMaskElem && Idx < VF)
2327 Used.set(Idx);
2328 }
2329 if (!Used.all())
2330 return false;
2331 }
2332 return true;
2333}
2334
2335 /// Return true if this shuffle mask is a replication mask.
// Member form (signature line elided in this excerpt): single-source check
// plus the one-use-per-lane property on this shuffle's own mask.
2337 // Not possible to express a shuffle mask for a scalable vector for this
2338 // case.
// NOTE(review): elided guard line presumably rejects scalable vectors.
2340 return false;
2341 if (!isSingleSourceMask(ShuffleMask, VF))
2342 return false;
2343
2344 return isOneUseSingleSourceMask(ShuffleMask, VF);
2345}
2346
// Returns true if this shuffle interleaves its two operands with the given
// Factor (fixed-length vectors only).
2347 bool ShuffleVectorInst::isInterleave(unsigned Factor) {
// NOTE(review): the elided line presumably defines OpTy as
// dyn_cast<FixedVectorType>(getOperand(0)->getType()) — confirm upstream.
2349 // shuffle_vector can only interleave fixed length vectors - for scalable
2350 // vectors, see the @llvm.vector.interleave2 intrinsic
2351 if (!OpTy)
2352 return false;
2353 unsigned OpNumElts = OpTy->getNumElements();
2354
// Two operands feed the shuffle, hence 2 * OpNumElts total input elements.
2355 return isInterleaveMask(ShuffleMask, Factor, OpNumElts * 2);
2356}
2357
// Static isInterleaveMask (first signature line elided in this excerpt):
// checks whether Mask interleaves Factor lanes of length NumElts/Factor,
// tolerating undef elements, and records each lane's start index in
// StartIndexes on success.
2359 ArrayRef<int> Mask, unsigned Factor, unsigned NumInputElts,
2360 SmallVectorImpl<unsigned> &StartIndexes) {
2361 unsigned NumElts = Mask.size();
2362 if (NumElts % Factor)
2363 return false;
2364
2365 unsigned LaneLen = NumElts / Factor;
2366 if (!isPowerOf2_32(LaneLen))
2367 return false;
2368
2369 StartIndexes.resize(Factor);
2370
2371 // Check whether each element matches the general interleaved rule.
2372 // Ignore undef elements, as long as the defined elements match the rule.
2373 // Outer loop processes all factors (x, y, z in the above example)
2374 unsigned I = 0, J;
2375 for (; I < Factor; I++) {
// SavedLaneValue is only read when SavedNoUndefs > 0, which implies it has
// been assigned in a prior iteration.
2376 unsigned SavedLaneValue;
2377 unsigned SavedNoUndefs = 0;
2378
2379 // Inner loop processes consecutive accesses (x, x+1... in the example)
2380 for (J = 0; J < LaneLen - 1; J++) {
2381 // Lane computes x's position in the Mask
2382 unsigned Lane = J * Factor + I;
2383 unsigned NextLane = Lane + Factor;
2384 int LaneValue = Mask[Lane];
2385 int NextLaneValue = Mask[NextLane];
2386
2387 // If both are defined, values must be sequential
2388 if (LaneValue >= 0 && NextLaneValue >= 0 &&
2389 LaneValue + 1 != NextLaneValue)
2390 break;
2391
2392 // If the next value is undef, save the current one as reference
2393 if (LaneValue >= 0 && NextLaneValue < 0) {
2394 SavedLaneValue = LaneValue;
2395 SavedNoUndefs = 1;
2396 }
2397
2398 // Undefs are allowed, but defined elements must still be consecutive:
2399 // i.e.: x,..., undef,..., x + 2,..., undef,..., undef,..., x + 5, ....
2400 // Verify this by storing the last non-undef followed by an undef
2401 // Check that following non-undef masks are incremented with the
2402 // corresponding distance.
2403 if (SavedNoUndefs > 0 && LaneValue < 0) {
2404 SavedNoUndefs++;
2405 if (NextLaneValue >= 0 &&
2406 SavedLaneValue + SavedNoUndefs != (unsigned)NextLaneValue)
2407 break;
2408 }
2409 }
2410
// An early break above means this lane violated the interleave rule.
2411 if (J < LaneLen - 1)
2412 return false;
2413
2414 int StartMask = 0;
2415 if (Mask[I] >= 0) {
2416 // Check that the start of the I range (J=0) is greater than 0
2417 StartMask = Mask[I];
2418 } else if (Mask[(LaneLen - 1) * Factor + I] >= 0) {
2419 // StartMask defined by the last value in lane
2420 StartMask = Mask[(LaneLen - 1) * Factor + I] - J;
2421 } else if (SavedNoUndefs > 0) {
2422 // StartMask defined by some non-zero value in the j loop
2423 StartMask = SavedLaneValue - (LaneLen - 1 - SavedNoUndefs);
2424 }
2425 // else StartMask remains set to 0, i.e. all elements are undefs
2426
2427 if (StartMask < 0)
2428 return false;
2429 // We must stay within the vectors; This case can happen with undefs.
2430 if (StartMask + LaneLen > NumInputElts)
2431 return false;
2432
2433 StartIndexes[I] = StartMask;
2434 }
2435
2436 return true;
2437}
2438
2439 /// Check if the mask is a DE-interleave mask of the given factor
2440 /// \p Factor like:
2441 /// <Index, Index+Factor, ..., Index+(NumElts-1)*Factor>
// (First signature line elided in this excerpt.) Tries every start index in
// [0, Factor) and reports the matching one through Index.
2443 unsigned Factor,
2444 unsigned &Index) {
2445 // Check all potential start indices from 0 to (Factor - 1).
2446 for (unsigned Idx = 0; Idx < Factor; Idx++) {
2447 unsigned I = 0;
2448
2449 // Check that elements are in ascending order by Factor. Ignore undef
2450 // elements.
2451 for (; I < Mask.size(); I++)
2452 if (Mask[I] >= 0 && static_cast<unsigned>(Mask[I]) != Idx + I * Factor)
2453 break;
2454
// A full pass without mismatch means this start index fits every element.
2455 if (I == Mask.size()) {
2456 Index = Idx;
2457 return true;
2458 }
2459 }
2460
2461 return false;
2462}
2463
2464/// Try to lower a vector shuffle as a bit rotation.
2465///
2466/// Look for a repeated rotation pattern in each sub group.
2467/// Returns an element-wise left bit rotation amount or -1 if failed.
2468static int matchShuffleAsBitRotate(ArrayRef<int> Mask, int NumSubElts) {
2469 int NumElts = Mask.size();
2470 assert((NumElts % NumSubElts) == 0 && "Illegal shuffle mask");
2471
2472 int RotateAmt = -1;
2473 for (int i = 0; i != NumElts; i += NumSubElts) {
2474 for (int j = 0; j != NumSubElts; ++j) {
2475 int M = Mask[i + j];
2476 if (M < 0)
2477 continue;
2478 if (M < i || M >= i + NumSubElts)
2479 return -1;
2480 int Offset = (NumSubElts - (M - (i + j))) % NumSubElts;
2481 if (0 <= RotateAmt && Offset != RotateAmt)
2482 return -1;
2483 RotateAmt = Offset;
2484 }
2485 }
2486 return RotateAmt;
2487}
2488
// Static isBitRotateMask (first signature line elided in this excerpt):
// scans power-of-two sub-group sizes in [MinSubElts, MaxSubElts] for a
// shuffle expressible as an element rotate, converting the element amount to
// a bit amount on success.
2490 ArrayRef<int> Mask, unsigned EltSizeInBits, unsigned MinSubElts,
2491 unsigned MaxSubElts, unsigned &NumSubElts, unsigned &RotateAmt) {
2492 for (NumSubElts = MinSubElts; NumSubElts <= MaxSubElts; NumSubElts *= 2) {
2493 int EltRotateAmt = matchShuffleAsBitRotate(Mask, NumSubElts);
2494 if (EltRotateAmt < 0)
2495 continue;
// Convert from an element rotation to a bit rotation.
2496 RotateAmt = EltRotateAmt * EltSizeInBits;
2497 return true;
2498 }
2499
2500 return false;
2501}
2502
2503//===----------------------------------------------------------------------===//
2504// InsertValueInst Class
2505//===----------------------------------------------------------------------===//
2506
// Shared constructor body: records the aggregate/value operands, the index
// path, and the instruction name.
2507 void InsertValueInst::init(Value *Agg, Value *Val, ArrayRef<unsigned> Idxs,
2508 const Twine &Name) {
2509 assert(getNumOperands() == 2 && "NumOperands not initialized?");
2510
2511 // There's no fundamental reason why we require at least one index
2512 // (other than weirdness with &*IdxBegin being invalid; see
2513 // getelementptr's init routine for example). But there's no
2514 // present need to support it.
2515 assert(!Idxs.empty() && "InsertValueInst must have at least one index");
2516
// NOTE(review): the first half of this assert (presumably
// ExtractValueInst::getIndexedType(Agg->getType(), Idxs) ==) is elided in
// this excerpt — confirm against upstream.
2518 Val->getType() && "Inserted value must match indexed type!");
2519 Op<0>() = Agg;
2520 Op<1>() = Val;
2521
2522 Indices.append(Idxs.begin(), Idxs.end());
2523 setName(Name);
2524}
2525
// Copy constructor: duplicates operands and the index path.
2526 InsertValueInst::InsertValueInst(const InsertValueInst &IVI)
2527 : Instruction(IVI.getType(), InsertValue, AllocMarker),
2528 Indices(IVI.Indices) {
2529 Op<0>() = IVI.getOperand(0);
2530 Op<1>() = IVI.getOperand(1);
// NOTE(review): a line is elided here in this excerpt (presumably copying
// SubclassOptionalData) — confirm against upstream.
2532}
2533
2534//===----------------------------------------------------------------------===//
2535// ExtractValueInst Class
2536//===----------------------------------------------------------------------===//
2537
2538void ExtractValueInst::init(ArrayRef<unsigned> Idxs, const Twine &Name) {
2539 assert(getNumOperands() == 1 && "NumOperands not initialized?");
2540
2541 // There's no fundamental reason why we require at least one index.
2542 // But there's no present need to support it.
2543 assert(!Idxs.empty() && "ExtractValueInst must have at least one index");
2544
2545 Indices.append(Idxs.begin(), Idxs.end());
2546 setName(Name);
2547}
2548
// Copy constructor: duplicates the aggregate operand and the index path.
2549 ExtractValueInst::ExtractValueInst(const ExtractValueInst &EVI)
2550 : UnaryInstruction(EVI.getType(), ExtractValue, EVI.getOperand(0),
2551 (BasicBlock *)nullptr),
2552 Indices(EVI.Indices) {
// NOTE(review): a line is elided here in this excerpt (presumably copying
// SubclassOptionalData) — confirm against upstream.
2554}
2555
2556// getIndexedType - Returns the type of the element that would be extracted
2557// with an extractvalue instruction with the specified parameters.
2558//
2559// A null type is returned if the indices are invalid for the specified
2560// pointer type.
2561//
2563 ArrayRef<unsigned> Idxs) {
2564 for (unsigned Index : Idxs) {
2565 // We can't use CompositeType::indexValid(Index) here.
2566 // indexValid() always returns true for arrays because getelementptr allows
2567 // out-of-bounds indices. Since we don't allow those for extractvalue and
2568 // insertvalue we need to check array indexing manually.
2569 // Since the only other types we can index into are struct types it's just
2570 // as easy to check those manually as well.
2571 if (ArrayType *AT = dyn_cast<ArrayType>(Agg)) {
2572 if (Index >= AT->getNumElements())
2573 return nullptr;
2574 Agg = AT->getElementType();
2575 } else if (StructType *ST = dyn_cast<StructType>(Agg)) {
2576 if (Index >= ST->getNumElements())
2577 return nullptr;
2578 Agg = ST->getElementType(Index);
2579 } else {
2580 // Not a valid type to index into.
2581 return nullptr;
2582 }
2583 }
2584 return Agg;
2585}
2586
2587//===----------------------------------------------------------------------===//
2588// UnaryOperator Class
2589//===----------------------------------------------------------------------===//
2590
2592 const Twine &Name, InsertPosition InsertBefore)
2593 : UnaryInstruction(Ty, iType, S, InsertBefore) {
2594 Op<0>() = S;
2595 setName(Name);
2596 AssertOK();
2597}
2598
// Factory (first signature line elided in this excerpt): result type is the
// operand's own type.
2600 InsertPosition InsertBefore) {
2601 return new UnaryOperator(Op, S, S->getType(), Name, InsertBefore);
2602}
2603
2604void UnaryOperator::AssertOK() {
2605 Value *LHS = getOperand(0);
2606 (void)LHS; // Silence warnings.
2607#ifndef NDEBUG
2608 switch (getOpcode()) {
2609 case FNeg:
2610 assert(getType() == LHS->getType() &&
2611 "Unary operation should return same type as operand!");
2612 assert(getType()->isFPOrFPVectorTy() &&
2613 "Tried to create a floating-point operation on a "
2614 "non-floating-point type!");
2615 break;
2616 default: llvm_unreachable("Invalid opcode provided");
2617 }
2618#endif
2619}
2620
2621//===----------------------------------------------------------------------===//
2622// BinaryOperator Class
2623//===----------------------------------------------------------------------===//
2624
2626 const Twine &Name, InsertPosition InsertBefore)
2627 : Instruction(Ty, iType, AllocMarker, InsertBefore) {
2628 Op<0>() = S1;
2629 Op<1>() = S2;
2630 setName(Name);
2631 AssertOK();
2632}
2633
2634void BinaryOperator::AssertOK() {
2635 Value *LHS = getOperand(0), *RHS = getOperand(1);
2636 (void)LHS; (void)RHS; // Silence warnings.
2637 assert(LHS->getType() == RHS->getType() &&
2638 "Binary operator operand types must match!");
2639#ifndef NDEBUG
2640 switch (getOpcode()) {
2641 case Add: case Sub:
2642 case Mul:
2643 assert(getType() == LHS->getType() &&
2644 "Arithmetic operation should return same type as operands!");
2645 assert(getType()->isIntOrIntVectorTy() &&
2646 "Tried to create an integer operation on a non-integer type!");
2647 break;
2648 case FAdd: case FSub:
2649 case FMul:
2650 assert(getType() == LHS->getType() &&
2651 "Arithmetic operation should return same type as operands!");
2652 assert(getType()->isFPOrFPVectorTy() &&
2653 "Tried to create a floating-point operation on a "
2654 "non-floating-point type!");
2655 break;
2656 case UDiv:
2657 case SDiv:
2658 assert(getType() == LHS->getType() &&
2659 "Arithmetic operation should return same type as operands!");
2660 assert(getType()->isIntOrIntVectorTy() &&
2661 "Incorrect operand type (not integer) for S/UDIV");
2662 break;
2663 case FDiv:
2664 assert(getType() == LHS->getType() &&
2665 "Arithmetic operation should return same type as operands!");
2666 assert(getType()->isFPOrFPVectorTy() &&
2667 "Incorrect operand type (not floating point) for FDIV");
2668 break;
2669 case URem:
2670 case SRem:
2671 assert(getType() == LHS->getType() &&
2672 "Arithmetic operation should return same type as operands!");
2673 assert(getType()->isIntOrIntVectorTy() &&
2674 "Incorrect operand type (not integer) for S/UREM");
2675 break;
2676 case FRem:
2677 assert(getType() == LHS->getType() &&
2678 "Arithmetic operation should return same type as operands!");
2679 assert(getType()->isFPOrFPVectorTy() &&
2680 "Incorrect operand type (not floating point) for FREM");
2681 break;
2682 case Shl:
2683 case LShr:
2684 case AShr:
2685 assert(getType() == LHS->getType() &&
2686 "Shift operation should return same type as operands!");
2687 assert(getType()->isIntOrIntVectorTy() &&
2688 "Tried to create a shift operation on a non-integral type!");
2689 break;
2690 case And: case Or:
2691 case Xor:
2692 assert(getType() == LHS->getType() &&
2693 "Logical operation should return same type as operands!");
2694 assert(getType()->isIntOrIntVectorTy() &&
2695 "Tried to create a logical operation on a non-integral type!");
2696 break;
2697 default: llvm_unreachable("Invalid opcode provided");
2698 }
2699#endif
2700}
2701
// Factory (first signature line elided in this excerpt): both operands must
// share a type, which also becomes the result type.
2703 const Twine &Name,
2704 InsertPosition InsertBefore) {
2705 assert(S1->getType() == S2->getType() &&
2706 "Cannot create binary operator with two operands of differing type!");
2707 return new BinaryOperator(Op, S1, S2, S1->getType(), Name, InsertBefore);
2708}
2709
// CreateNeg (first signature line elided in this excerpt): builds 0 - Op.
2711 InsertPosition InsertBefore) {
2712 Value *Zero = ConstantInt::get(Op->getType(), 0);
2713 return new BinaryOperator(Instruction::Sub, Zero, Op, Op->getType(), Name,
2714 InsertBefore);
2715}
2716
// CreateNSWNeg (first signature line elided in this excerpt): builds
// 0 - Op with the no-signed-wrap flag.
2718 InsertPosition InsertBefore) {
2719 Value *Zero = ConstantInt::get(Op->getType(), 0);
2720 return BinaryOperator::CreateNSWSub(Zero, Op, Name, InsertBefore);
2721}
2722
// CreateNot (first signature line elided in this excerpt): builds
// Op ^ all-ones, i.e. bitwise complement.
2724 InsertPosition InsertBefore) {
2725 Constant *C = Constant::getAllOnesValue(Op->getType());
2726 return new BinaryOperator(Instruction::Xor, Op, C,
2727 Op->getType(), Name, InsertBefore);
2728}
2729
2730 // Exchange the two operands to this instruction. This instruction is safe to
2731 // use on any binary instruction and does not modify the semantics of the
2732 // instruction.
// (Signature line elided in this excerpt.) Returns true on failure — i.e.
// when the opcode is not commutative and the swap would change semantics.
2734 if (!isCommutative())
2735 return true; // Can't commute operands
2736 Op<0>().swap(Op<1>());
2737 return false;
2738}
2739
2740//===----------------------------------------------------------------------===//
2741// FPMathOperator Class
2742//===----------------------------------------------------------------------===//
2743
// getFPAccuracy (signature line elided in this excerpt): reads the !fpmath
// metadata's ULP bound; 0.0 means no accuracy requirement attached.
2745 const MDNode *MD =
2746 cast<Instruction>(this)->getMetadata(LLVMContext::MD_fpmath);
2747 if (!MD)
2748 return 0.0;
// NOTE(review): the elided line presumably extracts `Accuracy` as a
// ConstantFP from MD's operand 0 — confirm against upstream.
2750 return Accuracy->getValueAPF().convertToFloat();
2751}
2752
2753//===----------------------------------------------------------------------===//
2754// CastInst Class
2755//===----------------------------------------------------------------------===//
2756
2757 // Just determine if this cast only deals with integral->integral conversion.
// (Signature line elided in this excerpt.) ZExt/SExt/Trunc are inherently
// integral; BitCast qualifies only when both endpoint types are integers.
2759 switch (getOpcode()) {
2760 default: return false;
2761 case Instruction::ZExt:
2762 case Instruction::SExt:
2763 case Instruction::Trunc:
2764 return true;
2765 case Instruction::BitCast:
2766 return getOperand(0)->getType()->isIntegerTy() &&
2767 getType()->isIntegerTy();
2768 }
2769}
2770
2771/// This function determines if the CastInst does not require any bits to be
2772/// changed in order to effect the cast. Essentially, it identifies cases where
2773/// no code gen is necessary for the cast, hence the name no-op cast. For
2774/// example, the following are all no-op casts:
2775/// # bitcast i32* %x to i8*
2776/// # bitcast <2 x i32> %x to <4 x i16>
2777/// # ptrtoint i32* %x to i32 ; on 32-bit plaforms only
2778/// Determine if the described cast is a no-op.
// Static isNoopCast (first signature line elided in this excerpt): see the
// doc comment above — true when the cast changes no bits.
2780 Type *SrcTy,
2781 Type *DestTy,
2782 const DataLayout &DL) {
2783 assert(castIsValid(Opcode, SrcTy, DestTy) && "method precondition");
2784 switch (Opcode) {
2785 default: llvm_unreachable("Invalid CastOp");
2786 case Instruction::Trunc:
2787 case Instruction::ZExt:
2788 case Instruction::SExt:
2789 case Instruction::FPTrunc:
2790 case Instruction::FPExt:
2791 case Instruction::UIToFP:
2792 case Instruction::SIToFP:
2793 case Instruction::FPToUI:
2794 case Instruction::FPToSI:
2795 case Instruction::AddrSpaceCast:
2796 // TODO: Target informations may give a more accurate answer here.
2797 return false;
2798 case Instruction::BitCast:
2799 return true; // BitCast never modifies bits.
2800 case Instruction::PtrToAddr:
2801 case Instruction::PtrToInt:
// Pointer <-> integer round trips are no-ops only when the integer width
// equals the pointer width for that address space.
2802 return DL.getIntPtrType(SrcTy)->getScalarSizeInBits() ==
2803 DestTy->getScalarSizeInBits();
2804 case Instruction::IntToPtr:
2805 return DL.getIntPtrType(DestTy)->getScalarSizeInBits() ==
2806 SrcTy->getScalarSizeInBits();
2807 }
2808}
2809
// Member form (signature line elided in this excerpt): delegates to the
// static overload using this cast's own opcode and types.
2811 return isNoopCast(getOpcode(), getOperand(0)->getType(), getType(), DL);
2812}
2813
2814/// This function determines if a pair of casts can be eliminated and what
2815/// opcode should be used in the elimination. This assumes that there are two
2816/// instructions like this:
2817/// * %F = firstOpcode SrcTy %x to MidTy
2818/// * %S = secondOpcode MidTy %F to DstTy
2819/// The function returns a resultOpcode so these two casts can be replaced with:
2820/// * %Replacement = resultOpcode %SrcTy %x to DstTy
2821/// If no such cast is permitted, the function returns 0.
// isEliminableCastPair (first signature line elided in this excerpt): see
// the doc comment above for the contract; returns the replacement opcode or
// 0 if the pair cannot be folded.
2823 Instruction::CastOps secondOp,
2824 Type *SrcTy, Type *MidTy, Type *DstTy,
2825 const DataLayout *DL) {
2826 // Define the 144 possibilities for these two cast instructions. The values
2827 // in this matrix determine what to do in a given situation and select the
2828 // case in the switch below. The rows correspond to firstOp, the columns
2829 // correspond to secondOp. In looking at the table below, keep in mind
2830 // the following cast properties:
2831 //
2832 // Size Compare Source Destination
2833 // Operator Src ? Size Type Sign Type Sign
2834 // -------- ------------ ------------------- ---------------------
2835 // TRUNC > Integer Any Integral Any
2836 // ZEXT < Integral Unsigned Integer Any
2837 // SEXT < Integral Signed Integer Any
2838 // FPTOUI n/a FloatPt n/a Integral Unsigned
2839 // FPTOSI n/a FloatPt n/a Integral Signed
2840 // UITOFP n/a Integral Unsigned FloatPt n/a
2841 // SITOFP n/a Integral Signed FloatPt n/a
2842 // FPTRUNC > FloatPt n/a FloatPt n/a
2843 // FPEXT < FloatPt n/a FloatPt n/a
2844 // PTRTOINT n/a Pointer n/a Integral Unsigned
2845 // PTRTOADDR n/a Pointer n/a Integral Unsigned
2846 // INTTOPTR n/a Integral Unsigned Pointer n/a
2847 // BITCAST = FirstClass n/a FirstClass n/a
2848 // ADDRSPCST n/a Pointer n/a Pointer n/a
2849 //
2850 // NOTE: some transforms are safe, but we consider them to be non-profitable.
2851 // For example, we could merge "fptoui double to i32" + "zext i32 to i64",
2852 // into "fptoui double to i64", but this loses information about the range
2853 // of the produced value (we no longer know the top-part is all zeros).
2854 // Further this conversion is often much more expensive for typical hardware,
2855 // and causes issues when building libgcc. We disallow fptosi+sext for the
2856 // same reason.
2857 const unsigned numCastOps =
2858 Instruction::CastOpsEnd - Instruction::CastOpsBegin;
2859 // clang-format off
2860 static const uint8_t CastResults[numCastOps][numCastOps] = {
2861 // T F F U S F F P P I B A -+
2862 // R Z S P P I I T P 2 2 N T S |
2863 // U E E 2 2 2 2 R E I A T C C +- secondOp
2864 // N X X U S F F N X N D 2 V V |
2865 // C T T I I P P C T T R P T T -+
2866 { 1, 0, 0,99,99, 0, 0,99,99,99,99, 0, 3, 0}, // Trunc -+
2867 { 8, 1, 9,99,99, 2,17,99,99,99,99, 2, 3, 0}, // ZExt |
2868 { 8, 0, 1,99,99, 0, 2,99,99,99,99, 0, 3, 0}, // SExt |
2869 { 0, 0, 0,99,99, 0, 0,99,99,99,99, 0, 3, 0}, // FPToUI |
2870 { 0, 0, 0,99,99, 0, 0,99,99,99,99, 0, 3, 0}, // FPToSI |
2871 { 99,99,99, 0, 0,99,99, 0, 0,99,99,99, 4, 0}, // UIToFP +- firstOp
2872 { 99,99,99, 0, 0,99,99, 0, 0,99,99,99, 4, 0}, // SIToFP |
2873 { 99,99,99, 0, 0,99,99, 0, 0,99,99,99, 4, 0}, // FPTrunc |
2874 { 99,99,99, 2, 2,99,99, 8, 2,99,99,99, 4, 0}, // FPExt |
2875 { 1, 0, 0,99,99, 0, 0,99,99,99,99, 7, 3, 0}, // PtrToInt |
2876 { 0, 0, 0,99,99, 0, 0,99,99,99,99, 0, 3, 0}, // PtrToAddr |
2877 { 99,99,99,99,99,99,99,99,99,11,11,99,15, 0}, // IntToPtr |
2878 { 5, 5, 5, 0, 0, 5, 5, 0, 0,16,16, 5, 1,14}, // BitCast |
2879 { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,13,12}, // AddrSpaceCast -+
2880 };
2881 // clang-format on
2882
2883 // TODO: This logic could be encoded into the table above and handled in the
2884 // switch below.
2885 // If either of the casts are a bitcast from scalar to vector, disallow the
2886 // merging. However, any pair of bitcasts are allowed.
2887 bool IsFirstBitcast = (firstOp == Instruction::BitCast);
2888 bool IsSecondBitcast = (secondOp == Instruction::BitCast);
2889 bool AreBothBitcasts = IsFirstBitcast && IsSecondBitcast;
2890
2891 // Check if any of the casts convert scalars <-> vectors.
2892 if ((IsFirstBitcast && isa<VectorType>(SrcTy) != isa<VectorType>(MidTy)) ||
2893 (IsSecondBitcast && isa<VectorType>(MidTy) != isa<VectorType>(DstTy)))
2894 if (!AreBothBitcasts)
2895 return 0;
2896
2897 int ElimCase = CastResults[firstOp-Instruction::CastOpsBegin]
2898 [secondOp-Instruction::CastOpsBegin];
2899 switch (ElimCase) {
2900 case 0:
2901 // Categorically disallowed.
2902 return 0;
2903 case 1:
2904 // Allowed, use first cast's opcode.
2905 return firstOp;
2906 case 2:
2907 // Allowed, use second cast's opcode.
2908 return secondOp;
2909 case 3:
2910 // No-op cast in second op implies firstOp as long as the DestTy
2911 // is integer and we are not converting between a vector and a
2912 // non-vector type.
2913 if (!SrcTy->isVectorTy() && DstTy->isIntegerTy())
2914 return firstOp;
2915 return 0;
2916 case 4:
2917 // No-op cast in second op implies firstOp as long as the DestTy
2918 // matches MidTy.
2919 if (DstTy == MidTy)
2920 return firstOp;
2921 return 0;
2922 case 5:
2923 // No-op cast in first op implies secondOp as long as the SrcTy
2924 // is an integer.
2925 if (SrcTy->isIntegerTy())
2926 return secondOp;
2927 return 0;
2928 case 7: {
2929 // Disable inttoptr/ptrtoint optimization if enabled.
2930 if (DisableI2pP2iOpt)
2931 return 0;
2932
2933 // Cannot simplify if address spaces are different!
2934 if (SrcTy != DstTy)
2935 return 0;
2936
2937 // Cannot simplify if the intermediate integer size is smaller than the
2938 // pointer size.
2939 unsigned MidSize = MidTy->getScalarSizeInBits();
2940 if (!DL || MidSize < DL->getPointerTypeSizeInBits(SrcTy))
2941 return 0;
2942
2943 return Instruction::BitCast;
2944 }
2945 case 8: {
2946 // ext, trunc -> bitcast, if the SrcTy and DstTy are the same
2947 // ext, trunc -> ext, if sizeof(SrcTy) < sizeof(DstTy)
2948 // ext, trunc -> trunc, if sizeof(SrcTy) > sizeof(DstTy)
2949 unsigned SrcSize = SrcTy->getScalarSizeInBits();
2950 unsigned DstSize = DstTy->getScalarSizeInBits();
2951 if (SrcTy == DstTy)
2952 return Instruction::BitCast;
2953 if (SrcSize < DstSize)
2954 return firstOp;
2955 if (SrcSize > DstSize)
2956 return secondOp;
2957 return 0;
2958 }
2959 case 9:
2960 // zext, sext -> zext, because sext can't sign extend after zext
2961 return Instruction::ZExt;
2962 case 11: {
2963 // inttoptr, ptrtoint/ptrtoaddr -> integer cast
2964 if (!DL)
2965 return 0;
2966 unsigned MidSize = secondOp == Instruction::PtrToAddr
2967 ? DL->getAddressSizeInBits(MidTy)
2968 : DL->getPointerTypeSizeInBits(MidTy);
2969 unsigned SrcSize = SrcTy->getScalarSizeInBits();
2970 unsigned DstSize = DstTy->getScalarSizeInBits();
2971 // If the middle size is smaller than both source and destination,
2972 // an additional masking operation would be required.
2973 if (MidSize < SrcSize && MidSize < DstSize)
2974 return 0;
2975 if (DstSize < SrcSize)
2976 return Instruction::Trunc;
2977 if (DstSize > SrcSize)
2978 return Instruction::ZExt;
2979 return Instruction::BitCast;
2980 }
2981 case 12:
2982 // addrspacecast, addrspacecast -> bitcast, if SrcAS == DstAS
2983 // addrspacecast, addrspacecast -> addrspacecast, if SrcAS != DstAS
2984 if (SrcTy->getPointerAddressSpace() != DstTy->getPointerAddressSpace())
2985 return Instruction::AddrSpaceCast;
2986 return Instruction::BitCast;
2987 case 13:
2988 // FIXME: this state can be merged with (1), but the following assert
2989 // is useful to check the correcteness of the sequence due to semantic
2990 // change of bitcast.
2991 assert(
2992 SrcTy->isPtrOrPtrVectorTy() &&
2993 MidTy->isPtrOrPtrVectorTy() &&
2994 DstTy->isPtrOrPtrVectorTy() &&
2995 SrcTy->getPointerAddressSpace() != MidTy->getPointerAddressSpace() &&
2996 MidTy->getPointerAddressSpace() == DstTy->getPointerAddressSpace() &&
2997 "Illegal addrspacecast, bitcast sequence!");
2998 // Allowed, use first cast's opcode
2999 return firstOp;
3000 case 14:
3001 // bitcast, addrspacecast -> addrspacecast
3002 return Instruction::AddrSpaceCast;
3003 case 15:
3004 // FIXME: this state can be merged with (1), but the following assert
3005 // is useful to check the correcteness of the sequence due to semantic
3006 // change of bitcast.
3007 assert(
3008 SrcTy->isIntOrIntVectorTy() &&
3009 MidTy->isPtrOrPtrVectorTy() &&
3010 DstTy->isPtrOrPtrVectorTy() &&
3011 MidTy->getPointerAddressSpace() == DstTy->getPointerAddressSpace() &&
3012 "Illegal inttoptr, bitcast sequence!");
3013 // Allowed, use first cast's opcode
3014 return firstOp;
3015 case 16:
3016 // FIXME: this state can be merged with (2), but the following assert
3017 // is useful to check the correcteness of the sequence due to semantic
3018 // change of bitcast.
3019 assert(
3020 SrcTy->isPtrOrPtrVectorTy() &&
3021 MidTy->isPtrOrPtrVectorTy() &&
3022 DstTy->isIntOrIntVectorTy() &&
3023 SrcTy->getPointerAddressSpace() == MidTy->getPointerAddressSpace() &&
3024 "Illegal bitcast, ptrtoint sequence!");
3025 // Allowed, use second cast's opcode
3026 return secondOp;
3027 case 17:
3028 // (sitofp (zext x)) -> (uitofp x)
3029 return Instruction::UIToFP;
3030 case 99:
3031 // Cast combination can't happen (error in input). This is for all cases
3032 // where the MidTy is not the same for the two cast instructions.
3033 llvm_unreachable("Invalid Cast Combination");
3034 default:
3035 llvm_unreachable("Error in CastResults table!!!");
3036 }
3037}
3038
// CastInst::Create (first signature line elided in this excerpt): validates
// the requested cast and dispatches to the matching concrete subclass.
3040 const Twine &Name, InsertPosition InsertBefore) {
3041 assert(castIsValid(op, S, Ty) && "Invalid cast!");
3042 // Construct and return the appropriate CastInst subclass
3043 switch (op) {
3044 case Trunc: return new TruncInst (S, Ty, Name, InsertBefore);
3045 case ZExt: return new ZExtInst (S, Ty, Name, InsertBefore);
3046 case SExt: return new SExtInst (S, Ty, Name, InsertBefore);
3047 case FPTrunc: return new FPTruncInst (S, Ty, Name, InsertBefore);
3048 case FPExt: return new FPExtInst (S, Ty, Name, InsertBefore);
3049 case UIToFP: return new UIToFPInst (S, Ty, Name, InsertBefore);
3050 case SIToFP: return new SIToFPInst (S, Ty, Name, InsertBefore);
3051 case FPToUI: return new FPToUIInst (S, Ty, Name, InsertBefore);
3052 case FPToSI: return new FPToSIInst (S, Ty, Name, InsertBefore);
3053 case PtrToAddr: return new PtrToAddrInst (S, Ty, Name, InsertBefore);
3054 case PtrToInt: return new PtrToIntInst (S, Ty, Name, InsertBefore);
3055 case IntToPtr: return new IntToPtrInst (S, Ty, Name, InsertBefore);
3056 case BitCast:
3057 return new BitCastInst(S, Ty, Name, InsertBefore);
3058 case AddrSpaceCast:
3059 return new AddrSpaceCastInst(S, Ty, Name, InsertBefore);
3060 default:
3061 llvm_unreachable("Invalid opcode provided");
3062 }
3063}
3064
3066 InsertPosition InsertBefore) {
3067 if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
3068 return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
3069 return Create(Instruction::ZExt, S, Ty, Name, InsertBefore);
3070}
3071
3073 InsertPosition InsertBefore) {
3074 if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
3075 return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
3076 return Create(Instruction::SExt, S, Ty, Name, InsertBefore);
3077}
3078
3080 InsertPosition InsertBefore) {
3081 if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
3082 return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
3083 return Create(Instruction::Trunc, S, Ty, Name, InsertBefore);
3084}
3085
3086/// Create a BitCast or a PtrToInt cast instruction
3088 InsertPosition InsertBefore) {
3089 assert(S->getType()->isPtrOrPtrVectorTy() && "Invalid cast");
3090 assert((Ty->isIntOrIntVectorTy() || Ty->isPtrOrPtrVectorTy()) &&
3091 "Invalid cast");
3092 assert(Ty->isVectorTy() == S->getType()->isVectorTy() && "Invalid cast");
3093 assert((!Ty->isVectorTy() ||
3094 cast<VectorType>(Ty)->getElementCount() ==
3095 cast<VectorType>(S->getType())->getElementCount()) &&
3096 "Invalid cast");
3097
3098 if (Ty->isIntOrIntVectorTy())
3099 return Create(Instruction::PtrToInt, S, Ty, Name, InsertBefore);
3100
3101 return CreatePointerBitCastOrAddrSpaceCast(S, Ty, Name, InsertBefore);
3102}
3103
3105 Value *S, Type *Ty, const Twine &Name, InsertPosition InsertBefore) {
3106 assert(S->getType()->isPtrOrPtrVectorTy() && "Invalid cast");
3107 assert(Ty->isPtrOrPtrVectorTy() && "Invalid cast");
3108
3109 if (S->getType()->getPointerAddressSpace() != Ty->getPointerAddressSpace())
3110 return Create(Instruction::AddrSpaceCast, S, Ty, Name, InsertBefore);
3111
3112 return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
3113}
3114
3116 const Twine &Name,
3117 InsertPosition InsertBefore) {
3118 if (S->getType()->isPointerTy() && Ty->isIntegerTy())
3119 return Create(Instruction::PtrToInt, S, Ty, Name, InsertBefore);
3120 if (S->getType()->isIntegerTy() && Ty->isPointerTy())
3121 return Create(Instruction::IntToPtr, S, Ty, Name, InsertBefore);
3122
3123 return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
3124}
3125
3127 const Twine &Name,
3128 InsertPosition InsertBefore) {
3129 assert(C->getType()->isIntOrIntVectorTy() && Ty->isIntOrIntVectorTy() &&
3130 "Invalid integer cast");
3131 unsigned SrcBits = C->getType()->getScalarSizeInBits();
3132 unsigned DstBits = Ty->getScalarSizeInBits();
3133 Instruction::CastOps opcode =
3134 (SrcBits == DstBits ? Instruction::BitCast :
3135 (SrcBits > DstBits ? Instruction::Trunc :
3136 (isSigned ? Instruction::SExt : Instruction::ZExt)));
3137 return Create(opcode, C, Ty, Name, InsertBefore);
3138}
3139
3141 InsertPosition InsertBefore) {
3142 assert(C->getType()->isFPOrFPVectorTy() && Ty->isFPOrFPVectorTy() &&
3143 "Invalid cast");
3144 unsigned SrcBits = C->getType()->getScalarSizeInBits();
3145 unsigned DstBits = Ty->getScalarSizeInBits();
3146 assert((C->getType() == Ty || SrcBits != DstBits) && "Invalid cast");
3147 Instruction::CastOps opcode =
3148 (SrcBits == DstBits ? Instruction::BitCast :
3149 (SrcBits > DstBits ? Instruction::FPTrunc : Instruction::FPExt));
3150 return Create(opcode, C, Ty, Name, InsertBefore);
3151}
3152
3153bool CastInst::isBitCastable(Type *SrcTy, Type *DestTy) {
3154 if (!SrcTy->isFirstClassType() || !DestTy->isFirstClassType())
3155 return false;
3156
3157 if (SrcTy == DestTy)
3158 return true;
3159
3160 if (VectorType *SrcVecTy = dyn_cast<VectorType>(SrcTy)) {
3161 if (VectorType *DestVecTy = dyn_cast<VectorType>(DestTy)) {
3162 if (SrcVecTy->getElementCount() == DestVecTy->getElementCount()) {
3163 // An element by element cast. Valid if casting the elements is valid.
3164 SrcTy = SrcVecTy->getElementType();
3165 DestTy = DestVecTy->getElementType();
3166 }
3167 }
3168 }
3169
3170 if (PointerType *DestPtrTy = dyn_cast<PointerType>(DestTy)) {
3171 if (PointerType *SrcPtrTy = dyn_cast<PointerType>(SrcTy)) {
3172 return SrcPtrTy->getAddressSpace() == DestPtrTy->getAddressSpace();
3173 }
3174 }
3175
3176 TypeSize SrcBits = SrcTy->getPrimitiveSizeInBits(); // 0 for ptr
3177 TypeSize DestBits = DestTy->getPrimitiveSizeInBits(); // 0 for ptr
3178
3179 // Could still have vectors of pointers if the number of elements doesn't
3180 // match
3181 if (SrcBits.getKnownMinValue() == 0 || DestBits.getKnownMinValue() == 0)
3182 return false;
3183
3184 if (SrcBits != DestBits)
3185 return false;
3186
3187 return true;
3188}
3189
3191 const DataLayout &DL) {
3192 // ptrtoint and inttoptr are not allowed on non-integral pointers
3193 if (auto *PtrTy = dyn_cast<PointerType>(SrcTy))
3194 if (auto *IntTy = dyn_cast<IntegerType>(DestTy))
3195 return (IntTy->getBitWidth() == DL.getPointerTypeSizeInBits(PtrTy) &&
3196 !DL.isNonIntegralPointerType(PtrTy));
3197 if (auto *PtrTy = dyn_cast<PointerType>(DestTy))
3198 if (auto *IntTy = dyn_cast<IntegerType>(SrcTy))
3199 return (IntTy->getBitWidth() == DL.getPointerTypeSizeInBits(PtrTy) &&
3200 !DL.isNonIntegralPointerType(PtrTy));
3201
3202 return isBitCastable(SrcTy, DestTy);
3203}
3204
3205// Provide a way to get a "cast" where the cast opcode is inferred from the
3206// types and size of the operand. This, basically, is a parallel of the
3207// logic in the castIsValid function below. This axiom should hold:
3208// castIsValid( getCastOpcode(Val, Ty), Val, Ty)
3209// should not assert in castIsValid. In other words, this produces a "correct"
3210// casting opcode for the arguments passed to it.
3213 const Value *Src, bool SrcIsSigned, Type *DestTy, bool DestIsSigned) {
3214 Type *SrcTy = Src->getType();
3215
3216 assert(SrcTy->isFirstClassType() && DestTy->isFirstClassType() &&
3217 "Only first class types are castable!");
3218
3219 if (SrcTy == DestTy)
3220 return BitCast;
3221
3222 // FIXME: Check address space sizes here
3223 if (VectorType *SrcVecTy = dyn_cast<VectorType>(SrcTy))
3224 if (VectorType *DestVecTy = dyn_cast<VectorType>(DestTy))
3225 if (SrcVecTy->getElementCount() == DestVecTy->getElementCount()) {
3226 // An element by element cast. Find the appropriate opcode based on the
3227 // element types.
3228 SrcTy = SrcVecTy->getElementType();
3229 DestTy = DestVecTy->getElementType();
3230 }
3231
3232 // Get the bit sizes, we'll need these
3233 // FIXME: This doesn't work for scalable vector types with different element
3234 // counts that don't call getElementType above.
3235 unsigned SrcBits =
3236 SrcTy->getPrimitiveSizeInBits().getFixedValue(); // 0 for ptr
3237 unsigned DestBits =
3238 DestTy->getPrimitiveSizeInBits().getFixedValue(); // 0 for ptr
3239
3240 // Run through the possibilities ...
3241 if (DestTy->isIntegerTy()) { // Casting to integral
3242 if (SrcTy->isIntegerTy()) { // Casting from integral
3243 if (DestBits < SrcBits)
3244 return Trunc; // int -> smaller int
3245 else if (DestBits > SrcBits) { // its an extension
3246 if (SrcIsSigned)
3247 return SExt; // signed -> SEXT
3248 else
3249 return ZExt; // unsigned -> ZEXT
3250 } else {
3251 return BitCast; // Same size, No-op cast
3252 }
3253 } else if (SrcTy->isFloatingPointTy()) { // Casting from floating pt
3254 if (DestIsSigned)
3255 return FPToSI; // FP -> sint
3256 else
3257 return FPToUI; // FP -> uint
3258 } else if (SrcTy->isVectorTy()) {
3259 assert(DestBits == SrcBits &&
3260 "Casting vector to integer of different width");
3261 return BitCast; // Same size, no-op cast
3262 } else {
3263 assert(SrcTy->isPointerTy() &&
3264 "Casting from a value that is not first-class type");
3265 return PtrToInt; // ptr -> int
3266 }
3267 } else if (DestTy->isFloatingPointTy()) { // Casting to floating pt
3268 if (SrcTy->isIntegerTy()) { // Casting from integral
3269 if (SrcIsSigned)
3270 return SIToFP; // sint -> FP
3271 else
3272 return UIToFP; // uint -> FP
3273 } else if (SrcTy->isFloatingPointTy()) { // Casting from floating pt
3274 if (DestBits < SrcBits) {
3275 return FPTrunc; // FP -> smaller FP
3276 } else if (DestBits > SrcBits) {
3277 return FPExt; // FP -> larger FP
3278 } else {
3279 return BitCast; // same size, no-op cast
3280 }
3281 } else if (SrcTy->isVectorTy()) {
3282 assert(DestBits == SrcBits &&
3283 "Casting vector to floating point of different width");
3284 return BitCast; // same size, no-op cast
3285 }
3286 llvm_unreachable("Casting pointer or non-first class to float");
3287 } else if (DestTy->isVectorTy()) {
3288 assert(DestBits == SrcBits &&
3289 "Illegal cast to vector (wrong type or size)");
3290 return BitCast;
3291 } else if (DestTy->isPointerTy()) {
3292 if (SrcTy->isPointerTy()) {
3293 if (DestTy->getPointerAddressSpace() != SrcTy->getPointerAddressSpace())
3294 return AddrSpaceCast;
3295 return BitCast; // ptr -> ptr
3296 } else if (SrcTy->isIntegerTy()) {
3297 return IntToPtr; // int -> ptr
3298 }
3299 llvm_unreachable("Casting pointer to other than pointer or int");
3300 }
3301 llvm_unreachable("Casting to type that is not first-class");
3302}
3303
3304//===----------------------------------------------------------------------===//
3305// CastInst SubClass Constructors
3306//===----------------------------------------------------------------------===//
3307
3308/// Check that the construction parameters for a CastInst are correct. This
3309/// could be broken out into the separate constructors but it is useful to have
3310/// it in one place and to eliminate the redundant code for getting the sizes
3311/// of the types involved.
3312bool
3314 if (!SrcTy->isFirstClassType() || !DstTy->isFirstClassType() ||
3315 SrcTy->isAggregateType() || DstTy->isAggregateType())
3316 return false;
3317
3318 // Get the size of the types in bits, and whether we are dealing
3319 // with vector types, we'll need this later.
3320 bool SrcIsVec = isa<VectorType>(SrcTy);
3321 bool DstIsVec = isa<VectorType>(DstTy);
3322 unsigned SrcScalarBitSize = SrcTy->getScalarSizeInBits();
3323 unsigned DstScalarBitSize = DstTy->getScalarSizeInBits();
3324
3325 // If these are vector types, get the lengths of the vectors (using zero for
3326 // scalar types means that checking that vector lengths match also checks that
3327 // scalars are not being converted to vectors or vectors to scalars).
3328 ElementCount SrcEC = SrcIsVec ? cast<VectorType>(SrcTy)->getElementCount()
3330 ElementCount DstEC = DstIsVec ? cast<VectorType>(DstTy)->getElementCount()
3332
3333 // Switch on the opcode provided
3334 switch (op) {
3335 default: return false; // This is an input error
3336 case Instruction::Trunc:
3337 return SrcTy->isIntOrIntVectorTy() && DstTy->isIntOrIntVectorTy() &&
3338 SrcEC == DstEC && SrcScalarBitSize > DstScalarBitSize;
3339 case Instruction::ZExt:
3340 return SrcTy->isIntOrIntVectorTy() && DstTy->isIntOrIntVectorTy() &&
3341 SrcEC == DstEC && SrcScalarBitSize < DstScalarBitSize;
3342 case Instruction::SExt:
3343 return SrcTy->isIntOrIntVectorTy() && DstTy->isIntOrIntVectorTy() &&
3344 SrcEC == DstEC && SrcScalarBitSize < DstScalarBitSize;
3345 case Instruction::FPTrunc:
3346 return SrcTy->isFPOrFPVectorTy() && DstTy->isFPOrFPVectorTy() &&
3347 SrcEC == DstEC && SrcScalarBitSize > DstScalarBitSize;
3348 case Instruction::FPExt:
3349 return SrcTy->isFPOrFPVectorTy() && DstTy->isFPOrFPVectorTy() &&
3350 SrcEC == DstEC && SrcScalarBitSize < DstScalarBitSize;
3351 case Instruction::UIToFP:
3352 case Instruction::SIToFP:
3353 return SrcTy->isIntOrIntVectorTy() && DstTy->isFPOrFPVectorTy() &&
3354 SrcEC == DstEC;
3355 case Instruction::FPToUI:
3356 case Instruction::FPToSI:
3357 return SrcTy->isFPOrFPVectorTy() && DstTy->isIntOrIntVectorTy() &&
3358 SrcEC == DstEC;
3359 case Instruction::PtrToAddr:
3360 case Instruction::PtrToInt:
3361 if (SrcEC != DstEC)
3362 return false;
3363 return SrcTy->isPtrOrPtrVectorTy() && DstTy->isIntOrIntVectorTy();
3364 case Instruction::IntToPtr:
3365 if (SrcEC != DstEC)
3366 return false;
3367 return SrcTy->isIntOrIntVectorTy() && DstTy->isPtrOrPtrVectorTy();
3368 case Instruction::BitCast: {
3369 PointerType *SrcPtrTy = dyn_cast<PointerType>(SrcTy->getScalarType());
3370 PointerType *DstPtrTy = dyn_cast<PointerType>(DstTy->getScalarType());
3371
3372 // BitCast implies a no-op cast of type only. No bits change.
3373 // However, you can't cast pointers to anything but pointers.
3374 if (!SrcPtrTy != !DstPtrTy)
3375 return false;
3376
3377 // For non-pointer cases, the cast is okay if the source and destination bit
3378 // widths are identical.
3379 if (!SrcPtrTy)
3380 return SrcTy->getPrimitiveSizeInBits() == DstTy->getPrimitiveSizeInBits();
3381
3382 // If both are pointers then the address spaces must match.
3383 if (SrcPtrTy->getAddressSpace() != DstPtrTy->getAddressSpace())
3384 return false;
3385
3386 // A vector of pointers must have the same number of elements.
3387 if (SrcIsVec && DstIsVec)
3388 return SrcEC == DstEC;
3389 if (SrcIsVec)
3390 return SrcEC == ElementCount::getFixed(1);
3391 if (DstIsVec)
3392 return DstEC == ElementCount::getFixed(1);
3393
3394 return true;
3395 }
3396 case Instruction::AddrSpaceCast: {
3397 PointerType *SrcPtrTy = dyn_cast<PointerType>(SrcTy->getScalarType());
3398 if (!SrcPtrTy)
3399 return false;
3400
3401 PointerType *DstPtrTy = dyn_cast<PointerType>(DstTy->getScalarType());
3402 if (!DstPtrTy)
3403 return false;
3404
3405 if (SrcPtrTy->getAddressSpace() == DstPtrTy->getAddressSpace())
3406 return false;
3407
3408 return SrcEC == DstEC;
3409 }
3410 }
3411}
3412
3414 InsertPosition InsertBefore)
3415 : CastInst(Ty, Trunc, S, Name, InsertBefore) {
3416 assert(castIsValid(getOpcode(), S, Ty) && "Illegal Trunc");
3417}
3418
3419ZExtInst::ZExtInst(Value *S, Type *Ty, const Twine &Name,
3420 InsertPosition InsertBefore)
3421 : CastInst(Ty, ZExt, S, Name, InsertBefore) {
3422 assert(castIsValid(getOpcode(), S, Ty) && "Illegal ZExt");
3423}
3424
3425SExtInst::SExtInst(Value *S, Type *Ty, const Twine &Name,
3426 InsertPosition InsertBefore)
3427 : CastInst(Ty, SExt, S, Name, InsertBefore) {
3428 assert(castIsValid(getOpcode(), S, Ty) && "Illegal SExt");
3429}
3430
3432 InsertPosition InsertBefore)
3433 : CastInst(Ty, FPTrunc, S, Name, InsertBefore) {
3434 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPTrunc");
3435}
3436
3438 InsertPosition InsertBefore)
3439 : CastInst(Ty, FPExt, S, Name, InsertBefore) {
3440 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPExt");
3441}
3442
3444 InsertPosition InsertBefore)
3445 : CastInst(Ty, UIToFP, S, Name, InsertBefore) {
3446 assert(castIsValid(getOpcode(), S, Ty) && "Illegal UIToFP");
3447}
3448
3450 InsertPosition InsertBefore)
3451 : CastInst(Ty, SIToFP, S, Name, InsertBefore) {
3452 assert(castIsValid(getOpcode(), S, Ty) && "Illegal SIToFP");
3453}
3454
3456 InsertPosition InsertBefore)
3457 : CastInst(Ty, FPToUI, S, Name, InsertBefore) {
3458 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPToUI");
3459}
3460
3462 InsertPosition InsertBefore)
3463 : CastInst(Ty, FPToSI, S, Name, InsertBefore) {
3464 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPToSI");
3465}
3466
3468 InsertPosition InsertBefore)
3469 : CastInst(Ty, PtrToInt, S, Name, InsertBefore) {
3470 assert(castIsValid(getOpcode(), S, Ty) && "Illegal PtrToInt");
3471}
3472
3474 InsertPosition InsertBefore)
3475 : CastInst(Ty, PtrToAddr, S, Name, InsertBefore) {
3476 assert(castIsValid(getOpcode(), S, Ty) && "Illegal PtrToAddr");
3477}
3478
3480 InsertPosition InsertBefore)
3481 : CastInst(Ty, IntToPtr, S, Name, InsertBefore) {
3482 assert(castIsValid(getOpcode(), S, Ty) && "Illegal IntToPtr");
3483}
3484
3486 InsertPosition InsertBefore)
3487 : CastInst(Ty, BitCast, S, Name, InsertBefore) {
3488 assert(castIsValid(getOpcode(), S, Ty) && "Illegal BitCast");
3489}
3490
3492 InsertPosition InsertBefore)
3493 : CastInst(Ty, AddrSpaceCast, S, Name, InsertBefore) {
3494 assert(castIsValid(getOpcode(), S, Ty) && "Illegal AddrSpaceCast");
3495}
3496
3497//===----------------------------------------------------------------------===//
3498// CmpInst Classes
3499//===----------------------------------------------------------------------===//
3500
3502 Value *RHS, const Twine &Name, InsertPosition InsertBefore,
3503 Instruction *FlagsSource)
3504 : Instruction(ty, op, AllocMarker, InsertBefore) {
3505 Op<0>() = LHS;
3506 Op<1>() = RHS;
3507 setPredicate(predicate);
3508 setName(Name);
3509 if (FlagsSource)
3510 copyIRFlags(FlagsSource);
3511}
3512
3514 const Twine &Name, InsertPosition InsertBefore) {
3515 if (Op == Instruction::ICmp) {
3516 if (InsertBefore.isValid())
3517 return new ICmpInst(InsertBefore, CmpInst::Predicate(predicate),
3518 S1, S2, Name);
3519 else
3520 return new ICmpInst(CmpInst::Predicate(predicate),
3521 S1, S2, Name);
3522 }
3523
3524 if (InsertBefore.isValid())
3525 return new FCmpInst(InsertBefore, CmpInst::Predicate(predicate),
3526 S1, S2, Name);
3527 else
3528 return new FCmpInst(CmpInst::Predicate(predicate),
3529 S1, S2, Name);
3530}
3531
3533 Value *S2,
3534 const Instruction *FlagsSource,
3535 const Twine &Name,
3536 InsertPosition InsertBefore) {
3537 CmpInst *Inst = Create(Op, Pred, S1, S2, Name, InsertBefore);
3538 Inst->copyIRFlags(FlagsSource);
3539 return Inst;
3540}
3541
3543 if (ICmpInst *IC = dyn_cast<ICmpInst>(this))
3544 IC->swapOperands();
3545 else
3546 cast<FCmpInst>(this)->swapOperands();
3547}
3548
3550 if (const ICmpInst *IC = dyn_cast<ICmpInst>(this))
3551 return IC->isCommutative();
3552 return cast<FCmpInst>(this)->isCommutative();
3553}
3554
3557 return ICmpInst::isEquality(P);
3559 return FCmpInst::isEquality(P);
3560 llvm_unreachable("Unsupported predicate kind");
3561}
3562
3563// Returns true if either operand of CmpInst is a provably non-zero
3564// floating-point constant.
3565static bool hasNonZeroFPOperands(const CmpInst *Cmp) {
3566 auto *LHS = dyn_cast<Constant>(Cmp->getOperand(0));
3567 auto *RHS = dyn_cast<Constant>(Cmp->getOperand(1));
3568 if (auto *Const = LHS ? LHS : RHS) {
3569 using namespace llvm::PatternMatch;
3570 return match(Const, m_NonZeroNotDenormalFP());
3571 }
3572 return false;
3573}
3574
3575// Floating-point equality is not an equivalence when comparing +0.0 with
3576// -0.0, when comparing NaN with another value, or when flushing
3577// denormals-to-zero.
3578bool CmpInst::isEquivalence(bool Invert) const {
3579 switch (Invert ? getInversePredicate() : getPredicate()) {
3581 return true;
3583 if (!hasNoNaNs())
3584 return false;
3585 [[fallthrough]];
3587 return hasNonZeroFPOperands(this);
3588 default:
3589 return false;
3590 }
3591}
3592
3594 switch (pred) {
3595 default: llvm_unreachable("Unknown cmp predicate!");
3596 case ICMP_EQ: return ICMP_NE;
3597 case ICMP_NE: return ICMP_EQ;
3598 case ICMP_UGT: return ICMP_ULE;
3599 case ICMP_ULT: return ICMP_UGE;
3600 case ICMP_UGE: return ICMP_ULT;
3601 case ICMP_ULE: return ICMP_UGT;
3602 case ICMP_SGT: return ICMP_SLE;
3603 case ICMP_SLT: return ICMP_SGE;
3604 case ICMP_SGE: return ICMP_SLT;
3605 case ICMP_SLE: return ICMP_SGT;
3606
3607 case FCMP_OEQ: return FCMP_UNE;
3608 case FCMP_ONE: return FCMP_UEQ;
3609 case FCMP_OGT: return FCMP_ULE;
3610 case FCMP_OLT: return FCMP_UGE;
3611 case FCMP_OGE: return FCMP_ULT;
3612 case FCMP_OLE: return FCMP_UGT;
3613 case FCMP_UEQ: return FCMP_ONE;
3614 case FCMP_UNE: return FCMP_OEQ;
3615 case FCMP_UGT: return FCMP_OLE;
3616 case FCMP_ULT: return FCMP_OGE;
3617 case FCMP_UGE: return FCMP_OLT;
3618 case FCMP_ULE: return FCMP_OGT;
3619 case FCMP_ORD: return FCMP_UNO;
3620 case FCMP_UNO: return FCMP_ORD;
3621 case FCMP_TRUE: return FCMP_FALSE;
3622 case FCMP_FALSE: return FCMP_TRUE;
3623 }
3624}
3625
3627 switch (Pred) {
3628 default: return "unknown";
3629 case FCmpInst::FCMP_FALSE: return "false";
3630 case FCmpInst::FCMP_OEQ: return "oeq";
3631 case FCmpInst::FCMP_OGT: return "ogt";
3632 case FCmpInst::FCMP_OGE: return "oge";
3633 case FCmpInst::FCMP_OLT: return "olt";
3634 case FCmpInst::FCMP_OLE: return "ole";
3635 case FCmpInst::FCMP_ONE: return "one";
3636 case FCmpInst::FCMP_ORD: return "ord";
3637 case FCmpInst::FCMP_UNO: return "uno";
3638 case FCmpInst::FCMP_UEQ: return "ueq";
3639 case FCmpInst::FCMP_UGT: return "ugt";
3640 case FCmpInst::FCMP_UGE: return "uge";
3641 case FCmpInst::FCMP_ULT: return "ult";
3642 case FCmpInst::FCMP_ULE: return "ule";
3643 case FCmpInst::FCMP_UNE: return "une";
3644 case FCmpInst::FCMP_TRUE: return "true";
3645 case ICmpInst::ICMP_EQ: return "eq";
3646 case ICmpInst::ICMP_NE: return "ne";
3647 case ICmpInst::ICMP_SGT: return "sgt";
3648 case ICmpInst::ICMP_SGE: return "sge";
3649 case ICmpInst::ICMP_SLT: return "slt";
3650 case ICmpInst::ICMP_SLE: return "sle";
3651 case ICmpInst::ICMP_UGT: return "ugt";
3652 case ICmpInst::ICMP_UGE: return "uge";
3653 case ICmpInst::ICMP_ULT: return "ult";
3654 case ICmpInst::ICMP_ULE: return "ule";
3655 }
3656}
3657
3659 OS << CmpInst::getPredicateName(Pred);
3660 return OS;
3661}
3662
3664 switch (pred) {
3665 default: llvm_unreachable("Unknown icmp predicate!");
3666 case ICMP_EQ: case ICMP_NE:
3667 case ICMP_SGT: case ICMP_SLT: case ICMP_SGE: case ICMP_SLE:
3668 return pred;
3669 case ICMP_UGT: return ICMP_SGT;
3670 case ICMP_ULT: return ICMP_SLT;
3671 case ICMP_UGE: return ICMP_SGE;
3672 case ICMP_ULE: return ICMP_SLE;
3673 }
3674}
3675
3677 switch (pred) {
3678 default: llvm_unreachable("Unknown icmp predicate!");
3679 case ICMP_EQ: case ICMP_NE:
3680 case ICMP_UGT: case ICMP_ULT: case ICMP_UGE: case ICMP_ULE:
3681 return pred;
3682 case ICMP_SGT: return ICMP_UGT;
3683 case ICMP_SLT: return ICMP_ULT;
3684 case ICMP_SGE: return ICMP_UGE;
3685 case ICMP_SLE: return ICMP_ULE;
3686 }
3687}
3688
3690 switch (pred) {
3691 default: llvm_unreachable("Unknown cmp predicate!");
3692 case ICMP_EQ: case ICMP_NE:
3693 return pred;
3694 case ICMP_SGT: return ICMP_SLT;
3695 case ICMP_SLT: return ICMP_SGT;
3696 case ICMP_SGE: return ICMP_SLE;
3697 case ICMP_SLE: return ICMP_SGE;
3698 case ICMP_UGT: return ICMP_ULT;
3699 case ICMP_ULT: return ICMP_UGT;
3700 case ICMP_UGE: return ICMP_ULE;
3701 case ICMP_ULE: return ICMP_UGE;
3702
3703 case FCMP_FALSE: case FCMP_TRUE:
3704 case FCMP_OEQ: case FCMP_ONE:
3705 case FCMP_UEQ: case FCMP_UNE:
3706 case FCMP_ORD: case FCMP_UNO:
3707 return pred;
3708 case FCMP_OGT: return FCMP_OLT;
3709 case FCMP_OLT: return FCMP_OGT;
3710 case FCMP_OGE: return FCMP_OLE;
3711 case FCMP_OLE: return FCMP_OGE;
3712 case FCMP_UGT: return FCMP_ULT;
3713 case FCMP_ULT: return FCMP_UGT;
3714 case FCMP_UGE: return FCMP_ULE;
3715 case FCMP_ULE: return FCMP_UGE;
3716 }
3717}
3718
3720 switch (pred) {
3721 case ICMP_SGE:
3722 case ICMP_SLE:
3723 case ICMP_UGE:
3724 case ICMP_ULE:
3725 case FCMP_OGE:
3726 case FCMP_OLE:
3727 case FCMP_UGE:
3728 case FCMP_ULE:
3729 return true;
3730 default:
3731 return false;
3732 }
3733}
3734
3736 switch (pred) {
3737 case ICMP_SGT:
3738 case ICMP_SLT:
3739 case ICMP_UGT:
3740 case ICMP_ULT:
3741 case FCMP_OGT:
3742 case FCMP_OLT:
3743 case FCMP_UGT:
3744 case FCMP_ULT:
3745 return true;
3746 default:
3747 return false;
3748 }
3749}
3750
3752 switch (pred) {
3753 case ICMP_SGE:
3754 return ICMP_SGT;
3755 case ICMP_SLE:
3756 return ICMP_SLT;
3757 case ICMP_UGE:
3758 return ICMP_UGT;
3759 case ICMP_ULE:
3760 return ICMP_ULT;
3761 case FCMP_OGE:
3762 return FCMP_OGT;
3763 case FCMP_OLE:
3764 return FCMP_OLT;
3765 case FCMP_UGE:
3766 return FCMP_UGT;
3767 case FCMP_ULE:
3768 return FCMP_ULT;
3769 default:
3770 return pred;
3771 }
3772}
3773
3775 switch (pred) {
3776 case ICMP_SGT:
3777 return ICMP_SGE;
3778 case ICMP_SLT:
3779 return ICMP_SLE;
3780 case ICMP_UGT:
3781 return ICMP_UGE;
3782 case ICMP_ULT:
3783 return ICMP_ULE;
3784 case FCMP_OGT:
3785 return FCMP_OGE;
3786 case FCMP_OLT:
3787 return FCMP_OLE;
3788 case FCMP_UGT:
3789 return FCMP_UGE;
3790 case FCMP_ULT:
3791 return FCMP_ULE;
3792 default:
3793 return pred;
3794 }
3795}
3796
3798 assert(CmpInst::isRelational(pred) && "Call only with relational predicate!");
3799
3800 if (isStrictPredicate(pred))
3801 return getNonStrictPredicate(pred);
3802 if (isNonStrictPredicate(pred))
3803 return getStrictPredicate(pred);
3804
3805 llvm_unreachable("Unknown predicate!");
3806}
3807
3809 switch (predicate) {
3810 default: return false;
3812 case ICmpInst::ICMP_UGE: return true;
3813 }
3814}
3815
3817 switch (predicate) {
3818 default: return false;
3820 case ICmpInst::ICMP_SGE: return true;
3821 }
3822}
3823
3824bool ICmpInst::compare(const APInt &LHS, const APInt &RHS,
3825 ICmpInst::Predicate Pred) {
3826 assert(ICmpInst::isIntPredicate(Pred) && "Only for integer predicates!");
3827 switch (Pred) {
3829 return LHS.eq(RHS);
3831 return LHS.ne(RHS);
3833 return LHS.ugt(RHS);
3835 return LHS.uge(RHS);
3837 return LHS.ult(RHS);
3839 return LHS.ule(RHS);
3841 return LHS.sgt(RHS);
3843 return LHS.sge(RHS);
3845 return LHS.slt(RHS);
3847 return LHS.sle(RHS);
3848 default:
3849 llvm_unreachable("Unexpected non-integer predicate.");
3850 };
3851}
3852
3853bool FCmpInst::compare(const APFloat &LHS, const APFloat &RHS,
3854 FCmpInst::Predicate Pred) {
3855 APFloat::cmpResult R = LHS.compare(RHS);
3856 switch (Pred) {
3857 default:
3858 llvm_unreachable("Invalid FCmp Predicate");
3860 return false;
3862 return true;
3863 case FCmpInst::FCMP_UNO:
3864 return R == APFloat::cmpUnordered;
3865 case FCmpInst::FCMP_ORD:
3866 return R != APFloat::cmpUnordered;
3867 case FCmpInst::FCMP_UEQ:
3868 return R == APFloat::cmpUnordered || R == APFloat::cmpEqual;
3869 case FCmpInst::FCMP_OEQ:
3870 return R == APFloat::cmpEqual;
3871 case FCmpInst::FCMP_UNE:
3872 return R != APFloat::cmpEqual;
3873 case FCmpInst::FCMP_ONE:
3875 case FCmpInst::FCMP_ULT:
3876 return R == APFloat::cmpUnordered || R == APFloat::cmpLessThan;
3877 case FCmpInst::FCMP_OLT:
3878 return R == APFloat::cmpLessThan;
3879 case FCmpInst::FCMP_UGT:
3881 case FCmpInst::FCMP_OGT:
3882 return R == APFloat::cmpGreaterThan;
3883 case FCmpInst::FCMP_ULE:
3884 return R != APFloat::cmpGreaterThan;
3885 case FCmpInst::FCMP_OLE:
3886 return R == APFloat::cmpLessThan || R == APFloat::cmpEqual;
3887 case FCmpInst::FCMP_UGE:
3888 return R != APFloat::cmpLessThan;
3889 case FCmpInst::FCMP_OGE:
3890 return R == APFloat::cmpGreaterThan || R == APFloat::cmpEqual;
3891 }
3892}
3893
3894std::optional<bool> ICmpInst::compare(const KnownBits &LHS,
3895 const KnownBits &RHS,
3896 ICmpInst::Predicate Pred) {
3897 switch (Pred) {
3898 case ICmpInst::ICMP_EQ:
3899 return KnownBits::eq(LHS, RHS);
3900 case ICmpInst::ICMP_NE:
3901 return KnownBits::ne(LHS, RHS);
3902 case ICmpInst::ICMP_UGE:
3903 return KnownBits::uge(LHS, RHS);
3904 case ICmpInst::ICMP_UGT:
3905 return KnownBits::ugt(LHS, RHS);
3906 case ICmpInst::ICMP_ULE:
3907 return KnownBits::ule(LHS, RHS);
3908 case ICmpInst::ICMP_ULT:
3909 return KnownBits::ult(LHS, RHS);
3910 case ICmpInst::ICMP_SGE:
3911 return KnownBits::sge(LHS, RHS);
3912 case ICmpInst::ICMP_SGT:
3913 return KnownBits::sgt(LHS, RHS);
3914 case ICmpInst::ICMP_SLE:
3915 return KnownBits::sle(LHS, RHS);
3916 case ICmpInst::ICMP_SLT:
3917 return KnownBits::slt(LHS, RHS);
3918 default:
3919 llvm_unreachable("Unexpected non-integer predicate.");
3920 }
3921}
3922
3924 if (CmpInst::isEquality(pred))
3925 return pred;
3926 if (isSigned(pred))
3927 return getUnsignedPredicate(pred);
3928 if (isUnsigned(pred))
3929 return getSignedPredicate(pred);
3930
3931 llvm_unreachable("Unknown predicate!");
3932}
3933
3935 switch (predicate) {
3936 default: return false;
3939 case FCmpInst::FCMP_ORD: return true;
3940 }
3941}
3942
3944 switch (predicate) {
3945 default: return false;
3948 case FCmpInst::FCMP_UNO: return true;
3949 }
3950}
3951
3953 switch(predicate) {
3954 default: return false;
3955 case ICMP_EQ: case ICMP_UGE: case ICMP_ULE: case ICMP_SGE: case ICMP_SLE:
3956 case FCMP_TRUE: case FCMP_UEQ: case FCMP_UGE: case FCMP_ULE: return true;
3957 }
3958}
3959
3961 switch(predicate) {
3962 case ICMP_NE: case ICMP_UGT: case ICMP_ULT: case ICMP_SGT: case ICMP_SLT:
3963 case FCMP_FALSE: case FCMP_ONE: case FCMP_OGT: case FCMP_OLT: return true;
3964 default: return false;
3965 }
3966}
3967
3969 // If the predicates match, then we know the first condition implies the
3970 // second is true.
3971 if (CmpPredicate::getMatching(Pred1, Pred2))
3972 return true;
3973
3974 if (Pred1.hasSameSign() && CmpInst::isSigned(Pred2))
3976 else if (Pred2.hasSameSign() && CmpInst::isSigned(Pred1))
3978
3979 switch (Pred1) {
3980 default:
3981 break;
3982 case CmpInst::ICMP_EQ:
3983 // A == B implies A >=u B, A <=u B, A >=s B, and A <=s B are true.
3984 return Pred2 == CmpInst::ICMP_UGE || Pred2 == CmpInst::ICMP_ULE ||
3985 Pred2 == CmpInst::ICMP_SGE || Pred2 == CmpInst::ICMP_SLE;
3986 case CmpInst::ICMP_UGT: // A >u B implies A != B and A >=u B are true.
3987 return Pred2 == CmpInst::ICMP_NE || Pred2 == CmpInst::ICMP_UGE;
3988 case CmpInst::ICMP_ULT: // A <u B implies A != B and A <=u B are true.
3989 return Pred2 == CmpInst::ICMP_NE || Pred2 == CmpInst::ICMP_ULE;
3990 case CmpInst::ICMP_SGT: // A >s B implies A != B and A >=s B are true.
3991 return Pred2 == CmpInst::ICMP_NE || Pred2 == CmpInst::ICMP_SGE;
3992 case CmpInst::ICMP_SLT: // A <s B implies A != B and A <=s B are true.
3993 return Pred2 == CmpInst::ICMP_NE || Pred2 == CmpInst::ICMP_SLE;
3994 }
3995 return false;
3996}
3997
3999 CmpPredicate Pred2) {
4000 return isImpliedTrueByMatchingCmp(Pred1,
4002}
4003
4005 CmpPredicate Pred2) {
4006 if (isImpliedTrueByMatchingCmp(Pred1, Pred2))
4007 return true;
4008 if (isImpliedFalseByMatchingCmp(Pred1, Pred2))
4009 return false;
4010 return std::nullopt;
4011}
4012
4013//===----------------------------------------------------------------------===//
4014// CmpPredicate Implementation
4015//===----------------------------------------------------------------------===//
4016
4017std::optional<CmpPredicate> CmpPredicate::getMatching(CmpPredicate A,
4018 CmpPredicate B) {
4019 if (A.Pred == B.Pred)
4020 return A.HasSameSign == B.HasSameSign ? A : CmpPredicate(A.Pred);
4022 return {};
4023 if (A.HasSameSign &&
4025 return B.Pred;
4026 if (B.HasSameSign &&
4028 return A.Pred;
4029 return {};
4030}
4031
4035
4037 if (auto *ICI = dyn_cast<ICmpInst>(Cmp))
4038 return ICI->getCmpPredicate();
4039 return Cmp->getPredicate();
4040}
4041
4045
4047 return getSwapped(get(Cmp));
4048}
4049
4050//===----------------------------------------------------------------------===//
4051// SwitchInst Implementation
4052//===----------------------------------------------------------------------===//
4053
4054void SwitchInst::init(Value *Value, BasicBlock *Default, unsigned NumReserved) {
4055 assert(Value && Default && NumReserved);
4056 ReservedSpace = NumReserved;
4058 allocHungoffUses(ReservedSpace);
4059
4060 Op<0>() = Value;
4061 Op<1>() = Default;
4062}
4063
4064/// SwitchInst ctor - Create a new switch instruction, specifying a value to
4065/// switch on and a default destination. The number of additional cases can
4066/// be specified here to make memory allocation more efficient. This
4067/// constructor can also autoinsert before another instruction.
4068SwitchInst::SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases,
4069 InsertPosition InsertBefore)
4070 : Instruction(Type::getVoidTy(Value->getContext()), Instruction::Switch,
4071 AllocMarker, InsertBefore) {
4072 init(Value, Default, 2 + NumCases);
4073}
4074
4075SwitchInst::SwitchInst(const SwitchInst &SI)
4076 : Instruction(SI.getType(), Instruction::Switch, AllocMarker) {
4077 init(SI.getCondition(), SI.getDefaultDest(), SI.getNumOperands());
4078 setNumHungOffUseOperands(SI.getNumOperands());
4079 Use *OL = getOperandList();
4080 ConstantInt **VL = case_values();
4081 const Use *InOL = SI.getOperandList();
4082 ConstantInt *const *InVL = SI.case_values();
4083 for (unsigned i = 2, E = SI.getNumOperands(); i != E; ++i) {
4084 OL[i] = InOL[i];
4085 VL[i - 2] = InVL[i - 2];
4086 }
4087 SubclassOptionalData = SI.SubclassOptionalData;
4088}
4089
4090/// addCase - Add an entry to the switch instruction...
4091///
4093 unsigned NewCaseIdx = getNumCases();
4094 unsigned OpNo = getNumOperands();
4095 if (OpNo + 1 > ReservedSpace)
4096 growOperands(); // Get more space!
4097 // Initialize some new operands.
4098 assert(OpNo < ReservedSpace && "Growing didn't work!");
4099 setNumHungOffUseOperands(OpNo + 1);
4100 CaseHandle Case(this, NewCaseIdx);
4101 Case.setValue(OnVal);
4102 Case.setSuccessor(Dest);
4103}
4104
4105/// removeCase - This method removes the specified case and its successor
4106/// from the switch instruction.
4108 unsigned idx = I->getCaseIndex();
4109
4110 assert(2 + idx < getNumOperands() && "Case index out of range!!!");
4111
4112 unsigned NumOps = getNumOperands();
4113 Use *OL = getOperandList();
4114 ConstantInt **VL = case_values();
4115
4116 // Overwrite this case with the end of the list.
4117 if (2 + idx + 1 != NumOps) {
4118 OL[2 + idx] = OL[NumOps - 1];
4119 VL[idx] = VL[NumOps - 2 - 1];
4120 }
4121
4122 // Nuke the last value.
4123 OL[NumOps - 1].set(nullptr);
4124 VL[NumOps - 2 - 1] = nullptr;
4126
4127 return CaseIt(this, idx);
4128}
4129
4130/// growOperands - grow operands - This grows the operand list in response
4131/// to a push_back style of operation. This grows the number of ops by 3 times.
4132///
4133void SwitchInst::growOperands() {
4134 unsigned e = getNumOperands();
4135 unsigned NumOps = e*3;
4136
4137 ReservedSpace = NumOps;
4138 growHungoffUses(ReservedSpace, /*WithExtraValues=*/true);
4139}
4140
4142 MDNode *ProfileData = getBranchWeightMDNode(SI);
4143 if (!ProfileData)
4144 return;
4145
4146 if (getNumBranchWeights(*ProfileData) != SI.getNumSuccessors()) {
4147 llvm_unreachable("number of prof branch_weights metadata operands does "
4148 "not correspond to number of succesors");
4149 }
4150
4152 if (!extractBranchWeights(ProfileData, Weights))
4153 return;
4154 this->Weights = std::move(Weights);
4155}
4156
4159 if (Weights) {
4160 assert(SI.getNumSuccessors() == Weights->size() &&
4161 "num of prof branch_weights must accord with num of successors");
4162 Changed = true;
4163 // Copy the last case to the place of the removed one and shrink.
4164 // This is tightly coupled with the way SwitchInst::removeCase() removes
4165 // the cases in SwitchInst::removeCase(CaseIt).
4166 (*Weights)[I->getCaseIndex() + 1] = Weights->back();
4167 Weights->pop_back();
4168 }
4169 return SI.removeCase(I);
4170}
4171
4173 auto *DestBlock = I->getCaseSuccessor();
4174 if (Weights) {
4175 auto Weight = getSuccessorWeight(I->getCaseIndex() + 1);
4176 (*Weights)[0] = Weight.value();
4177 }
4178
4179 SI.setDefaultDest(DestBlock);
4180}
4181
4183 ConstantInt *OnVal, BasicBlock *Dest,
4185 SI.addCase(OnVal, Dest);
4186
4187 if (!Weights && W && *W) {
4188 Changed = true;
4189 Weights = SmallVector<uint32_t, 8>(SI.getNumSuccessors(), 0);
4190 (*Weights)[SI.getNumSuccessors() - 1] = *W;
4191 } else if (Weights) {
4192 Changed = true;
4193 Weights->push_back(W.value_or(0));
4194 }
4195 if (Weights)
4196 assert(SI.getNumSuccessors() == Weights->size() &&
4197 "num of prof branch_weights must accord with num of successors");
4198}
4199
4202 // Instruction is erased. Mark as unchanged to not touch it in the destructor.
4203 Changed = false;
4204 if (Weights)
4205 Weights->resize(0);
4206 return SI.eraseFromParent();
4207}
4208
4211 if (!Weights)
4212 return std::nullopt;
4213 return (*Weights)[idx];
4214}
4215
4218 if (!W)
4219 return;
4220
4221 if (!Weights && *W)
4222 Weights = SmallVector<uint32_t, 8>(SI.getNumSuccessors(), 0);
4223
4224 if (Weights) {
4225 auto &OldW = (*Weights)[idx];
4226 if (*W != OldW) {
4227 Changed = true;
4228 OldW = *W;
4229 }
4230 }
4231}
4232
4235 unsigned idx) {
4236 if (MDNode *ProfileData = getBranchWeightMDNode(SI))
4237 if (ProfileData->getNumOperands() == SI.getNumSuccessors() + 1)
4238 return mdconst::extract<ConstantInt>(ProfileData->getOperand(idx + 1))
4239 ->getValue()
4240 .getZExtValue();
4241
4242 return std::nullopt;
4243}
4244
4245//===----------------------------------------------------------------------===//
4246// IndirectBrInst Implementation
4247//===----------------------------------------------------------------------===//
4248
4249void IndirectBrInst::init(Value *Address, unsigned NumDests) {
4250 assert(Address && Address->getType()->isPointerTy() &&
4251 "Address of indirectbr must be a pointer");
4252 ReservedSpace = 1+NumDests;
4254 allocHungoffUses(ReservedSpace);
4255
4256 Op<0>() = Address;
4257}
4258
4259
4260/// growOperands - grow operands - This grows the operand list in response
4261/// to a push_back style of operation. This grows the number of ops by 2 times.
4262///
4263void IndirectBrInst::growOperands() {
4264 unsigned e = getNumOperands();
4265 unsigned NumOps = e*2;
4266
4267 ReservedSpace = NumOps;
4268 growHungoffUses(ReservedSpace);
4269}
4270
4271IndirectBrInst::IndirectBrInst(Value *Address, unsigned NumCases,
4272 InsertPosition InsertBefore)
4273 : Instruction(Type::getVoidTy(Address->getContext()),
4274 Instruction::IndirectBr, AllocMarker, InsertBefore) {
4275 init(Address, NumCases);
4276}
4277
4278IndirectBrInst::IndirectBrInst(const IndirectBrInst &IBI)
4279 : Instruction(Type::getVoidTy(IBI.getContext()), Instruction::IndirectBr,
4280 AllocMarker) {
4281 NumUserOperands = IBI.NumUserOperands;
4282 allocHungoffUses(IBI.getNumOperands());
4283 Use *OL = getOperandList();
4284 const Use *InOL = IBI.getOperandList();
4285 for (unsigned i = 0, E = IBI.getNumOperands(); i != E; ++i)
4286 OL[i] = InOL[i];
4287 SubclassOptionalData = IBI.SubclassOptionalData;
4288}
4289
4290/// addDestination - Add a destination.
4291///
4293 unsigned OpNo = getNumOperands();
4294 if (OpNo+1 > ReservedSpace)
4295 growOperands(); // Get more space!
4296 // Initialize some new operands.
4297 assert(OpNo < ReservedSpace && "Growing didn't work!");
4299 getOperandList()[OpNo] = DestBB;
4300}
4301
4302/// removeDestination - This method removes the specified successor from the
4303/// indirectbr instruction.
4305 assert(idx < getNumOperands()-1 && "Successor index out of range!");
4306
4307 unsigned NumOps = getNumOperands();
4308 Use *OL = getOperandList();
4309
4310 // Replace this value with the last one.
4311 OL[idx+1] = OL[NumOps-1];
4312
4313 // Nuke the last value.
4314 OL[NumOps-1].set(nullptr);
4316}
4317
4318//===----------------------------------------------------------------------===//
4319// FreezeInst Implementation
4320//===----------------------------------------------------------------------===//
4321
4322FreezeInst::FreezeInst(Value *S, const Twine &Name, InsertPosition InsertBefore)
4323 : UnaryInstruction(S->getType(), Freeze, S, InsertBefore) {
4324 setName(Name);
4325}
4326
4327//===----------------------------------------------------------------------===//
4328// cloneImpl() implementations
4329//===----------------------------------------------------------------------===//
4330
4331// Define these methods here so vtables don't get emitted into every translation
4332// unit that uses these classes.
4333
4334GetElementPtrInst *GetElementPtrInst::cloneImpl() const {
4336 return new (AllocMarker) GetElementPtrInst(*this, AllocMarker);
4337}
4338
4342
4346
4348 return new FCmpInst(getPredicate(), Op<0>(), Op<1>());
4349}
4350
4352 return new ICmpInst(getPredicate(), Op<0>(), Op<1>());
4353}
4354
4355ExtractValueInst *ExtractValueInst::cloneImpl() const {
4356 return new ExtractValueInst(*this);
4357}
4358
4359InsertValueInst *InsertValueInst::cloneImpl() const {
4360 return new InsertValueInst(*this);
4361}
4362
4365 getOperand(0), getAlign());
4366 Result->setUsedWithInAlloca(isUsedWithInAlloca());
4367 Result->setSwiftError(isSwiftError());
4368 return Result;
4369}
4370
4372 return new LoadInst(getType(), getOperand(0), Twine(), isVolatile(),
4374}
4375
4380
4385 Result->setVolatile(isVolatile());
4386 Result->setWeak(isWeak());
4387 return Result;
4388}
4389
4391 AtomicRMWInst *Result =
4394 Result->setVolatile(isVolatile());
4395 return Result;
4396}
4397
4401
4403 return new TruncInst(getOperand(0), getType());
4404}
4405
4407 return new ZExtInst(getOperand(0), getType());
4408}
4409
4411 return new SExtInst(getOperand(0), getType());
4412}
4413
4415 return new FPTruncInst(getOperand(0), getType());
4416}
4417
4419 return new FPExtInst(getOperand(0), getType());
4420}
4421
4423 return new UIToFPInst(getOperand(0), getType());
4424}
4425
4427 return new SIToFPInst(getOperand(0), getType());
4428}
4429
4431 return new FPToUIInst(getOperand(0), getType());
4432}
4433
4435 return new FPToSIInst(getOperand(0), getType());
4436}
4437
4439 return new PtrToIntInst(getOperand(0), getType());
4440}
4441
4445
4447 return new IntToPtrInst(getOperand(0), getType());
4448}
4449
4451 return new BitCastInst(getOperand(0), getType());
4452}
4453
4457
4458CallInst *CallInst::cloneImpl() const {
4459 if (hasOperandBundles()) {
4463 return new (AllocMarker) CallInst(*this, AllocMarker);
4464 }
4466 return new (AllocMarker) CallInst(*this, AllocMarker);
4467}
4468
4469SelectInst *SelectInst::cloneImpl() const {
4471}
4472
4474 return new VAArgInst(getOperand(0), getType());
4475}
4476
4477ExtractElementInst *ExtractElementInst::cloneImpl() const {
4479}
4480
4481InsertElementInst *InsertElementInst::cloneImpl() const {
4483}
4484
4488
4489PHINode *PHINode::cloneImpl() const { return new (AllocMarker) PHINode(*this); }
4490
4491LandingPadInst *LandingPadInst::cloneImpl() const {
4492 return new LandingPadInst(*this);
4493}
4494
4495ReturnInst *ReturnInst::cloneImpl() const {
4497 return new (AllocMarker) ReturnInst(*this, AllocMarker);
4498}
4499
4500BranchInst *BranchInst::cloneImpl() const {
4502 return new (AllocMarker) BranchInst(*this, AllocMarker);
4503}
4504
4505SwitchInst *SwitchInst::cloneImpl() const { return new SwitchInst(*this); }
4506
4507IndirectBrInst *IndirectBrInst::cloneImpl() const {
4508 return new IndirectBrInst(*this);
4509}
4510
4511InvokeInst *InvokeInst::cloneImpl() const {
4512 if (hasOperandBundles()) {
4516 return new (AllocMarker) InvokeInst(*this, AllocMarker);
4517 }
4519 return new (AllocMarker) InvokeInst(*this, AllocMarker);
4520}
4521
4522CallBrInst *CallBrInst::cloneImpl() const {
4523 if (hasOperandBundles()) {
4527 return new (AllocMarker) CallBrInst(*this, AllocMarker);
4528 }
4530 return new (AllocMarker) CallBrInst(*this, AllocMarker);
4531}
4532
4533ResumeInst *ResumeInst::cloneImpl() const {
4534 return new (AllocMarker) ResumeInst(*this);
4535}
4536
4537CleanupReturnInst *CleanupReturnInst::cloneImpl() const {
4539 return new (AllocMarker) CleanupReturnInst(*this, AllocMarker);
4540}
4541
4542CatchReturnInst *CatchReturnInst::cloneImpl() const {
4543 return new (AllocMarker) CatchReturnInst(*this);
4544}
4545
4546CatchSwitchInst *CatchSwitchInst::cloneImpl() const {
4547 return new CatchSwitchInst(*this);
4548}
4549
4550FuncletPadInst *FuncletPadInst::cloneImpl() const {
4552 return new (AllocMarker) FuncletPadInst(*this, AllocMarker);
4553}
4554
4556 LLVMContext &Context = getContext();
4557 return new UnreachableInst(Context);
4558}
4559
4560bool UnreachableInst::shouldLowerToTrap(bool TrapUnreachable,
4561 bool NoTrapAfterNoreturn) const {
4562 if (!TrapUnreachable)
4563 return false;
4564
4565 // We may be able to ignore unreachable behind a noreturn call.
4567 Call && Call->doesNotReturn()) {
4568 if (NoTrapAfterNoreturn)
4569 return false;
4570 // Do not emit an additional trap instruction.
4571 if (Call->isNonContinuableTrap())
4572 return false;
4573 }
4574
4575 if (getFunction()->hasFnAttribute(Attribute::Naked))
4576 return false;
4577
4578 return true;
4579}
4580
4582 return new FreezeInst(getOperand(0));
4583}
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
constexpr LLT S1
Rewrite undef for PHI
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Atomic ordering constants.
@ FnAttr
This file contains the simple types necessary to represent the attributes associated with functions a...
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
#define LLVM_ABI
Definition Compiler.h:213
This file contains the declarations for the subclasses of Constant, which represent the different fla...
@ Default
#define op(i)
Module.h This file contains the declarations for the Module class.
static Align computeLoadStoreDefaultAlign(Type *Ty, InsertPosition Pos)
static bool isImpliedFalseByMatchingCmp(CmpPredicate Pred1, CmpPredicate Pred2)
static Value * createPlaceholderForShuffleVector(Value *V)
static Align computeAllocaDefaultAlign(Type *Ty, InsertPosition Pos)
static cl::opt< bool > DisableI2pP2iOpt("disable-i2p-p2i-opt", cl::init(false), cl::desc("Disables inttoptr/ptrtoint roundtrip optimization"))
static bool hasNonZeroFPOperands(const CmpInst *Cmp)
static int matchShuffleAsBitRotate(ArrayRef< int > Mask, int NumSubElts)
Try to lower a vector shuffle as a bit rotation.
static Type * getIndexedTypeInternal(Type *Ty, ArrayRef< IndexTy > IdxList)
static bool isReplicationMaskWithParams(ArrayRef< int > Mask, int ReplicationFactor, int VF)
static bool isIdentityMaskImpl(ArrayRef< int > Mask, int NumOpElts)
static bool isSingleSourceMaskImpl(ArrayRef< int > Mask, int NumOpElts)
static Value * getAISize(LLVMContext &Context, Value *Amt)
static bool isImpliedTrueByMatchingCmp(CmpPredicate Pred1, CmpPredicate Pred2)
const size_t AbstractManglingParser< Derived, Alloc >::NumOps
#define F(x, y, z)
Definition MD5.cpp:54
#define I(x, y, z)
Definition MD5.cpp:57
This file contains the declarations for metadata subclasses.
#define T
MachineInstr unsigned OpIdx
uint64_t IntrinsicInst * II
#define P(N)
PowerPC Reduce CR logical Operation
This file contains the declarations for profiling metadata utility functions.
const SmallVectorImpl< MachineOperand > & Cond
static unsigned getNumElements(Type *Ty)
This file implements the SmallBitVector class.
This file defines the SmallVector class.
#define LLVM_DEBUG(...)
Definition Debug.h:114
static SymbolRef::Type getType(const Symbol *Sym)
Definition TapiFile.cpp:39
static std::optional< unsigned > getOpcode(ArrayRef< VPValue * > Values)
Returns the opcode of Values or ~0 if they do not all agree.
Definition VPlanSLP.cpp:247
Value * RHS
Value * LHS
cmpResult
IEEE-754R 5.11: Floating Point Comparison Relations.
Definition APFloat.h:334
LLVM_ABI float convertToFloat() const
Converts this APFloat to host float value.
Definition APFloat.cpp:6143
Class for arbitrary precision integers.
Definition APInt.h:78
void setBit(unsigned BitPosition)
Set the given bit to 1 whose position is given as "bitPosition".
Definition APInt.h:1331
bool isZero() const
Determine if this value is zero, i.e. all bits are clear.
Definition APInt.h:381
unsigned countr_zero() const
Count the number of trailing zero bits.
Definition APInt.h:1640
unsigned countl_zero() const
The APInt version of std::countl_zero.
Definition APInt.h:1599
static APInt getZero(unsigned numBits)
Get the '0' value for the specified bit-width.
Definition APInt.h:201
This class represents a conversion between pointers from one address space to another.
LLVM_ABI AddrSpaceCastInst * cloneImpl() const
Clone an identical AddrSpaceCastInst.
LLVM_ABI AddrSpaceCastInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics.
LLVM_ABI std::optional< TypeSize > getAllocationSizeInBits(const DataLayout &DL) const
Get allocation size in bits.
bool isSwiftError() const
Return true if this alloca is used as a swifterror argument to a call.
LLVM_ABI bool isStaticAlloca() const
Return true if this alloca is in the entry block of the function and is a constant size.
Align getAlign() const
Return the alignment of the memory that is being allocated by the instruction.
LLVM_ABI AllocaInst * cloneImpl() const
Type * getAllocatedType() const
Return the type that is being allocated by the instruction.
bool isUsedWithInAlloca() const
Return true if this alloca is used as an inalloca argument to a call.
unsigned getAddressSpace() const
Return the address space for the allocation.
LLVM_ABI std::optional< TypeSize > getAllocationSize(const DataLayout &DL) const
Get allocation size in bytes.
LLVM_ABI bool isArrayAllocation() const
Return true if there is an allocation size parameter to the allocation instruction that is not 1.
void setAlignment(Align Align)
const Value * getArraySize() const
Get the number of elements allocated.
LLVM_ABI AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize, const Twine &Name, InsertPosition InsertBefore)
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition ArrayRef.h:40
iterator end() const
Definition ArrayRef.h:131
size_t size() const
size - Get the array size.
Definition ArrayRef.h:142
iterator begin() const
Definition ArrayRef.h:130
bool empty() const
empty - Check if the array is empty.
Definition ArrayRef.h:137
ArrayRef< T > slice(size_t N, size_t M) const
slice(n, m) - Chop off the first N elements of the array, and keep M elements in the array.
Definition ArrayRef.h:186
Class to represent array types.
void setSyncScopeID(SyncScope::ID SSID)
Sets the synchronization scope ID of this cmpxchg instruction.
bool isVolatile() const
Return true if this is a cmpxchg from a volatile memory location.
void setFailureOrdering(AtomicOrdering Ordering)
Sets the failure ordering constraint of this cmpxchg instruction.
AtomicOrdering getFailureOrdering() const
Returns the failure ordering constraint of this cmpxchg instruction.
void setSuccessOrdering(AtomicOrdering Ordering)
Sets the success ordering constraint of this cmpxchg instruction.
LLVM_ABI AtomicCmpXchgInst * cloneImpl() const
Align getAlign() const
Return the alignment of the memory that is being allocated by the instruction.
friend class Instruction
Iterator for Instructions in a `BasicBlock.
bool isWeak() const
Return true if this cmpxchg may spuriously fail.
void setAlignment(Align Align)
AtomicOrdering getSuccessOrdering() const
Returns the success ordering constraint of this cmpxchg instruction.
SyncScope::ID getSyncScopeID() const
Returns the synchronization scope ID of this cmpxchg instruction.
LLVM_ABI AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal, Align Alignment, AtomicOrdering SuccessOrdering, AtomicOrdering FailureOrdering, SyncScope::ID SSID, InsertPosition InsertBefore=nullptr)
Align getAlign() const
Return the alignment of the memory that is being allocated by the instruction.
LLVM_ABI AtomicRMWInst * cloneImpl() const
bool isVolatile() const
Return true if this is a RMW on a volatile memory location.
BinOp
This enumeration lists the possible modifications atomicrmw can make.
@ Add
*p = old + v
@ FAdd
*p = old + v
@ USubCond
Subtract only if no unsigned overflow.
@ FMinimum
*p = minimum(old, v) minimum matches the behavior of llvm.minimum.
@ Min
*p = old <signed v ? old : v
@ Sub
*p = old - v
@ And
*p = old & v
@ Xor
*p = old ^ v
@ USubSat
*p = usub.sat(old, v) usub.sat matches the behavior of llvm.usub.sat.
@ FMaximum
*p = maximum(old, v) maximum matches the behavior of llvm.maximum.
@ FSub
*p = old - v
@ UIncWrap
Increment one up to a maximum value.
@ Max
*p = old >signed v ? old : v
@ UMin
*p = old <unsigned v ? old : v
@ FMin
*p = minnum(old, v) minnum matches the behavior of llvm.minnum.
@ UMax
*p = old >unsigned v ? old : v
@ FMax
*p = maxnum(old, v) maxnum matches the behavior of llvm.maxnum.
@ UDecWrap
Decrement one until a minimum value or zero.
@ Nand
*p = ~(old & v)
void setSyncScopeID(SyncScope::ID SSID)
Sets the synchronization scope ID of this rmw instruction.
void setOrdering(AtomicOrdering Ordering)
Sets the ordering constraint of this rmw instruction.
void setOperation(BinOp Operation)
friend class Instruction
Iterator for Instructions in a `BasicBlock.
BinOp getOperation() const
LLVM_ABI AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val, Align Alignment, AtomicOrdering Ordering, SyncScope::ID SSID, InsertPosition InsertBefore=nullptr)
SyncScope::ID getSyncScopeID() const
Returns the synchronization scope ID of this rmw instruction.
void setAlignment(Align Align)
static LLVM_ABI StringRef getOperationName(BinOp Op)
AtomicOrdering getOrdering() const
Returns the ordering constraint of this rmw instruction.
LLVM_ABI CaptureInfo getCaptureInfo() const
Functions, function parameters, and return types can have attributes to indicate how they should be t...
Definition Attributes.h:69
LLVM_ABI const ConstantRange & getRange() const
Returns the value of the range attribute.
AttrKind
This enumeration lists the attributes that can be associated with parameters, function results,...
Definition Attributes.h:88
static LLVM_ABI Attribute getWithMemoryEffects(LLVMContext &Context, MemoryEffects ME)
bool isValid() const
Return true if the attribute is any kind of attribute.
Definition Attributes.h:223
LLVM Basic Block Representation.
Definition BasicBlock.h:62
const Function * getParent() const
Return the enclosing method, or null if none.
Definition BasicBlock.h:213
LLVM_ABI const DataLayout & getDataLayout() const
Get the data layout of the module this basic block belongs to.
static LLVM_ABI BinaryOperator * CreateNeg(Value *Op, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Helper functions to construct and inspect unary operations (NEG and NOT) via binary operators SUB and...
BinaryOps getOpcode() const
Definition InstrTypes.h:374
LLVM_ABI bool swapOperands()
Exchange the two operands to this instruction.
static LLVM_ABI BinaryOperator * CreateNot(Value *Op, const Twine &Name="", InsertPosition InsertBefore=nullptr)
friend class Instruction
Iterator for Instructions in a `BasicBlock.
Definition InstrTypes.h:181
static LLVM_ABI BinaryOperator * Create(BinaryOps Op, Value *S1, Value *S2, const Twine &Name=Twine(), InsertPosition InsertBefore=nullptr)
Construct a binary instruction, given the opcode and the two operands.
LLVM_ABI BinaryOperator(BinaryOps iType, Value *S1, Value *S2, Type *Ty, const Twine &Name, InsertPosition InsertBefore)
static LLVM_ABI BinaryOperator * CreateNSWNeg(Value *Op, const Twine &Name="", InsertPosition InsertBefore=nullptr)
LLVM_ABI BinaryOperator * cloneImpl() const
This class represents a no-op cast from one type to another.
LLVM_ABI BitCastInst * cloneImpl() const
Clone an identical BitCastInst.
LLVM_ABI BitCastInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics.
Conditional or Unconditional Branch instruction.
LLVM_ABI void swapSuccessors()
Swap the successors of this branch instruction.
LLVM_ABI BranchInst * cloneImpl() const
bool isConditional() const
Value * getCondition() const
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
LLVM_ABI FPClassTest getParamNoFPClass(unsigned i) const
Extract a test mask for disallowed floating-point value classes for the parameter.
bool isInlineAsm() const
Check if this call is an inline asm statement.
LLVM_ABI BundleOpInfo & getBundleOpInfoForOperand(unsigned OpIdx)
Return the BundleOpInfo for the operand at index OpIdx.
void setCallingConv(CallingConv::ID CC)
LLVM_ABI FPClassTest getRetNoFPClass() const
Extract a test mask for disallowed floating-point value classes for the return value.
bundle_op_iterator bundle_op_info_begin()
Return the start of the list of BundleOpInfo instances associated with this OperandBundleUser.
LLVM_ABI bool paramHasNonNullAttr(unsigned ArgNo, bool AllowUndefOrPoison) const
Return true if this argument has the nonnull attribute on either the CallBase instruction or the call...
LLVM_ABI MemoryEffects getMemoryEffects() const
void addFnAttr(Attribute::AttrKind Kind)
Adds the attribute to the function.
LLVM_ABI bool doesNotAccessMemory() const
Determine if the call does not access memory.
LLVM_ABI void getOperandBundlesAsDefs(SmallVectorImpl< OperandBundleDef > &Defs) const
Return the list of operand bundles attached to this instruction as a vector of OperandBundleDefs.
LLVM_ABI void setOnlyAccessesArgMemory()
OperandBundleUse getOperandBundleAt(unsigned Index) const
Return the operand bundle at a specific index.
OperandBundleUse operandBundleFromBundleOpInfo(const BundleOpInfo &BOI) const
Simple helper function to map a BundleOpInfo to an OperandBundleUse.
LLVM_ABI void setOnlyAccessesInaccessibleMemOrArgMem()
std::optional< OperandBundleUse > getOperandBundle(StringRef Name) const
Return an operand bundle by name, if present.
Function * getCalledFunction() const
Returns the function called, or null if this is an indirect function invocation or the function signa...
LLVM_ABI void setDoesNotAccessMemory()
AttributeSet getParamAttributes(unsigned ArgNo) const
Return the param attributes for this call.
bool hasRetAttr(Attribute::AttrKind Kind) const
Determine whether the return value has the given attribute.
LLVM_ABI bool onlyAccessesInaccessibleMemory() const
Determine if the function may only access memory that is inaccessible from the IR.
unsigned getNumOperandBundles() const
Return the number of operand bundles associated with this User.
CallingConv::ID getCallingConv() const
bundle_op_iterator bundle_op_info_end()
Return the end of the list of BundleOpInfo instances associated with this OperandBundleUser.
LLVM_ABI unsigned getNumSubclassExtraOperandsDynamic() const
Get the number of extra operands for instructions that don't have a fixed number of extra operands.
BundleOpInfo * bundle_op_iterator
LLVM_ABI bool paramHasAttr(unsigned ArgNo, Attribute::AttrKind Kind) const
Determine whether the argument or parameter has the given attribute.
User::op_iterator arg_begin()
Return the iterator pointing to the beginning of the argument list.
LLVM_ABI bool isMustTailCall() const
Tests if this call site must be tail call optimized.
LLVM_ABI bool isIndirectCall() const
Return true if the callsite is an indirect call.
LLVM_ABI bool onlyReadsMemory() const
Determine if the call does not access or only reads memory.
bool isByValArgument(unsigned ArgNo) const
Determine whether this argument is passed by value.
iterator_range< bundle_op_iterator > bundle_op_infos()
Return the range [bundle_op_info_begin, bundle_op_info_end).
LLVM_ABI void setOnlyReadsMemory()
static LLVM_ABI CallBase * addOperandBundle(CallBase *CB, uint32_t ID, OperandBundleDef OB, InsertPosition InsertPt=nullptr)
Create a clone of CB with operand bundle OB added.
LLVM_ABI bool onlyAccessesInaccessibleMemOrArgMem() const
Determine if the function may only access memory that is either inaccessible from the IR or pointed t...
LLVM_ABI CaptureInfo getCaptureInfo(unsigned OpNo) const
Return which pointer components this operand may capture.
LLVM_ABI bool hasArgumentWithAdditionalReturnCaptureComponents() const
Returns whether the call has an argument that has an attribute like captures(ret: address,...
CallBase(AttributeList const &A, FunctionType *FT, ArgsTy &&... Args)
Value * getCalledOperand() const
LLVM_ABI void setOnlyWritesMemory()
LLVM_ABI op_iterator populateBundleOperandInfos(ArrayRef< OperandBundleDef > Bundles, const unsigned BeginIndex)
Populate the BundleOpInfo instances and the Use& vector from Bundles.
AttributeList Attrs
parameter attributes for callable
bool hasOperandBundlesOtherThan(ArrayRef< uint32_t > IDs) const
Return true if this operand bundle user contains operand bundles with tags other than those specified...
LLVM_ABI std::optional< ConstantRange > getRange() const
If this return value has a range attribute, return the value range of the argument.
LLVM_ABI bool isReturnNonNull() const
Return true if the return value is known to be not null.
Value * getArgOperand(unsigned i) const
FunctionType * FTy
uint64_t getRetDereferenceableBytes() const
Extract the number of dereferenceable bytes for a call or parameter (0=unknown).
User::op_iterator arg_end()
Return the iterator pointing to the end of the argument list.
FunctionType * getFunctionType() const
LLVM_ABI Intrinsic::ID getIntrinsicID() const
Returns the intrinsic ID of the intrinsic called or Intrinsic::not_intrinsic if the called function i...
static unsigned CountBundleInputs(ArrayRef< OperandBundleDef > Bundles)
Return the total number of values used in Bundles.
LLVM_ABI Value * getArgOperandWithAttribute(Attribute::AttrKind Kind) const
If one of the arguments has the specified attribute, returns its operand value.
LLVM_ABI void setOnlyAccessesInaccessibleMemory()
static LLVM_ABI CallBase * Create(CallBase *CB, ArrayRef< OperandBundleDef > Bundles, InsertPosition InsertPt=nullptr)
Create a clone of CB with a different set of operand bundles and insert it before InsertPt.
LLVM_ABI bool onlyWritesMemory() const
Determine if the call does not access or only writes memory.
LLVM_ABI bool hasClobberingOperandBundles() const
Return true if this operand bundle user has operand bundles that may write to the heap.
void setCalledOperand(Value *V)
static LLVM_ABI CallBase * removeOperandBundle(CallBase *CB, uint32_t ID, InsertPosition InsertPt=nullptr)
Create a clone of CB with operand bundle ID removed.
LLVM_ABI bool hasReadingOperandBundles() const
Return true if this operand bundle user has operand bundles that may read from the heap.
LLVM_ABI bool onlyAccessesArgMemory() const
Determine if the call can access memmory only using pointers based on its arguments.
unsigned arg_size() const
AttributeList getAttributes() const
Return the attributes for this call.
LLVM_ABI void setMemoryEffects(MemoryEffects ME)
bool hasOperandBundles() const
Return true if this User has any operand bundles.
LLVM_ABI bool isTailCall() const
Tests if this call site is marked as a tail call.
LLVM_ABI Function * getCaller()
Helper to get the caller (the parent function).
CallBr instruction, tracking function calls that may not return control but instead transfer it to a ...
SmallVector< BasicBlock *, 16 > getIndirectDests() const
void setDefaultDest(BasicBlock *B)
void setIndirectDest(unsigned i, BasicBlock *B)
BasicBlock * getDefaultDest() const
static CallBrInst * Create(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest, ArrayRef< BasicBlock * > IndirectDests, ArrayRef< Value * > Args, const Twine &NameStr, InsertPosition InsertBefore=nullptr)
LLVM_ABI CallBrInst * cloneImpl() const
This class represents a function call, abstracting a target machine's calling convention.
LLVM_ABI void updateProfWeight(uint64_t S, uint64_t T)
Updates profile metadata by scaling it by S / T.
TailCallKind getTailCallKind() const
LLVM_ABI CallInst * cloneImpl() const
static CallInst * Create(FunctionType *Ty, Value *F, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Represents which components of the pointer may be captured in which location.
Definition ModRef.h:359
CaptureComponents getOtherComponents() const
Get components potentially captured through locations other than the return value.
Definition ModRef.h:391
static CaptureInfo none()
Create CaptureInfo that does not capture any components of the pointer.
Definition ModRef.h:372
static CaptureInfo all()
Create CaptureInfo that may capture all components of the pointer.
Definition ModRef.h:375
CaptureComponents getRetComponents() const
Get components potentially captured by the return value.
Definition ModRef.h:387
static LLVM_ABI Instruction::CastOps getCastOpcode(const Value *Val, bool SrcIsSigned, Type *Ty, bool DstIsSigned)
Returns the opcode necessary to cast Val into Ty using usual casting rules.
static LLVM_ABI CastInst * CreatePointerBitCastOrAddrSpaceCast(Value *S, Type *Ty, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Create a BitCast or an AddrSpaceCast cast instruction.
Instruction::CastOps getOpcode() const
Return the opcode of this CastInst.
Definition InstrTypes.h:610
static LLVM_ABI unsigned isEliminableCastPair(Instruction::CastOps firstOpcode, Instruction::CastOps secondOpcode, Type *SrcTy, Type *MidTy, Type *DstTy, const DataLayout *DL)
Determine how a pair of casts can be eliminated, if they can be at all.
static LLVM_ABI CastInst * CreateIntegerCast(Value *S, Type *Ty, bool isSigned, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Create a ZExt, BitCast, or Trunc for int -> int casts.
static LLVM_ABI CastInst * CreateFPCast(Value *S, Type *Ty, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Create an FPExt, BitCast, or FPTrunc for fp -> fp casts.
CastInst(Type *Ty, unsigned iType, Value *S, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics for subclasses.
Definition InstrTypes.h:451
static LLVM_ABI bool isBitOrNoopPointerCastable(Type *SrcTy, Type *DestTy, const DataLayout &DL)
Check whether a bitcast, inttoptr, or ptrtoint cast between these types is valid and a no-op.
static LLVM_ABI bool isBitCastable(Type *SrcTy, Type *DestTy)
Check whether a bitcast between these types is valid.
static LLVM_ABI CastInst * CreateTruncOrBitCast(Value *S, Type *Ty, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Create a Trunc or BitCast cast instruction.
static LLVM_ABI CastInst * CreatePointerCast(Value *S, Type *Ty, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Create a BitCast, AddrSpaceCast or a PtrToInt cast instruction.
static LLVM_ABI CastInst * CreateBitOrPointerCast(Value *S, Type *Ty, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Create a BitCast, a PtrToInt, or an IntToPtr cast instruction.
static LLVM_ABI bool isNoopCast(Instruction::CastOps Opcode, Type *SrcTy, Type *DstTy, const DataLayout &DL)
A no-op cast is one that can be effected without changing any bits.
static LLVM_ABI CastInst * CreateZExtOrBitCast(Value *S, Type *Ty, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Create a ZExt or BitCast cast instruction.
static LLVM_ABI CastInst * Create(Instruction::CastOps, Value *S, Type *Ty, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Provides a way to construct any of the CastInst subclasses using an opcode instead of the subclass's ...
LLVM_ABI bool isIntegerCast() const
There are several places where we need to know if a cast instruction only deals with integer source a...
static LLVM_ABI CastInst * CreateSExtOrBitCast(Value *S, Type *Ty, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Create a SExt or BitCast cast instruction.
static LLVM_ABI bool castIsValid(Instruction::CastOps op, Type *SrcTy, Type *DstTy)
This method can be used to determine if a cast from SrcTy to DstTy using Opcode op is valid or not.
LLVM_ABI CatchReturnInst * cloneImpl() const
void setUnwindDest(BasicBlock *UnwindDest)
LLVM_ABI void addHandler(BasicBlock *Dest)
Add an entry to the switch instruction... Note: This action invalidates handler_end().
LLVM_ABI CatchSwitchInst * cloneImpl() const
mapped_iterator< op_iterator, DerefFnTy > handler_iterator
Value * getParentPad() const
void setParentPad(Value *ParentPad)
BasicBlock * getUnwindDest() const
LLVM_ABI void removeHandler(handler_iterator HI)
LLVM_ABI CleanupReturnInst * cloneImpl() const
This class is the base class for the comparison instructions.
Definition InstrTypes.h:664
Predicate getStrictPredicate() const
For example, SGE -> SGT, SLE -> SLT, ULE -> ULT, UGE -> UGT.
Definition InstrTypes.h:858
bool isEquality() const
Determine if this is an equals/not equals predicate.
Definition InstrTypes.h:915
void setPredicate(Predicate P)
Set the predicate for this instruction to the specified value.
Definition InstrTypes.h:768
bool isFalseWhenEqual() const
This is just a convenience.
Definition InstrTypes.h:948
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
Definition InstrTypes.h:676
@ FCMP_OEQ
0 0 0 1 True if ordered and equal
Definition InstrTypes.h:679
@ FCMP_TRUE
1 1 1 1 Always true (always folded)
Definition InstrTypes.h:693
@ ICMP_SLT
signed less than
Definition InstrTypes.h:705
@ ICMP_SLE
signed less or equal
Definition InstrTypes.h:706
@ FCMP_OLT
0 1 0 0 True if ordered and less than
Definition InstrTypes.h:682
@ FCMP_ULE
1 1 0 1 True if unordered, less than, or equal
Definition InstrTypes.h:691
@ FCMP_OGT
0 0 1 0 True if ordered and greater than
Definition InstrTypes.h:680
@ FCMP_OGE
0 0 1 1 True if ordered and greater than or equal
Definition InstrTypes.h:681
@ ICMP_UGE
unsigned greater or equal
Definition InstrTypes.h:700
@ ICMP_UGT
unsigned greater than
Definition InstrTypes.h:699
@ ICMP_SGT
signed greater than
Definition InstrTypes.h:703
@ FCMP_ULT
1 1 0 0 True if unordered or less than
Definition InstrTypes.h:690
@ FCMP_ONE
0 1 1 0 True if ordered and operands are unequal
Definition InstrTypes.h:684
@ FCMP_UEQ
1 0 0 1 True if unordered or equal
Definition InstrTypes.h:687
@ ICMP_ULT
unsigned less than
Definition InstrTypes.h:701
@ FCMP_UGT
1 0 1 0 True if unordered or greater than
Definition InstrTypes.h:688
@ FCMP_OLE
0 1 0 1 True if ordered and less than or equal
Definition InstrTypes.h:683
@ FCMP_ORD
0 1 1 1 True if ordered (no nans)
Definition InstrTypes.h:685
@ ICMP_NE
not equal
Definition InstrTypes.h:698
@ ICMP_SGE
signed greater or equal
Definition InstrTypes.h:704
@ FCMP_UNE
1 1 1 0 True if unordered or not equal
Definition InstrTypes.h:692
@ ICMP_ULE
unsigned less or equal
Definition InstrTypes.h:702
@ FCMP_UGE
1 0 1 1 True if unordered, greater than, or equal
Definition InstrTypes.h:689
@ FCMP_FALSE
0 0 0 0 Always false (always folded)
Definition InstrTypes.h:678
@ FCMP_UNO
1 0 0 0 True if unordered: isnan(X) | isnan(Y)
Definition InstrTypes.h:686
LLVM_ABI bool isEquivalence(bool Invert=false) const
Determine if one operand of this compare can always be replaced by the other operand,...
bool isSigned() const
Definition InstrTypes.h:930
static LLVM_ABI bool isEquality(Predicate pred)
Determine if this is an equals/not equals predicate.
Predicate getSwappedPredicate() const
For example, EQ->EQ, SLE->SGE, ULT->UGT, OEQ->OEQ, ULE->UGE, OLT->OGT, etc.
Definition InstrTypes.h:827
bool isTrueWhenEqual() const
This is just a convenience.
Definition InstrTypes.h:942
static LLVM_ABI CmpInst * Create(OtherOps Op, Predicate Pred, Value *S1, Value *S2, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Construct a compare instruction, given the opcode, the predicate and the two operands.
static bool isFPPredicate(Predicate P)
Definition InstrTypes.h:770
Predicate getNonStrictPredicate() const
For example, SGT -> SGE, SLT -> SLE, ULT -> ULE, UGT -> UGE.
Definition InstrTypes.h:871
static LLVM_ABI CmpInst * CreateWithCopiedFlags(OtherOps Op, Predicate Pred, Value *S1, Value *S2, const Instruction *FlagsSource, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Construct a compare instruction, given the opcode, the predicate, the two operands and the instructio...
bool isNonStrictPredicate() const
Definition InstrTypes.h:852
LLVM_ABI void swapOperands()
This is just a convenience that dispatches to the subclasses.
static bool isRelational(Predicate P)
Return true if the predicate is relational (not EQ or NE).
Definition InstrTypes.h:923
Predicate getInversePredicate() const
For example, EQ -> NE, UGT -> ULE, SLT -> SGE, OEQ -> UNE, UGT -> OLE, OLT -> UGE,...
Definition InstrTypes.h:789
static LLVM_ABI StringRef getPredicateName(Predicate P)
Predicate getPredicate() const
Return the predicate for this instruction.
Definition InstrTypes.h:765
bool isStrictPredicate() const
Definition InstrTypes.h:843
static LLVM_ABI bool isUnordered(Predicate predicate)
Determine if the predicate is an unordered operation.
Predicate getFlippedStrictnessPredicate() const
For predicate of kind "is X or equal to 0" returns the predicate "is X".
Definition InstrTypes.h:893
static bool isIntPredicate(Predicate P)
Definition InstrTypes.h:776
static LLVM_ABI bool isOrdered(Predicate predicate)
Determine if the predicate is an ordered operation.
LLVM_ABI CmpInst(Type *ty, Instruction::OtherOps op, Predicate pred, Value *LHS, Value *RHS, const Twine &Name="", InsertPosition InsertBefore=nullptr, Instruction *FlagsSource=nullptr)
bool isUnsigned() const
Definition InstrTypes.h:936
LLVM_ABI bool isCommutative() const
This is just a convenience that dispatches to the subclasses.
An abstraction over a floating-point predicate, and a pack of an integer predicate with samesign info...
static LLVM_ABI std::optional< CmpPredicate > getMatching(CmpPredicate A, CmpPredicate B)
Compares two CmpPredicates taking samesign into account and returns the canonicalized CmpPredicate if...
CmpPredicate()
Default constructor.
static LLVM_ABI CmpPredicate get(const CmpInst *Cmp)
Do a ICmpInst::getCmpPredicate() or CmpInst::getPredicate(), as appropriate.
LLVM_ABI CmpInst::Predicate getPreferredSignedPredicate() const
Attempts to return a signed CmpInst::Predicate from the CmpPredicate.
bool hasSameSign() const
Query samesign information, for optimizations.
static LLVM_ABI CmpPredicate getSwapped(CmpPredicate P)
Get the swapped predicate of a CmpPredicate.
ConstantFP - Floating Point Values [float, double].
Definition Constants.h:282
const APFloat & getValueAPF() const
Definition Constants.h:325
This is the shared class of boolean and integer constants.
Definition Constants.h:87
LLVM_ABI ConstantRange intersectWith(const ConstantRange &CR, PreferredRangeType Type=Smallest) const
Return the range that results from the intersection of this range with another range.
static LLVM_ABI Constant * get(ArrayRef< Constant * > V)
This is an important base class in LLVM.
Definition Constant.h:43
static LLVM_ABI Constant * getAllOnesValue(Type *Ty)
static LLVM_ABI Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
A parsed version of the target data layout string in and methods for querying it.
Definition DataLayout.h:64
static constexpr ElementCount getFixed(ScalarTy MinVal)
Definition TypeSize.h:309
LLVM_ABI ExtractElementInst * cloneImpl() const
static ExtractElementInst * Create(Value *Vec, Value *Idx, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
static LLVM_ABI bool isValidOperands(const Value *Vec, const Value *Idx)
Return true if an extractelement instruction can be formed with the specified operands.
This instruction extracts a struct member or array element value from an aggregate value.
static LLVM_ABI Type * getIndexedType(Type *Agg, ArrayRef< unsigned > Idxs)
Returns the type of the element that would be extracted with an extractvalue instruction with the spe...
LLVM_ABI ExtractValueInst * cloneImpl() const
This instruction compares its operands according to the predicate given to the constructor.
bool isEquality() const
static LLVM_ABI bool compare(const APFloat &LHS, const APFloat &RHS, FCmpInst::Predicate Pred)
Return result of LHS Pred RHS comparison.
LLVM_ABI FCmpInst * cloneImpl() const
Clone an identical FCmpInst.
FCmpInst(InsertPosition InsertBefore, Predicate pred, Value *LHS, Value *RHS, const Twine &NameStr="")
Constructor with insertion semantics.
This class represents an extension of floating point types.
LLVM_ABI FPExtInst * cloneImpl() const
Clone an identical FPExtInst.
LLVM_ABI FPExtInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics.
LLVM_ABI float getFPAccuracy() const
Get the maximum error permitted by this operation in ULPs.
This class represents a cast from floating point to signed integer.
LLVM_ABI FPToSIInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics.
LLVM_ABI FPToSIInst * cloneImpl() const
Clone an identical FPToSIInst.
This class represents a cast from floating point to unsigned integer.
LLVM_ABI FPToUIInst * cloneImpl() const
Clone an identical FPToUIInst.
LLVM_ABI FPToUIInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics.
This class represents a truncation of floating point types.
LLVM_ABI FPTruncInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics.
LLVM_ABI FPTruncInst * cloneImpl() const
Clone an identical FPTruncInst.
LLVM_ABI FenceInst(LLVMContext &C, AtomicOrdering Ordering, SyncScope::ID SSID=SyncScope::System, InsertPosition InsertBefore=nullptr)
SyncScope::ID getSyncScopeID() const
Returns the synchronization scope ID of this fence instruction.
void setSyncScopeID(SyncScope::ID SSID)
Sets the synchronization scope ID of this fence instruction.
LLVM_ABI FenceInst * cloneImpl() const
friend class Instruction
Iterator for Instructions in a BasicBlock.
void setOrdering(AtomicOrdering Ordering)
Sets the ordering constraint of this fence instruction.
AtomicOrdering getOrdering() const
Returns the ordering constraint of this fence instruction.
Class to represent fixed width SIMD vectors.
unsigned getNumElements() const
LLVM_ABI FreezeInst(Value *S, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
LLVM_ABI FreezeInst * cloneImpl() const
Clone an identical FreezeInst.
void setParentPad(Value *ParentPad)
Value * getParentPad() const
Convenience accessors.
LLVM_ABI FuncletPadInst * cloneImpl() const
Class to represent function types.
unsigned getNumParams() const
Return the number of fixed parameters this function type requires.
Type * getParamType(unsigned i) const
Parameter type accessors.
bool isVarArg() const
Represents flags for the getelementptr instruction/expression.
static GEPNoWrapFlags inBounds()
GEPNoWrapFlags withoutInBounds() const
unsigned getRaw() const
an instruction for type-safe pointer arithmetic to access elements of arrays and structs
LLVM_ABI bool isInBounds() const
Determine whether the GEP has the inbounds flag.
LLVM_ABI bool hasNoUnsignedSignedWrap() const
Determine whether the GEP has the nusw flag.
static LLVM_ABI Type * getTypeAtIndex(Type *Ty, Value *Idx)
Return the type of the element at the given index of an indexable type.
LLVM_ABI bool hasAllZeroIndices() const
Return true if all of the indices of this GEP are zeros.
LLVM_ABI bool hasNoUnsignedWrap() const
Determine whether the GEP has the nuw flag.
LLVM_ABI bool hasAllConstantIndices() const
Return true if all of the indices of this GEP are constant integers.
LLVM_ABI void setIsInBounds(bool b=true)
Set or clear the inbounds flag on this GEP instruction.
static LLVM_ABI Type * getIndexedType(Type *Ty, ArrayRef< Value * > IdxList)
Returns the result type of a getelementptr with the given source element type and indexes.
LLVM_ABI bool accumulateConstantOffset(const DataLayout &DL, APInt &Offset) const
Accumulate the constant address offset of this GEP if possible.
LLVM_ABI GetElementPtrInst * cloneImpl() const
LLVM_ABI bool collectOffset(const DataLayout &DL, unsigned BitWidth, SmallMapVector< Value *, APInt, 4 > &VariableOffsets, APInt &ConstantOffset) const
LLVM_ABI void setNoWrapFlags(GEPNoWrapFlags NW)
Set nowrap flags for GEP instruction.
LLVM_ABI GEPNoWrapFlags getNoWrapFlags() const
Get the nowrap flags for the GEP instruction.
Module * getParent()
Get the module that this global value is contained inside of...
This instruction compares its operands according to the predicate given to the constructor.
ICmpInst(InsertPosition InsertBefore, Predicate pred, Value *LHS, Value *RHS, const Twine &NameStr="")
Constructor with insertion semantics.
static LLVM_ABI bool compare(const APInt &LHS, const APInt &RHS, ICmpInst::Predicate Pred)
Return result of LHS Pred RHS comparison.
LLVM_ABI ICmpInst * cloneImpl() const
Clone an identical ICmpInst.
Predicate getFlippedSignednessPredicate() const
For example, SLT->ULT, ULT->SLT, SLE->ULE, ULE->SLE, EQ->EQ.
Predicate getSignedPredicate() const
For example, EQ->EQ, SLE->SLE, UGT->SGT, etc.
static CmpPredicate getInverseCmpPredicate(CmpPredicate Pred)
bool isEquality() const
Return true if this predicate is either EQ or NE.
static LLVM_ABI Predicate getFlippedSignednessPredicate(Predicate Pred)
For example, SLT->ULT, ULT->SLT, SLE->ULE, ULE->SLE, EQ->EQ.
static LLVM_ABI std::optional< bool > isImpliedByMatchingCmp(CmpPredicate Pred1, CmpPredicate Pred2)
Determine if Pred1 implies Pred2 is true, false, or if nothing can be inferred about the implication,...
Predicate getUnsignedPredicate() const
For example, EQ->EQ, SLE->ULE, UGT->UGT, etc.
Indirect Branch Instruction.
LLVM_ABI void addDestination(BasicBlock *Dest)
Add a destination.
LLVM_ABI void removeDestination(unsigned i)
This method removes the specified successor from the indirectbr instruction.
LLVM_ABI IndirectBrInst * cloneImpl() const
LLVM_ABI InsertElementInst * cloneImpl() const
static InsertElementInst * Create(Value *Vec, Value *NewElt, Value *Idx, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
static LLVM_ABI bool isValidOperands(const Value *Vec, const Value *NewElt, const Value *Idx)
Return true if an insertelement instruction can be formed with the specified operands.
bool isValid() const
Definition Instruction.h:62
BasicBlock * getBasicBlock()
Definition Instruction.h:63
This instruction inserts a struct field or array element value into an aggregate value.
LLVM_ABI InsertValueInst * cloneImpl() const
BitfieldElement::Type getSubclassData() const
LLVM_ABI bool hasNoNaNs() const LLVM_READONLY
Determine whether the no-NaNs flag is set.
LLVM_ABI void copyIRFlags(const Value *V, bool IncludeWrapFlags=true)
Convenience method to copy supported exact, fast-math, and (optionally) wrapping flags from V to this...
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
LLVM_ABI bool isCommutative() const LLVM_READONLY
Return true if the instruction is commutative:
LLVM_ABI InstListType::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
LLVM_ABI const Function * getFunction() const
Return the function this instruction belongs to.
LLVM_ABI void swapProfMetadata()
If the instruction has "branch_weights" MD_prof metadata and the MDNode has three operands (including...
LLVM_ABI bool isVolatile() const LLVM_READONLY
Return true if this instruction has a volatile memory access.
unsigned getOpcode() const
Returns a member of one of the enums like Instruction::Add.
Bitfield::Element< uint16_t, 0, 15 > OpaqueField
Instruction(const Instruction &)=delete
friend class BasicBlock
Various leaf nodes.
void setSubclassData(typename BitfieldElement::Type Value)
This class represents a cast from an integer to a pointer.
LLVM_ABI IntToPtrInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics.
LLVM_ABI IntToPtrInst * cloneImpl() const
Clone an identical IntToPtrInst.
Invoke instruction.
BasicBlock * getUnwindDest() const
void setNormalDest(BasicBlock *B)
LLVM_ABI InvokeInst * cloneImpl() const
LLVM_ABI LandingPadInst * getLandingPadInst() const
Get the landingpad instruction from the landing pad block (the unwind destination).
void setUnwindDest(BasicBlock *B)
LLVM_ABI void updateProfWeight(uint64_t S, uint64_t T)
Updates profile metadata by scaling it by S / T.
static InvokeInst * Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal, BasicBlock *IfException, ArrayRef< Value * > Args, const Twine &NameStr, InsertPosition InsertBefore=nullptr)
This is an important class for using LLVM in a threaded context.
Definition LLVMContext.h:68
LLVMContextImpl *const pImpl
Definition LLVMContext.h:70
The landingpad instruction holds all of the information necessary to generate correct exception handl...
bool isCleanup() const
Return 'true' if this landingpad instruction is a cleanup.
LLVM_ABI LandingPadInst * cloneImpl() const
static LLVM_ABI LandingPadInst * Create(Type *RetTy, unsigned NumReservedClauses, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructors - NumReservedClauses is a hint for the number of incoming clauses that this landingpad w...
LLVM_ABI void addClause(Constant *ClauseVal)
Add a catch or filter clause to the landing pad.
void setCleanup(bool V)
Indicate that this landingpad instruction is a cleanup.
void setAlignment(Align Align)
bool isVolatile() const
Return true if this is a load from a volatile memory location.
void setAtomic(AtomicOrdering Ordering, SyncScope::ID SSID=SyncScope::System)
Sets the ordering constraint and the synchronization scope ID of this load instruction.
LLVM_ABI LoadInst * cloneImpl() const
AtomicOrdering getOrdering() const
Returns the ordering constraint of this load instruction.
void setVolatile(bool V)
Specify whether this is a volatile load or not.
SyncScope::ID getSyncScopeID() const
Returns the synchronization scope ID of this load instruction.
LLVM_ABI LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, InsertPosition InsertBefore)
Align getAlign() const
Return the alignment of the access that is being performed.
Metadata node.
Definition Metadata.h:1078
const MDOperand & getOperand(unsigned I) const
Definition Metadata.h:1442
static MemoryEffectsBase readOnly()
Definition ModRef.h:130
bool onlyWritesMemory() const
Whether this function only (at most) writes memory.
Definition ModRef.h:226
bool doesNotAccessMemory() const
Whether this function accesses no memory.
Definition ModRef.h:220
static MemoryEffectsBase argMemOnly(ModRefInfo MR=ModRefInfo::ModRef)
Definition ModRef.h:140
static MemoryEffectsBase inaccessibleMemOnly(ModRefInfo MR=ModRefInfo::ModRef)
Definition ModRef.h:146
bool onlyAccessesInaccessibleMem() const
Whether this function only (at most) accesses inaccessible memory.
Definition ModRef.h:239
bool onlyAccessesArgPointees() const
Whether this function only (at most) accesses argument memory.
Definition ModRef.h:229
bool onlyReadsMemory() const
Whether this function only (at most) reads memory.
Definition ModRef.h:223
static MemoryEffectsBase writeOnly()
Definition ModRef.h:135
static MemoryEffectsBase inaccessibleOrArgMemOnly(ModRefInfo MR=ModRefInfo::ModRef)
Definition ModRef.h:163
static MemoryEffectsBase none()
Definition ModRef.h:125
bool onlyAccessesInaccessibleOrArgMem() const
Whether this function only (at most) accesses argument and inaccessible memory.
Definition ModRef.h:250
StringRef getTag() const
void allocHungoffUses(unsigned N)
const_block_iterator block_begin() const
LLVM_ABI void removeIncomingValueIf(function_ref< bool(unsigned)> Predicate, bool DeletePHIIfEmpty=true)
Remove all incoming values for which the predicate returns true.
void setIncomingBlock(unsigned i, BasicBlock *BB)
LLVM_ABI Value * removeIncomingValue(unsigned Idx, bool DeletePHIIfEmpty=true)
Remove an incoming value.
LLVM_ABI bool hasConstantOrUndefValue() const
Whether the specified PHI node always merges together the same value, assuming undefs are equal to a ...
void setIncomingValue(unsigned i, Value *V)
const_block_iterator block_end() const
BasicBlock * getIncomingBlock(unsigned i) const
Return incoming basic block number i.
Value * getIncomingValue(unsigned i) const
Return incoming value number i.
LLVM_ABI Value * hasConstantValue() const
If the specified PHI node always merges together the same value, return the value,...
LLVM_ABI PHINode * cloneImpl() const
unsigned getNumIncomingValues() const
Return the number of incoming edges.
Class to represent pointers.
unsigned getAddressSpace() const
Return the address space of the Pointer type.
static LLVM_ABI PoisonValue * get(Type *T)
Static factory methods - Return an 'poison' object of the specified type.
This class represents a cast from a pointer to an address (non-capturing ptrtoint).
PtrToAddrInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics.
PtrToAddrInst * cloneImpl() const
Clone an identical PtrToAddrInst.
This class represents a cast from a pointer to an integer.
LLVM_ABI PtrToIntInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics.
LLVM_ABI PtrToIntInst * cloneImpl() const
Clone an identical PtrToIntInst.
Resume the propagation of an exception.
LLVM_ABI ResumeInst * cloneImpl() const
Return a value (possibly void), from a function.
LLVM_ABI ReturnInst * cloneImpl() const
This class represents a sign extension of integer types.
LLVM_ABI SExtInst * cloneImpl() const
Clone an identical SExtInst.
LLVM_ABI SExtInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics.
This class represents a cast from signed integer to floating point.
LLVM_ABI SIToFPInst * cloneImpl() const
Clone an identical SIToFPInst.
LLVM_ABI SIToFPInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics.
Class to represent scalable SIMD vectors.
LLVM_ABI SelectInst * cloneImpl() const
static LLVM_ABI const char * areInvalidOperands(Value *Cond, Value *True, Value *False)
Return a string if the specified operands are invalid for a select operation, otherwise return null.
static SelectInst * Create(Value *C, Value *S1, Value *S2, const Twine &NameStr="", InsertPosition InsertBefore=nullptr, const Instruction *MDFrom=nullptr)
static LLVM_ABI bool isZeroEltSplatMask(ArrayRef< int > Mask, int NumSrcElts)
Return true if this shuffle mask chooses all elements with the same value as the first element of exa...
ArrayRef< int > getShuffleMask() const
static LLVM_ABI bool isSpliceMask(ArrayRef< int > Mask, int NumSrcElts, int &Index)
Return true if this shuffle mask is a splice mask, concatenating the two inputs together and then ext...
int getMaskValue(unsigned Elt) const
Return the shuffle mask value of this instruction for the given element index.
LLVM_ABI ShuffleVectorInst(Value *V1, Value *Mask, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
static LLVM_ABI bool isValidOperands(const Value *V1, const Value *V2, const Value *Mask)
Return true if a shufflevector instruction can be formed with the specified operands.
static LLVM_ABI bool isSelectMask(ArrayRef< int > Mask, int NumSrcElts)
Return true if this shuffle mask chooses elements from its source vectors without lane crossings.
static LLVM_ABI bool isBitRotateMask(ArrayRef< int > Mask, unsigned EltSizeInBits, unsigned MinSubElts, unsigned MaxSubElts, unsigned &NumSubElts, unsigned &RotateAmt)
Checks if the shuffle is a bit rotation of the first operand across multiple subelements,...
VectorType * getType() const
Overload to return most specific vector type.
LLVM_ABI bool isIdentityWithExtract() const
Return true if this shuffle extracts the first N elements of exactly one source vector.
static LLVM_ABI bool isOneUseSingleSourceMask(ArrayRef< int > Mask, int VF)
Return true if this shuffle mask represents "clustered" mask of size VF, i.e.
LLVM_ABI bool isIdentityWithPadding() const
Return true if this shuffle lengthens exactly one source vector with undefs in the high elements.
static LLVM_ABI bool isSingleSourceMask(ArrayRef< int > Mask, int NumSrcElts)
Return true if this shuffle mask chooses elements from exactly one source vector.
LLVM_ABI bool isConcat() const
Return true if this shuffle concatenates its 2 source vectors.
static LLVM_ABI bool isDeInterleaveMaskOfFactor(ArrayRef< int > Mask, unsigned Factor, unsigned &Index)
Check if the mask is a DE-interleave mask of the given factor Factor like: <Index,...
LLVM_ABI ShuffleVectorInst * cloneImpl() const
static LLVM_ABI bool isIdentityMask(ArrayRef< int > Mask, int NumSrcElts)
Return true if this shuffle mask chooses elements from exactly one source vector without lane crossin...
static LLVM_ABI bool isExtractSubvectorMask(ArrayRef< int > Mask, int NumSrcElts, int &Index)
Return true if this shuffle mask is an extract subvector mask.
LLVM_ABI void setShuffleMask(ArrayRef< int > Mask)
friend class Instruction
Iterator for Instructions in a BasicBlock.
LLVM_ABI bool isInterleave(unsigned Factor)
Return if this shuffle interleaves its two input vectors together.
static LLVM_ABI bool isReverseMask(ArrayRef< int > Mask, int NumSrcElts)
Return true if this shuffle mask swaps the order of elements from exactly one source vector.
static LLVM_ABI bool isTransposeMask(ArrayRef< int > Mask, int NumSrcElts)
Return true if this shuffle mask is a transpose mask.
LLVM_ABI void commute()
Swap the operands and adjust the mask to preserve the semantics of the instruction.
static LLVM_ABI bool isInsertSubvectorMask(ArrayRef< int > Mask, int NumSrcElts, int &NumSubElts, int &Index)
Return true if this shuffle mask is an insert subvector mask.
static LLVM_ABI Constant * convertShuffleMaskForBitcode(ArrayRef< int > Mask, Type *ResultTy)
static LLVM_ABI bool isReplicationMask(ArrayRef< int > Mask, int &ReplicationFactor, int &VF)
Return true if this shuffle mask replicates each of the VF elements in a vector ReplicationFactor tim...
static LLVM_ABI bool isInterleaveMask(ArrayRef< int > Mask, unsigned Factor, unsigned NumInputElts, SmallVectorImpl< unsigned > &StartIndexes)
Return true if the mask interleaves one or more input vectors together.
This is a 'bitvector' (really, a variable-sized bit array), optimized for the case when the array is ...
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
reference emplace_back(ArgTypes &&... Args)
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
void resize(size_type N)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
AtomicOrdering getOrdering() const
Returns the ordering constraint of this store instruction.
Align getAlign() const
void setVolatile(bool V)
Specify whether this is a volatile store or not.
void setAlignment(Align Align)
friend class Instruction
Iterator for Instructions in a BasicBlock.
LLVM_ABI StoreInst * cloneImpl() const
LLVM_ABI StoreInst(Value *Val, Value *Ptr, InsertPosition InsertBefore)
SyncScope::ID getSyncScopeID() const
Returns the synchronization scope ID of this store instruction.
bool isVolatile() const
Return true if this is a store to a volatile memory location.
void setAtomic(AtomicOrdering Ordering, SyncScope::ID SSID=SyncScope::System)
Sets the ordering constraint and the synchronization scope ID of this store instruction.
StringRef - Represent a constant reference to a string, i.e.
Definition StringRef.h:55
Class to represent struct types.
LLVM_ABI void setSuccessorWeight(unsigned idx, CaseWeightOpt W)
LLVM_ABI Instruction::InstListType::iterator eraseFromParent()
Delegate the call to the underlying SwitchInst::eraseFromParent() and mark this object to not touch t...
LLVM_ABI void addCase(ConstantInt *OnVal, BasicBlock *Dest, CaseWeightOpt W)
Delegate the call to the underlying SwitchInst::addCase() and set the specified branch weight for the...
LLVM_ABI CaseWeightOpt getSuccessorWeight(unsigned idx)
LLVM_ABI void replaceDefaultDest(SwitchInst::CaseIt I)
Replace the default destination by given case.
std::optional< uint32_t > CaseWeightOpt
LLVM_ABI SwitchInst::CaseIt removeCase(SwitchInst::CaseIt I)
Delegate the call to the underlying SwitchInst::removeCase() and remove correspondent branch weight.
void setValue(ConstantInt *V) const
Sets the new value for current case.
void setSuccessor(BasicBlock *S) const
Sets the new successor for current case.
Multiway switch.
void allocHungoffUses(unsigned N)
LLVM_ABI SwitchInst * cloneImpl() const
LLVM_ABI void addCase(ConstantInt *OnVal, BasicBlock *Dest)
Add an entry to the switch instruction.
CaseIteratorImpl< CaseHandle > CaseIt
ConstantInt *const * case_values() const
unsigned getNumCases() const
Return the number of 'cases' in this switch instruction, excluding the default case.
LLVM_ABI CaseIt removeCase(CaseIt I)
This method removes the specified case and its successor from the switch instruction.
This class represents a truncation of integer types.
LLVM_ABI TruncInst * cloneImpl() const
Clone an identical TruncInst.
LLVM_ABI TruncInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition Twine.h:82
static constexpr TypeSize getFixed(ScalarTy ExactSize)
Definition TypeSize.h:343
static constexpr TypeSize get(ScalarTy Quantity, bool Scalable)
Definition TypeSize.h:340
The instances of the Type class are immutable: once they are created, they are never changed.
Definition Type.h:45
bool isVectorTy() const
True if this is an instance of VectorType.
Definition Type.h:273
static LLVM_ABI IntegerType * getInt32Ty(LLVMContext &C)
Definition Type.cpp:296
bool isIntOrIntVectorTy() const
Return true if this is an integer type or a vector of integer types.
Definition Type.h:246
bool isPointerTy() const
True if this is an instance of PointerType.
Definition Type.h:267
LLVM_ABI unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
LLVM_ABI bool isFirstClassType() const
Return true if the type is "first class", meaning it is a valid type for a Value.
Definition Type.cpp:249
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
Definition Type.h:352
LLVM_ABI TypeSize getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
Definition Type.cpp:197
bool isAggregateType() const
Return true if the type is an aggregate type.
Definition Type.h:304
LLVMContext & getContext() const
Return the LLVMContext in which this type was uniqued.
Definition Type.h:128
LLVM_ABI unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
Definition Type.cpp:230
static LLVM_ABI IntegerType * getInt1Ty(LLVMContext &C)
Definition Type.cpp:293
bool isFloatingPointTy() const
Return true if this is one of the floating-point types.
Definition Type.h:184
bool isPtrOrPtrVectorTy() const
Return true if this is a pointer type or a vector of pointer types.
Definition Type.h:270
bool isIntegerTy() const
True if this is an instance of IntegerType.
Definition Type.h:240
bool isTokenTy() const
Return true if this is 'token'.
Definition Type.h:234
bool isFPOrFPVectorTy() const
Return true if this is a FP type or a vector of FP.
Definition Type.h:225
This class represents a cast unsigned integer to floating point.
LLVM_ABI UIToFPInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics.
LLVM_ABI UIToFPInst * cloneImpl() const
Clone an identical UIToFPInst.
UnaryInstruction(Type *Ty, unsigned iType, Value *V, InsertPosition InsertBefore=nullptr)
Definition InstrTypes.h:62
static LLVM_ABI UnaryOperator * Create(UnaryOps Op, Value *S, const Twine &Name=Twine(), InsertPosition InsertBefore=nullptr)
Construct a unary instruction, given the opcode and an operand.
LLVM_ABI UnaryOperator(UnaryOps iType, Value *S, Type *Ty, const Twine &Name, InsertPosition InsertBefore)
LLVM_ABI UnaryOperator * cloneImpl() const
UnaryOps getOpcode() const
Definition InstrTypes.h:154
LLVM_ABI UnreachableInst(LLVMContext &C, InsertPosition InsertBefore=nullptr)
LLVM_ABI bool shouldLowerToTrap(bool TrapUnreachable, bool NoTrapAfterNoreturn) const
friend class Instruction
Iterator for Instructions in a `BasicBlock.
LLVM_ABI UnreachableInst * cloneImpl() const
A Use represents the edge between a Value definition and its users.
Definition Use.h:35
LLVM_ABI void set(Value *Val)
Definition Value.h:905
Use * op_iterator
Definition User.h:279
const Use * getOperandList() const
Definition User.h:225
op_iterator op_begin()
Definition User.h:284
LLVM_ABI void allocHungoffUses(unsigned N, bool WithExtraValues=false)
Allocate the array of Uses, followed by a pointer (with bottom bit set) to the User.
Definition User.cpp:53
const Use & getOperandUse(unsigned i) const
Definition User.h:245
void setNumHungOffUseOperands(unsigned NumOps)
Subclasses with hung off uses need to manage the operand count themselves.
Definition User.h:265
Use & Op()
Definition User.h:196
LLVM_ABI void growHungoffUses(unsigned N, bool WithExtraValues=false)
Grow the number of hung off uses.
Definition User.cpp:70
Value * getOperand(unsigned i) const
Definition User.h:232
unsigned getNumOperands() const
Definition User.h:254
op_iterator op_end()
Definition User.h:286
VAArgInst(Value *List, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
LLVM_ABI VAArgInst * cloneImpl() const
LLVM Value Representation.
Definition Value.h:75
Type * getType() const
All values are typed, get the type of this value.
Definition Value.h:256
LLVM_ABI Value(Type *Ty, unsigned scid)
Definition Value.cpp:53
unsigned char SubclassOptionalData
Hold subclass data that can be dropped.
Definition Value.h:85
LLVM_ABI void setName(const Twine &Name)
Change the name of the value.
Definition Value.cpp:390
LLVM_ABI void replaceAllUsesWith(Value *V)
Change all uses of this to point to a new Value.
Definition Value.cpp:546
LLVM_ABI LLVMContext & getContext() const
All values hold a context through their type.
Definition Value.cpp:1099
unsigned NumUserOperands
Definition Value.h:109
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
Definition Value.cpp:322
Base class of all SIMD vector types.
ElementCount getElementCount() const
Return an ElementCount instance to represent the (possibly scalable) number of elements in the vector...
static LLVM_ABI VectorType * get(Type *ElementType, ElementCount EC)
This static method is the primary way to construct an VectorType.
This class represents zero extension of integer types.
LLVM_ABI ZExtInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics.
LLVM_ABI ZExtInst * cloneImpl() const
Clone an identical ZExtInst.
constexpr ScalarTy getFixedValue() const
Definition TypeSize.h:200
constexpr ScalarTy getKnownMinValue() const
Returns the minimum value this quantity can represent.
Definition TypeSize.h:165
An efficient, type-erasing, non-owning reference to a callable.
typename base_list_type::iterator iterator
Definition ilist.h:121
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition raw_ostream.h:53
CallInst * Call
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr char Attrs[]
Key for Kernel::Metadata::mAttrs.
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
Definition CallingConv.h:24
@ C
The default llvm calling convention, compatible with C.
Definition CallingConv.h:34
bool match(Val *V, const Pattern &P)
cstfp_pred_ty< is_non_zero_not_denormal_fp > m_NonZeroNotDenormalFP()
Match a floating-point non-zero that is not a denormal.
initializer< Ty > init(const Ty &Val)
@ Switch
The "resume-switch" lowering, where there are separate resume and destroy functions that are shared b...
Definition CoroShape.h:31
std::enable_if_t< detail::IsValidPointer< X, Y >::value, X * > extract(Y &&MD)
Extract a Value from Metadata.
Definition Metadata.h:667
NodeAddr< UseNode * > Use
Definition RDFGraph.h:385
Context & getContext() const
Definition BasicBlock.h:99
This is an optimization pass for GlobalISel generic memory operations.
@ Offset
Definition DWP.cpp:532
auto seq_inclusive(T Begin, T End)
Iterate over an integral type from Begin to End inclusive.
Definition Sequence.h:325
auto size(R &&Range, std::enable_if_t< std::is_base_of< std::random_access_iterator_tag, typename std::iterator_traits< decltype(Range.begin())>::iterator_category >::value, void > *=nullptr)
Get the size of a range.
Definition STLExtras.h:1667
unsigned getPointerAddressSpace(const Type *T)
Definition SPIRVUtils.h:367
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:643
FunctionAddr VTableAddr uintptr_t uintptr_t Int32Ty
Definition InstrProf.h:296
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
LLVM_ABI MDNode * getBranchWeightMDNode(const Instruction &I)
Get the branch weights metadata node.
MemoryEffectsBase< IRMemLocation > MemoryEffects
Summary of how a function affects memory in the program.
Definition ModRef.h:301
constexpr bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1737
std::enable_if_t< std::is_unsigned_v< T >, std::optional< T > > checkedMulUnsigned(T LHS, T RHS)
Multiply two unsigned integers LHS and RHS.
auto dyn_cast_or_null(const Y &Val)
Definition Casting.h:753
auto reverse(ContainerTy &&C)
Definition STLExtras.h:406
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
Definition MathExtras.h:279
decltype(auto) get(const PointerIntPair< PointerTy, IntBits, IntType, PtrTraits, Info > &Pair)
FPClassTest
Floating-point class tests, supported by 'is_fpclass' intrinsic.
LLVM_ABI bool NullPointerIsDefined(const Function *F, unsigned AS=0)
Check whether null pointer dereferencing is considered undefined behavior for a given function or an ...
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition Debug.cpp:207
bool isPointerTy(const Type *T)
Definition SPIRVUtils.h:361
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
Definition Casting.h:547
constexpr int PoisonMaskElem
LLVM_ABI unsigned getNumBranchWeights(const MDNode &ProfileData)
AtomicOrdering
Atomic ordering for LLVM's memory model.
OperandBundleDefT< Value * > OperandBundleDef
Definition AutoUpgrade.h:34
@ Mul
Product of integers.
@ Xor
Bitwise or logical XOR of integers.
@ FMul
Product of floats.
@ Sub
Subtraction of integers.
@ Add
Sum of integers.
@ FAdd
Sum of floats.
DWARFExpression::Operation Op
raw_ostream & operator<<(raw_ostream &OS, const APFixedPoint &FX)
OutputIt copy(R &&Range, OutputIt Out)
Definition STLExtras.h:1856
constexpr unsigned BitWidth
LLVM_ABI bool extractBranchWeights(const MDNode *ProfileData, SmallVectorImpl< uint32_t > &Weights)
Extract branch weights from MD_prof metadata.
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:559
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Definition STLExtras.h:1918
bool capturesAnything(CaptureComponents CC)
Definition ModRef.h:324
bool all_equal(std::initializer_list< T > Values)
Returns true if all Values in the initializer lists are equal or the list.
Definition STLExtras.h:2129
auto seq(T Begin, T End)
Iterate over an integral type from Begin up to - but not including - End.
Definition Sequence.h:305
@ Default
The result values are uniform if and only if all operands are uniform.
Definition Uniformity.h:20
LLVM_ABI void scaleProfData(Instruction &I, uint64_t S, uint64_t T)
Scaling the profile data attached to 'I' using the ratio of S/T.
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition Alignment.h:39
Summary of memprof metadata on allocations.
Used to keep track of an operand bundle.
uint32_t End
The index in the Use& vector where operands for this operand bundle ends.
uint32_t Begin
The index in the Use& vector where operands for this operand bundle starts.
Incoming for lane maks phi as machine instruction, incoming register Reg and incoming block Block are...
static LLVM_ABI std::optional< bool > eq(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_EQ result.
static LLVM_ABI std::optional< bool > ne(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_NE result.
static LLVM_ABI std::optional< bool > sge(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_SGE result.
static LLVM_ABI std::optional< bool > ugt(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_UGT result.
static LLVM_ABI std::optional< bool > slt(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_SLT result.
static LLVM_ABI std::optional< bool > ult(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_ULT result.
static LLVM_ABI std::optional< bool > ule(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_ULE result.
static LLVM_ABI std::optional< bool > sle(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_SLE result.
static LLVM_ABI std::optional< bool > sgt(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_SGT result.
static LLVM_ABI std::optional< bool > uge(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_UGE result.
Matching combinators.
A MapVector that performs no allocations if smaller than a certain size.
Definition MapVector.h:276
Indicates this User has operands co-allocated.
Definition User.h:60
Indicates this User has operands and a descriptor co-allocated .
Definition User.h:66