LLVM 23.0.0git
Instructions.cpp
Go to the documentation of this file.
1//===- Instructions.cpp - Implement the LLVM instructions -----------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file implements all of the non-inline methods for the LLVM instruction
10// classes.
11//
12//===----------------------------------------------------------------------===//
13
15#include "LLVMContextImpl.h"
18#include "llvm/ADT/Twine.h"
19#include "llvm/IR/Attributes.h"
20#include "llvm/IR/BasicBlock.h"
21#include "llvm/IR/Constant.h"
23#include "llvm/IR/Constants.h"
24#include "llvm/IR/DataLayout.h"
26#include "llvm/IR/Function.h"
27#include "llvm/IR/InstrTypes.h"
28#include "llvm/IR/Instruction.h"
29#include "llvm/IR/Intrinsics.h"
30#include "llvm/IR/LLVMContext.h"
31#include "llvm/IR/MDBuilder.h"
32#include "llvm/IR/Metadata.h"
33#include "llvm/IR/Module.h"
34#include "llvm/IR/Operator.h"
37#include "llvm/IR/Type.h"
38#include "llvm/IR/Value.h"
46#include "llvm/Support/ModRef.h"
48#include <algorithm>
49#include <cassert>
50#include <cstdint>
51#include <optional>
52#include <vector>
53
54using namespace llvm;
55
57 "disable-i2p-p2i-opt", cl::init(false),
58 cl::desc("Disables inttoptr/ptrtoint roundtrip optimization"));
59
60//===----------------------------------------------------------------------===//
61// AllocaInst Class
62//===----------------------------------------------------------------------===//
63
64std::optional<TypeSize>
66 TypeSize Size = DL.getTypeAllocSize(getAllocatedType());
67 // Zero-sized types can return early since 0 * N = 0 for any array size N.
68 if (Size.isZero())
69 return Size;
70 if (isArrayAllocation()) {
72 if (!C)
73 return std::nullopt;
74 assert(!Size.isScalable() && "Array elements cannot have a scalable size");
75 auto CheckedProd =
76 checkedMulUnsigned(Size.getKnownMinValue(), C->getZExtValue());
77 if (!CheckedProd)
78 return std::nullopt;
79 return TypeSize::getFixed(*CheckedProd);
80 }
81 return Size;
82}
83
84std::optional<TypeSize>
86 std::optional<TypeSize> Size = getAllocationSize(DL);
87 if (!Size)
88 return std::nullopt;
89 auto CheckedProd = checkedMulUnsigned(Size->getKnownMinValue(),
90 static_cast<TypeSize::ScalarTy>(8));
91 if (!CheckedProd)
92 return std::nullopt;
93 return TypeSize::get(*CheckedProd, Size->isScalable());
94}
95
96//===----------------------------------------------------------------------===//
97// SelectInst Class
98//===----------------------------------------------------------------------===//
99
100/// areInvalidOperands - Return a string if the specified operands are invalid
101/// for a select operation, otherwise return null.
102const char *SelectInst::areInvalidOperands(Value *Op0, Value *Op1, Value *Op2) {
103 if (Op1->getType() != Op2->getType())
104 return "both values to select must have same type";
105
106 if (Op1->getType()->isTokenTy())
107 return "select values cannot have token type";
108
109 if (VectorType *VT = dyn_cast<VectorType>(Op0->getType())) {
110 // Vector select.
111 if (VT->getElementType() != Type::getInt1Ty(Op0->getContext()))
112 return "vector select condition element type must be i1";
114 if (!ET)
115 return "selected values for vector select must be vectors";
116 if (ET->getElementCount() != VT->getElementCount())
117 return "vector select requires selected vectors to have "
118 "the same vector length as select condition";
119 } else if (Op0->getType() != Type::getInt1Ty(Op0->getContext())) {
120 return "select condition must be i1 or <n x i1>";
121 }
122 return nullptr;
123}
124
125//===----------------------------------------------------------------------===//
126// PHINode Class
127//===----------------------------------------------------------------------===//
128
129PHINode::PHINode(const PHINode &PN)
130 : Instruction(PN.getType(), Instruction::PHI, AllocMarker),
131 ReservedSpace(PN.getNumOperands()) {
134 std::copy(PN.op_begin(), PN.op_end(), op_begin());
135 copyIncomingBlocks(make_range(PN.block_begin(), PN.block_end()));
137}
138
139// removeIncomingValue - Remove an incoming value. This is useful if a
140// predecessor basic block is deleted.
141Value *PHINode::removeIncomingValue(unsigned Idx, bool DeletePHIIfEmpty) {
142 Value *Removed = getIncomingValue(Idx);
143 // Swap with the end of the list.
144 unsigned Last = getNumOperands() - 1;
145 if (Idx != Last) {
148 }
149
150 // Nuke the last value.
151 Op<-1>().set(nullptr);
153
154 // If the PHI node is dead, because it has zero entries, nuke it now.
155 if (getNumOperands() == 0 && DeletePHIIfEmpty) {
156 // If anyone is using this PHI, make them use a dummy value instead...
159 }
160 return Removed;
161}
162
163void PHINode::removeIncomingValueIf(function_ref<bool(unsigned)> Predicate,
164 bool DeletePHIIfEmpty) {
165 unsigned NumOps = getNumIncomingValues();
166
167 // Loop backwards in case the predicate is purely index based.
168 for (unsigned Idx = NumOps; Idx-- > 0;) {
169 if (Predicate(Idx)) {
170 unsigned LastIdx = NumOps - 1;
171 if (Idx != LastIdx) {
172 setIncomingValue(Idx, getIncomingValue(LastIdx));
173 setIncomingBlock(Idx, getIncomingBlock(LastIdx));
174 }
175 getOperandUse(LastIdx).set(nullptr);
176 NumOps--;
177 }
178 }
179
181
182 // If the PHI node is dead, because it has zero entries, nuke it now.
183 if (getNumOperands() == 0 && DeletePHIIfEmpty) {
184 // If anyone is using this PHI, make them use a dummy value instead...
187 }
188}
189
190/// growOperands - grow operands - This grows the operand list in response
191/// to a push_back style of operation. This grows the number of ops by 1.5
192/// times.
193///
194void PHINode::growOperands() {
195 unsigned e = getNumOperands();
196 unsigned NumOps = e + e / 2;
197 if (NumOps < 2) NumOps = 2; // 2 op PHI nodes are VERY common.
198
199 ReservedSpace = NumOps;
200 growHungoffUses(ReservedSpace, /*WithExtraValues=*/true);
201}
202
203/// hasConstantValue - If the specified PHI node always merges together the same
204/// value, return the value, otherwise return null.
206 // Exploit the fact that phi nodes always have at least one entry.
207 Value *ConstantValue = getIncomingValue(0);
208 for (unsigned i = 1, e = getNumIncomingValues(); i != e; ++i)
209 if (getIncomingValue(i) != ConstantValue && getIncomingValue(i) != this) {
210 if (ConstantValue != this)
211 return nullptr; // Incoming values not all the same.
212 // The case where the first value is this PHI.
213 ConstantValue = getIncomingValue(i);
214 }
215 if (ConstantValue == this)
216 return PoisonValue::get(getType());
217 return ConstantValue;
218}
219
220/// hasConstantOrUndefValue - Whether the specified PHI node always merges
221/// together the same value, assuming that undefs result in the same value as
222/// non-undefs.
223/// Unlike \ref hasConstantValue, this does not return a value because the
224/// unique non-undef incoming value need not dominate the PHI node.
226 Value *ConstantValue = nullptr;
227 for (unsigned i = 0, e = getNumIncomingValues(); i != e; ++i) {
229 if (Incoming != this && !isa<UndefValue>(Incoming)) {
230 if (ConstantValue && ConstantValue != Incoming)
231 return false;
232 ConstantValue = Incoming;
233 }
234 }
235 return true;
236}
237
238//===----------------------------------------------------------------------===//
239// LandingPadInst Implementation
240//===----------------------------------------------------------------------===//
241
242LandingPadInst::LandingPadInst(Type *RetTy, unsigned NumReservedValues,
243 const Twine &NameStr,
244 InsertPosition InsertBefore)
245 : Instruction(RetTy, Instruction::LandingPad, AllocMarker, InsertBefore) {
246 init(NumReservedValues, NameStr);
247}
248
249LandingPadInst::LandingPadInst(const LandingPadInst &LP)
250 : Instruction(LP.getType(), Instruction::LandingPad, AllocMarker),
251 ReservedSpace(LP.getNumOperands()) {
254 Use *OL = getOperandList();
255 const Use *InOL = LP.getOperandList();
256 for (unsigned I = 0, E = ReservedSpace; I != E; ++I)
257 OL[I] = InOL[I];
258
259 setCleanup(LP.isCleanup());
260}
261
262LandingPadInst *LandingPadInst::Create(Type *RetTy, unsigned NumReservedClauses,
263 const Twine &NameStr,
264 InsertPosition InsertBefore) {
265 return new LandingPadInst(RetTy, NumReservedClauses, NameStr, InsertBefore);
266}
267
268void LandingPadInst::init(unsigned NumReservedValues, const Twine &NameStr) {
269 ReservedSpace = NumReservedValues;
271 allocHungoffUses(ReservedSpace);
272 setName(NameStr);
273 setCleanup(false);
274}
275
276/// growOperands - grow operands - This grows the operand list in response to a
277/// push_back style of operation. This grows the number of ops by 2 times.
278void LandingPadInst::growOperands(unsigned Size) {
279 unsigned e = getNumOperands();
280 if (ReservedSpace >= e + Size) return;
281 ReservedSpace = (std::max(e, 1U) + Size / 2) * 2;
282 growHungoffUses(ReservedSpace);
283}
284
286 unsigned OpNo = getNumOperands();
287 growOperands(1);
288 assert(OpNo < ReservedSpace && "Growing didn't work!");
290 getOperandList()[OpNo] = Val;
291}
292
293//===----------------------------------------------------------------------===//
294// CallBase Implementation
295//===----------------------------------------------------------------------===//
296
298 InsertPosition InsertPt) {
299 switch (CB->getOpcode()) {
300 case Instruction::Call:
301 return CallInst::Create(cast<CallInst>(CB), Bundles, InsertPt);
302 case Instruction::Invoke:
303 return InvokeInst::Create(cast<InvokeInst>(CB), Bundles, InsertPt);
304 case Instruction::CallBr:
305 return CallBrInst::Create(cast<CallBrInst>(CB), Bundles, InsertPt);
306 default:
307 llvm_unreachable("Unknown CallBase sub-class!");
308 }
309}
310
312 InsertPosition InsertPt) {
314 for (unsigned i = 0, e = CI->getNumOperandBundles(); i < e; ++i) {
315 auto ChildOB = CI->getOperandBundleAt(i);
316 if (ChildOB.getTagName() != OpB.getTag())
317 OpDefs.emplace_back(ChildOB);
318 }
319 OpDefs.emplace_back(OpB);
320 return CallBase::Create(CI, OpDefs, InsertPt);
321}
322
324
326 assert(getOpcode() == Instruction::CallBr && "Unexpected opcode!");
327 return cast<CallBrInst>(this)->getNumIndirectDests() + 1;
328}
329
331 const Value *V = getCalledOperand();
332 if (isa<Function>(V) || isa<Constant>(V))
333 return false;
334 return !isInlineAsm();
335}
336
337/// Tests if this call site must be tail call optimized. Only a CallInst can
338/// be tail call optimized.
340 if (auto *CI = dyn_cast<CallInst>(this))
341 return CI->isMustTailCall();
342 return false;
343}
344
345/// Tests if this call site is marked as a tail call.
347 if (auto *CI = dyn_cast<CallInst>(this))
348 return CI->isTailCall();
349 return false;
350}
351
354 return F->getIntrinsicID();
356}
357
359 FPClassTest Mask = Attrs.getRetNoFPClass();
360
361 if (const Function *F = getCalledFunction())
362 Mask |= F->getAttributes().getRetNoFPClass();
363 return Mask;
364}
365
367 FPClassTest Mask = Attrs.getParamNoFPClass(i);
368
369 if (const Function *F = getCalledFunction())
370 Mask |= F->getAttributes().getParamNoFPClass(i);
371 return Mask;
372}
373
374std::optional<ConstantRange> CallBase::getRange() const {
375 Attribute CallAttr = Attrs.getRetAttr(Attribute::Range);
377 if (const Function *F = getCalledFunction())
378 FnAttr = F->getRetAttribute(Attribute::Range);
379
380 if (CallAttr.isValid() && FnAttr.isValid())
381 return CallAttr.getRange().intersectWith(FnAttr.getRange());
382 if (CallAttr.isValid())
383 return CallAttr.getRange();
384 if (FnAttr.isValid())
385 return FnAttr.getRange();
386 return std::nullopt;
387}
388
390 if (hasRetAttr(Attribute::NonNull))
391 return true;
392
393 if (getRetDereferenceableBytes() > 0 &&
395 return true;
396
397 return false;
398}
399
401 unsigned Index;
402
403 if (Attrs.hasAttrSomewhere(Kind, &Index))
404 return getArgOperand(Index - AttributeList::FirstArgIndex);
405 if (const Function *F = getCalledFunction())
406 if (F->getAttributes().hasAttrSomewhere(Kind, &Index))
407 return getArgOperand(Index - AttributeList::FirstArgIndex);
408
409 return nullptr;
410}
411
412/// Determine whether the argument or parameter has the given attribute.
413bool CallBase::paramHasAttr(unsigned ArgNo, Attribute::AttrKind Kind) const {
414 assert(ArgNo < arg_size() && "Param index out of bounds!");
415
416 if (Attrs.hasParamAttr(ArgNo, Kind))
417 return true;
418
419 const Function *F = getCalledFunction();
420 if (!F)
421 return false;
422
423 if (!F->getAttributes().hasParamAttr(ArgNo, Kind))
424 return false;
425
426 // Take into account mod/ref by operand bundles.
427 switch (Kind) {
428 case Attribute::ReadNone:
430 case Attribute::ReadOnly:
432 case Attribute::WriteOnly:
433 return !hasReadingOperandBundles();
434 default:
435 return true;
436 }
437}
438
440 bool AllowUndefOrPoison) const {
442 "Argument must be a pointer");
443 if (paramHasAttr(ArgNo, Attribute::NonNull) &&
444 (AllowUndefOrPoison || paramHasAttr(ArgNo, Attribute::NoUndef)))
445 return true;
446
447 if (paramHasAttr(ArgNo, Attribute::Dereferenceable) &&
449 getCaller(),
451 return true;
452
453 return false;
454}
455
456bool CallBase::hasFnAttrOnCalledFunction(Attribute::AttrKind Kind) const {
458 return F->getAttributes().hasFnAttr(Kind);
459
460 return false;
461}
462
463bool CallBase::hasFnAttrOnCalledFunction(StringRef Kind) const {
465 return F->getAttributes().hasFnAttr(Kind);
466
467 return false;
468}
469
470template <typename AK>
471Attribute CallBase::getFnAttrOnCalledFunction(AK Kind) const {
472 if constexpr (std::is_same_v<AK, Attribute::AttrKind>) {
473 // getMemoryEffects() correctly combines memory effects from the call-site,
474 // operand bundles and function.
475 assert(Kind != Attribute::Memory && "Use getMemoryEffects() instead");
476 }
477
479 return F->getAttributes().getFnAttr(Kind);
480
481 return Attribute();
482}
483
484template LLVM_ABI Attribute
485CallBase::getFnAttrOnCalledFunction(Attribute::AttrKind Kind) const;
486template LLVM_ABI Attribute
487CallBase::getFnAttrOnCalledFunction(StringRef Kind) const;
488
489template <typename AK>
490Attribute CallBase::getParamAttrOnCalledFunction(unsigned ArgNo,
491 AK Kind) const {
493
494 if (auto *F = dyn_cast<Function>(V))
495 return F->getAttributes().getParamAttr(ArgNo, Kind);
496
497 return Attribute();
498}
499template LLVM_ABI Attribute CallBase::getParamAttrOnCalledFunction(
500 unsigned ArgNo, Attribute::AttrKind Kind) const;
501template LLVM_ABI Attribute
502CallBase::getParamAttrOnCalledFunction(unsigned ArgNo, StringRef Kind) const;
503
506 for (unsigned i = 0, e = getNumOperandBundles(); i != e; ++i)
508}
509
512 const unsigned BeginIndex) {
513 auto It = op_begin() + BeginIndex;
514 for (auto &B : Bundles)
515 It = std::copy(B.input_begin(), B.input_end(), It);
516
517 auto *ContextImpl = getContext().pImpl;
518 auto BI = Bundles.begin();
519 unsigned CurrentIndex = BeginIndex;
520
521 for (auto &BOI : bundle_op_infos()) {
522 assert(BI != Bundles.end() && "Incorrect allocation?");
523
524 BOI.Tag = ContextImpl->getOrInsertBundleTag(BI->getTag());
525 BOI.Begin = CurrentIndex;
526 BOI.End = CurrentIndex + BI->input_size();
527 CurrentIndex = BOI.End;
528 BI++;
529 }
530
531 assert(BI == Bundles.end() && "Incorrect allocation?");
532
533 return It;
534}
535
537 /// When there isn't many bundles, we do a simple linear search.
538 /// Else fallback to a binary-search that use the fact that bundles usually
539 /// have similar number of argument to get faster convergence.
541 for (auto &BOI : bundle_op_infos())
542 if (BOI.Begin <= OpIdx && OpIdx < BOI.End)
543 return BOI;
544
545 llvm_unreachable("Did not find operand bundle for operand!");
546 }
547
548 assert(OpIdx >= arg_size() && "the Idx is not in the operand bundles");
550 OpIdx < std::prev(bundle_op_info_end())->End &&
551 "The Idx isn't in the operand bundle");
552
553 /// We need a decimal number below and to prevent using floating point numbers
554 /// we use an intergal value multiplied by this constant.
555 constexpr unsigned NumberScaling = 1024;
556
559 bundle_op_iterator Current = Begin;
560
561 while (Begin != End) {
562 unsigned ScaledOperandPerBundle =
563 NumberScaling * (std::prev(End)->End - Begin->Begin) / (End - Begin);
564 Current = Begin + (((OpIdx - Begin->Begin) * NumberScaling) /
565 ScaledOperandPerBundle);
566 if (Current >= End)
567 Current = std::prev(End);
568 assert(Current < End && Current >= Begin &&
569 "the operand bundle doesn't cover every value in the range");
570 if (OpIdx >= Current->Begin && OpIdx < Current->End)
571 break;
572 if (OpIdx >= Current->End)
573 Begin = Current + 1;
574 else
575 End = Current;
576 }
577
578 assert(OpIdx >= Current->Begin && OpIdx < Current->End &&
579 "the operand bundle doesn't cover every value in the range");
580 return *Current;
581}
582
585 InsertPosition InsertPt) {
586 if (CB->getOperandBundle(ID))
587 return CB;
588
590 CB->getOperandBundlesAsDefs(Bundles);
591 Bundles.push_back(OB);
592 return Create(CB, Bundles, InsertPt);
593}
594
596 InsertPosition InsertPt) {
598 bool CreateNew = false;
599
600 for (unsigned I = 0, E = CB->getNumOperandBundles(); I != E; ++I) {
601 auto Bundle = CB->getOperandBundleAt(I);
602 if (Bundle.getTagID() == ID) {
603 CreateNew = true;
604 continue;
605 }
606 Bundles.emplace_back(Bundle);
607 }
608
609 return CreateNew ? Create(CB, Bundles, InsertPt) : CB;
610}
611
613 // Implementation note: this is a conservative implementation of operand
614 // bundle semantics, where *any* non-assume operand bundle (other than
615 // ptrauth) forces a callsite to be at least readonly.
620 getIntrinsicID() != Intrinsic::assume;
621}
622
631
633 MemoryEffects ME = getAttributes().getMemoryEffects();
634 if (auto *Fn = dyn_cast<Function>(getCalledOperand())) {
635 MemoryEffects FnME = Fn->getMemoryEffects();
636 if (hasOperandBundles()) {
637 // TODO: Add a method to get memory effects for operand bundles instead.
639 FnME |= MemoryEffects::readOnly();
641 FnME |= MemoryEffects::writeOnly();
642 }
643 if (isVolatile()) {
644 // Volatile operations also access inaccessible memory.
646 }
647 ME &= FnME;
648 }
649 return ME;
650}
654
655/// Determine if the function does not access memory.
662
663/// Determine if the function does not access or only reads memory.
670
671/// Determine if the function does not access or only writes memory.
678
679/// Determine if the call can access memmory only using pointers based
680/// on its arguments.
687
688/// Determine if the function may only access memory that is
689/// inaccessible from the IR.
696
697/// Determine if the function may only access memory that is
698/// either inaccessible from the IR or pointed to by its arguments.
706
708 if (OpNo < arg_size()) {
709 // If the argument is passed byval, the callee does not have access to the
710 // original pointer and thus cannot capture it.
711 if (isByValArgument(OpNo))
712 return CaptureInfo::none();
713
715 if (auto *Fn = dyn_cast<Function>(getCalledOperand()))
716 CI &= Fn->getAttributes().getParamAttrs(OpNo).getCaptureInfo();
717 return CI;
718 }
719
720 // Bundles on assumes are captures(none).
721 if (getIntrinsicID() == Intrinsic::assume)
722 return CaptureInfo::none();
723
724 // deopt operand bundles are captures(none)
725 auto &BOI = getBundleOpInfoForOperand(OpNo);
726 auto OBU = operandBundleFromBundleOpInfo(BOI);
727 return OBU.isDeoptOperandBundle() ? CaptureInfo::none() : CaptureInfo::all();
728}
729
731 for (unsigned I = 0, E = arg_size(); I < E; ++I) {
733 continue;
734
736 if (auto *Fn = dyn_cast<Function>(getCalledOperand()))
737 CI &= Fn->getAttributes().getParamAttrs(I).getCaptureInfo();
739 return true;
740 }
741 return false;
742}
743
744//===----------------------------------------------------------------------===//
745// CallInst Implementation
746//===----------------------------------------------------------------------===//
747
748void CallInst::init(FunctionType *FTy, Value *Func, ArrayRef<Value *> Args,
749 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr) {
750 this->FTy = FTy;
751 assert(getNumOperands() == Args.size() + CountBundleInputs(Bundles) + 1 &&
752 "NumOperands not set up?");
753
754#ifndef NDEBUG
755 assert((Args.size() == FTy->getNumParams() ||
756 (FTy->isVarArg() && Args.size() > FTy->getNumParams())) &&
757 "Calling a function with bad signature!");
758
759 for (unsigned i = 0; i != Args.size(); ++i)
760 assert((i >= FTy->getNumParams() ||
761 FTy->getParamType(i) == Args[i]->getType()) &&
762 "Calling a function with a bad signature!");
763#endif
764
765 // Set operands in order of their index to match use-list-order
766 // prediction.
767 llvm::copy(Args, op_begin());
768 setCalledOperand(Func);
769
770 auto It = populateBundleOperandInfos(Bundles, Args.size());
771 (void)It;
772 assert(It + 1 == op_end() && "Should add up!");
773
774 setName(NameStr);
775}
776
777void CallInst::init(FunctionType *FTy, Value *Func, const Twine &NameStr) {
778 this->FTy = FTy;
779 assert(getNumOperands() == 1 && "NumOperands not set up?");
780 setCalledOperand(Func);
781
782 assert(FTy->getNumParams() == 0 && "Calling a function with bad signature");
783
784 setName(NameStr);
785}
786
787CallInst::CallInst(FunctionType *Ty, Value *Func, const Twine &Name,
788 AllocInfo AllocInfo, InsertPosition InsertBefore)
789 : CallBase(Ty->getReturnType(), Instruction::Call, AllocInfo,
790 InsertBefore) {
791 init(Ty, Func, Name);
792}
793
794CallInst::CallInst(const CallInst &CI, AllocInfo AllocInfo)
795 : CallBase(CI.Attrs, CI.FTy, CI.getType(), Instruction::Call, AllocInfo) {
797 "Wrong number of operands allocated");
798 setTailCallKind(CI.getTailCallKind());
800
801 std::copy(CI.op_begin(), CI.op_end(), op_begin());
802 std::copy(CI.bundle_op_info_begin(), CI.bundle_op_info_end(),
805}
806
808 InsertPosition InsertPt) {
809 std::vector<Value *> Args(CI->arg_begin(), CI->arg_end());
810
811 auto *NewCI = CallInst::Create(CI->getFunctionType(), CI->getCalledOperand(),
812 Args, OpB, CI->getName(), InsertPt);
813 NewCI->setTailCallKind(CI->getTailCallKind());
814 NewCI->setCallingConv(CI->getCallingConv());
815 NewCI->SubclassOptionalData = CI->SubclassOptionalData;
816 NewCI->setAttributes(CI->getAttributes());
817 NewCI->setDebugLoc(CI->getDebugLoc());
818 return NewCI;
819}
820
821// Update profile weight for call instruction by scaling it using the ratio
822// of S/T. The meaning of "branch_weights" meta data for call instruction is
823// transfered to represent call count.
825 if (T == 0) {
826 LLVM_DEBUG(dbgs() << "Attempting to update profile weights will result in "
827 "div by 0. Ignoring. Likely the function "
828 << getParent()->getParent()->getName()
829 << " has 0 entry count, and contains call instructions "
830 "with non-zero prof info.");
831 return;
832 }
833 scaleProfData(*this, S, T);
834}
835
836//===----------------------------------------------------------------------===//
837// InvokeInst Implementation
838//===----------------------------------------------------------------------===//
839
840void InvokeInst::init(FunctionType *FTy, Value *Fn, BasicBlock *IfNormal,
841 BasicBlock *IfException, ArrayRef<Value *> Args,
843 const Twine &NameStr) {
844 this->FTy = FTy;
845
847 ComputeNumOperands(Args.size(), CountBundleInputs(Bundles)) &&
848 "NumOperands not set up?");
849
850#ifndef NDEBUG
851 assert(((Args.size() == FTy->getNumParams()) ||
852 (FTy->isVarArg() && Args.size() > FTy->getNumParams())) &&
853 "Invoking a function with bad signature");
854
855 for (unsigned i = 0, e = Args.size(); i != e; i++)
856 assert((i >= FTy->getNumParams() ||
857 FTy->getParamType(i) == Args[i]->getType()) &&
858 "Invoking a function with a bad signature!");
859#endif
860
861 // Set operands in order of their index to match use-list-order
862 // prediction.
863 llvm::copy(Args, op_begin());
864 setNormalDest(IfNormal);
865 setUnwindDest(IfException);
867
868 auto It = populateBundleOperandInfos(Bundles, Args.size());
869 (void)It;
870 assert(It + 3 == op_end() && "Should add up!");
871
872 setName(NameStr);
873}
874
875InvokeInst::InvokeInst(const InvokeInst &II, AllocInfo AllocInfo)
876 : CallBase(II.Attrs, II.FTy, II.getType(), Instruction::Invoke, AllocInfo) {
877 assert(getNumOperands() == II.getNumOperands() &&
878 "Wrong number of operands allocated");
879 setCallingConv(II.getCallingConv());
880 std::copy(II.op_begin(), II.op_end(), op_begin());
881 std::copy(II.bundle_op_info_begin(), II.bundle_op_info_end(),
883 SubclassOptionalData = II.SubclassOptionalData;
884}
885
887 InsertPosition InsertPt) {
888 std::vector<Value *> Args(II->arg_begin(), II->arg_end());
889
890 auto *NewII = InvokeInst::Create(
891 II->getFunctionType(), II->getCalledOperand(), II->getNormalDest(),
892 II->getUnwindDest(), Args, OpB, II->getName(), InsertPt);
893 NewII->setCallingConv(II->getCallingConv());
894 NewII->SubclassOptionalData = II->SubclassOptionalData;
895 NewII->setAttributes(II->getAttributes());
896 NewII->setDebugLoc(II->getDebugLoc());
897 return NewII;
898}
899
901 return cast<LandingPadInst>(getUnwindDest()->getFirstNonPHIIt());
902}
903
905 if (T == 0) {
906 LLVM_DEBUG(dbgs() << "Attempting to update profile weights will result in "
907 "div by 0. Ignoring. Likely the function "
908 << getParent()->getParent()->getName()
909 << " has 0 entry count, and contains call instructions "
910 "with non-zero prof info.");
911 return;
912 }
913 scaleProfData(*this, S, T);
914}
915
916//===----------------------------------------------------------------------===//
917// CallBrInst Implementation
918//===----------------------------------------------------------------------===//
919
920void CallBrInst::init(FunctionType *FTy, Value *Fn, BasicBlock *Fallthrough,
921 ArrayRef<BasicBlock *> IndirectDests,
924 const Twine &NameStr) {
925 this->FTy = FTy;
926
927 assert(getNumOperands() == ComputeNumOperands(Args.size(),
928 IndirectDests.size(),
929 CountBundleInputs(Bundles)) &&
930 "NumOperands not set up?");
931
932#ifndef NDEBUG
933 assert(((Args.size() == FTy->getNumParams()) ||
934 (FTy->isVarArg() && Args.size() > FTy->getNumParams())) &&
935 "Calling a function with bad signature");
936
937 for (unsigned i = 0, e = Args.size(); i != e; i++)
938 assert((i >= FTy->getNumParams() ||
939 FTy->getParamType(i) == Args[i]->getType()) &&
940 "Calling a function with a bad signature!");
941#endif
942
943 // Set operands in order of their index to match use-list-order
944 // prediction.
945 llvm::copy(Args, op_begin());
946 NumIndirectDests = IndirectDests.size();
947 setDefaultDest(Fallthrough);
948 for (unsigned i = 0; i != NumIndirectDests; ++i)
949 setIndirectDest(i, IndirectDests[i]);
951
952 auto It = populateBundleOperandInfos(Bundles, Args.size());
953 (void)It;
954 assert(It + 2 + IndirectDests.size() == op_end() && "Should add up!");
955
956 setName(NameStr);
957}
958
959CallBrInst::CallBrInst(const CallBrInst &CBI, AllocInfo AllocInfo)
960 : CallBase(CBI.Attrs, CBI.FTy, CBI.getType(), Instruction::CallBr,
961 AllocInfo) {
963 "Wrong number of operands allocated");
965 std::copy(CBI.op_begin(), CBI.op_end(), op_begin());
966 std::copy(CBI.bundle_op_info_begin(), CBI.bundle_op_info_end(),
969 NumIndirectDests = CBI.NumIndirectDests;
970}
971
972CallBrInst *CallBrInst::Create(CallBrInst *CBI, ArrayRef<OperandBundleDef> OpB,
973 InsertPosition InsertPt) {
974 std::vector<Value *> Args(CBI->arg_begin(), CBI->arg_end());
975
976 auto *NewCBI = CallBrInst::Create(
977 CBI->getFunctionType(), CBI->getCalledOperand(), CBI->getDefaultDest(),
978 CBI->getIndirectDests(), Args, OpB, CBI->getName(), InsertPt);
979 NewCBI->setCallingConv(CBI->getCallingConv());
980 NewCBI->SubclassOptionalData = CBI->SubclassOptionalData;
981 NewCBI->setAttributes(CBI->getAttributes());
982 NewCBI->setDebugLoc(CBI->getDebugLoc());
983 NewCBI->NumIndirectDests = CBI->NumIndirectDests;
984 return NewCBI;
985}
986
987//===----------------------------------------------------------------------===//
988// ReturnInst Implementation
989//===----------------------------------------------------------------------===//
990
991ReturnInst::ReturnInst(const ReturnInst &RI, AllocInfo AllocInfo)
992 : Instruction(Type::getVoidTy(RI.getContext()), Instruction::Ret,
993 AllocInfo) {
995 "Wrong number of operands allocated");
996 if (RI.getNumOperands())
997 Op<0>() = RI.Op<0>();
999}
1000
1001ReturnInst::ReturnInst(LLVMContext &C, Value *retVal, AllocInfo AllocInfo,
1002 InsertPosition InsertBefore)
1003 : Instruction(Type::getVoidTy(C), Instruction::Ret, AllocInfo,
1004 InsertBefore) {
1005 if (retVal)
1006 Op<0>() = retVal;
1007}
1008
1009//===----------------------------------------------------------------------===//
1010// ResumeInst Implementation
1011//===----------------------------------------------------------------------===//
1012
1013ResumeInst::ResumeInst(const ResumeInst &RI)
1014 : Instruction(Type::getVoidTy(RI.getContext()), Instruction::Resume,
1015 AllocMarker) {
1016 Op<0>() = RI.Op<0>();
1017}
1018
1019ResumeInst::ResumeInst(Value *Exn, InsertPosition InsertBefore)
1020 : Instruction(Type::getVoidTy(Exn->getContext()), Instruction::Resume,
1021 AllocMarker, InsertBefore) {
1022 Op<0>() = Exn;
1023}
1024
1025//===----------------------------------------------------------------------===//
1026// CleanupReturnInst Implementation
1027//===----------------------------------------------------------------------===//
1028
1029CleanupReturnInst::CleanupReturnInst(const CleanupReturnInst &CRI,
1031 : Instruction(CRI.getType(), Instruction::CleanupRet, AllocInfo) {
1033 "Wrong number of operands allocated");
1034 setSubclassData<Instruction::OpaqueField>(
1036 Op<0>() = CRI.Op<0>();
1037 if (CRI.hasUnwindDest())
1038 Op<1>() = CRI.Op<1>();
1039}
1040
1041void CleanupReturnInst::init(Value *CleanupPad, BasicBlock *UnwindBB) {
1042 if (UnwindBB)
1043 setSubclassData<UnwindDestField>(true);
1044
1045 Op<0>() = CleanupPad;
1046 if (UnwindBB)
1047 Op<1>() = UnwindBB;
1048}
1049
1050CleanupReturnInst::CleanupReturnInst(Value *CleanupPad, BasicBlock *UnwindBB,
1052 InsertPosition InsertBefore)
1053 : Instruction(Type::getVoidTy(CleanupPad->getContext()),
1054 Instruction::CleanupRet, AllocInfo, InsertBefore) {
1055 init(CleanupPad, UnwindBB);
1056}
1057
1058//===----------------------------------------------------------------------===//
1059// CatchReturnInst Implementation
1060//===----------------------------------------------------------------------===//
1061void CatchReturnInst::init(Value *CatchPad, BasicBlock *BB) {
1062 Op<0>() = CatchPad;
1063 Op<1>() = BB;
1064}
1065
1066CatchReturnInst::CatchReturnInst(const CatchReturnInst &CRI)
1067 : Instruction(Type::getVoidTy(CRI.getContext()), Instruction::CatchRet,
1068 AllocMarker) {
1069 Op<0>() = CRI.Op<0>();
1070 Op<1>() = CRI.Op<1>();
1071}
1072
1073CatchReturnInst::CatchReturnInst(Value *CatchPad, BasicBlock *BB,
1074 InsertPosition InsertBefore)
1075 : Instruction(Type::getVoidTy(BB->getContext()), Instruction::CatchRet,
1076 AllocMarker, InsertBefore) {
1077 init(CatchPad, BB);
1078}
1079
1080//===----------------------------------------------------------------------===//
1081// CatchSwitchInst Implementation
1082//===----------------------------------------------------------------------===//
1083
1084CatchSwitchInst::CatchSwitchInst(Value *ParentPad, BasicBlock *UnwindDest,
1085 unsigned NumReservedValues,
1086 const Twine &NameStr,
1087 InsertPosition InsertBefore)
1088 : Instruction(ParentPad->getType(), Instruction::CatchSwitch, AllocMarker,
1089 InsertBefore) {
1090 if (UnwindDest)
1091 ++NumReservedValues;
1092 init(ParentPad, UnwindDest, NumReservedValues + 1);
1093 setName(NameStr);
1094}
1095
// Copy constructor: re-run init() with CSI's parent pad / unwind dest to set
// up the hung-off use list, then copy the remaining (handler) operands.
CatchSwitchInst::CatchSwitchInst(const CatchSwitchInst &CSI)
    : Instruction(CSI.getType(), Instruction::CatchSwitch, AllocMarker) {
  init(CSI.getParentPad(), CSI.getUnwindDest(), CSI.getNumOperands());
  setNumHungOffUseOperands(ReservedSpace);
  Use *OL = getOperandList();
  const Use *InOL = CSI.getOperandList();
  // Operand 0 (the parent pad) was already set by init(); copy the rest.
  for (unsigned I = 1, E = ReservedSpace; I != E; ++I)
    OL[I] = InOL[I];
}
1106
1107void CatchSwitchInst::init(Value *ParentPad, BasicBlock *UnwindDest,
1108 unsigned NumReservedValues) {
1109 assert(ParentPad && NumReservedValues);
1110
1111 ReservedSpace = NumReservedValues;
1112 setNumHungOffUseOperands(UnwindDest ? 2 : 1);
1113 allocHungoffUses(ReservedSpace);
1114
1115 Op<0>() = ParentPad;
1116 if (UnwindDest) {
1118 setUnwindDest(UnwindDest);
1119 }
1120}
1121
/// growOperands - grow operands - This grows the operand list in response to a
/// push_back style of operation. This grows the number of ops by 2 times.
void CatchSwitchInst::growOperands(unsigned Size) {
  unsigned NumOperands = getNumOperands();
  assert(NumOperands >= 1);
  // Fast path: the reserved hung-off use list already has room.
  if (ReservedSpace >= NumOperands + Size)
    return;
  // Roughly double capacity (with Size == 1 this is exactly NumOperands * 2).
  ReservedSpace = (NumOperands + Size / 2) * 2;
  growHungoffUses(ReservedSpace);
}
1132
1134 unsigned OpNo = getNumOperands();
1135 growOperands(1);
1136 assert(OpNo < ReservedSpace && "Growing didn't work!");
1138 getOperandList()[OpNo] = Handler;
1139}
1140
1142 // Move all subsequent handlers up one.
1143 Use *EndDst = op_end() - 1;
1144 for (Use *CurDst = HI.getCurrent(); CurDst != EndDst; ++CurDst)
1145 *CurDst = *(CurDst + 1);
1146 // Null out the last handler use.
1147 *EndDst = nullptr;
1148
1150}
1151
1152//===----------------------------------------------------------------------===//
1153// FuncletPadInst Implementation
1154//===----------------------------------------------------------------------===//
1155void FuncletPadInst::init(Value *ParentPad, ArrayRef<Value *> Args,
1156 const Twine &NameStr) {
1157 assert(getNumOperands() == 1 + Args.size() && "NumOperands not set up?");
1158 llvm::copy(Args, op_begin());
1159 setParentPad(ParentPad);
1160 setName(NameStr);
1161}
1162
1163FuncletPadInst::FuncletPadInst(const FuncletPadInst &FPI, AllocInfo AllocInfo)
1164 : Instruction(FPI.getType(), FPI.getOpcode(), AllocInfo) {
1166 "Wrong number of operands allocated");
1167 std::copy(FPI.op_begin(), FPI.op_end(), op_begin());
1169}
1170
1171FuncletPadInst::FuncletPadInst(Instruction::FuncletPadOps Op, Value *ParentPad,
1173 const Twine &NameStr,
1174 InsertPosition InsertBefore)
1175 : Instruction(ParentPad->getType(), Op, AllocInfo, InsertBefore) {
1176 init(ParentPad, Args, NameStr);
1177}
1178
1179//===----------------------------------------------------------------------===//
1180// UnreachableInst Implementation
1181//===----------------------------------------------------------------------===//
1182
1184 InsertPosition InsertBefore)
1185 : Instruction(Type::getVoidTy(Context), Instruction::Unreachable,
1186 AllocMarker, InsertBefore) {}
1187
1188//===----------------------------------------------------------------------===//
1189// BranchInst Implementation
1190//===----------------------------------------------------------------------===//
1191
1192void BranchInst::AssertOK() {
1193 if (isConditional())
1194 assert(getCondition()->getType()->isIntegerTy(1) &&
1195 "May only branch on boolean predicates!");
1196}
1197
/// Construct an unconditional branch to \p IfTrue. The single successor is
/// stored at negative operand index -1 (operands grow downward from the end).
BranchInst::BranchInst(BasicBlock *IfTrue, AllocInfo AllocInfo,
                       InsertPosition InsertBefore)
    : Instruction(Type::getVoidTy(IfTrue->getContext()), Instruction::Br,
                  AllocInfo, InsertBefore) {
  assert(IfTrue && "Branch destination may not be null!");
  Op<-1>() = IfTrue;
}
1205
/// Construct a conditional branch: operand layout (from the end) is
/// [-3]=condition, [-2]=false successor, [-1]=true successor.
BranchInst::BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond,
                       AllocInfo AllocInfo, InsertPosition InsertBefore)
    : Instruction(Type::getVoidTy(IfTrue->getContext()), Instruction::Br,
                  AllocInfo, InsertBefore) {
  // Assign in order of operand index to make use-list order predictable.
  Op<-3>() = Cond;
  Op<-2>() = IfFalse;
  Op<-1>() = IfTrue;
#ifndef NDEBUG
  AssertOK();
#endif
}
1218
1219BranchInst::BranchInst(const BranchInst &BI, AllocInfo AllocInfo)
1220 : Instruction(Type::getVoidTy(BI.getContext()), Instruction::Br,
1221 AllocInfo) {
1223 "Wrong number of operands allocated");
1224 // Assign in order of operand index to make use-list order predictable.
1225 if (BI.getNumOperands() != 1) {
1226 assert(BI.getNumOperands() == 3 && "BR can have 1 or 3 operands!");
1227 Op<-3>() = BI.Op<-3>();
1228 Op<-2>() = BI.Op<-2>();
1229 }
1230 Op<-1>() = BI.Op<-1>();
1232}
1233
1236 "Cannot swap successors of an unconditional branch");
1237 Op<-1>().swap(Op<-2>());
1238
1239 // Update profile metadata if present and it matches our structural
1240 // expectations.
1242}
1243
1244//===----------------------------------------------------------------------===//
1245// AllocaInst Implementation
1246//===----------------------------------------------------------------------===//
1247
1248static Value *getAISize(LLVMContext &Context, Value *Amt) {
1249 if (!Amt)
1250 Amt = ConstantInt::get(Type::getInt32Ty(Context), 1);
1251 else {
1252 assert(!isa<BasicBlock>(Amt) &&
1253 "Passed basic block into allocation size parameter! Use other ctor");
1254 assert(Amt->getType()->isIntegerTy() &&
1255 "Allocation array size is not an integer!");
1256 }
1257 return Amt;
1258}
1259
1261 assert(Pos.isValid() &&
1262 "Insertion position cannot be null when alignment not provided!");
1263 BasicBlock *BB = Pos.getBasicBlock();
1264 assert(BB->getParent() &&
1265 "BB must be in a Function when alignment not provided!");
1266 const DataLayout &DL = BB->getDataLayout();
1267 return DL.getPrefTypeAlign(Ty);
1268}
1269
/// Scalar alloca (array size 1) with default alignment.
AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, const Twine &Name,
                       InsertPosition InsertBefore)
    : AllocaInst(Ty, AddrSpace, /*ArraySize=*/nullptr, Name, InsertBefore) {}
1273
/// Array alloca with the DataLayout's preferred alignment for Ty.
AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
                       const Twine &Name, InsertPosition InsertBefore)
    : AllocaInst(Ty, AddrSpace, ArraySize,
                 computeAllocaDefaultAlign(Ty, InsertBefore), Name,
                 InsertBefore) {}
1279
1280AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
1281 Align Align, const Twine &Name,
1282 InsertPosition InsertBefore)
1283 : UnaryInstruction(PointerType::get(Ty->getContext(), AddrSpace), Alloca,
1284 getAISize(Ty->getContext(), ArraySize), InsertBefore),
1285 AllocatedType(Ty) {
1287 assert(!Ty->isVoidTy() && "Cannot allocate void!");
1288 setName(Name);
1289}
1290
1293 return !CI->isOne();
1294 return true;
1295}
1296
1297/// isStaticAlloca - Return true if this alloca is in the entry block of the
1298/// function and is a constant size. If so, the code generator will fold it
1299/// into the prolog/epilog code, so it is basically free.
1301 // Must be constant size.
1302 if (!isa<ConstantInt>(getArraySize())) return false;
1303
1304 // Must be in the entry block.
1305 const BasicBlock *Parent = getParent();
1306 return Parent->isEntryBlock() && !isUsedWithInAlloca();
1307}
1308
1309//===----------------------------------------------------------------------===//
1310// LoadInst Implementation
1311//===----------------------------------------------------------------------===//
1312
1313void LoadInst::AssertOK() {
1315 "Ptr must have pointer type.");
1316}
1317
1319 assert(Pos.isValid() &&
1320 "Insertion position cannot be null when alignment not provided!");
1321 BasicBlock *BB = Pos.getBasicBlock();
1322 assert(BB->getParent() &&
1323 "BB must be in a Function when alignment not provided!");
1324 const DataLayout &DL = BB->getDataLayout();
1325 return DL.getABITypeAlign(Ty);
1326}
1327
/// Non-volatile load with default alignment and no atomicity.
LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name,
                   InsertPosition InsertBef)
    : LoadInst(Ty, Ptr, Name, /*isVolatile=*/false, InsertBef) {}
1331
/// Load with the ABI default alignment for Ty.
LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
                   InsertPosition InsertBef)
    : LoadInst(Ty, Ptr, Name, isVolatile,
               computeLoadStoreDefaultAlign(Ty, InsertBef), InsertBef) {}
1336
/// Non-atomic load with an explicit alignment.
LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
                   Align Align, InsertPosition InsertBef)
    : LoadInst(Ty, Ptr, Name, isVolatile, Align, AtomicOrdering::NotAtomic,
               SyncScope::System, InsertBef) {}
1341
1342LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
1344 InsertPosition InsertBef)
1345 : UnaryInstruction(Ty, Load, Ptr, InsertBef) {
1348 setAtomic(Order, SSID);
1349 AssertOK();
1350 setName(Name);
1351}
1352
1353//===----------------------------------------------------------------------===//
1354// StoreInst Implementation
1355//===----------------------------------------------------------------------===//
1356
1357void StoreInst::AssertOK() {
1358 assert(getOperand(0) && getOperand(1) && "Both operands must be non-null!");
1360 "Ptr must have pointer type!");
1361}
1362
1364 : StoreInst(val, addr, /*isVolatile=*/false, InsertBefore) {}
1365
1367 InsertPosition InsertBefore)
1368 : StoreInst(val, addr, isVolatile,
1369 computeLoadStoreDefaultAlign(val->getType(), InsertBefore),
1370 InsertBefore) {}
1371
1373 InsertPosition InsertBefore)
1375 SyncScope::System, InsertBefore) {}
1376
1378 AtomicOrdering Order, SyncScope::ID SSID,
1379 InsertPosition InsertBefore)
1380 : Instruction(Type::getVoidTy(val->getContext()), Store, AllocMarker,
1381 InsertBefore) {
1382 Op<0>() = val;
1383 Op<1>() = addr;
1386 setAtomic(Order, SSID);
1387 AssertOK();
1388}
1389
1390//===----------------------------------------------------------------------===//
1391// AtomicCmpXchgInst Implementation
1392//===----------------------------------------------------------------------===//
1393
1394void AtomicCmpXchgInst::Init(Value *Ptr, Value *Cmp, Value *NewVal,
1395 Align Alignment, AtomicOrdering SuccessOrdering,
1396 AtomicOrdering FailureOrdering,
1397 SyncScope::ID SSID) {
1398 Op<0>() = Ptr;
1399 Op<1>() = Cmp;
1400 Op<2>() = NewVal;
1401 setSuccessOrdering(SuccessOrdering);
1402 setFailureOrdering(FailureOrdering);
1403 setSyncScopeID(SSID);
1404 setAlignment(Alignment);
1405
1406 assert(getOperand(0) && getOperand(1) && getOperand(2) &&
1407 "All operands must be non-null!");
1409 "Ptr must have pointer type!");
1410 assert(getOperand(1)->getType() == getOperand(2)->getType() &&
1411 "Cmp type and NewVal type must be same!");
1412}
1413
1415 Align Alignment,
1416 AtomicOrdering SuccessOrdering,
1417 AtomicOrdering FailureOrdering,
1418 SyncScope::ID SSID,
1419 InsertPosition InsertBefore)
1420 : Instruction(
1421 StructType::get(Cmp->getType(), Type::getInt1Ty(Cmp->getContext())),
1422 AtomicCmpXchg, AllocMarker, InsertBefore) {
1423 Init(Ptr, Cmp, NewVal, Alignment, SuccessOrdering, FailureOrdering, SSID);
1424}
1425
1426//===----------------------------------------------------------------------===//
1427// AtomicRMWInst Implementation
1428//===----------------------------------------------------------------------===//
1429
1430void AtomicRMWInst::Init(BinOp Operation, Value *Ptr, Value *Val,
1431 Align Alignment, AtomicOrdering Ordering,
1432 SyncScope::ID SSID) {
1433 assert(Ordering != AtomicOrdering::NotAtomic &&
1434 "atomicrmw instructions can only be atomic.");
1435 assert(Ordering != AtomicOrdering::Unordered &&
1436 "atomicrmw instructions cannot be unordered.");
1437 Op<0>() = Ptr;
1438 Op<1>() = Val;
1440 setOrdering(Ordering);
1441 setSyncScopeID(SSID);
1442 setAlignment(Alignment);
1443
1444 assert(getOperand(0) && getOperand(1) && "All operands must be non-null!");
1446 "Ptr must have pointer type!");
1447 assert(Ordering != AtomicOrdering::NotAtomic &&
1448 "AtomicRMW instructions must be atomic!");
1449}
1450
1452 Align Alignment, AtomicOrdering Ordering,
1453 SyncScope::ID SSID, InsertPosition InsertBefore)
1454 : Instruction(Val->getType(), AtomicRMW, AllocMarker, InsertBefore) {
1455 Init(Operation, Ptr, Val, Alignment, Ordering, SSID);
1456}
1457
1459 switch (Op) {
1461 return "xchg";
1462 case AtomicRMWInst::Add:
1463 return "add";
1464 case AtomicRMWInst::Sub:
1465 return "sub";
1466 case AtomicRMWInst::And:
1467 return "and";
1469 return "nand";
1470 case AtomicRMWInst::Or:
1471 return "or";
1472 case AtomicRMWInst::Xor:
1473 return "xor";
1474 case AtomicRMWInst::Max:
1475 return "max";
1476 case AtomicRMWInst::Min:
1477 return "min";
1479 return "umax";
1481 return "umin";
1483 return "fadd";
1485 return "fsub";
1487 return "fmax";
1489 return "fmin";
1491 return "fmaximum";
1493 return "fminimum";
1495 return "uinc_wrap";
1497 return "udec_wrap";
1499 return "usub_cond";
1501 return "usub_sat";
1503 return "<invalid operation>";
1504 }
1505
1506 llvm_unreachable("invalid atomicrmw operation");
1507}
1508
1509//===----------------------------------------------------------------------===//
1510// FenceInst Implementation
1511//===----------------------------------------------------------------------===//
1512
1514 SyncScope::ID SSID, InsertPosition InsertBefore)
1515 : Instruction(Type::getVoidTy(C), Fence, AllocMarker, InsertBefore) {
1516 setOrdering(Ordering);
1517 setSyncScopeID(SSID);
1518}
1519
1520//===----------------------------------------------------------------------===//
1521// GetElementPtrInst Implementation
1522//===----------------------------------------------------------------------===//
1523
/// Wire up a GEP's operands: operand 0 is the base pointer, operands 1..N
/// are the indices.
void GetElementPtrInst::init(Value *Ptr, ArrayRef<Value *> IdxList,
                             const Twine &Name) {
  assert(getNumOperands() == 1 + IdxList.size() &&
         "NumOperands not initialized?");
  Op<0>() = Ptr;
  llvm::copy(IdxList, op_begin() + 1);
  setName(Name);
}
1532
1533GetElementPtrInst::GetElementPtrInst(const GetElementPtrInst &GEPI,
1535 : Instruction(GEPI.getType(), GetElementPtr, AllocInfo),
1536 SourceElementType(GEPI.SourceElementType),
1537 ResultElementType(GEPI.ResultElementType) {
1538 assert(getNumOperands() == GEPI.getNumOperands() &&
1539 "Wrong number of operands allocated");
1540 std::copy(GEPI.op_begin(), GEPI.op_end(), op_begin());
1542}
1543
1545 if (auto *Struct = dyn_cast<StructType>(Ty)) {
1546 if (!Struct->indexValid(Idx))
1547 return nullptr;
1548 return Struct->getTypeAtIndex(Idx);
1549 }
1550 if (!Idx->getType()->isIntOrIntVectorTy())
1551 return nullptr;
1552 if (auto *Array = dyn_cast<ArrayType>(Ty))
1553 return Array->getElementType();
1554 if (auto *Vector = dyn_cast<VectorType>(Ty))
1555 return Vector->getElementType();
1556 return nullptr;
1557}
1558
1560 if (auto *Struct = dyn_cast<StructType>(Ty)) {
1561 if (Idx >= Struct->getNumElements())
1562 return nullptr;
1563 return Struct->getElementType(Idx);
1564 }
1565 if (auto *Array = dyn_cast<ArrayType>(Ty))
1566 return Array->getElementType();
1567 if (auto *Vector = dyn_cast<VectorType>(Ty))
1568 return Vector->getElementType();
1569 return nullptr;
1570}
1571
1572template <typename IndexTy>
1574 if (IdxList.empty())
1575 return Ty;
1576 for (IndexTy V : IdxList.slice(1)) {
1578 if (!Ty)
1579 return Ty;
1580 }
1581 return Ty;
1582}
1583
1587
1589 ArrayRef<Constant *> IdxList) {
1590 return getIndexedTypeInternal(Ty, IdxList);
1591}
1592
1596
1597/// hasAllZeroIndices - Return true if all of the indices of this GEP are
1598/// zeros. If so, the result pointer and the first operand have the same
1599/// value, just potentially different types.
1601 for (unsigned i = 1, e = getNumOperands(); i != e; ++i) {
1603 if (!CI->isZero()) return false;
1604 } else {
1605 return false;
1606 }
1607 }
1608 return true;
1609}
1610
1611/// hasAllConstantIndices - Return true if all of the indices of this GEP are
1612/// constant integers. If so, the result pointer and the first operand have
1613/// a constant offset between them.
1615 for (unsigned i = 1, e = getNumOperands(); i != e; ++i) {
1617 return false;
1618 }
1619 return true;
1620}
1621
1625
1627 GEPNoWrapFlags NW = cast<GEPOperator>(this)->getNoWrapFlags();
1628 if (B)
1630 else
1631 NW = NW.withoutInBounds();
1632 setNoWrapFlags(NW);
1633}
1634
1636 return cast<GEPOperator>(this)->getNoWrapFlags();
1637}
1638
1640 return cast<GEPOperator>(this)->isInBounds();
1641}
1642
1644 return cast<GEPOperator>(this)->hasNoUnsignedSignedWrap();
1645}
1646
1648 return cast<GEPOperator>(this)->hasNoUnsignedWrap();
1649}
1650
1652 APInt &Offset) const {
1653 // Delegate to the generic GEPOperator implementation.
1654 return cast<GEPOperator>(this)->accumulateConstantOffset(DL, Offset);
1655}
1656
1658 const DataLayout &DL, unsigned BitWidth,
1659 SmallMapVector<Value *, APInt, 4> &VariableOffsets,
1660 APInt &ConstantOffset) const {
1661 // Delegate to the generic GEPOperator implementation.
1662 return cast<GEPOperator>(this)->collectOffset(DL, BitWidth, VariableOffsets,
1663 ConstantOffset);
1664}
1665
1666//===----------------------------------------------------------------------===//
1667// ExtractElementInst Implementation
1668//===----------------------------------------------------------------------===//
1669
/// Construct an extractelement; the result type is Val's element type.
ExtractElementInst::ExtractElementInst(Value *Val, Value *Index,
                                       const Twine &Name,
                                       InsertPosition InsertBef)
    : Instruction(cast<VectorType>(Val->getType())->getElementType(),
                  ExtractElement, AllocMarker, InsertBef) {
  assert(isValidOperands(Val, Index) &&
         "Invalid extractelement instruction operands!");
  Op<0>() = Val;
  Op<1>() = Index;
  setName(Name);
}
1681
1682bool ExtractElementInst::isValidOperands(const Value *Val, const Value *Index) {
1683 if (!Val->getType()->isVectorTy() || !Index->getType()->isIntegerTy())
1684 return false;
1685 return true;
1686}
1687
1688//===----------------------------------------------------------------------===//
1689// InsertElementInst Implementation
1690//===----------------------------------------------------------------------===//
1691
/// Construct an insertelement; the result type equals the input vector type.
InsertElementInst::InsertElementInst(Value *Vec, Value *Elt, Value *Index,
                                     const Twine &Name,
                                     InsertPosition InsertBef)
    : Instruction(Vec->getType(), InsertElement, AllocMarker, InsertBef) {
  assert(isValidOperands(Vec, Elt, Index) &&
         "Invalid insertelement instruction operands!");
  Op<0>() = Vec;
  Op<1>() = Elt;
  Op<2>() = Index;
  setName(Name);
}
1703
1705 const Value *Index) {
1706 if (!Vec->getType()->isVectorTy())
1707 return false; // First operand of insertelement must be vector type.
1708
1709 if (Elt->getType() != cast<VectorType>(Vec->getType())->getElementType())
1710 return false;// Second operand of insertelement must be vector element type.
1711
1712 if (!Index->getType()->isIntegerTy())
1713 return false; // Third operand of insertelement must be i32.
1714 return true;
1715}
1716
1717//===----------------------------------------------------------------------===//
1718// ShuffleVectorInst Implementation
1719//===----------------------------------------------------------------------===//
1720
1722 assert(V && "Cannot create placeholder of nullptr V");
1723 return PoisonValue::get(V->getType());
1724}
1725
1727 InsertPosition InsertBefore)
1729 InsertBefore) {}
1730
1732 const Twine &Name,
1733 InsertPosition InsertBefore)
1735 InsertBefore) {}
1736
1738 const Twine &Name,
1739 InsertPosition InsertBefore)
1740 : Instruction(
1741 VectorType::get(cast<VectorType>(V1->getType())->getElementType(),
1742 cast<VectorType>(Mask->getType())->getElementCount()),
1743 ShuffleVector, AllocMarker, InsertBefore) {
1744 assert(isValidOperands(V1, V2, Mask) &&
1745 "Invalid shuffle vector instruction operands!");
1746
1747 Op<0>() = V1;
1748 Op<1>() = V2;
1749 SmallVector<int, 16> MaskArr;
1750 getShuffleMask(cast<Constant>(Mask), MaskArr);
1751 setShuffleMask(MaskArr);
1752 setName(Name);
1753}
1754
1756 const Twine &Name,
1757 InsertPosition InsertBefore)
1758 : Instruction(
1759 VectorType::get(cast<VectorType>(V1->getType())->getElementType(),
1760 Mask.size(), isa<ScalableVectorType>(V1->getType())),
1761 ShuffleVector, AllocMarker, InsertBefore) {
1762 assert(isValidOperands(V1, V2, Mask) &&
1763 "Invalid shuffle vector instruction operands!");
1764 Op<0>() = V1;
1765 Op<1>() = V2;
1766 setShuffleMask(Mask);
1767 setName(Name);
1768}
1769
1771 int NumOpElts = cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
1772 int NumMaskElts = ShuffleMask.size();
1773 SmallVector<int, 16> NewMask(NumMaskElts);
1774 for (int i = 0; i != NumMaskElts; ++i) {
1775 int MaskElt = getMaskValue(i);
1776 if (MaskElt == PoisonMaskElem) {
1777 NewMask[i] = PoisonMaskElem;
1778 continue;
1779 }
1780 assert(MaskElt >= 0 && MaskElt < 2 * NumOpElts && "Out-of-range mask");
1781 MaskElt = (MaskElt < NumOpElts) ? MaskElt + NumOpElts : MaskElt - NumOpElts;
1782 NewMask[i] = MaskElt;
1783 }
1784 setShuffleMask(NewMask);
1785 Op<0>().swap(Op<1>());
1786}
1787
1789 ArrayRef<int> Mask) {
1790 // V1 and V2 must be vectors of the same type.
1791 if (!isa<VectorType>(V1->getType()) || V1->getType() != V2->getType())
1792 return false;
1793
1794 // Make sure the mask elements make sense.
1795 int V1Size =
1796 cast<VectorType>(V1->getType())->getElementCount().getKnownMinValue();
1797 for (int Elem : Mask)
1798 if (Elem != PoisonMaskElem && Elem >= V1Size * 2)
1799 return false;
1800
1802 if ((Mask[0] != 0 && Mask[0] != PoisonMaskElem) || !all_equal(Mask))
1803 return false;
1804
1805 return true;
1806}
1807
1809 const Value *Mask) {
1810 // V1 and V2 must be vectors of the same type.
1811 if (!V1->getType()->isVectorTy() || V1->getType() != V2->getType())
1812 return false;
1813
1814 // Mask must be vector of i32, and must be the same kind of vector as the
1815 // input vectors
1816 auto *MaskTy = dyn_cast<VectorType>(Mask->getType());
1817 if (!MaskTy || !MaskTy->getElementType()->isIntegerTy(32) ||
1819 return false;
1820
1821 // Check to see if Mask is valid.
1823 return true;
1824
1825 // NOTE: Through vector ConstantInt we have the potential to support more
1826 // than just zero splat masks but that requires a LangRef change.
1827 if (isa<ScalableVectorType>(MaskTy))
1828 return false;
1829
1830 unsigned V1Size = cast<FixedVectorType>(V1->getType())->getNumElements();
1831
1832 if (const auto *CI = dyn_cast<ConstantInt>(Mask))
1833 return !CI->uge(V1Size * 2);
1834
1835 if (const auto *MV = dyn_cast<ConstantVector>(Mask)) {
1836 for (Value *Op : MV->operands()) {
1837 if (auto *CI = dyn_cast<ConstantInt>(Op)) {
1838 if (CI->uge(V1Size*2))
1839 return false;
1840 } else if (!isa<UndefValue>(Op)) {
1841 return false;
1842 }
1843 }
1844 return true;
1845 }
1846
1847 if (const auto *CDS = dyn_cast<ConstantDataSequential>(Mask)) {
1848 for (unsigned i = 0, e = cast<FixedVectorType>(MaskTy)->getNumElements();
1849 i != e; ++i)
1850 if (CDS->getElementAsInteger(i) >= V1Size*2)
1851 return false;
1852 return true;
1853 }
1854
1855 return false;
1856}
1857
1859 SmallVectorImpl<int> &Result) {
1860 ElementCount EC = cast<VectorType>(Mask->getType())->getElementCount();
1861
1862 if (isa<ConstantAggregateZero>(Mask) || isa<UndefValue>(Mask)) {
1863 int MaskVal = isa<UndefValue>(Mask) ? -1 : 0;
1864 Result.append(EC.getKnownMinValue(), MaskVal);
1865 return;
1866 }
1867
1868 assert(!EC.isScalable() &&
1869 "Scalable vector shuffle mask must be undef or zeroinitializer");
1870
1871 unsigned NumElts = EC.getFixedValue();
1872
1873 Result.reserve(NumElts);
1874
1875 if (auto *CDS = dyn_cast<ConstantDataSequential>(Mask)) {
1876 for (unsigned i = 0; i != NumElts; ++i)
1877 Result.push_back(CDS->getElementAsInteger(i));
1878 return;
1879 }
1880 for (unsigned i = 0; i != NumElts; ++i) {
1881 Constant *C = Mask->getAggregateElement(i);
1882 Result.push_back(isa<UndefValue>(C) ? -1 :
1883 cast<ConstantInt>(C)->getZExtValue());
1884 }
1885}
1886
1888 ShuffleMask.assign(Mask.begin(), Mask.end());
1889 ShuffleMaskForBitcode = convertShuffleMaskForBitcode(Mask, getType());
1890}
1891
1893 Type *ResultTy) {
1894 Type *Int32Ty = Type::getInt32Ty(ResultTy->getContext());
1895 if (isa<ScalableVectorType>(ResultTy)) {
1896 assert(all_equal(Mask) && "Unexpected shuffle");
1897 Type *VecTy = VectorType::get(Int32Ty, Mask.size(), true);
1898 if (Mask[0] == 0)
1899 return Constant::getNullValue(VecTy);
1900 return PoisonValue::get(VecTy);
1901 }
1903 for (int Elem : Mask) {
1904 if (Elem == PoisonMaskElem)
1906 else
1907 MaskConst.push_back(ConstantInt::get(Int32Ty, Elem));
1908 }
1909 return ConstantVector::get(MaskConst);
1910}
1911
1912static bool isSingleSourceMaskImpl(ArrayRef<int> Mask, int NumOpElts) {
1913 assert(!Mask.empty() && "Shuffle mask must contain elements");
1914 bool UsesLHS = false;
1915 bool UsesRHS = false;
1916 for (int I : Mask) {
1917 if (I == -1)
1918 continue;
1919 assert(I >= 0 && I < (NumOpElts * 2) &&
1920 "Out-of-bounds shuffle mask element");
1921 UsesLHS |= (I < NumOpElts);
1922 UsesRHS |= (I >= NumOpElts);
1923 if (UsesLHS && UsesRHS)
1924 return false;
1925 }
1926 // Allow for degenerate case: completely undef mask means neither source is used.
1927 return UsesLHS || UsesRHS;
1928}
1929
1931 // We don't have vector operand size information, so assume operands are the
1932 // same size as the mask.
1933 return isSingleSourceMaskImpl(Mask, NumSrcElts);
1934}
1935
1936static bool isIdentityMaskImpl(ArrayRef<int> Mask, int NumOpElts) {
1937 if (!isSingleSourceMaskImpl(Mask, NumOpElts))
1938 return false;
1939 for (int i = 0, NumMaskElts = Mask.size(); i < NumMaskElts; ++i) {
1940 if (Mask[i] == -1)
1941 continue;
1942 if (Mask[i] != i && Mask[i] != (NumOpElts + i))
1943 return false;
1944 }
1945 return true;
1946}
1947
1949 if (Mask.size() != static_cast<unsigned>(NumSrcElts))
1950 return false;
1951 // We don't have vector operand size information, so assume operands are the
1952 // same size as the mask.
1953 return isIdentityMaskImpl(Mask, NumSrcElts);
1954}
1955
1957 if (Mask.size() != static_cast<unsigned>(NumSrcElts))
1958 return false;
1959 if (!isSingleSourceMask(Mask, NumSrcElts))
1960 return false;
1961
1962 // The number of elements in the mask must be at least 2.
1963 if (NumSrcElts < 2)
1964 return false;
1965
1966 for (int I = 0, E = Mask.size(); I < E; ++I) {
1967 if (Mask[I] == -1)
1968 continue;
1969 if (Mask[I] != (NumSrcElts - 1 - I) &&
1970 Mask[I] != (NumSrcElts + NumSrcElts - 1 - I))
1971 return false;
1972 }
1973 return true;
1974}
1975
1977 if (Mask.size() != static_cast<unsigned>(NumSrcElts))
1978 return false;
1979 if (!isSingleSourceMask(Mask, NumSrcElts))
1980 return false;
1981 for (int I = 0, E = Mask.size(); I < E; ++I) {
1982 if (Mask[I] == -1)
1983 continue;
1984 if (Mask[I] != 0 && Mask[I] != NumSrcElts)
1985 return false;
1986 }
1987 return true;
1988}
1989
1991 if (Mask.size() != static_cast<unsigned>(NumSrcElts))
1992 return false;
1993 // Select is differentiated from identity. It requires using both sources.
1994 if (isSingleSourceMask(Mask, NumSrcElts))
1995 return false;
1996 for (int I = 0, E = Mask.size(); I < E; ++I) {
1997 if (Mask[I] == -1)
1998 continue;
1999 if (Mask[I] != I && Mask[I] != (NumSrcElts + I))
2000 return false;
2001 }
2002 return true;
2003}
2004
2006 // Example masks that will return true:
2007 // v1 = <a, b, c, d>
2008 // v2 = <e, f, g, h>
2009 // trn1 = shufflevector v1, v2 <0, 4, 2, 6> = <a, e, c, g>
2010 // trn2 = shufflevector v1, v2 <1, 5, 3, 7> = <b, f, d, h>
2011
2012 if (Mask.size() != static_cast<unsigned>(NumSrcElts))
2013 return false;
2014 // 1. The number of elements in the mask must be a power-of-2 and at least 2.
2015 int Sz = Mask.size();
2016 if (Sz < 2 || !isPowerOf2_32(Sz))
2017 return false;
2018
2019 // 2. The first element of the mask must be either a 0 or a 1.
2020 if (Mask[0] != 0 && Mask[0] != 1)
2021 return false;
2022
2023 // 3. The difference between the first 2 elements must be equal to the
2024 // number of elements in the mask.
2025 if ((Mask[1] - Mask[0]) != NumSrcElts)
2026 return false;
2027
2028 // 4. The difference between consecutive even-numbered and odd-numbered
2029 // elements must be equal to 2.
2030 for (int I = 2; I < Sz; ++I) {
2031 int MaskEltVal = Mask[I];
2032 if (MaskEltVal == -1)
2033 return false;
2034 int MaskEltPrevVal = Mask[I - 2];
2035 if (MaskEltVal - MaskEltPrevVal != 2)
2036 return false;
2037 }
2038 return true;
2039}
2040
2042 int &Index) {
2043 if (Mask.size() != static_cast<unsigned>(NumSrcElts))
2044 return false;
2045 // Example: shufflevector <4 x n> A, <4 x n> B, <1,2,3,4>
2046 int StartIndex = -1;
2047 for (int I = 0, E = Mask.size(); I != E; ++I) {
2048 int MaskEltVal = Mask[I];
2049 if (MaskEltVal == -1)
2050 continue;
2051
2052 if (StartIndex == -1) {
2053 // Don't support a StartIndex that begins in the second input, or if the
2054 // first non-undef index would access below the StartIndex.
2055 if (MaskEltVal < I || NumSrcElts <= (MaskEltVal - I))
2056 return false;
2057
2058 StartIndex = MaskEltVal - I;
2059 continue;
2060 }
2061
2062 // Splice is sequential starting from StartIndex.
2063 if (MaskEltVal != (StartIndex + I))
2064 return false;
2065 }
2066
2067 if (StartIndex == -1)
2068 return false;
2069
2070 // NOTE: This accepts StartIndex == 0 (COPY).
2071 Index = StartIndex;
2072 return true;
2073}
2074
2076 int NumSrcElts, int &Index) {
2077 // Must extract from a single source.
2078 if (!isSingleSourceMaskImpl(Mask, NumSrcElts))
2079 return false;
2080
2081 // Must be smaller (else this is an Identity shuffle).
2082 if (NumSrcElts <= (int)Mask.size())
2083 return false;
2084
2085 // Find start of extraction, accounting that we may start with an UNDEF.
2086 int SubIndex = -1;
2087 for (int i = 0, e = Mask.size(); i != e; ++i) {
2088 int M = Mask[i];
2089 if (M < 0)
2090 continue;
2091 int Offset = (M % NumSrcElts) - i;
2092 if (0 <= SubIndex && SubIndex != Offset)
2093 return false;
2094 SubIndex = Offset;
2095 }
2096
2097 if (0 <= SubIndex && SubIndex + (int)Mask.size() <= NumSrcElts) {
2098 Index = SubIndex;
2099 return true;
2100 }
2101 return false;
2102}
2103
2105 int NumSrcElts, int &NumSubElts,
2106 int &Index) {
2107 int NumMaskElts = Mask.size();
2108
2109 // Don't try to match if we're shuffling to a smaller size.
2110 if (NumMaskElts < NumSrcElts)
2111 return false;
2112
2113 // TODO: We don't recognize self-insertion/widening.
2114 if (isSingleSourceMaskImpl(Mask, NumSrcElts))
2115 return false;
2116
2117 // Determine which mask elements are attributed to which source.
2118 APInt UndefElts = APInt::getZero(NumMaskElts);
2119 APInt Src0Elts = APInt::getZero(NumMaskElts);
2120 APInt Src1Elts = APInt::getZero(NumMaskElts);
2121 bool Src0Identity = true;
2122 bool Src1Identity = true;
2123
2124 for (int i = 0; i != NumMaskElts; ++i) {
2125 int M = Mask[i];
2126 if (M < 0) {
2127 UndefElts.setBit(i);
2128 continue;
2129 }
2130 if (M < NumSrcElts) {
2131 Src0Elts.setBit(i);
2132 Src0Identity &= (M == i);
2133 continue;
2134 }
2135 Src1Elts.setBit(i);
2136 Src1Identity &= (M == (i + NumSrcElts));
2137 }
2138 assert((Src0Elts | Src1Elts | UndefElts).isAllOnes() &&
2139 "unknown shuffle elements");
2140 assert(!Src0Elts.isZero() && !Src1Elts.isZero() &&
2141 "2-source shuffle not found");
2142
2143 // Determine lo/hi span ranges.
2144 // TODO: How should we handle undefs at the start of subvector insertions?
2145 int Src0Lo = Src0Elts.countr_zero();
2146 int Src1Lo = Src1Elts.countr_zero();
2147 int Src0Hi = NumMaskElts - Src0Elts.countl_zero();
2148 int Src1Hi = NumMaskElts - Src1Elts.countl_zero();
2149
2150 // If src0 is in place, see if the src1 elements is inplace within its own
2151 // span.
2152 if (Src0Identity) {
2153 int NumSub1Elts = Src1Hi - Src1Lo;
2154 ArrayRef<int> Sub1Mask = Mask.slice(Src1Lo, NumSub1Elts);
2155 if (isIdentityMaskImpl(Sub1Mask, NumSrcElts)) {
2156 NumSubElts = NumSub1Elts;
2157 Index = Src1Lo;
2158 return true;
2159 }
2160 }
2161
2162 // If src1 is in place, see if the src0 elements is inplace within its own
2163 // span.
2164 if (Src1Identity) {
2165 int NumSub0Elts = Src0Hi - Src0Lo;
2166 ArrayRef<int> Sub0Mask = Mask.slice(Src0Lo, NumSub0Elts);
2167 if (isIdentityMaskImpl(Sub0Mask, NumSrcElts)) {
2168 NumSubElts = NumSub0Elts;
2169 Index = Src0Lo;
2170 return true;
2171 }
2172 }
2173
2174 return false;
2175}
2176
2178 // FIXME: Not currently possible to express a shuffle mask for a scalable
2179 // vector for this case.
2181 return false;
2182
2183 int NumOpElts = cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
2184 int NumMaskElts = cast<FixedVectorType>(getType())->getNumElements();
2185 if (NumMaskElts <= NumOpElts)
2186 return false;
2187
2188 // The first part of the mask must choose elements from exactly 1 source op.
2190 if (!isIdentityMaskImpl(Mask, NumOpElts))
2191 return false;
2192
2193 // All extending must be with undef elements.
2194 for (int i = NumOpElts; i < NumMaskElts; ++i)
2195 if (Mask[i] != -1)
2196 return false;
2197
2198 return true;
2199}
2200
2202 // FIXME: Not currently possible to express a shuffle mask for a scalable
2203 // vector for this case.
2205 return false;
2206
2207 int NumOpElts = cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
2208 int NumMaskElts = cast<FixedVectorType>(getType())->getNumElements();
2209 if (NumMaskElts >= NumOpElts)
2210 return false;
2211
2212 return isIdentityMaskImpl(getShuffleMask(), NumOpElts);
2213}
2214
2216 // Vector concatenation is differentiated from identity with padding.
2218 return false;
2219
2220 // FIXME: Not currently possible to express a shuffle mask for a scalable
2221 // vector for this case.
2223 return false;
2224
2225 int NumOpElts = cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
2226 int NumMaskElts = cast<FixedVectorType>(getType())->getNumElements();
2227 if (NumMaskElts != NumOpElts * 2)
2228 return false;
2229
2230 // Use the mask length rather than the operands' vector lengths here. We
2231 // already know that the shuffle returns a vector twice as long as the inputs,
2232 // and neither of the inputs are undef vectors. If the mask picks consecutive
2233 // elements from both inputs, then this is a concatenation of the inputs.
2234 return isIdentityMaskImpl(getShuffleMask(), NumMaskElts);
2235}
2236
2238 int ReplicationFactor, int VF) {
2239 assert(Mask.size() == (unsigned)ReplicationFactor * VF &&
2240 "Unexpected mask size.");
2241
2242 for (int CurrElt : seq(VF)) {
2243 ArrayRef<int> CurrSubMask = Mask.take_front(ReplicationFactor);
2244 assert(CurrSubMask.size() == (unsigned)ReplicationFactor &&
2245 "Run out of mask?");
2246 Mask = Mask.drop_front(ReplicationFactor);
2247 if (!all_of(CurrSubMask, [CurrElt](int MaskElt) {
2248 return MaskElt == PoisonMaskElem || MaskElt == CurrElt;
2249 }))
2250 return false;
2251 }
2252 assert(Mask.empty() && "Did not consume the whole mask?");
2253
2254 return true;
2255}
2256
2258 int &ReplicationFactor, int &VF) {
2259 // undef-less case is trivial.
2260 if (!llvm::is_contained(Mask, PoisonMaskElem)) {
2261 ReplicationFactor =
2262 Mask.take_while([](int MaskElt) { return MaskElt == 0; }).size();
2263 if (ReplicationFactor == 0 || Mask.size() % ReplicationFactor != 0)
2264 return false;
2265 VF = Mask.size() / ReplicationFactor;
2266 return isReplicationMaskWithParams(Mask, ReplicationFactor, VF);
2267 }
2268
2269 // However, if the mask contains undef's, we have to enumerate possible tuples
2270 // and pick one. There are bounds on replication factor: [1, mask size]
2271 // (where RF=1 is an identity shuffle, RF=mask size is a broadcast shuffle)
2272 // Additionally, mask size is a replication factor multiplied by vector size,
2273 // which further significantly reduces the search space.
2274
2275 // Before doing that, let's perform basic correctness checking first.
2276 int Largest = -1;
2277 for (int MaskElt : Mask) {
2278 if (MaskElt == PoisonMaskElem)
2279 continue;
2280 // Elements must be in non-decreasing order.
2281 if (MaskElt < Largest)
2282 return false;
2283 Largest = std::max(Largest, MaskElt);
2284 }
2285
2286 // Prefer larger replication factor if all else equal.
2287 for (int PossibleReplicationFactor :
2288 reverse(seq_inclusive<unsigned>(1, Mask.size()))) {
2289 if (Mask.size() % PossibleReplicationFactor != 0)
2290 continue;
2291 int PossibleVF = Mask.size() / PossibleReplicationFactor;
2292 if (!isReplicationMaskWithParams(Mask, PossibleReplicationFactor,
2293 PossibleVF))
2294 continue;
2295 ReplicationFactor = PossibleReplicationFactor;
2296 VF = PossibleVF;
2297 return true;
2298 }
2299
2300 return false;
2301}
2302
2303bool ShuffleVectorInst::isReplicationMask(int &ReplicationFactor,
2304 int &VF) const {
2305 // Not possible to express a shuffle mask for a scalable vector for this
2306 // case.
2308 return false;
2309
2310 VF = cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
2311 if (ShuffleMask.size() % VF != 0)
2312 return false;
2313 ReplicationFactor = ShuffleMask.size() / VF;
2314
2315 return isReplicationMaskWithParams(ShuffleMask, ReplicationFactor, VF);
2316}
2317
2319 if (VF <= 0 || Mask.size() < static_cast<unsigned>(VF) ||
2320 Mask.size() % VF != 0)
2321 return false;
2322 for (unsigned K = 0, Sz = Mask.size(); K < Sz; K += VF) {
2323 ArrayRef<int> SubMask = Mask.slice(K, VF);
2324 if (all_of(SubMask, equal_to(PoisonMaskElem)))
2325 continue;
2326 SmallBitVector Used(VF, false);
2327 for (int Idx : SubMask) {
2328 if (Idx != PoisonMaskElem && Idx < VF)
2329 Used.set(Idx);
2330 }
2331 if (!Used.all())
2332 return false;
2333 }
2334 return true;
2335}
2336
/// Return true if this shuffle mask is a one-use single-source mask.
2339 // Not possible to express a shuffle mask for a scalable vector for this
2340 // case.
2342 return false;
2343 if (!isSingleSourceMask(ShuffleMask, VF))
2344 return false;
2345
2346 return isOneUseSingleSourceMask(ShuffleMask, VF);
2347}
2348
2349bool ShuffleVectorInst::isInterleave(unsigned Factor) {
2351 // shuffle_vector can only interleave fixed length vectors - for scalable
2352 // vectors, see the @llvm.vector.interleave2 intrinsic
2353 if (!OpTy)
2354 return false;
2355 unsigned OpNumElts = OpTy->getNumElements();
2356
2357 return isInterleaveMask(ShuffleMask, Factor, OpNumElts * 2);
2358}
2359
2361 ArrayRef<int> Mask, unsigned Factor, unsigned NumInputElts,
2362 SmallVectorImpl<unsigned> &StartIndexes) {
2363 unsigned NumElts = Mask.size();
2364 if (NumElts % Factor)
2365 return false;
2366
2367 unsigned LaneLen = NumElts / Factor;
2368 if (!isPowerOf2_32(LaneLen))
2369 return false;
2370
2371 StartIndexes.resize(Factor);
2372
2373 // Check whether each element matches the general interleaved rule.
2374 // Ignore undef elements, as long as the defined elements match the rule.
2375 // Outer loop processes all factors (x, y, z in the above example)
2376 unsigned I = 0, J;
2377 for (; I < Factor; I++) {
2378 unsigned SavedLaneValue;
2379 unsigned SavedNoUndefs = 0;
2380
2381 // Inner loop processes consecutive accesses (x, x+1... in the example)
2382 for (J = 0; J < LaneLen - 1; J++) {
2383 // Lane computes x's position in the Mask
2384 unsigned Lane = J * Factor + I;
2385 unsigned NextLane = Lane + Factor;
2386 int LaneValue = Mask[Lane];
2387 int NextLaneValue = Mask[NextLane];
2388
2389 // If both are defined, values must be sequential
2390 if (LaneValue >= 0 && NextLaneValue >= 0 &&
2391 LaneValue + 1 != NextLaneValue)
2392 break;
2393
2394 // If the next value is undef, save the current one as reference
2395 if (LaneValue >= 0 && NextLaneValue < 0) {
2396 SavedLaneValue = LaneValue;
2397 SavedNoUndefs = 1;
2398 }
2399
2400 // Undefs are allowed, but defined elements must still be consecutive:
2401 // i.e.: x,..., undef,..., x + 2,..., undef,..., undef,..., x + 5, ....
2402 // Verify this by storing the last non-undef followed by an undef
2403 // Check that following non-undef masks are incremented with the
2404 // corresponding distance.
2405 if (SavedNoUndefs > 0 && LaneValue < 0) {
2406 SavedNoUndefs++;
2407 if (NextLaneValue >= 0 &&
2408 SavedLaneValue + SavedNoUndefs != (unsigned)NextLaneValue)
2409 break;
2410 }
2411 }
2412
2413 if (J < LaneLen - 1)
2414 return false;
2415
2416 int StartMask = 0;
2417 if (Mask[I] >= 0) {
2418 // Check that the start of the I range (J=0) is greater than 0
2419 StartMask = Mask[I];
2420 } else if (Mask[(LaneLen - 1) * Factor + I] >= 0) {
2421 // StartMask defined by the last value in lane
2422 StartMask = Mask[(LaneLen - 1) * Factor + I] - J;
2423 } else if (SavedNoUndefs > 0) {
2424 // StartMask defined by some non-zero value in the j loop
2425 StartMask = SavedLaneValue - (LaneLen - 1 - SavedNoUndefs);
2426 }
2427 // else StartMask remains set to 0, i.e. all elements are undefs
2428
2429 if (StartMask < 0)
2430 return false;
2431 // We must stay within the vectors; This case can happen with undefs.
2432 if (StartMask + LaneLen > NumInputElts)
2433 return false;
2434
2435 StartIndexes[I] = StartMask;
2436 }
2437
2438 return true;
2439}
2440
2441/// Check if the mask is a DE-interleave mask of the given factor
2442/// \p Factor like:
2443/// <Index, Index+Factor, ..., Index+(NumElts-1)*Factor>
2445 unsigned Factor,
2446 unsigned &Index) {
2447 // Check all potential start indices from 0 to (Factor - 1).
2448 for (unsigned Idx = 0; Idx < Factor; Idx++) {
2449 unsigned I = 0;
2450
2451 // Check that elements are in ascending order by Factor. Ignore undef
2452 // elements.
2453 for (; I < Mask.size(); I++)
2454 if (Mask[I] >= 0 && static_cast<unsigned>(Mask[I]) != Idx + I * Factor)
2455 break;
2456
2457 if (I == Mask.size()) {
2458 Index = Idx;
2459 return true;
2460 }
2461 }
2462
2463 return false;
2464}
2465
2466/// Try to lower a vector shuffle as a bit rotation.
2467///
2468/// Look for a repeated rotation pattern in each sub group.
2469/// Returns an element-wise left bit rotation amount or -1 if failed.
2470static int matchShuffleAsBitRotate(ArrayRef<int> Mask, int NumSubElts) {
2471 int NumElts = Mask.size();
2472 assert((NumElts % NumSubElts) == 0 && "Illegal shuffle mask");
2473
2474 int RotateAmt = -1;
2475 for (int i = 0; i != NumElts; i += NumSubElts) {
2476 for (int j = 0; j != NumSubElts; ++j) {
2477 int M = Mask[i + j];
2478 if (M < 0)
2479 continue;
2480 if (M < i || M >= i + NumSubElts)
2481 return -1;
2482 int Offset = (NumSubElts - (M - (i + j))) % NumSubElts;
2483 if (0 <= RotateAmt && Offset != RotateAmt)
2484 return -1;
2485 RotateAmt = Offset;
2486 }
2487 }
2488 return RotateAmt;
2489}
2490
2492 ArrayRef<int> Mask, unsigned EltSizeInBits, unsigned MinSubElts,
2493 unsigned MaxSubElts, unsigned &NumSubElts, unsigned &RotateAmt) {
2494 for (NumSubElts = MinSubElts; NumSubElts <= MaxSubElts; NumSubElts *= 2) {
2495 int EltRotateAmt = matchShuffleAsBitRotate(Mask, NumSubElts);
2496 if (EltRotateAmt < 0)
2497 continue;
2498 RotateAmt = EltRotateAmt * EltSizeInBits;
2499 return true;
2500 }
2501
2502 return false;
2503}
2504
2505//===----------------------------------------------------------------------===//
2506// InsertValueInst Class
2507//===----------------------------------------------------------------------===//
2508
2509void InsertValueInst::init(Value *Agg, Value *Val, ArrayRef<unsigned> Idxs,
2510 const Twine &Name) {
2511 assert(getNumOperands() == 2 && "NumOperands not initialized?");
2512
2513 // There's no fundamental reason why we require at least one index
2514 // (other than weirdness with &*IdxBegin being invalid; see
2515 // getelementptr's init routine for example). But there's no
2516 // present need to support it.
2517 assert(!Idxs.empty() && "InsertValueInst must have at least one index");
2518
2520 Val->getType() && "Inserted value must match indexed type!");
2521 Op<0>() = Agg;
2522 Op<1>() = Val;
2523
2524 Indices.append(Idxs.begin(), Idxs.end());
2525 setName(Name);
2526}
2527
// Copy constructor: duplicates the aggregate/value operands and the index
// list of \p IVI.
InsertValueInst::InsertValueInst(const InsertValueInst &IVI)
    : Instruction(IVI.getType(), InsertValue, AllocMarker),
      Indices(IVI.Indices) {
  Op<0>() = IVI.getOperand(0);
  Op<1>() = IVI.getOperand(1);
}
2535
2536//===----------------------------------------------------------------------===//
2537// ExtractValueInst Class
2538//===----------------------------------------------------------------------===//
2539
2540void ExtractValueInst::init(ArrayRef<unsigned> Idxs, const Twine &Name) {
2541 assert(getNumOperands() == 1 && "NumOperands not initialized?");
2542
2543 // There's no fundamental reason why we require at least one index.
2544 // But there's no present need to support it.
2545 assert(!Idxs.empty() && "ExtractValueInst must have at least one index");
2546
2547 Indices.append(Idxs.begin(), Idxs.end());
2548 setName(Name);
2549}
2550
// Copy constructor: duplicates the aggregate operand and index list of
// \p EVI without attaching the new instruction to a basic block.
ExtractValueInst::ExtractValueInst(const ExtractValueInst &EVI)
    : UnaryInstruction(EVI.getType(), ExtractValue, EVI.getOperand(0),
                       (BasicBlock *)nullptr),
      Indices(EVI.Indices) {
}
2557
2558// getIndexedType - Returns the type of the element that would be extracted
2559// with an extractvalue instruction with the specified parameters.
2560//
2561// A null type is returned if the indices are invalid for the specified
2562// pointer type.
2563//
2565 ArrayRef<unsigned> Idxs) {
2566 for (unsigned Index : Idxs) {
2567 // We can't use CompositeType::indexValid(Index) here.
2568 // indexValid() always returns true for arrays because getelementptr allows
2569 // out-of-bounds indices. Since we don't allow those for extractvalue and
2570 // insertvalue we need to check array indexing manually.
2571 // Since the only other types we can index into are struct types it's just
2572 // as easy to check those manually as well.
2573 if (ArrayType *AT = dyn_cast<ArrayType>(Agg)) {
2574 if (Index >= AT->getNumElements())
2575 return nullptr;
2576 Agg = AT->getElementType();
2577 } else if (StructType *ST = dyn_cast<StructType>(Agg)) {
2578 if (Index >= ST->getNumElements())
2579 return nullptr;
2580 Agg = ST->getElementType(Index);
2581 } else {
2582 // Not a valid type to index into.
2583 return nullptr;
2584 }
2585 }
2586 return Agg;
2587}
2588
2589//===----------------------------------------------------------------------===//
2590// UnaryOperator Class
2591//===----------------------------------------------------------------------===//
2592
2594 const Twine &Name, InsertPosition InsertBefore)
2595 : UnaryInstruction(Ty, iType, S, InsertBefore) {
2596 Op<0>() = S;
2597 setName(Name);
2598 AssertOK();
2599}
2600
2602 InsertPosition InsertBefore) {
2603 return new UnaryOperator(Op, S, S->getType(), Name, InsertBefore);
2604}
2605
2606void UnaryOperator::AssertOK() {
2607 Value *LHS = getOperand(0);
2608 (void)LHS; // Silence warnings.
2609#ifndef NDEBUG
2610 switch (getOpcode()) {
2611 case FNeg:
2612 assert(getType() == LHS->getType() &&
2613 "Unary operation should return same type as operand!");
2614 assert(getType()->isFPOrFPVectorTy() &&
2615 "Tried to create a floating-point operation on a "
2616 "non-floating-point type!");
2617 break;
2618 default: llvm_unreachable("Invalid opcode provided");
2619 }
2620#endif
2621}
2622
2623//===----------------------------------------------------------------------===//
2624// BinaryOperator Class
2625//===----------------------------------------------------------------------===//
2626
2628 const Twine &Name, InsertPosition InsertBefore)
2629 : Instruction(Ty, iType, AllocMarker, InsertBefore) {
2630 Op<0>() = S1;
2631 Op<1>() = S2;
2632 setName(Name);
2633 AssertOK();
2634}
2635
// Sanity-check the invariants of a freshly constructed binary operator:
// matching operand types always, plus per-opcode result-type and
// element-type requirements in assertion builds.
void BinaryOperator::AssertOK() {
  Value *LHS = getOperand(0), *RHS = getOperand(1);
  (void)LHS; (void)RHS; // Silence warnings.
  assert(LHS->getType() == RHS->getType() &&
         "Binary operator operand types must match!");
#ifndef NDEBUG
  switch (getOpcode()) {
  // Integer arithmetic.
  case Add: case Sub:
  case Mul:
    assert(getType() == LHS->getType() &&
           "Arithmetic operation should return same type as operands!");
    assert(getType()->isIntOrIntVectorTy() &&
           "Tried to create an integer operation on a non-integer type!");
    break;
  // Floating-point arithmetic.
  case FAdd: case FSub:
  case FMul:
    assert(getType() == LHS->getType() &&
           "Arithmetic operation should return same type as operands!");
    assert(getType()->isFPOrFPVectorTy() &&
           "Tried to create a floating-point operation on a "
           "non-floating-point type!");
    break;
  case UDiv:
  case SDiv:
    assert(getType() == LHS->getType() &&
           "Arithmetic operation should return same type as operands!");
    assert(getType()->isIntOrIntVectorTy() &&
           "Incorrect operand type (not integer) for S/UDIV");
    break;
  case FDiv:
    assert(getType() == LHS->getType() &&
           "Arithmetic operation should return same type as operands!");
    assert(getType()->isFPOrFPVectorTy() &&
           "Incorrect operand type (not floating point) for FDIV");
    break;
  case URem:
  case SRem:
    assert(getType() == LHS->getType() &&
           "Arithmetic operation should return same type as operands!");
    assert(getType()->isIntOrIntVectorTy() &&
           "Incorrect operand type (not integer) for S/UREM");
    break;
  case FRem:
    assert(getType() == LHS->getType() &&
           "Arithmetic operation should return same type as operands!");
    assert(getType()->isFPOrFPVectorTy() &&
           "Incorrect operand type (not floating point) for FREM");
    break;
  // Shifts are integer-only.
  case Shl:
  case LShr:
  case AShr:
    assert(getType() == LHS->getType() &&
           "Shift operation should return same type as operands!");
    assert(getType()->isIntOrIntVectorTy() &&
           "Tried to create a shift operation on a non-integral type!");
    break;
  // Bitwise logic is integer-only.
  case And: case Or:
  case Xor:
    assert(getType() == LHS->getType() &&
           "Logical operation should return same type as operands!");
    assert(getType()->isIntOrIntVectorTy() &&
           "Tried to create a logical operation on a non-integral type!");
    break;
  default: llvm_unreachable("Invalid opcode provided");
  }
#endif
}
2703
2705 const Twine &Name,
2706 InsertPosition InsertBefore) {
2707 assert(S1->getType() == S2->getType() &&
2708 "Cannot create binary operator with two operands of differing type!");
2709 return new BinaryOperator(Op, S1, S2, S1->getType(), Name, InsertBefore);
2710}
2711
2713 InsertPosition InsertBefore) {
2714 Value *Zero = ConstantInt::get(Op->getType(), 0);
2715 return new BinaryOperator(Instruction::Sub, Zero, Op, Op->getType(), Name,
2716 InsertBefore);
2717}
2718
2720 InsertPosition InsertBefore) {
2721 Value *Zero = ConstantInt::get(Op->getType(), 0);
2722 return BinaryOperator::CreateNSWSub(Zero, Op, Name, InsertBefore);
2723}
2724
2726 InsertPosition InsertBefore) {
2727 Constant *C = Constant::getAllOnesValue(Op->getType());
2728 return new BinaryOperator(Instruction::Xor, Op, C,
2729 Op->getType(), Name, InsertBefore);
2730}
2731
// Exchange the two operands of this instruction. Swapping is safe on any
// binary instruction and does not modify the semantics of the instruction.
// Returns true on failure (non-commutative operator), false on success.
2736 if (!isCommutative())
2737 return true; // Can't commute operands
2738 Op<0>().swap(Op<1>());
2739 return false;
2740}
2741
2742//===----------------------------------------------------------------------===//
2743// FPMathOperator Class
2744//===----------------------------------------------------------------------===//
2745
2747 const MDNode *MD =
2748 cast<Instruction>(this)->getMetadata(LLVMContext::MD_fpmath);
2749 if (!MD)
2750 return 0.0;
2752 return Accuracy->getValueAPF().convertToFloat();
2753}
2754
2755//===----------------------------------------------------------------------===//
2756// CastInst Class
2757//===----------------------------------------------------------------------===//
2758
2759// Just determine if this cast only deals with integral->integral conversion.
2761 switch (getOpcode()) {
2762 default: return false;
2763 case Instruction::ZExt:
2764 case Instruction::SExt:
2765 case Instruction::Trunc:
2766 return true;
2767 case Instruction::BitCast:
2768 return getOperand(0)->getType()->isIntegerTy() &&
2769 getType()->isIntegerTy();
2770 }
2771}
2772
2773/// This function determines if the CastInst does not require any bits to be
2774/// changed in order to effect the cast. Essentially, it identifies cases where
2775/// no code gen is necessary for the cast, hence the name no-op cast. For
2776/// example, the following are all no-op casts:
2777/// # bitcast i32* %x to i8*
2778/// # bitcast <2 x i32> %x to <4 x i16>
/// # ptrtoint i32* %x to i32 ; on 32-bit platforms only
2780/// Determine if the described cast is a no-op.
2782 Type *SrcTy,
2783 Type *DestTy,
2784 const DataLayout &DL) {
2785 assert(castIsValid(Opcode, SrcTy, DestTy) && "method precondition");
2786 switch (Opcode) {
2787 default: llvm_unreachable("Invalid CastOp");
2788 case Instruction::Trunc:
2789 case Instruction::ZExt:
2790 case Instruction::SExt:
2791 case Instruction::FPTrunc:
2792 case Instruction::FPExt:
2793 case Instruction::UIToFP:
2794 case Instruction::SIToFP:
2795 case Instruction::FPToUI:
2796 case Instruction::FPToSI:
2797 case Instruction::AddrSpaceCast:
2798 // TODO: Target informations may give a more accurate answer here.
2799 return false;
2800 case Instruction::BitCast:
2801 return true; // BitCast never modifies bits.
2802 case Instruction::PtrToAddr:
2803 case Instruction::PtrToInt:
2804 return DL.getIntPtrType(SrcTy)->getScalarSizeInBits() ==
2805 DestTy->getScalarSizeInBits();
2806 case Instruction::IntToPtr:
2807 return DL.getIntPtrType(DestTy)->getScalarSizeInBits() ==
2808 SrcTy->getScalarSizeInBits();
2809 }
2810}
2811
2813 return isNoopCast(getOpcode(), getOperand(0)->getType(), getType(), DL);
2814}
2815
2816/// This function determines if a pair of casts can be eliminated and what
2817/// opcode should be used in the elimination. This assumes that there are two
2818/// instructions like this:
2819/// * %F = firstOpcode SrcTy %x to MidTy
2820/// * %S = secondOpcode MidTy %F to DstTy
2821/// The function returns a resultOpcode so these two casts can be replaced with:
2822/// * %Replacement = resultOpcode %SrcTy %x to DstTy
2823/// If no such cast is permitted, the function returns 0.
2825 Instruction::CastOps secondOp,
2826 Type *SrcTy, Type *MidTy, Type *DstTy,
2827 const DataLayout *DL) {
2828 // Define the 144 possibilities for these two cast instructions. The values
2829 // in this matrix determine what to do in a given situation and select the
2830 // case in the switch below. The rows correspond to firstOp, the columns
2831 // correspond to secondOp. In looking at the table below, keep in mind
2832 // the following cast properties:
2833 //
2834 // Size Compare Source Destination
2835 // Operator Src ? Size Type Sign Type Sign
2836 // -------- ------------ ------------------- ---------------------
2837 // TRUNC > Integer Any Integral Any
2838 // ZEXT < Integral Unsigned Integer Any
2839 // SEXT < Integral Signed Integer Any
2840 // FPTOUI n/a FloatPt n/a Integral Unsigned
2841 // FPTOSI n/a FloatPt n/a Integral Signed
2842 // UITOFP n/a Integral Unsigned FloatPt n/a
2843 // SITOFP n/a Integral Signed FloatPt n/a
2844 // FPTRUNC > FloatPt n/a FloatPt n/a
2845 // FPEXT < FloatPt n/a FloatPt n/a
2846 // PTRTOINT n/a Pointer n/a Integral Unsigned
2847 // PTRTOADDR n/a Pointer n/a Integral Unsigned
2848 // INTTOPTR n/a Integral Unsigned Pointer n/a
2849 // BITCAST = FirstClass n/a FirstClass n/a
2850 // ADDRSPCST n/a Pointer n/a Pointer n/a
2851 //
2852 // NOTE: some transforms are safe, but we consider them to be non-profitable.
2853 // For example, we could merge "fptoui double to i32" + "zext i32 to i64",
2854 // into "fptoui double to i64", but this loses information about the range
2855 // of the produced value (we no longer know the top-part is all zeros).
2856 // Further this conversion is often much more expensive for typical hardware,
2857 // and causes issues when building libgcc. We disallow fptosi+sext for the
2858 // same reason.
2859 const unsigned numCastOps =
2860 Instruction::CastOpsEnd - Instruction::CastOpsBegin;
2861 // clang-format off
2862 static const uint8_t CastResults[numCastOps][numCastOps] = {
2863 // T F F U S F F P P I B A -+
2864 // R Z S P P I I T P 2 2 N T S |
2865 // U E E 2 2 2 2 R E I A T C C +- secondOp
2866 // N X X U S F F N X N D 2 V V |
2867 // C T T I I P P C T T R P T T -+
2868 { 1, 0, 0,99,99, 0, 0,99,99,99,99, 0, 3, 0}, // Trunc -+
2869 { 8, 1, 9,99,99, 2,17,99,99,99,99, 2, 3, 0}, // ZExt |
2870 { 8, 0, 1,99,99, 0, 2,99,99,99,99, 0, 3, 0}, // SExt |
2871 { 0, 0, 0,99,99, 0, 0,99,99,99,99, 0, 3, 0}, // FPToUI |
2872 { 0, 0, 0,99,99, 0, 0,99,99,99,99, 0, 3, 0}, // FPToSI |
2873 { 99,99,99, 0, 0,99,99, 0, 0,99,99,99, 4, 0}, // UIToFP +- firstOp
2874 { 99,99,99, 0, 0,99,99, 0, 0,99,99,99, 4, 0}, // SIToFP |
2875 { 99,99,99, 0, 0,99,99, 0, 0,99,99,99, 4, 0}, // FPTrunc |
2876 { 99,99,99, 2, 2,99,99, 8, 2,99,99,99, 4, 0}, // FPExt |
2877 { 1, 0, 0,99,99, 0, 0,99,99,99,99, 7, 3, 0}, // PtrToInt |
2878 { 0, 0, 0,99,99, 0, 0,99,99,99,99, 0, 3, 0}, // PtrToAddr |
2879 { 99,99,99,99,99,99,99,99,99,11,11,99,15, 0}, // IntToPtr |
2880 { 5, 5, 5, 0, 0, 5, 5, 0, 0,16,16, 5, 1,14}, // BitCast |
2881 { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,13,12}, // AddrSpaceCast -+
2882 };
2883 // clang-format on
2884
2885 // TODO: This logic could be encoded into the table above and handled in the
2886 // switch below.
2887 // If either of the casts are a bitcast from scalar to vector, disallow the
2888 // merging. However, any pair of bitcasts are allowed.
2889 bool IsFirstBitcast = (firstOp == Instruction::BitCast);
2890 bool IsSecondBitcast = (secondOp == Instruction::BitCast);
2891 bool AreBothBitcasts = IsFirstBitcast && IsSecondBitcast;
2892
2893 // Check if any of the casts convert scalars <-> vectors.
2894 if ((IsFirstBitcast && isa<VectorType>(SrcTy) != isa<VectorType>(MidTy)) ||
2895 (IsSecondBitcast && isa<VectorType>(MidTy) != isa<VectorType>(DstTy)))
2896 if (!AreBothBitcasts)
2897 return 0;
2898
2899 int ElimCase = CastResults[firstOp-Instruction::CastOpsBegin]
2900 [secondOp-Instruction::CastOpsBegin];
2901 switch (ElimCase) {
2902 case 0:
2903 // Categorically disallowed.
2904 return 0;
2905 case 1:
2906 // Allowed, use first cast's opcode.
2907 return firstOp;
2908 case 2:
2909 // Allowed, use second cast's opcode.
2910 return secondOp;
2911 case 3:
2912 // No-op cast in second op implies firstOp as long as the DestTy
2913 // is integer and we are not converting between a vector and a
2914 // non-vector type.
2915 if (!SrcTy->isVectorTy() && DstTy->isIntegerTy())
2916 return firstOp;
2917 return 0;
2918 case 4:
2919 // No-op cast in second op implies firstOp as long as the DestTy
2920 // matches MidTy.
2921 if (DstTy == MidTy)
2922 return firstOp;
2923 return 0;
2924 case 5:
2925 // No-op cast in first op implies secondOp as long as the SrcTy
2926 // is an integer.
2927 if (SrcTy->isIntegerTy())
2928 return secondOp;
2929 return 0;
2930 case 7: {
2931 // Disable inttoptr/ptrtoint optimization if enabled.
2932 if (DisableI2pP2iOpt)
2933 return 0;
2934
2935 // Cannot simplify if address spaces are different!
2936 if (SrcTy != DstTy)
2937 return 0;
2938
2939 // Cannot simplify if the intermediate integer size is smaller than the
2940 // pointer size.
2941 unsigned MidSize = MidTy->getScalarSizeInBits();
2942 if (!DL || MidSize < DL->getPointerTypeSizeInBits(SrcTy))
2943 return 0;
2944
2945 return Instruction::BitCast;
2946 }
2947 case 8: {
2948 // ext, trunc -> bitcast, if the SrcTy and DstTy are the same
2949 // ext, trunc -> ext, if sizeof(SrcTy) < sizeof(DstTy)
2950 // ext, trunc -> trunc, if sizeof(SrcTy) > sizeof(DstTy)
2951 unsigned SrcSize = SrcTy->getScalarSizeInBits();
2952 unsigned DstSize = DstTy->getScalarSizeInBits();
2953 if (SrcTy == DstTy)
2954 return Instruction::BitCast;
2955 if (SrcSize < DstSize)
2956 return firstOp;
2957 if (SrcSize > DstSize)
2958 return secondOp;
2959 return 0;
2960 }
2961 case 9:
2962 // zext, sext -> zext, because sext can't sign extend after zext
2963 return Instruction::ZExt;
2964 case 11: {
2965 // inttoptr, ptrtoint/ptrtoaddr -> integer cast
2966 if (!DL)
2967 return 0;
2968 unsigned MidSize = secondOp == Instruction::PtrToAddr
2969 ? DL->getAddressSizeInBits(MidTy)
2970 : DL->getPointerTypeSizeInBits(MidTy);
2971 unsigned SrcSize = SrcTy->getScalarSizeInBits();
2972 unsigned DstSize = DstTy->getScalarSizeInBits();
2973 // If the middle size is smaller than both source and destination,
2974 // an additional masking operation would be required.
2975 if (MidSize < SrcSize && MidSize < DstSize)
2976 return 0;
2977 if (DstSize < SrcSize)
2978 return Instruction::Trunc;
2979 if (DstSize > SrcSize)
2980 return Instruction::ZExt;
2981 return Instruction::BitCast;
2982 }
2983 case 12:
2984 // addrspacecast, addrspacecast -> bitcast, if SrcAS == DstAS
2985 // addrspacecast, addrspacecast -> addrspacecast, if SrcAS != DstAS
2986 if (SrcTy->getPointerAddressSpace() != DstTy->getPointerAddressSpace())
2987 return Instruction::AddrSpaceCast;
2988 return Instruction::BitCast;
2989 case 13:
2990 // FIXME: this state can be merged with (1), but the following assert
2991 // is useful to check the correcteness of the sequence due to semantic
2992 // change of bitcast.
2993 assert(
2994 SrcTy->isPtrOrPtrVectorTy() &&
2995 MidTy->isPtrOrPtrVectorTy() &&
2996 DstTy->isPtrOrPtrVectorTy() &&
2997 SrcTy->getPointerAddressSpace() != MidTy->getPointerAddressSpace() &&
2998 MidTy->getPointerAddressSpace() == DstTy->getPointerAddressSpace() &&
2999 "Illegal addrspacecast, bitcast sequence!");
3000 // Allowed, use first cast's opcode
3001 return firstOp;
3002 case 14:
3003 // bitcast, addrspacecast -> addrspacecast
3004 return Instruction::AddrSpaceCast;
3005 case 15:
3006 // FIXME: this state can be merged with (1), but the following assert
3007 // is useful to check the correcteness of the sequence due to semantic
3008 // change of bitcast.
3009 assert(
3010 SrcTy->isIntOrIntVectorTy() &&
3011 MidTy->isPtrOrPtrVectorTy() &&
3012 DstTy->isPtrOrPtrVectorTy() &&
3013 MidTy->getPointerAddressSpace() == DstTy->getPointerAddressSpace() &&
3014 "Illegal inttoptr, bitcast sequence!");
3015 // Allowed, use first cast's opcode
3016 return firstOp;
3017 case 16:
3018 // FIXME: this state can be merged with (2), but the following assert
3019 // is useful to check the correcteness of the sequence due to semantic
3020 // change of bitcast.
3021 assert(
3022 SrcTy->isPtrOrPtrVectorTy() &&
3023 MidTy->isPtrOrPtrVectorTy() &&
3024 DstTy->isIntOrIntVectorTy() &&
3025 SrcTy->getPointerAddressSpace() == MidTy->getPointerAddressSpace() &&
3026 "Illegal bitcast, ptrtoint sequence!");
3027 // Allowed, use second cast's opcode
3028 return secondOp;
3029 case 17:
3030 // (sitofp (zext x)) -> (uitofp x)
3031 return Instruction::UIToFP;
3032 case 99:
3033 // Cast combination can't happen (error in input). This is for all cases
3034 // where the MidTy is not the same for the two cast instructions.
3035 llvm_unreachable("Invalid Cast Combination");
3036 default:
3037 llvm_unreachable("Error in CastResults table!!!");
3038 }
3039}
3040
3042 const Twine &Name, InsertPosition InsertBefore) {
3043 assert(castIsValid(op, S, Ty) && "Invalid cast!");
3044 // Construct and return the appropriate CastInst subclass
3045 switch (op) {
3046 case Trunc: return new TruncInst (S, Ty, Name, InsertBefore);
3047 case ZExt: return new ZExtInst (S, Ty, Name, InsertBefore);
3048 case SExt: return new SExtInst (S, Ty, Name, InsertBefore);
3049 case FPTrunc: return new FPTruncInst (S, Ty, Name, InsertBefore);
3050 case FPExt: return new FPExtInst (S, Ty, Name, InsertBefore);
3051 case UIToFP: return new UIToFPInst (S, Ty, Name, InsertBefore);
3052 case SIToFP: return new SIToFPInst (S, Ty, Name, InsertBefore);
3053 case FPToUI: return new FPToUIInst (S, Ty, Name, InsertBefore);
3054 case FPToSI: return new FPToSIInst (S, Ty, Name, InsertBefore);
3055 case PtrToAddr: return new PtrToAddrInst (S, Ty, Name, InsertBefore);
3056 case PtrToInt: return new PtrToIntInst (S, Ty, Name, InsertBefore);
3057 case IntToPtr: return new IntToPtrInst (S, Ty, Name, InsertBefore);
3058 case BitCast:
3059 return new BitCastInst(S, Ty, Name, InsertBefore);
3060 case AddrSpaceCast:
3061 return new AddrSpaceCastInst(S, Ty, Name, InsertBefore);
3062 default:
3063 llvm_unreachable("Invalid opcode provided");
3064 }
3065}
3066
3068 InsertPosition InsertBefore) {
3069 if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
3070 return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
3071 return Create(Instruction::ZExt, S, Ty, Name, InsertBefore);
3072}
3073
3075 InsertPosition InsertBefore) {
3076 if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
3077 return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
3078 return Create(Instruction::SExt, S, Ty, Name, InsertBefore);
3079}
3080
3082 InsertPosition InsertBefore) {
3083 if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
3084 return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
3085 return Create(Instruction::Trunc, S, Ty, Name, InsertBefore);
3086}
3087
3088/// Create a BitCast or a PtrToInt cast instruction
3090 InsertPosition InsertBefore) {
3091 assert(S->getType()->isPtrOrPtrVectorTy() && "Invalid cast");
3092 assert((Ty->isIntOrIntVectorTy() || Ty->isPtrOrPtrVectorTy()) &&
3093 "Invalid cast");
3094 assert(Ty->isVectorTy() == S->getType()->isVectorTy() && "Invalid cast");
3095 assert((!Ty->isVectorTy() ||
3096 cast<VectorType>(Ty)->getElementCount() ==
3097 cast<VectorType>(S->getType())->getElementCount()) &&
3098 "Invalid cast");
3099
3100 if (Ty->isIntOrIntVectorTy())
3101 return Create(Instruction::PtrToInt, S, Ty, Name, InsertBefore);
3102
3103 return CreatePointerBitCastOrAddrSpaceCast(S, Ty, Name, InsertBefore);
3104}
3105
3107 Value *S, Type *Ty, const Twine &Name, InsertPosition InsertBefore) {
3108 assert(S->getType()->isPtrOrPtrVectorTy() && "Invalid cast");
3109 assert(Ty->isPtrOrPtrVectorTy() && "Invalid cast");
3110
3111 if (S->getType()->getPointerAddressSpace() != Ty->getPointerAddressSpace())
3112 return Create(Instruction::AddrSpaceCast, S, Ty, Name, InsertBefore);
3113
3114 return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
3115}
3116
3118 const Twine &Name,
3119 InsertPosition InsertBefore) {
3120 if (S->getType()->isPointerTy() && Ty->isIntegerTy())
3121 return Create(Instruction::PtrToInt, S, Ty, Name, InsertBefore);
3122 if (S->getType()->isIntegerTy() && Ty->isPointerTy())
3123 return Create(Instruction::IntToPtr, S, Ty, Name, InsertBefore);
3124
3125 return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
3126}
3127
3129 const Twine &Name,
3130 InsertPosition InsertBefore) {
3131 assert(C->getType()->isIntOrIntVectorTy() && Ty->isIntOrIntVectorTy() &&
3132 "Invalid integer cast");
3133 unsigned SrcBits = C->getType()->getScalarSizeInBits();
3134 unsigned DstBits = Ty->getScalarSizeInBits();
3135 Instruction::CastOps opcode =
3136 (SrcBits == DstBits ? Instruction::BitCast :
3137 (SrcBits > DstBits ? Instruction::Trunc :
3138 (isSigned ? Instruction::SExt : Instruction::ZExt)));
3139 return Create(opcode, C, Ty, Name, InsertBefore);
3140}
3141
3143 InsertPosition InsertBefore) {
3144 assert(C->getType()->isFPOrFPVectorTy() && Ty->isFPOrFPVectorTy() &&
3145 "Invalid cast");
3146 unsigned SrcBits = C->getType()->getScalarSizeInBits();
3147 unsigned DstBits = Ty->getScalarSizeInBits();
3148 assert((C->getType() == Ty || SrcBits != DstBits) && "Invalid cast");
3149 Instruction::CastOps opcode =
3150 (SrcBits == DstBits ? Instruction::BitCast :
3151 (SrcBits > DstBits ? Instruction::FPTrunc : Instruction::FPExt));
3152 return Create(opcode, C, Ty, Name, InsertBefore);
3153}
3154
3155bool CastInst::isBitCastable(Type *SrcTy, Type *DestTy) {
3156 if (!SrcTy->isFirstClassType() || !DestTy->isFirstClassType())
3157 return false;
3158
3159 if (SrcTy == DestTy)
3160 return true;
3161
3162 if (VectorType *SrcVecTy = dyn_cast<VectorType>(SrcTy)) {
3163 if (VectorType *DestVecTy = dyn_cast<VectorType>(DestTy)) {
3164 if (SrcVecTy->getElementCount() == DestVecTy->getElementCount()) {
3165 // An element by element cast. Valid if casting the elements is valid.
3166 SrcTy = SrcVecTy->getElementType();
3167 DestTy = DestVecTy->getElementType();
3168 }
3169 }
3170 }
3171
3172 if (PointerType *DestPtrTy = dyn_cast<PointerType>(DestTy)) {
3173 if (PointerType *SrcPtrTy = dyn_cast<PointerType>(SrcTy)) {
3174 return SrcPtrTy->getAddressSpace() == DestPtrTy->getAddressSpace();
3175 }
3176 }
3177
3178 TypeSize SrcBits = SrcTy->getPrimitiveSizeInBits(); // 0 for ptr
3179 TypeSize DestBits = DestTy->getPrimitiveSizeInBits(); // 0 for ptr
3180
3181 // Could still have vectors of pointers if the number of elements doesn't
3182 // match
3183 if (SrcBits.getKnownMinValue() == 0 || DestBits.getKnownMinValue() == 0)
3184 return false;
3185
3186 if (SrcBits != DestBits)
3187 return false;
3188
3189 return true;
3190}
3191
3193 const DataLayout &DL) {
3194 // ptrtoint and inttoptr are not allowed on non-integral pointers
3195 if (auto *PtrTy = dyn_cast<PointerType>(SrcTy))
3196 if (auto *IntTy = dyn_cast<IntegerType>(DestTy))
3197 return (IntTy->getBitWidth() == DL.getPointerTypeSizeInBits(PtrTy) &&
3198 !DL.isNonIntegralPointerType(PtrTy));
3199 if (auto *PtrTy = dyn_cast<PointerType>(DestTy))
3200 if (auto *IntTy = dyn_cast<IntegerType>(SrcTy))
3201 return (IntTy->getBitWidth() == DL.getPointerTypeSizeInBits(PtrTy) &&
3202 !DL.isNonIntegralPointerType(PtrTy));
3203
3204 return isBitCastable(SrcTy, DestTy);
3205}
3206
3207// Provide a way to get a "cast" where the cast opcode is inferred from the
3208// types and size of the operand. This, basically, is a parallel of the
3209// logic in the castIsValid function below. This axiom should hold:
3210// castIsValid( getCastOpcode(Val, Ty), Val, Ty)
3211// should not assert in castIsValid. In other words, this produces a "correct"
3212// casting opcode for the arguments passed to it.
3215 const Value *Src, bool SrcIsSigned, Type *DestTy, bool DestIsSigned) {
3216 Type *SrcTy = Src->getType();
3217
3218 assert(SrcTy->isFirstClassType() && DestTy->isFirstClassType() &&
3219 "Only first class types are castable!");
3220
3221 if (SrcTy == DestTy)
3222 return BitCast;
3223
3224 // FIXME: Check address space sizes here
3225 if (VectorType *SrcVecTy = dyn_cast<VectorType>(SrcTy))
3226 if (VectorType *DestVecTy = dyn_cast<VectorType>(DestTy))
3227 if (SrcVecTy->getElementCount() == DestVecTy->getElementCount()) {
3228 // An element by element cast. Find the appropriate opcode based on the
3229 // element types.
3230 SrcTy = SrcVecTy->getElementType();
3231 DestTy = DestVecTy->getElementType();
3232 }
3233
3234 // Get the bit sizes, we'll need these
3235 // FIXME: This doesn't work for scalable vector types with different element
3236 // counts that don't call getElementType above.
3237 unsigned SrcBits =
3238 SrcTy->getPrimitiveSizeInBits().getFixedValue(); // 0 for ptr
3239 unsigned DestBits =
3240 DestTy->getPrimitiveSizeInBits().getFixedValue(); // 0 for ptr
3241
3242 // Run through the possibilities ...
3243 if (DestTy->isIntegerTy()) { // Casting to integral
3244 if (SrcTy->isIntegerTy()) { // Casting from integral
3245 if (DestBits < SrcBits)
3246 return Trunc; // int -> smaller int
3247 else if (DestBits > SrcBits) { // its an extension
3248 if (SrcIsSigned)
3249 return SExt; // signed -> SEXT
3250 else
3251 return ZExt; // unsigned -> ZEXT
3252 } else {
3253 return BitCast; // Same size, No-op cast
3254 }
3255 } else if (SrcTy->isFloatingPointTy()) { // Casting from floating pt
3256 if (DestIsSigned)
3257 return FPToSI; // FP -> sint
3258 else
3259 return FPToUI; // FP -> uint
3260 } else if (SrcTy->isVectorTy()) {
3261 assert(DestBits == SrcBits &&
3262 "Casting vector to integer of different width");
3263 return BitCast; // Same size, no-op cast
3264 } else {
3265 assert(SrcTy->isPointerTy() &&
3266 "Casting from a value that is not first-class type");
3267 return PtrToInt; // ptr -> int
3268 }
3269 } else if (DestTy->isFloatingPointTy()) { // Casting to floating pt
3270 if (SrcTy->isIntegerTy()) { // Casting from integral
3271 if (SrcIsSigned)
3272 return SIToFP; // sint -> FP
3273 else
3274 return UIToFP; // uint -> FP
3275 } else if (SrcTy->isFloatingPointTy()) { // Casting from floating pt
3276 if (DestBits < SrcBits) {
3277 return FPTrunc; // FP -> smaller FP
3278 } else if (DestBits > SrcBits) {
3279 return FPExt; // FP -> larger FP
3280 } else {
3281 return BitCast; // same size, no-op cast
3282 }
3283 } else if (SrcTy->isVectorTy()) {
3284 assert(DestBits == SrcBits &&
3285 "Casting vector to floating point of different width");
3286 return BitCast; // same size, no-op cast
3287 }
3288 llvm_unreachable("Casting pointer or non-first class to float");
3289 } else if (DestTy->isVectorTy()) {
3290 assert(DestBits == SrcBits &&
3291 "Illegal cast to vector (wrong type or size)");
3292 return BitCast;
3293 } else if (DestTy->isPointerTy()) {
3294 if (SrcTy->isPointerTy()) {
3295 if (DestTy->getPointerAddressSpace() != SrcTy->getPointerAddressSpace())
3296 return AddrSpaceCast;
3297 return BitCast; // ptr -> ptr
3298 } else if (SrcTy->isIntegerTy()) {
3299 return IntToPtr; // int -> ptr
3300 }
3301 llvm_unreachable("Casting pointer to other than pointer or int");
3302 }
3303 llvm_unreachable("Casting to type that is not first-class");
3304}
3305
3306//===----------------------------------------------------------------------===//
3307// CastInst SubClass Constructors
3308//===----------------------------------------------------------------------===//
3309
3310/// Check that the construction parameters for a CastInst are correct. This
3311/// could be broken out into the separate constructors but it is useful to have
3312/// it in one place and to eliminate the redundant code for getting the sizes
3313/// of the types involved.
3314bool
3316 if (!SrcTy->isFirstClassType() || !DstTy->isFirstClassType() ||
3317 SrcTy->isAggregateType() || DstTy->isAggregateType())
3318 return false;
3319
3320 // Get the size of the types in bits, and whether we are dealing
3321 // with vector types, we'll need this later.
3322 bool SrcIsVec = isa<VectorType>(SrcTy);
3323 bool DstIsVec = isa<VectorType>(DstTy);
3324 unsigned SrcScalarBitSize = SrcTy->getScalarSizeInBits();
3325 unsigned DstScalarBitSize = DstTy->getScalarSizeInBits();
3326
3327 // If these are vector types, get the lengths of the vectors (using zero for
3328 // scalar types means that checking that vector lengths match also checks that
3329 // scalars are not being converted to vectors or vectors to scalars).
3330 ElementCount SrcEC = SrcIsVec ? cast<VectorType>(SrcTy)->getElementCount()
3332 ElementCount DstEC = DstIsVec ? cast<VectorType>(DstTy)->getElementCount()
3334
3335 // Switch on the opcode provided
3336 switch (op) {
3337 default: return false; // This is an input error
3338 case Instruction::Trunc:
3339 return SrcTy->isIntOrIntVectorTy() && DstTy->isIntOrIntVectorTy() &&
3340 SrcEC == DstEC && SrcScalarBitSize > DstScalarBitSize;
3341 case Instruction::ZExt:
3342 return SrcTy->isIntOrIntVectorTy() && DstTy->isIntOrIntVectorTy() &&
3343 SrcEC == DstEC && SrcScalarBitSize < DstScalarBitSize;
3344 case Instruction::SExt:
3345 return SrcTy->isIntOrIntVectorTy() && DstTy->isIntOrIntVectorTy() &&
3346 SrcEC == DstEC && SrcScalarBitSize < DstScalarBitSize;
3347 case Instruction::FPTrunc:
3348 return SrcTy->isFPOrFPVectorTy() && DstTy->isFPOrFPVectorTy() &&
3349 SrcEC == DstEC && SrcScalarBitSize > DstScalarBitSize;
3350 case Instruction::FPExt:
3351 return SrcTy->isFPOrFPVectorTy() && DstTy->isFPOrFPVectorTy() &&
3352 SrcEC == DstEC && SrcScalarBitSize < DstScalarBitSize;
3353 case Instruction::UIToFP:
3354 case Instruction::SIToFP:
3355 return SrcTy->isIntOrIntVectorTy() && DstTy->isFPOrFPVectorTy() &&
3356 SrcEC == DstEC;
3357 case Instruction::FPToUI:
3358 case Instruction::FPToSI:
3359 return SrcTy->isFPOrFPVectorTy() && DstTy->isIntOrIntVectorTy() &&
3360 SrcEC == DstEC;
3361 case Instruction::PtrToAddr:
3362 case Instruction::PtrToInt:
3363 if (SrcEC != DstEC)
3364 return false;
3365 return SrcTy->isPtrOrPtrVectorTy() && DstTy->isIntOrIntVectorTy();
3366 case Instruction::IntToPtr:
3367 if (SrcEC != DstEC)
3368 return false;
3369 return SrcTy->isIntOrIntVectorTy() && DstTy->isPtrOrPtrVectorTy();
3370 case Instruction::BitCast: {
3371 PointerType *SrcPtrTy = dyn_cast<PointerType>(SrcTy->getScalarType());
3372 PointerType *DstPtrTy = dyn_cast<PointerType>(DstTy->getScalarType());
3373
3374 // BitCast implies a no-op cast of type only. No bits change.
3375 // However, you can't cast pointers to anything but pointers.
3376 if (!SrcPtrTy != !DstPtrTy)
3377 return false;
3378
3379 // For non-pointer cases, the cast is okay if the source and destination bit
3380 // widths are identical.
3381 if (!SrcPtrTy)
3382 return SrcTy->getPrimitiveSizeInBits() == DstTy->getPrimitiveSizeInBits();
3383
3384 // If both are pointers then the address spaces must match.
3385 if (SrcPtrTy->getAddressSpace() != DstPtrTy->getAddressSpace())
3386 return false;
3387
3388 // A vector of pointers must have the same number of elements.
3389 if (SrcIsVec && DstIsVec)
3390 return SrcEC == DstEC;
3391 if (SrcIsVec)
3392 return SrcEC == ElementCount::getFixed(1);
3393 if (DstIsVec)
3394 return DstEC == ElementCount::getFixed(1);
3395
3396 return true;
3397 }
3398 case Instruction::AddrSpaceCast: {
3399 PointerType *SrcPtrTy = dyn_cast<PointerType>(SrcTy->getScalarType());
3400 if (!SrcPtrTy)
3401 return false;
3402
3403 PointerType *DstPtrTy = dyn_cast<PointerType>(DstTy->getScalarType());
3404 if (!DstPtrTy)
3405 return false;
3406
3407 if (SrcPtrTy->getAddressSpace() == DstPtrTy->getAddressSpace())
3408 return false;
3409
3410 return SrcEC == DstEC;
3411 }
3412 }
3413}
3414
3416 InsertPosition InsertBefore)
3417 : CastInst(Ty, Trunc, S, Name, InsertBefore) {
3418 assert(castIsValid(getOpcode(), S, Ty) && "Illegal Trunc");
3419}
3420
3421ZExtInst::ZExtInst(Value *S, Type *Ty, const Twine &Name,
3422 InsertPosition InsertBefore)
3423 : CastInst(Ty, ZExt, S, Name, InsertBefore) {
3424 assert(castIsValid(getOpcode(), S, Ty) && "Illegal ZExt");
3425}
3426
3427SExtInst::SExtInst(Value *S, Type *Ty, const Twine &Name,
3428 InsertPosition InsertBefore)
3429 : CastInst(Ty, SExt, S, Name, InsertBefore) {
3430 assert(castIsValid(getOpcode(), S, Ty) && "Illegal SExt");
3431}
3432
3434 InsertPosition InsertBefore)
3435 : CastInst(Ty, FPTrunc, S, Name, InsertBefore) {
3436 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPTrunc");
3437}
3438
3440 InsertPosition InsertBefore)
3441 : CastInst(Ty, FPExt, S, Name, InsertBefore) {
3442 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPExt");
3443}
3444
3446 InsertPosition InsertBefore)
3447 : CastInst(Ty, UIToFP, S, Name, InsertBefore) {
3448 assert(castIsValid(getOpcode(), S, Ty) && "Illegal UIToFP");
3449}
3450
3452 InsertPosition InsertBefore)
3453 : CastInst(Ty, SIToFP, S, Name, InsertBefore) {
3454 assert(castIsValid(getOpcode(), S, Ty) && "Illegal SIToFP");
3455}
3456
3458 InsertPosition InsertBefore)
3459 : CastInst(Ty, FPToUI, S, Name, InsertBefore) {
3460 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPToUI");
3461}
3462
3464 InsertPosition InsertBefore)
3465 : CastInst(Ty, FPToSI, S, Name, InsertBefore) {
3466 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPToSI");
3467}
3468
3470 InsertPosition InsertBefore)
3471 : CastInst(Ty, PtrToInt, S, Name, InsertBefore) {
3472 assert(castIsValid(getOpcode(), S, Ty) && "Illegal PtrToInt");
3473}
3474
3476 InsertPosition InsertBefore)
3477 : CastInst(Ty, PtrToAddr, S, Name, InsertBefore) {
3478 assert(castIsValid(getOpcode(), S, Ty) && "Illegal PtrToAddr");
3479}
3480
3482 InsertPosition InsertBefore)
3483 : CastInst(Ty, IntToPtr, S, Name, InsertBefore) {
3484 assert(castIsValid(getOpcode(), S, Ty) && "Illegal IntToPtr");
3485}
3486
3488 InsertPosition InsertBefore)
3489 : CastInst(Ty, BitCast, S, Name, InsertBefore) {
3490 assert(castIsValid(getOpcode(), S, Ty) && "Illegal BitCast");
3491}
3492
3494 InsertPosition InsertBefore)
3495 : CastInst(Ty, AddrSpaceCast, S, Name, InsertBefore) {
3496 assert(castIsValid(getOpcode(), S, Ty) && "Illegal AddrSpaceCast");
3497}
3498
3499//===----------------------------------------------------------------------===//
3500// CmpInst Classes
3501//===----------------------------------------------------------------------===//
3502
3504 Value *RHS, const Twine &Name, InsertPosition InsertBefore,
3505 Instruction *FlagsSource)
3506 : Instruction(ty, op, AllocMarker, InsertBefore) {
3507 Op<0>() = LHS;
3508 Op<1>() = RHS;
3509 setPredicate(predicate);
3510 setName(Name);
3511 if (FlagsSource)
3512 copyIRFlags(FlagsSource);
3513}
3514
3516 const Twine &Name, InsertPosition InsertBefore) {
3517 if (Op == Instruction::ICmp) {
3518 if (InsertBefore.isValid())
3519 return new ICmpInst(InsertBefore, CmpInst::Predicate(predicate),
3520 S1, S2, Name);
3521 else
3522 return new ICmpInst(CmpInst::Predicate(predicate),
3523 S1, S2, Name);
3524 }
3525
3526 if (InsertBefore.isValid())
3527 return new FCmpInst(InsertBefore, CmpInst::Predicate(predicate),
3528 S1, S2, Name);
3529 else
3530 return new FCmpInst(CmpInst::Predicate(predicate),
3531 S1, S2, Name);
3532}
3533
3535 Value *S2,
3536 const Instruction *FlagsSource,
3537 const Twine &Name,
3538 InsertPosition InsertBefore) {
3539 CmpInst *Inst = Create(Op, Pred, S1, S2, Name, InsertBefore);
3540 Inst->copyIRFlags(FlagsSource);
3541 return Inst;
3542}
3543
3545 if (ICmpInst *IC = dyn_cast<ICmpInst>(this))
3546 IC->swapOperands();
3547 else
3548 cast<FCmpInst>(this)->swapOperands();
3549}
3550
3552 if (const ICmpInst *IC = dyn_cast<ICmpInst>(this))
3553 return IC->isCommutative();
3554 return cast<FCmpInst>(this)->isCommutative();
3555}
3556
3559 return ICmpInst::isEquality(P);
3561 return FCmpInst::isEquality(P);
3562 llvm_unreachable("Unsupported predicate kind");
3563}
3564
3565// Returns true if either operand of CmpInst is a provably non-zero
3566// floating-point constant.
3567static bool hasNonZeroFPOperands(const CmpInst *Cmp) {
3568 auto *LHS = dyn_cast<Constant>(Cmp->getOperand(0));
3569 auto *RHS = dyn_cast<Constant>(Cmp->getOperand(1));
3570 if (auto *Const = LHS ? LHS : RHS) {
3571 using namespace llvm::PatternMatch;
3572 return match(Const, m_NonZeroNotDenormalFP());
3573 }
3574 return false;
3575}
3576
3577// Floating-point equality is not an equivalence when comparing +0.0 with
3578// -0.0, when comparing NaN with another value, or when flushing
3579// denormals-to-zero.
3580bool CmpInst::isEquivalence(bool Invert) const {
3581 switch (Invert ? getInversePredicate() : getPredicate()) {
3583 return true;
3585 if (!hasNoNaNs())
3586 return false;
3587 [[fallthrough]];
3589 return hasNonZeroFPOperands(this);
3590 default:
3591 return false;
3592 }
3593}
3594
3596 switch (pred) {
3597 default: llvm_unreachable("Unknown cmp predicate!");
3598 case ICMP_EQ: return ICMP_NE;
3599 case ICMP_NE: return ICMP_EQ;
3600 case ICMP_UGT: return ICMP_ULE;
3601 case ICMP_ULT: return ICMP_UGE;
3602 case ICMP_UGE: return ICMP_ULT;
3603 case ICMP_ULE: return ICMP_UGT;
3604 case ICMP_SGT: return ICMP_SLE;
3605 case ICMP_SLT: return ICMP_SGE;
3606 case ICMP_SGE: return ICMP_SLT;
3607 case ICMP_SLE: return ICMP_SGT;
3608
3609 case FCMP_OEQ: return FCMP_UNE;
3610 case FCMP_ONE: return FCMP_UEQ;
3611 case FCMP_OGT: return FCMP_ULE;
3612 case FCMP_OLT: return FCMP_UGE;
3613 case FCMP_OGE: return FCMP_ULT;
3614 case FCMP_OLE: return FCMP_UGT;
3615 case FCMP_UEQ: return FCMP_ONE;
3616 case FCMP_UNE: return FCMP_OEQ;
3617 case FCMP_UGT: return FCMP_OLE;
3618 case FCMP_ULT: return FCMP_OGE;
3619 case FCMP_UGE: return FCMP_OLT;
3620 case FCMP_ULE: return FCMP_OGT;
3621 case FCMP_ORD: return FCMP_UNO;
3622 case FCMP_UNO: return FCMP_ORD;
3623 case FCMP_TRUE: return FCMP_FALSE;
3624 case FCMP_FALSE: return FCMP_TRUE;
3625 }
3626}
3627
3629 switch (Pred) {
3630 default: return "unknown";
3631 case FCmpInst::FCMP_FALSE: return "false";
3632 case FCmpInst::FCMP_OEQ: return "oeq";
3633 case FCmpInst::FCMP_OGT: return "ogt";
3634 case FCmpInst::FCMP_OGE: return "oge";
3635 case FCmpInst::FCMP_OLT: return "olt";
3636 case FCmpInst::FCMP_OLE: return "ole";
3637 case FCmpInst::FCMP_ONE: return "one";
3638 case FCmpInst::FCMP_ORD: return "ord";
3639 case FCmpInst::FCMP_UNO: return "uno";
3640 case FCmpInst::FCMP_UEQ: return "ueq";
3641 case FCmpInst::FCMP_UGT: return "ugt";
3642 case FCmpInst::FCMP_UGE: return "uge";
3643 case FCmpInst::FCMP_ULT: return "ult";
3644 case FCmpInst::FCMP_ULE: return "ule";
3645 case FCmpInst::FCMP_UNE: return "une";
3646 case FCmpInst::FCMP_TRUE: return "true";
3647 case ICmpInst::ICMP_EQ: return "eq";
3648 case ICmpInst::ICMP_NE: return "ne";
3649 case ICmpInst::ICMP_SGT: return "sgt";
3650 case ICmpInst::ICMP_SGE: return "sge";
3651 case ICmpInst::ICMP_SLT: return "slt";
3652 case ICmpInst::ICMP_SLE: return "sle";
3653 case ICmpInst::ICMP_UGT: return "ugt";
3654 case ICmpInst::ICMP_UGE: return "uge";
3655 case ICmpInst::ICMP_ULT: return "ult";
3656 case ICmpInst::ICMP_ULE: return "ule";
3657 }
3658}
3659
3661 OS << CmpInst::getPredicateName(Pred);
3662 return OS;
3663}
3664
3666 switch (pred) {
3667 default: llvm_unreachable("Unknown icmp predicate!");
3668 case ICMP_EQ: case ICMP_NE:
3669 case ICMP_SGT: case ICMP_SLT: case ICMP_SGE: case ICMP_SLE:
3670 return pred;
3671 case ICMP_UGT: return ICMP_SGT;
3672 case ICMP_ULT: return ICMP_SLT;
3673 case ICMP_UGE: return ICMP_SGE;
3674 case ICMP_ULE: return ICMP_SLE;
3675 }
3676}
3677
3679 switch (pred) {
3680 default: llvm_unreachable("Unknown icmp predicate!");
3681 case ICMP_EQ: case ICMP_NE:
3682 case ICMP_UGT: case ICMP_ULT: case ICMP_UGE: case ICMP_ULE:
3683 return pred;
3684 case ICMP_SGT: return ICMP_UGT;
3685 case ICMP_SLT: return ICMP_ULT;
3686 case ICMP_SGE: return ICMP_UGE;
3687 case ICMP_SLE: return ICMP_ULE;
3688 }
3689}
3690
3692 switch (pred) {
3693 default: llvm_unreachable("Unknown cmp predicate!");
3694 case ICMP_EQ: case ICMP_NE:
3695 return pred;
3696 case ICMP_SGT: return ICMP_SLT;
3697 case ICMP_SLT: return ICMP_SGT;
3698 case ICMP_SGE: return ICMP_SLE;
3699 case ICMP_SLE: return ICMP_SGE;
3700 case ICMP_UGT: return ICMP_ULT;
3701 case ICMP_ULT: return ICMP_UGT;
3702 case ICMP_UGE: return ICMP_ULE;
3703 case ICMP_ULE: return ICMP_UGE;
3704
3705 case FCMP_FALSE: case FCMP_TRUE:
3706 case FCMP_OEQ: case FCMP_ONE:
3707 case FCMP_UEQ: case FCMP_UNE:
3708 case FCMP_ORD: case FCMP_UNO:
3709 return pred;
3710 case FCMP_OGT: return FCMP_OLT;
3711 case FCMP_OLT: return FCMP_OGT;
3712 case FCMP_OGE: return FCMP_OLE;
3713 case FCMP_OLE: return FCMP_OGE;
3714 case FCMP_UGT: return FCMP_ULT;
3715 case FCMP_ULT: return FCMP_UGT;
3716 case FCMP_UGE: return FCMP_ULE;
3717 case FCMP_ULE: return FCMP_UGE;
3718 }
3719}
3720
3722 switch (pred) {
3723 case ICMP_SGE:
3724 case ICMP_SLE:
3725 case ICMP_UGE:
3726 case ICMP_ULE:
3727 case FCMP_OGE:
3728 case FCMP_OLE:
3729 case FCMP_UGE:
3730 case FCMP_ULE:
3731 return true;
3732 default:
3733 return false;
3734 }
3735}
3736
3738 switch (pred) {
3739 case ICMP_SGT:
3740 case ICMP_SLT:
3741 case ICMP_UGT:
3742 case ICMP_ULT:
3743 case FCMP_OGT:
3744 case FCMP_OLT:
3745 case FCMP_UGT:
3746 case FCMP_ULT:
3747 return true;
3748 default:
3749 return false;
3750 }
3751}
3752
3754 switch (pred) {
3755 case ICMP_SGE:
3756 return ICMP_SGT;
3757 case ICMP_SLE:
3758 return ICMP_SLT;
3759 case ICMP_UGE:
3760 return ICMP_UGT;
3761 case ICMP_ULE:
3762 return ICMP_ULT;
3763 case FCMP_OGE:
3764 return FCMP_OGT;
3765 case FCMP_OLE:
3766 return FCMP_OLT;
3767 case FCMP_UGE:
3768 return FCMP_UGT;
3769 case FCMP_ULE:
3770 return FCMP_ULT;
3771 default:
3772 return pred;
3773 }
3774}
3775
3777 switch (pred) {
3778 case ICMP_SGT:
3779 return ICMP_SGE;
3780 case ICMP_SLT:
3781 return ICMP_SLE;
3782 case ICMP_UGT:
3783 return ICMP_UGE;
3784 case ICMP_ULT:
3785 return ICMP_ULE;
3786 case FCMP_OGT:
3787 return FCMP_OGE;
3788 case FCMP_OLT:
3789 return FCMP_OLE;
3790 case FCMP_UGT:
3791 return FCMP_UGE;
3792 case FCMP_ULT:
3793 return FCMP_ULE;
3794 default:
3795 return pred;
3796 }
3797}
3798
3800 assert(CmpInst::isRelational(pred) && "Call only with relational predicate!");
3801
3802 if (isStrictPredicate(pred))
3803 return getNonStrictPredicate(pred);
3804 if (isNonStrictPredicate(pred))
3805 return getStrictPredicate(pred);
3806
3807 llvm_unreachable("Unknown predicate!");
3808}
3809
3811 switch (predicate) {
3812 default: return false;
3814 case ICmpInst::ICMP_UGE: return true;
3815 }
3816}
3817
3819 switch (predicate) {
3820 default: return false;
3822 case ICmpInst::ICMP_SGE: return true;
3823 }
3824}
3825
3826bool ICmpInst::compare(const APInt &LHS, const APInt &RHS,
3827 ICmpInst::Predicate Pred) {
3828 assert(ICmpInst::isIntPredicate(Pred) && "Only for integer predicates!");
3829 switch (Pred) {
3831 return LHS.eq(RHS);
3833 return LHS.ne(RHS);
3835 return LHS.ugt(RHS);
3837 return LHS.uge(RHS);
3839 return LHS.ult(RHS);
3841 return LHS.ule(RHS);
3843 return LHS.sgt(RHS);
3845 return LHS.sge(RHS);
3847 return LHS.slt(RHS);
3849 return LHS.sle(RHS);
3850 default:
3851 llvm_unreachable("Unexpected non-integer predicate.");
3852 };
3853}
3854
3855bool FCmpInst::compare(const APFloat &LHS, const APFloat &RHS,
3856 FCmpInst::Predicate Pred) {
3857 APFloat::cmpResult R = LHS.compare(RHS);
3858 switch (Pred) {
3859 default:
3860 llvm_unreachable("Invalid FCmp Predicate");
3862 return false;
3864 return true;
3865 case FCmpInst::FCMP_UNO:
3866 return R == APFloat::cmpUnordered;
3867 case FCmpInst::FCMP_ORD:
3868 return R != APFloat::cmpUnordered;
3869 case FCmpInst::FCMP_UEQ:
3870 return R == APFloat::cmpUnordered || R == APFloat::cmpEqual;
3871 case FCmpInst::FCMP_OEQ:
3872 return R == APFloat::cmpEqual;
3873 case FCmpInst::FCMP_UNE:
3874 return R != APFloat::cmpEqual;
3875 case FCmpInst::FCMP_ONE:
3877 case FCmpInst::FCMP_ULT:
3878 return R == APFloat::cmpUnordered || R == APFloat::cmpLessThan;
3879 case FCmpInst::FCMP_OLT:
3880 return R == APFloat::cmpLessThan;
3881 case FCmpInst::FCMP_UGT:
3883 case FCmpInst::FCMP_OGT:
3884 return R == APFloat::cmpGreaterThan;
3885 case FCmpInst::FCMP_ULE:
3886 return R != APFloat::cmpGreaterThan;
3887 case FCmpInst::FCMP_OLE:
3888 return R == APFloat::cmpLessThan || R == APFloat::cmpEqual;
3889 case FCmpInst::FCMP_UGE:
3890 return R != APFloat::cmpLessThan;
3891 case FCmpInst::FCMP_OGE:
3892 return R == APFloat::cmpGreaterThan || R == APFloat::cmpEqual;
3893 }
3894}
3895
3896std::optional<bool> ICmpInst::compare(const KnownBits &LHS,
3897 const KnownBits &RHS,
3898 ICmpInst::Predicate Pred) {
3899 switch (Pred) {
3900 case ICmpInst::ICMP_EQ:
3901 return KnownBits::eq(LHS, RHS);
3902 case ICmpInst::ICMP_NE:
3903 return KnownBits::ne(LHS, RHS);
3904 case ICmpInst::ICMP_UGE:
3905 return KnownBits::uge(LHS, RHS);
3906 case ICmpInst::ICMP_UGT:
3907 return KnownBits::ugt(LHS, RHS);
3908 case ICmpInst::ICMP_ULE:
3909 return KnownBits::ule(LHS, RHS);
3910 case ICmpInst::ICMP_ULT:
3911 return KnownBits::ult(LHS, RHS);
3912 case ICmpInst::ICMP_SGE:
3913 return KnownBits::sge(LHS, RHS);
3914 case ICmpInst::ICMP_SGT:
3915 return KnownBits::sgt(LHS, RHS);
3916 case ICmpInst::ICMP_SLE:
3917 return KnownBits::sle(LHS, RHS);
3918 case ICmpInst::ICMP_SLT:
3919 return KnownBits::slt(LHS, RHS);
3920 default:
3921 llvm_unreachable("Unexpected non-integer predicate.");
3922 }
3923}
3924
3926 if (CmpInst::isEquality(pred))
3927 return pred;
3928 if (isSigned(pred))
3929 return getUnsignedPredicate(pred);
3930 if (isUnsigned(pred))
3931 return getSignedPredicate(pred);
3932
3933 llvm_unreachable("Unknown predicate!");
3934}
3935
3937 switch (predicate) {
3938 default: return false;
3941 case FCmpInst::FCMP_ORD: return true;
3942 }
3943}
3944
3946 switch (predicate) {
3947 default: return false;
3950 case FCmpInst::FCMP_UNO: return true;
3951 }
3952}
3953
3955 switch(predicate) {
3956 default: return false;
3957 case ICMP_EQ: case ICMP_UGE: case ICMP_ULE: case ICMP_SGE: case ICMP_SLE:
3958 case FCMP_TRUE: case FCMP_UEQ: case FCMP_UGE: case FCMP_ULE: return true;
3959 }
3960}
3961
3963 switch(predicate) {
3964 case ICMP_NE: case ICMP_UGT: case ICMP_ULT: case ICMP_SGT: case ICMP_SLT:
3965 case FCMP_FALSE: case FCMP_ONE: case FCMP_OGT: case FCMP_OLT: return true;
3966 default: return false;
3967 }
3968}
3969
3971 // If the predicates match, then we know the first condition implies the
3972 // second is true.
3973 if (CmpPredicate::getMatching(Pred1, Pred2))
3974 return true;
3975
3976 if (Pred1.hasSameSign() && CmpInst::isSigned(Pred2))
3978 else if (Pred2.hasSameSign() && CmpInst::isSigned(Pred1))
3980
3981 switch (Pred1) {
3982 default:
3983 break;
3984 case CmpInst::ICMP_EQ:
3985 // A == B implies A >=u B, A <=u B, A >=s B, and A <=s B are true.
3986 return Pred2 == CmpInst::ICMP_UGE || Pred2 == CmpInst::ICMP_ULE ||
3987 Pred2 == CmpInst::ICMP_SGE || Pred2 == CmpInst::ICMP_SLE;
3988 case CmpInst::ICMP_UGT: // A >u B implies A != B and A >=u B are true.
3989 return Pred2 == CmpInst::ICMP_NE || Pred2 == CmpInst::ICMP_UGE;
3990 case CmpInst::ICMP_ULT: // A <u B implies A != B and A <=u B are true.
3991 return Pred2 == CmpInst::ICMP_NE || Pred2 == CmpInst::ICMP_ULE;
3992 case CmpInst::ICMP_SGT: // A >s B implies A != B and A >=s B are true.
3993 return Pred2 == CmpInst::ICMP_NE || Pred2 == CmpInst::ICMP_SGE;
3994 case CmpInst::ICMP_SLT: // A <s B implies A != B and A <=s B are true.
3995 return Pred2 == CmpInst::ICMP_NE || Pred2 == CmpInst::ICMP_SLE;
3996 }
3997 return false;
3998}
3999
4001 CmpPredicate Pred2) {
4002 return isImpliedTrueByMatchingCmp(Pred1,
4004}
4005
4007 CmpPredicate Pred2) {
4008 if (isImpliedTrueByMatchingCmp(Pred1, Pred2))
4009 return true;
4010 if (isImpliedFalseByMatchingCmp(Pred1, Pred2))
4011 return false;
4012 return std::nullopt;
4013}
4014
4015//===----------------------------------------------------------------------===//
4016// CmpPredicate Implementation
4017//===----------------------------------------------------------------------===//
4018
4019std::optional<CmpPredicate> CmpPredicate::getMatching(CmpPredicate A,
4020 CmpPredicate B) {
4021 if (A.Pred == B.Pred)
4022 return A.HasSameSign == B.HasSameSign ? A : CmpPredicate(A.Pred);
4024 return {};
4025 if (A.HasSameSign &&
4027 return B.Pred;
4028 if (B.HasSameSign &&
4030 return A.Pred;
4031 return {};
4032}
4033
4037
4039 if (auto *ICI = dyn_cast<ICmpInst>(Cmp))
4040 return ICI->getCmpPredicate();
4041 return Cmp->getPredicate();
4042}
4043
4047
4049 return getSwapped(get(Cmp));
4050}
4051
4052//===----------------------------------------------------------------------===//
4053// SwitchInst Implementation
4054//===----------------------------------------------------------------------===//
4055
4056void SwitchInst::init(Value *Value, BasicBlock *Default, unsigned NumReserved) {
4057 assert(Value && Default && NumReserved);
4058 ReservedSpace = NumReserved;
4060 allocHungoffUses(ReservedSpace);
4061
4062 Op<0>() = Value;
4063 Op<1>() = Default;
4064}
4065
4066/// SwitchInst ctor - Create a new switch instruction, specifying a value to
4067/// switch on and a default destination. The number of additional cases can
4068/// be specified here to make memory allocation more efficient. This
4069/// constructor can also autoinsert before another instruction.
4070SwitchInst::SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases,
4071 InsertPosition InsertBefore)
4072 : Instruction(Type::getVoidTy(Value->getContext()), Instruction::Switch,
4073 AllocMarker, InsertBefore) {
4074 init(Value, Default, 2 + NumCases);
4075}
4076
4077SwitchInst::SwitchInst(const SwitchInst &SI)
4078 : Instruction(SI.getType(), Instruction::Switch, AllocMarker) {
4079 init(SI.getCondition(), SI.getDefaultDest(), SI.getNumOperands());
4080 setNumHungOffUseOperands(SI.getNumOperands());
4081 Use *OL = getOperandList();
4082 ConstantInt **VL = case_values();
4083 const Use *InOL = SI.getOperandList();
4084 ConstantInt *const *InVL = SI.case_values();
4085 for (unsigned i = 2, E = SI.getNumOperands(); i != E; ++i) {
4086 OL[i] = InOL[i];
4087 VL[i - 2] = InVL[i - 2];
4088 }
4089 SubclassOptionalData = SI.SubclassOptionalData;
4090}
4091
4092/// addCase - Add an entry to the switch instruction...
4093///
4095 unsigned NewCaseIdx = getNumCases();
4096 unsigned OpNo = getNumOperands();
4097 if (OpNo + 1 > ReservedSpace)
4098 growOperands(); // Get more space!
4099 // Initialize some new operands.
4100 assert(OpNo < ReservedSpace && "Growing didn't work!");
4101 setNumHungOffUseOperands(OpNo + 1);
4102 CaseHandle Case(this, NewCaseIdx);
4103 Case.setValue(OnVal);
4104 Case.setSuccessor(Dest);
4105}
4106
4107/// removeCase - This method removes the specified case and its successor
4108/// from the switch instruction.
4110 unsigned idx = I->getCaseIndex();
4111
4112 assert(2 + idx < getNumOperands() && "Case index out of range!!!");
4113
4114 unsigned NumOps = getNumOperands();
4115 Use *OL = getOperandList();
4116 ConstantInt **VL = case_values();
4117
4118 // Overwrite this case with the end of the list.
4119 if (2 + idx + 1 != NumOps) {
4120 OL[2 + idx] = OL[NumOps - 1];
4121 VL[idx] = VL[NumOps - 2 - 1];
4122 }
4123
4124 // Nuke the last value.
4125 OL[NumOps - 1].set(nullptr);
4126 VL[NumOps - 2 - 1] = nullptr;
4128
4129 return CaseIt(this, idx);
4130}
4131
4132/// growOperands - grow operands - This grows the operand list in response
4133/// to a push_back style of operation. This grows the number of ops by 3 times.
4134///
4135void SwitchInst::growOperands() {
4136 unsigned e = getNumOperands();
4137 unsigned NumOps = e*3;
4138
4139 ReservedSpace = NumOps;
4140 growHungoffUses(ReservedSpace, /*WithExtraValues=*/true);
4141}
4142
4144 MDNode *ProfileData = getBranchWeightMDNode(SI);
4145 if (!ProfileData)
4146 return;
4147
4148 if (getNumBranchWeights(*ProfileData) != SI.getNumSuccessors()) {
4149 llvm_unreachable("number of prof branch_weights metadata operands does "
4150 "not correspond to number of succesors");
4151 }
4152
4154 if (!extractBranchWeights(ProfileData, Weights))
4155 return;
4156 this->Weights = std::move(Weights);
4157}
4158
4161 if (Weights) {
4162 assert(SI.getNumSuccessors() == Weights->size() &&
4163 "num of prof branch_weights must accord with num of successors");
4164 Changed = true;
4165 // Copy the last case to the place of the removed one and shrink.
4166 // This is tightly coupled with the way SwitchInst::removeCase() removes
4167 // the cases in SwitchInst::removeCase(CaseIt).
4168 (*Weights)[I->getCaseIndex() + 1] = Weights->back();
4169 Weights->pop_back();
4170 }
4171 return SI.removeCase(I);
4172}
4173
  // NOTE(review): the header line of this definition was lost in extraction.
  // From the body, it reads case I's branch weight and installs I's successor
  // as the switch's default destination — presumably a
  // SwitchInstProfUpdateWrapper member taking a SwitchInst::CaseIt.
  // TODO: restore the exact signature from upstream.
  auto *DestBlock = I->getCaseSuccessor();
  if (Weights) {
    // Slot 0 of the weight vector corresponds to the default destination.
    auto Weight = getSuccessorWeight(I->getCaseIndex() + 1);
    (*Weights)[0] = Weight.value();
  }

  SI.setDefaultDest(DestBlock);
}
4183
4185 ConstantInt *OnVal, BasicBlock *Dest,
4187 SI.addCase(OnVal, Dest);
4188
4189 if (!Weights && W && *W) {
4190 Changed = true;
4191 Weights = SmallVector<uint32_t, 8>(SI.getNumSuccessors(), 0);
4192 (*Weights)[SI.getNumSuccessors() - 1] = *W;
4193 } else if (Weights) {
4194 Changed = true;
4195 Weights->push_back(W.value_or(0));
4196 }
4197 if (Weights)
4198 assert(SI.getNumSuccessors() == Weights->size() &&
4199 "num of prof branch_weights must accord with num of successors");
4200}
4201
4204 // Instruction is erased. Mark as unchanged to not touch it in the destructor.
4205 Changed = false;
4206 if (Weights)
4207 Weights->resize(0);
4208 return SI.eraseFromParent();
4209}
4210
4213 if (!Weights)
4214 return std::nullopt;
4215 return (*Weights)[idx];
4216}
4217
4220 if (!W)
4221 return;
4222
4223 if (!Weights && *W)
4224 Weights = SmallVector<uint32_t, 8>(SI.getNumSuccessors(), 0);
4225
4226 if (Weights) {
4227 auto &OldW = (*Weights)[idx];
4228 if (*W != OldW) {
4229 Changed = true;
4230 OldW = *W;
4231 }
4232 }
4233}
4234
4237 unsigned idx) {
4238 if (MDNode *ProfileData = getBranchWeightMDNode(SI))
4239 if (ProfileData->getNumOperands() == SI.getNumSuccessors() + 1)
4240 return mdconst::extract<ConstantInt>(ProfileData->getOperand(idx + 1))
4241 ->getValue()
4242 .getZExtValue();
4243
4244 return std::nullopt;
4245}
4246
4247//===----------------------------------------------------------------------===//
4248// IndirectBrInst Implementation
4249//===----------------------------------------------------------------------===//
4250
4251void IndirectBrInst::init(Value *Address, unsigned NumDests) {
4252 assert(Address && Address->getType()->isPointerTy() &&
4253 "Address of indirectbr must be a pointer");
4254 ReservedSpace = 1+NumDests;
4256 allocHungoffUses(ReservedSpace);
4257
4258 Op<0>() = Address;
4259}
4260
4261
4262/// growOperands - grow operands - This grows the operand list in response
4263/// to a push_back style of operation. This grows the number of ops by 2 times.
4264///
4265void IndirectBrInst::growOperands() {
4266 unsigned e = getNumOperands();
4267 unsigned NumOps = e*2;
4268
4269 ReservedSpace = NumOps;
4270 growHungoffUses(ReservedSpace);
4271}
4272
/// Create an indirectbr on \p Address with room reserved for \p NumCases
/// destinations, optionally inserted at \p InsertBefore.
IndirectBrInst::IndirectBrInst(Value *Address, unsigned NumCases,
                               InsertPosition InsertBefore)
    : Instruction(Type::getVoidTy(Address->getContext()),
                  Instruction::IndirectBr, AllocMarker, InsertBefore) {
  init(Address, NumCases);
}
4279
4280IndirectBrInst::IndirectBrInst(const IndirectBrInst &IBI)
4281 : Instruction(Type::getVoidTy(IBI.getContext()), Instruction::IndirectBr,
4282 AllocMarker) {
4283 NumUserOperands = IBI.NumUserOperands;
4284 allocHungoffUses(IBI.getNumOperands());
4285 Use *OL = getOperandList();
4286 const Use *InOL = IBI.getOperandList();
4287 for (unsigned i = 0, E = IBI.getNumOperands(); i != E; ++i)
4288 OL[i] = InOL[i];
4289 SubclassOptionalData = IBI.SubclassOptionalData;
4290}
4291
4292/// addDestination - Add a destination.
4293///
4295 unsigned OpNo = getNumOperands();
4296 if (OpNo+1 > ReservedSpace)
4297 growOperands(); // Get more space!
4298 // Initialize some new operands.
4299 assert(OpNo < ReservedSpace && "Growing didn't work!");
4301 getOperandList()[OpNo] = DestBB;
4302}
4303
4304/// removeDestination - This method removes the specified successor from the
4305/// indirectbr instruction.
4307 assert(idx < getNumOperands()-1 && "Successor index out of range!");
4308
4309 unsigned NumOps = getNumOperands();
4310 Use *OL = getOperandList();
4311
4312 // Replace this value with the last one.
4313 OL[idx+1] = OL[NumOps-1];
4314
4315 // Nuke the last value.
4316 OL[NumOps-1].set(nullptr);
4318}
4319
4320//===----------------------------------------------------------------------===//
4321// FreezeInst Implementation
4322//===----------------------------------------------------------------------===//
4323
/// Construct a freeze of \p S; the result type matches the operand's type.
FreezeInst::FreezeInst(Value *S, const Twine &Name, InsertPosition InsertBefore)
    : UnaryInstruction(S->getType(), Freeze, S, InsertBefore) {
  setName(Name);
}
4328
4329//===----------------------------------------------------------------------===//
4330// cloneImpl() implementations
4331//===----------------------------------------------------------------------===//
4332
4333// Define these methods here so vtables don't get emitted into every translation
4334// unit that uses these classes.
4335
4336GetElementPtrInst *GetElementPtrInst::cloneImpl() const {
4338 return new (AllocMarker) GetElementPtrInst(*this, AllocMarker);
4339}
4340
4344
4348
4350 return new FCmpInst(getPredicate(), Op<0>(), Op<1>());
4351}
4352
4354 return new ICmpInst(getPredicate(), Op<0>(), Op<1>());
4355}
4356
4357ExtractValueInst *ExtractValueInst::cloneImpl() const {
4358 return new ExtractValueInst(*this);
4359}
4360
4361InsertValueInst *InsertValueInst::cloneImpl() const {
4362 return new InsertValueInst(*this);
4363}
4364
4367 getOperand(0), getAlign());
4368 Result->setUsedWithInAlloca(isUsedWithInAlloca());
4369 Result->setSwiftError(isSwiftError());
4370 return Result;
4371}
4372
4374 return new LoadInst(getType(), getOperand(0), Twine(), isVolatile(),
4376}
4377
4382
4387 Result->setVolatile(isVolatile());
4388 Result->setWeak(isWeak());
4389 return Result;
4390}
4391
4393 AtomicRMWInst *Result =
4396 Result->setVolatile(isVolatile());
4397 return Result;
4398}
4399
4403
4405 return new TruncInst(getOperand(0), getType());
4406}
4407
4409 return new ZExtInst(getOperand(0), getType());
4410}
4411
4413 return new SExtInst(getOperand(0), getType());
4414}
4415
4417 return new FPTruncInst(getOperand(0), getType());
4418}
4419
4421 return new FPExtInst(getOperand(0), getType());
4422}
4423
4425 return new UIToFPInst(getOperand(0), getType());
4426}
4427
4429 return new SIToFPInst(getOperand(0), getType());
4430}
4431
4433 return new FPToUIInst(getOperand(0), getType());
4434}
4435
4437 return new FPToSIInst(getOperand(0), getType());
4438}
4439
4441 return new PtrToIntInst(getOperand(0), getType());
4442}
4443
4447
4449 return new IntToPtrInst(getOperand(0), getType());
4450}
4451
4453 return new BitCastInst(getOperand(0), getType());
4454}
4455
4459
4460CallInst *CallInst::cloneImpl() const {
4461 if (hasOperandBundles()) {
4465 return new (AllocMarker) CallInst(*this, AllocMarker);
4466 }
4468 return new (AllocMarker) CallInst(*this, AllocMarker);
4469}
4470
4471SelectInst *SelectInst::cloneImpl() const {
4473}
4474
4476 return new VAArgInst(getOperand(0), getType());
4477}
4478
4479ExtractElementInst *ExtractElementInst::cloneImpl() const {
4481}
4482
4483InsertElementInst *InsertElementInst::cloneImpl() const {
4485}
4486
4490
4491PHINode *PHINode::cloneImpl() const { return new (AllocMarker) PHINode(*this); }
4492
4493LandingPadInst *LandingPadInst::cloneImpl() const {
4494 return new LandingPadInst(*this);
4495}
4496
4497ReturnInst *ReturnInst::cloneImpl() const {
4499 return new (AllocMarker) ReturnInst(*this, AllocMarker);
4500}
4501
4502BranchInst *BranchInst::cloneImpl() const {
4504 return new (AllocMarker) BranchInst(*this, AllocMarker);
4505}
4506
4507SwitchInst *SwitchInst::cloneImpl() const { return new SwitchInst(*this); }
4508
4509IndirectBrInst *IndirectBrInst::cloneImpl() const {
4510 return new IndirectBrInst(*this);
4511}
4512
4513InvokeInst *InvokeInst::cloneImpl() const {
4514 if (hasOperandBundles()) {
4518 return new (AllocMarker) InvokeInst(*this, AllocMarker);
4519 }
4521 return new (AllocMarker) InvokeInst(*this, AllocMarker);
4522}
4523
4524CallBrInst *CallBrInst::cloneImpl() const {
4525 if (hasOperandBundles()) {
4529 return new (AllocMarker) CallBrInst(*this, AllocMarker);
4530 }
4532 return new (AllocMarker) CallBrInst(*this, AllocMarker);
4533}
4534
4535ResumeInst *ResumeInst::cloneImpl() const {
4536 return new (AllocMarker) ResumeInst(*this);
4537}
4538
4539CleanupReturnInst *CleanupReturnInst::cloneImpl() const {
4541 return new (AllocMarker) CleanupReturnInst(*this, AllocMarker);
4542}
4543
4544CatchReturnInst *CatchReturnInst::cloneImpl() const {
4545 return new (AllocMarker) CatchReturnInst(*this);
4546}
4547
4548CatchSwitchInst *CatchSwitchInst::cloneImpl() const {
4549 return new CatchSwitchInst(*this);
4550}
4551
4552FuncletPadInst *FuncletPadInst::cloneImpl() const {
4554 return new (AllocMarker) FuncletPadInst(*this, AllocMarker);
4555}
4556
4558 LLVMContext &Context = getContext();
4559 return new UnreachableInst(Context);
4560}
4561
4562bool UnreachableInst::shouldLowerToTrap(bool TrapUnreachable,
4563 bool NoTrapAfterNoreturn) const {
4564 if (!TrapUnreachable)
4565 return false;
4566
4567 // We may be able to ignore unreachable behind a noreturn call.
4569 Call && Call->doesNotReturn()) {
4570 if (NoTrapAfterNoreturn)
4571 return false;
4572 // Do not emit an additional trap instruction.
4573 if (Call->isNonContinuableTrap())
4574 return false;
4575 }
4576
4577 if (getFunction()->hasFnAttribute(Attribute::Naked))
4578 return false;
4579
4580 return true;
4581}
4582
4584 return new FreezeInst(getOperand(0));
4585}
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
constexpr LLT S1
Rewrite undef for PHI
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Atomic ordering constants.
@ FnAttr
This file contains the simple types necessary to represent the attributes associated with functions a...
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
#define LLVM_ABI
Definition Compiler.h:213
This file contains the declarations for the subclasses of Constant, which represent the different fla...
@ Default
#define op(i)
Module.h This file contains the declarations for the Module class.
static Align computeLoadStoreDefaultAlign(Type *Ty, InsertPosition Pos)
static bool isImpliedFalseByMatchingCmp(CmpPredicate Pred1, CmpPredicate Pred2)
static Value * createPlaceholderForShuffleVector(Value *V)
static Align computeAllocaDefaultAlign(Type *Ty, InsertPosition Pos)
static cl::opt< bool > DisableI2pP2iOpt("disable-i2p-p2i-opt", cl::init(false), cl::desc("Disables inttoptr/ptrtoint roundtrip optimization"))
static bool hasNonZeroFPOperands(const CmpInst *Cmp)
static int matchShuffleAsBitRotate(ArrayRef< int > Mask, int NumSubElts)
Try to lower a vector shuffle as a bit rotation.
static Type * getIndexedTypeInternal(Type *Ty, ArrayRef< IndexTy > IdxList)
static bool isReplicationMaskWithParams(ArrayRef< int > Mask, int ReplicationFactor, int VF)
static bool isIdentityMaskImpl(ArrayRef< int > Mask, int NumOpElts)
static bool isSingleSourceMaskImpl(ArrayRef< int > Mask, int NumOpElts)
static Value * getAISize(LLVMContext &Context, Value *Amt)
static bool isImpliedTrueByMatchingCmp(CmpPredicate Pred1, CmpPredicate Pred2)
const size_t AbstractManglingParser< Derived, Alloc >::NumOps
#define F(x, y, z)
Definition MD5.cpp:54
#define I(x, y, z)
Definition MD5.cpp:57
This file contains the declarations for metadata subclasses.
#define T
MachineInstr unsigned OpIdx
uint64_t IntrinsicInst * II
#define P(N)
PowerPC Reduce CR logical Operation
This file contains the declarations for profiling metadata utility functions.
const SmallVectorImpl< MachineOperand > & Cond
static unsigned getNumElements(Type *Ty)
This file implements the SmallBitVector class.
This file defines the SmallVector class.
#define LLVM_DEBUG(...)
Definition Debug.h:114
static SymbolRef::Type getType(const Symbol *Sym)
Definition TapiFile.cpp:39
static std::optional< unsigned > getOpcode(ArrayRef< VPValue * > Values)
Returns the opcode of Values or ~0 if they do not all agree.
Definition VPlanSLP.cpp:247
Value * RHS
Value * LHS
cmpResult
IEEE-754R 5.11: Floating Point Comparison Relations.
Definition APFloat.h:334
LLVM_ABI float convertToFloat() const
Converts this APFloat to host float value.
Definition APFloat.cpp:6065
Class for arbitrary precision integers.
Definition APInt.h:78
void setBit(unsigned BitPosition)
Set the given bit to 1 whose position is given as "bitPosition".
Definition APInt.h:1339
bool isZero() const
Determine if this value is zero, i.e. all bits are clear.
Definition APInt.h:381
unsigned countr_zero() const
Count the number of trailing zero bits.
Definition APInt.h:1648
unsigned countl_zero() const
The APInt version of std::countl_zero.
Definition APInt.h:1607
static APInt getZero(unsigned numBits)
Get the '0' value for the specified bit-width.
Definition APInt.h:201
This class represents a conversion between pointers from one address space to another.
LLVM_ABI AddrSpaceCastInst * cloneImpl() const
Clone an identical AddrSpaceCastInst.
LLVM_ABI AddrSpaceCastInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics.
LLVM_ABI std::optional< TypeSize > getAllocationSizeInBits(const DataLayout &DL) const
Get allocation size in bits.
bool isSwiftError() const
Return true if this alloca is used as a swifterror argument to a call.
LLVM_ABI bool isStaticAlloca() const
Return true if this alloca is in the entry block of the function and is a constant size.
Align getAlign() const
Return the alignment of the memory that is being allocated by the instruction.
LLVM_ABI AllocaInst * cloneImpl() const
Type * getAllocatedType() const
Return the type that is being allocated by the instruction.
bool isUsedWithInAlloca() const
Return true if this alloca is used as an inalloca argument to a call.
unsigned getAddressSpace() const
Return the address space for the allocation.
LLVM_ABI std::optional< TypeSize > getAllocationSize(const DataLayout &DL) const
Get allocation size in bytes.
LLVM_ABI bool isArrayAllocation() const
Return true if there is an allocation size parameter to the allocation instruction that is not 1.
void setAlignment(Align Align)
const Value * getArraySize() const
Get the number of elements allocated.
LLVM_ABI AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize, const Twine &Name, InsertPosition InsertBefore)
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition ArrayRef.h:40
iterator end() const
Definition ArrayRef.h:131
size_t size() const
size - Get the array size.
Definition ArrayRef.h:142
iterator begin() const
Definition ArrayRef.h:130
bool empty() const
empty - Check if the array is empty.
Definition ArrayRef.h:137
ArrayRef< T > slice(size_t N, size_t M) const
slice(n, m) - Chop off the first N elements of the array, and keep M elements in the array.
Definition ArrayRef.h:186
Class to represent array types.
void setSyncScopeID(SyncScope::ID SSID)
Sets the synchronization scope ID of this cmpxchg instruction.
bool isVolatile() const
Return true if this is a cmpxchg from a volatile memory location.
void setFailureOrdering(AtomicOrdering Ordering)
Sets the failure ordering constraint of this cmpxchg instruction.
AtomicOrdering getFailureOrdering() const
Returns the failure ordering constraint of this cmpxchg instruction.
void setSuccessOrdering(AtomicOrdering Ordering)
Sets the success ordering constraint of this cmpxchg instruction.
LLVM_ABI AtomicCmpXchgInst * cloneImpl() const
Align getAlign() const
Return the alignment of the memory that is being allocated by the instruction.
friend class Instruction
Iterator for Instructions in a `BasicBlock.
bool isWeak() const
Return true if this cmpxchg may spuriously fail.
void setAlignment(Align Align)
AtomicOrdering getSuccessOrdering() const
Returns the success ordering constraint of this cmpxchg instruction.
SyncScope::ID getSyncScopeID() const
Returns the synchronization scope ID of this cmpxchg instruction.
LLVM_ABI AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal, Align Alignment, AtomicOrdering SuccessOrdering, AtomicOrdering FailureOrdering, SyncScope::ID SSID, InsertPosition InsertBefore=nullptr)
Align getAlign() const
Return the alignment of the memory that is being allocated by the instruction.
LLVM_ABI AtomicRMWInst * cloneImpl() const
bool isVolatile() const
Return true if this is a RMW on a volatile memory location.
BinOp
This enumeration lists the possible modifications atomicrmw can make.
@ Add
*p = old + v
@ FAdd
*p = old + v
@ USubCond
Subtract only if no unsigned overflow.
@ FMinimum
*p = minimum(old, v) minimum matches the behavior of llvm.minimum.
@ Min
*p = old <signed v ? old : v
@ Sub
*p = old - v
@ And
*p = old & v
@ Xor
*p = old ^ v
@ USubSat
*p = usub.sat(old, v) usub.sat matches the behavior of llvm.usub.sat.
@ FMaximum
*p = maximum(old, v) maximum matches the behavior of llvm.maximum.
@ FSub
*p = old - v
@ UIncWrap
Increment one up to a maximum value.
@ Max
*p = old >signed v ? old : v
@ UMin
*p = old <unsigned v ? old : v
@ FMin
*p = minnum(old, v) minnum matches the behavior of llvm.minnum.
@ UMax
*p = old >unsigned v ? old : v
@ FMax
*p = maxnum(old, v) maxnum matches the behavior of llvm.maxnum.
@ UDecWrap
Decrement one until a minimum value or zero.
@ Nand
*p = ~(old & v)
void setSyncScopeID(SyncScope::ID SSID)
Sets the synchronization scope ID of this rmw instruction.
void setOrdering(AtomicOrdering Ordering)
Sets the ordering constraint of this rmw instruction.
void setOperation(BinOp Operation)
friend class Instruction
Iterator for Instructions in a `BasicBlock.
BinOp getOperation() const
LLVM_ABI AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val, Align Alignment, AtomicOrdering Ordering, SyncScope::ID SSID, InsertPosition InsertBefore=nullptr)
SyncScope::ID getSyncScopeID() const
Returns the synchronization scope ID of this rmw instruction.
void setAlignment(Align Align)
static LLVM_ABI StringRef getOperationName(BinOp Op)
AtomicOrdering getOrdering() const
Returns the ordering constraint of this rmw instruction.
LLVM_ABI CaptureInfo getCaptureInfo() const
Functions, function parameters, and return types can have attributes to indicate how they should be t...
Definition Attributes.h:103
LLVM_ABI const ConstantRange & getRange() const
Returns the value of the range attribute.
AttrKind
This enumeration lists the attributes that can be associated with parameters, function results,...
Definition Attributes.h:122
static LLVM_ABI Attribute getWithMemoryEffects(LLVMContext &Context, MemoryEffects ME)
bool isValid() const
Return true if the attribute is any kind of attribute.
Definition Attributes.h:259
LLVM Basic Block Representation.
Definition BasicBlock.h:62
const Function * getParent() const
Return the enclosing method, or null if none.
Definition BasicBlock.h:213
LLVM_ABI const DataLayout & getDataLayout() const
Get the data layout of the module this basic block belongs to.
static LLVM_ABI BinaryOperator * CreateNeg(Value *Op, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Helper functions to construct and inspect unary operations (NEG and NOT) via binary operators SUB and...
BinaryOps getOpcode() const
Definition InstrTypes.h:374
LLVM_ABI bool swapOperands()
Exchange the two operands to this instruction.
static LLVM_ABI BinaryOperator * CreateNot(Value *Op, const Twine &Name="", InsertPosition InsertBefore=nullptr)
friend class Instruction
Iterator for Instructions in a `BasicBlock.
Definition InstrTypes.h:181
static LLVM_ABI BinaryOperator * Create(BinaryOps Op, Value *S1, Value *S2, const Twine &Name=Twine(), InsertPosition InsertBefore=nullptr)
Construct a binary instruction, given the opcode and the two operands.
LLVM_ABI BinaryOperator(BinaryOps iType, Value *S1, Value *S2, Type *Ty, const Twine &Name, InsertPosition InsertBefore)
static LLVM_ABI BinaryOperator * CreateNSWNeg(Value *Op, const Twine &Name="", InsertPosition InsertBefore=nullptr)
LLVM_ABI BinaryOperator * cloneImpl() const
This class represents a no-op cast from one type to another.
LLVM_ABI BitCastInst * cloneImpl() const
Clone an identical BitCastInst.
LLVM_ABI BitCastInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics.
Conditional or Unconditional Branch instruction.
LLVM_ABI void swapSuccessors()
Swap the successors of this branch instruction.
LLVM_ABI BranchInst * cloneImpl() const
bool isConditional() const
Value * getCondition() const
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
LLVM_ABI FPClassTest getParamNoFPClass(unsigned i) const
Extract a test mask for disallowed floating-point value classes for the parameter.
bool isInlineAsm() const
Check if this call is an inline asm statement.
LLVM_ABI BundleOpInfo & getBundleOpInfoForOperand(unsigned OpIdx)
Return the BundleOpInfo for the operand at index OpIdx.
void setCallingConv(CallingConv::ID CC)
LLVM_ABI FPClassTest getRetNoFPClass() const
Extract a test mask for disallowed floating-point value classes for the return value.
bundle_op_iterator bundle_op_info_begin()
Return the start of the list of BundleOpInfo instances associated with this OperandBundleUser.
LLVM_ABI bool paramHasNonNullAttr(unsigned ArgNo, bool AllowUndefOrPoison) const
Return true if this argument has the nonnull attribute on either the CallBase instruction or the call...
LLVM_ABI MemoryEffects getMemoryEffects() const
void addFnAttr(Attribute::AttrKind Kind)
Adds the attribute to the function.
LLVM_ABI bool doesNotAccessMemory() const
Determine if the call does not access memory.
LLVM_ABI void getOperandBundlesAsDefs(SmallVectorImpl< OperandBundleDef > &Defs) const
Return the list of operand bundles attached to this instruction as a vector of OperandBundleDefs.
LLVM_ABI void setOnlyAccessesArgMemory()
OperandBundleUse getOperandBundleAt(unsigned Index) const
Return the operand bundle at a specific index.
OperandBundleUse operandBundleFromBundleOpInfo(const BundleOpInfo &BOI) const
Simple helper function to map a BundleOpInfo to an OperandBundleUse.
LLVM_ABI void setOnlyAccessesInaccessibleMemOrArgMem()
std::optional< OperandBundleUse > getOperandBundle(StringRef Name) const
Return an operand bundle by name, if present.
Function * getCalledFunction() const
Returns the function called, or null if this is an indirect function invocation or the function signa...
LLVM_ABI void setDoesNotAccessMemory()
AttributeSet getParamAttributes(unsigned ArgNo) const
Return the param attributes for this call.
bool hasRetAttr(Attribute::AttrKind Kind) const
Determine whether the return value has the given attribute.
LLVM_ABI bool onlyAccessesInaccessibleMemory() const
Determine if the function may only access memory that is inaccessible from the IR.
unsigned getNumOperandBundles() const
Return the number of operand bundles associated with this User.
CallingConv::ID getCallingConv() const
bundle_op_iterator bundle_op_info_end()
Return the end of the list of BundleOpInfo instances associated with this OperandBundleUser.
LLVM_ABI unsigned getNumSubclassExtraOperandsDynamic() const
Get the number of extra operands for instructions that don't have a fixed number of extra operands.
BundleOpInfo * bundle_op_iterator
LLVM_ABI bool paramHasAttr(unsigned ArgNo, Attribute::AttrKind Kind) const
Determine whether the argument or parameter has the given attribute.
User::op_iterator arg_begin()
Return the iterator pointing to the beginning of the argument list.
LLVM_ABI bool isMustTailCall() const
Tests if this call site must be tail call optimized.
LLVM_ABI bool isIndirectCall() const
Return true if the callsite is an indirect call.
LLVM_ABI bool onlyReadsMemory() const
Determine if the call does not access or only reads memory.
bool isByValArgument(unsigned ArgNo) const
Determine whether this argument is passed by value.
iterator_range< bundle_op_iterator > bundle_op_infos()
Return the range [bundle_op_info_begin, bundle_op_info_end).
LLVM_ABI void setOnlyReadsMemory()
static LLVM_ABI CallBase * addOperandBundle(CallBase *CB, uint32_t ID, OperandBundleDef OB, InsertPosition InsertPt=nullptr)
Create a clone of CB with operand bundle OB added.
LLVM_ABI bool onlyAccessesInaccessibleMemOrArgMem() const
Determine if the function may only access memory that is either inaccessible from the IR or pointed t...
LLVM_ABI CaptureInfo getCaptureInfo(unsigned OpNo) const
Return which pointer components this operand may capture.
LLVM_ABI bool hasArgumentWithAdditionalReturnCaptureComponents() const
Returns whether the call has an argument that has an attribute like captures(ret: address,...
CallBase(AttributeList const &A, FunctionType *FT, ArgsTy &&... Args)
Value * getCalledOperand() const
LLVM_ABI void setOnlyWritesMemory()
LLVM_ABI op_iterator populateBundleOperandInfos(ArrayRef< OperandBundleDef > Bundles, const unsigned BeginIndex)
Populate the BundleOpInfo instances and the Use& vector from Bundles.
AttributeList Attrs
parameter attributes for callable
bool hasOperandBundlesOtherThan(ArrayRef< uint32_t > IDs) const
Return true if this operand bundle user contains operand bundles with tags other than those specified...
LLVM_ABI std::optional< ConstantRange > getRange() const
If this return value has a range attribute, return the value range of the argument.
LLVM_ABI bool isReturnNonNull() const
Return true if the return value is known to be not null.
Value * getArgOperand(unsigned i) const
FunctionType * FTy
uint64_t getRetDereferenceableBytes() const
Extract the number of dereferenceable bytes for a call or parameter (0=unknown).
User::op_iterator arg_end()
Return the iterator pointing to the end of the argument list.
FunctionType * getFunctionType() const
LLVM_ABI Intrinsic::ID getIntrinsicID() const
Returns the intrinsic ID of the intrinsic called or Intrinsic::not_intrinsic if the called function i...
static unsigned CountBundleInputs(ArrayRef< OperandBundleDef > Bundles)
Return the total number of values used in Bundles.
LLVM_ABI Value * getArgOperandWithAttribute(Attribute::AttrKind Kind) const
If one of the arguments has the specified attribute, returns its operand value.
LLVM_ABI void setOnlyAccessesInaccessibleMemory()
static LLVM_ABI CallBase * Create(CallBase *CB, ArrayRef< OperandBundleDef > Bundles, InsertPosition InsertPt=nullptr)
Create a clone of CB with a different set of operand bundles and insert it before InsertPt.
LLVM_ABI bool onlyWritesMemory() const
Determine if the call does not access or only writes memory.
LLVM_ABI bool hasClobberingOperandBundles() const
Return true if this operand bundle user has operand bundles that may write to the heap.
void setCalledOperand(Value *V)
static LLVM_ABI CallBase * removeOperandBundle(CallBase *CB, uint32_t ID, InsertPosition InsertPt=nullptr)
Create a clone of CB with operand bundle ID removed.
LLVM_ABI bool hasReadingOperandBundles() const
Return true if this operand bundle user has operand bundles that may read from the heap.
LLVM_ABI bool onlyAccessesArgMemory() const
Determine if the call can access memmory only using pointers based on its arguments.
unsigned arg_size() const
AttributeList getAttributes() const
Return the attributes for this call.
LLVM_ABI void setMemoryEffects(MemoryEffects ME)
bool hasOperandBundles() const
Return true if this User has any operand bundles.
LLVM_ABI bool isTailCall() const
Tests if this call site is marked as a tail call.
LLVM_ABI Function * getCaller()
Helper to get the caller (the parent function).
CallBr instruction, tracking function calls that may not return control but instead transfer it to a ...
SmallVector< BasicBlock *, 16 > getIndirectDests() const
void setDefaultDest(BasicBlock *B)
void setIndirectDest(unsigned i, BasicBlock *B)
BasicBlock * getDefaultDest() const
static CallBrInst * Create(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest, ArrayRef< BasicBlock * > IndirectDests, ArrayRef< Value * > Args, const Twine &NameStr, InsertPosition InsertBefore=nullptr)
LLVM_ABI CallBrInst * cloneImpl() const
This class represents a function call, abstracting a target machine's calling convention.
LLVM_ABI void updateProfWeight(uint64_t S, uint64_t T)
Updates profile metadata by scaling it by S / T.
TailCallKind getTailCallKind() const
LLVM_ABI CallInst * cloneImpl() const
static CallInst * Create(FunctionType *Ty, Value *F, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Represents which components of the pointer may be captured in which location.
Definition ModRef.h:359
CaptureComponents getOtherComponents() const
Get components potentially captured through locations other than the return value.
Definition ModRef.h:391
static CaptureInfo none()
Create CaptureInfo that does not capture any components of the pointer.
Definition ModRef.h:372
static CaptureInfo all()
Create CaptureInfo that may capture all components of the pointer.
Definition ModRef.h:375
CaptureComponents getRetComponents() const
Get components potentially captured by the return value.
Definition ModRef.h:387
static LLVM_ABI Instruction::CastOps getCastOpcode(const Value *Val, bool SrcIsSigned, Type *Ty, bool DstIsSigned)
Returns the opcode necessary to cast Val into Ty using usual casting rules.
static LLVM_ABI CastInst * CreatePointerBitCastOrAddrSpaceCast(Value *S, Type *Ty, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Create a BitCast or an AddrSpaceCast cast instruction.
Instruction::CastOps getOpcode() const
Return the opcode of this CastInst.
Definition InstrTypes.h:610
static LLVM_ABI unsigned isEliminableCastPair(Instruction::CastOps firstOpcode, Instruction::CastOps secondOpcode, Type *SrcTy, Type *MidTy, Type *DstTy, const DataLayout *DL)
Determine how a pair of casts can be eliminated, if they can be at all.
static LLVM_ABI CastInst * CreateIntegerCast(Value *S, Type *Ty, bool isSigned, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Create a ZExt, BitCast, or Trunc for int -> int casts.
static LLVM_ABI CastInst * CreateFPCast(Value *S, Type *Ty, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Create an FPExt, BitCast, or FPTrunc for fp -> fp casts.
CastInst(Type *Ty, unsigned iType, Value *S, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics for subclasses.
Definition InstrTypes.h:451
static LLVM_ABI bool isBitOrNoopPointerCastable(Type *SrcTy, Type *DestTy, const DataLayout &DL)
Check whether a bitcast, inttoptr, or ptrtoint cast between these types is valid and a no-op.
static LLVM_ABI bool isBitCastable(Type *SrcTy, Type *DestTy)
Check whether a bitcast between these types is valid.
static LLVM_ABI CastInst * CreateTruncOrBitCast(Value *S, Type *Ty, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Create a Trunc or BitCast cast instruction.
static LLVM_ABI CastInst * CreatePointerCast(Value *S, Type *Ty, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Create a BitCast, AddrSpaceCast or a PtrToInt cast instruction.
static LLVM_ABI CastInst * CreateBitOrPointerCast(Value *S, Type *Ty, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Create a BitCast, a PtrToInt, or an IntToPtr cast instruction.
static LLVM_ABI bool isNoopCast(Instruction::CastOps Opcode, Type *SrcTy, Type *DstTy, const DataLayout &DL)
A no-op cast is one that can be effected without changing any bits.
static LLVM_ABI CastInst * CreateZExtOrBitCast(Value *S, Type *Ty, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Create a ZExt or BitCast cast instruction.
static LLVM_ABI CastInst * Create(Instruction::CastOps, Value *S, Type *Ty, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Provides a way to construct any of the CastInst subclasses using an opcode instead of the subclass's ...
LLVM_ABI bool isIntegerCast() const
There are several places where we need to know if a cast instruction only deals with integer source a...
static LLVM_ABI CastInst * CreateSExtOrBitCast(Value *S, Type *Ty, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Create a SExt or BitCast cast instruction.
static LLVM_ABI bool castIsValid(Instruction::CastOps op, Type *SrcTy, Type *DstTy)
This method can be used to determine if a cast from SrcTy to DstTy using Opcode op is valid or not.
LLVM_ABI CatchReturnInst * cloneImpl() const
void setUnwindDest(BasicBlock *UnwindDest)
LLVM_ABI void addHandler(BasicBlock *Dest)
Add an entry to the switch instruction... Note: This action invalidates handler_end().
LLVM_ABI CatchSwitchInst * cloneImpl() const
mapped_iterator< op_iterator, DerefFnTy > handler_iterator
Value * getParentPad() const
void setParentPad(Value *ParentPad)
BasicBlock * getUnwindDest() const
LLVM_ABI void removeHandler(handler_iterator HI)
LLVM_ABI CleanupReturnInst * cloneImpl() const
This class is the base class for the comparison instructions.
Definition InstrTypes.h:664
Predicate getStrictPredicate() const
For example, SGE -> SGT, SLE -> SLT, ULE -> ULT, UGE -> UGT.
Definition InstrTypes.h:858
bool isEquality() const
Determine if this is an equals/not equals predicate.
Definition InstrTypes.h:915
void setPredicate(Predicate P)
Set the predicate for this instruction to the specified value.
Definition InstrTypes.h:768
bool isFalseWhenEqual() const
This is just a convenience.
Definition InstrTypes.h:948
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
Definition InstrTypes.h:676
@ FCMP_OEQ
0 0 0 1 True if ordered and equal
Definition InstrTypes.h:679
@ FCMP_TRUE
1 1 1 1 Always true (always folded)
Definition InstrTypes.h:693
@ ICMP_SLT
signed less than
Definition InstrTypes.h:705
@ ICMP_SLE
signed less or equal
Definition InstrTypes.h:706
@ FCMP_OLT
0 1 0 0 True if ordered and less than
Definition InstrTypes.h:682
@ FCMP_ULE
1 1 0 1 True if unordered, less than, or equal
Definition InstrTypes.h:691
@ FCMP_OGT
0 0 1 0 True if ordered and greater than
Definition InstrTypes.h:680
@ FCMP_OGE
0 0 1 1 True if ordered and greater than or equal
Definition InstrTypes.h:681
@ ICMP_UGE
unsigned greater or equal
Definition InstrTypes.h:700
@ ICMP_UGT
unsigned greater than
Definition InstrTypes.h:699
@ ICMP_SGT
signed greater than
Definition InstrTypes.h:703
@ FCMP_ULT
1 1 0 0 True if unordered or less than
Definition InstrTypes.h:690
@ FCMP_ONE
0 1 1 0 True if ordered and operands are unequal
Definition InstrTypes.h:684
@ FCMP_UEQ
1 0 0 1 True if unordered or equal
Definition InstrTypes.h:687
@ ICMP_ULT
unsigned less than
Definition InstrTypes.h:701
@ FCMP_UGT
1 0 1 0 True if unordered or greater than
Definition InstrTypes.h:688
@ FCMP_OLE
0 1 0 1 True if ordered and less than or equal
Definition InstrTypes.h:683
@ FCMP_ORD
0 1 1 1 True if ordered (no nans)
Definition InstrTypes.h:685
@ ICMP_NE
not equal
Definition InstrTypes.h:698
@ ICMP_SGE
signed greater or equal
Definition InstrTypes.h:704
@ FCMP_UNE
1 1 1 0 True if unordered or not equal
Definition InstrTypes.h:692
@ ICMP_ULE
unsigned less or equal
Definition InstrTypes.h:702
@ FCMP_UGE
1 0 1 1 True if unordered, greater than, or equal
Definition InstrTypes.h:689
@ FCMP_FALSE
0 0 0 0 Always false (always folded)
Definition InstrTypes.h:678
@ FCMP_UNO
1 0 0 0 True if unordered: isnan(X) | isnan(Y)
Definition InstrTypes.h:686
LLVM_ABI bool isEquivalence(bool Invert=false) const
Determine if one operand of this compare can always be replaced by the other operand,...
bool isSigned() const
Definition InstrTypes.h:930
static LLVM_ABI bool isEquality(Predicate pred)
Determine if this is an equals/not equals predicate.
Predicate getSwappedPredicate() const
For example, EQ->EQ, SLE->SGE, ULT->UGT, OEQ->OEQ, ULE->UGE, OLT->OGT, etc.
Definition InstrTypes.h:827
bool isTrueWhenEqual() const
This is just a convenience.
Definition InstrTypes.h:942
static LLVM_ABI CmpInst * Create(OtherOps Op, Predicate Pred, Value *S1, Value *S2, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Construct a compare instruction, given the opcode, the predicate and the two operands.
static bool isFPPredicate(Predicate P)
Definition InstrTypes.h:770
Predicate getNonStrictPredicate() const
For example, SGT -> SGE, SLT -> SLE, ULT -> ULE, UGT -> UGE.
Definition InstrTypes.h:871
static LLVM_ABI CmpInst * CreateWithCopiedFlags(OtherOps Op, Predicate Pred, Value *S1, Value *S2, const Instruction *FlagsSource, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Construct a compare instruction, given the opcode, the predicate, the two operands and the instructio...
bool isNonStrictPredicate() const
Definition InstrTypes.h:852
LLVM_ABI void swapOperands()
This is just a convenience that dispatches to the subclasses.
static bool isRelational(Predicate P)
Return true if the predicate is relational (not EQ or NE).
Definition InstrTypes.h:923
Predicate getInversePredicate() const
For example, EQ -> NE, UGT -> ULE, SLT -> SGE, OEQ -> UNE, UGT -> OLE, OLT -> UGE,...
Definition InstrTypes.h:789
static LLVM_ABI StringRef getPredicateName(Predicate P)
Predicate getPredicate() const
Return the predicate for this instruction.
Definition InstrTypes.h:765
bool isStrictPredicate() const
Definition InstrTypes.h:843
static LLVM_ABI bool isUnordered(Predicate predicate)
Determine if the predicate is an unordered operation.
Predicate getFlippedStrictnessPredicate() const
For predicate of kind "is X or equal to 0" returns the predicate "is X".
Definition InstrTypes.h:893
static bool isIntPredicate(Predicate P)
Definition InstrTypes.h:776
static LLVM_ABI bool isOrdered(Predicate predicate)
Determine if the predicate is an ordered operation.
LLVM_ABI CmpInst(Type *ty, Instruction::OtherOps op, Predicate pred, Value *LHS, Value *RHS, const Twine &Name="", InsertPosition InsertBefore=nullptr, Instruction *FlagsSource=nullptr)
bool isUnsigned() const
Definition InstrTypes.h:936
LLVM_ABI bool isCommutative() const
This is just a convenience that dispatches to the subclasses.
An abstraction over a floating-point predicate, and a pack of an integer predicate with samesign info...
static LLVM_ABI std::optional< CmpPredicate > getMatching(CmpPredicate A, CmpPredicate B)
Compares two CmpPredicates taking samesign into account and returns the canonicalized CmpPredicate if...
CmpPredicate()
Default constructor.
static LLVM_ABI CmpPredicate get(const CmpInst *Cmp)
Do a ICmpInst::getCmpPredicate() or CmpInst::getPredicate(), as appropriate.
LLVM_ABI CmpInst::Predicate getPreferredSignedPredicate() const
Attempts to return a signed CmpInst::Predicate from the CmpPredicate.
bool hasSameSign() const
Query samesign information, for optimizations.
static LLVM_ABI CmpPredicate getSwapped(CmpPredicate P)
Get the swapped predicate of a CmpPredicate.
ConstantFP - Floating Point Values [float, double].
Definition Constants.h:282
const APFloat & getValueAPF() const
Definition Constants.h:325
This is the shared class of boolean and integer constants.
Definition Constants.h:87
LLVM_ABI ConstantRange intersectWith(const ConstantRange &CR, PreferredRangeType Type=Smallest) const
Return the range that results from the intersection of this range with another range.
static LLVM_ABI Constant * get(ArrayRef< Constant * > V)
This is an important base class in LLVM.
Definition Constant.h:43
static LLVM_ABI Constant * getAllOnesValue(Type *Ty)
static LLVM_ABI Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
A parsed version of the target data layout string in and methods for querying it.
Definition DataLayout.h:64
static constexpr ElementCount getFixed(ScalarTy MinVal)
Definition TypeSize.h:309
LLVM_ABI ExtractElementInst * cloneImpl() const
static ExtractElementInst * Create(Value *Vec, Value *Idx, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
static LLVM_ABI bool isValidOperands(const Value *Vec, const Value *Idx)
Return true if an extractelement instruction can be formed with the specified operands.
This instruction extracts a struct member or array element value from an aggregate value.
static LLVM_ABI Type * getIndexedType(Type *Agg, ArrayRef< unsigned > Idxs)
Returns the type of the element that would be extracted with an extractvalue instruction with the spe...
LLVM_ABI ExtractValueInst * cloneImpl() const
This instruction compares its operands according to the predicate given to the constructor.
bool isEquality() const
static LLVM_ABI bool compare(const APFloat &LHS, const APFloat &RHS, FCmpInst::Predicate Pred)
Return result of LHS Pred RHS comparison.
LLVM_ABI FCmpInst * cloneImpl() const
Clone an identical FCmpInst.
FCmpInst(InsertPosition InsertBefore, Predicate pred, Value *LHS, Value *RHS, const Twine &NameStr="")
Constructor with insertion semantics.
This class represents an extension of floating point types.
LLVM_ABI FPExtInst * cloneImpl() const
Clone an identical FPExtInst.
LLVM_ABI FPExtInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics.
LLVM_ABI float getFPAccuracy() const
Get the maximum error permitted by this operation in ULPs.
This class represents a cast from floating point to signed integer.
LLVM_ABI FPToSIInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics.
LLVM_ABI FPToSIInst * cloneImpl() const
Clone an identical FPToSIInst.
This class represents a cast from floating point to unsigned integer.
LLVM_ABI FPToUIInst * cloneImpl() const
Clone an identical FPToUIInst.
LLVM_ABI FPToUIInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics.
This class represents a truncation of floating point types.
LLVM_ABI FPTruncInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics.
LLVM_ABI FPTruncInst * cloneImpl() const
Clone an identical FPTruncInst.
LLVM_ABI FenceInst(LLVMContext &C, AtomicOrdering Ordering, SyncScope::ID SSID=SyncScope::System, InsertPosition InsertBefore=nullptr)
SyncScope::ID getSyncScopeID() const
Returns the synchronization scope ID of this fence instruction.
void setSyncScopeID(SyncScope::ID SSID)
Sets the synchronization scope ID of this fence instruction.
LLVM_ABI FenceInst * cloneImpl() const
friend class Instruction
Iterator for Instructions in a `BasicBlock`.
void setOrdering(AtomicOrdering Ordering)
Sets the ordering constraint of this fence instruction.
AtomicOrdering getOrdering() const
Returns the ordering constraint of this fence instruction.
Class to represent fixed width SIMD vectors.
unsigned getNumElements() const
LLVM_ABI FreezeInst(Value *S, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
LLVM_ABI FreezeInst * cloneImpl() const
Clone an identical FreezeInst.
void setParentPad(Value *ParentPad)
Value * getParentPad() const
Convenience accessors.
LLVM_ABI FuncletPadInst * cloneImpl() const
Class to represent function types.
unsigned getNumParams() const
Return the number of fixed parameters this function type requires.
Type * getParamType(unsigned i) const
Parameter type accessors.
bool isVarArg() const
Represents flags for the getelementptr instruction/expression.
static GEPNoWrapFlags inBounds()
GEPNoWrapFlags withoutInBounds() const
unsigned getRaw() const
an instruction for type-safe pointer arithmetic to access elements of arrays and structs
LLVM_ABI bool isInBounds() const
Determine whether the GEP has the inbounds flag.
LLVM_ABI bool hasNoUnsignedSignedWrap() const
Determine whether the GEP has the nusw flag.
static LLVM_ABI Type * getTypeAtIndex(Type *Ty, Value *Idx)
Return the type of the element at the given index of an indexable type.
LLVM_ABI bool hasAllZeroIndices() const
Return true if all of the indices of this GEP are zeros.
LLVM_ABI bool hasNoUnsignedWrap() const
Determine whether the GEP has the nuw flag.
LLVM_ABI bool hasAllConstantIndices() const
Return true if all of the indices of this GEP are constant integers.
LLVM_ABI void setIsInBounds(bool b=true)
Set or clear the inbounds flag on this GEP instruction.
static LLVM_ABI Type * getIndexedType(Type *Ty, ArrayRef< Value * > IdxList)
Returns the result type of a getelementptr with the given source element type and indexes.
LLVM_ABI bool accumulateConstantOffset(const DataLayout &DL, APInt &Offset) const
Accumulate the constant address offset of this GEP if possible.
LLVM_ABI GetElementPtrInst * cloneImpl() const
LLVM_ABI bool collectOffset(const DataLayout &DL, unsigned BitWidth, SmallMapVector< Value *, APInt, 4 > &VariableOffsets, APInt &ConstantOffset) const
LLVM_ABI void setNoWrapFlags(GEPNoWrapFlags NW)
Set nowrap flags for GEP instruction.
LLVM_ABI GEPNoWrapFlags getNoWrapFlags() const
Get the nowrap flags for the GEP instruction.
Module * getParent()
Get the module that this global value is contained inside of...
This instruction compares its operands according to the predicate given to the constructor.
ICmpInst(InsertPosition InsertBefore, Predicate pred, Value *LHS, Value *RHS, const Twine &NameStr="")
Constructor with insertion semantics.
static LLVM_ABI bool compare(const APInt &LHS, const APInt &RHS, ICmpInst::Predicate Pred)
Return result of LHS Pred RHS comparison.
LLVM_ABI ICmpInst * cloneImpl() const
Clone an identical ICmpInst.
Predicate getFlippedSignednessPredicate() const
For example, SLT->ULT, ULT->SLT, SLE->ULE, ULE->SLE, EQ->EQ.
Predicate getSignedPredicate() const
For example, EQ->EQ, SLE->SLE, UGT->SGT, etc.
static CmpPredicate getInverseCmpPredicate(CmpPredicate Pred)
bool isEquality() const
Return true if this predicate is either EQ or NE.
static LLVM_ABI Predicate getFlippedSignednessPredicate(Predicate Pred)
For example, SLT->ULT, ULT->SLT, SLE->ULE, ULE->SLE, EQ->EQ.
static LLVM_ABI std::optional< bool > isImpliedByMatchingCmp(CmpPredicate Pred1, CmpPredicate Pred2)
Determine if Pred1 implies Pred2 is true, false, or if nothing can be inferred about the implication,...
Predicate getUnsignedPredicate() const
For example, EQ->EQ, SLE->ULE, UGT->UGT, etc.
Indirect Branch Instruction.
LLVM_ABI void addDestination(BasicBlock *Dest)
Add a destination.
LLVM_ABI void removeDestination(unsigned i)
This method removes the specified successor from the indirectbr instruction.
LLVM_ABI IndirectBrInst * cloneImpl() const
LLVM_ABI InsertElementInst * cloneImpl() const
static InsertElementInst * Create(Value *Vec, Value *NewElt, Value *Idx, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
static LLVM_ABI bool isValidOperands(const Value *Vec, const Value *NewElt, const Value *Idx)
Return true if an insertelement instruction can be formed with the specified operands.
bool isValid() const
Definition Instruction.h:62
BasicBlock * getBasicBlock()
Definition Instruction.h:63
This instruction inserts a struct field or array element value into an aggregate value.
LLVM_ABI InsertValueInst * cloneImpl() const
BitfieldElement::Type getSubclassData() const
LLVM_ABI bool hasNoNaNs() const LLVM_READONLY
Determine whether the no-NaNs flag is set.
LLVM_ABI void copyIRFlags(const Value *V, bool IncludeWrapFlags=true)
Convenience method to copy supported exact, fast-math, and (optionally) wrapping flags from V to this...
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
LLVM_ABI bool isCommutative() const LLVM_READONLY
Return true if the instruction is commutative:
LLVM_ABI InstListType::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
LLVM_ABI const Function * getFunction() const
Return the function this instruction belongs to.
LLVM_ABI void swapProfMetadata()
If the instruction has "branch_weights" MD_prof metadata and the MDNode has three operands (including...
LLVM_ABI bool isVolatile() const LLVM_READONLY
Return true if this instruction has a volatile memory access.
unsigned getOpcode() const
Returns a member of one of the enums like Instruction::Add.
Bitfield::Element< uint16_t, 0, 15 > OpaqueField
Instruction(const Instruction &)=delete
friend class BasicBlock
Various leaf nodes.
void setSubclassData(typename BitfieldElement::Type Value)
This class represents a cast from an integer to a pointer.
LLVM_ABI IntToPtrInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics.
LLVM_ABI IntToPtrInst * cloneImpl() const
Clone an identical IntToPtrInst.
Invoke instruction.
BasicBlock * getUnwindDest() const
void setNormalDest(BasicBlock *B)
LLVM_ABI InvokeInst * cloneImpl() const
LLVM_ABI LandingPadInst * getLandingPadInst() const
Get the landingpad instruction from the landing pad block (the unwind destination).
void setUnwindDest(BasicBlock *B)
LLVM_ABI void updateProfWeight(uint64_t S, uint64_t T)
Updates profile metadata by scaling it by S / T.
static InvokeInst * Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal, BasicBlock *IfException, ArrayRef< Value * > Args, const Twine &NameStr, InsertPosition InsertBefore=nullptr)
This is an important class for using LLVM in a threaded context.
Definition LLVMContext.h:68
LLVMContextImpl *const pImpl
Definition LLVMContext.h:70
The landingpad instruction holds all of the information necessary to generate correct exception handl...
bool isCleanup() const
Return 'true' if this landingpad instruction is a cleanup.
LLVM_ABI LandingPadInst * cloneImpl() const
static LLVM_ABI LandingPadInst * Create(Type *RetTy, unsigned NumReservedClauses, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructors - NumReservedClauses is a hint for the number of incoming clauses that this landingpad w...
LLVM_ABI void addClause(Constant *ClauseVal)
Add a catch or filter clause to the landing pad.
void setCleanup(bool V)
Indicate that this landingpad instruction is a cleanup.
void setAlignment(Align Align)
bool isVolatile() const
Return true if this is a load from a volatile memory location.
void setAtomic(AtomicOrdering Ordering, SyncScope::ID SSID=SyncScope::System)
Sets the ordering constraint and the synchronization scope ID of this load instruction.
LLVM_ABI LoadInst * cloneImpl() const
AtomicOrdering getOrdering() const
Returns the ordering constraint of this load instruction.
void setVolatile(bool V)
Specify whether this is a volatile load or not.
SyncScope::ID getSyncScopeID() const
Returns the synchronization scope ID of this load instruction.
LLVM_ABI LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, InsertPosition InsertBefore)
Align getAlign() const
Return the alignment of the access that is being performed.
Metadata node.
Definition Metadata.h:1080
const MDOperand & getOperand(unsigned I) const
Definition Metadata.h:1444
static MemoryEffectsBase readOnly()
Definition ModRef.h:130
bool onlyWritesMemory() const
Whether this function only (at most) writes memory.
Definition ModRef.h:226
bool doesNotAccessMemory() const
Whether this function accesses no memory.
Definition ModRef.h:220
static MemoryEffectsBase argMemOnly(ModRefInfo MR=ModRefInfo::ModRef)
Definition ModRef.h:140
static MemoryEffectsBase inaccessibleMemOnly(ModRefInfo MR=ModRefInfo::ModRef)
Definition ModRef.h:146
bool onlyAccessesInaccessibleMem() const
Whether this function only (at most) accesses inaccessible memory.
Definition ModRef.h:239
bool onlyAccessesArgPointees() const
Whether this function only (at most) accesses argument memory.
Definition ModRef.h:229
bool onlyReadsMemory() const
Whether this function only (at most) reads memory.
Definition ModRef.h:223
static MemoryEffectsBase writeOnly()
Definition ModRef.h:135
static MemoryEffectsBase inaccessibleOrArgMemOnly(ModRefInfo MR=ModRefInfo::ModRef)
Definition ModRef.h:163
static MemoryEffectsBase none()
Definition ModRef.h:125
bool onlyAccessesInaccessibleOrArgMem() const
Whether this function only (at most) accesses argument and inaccessible memory.
Definition ModRef.h:250
StringRef getTag() const
void allocHungoffUses(unsigned N)
const_block_iterator block_begin() const
LLVM_ABI void removeIncomingValueIf(function_ref< bool(unsigned)> Predicate, bool DeletePHIIfEmpty=true)
Remove all incoming values for which the predicate returns true.
void setIncomingBlock(unsigned i, BasicBlock *BB)
LLVM_ABI Value * removeIncomingValue(unsigned Idx, bool DeletePHIIfEmpty=true)
Remove an incoming value.
LLVM_ABI bool hasConstantOrUndefValue() const
Whether the specified PHI node always merges together the same value, assuming undefs are equal to a ...
void setIncomingValue(unsigned i, Value *V)
const_block_iterator block_end() const
BasicBlock * getIncomingBlock(unsigned i) const
Return incoming basic block number i.
Value * getIncomingValue(unsigned i) const
Return incoming value number i.
LLVM_ABI Value * hasConstantValue() const
If the specified PHI node always merges together the same value, return the value,...
LLVM_ABI PHINode * cloneImpl() const
unsigned getNumIncomingValues() const
Return the number of incoming edges.
Class to represent pointers.
unsigned getAddressSpace() const
Return the address space of the Pointer type.
static LLVM_ABI PoisonValue * get(Type *T)
Static factory methods - Return an 'poison' object of the specified type.
This class represents a cast from a pointer to an address (non-capturing ptrtoint).
PtrToAddrInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics.
PtrToAddrInst * cloneImpl() const
Clone an identical PtrToAddrInst.
This class represents a cast from a pointer to an integer.
LLVM_ABI PtrToIntInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics.
LLVM_ABI PtrToIntInst * cloneImpl() const
Clone an identical PtrToIntInst.
Resume the propagation of an exception.
LLVM_ABI ResumeInst * cloneImpl() const
Return a value (possibly void), from a function.
LLVM_ABI ReturnInst * cloneImpl() const
This class represents a sign extension of integer types.
LLVM_ABI SExtInst * cloneImpl() const
Clone an identical SExtInst.
LLVM_ABI SExtInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics.
This class represents a cast from signed integer to floating point.
LLVM_ABI SIToFPInst * cloneImpl() const
Clone an identical SIToFPInst.
LLVM_ABI SIToFPInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics.
Class to represent scalable SIMD vectors.
LLVM_ABI SelectInst * cloneImpl() const
static LLVM_ABI const char * areInvalidOperands(Value *Cond, Value *True, Value *False)
Return a string if the specified operands are invalid for a select operation, otherwise return null.
static SelectInst * Create(Value *C, Value *S1, Value *S2, const Twine &NameStr="", InsertPosition InsertBefore=nullptr, const Instruction *MDFrom=nullptr)
static LLVM_ABI bool isZeroEltSplatMask(ArrayRef< int > Mask, int NumSrcElts)
Return true if this shuffle mask chooses all elements with the same value as the first element of exa...
ArrayRef< int > getShuffleMask() const
static LLVM_ABI bool isSpliceMask(ArrayRef< int > Mask, int NumSrcElts, int &Index)
Return true if this shuffle mask is a splice mask, concatenating the two inputs together and then ext...
int getMaskValue(unsigned Elt) const
Return the shuffle mask value of this instruction for the given element index.
LLVM_ABI ShuffleVectorInst(Value *V1, Value *Mask, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
static LLVM_ABI bool isValidOperands(const Value *V1, const Value *V2, const Value *Mask)
Return true if a shufflevector instruction can be formed with the specified operands.
static LLVM_ABI bool isSelectMask(ArrayRef< int > Mask, int NumSrcElts)
Return true if this shuffle mask chooses elements from its source vectors without lane crossings.
static LLVM_ABI bool isBitRotateMask(ArrayRef< int > Mask, unsigned EltSizeInBits, unsigned MinSubElts, unsigned MaxSubElts, unsigned &NumSubElts, unsigned &RotateAmt)
Checks if the shuffle is a bit rotation of the first operand across multiple subelements,...
VectorType * getType() const
Overload to return most specific vector type.
LLVM_ABI bool isIdentityWithExtract() const
Return true if this shuffle extracts the first N elements of exactly one source vector.
static LLVM_ABI bool isOneUseSingleSourceMask(ArrayRef< int > Mask, int VF)
Return true if this shuffle mask represents "clustered" mask of size VF, i.e.
LLVM_ABI bool isIdentityWithPadding() const
Return true if this shuffle lengthens exactly one source vector with undefs in the high elements.
static LLVM_ABI bool isSingleSourceMask(ArrayRef< int > Mask, int NumSrcElts)
Return true if this shuffle mask chooses elements from exactly one source vector.
LLVM_ABI bool isConcat() const
Return true if this shuffle concatenates its 2 source vectors.
static LLVM_ABI bool isDeInterleaveMaskOfFactor(ArrayRef< int > Mask, unsigned Factor, unsigned &Index)
Check if the mask is a DE-interleave mask of the given factor Factor like: <Index,...
LLVM_ABI ShuffleVectorInst * cloneImpl() const
static LLVM_ABI bool isIdentityMask(ArrayRef< int > Mask, int NumSrcElts)
Return true if this shuffle mask chooses elements from exactly one source vector without lane crossin...
static LLVM_ABI bool isExtractSubvectorMask(ArrayRef< int > Mask, int NumSrcElts, int &Index)
Return true if this shuffle mask is an extract subvector mask.
LLVM_ABI void setShuffleMask(ArrayRef< int > Mask)
friend class Instruction
Iterator for Instructions in a `BasicBlock`.
LLVM_ABI bool isInterleave(unsigned Factor)
Return if this shuffle interleaves its two input vectors together.
static LLVM_ABI bool isReverseMask(ArrayRef< int > Mask, int NumSrcElts)
Return true if this shuffle mask swaps the order of elements from exactly one source vector.
static LLVM_ABI bool isTransposeMask(ArrayRef< int > Mask, int NumSrcElts)
Return true if this shuffle mask is a transpose mask.
LLVM_ABI void commute()
Swap the operands and adjust the mask to preserve the semantics of the instruction.
static LLVM_ABI bool isInsertSubvectorMask(ArrayRef< int > Mask, int NumSrcElts, int &NumSubElts, int &Index)
Return true if this shuffle mask is an insert subvector mask.
static LLVM_ABI Constant * convertShuffleMaskForBitcode(ArrayRef< int > Mask, Type *ResultTy)
static LLVM_ABI bool isReplicationMask(ArrayRef< int > Mask, int &ReplicationFactor, int &VF)
Return true if this shuffle mask replicates each of the VF elements in a vector ReplicationFactor tim...
static LLVM_ABI bool isInterleaveMask(ArrayRef< int > Mask, unsigned Factor, unsigned NumInputElts, SmallVectorImpl< unsigned > &StartIndexes)
Return true if the mask interleaves one or more input vectors together.
This is a 'bitvector' (really, a variable-sized bit array), optimized for the case when the array is ...
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
reference emplace_back(ArgTypes &&... Args)
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
void resize(size_type N)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
AtomicOrdering getOrdering() const
Returns the ordering constraint of this store instruction.
Align getAlign() const
void setVolatile(bool V)
Specify whether this is a volatile store or not.
void setAlignment(Align Align)
friend class Instruction
Iterator for Instructions in a `BasicBlock`.
LLVM_ABI StoreInst * cloneImpl() const
LLVM_ABI StoreInst(Value *Val, Value *Ptr, InsertPosition InsertBefore)
SyncScope::ID getSyncScopeID() const
Returns the synchronization scope ID of this store instruction.
bool isVolatile() const
Return true if this is a store to a volatile memory location.
void setAtomic(AtomicOrdering Ordering, SyncScope::ID SSID=SyncScope::System)
Sets the ordering constraint and the synchronization scope ID of this store instruction.
StringRef - Represent a constant reference to a string, i.e.
Definition StringRef.h:55
Class to represent struct types.
LLVM_ABI void setSuccessorWeight(unsigned idx, CaseWeightOpt W)
LLVM_ABI Instruction::InstListType::iterator eraseFromParent()
Delegate the call to the underlying SwitchInst::eraseFromParent() and mark this object to not touch t...
LLVM_ABI void addCase(ConstantInt *OnVal, BasicBlock *Dest, CaseWeightOpt W)
Delegate the call to the underlying SwitchInst::addCase() and set the specified branch weight for the...
LLVM_ABI CaseWeightOpt getSuccessorWeight(unsigned idx)
LLVM_ABI void replaceDefaultDest(SwitchInst::CaseIt I)
Replace the default destination by given case.
std::optional< uint32_t > CaseWeightOpt
LLVM_ABI SwitchInst::CaseIt removeCase(SwitchInst::CaseIt I)
Delegate the call to the underlying SwitchInst::removeCase() and remove correspondent branch weight.
void setValue(ConstantInt *V) const
Sets the new value for current case.
void setSuccessor(BasicBlock *S) const
Sets the new successor for current case.
Multiway switch.
void allocHungoffUses(unsigned N)
LLVM_ABI SwitchInst * cloneImpl() const
LLVM_ABI void addCase(ConstantInt *OnVal, BasicBlock *Dest)
Add an entry to the switch instruction.
CaseIteratorImpl< CaseHandle > CaseIt
ConstantInt *const * case_values() const
unsigned getNumCases() const
Return the number of 'cases' in this switch instruction, excluding the default case.
LLVM_ABI CaseIt removeCase(CaseIt I)
This method removes the specified case and its successor from the switch instruction.
This class represents a truncation of integer types.
LLVM_ABI TruncInst * cloneImpl() const
Clone an identical TruncInst.
LLVM_ABI TruncInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition Twine.h:82
static constexpr TypeSize getFixed(ScalarTy ExactSize)
Definition TypeSize.h:343
static constexpr TypeSize get(ScalarTy Quantity, bool Scalable)
Definition TypeSize.h:340
The instances of the Type class are immutable: once they are created, they are never changed.
Definition Type.h:45
bool isVectorTy() const
True if this is an instance of VectorType.
Definition Type.h:273
static LLVM_ABI IntegerType * getInt32Ty(LLVMContext &C)
Definition Type.cpp:296
bool isIntOrIntVectorTy() const
Return true if this is an integer type or a vector of integer types.
Definition Type.h:246
bool isPointerTy() const
True if this is an instance of PointerType.
Definition Type.h:267
LLVM_ABI unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
LLVM_ABI bool isFirstClassType() const
Return true if the type is "first class", meaning it is a valid type for a Value.
Definition Type.cpp:249
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
Definition Type.h:352
LLVM_ABI TypeSize getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
Definition Type.cpp:197
bool isAggregateType() const
Return true if the type is an aggregate type.
Definition Type.h:304
LLVMContext & getContext() const
Return the LLVMContext in which this type was uniqued.
Definition Type.h:128
LLVM_ABI unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
Definition Type.cpp:230
static LLVM_ABI IntegerType * getInt1Ty(LLVMContext &C)
Definition Type.cpp:293
bool isFloatingPointTy() const
Return true if this is one of the floating-point types.
Definition Type.h:184
bool isPtrOrPtrVectorTy() const
Return true if this is a pointer type or a vector of pointer types.
Definition Type.h:270
bool isIntegerTy() const
True if this is an instance of IntegerType.
Definition Type.h:240
bool isTokenTy() const
Return true if this is 'token'.
Definition Type.h:234
bool isFPOrFPVectorTy() const
Return true if this is a FP type or a vector of FP.
Definition Type.h:225
This class represents a cast unsigned integer to floating point.
LLVM_ABI UIToFPInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics.
LLVM_ABI UIToFPInst * cloneImpl() const
Clone an identical UIToFPInst.
UnaryInstruction(Type *Ty, unsigned iType, Value *V, InsertPosition InsertBefore=nullptr)
Definition InstrTypes.h:62
static LLVM_ABI UnaryOperator * Create(UnaryOps Op, Value *S, const Twine &Name=Twine(), InsertPosition InsertBefore=nullptr)
Construct a unary instruction, given the opcode and an operand.
LLVM_ABI UnaryOperator(UnaryOps iType, Value *S, Type *Ty, const Twine &Name, InsertPosition InsertBefore)
LLVM_ABI UnaryOperator * cloneImpl() const
UnaryOps getOpcode() const
Definition InstrTypes.h:154
LLVM_ABI UnreachableInst(LLVMContext &C, InsertPosition InsertBefore=nullptr)
LLVM_ABI bool shouldLowerToTrap(bool TrapUnreachable, bool NoTrapAfterNoreturn) const
friend class Instruction
Iterator for Instructions in a `BasicBlock.
LLVM_ABI UnreachableInst * cloneImpl() const
A Use represents the edge between a Value definition and its users.
Definition Use.h:35
LLVM_ABI void set(Value *Val)
Definition Value.h:905
Use * op_iterator
Definition User.h:254
const Use * getOperandList() const
Definition User.h:200
op_iterator op_begin()
Definition User.h:259
LLVM_ABI void allocHungoffUses(unsigned N, bool WithExtraValues=false)
Allocate the array of Uses, followed by a pointer (with bottom bit set) to the User.
Definition User.cpp:54
const Use & getOperandUse(unsigned i) const
Definition User.h:220
void setNumHungOffUseOperands(unsigned NumOps)
Subclasses with hung off uses need to manage the operand count themselves.
Definition User.h:240
Use & Op()
Definition User.h:171
LLVM_ABI void growHungoffUses(unsigned N, bool WithExtraValues=false)
Grow the number of hung off uses.
Definition User.cpp:71
Value * getOperand(unsigned i) const
Definition User.h:207
unsigned getNumOperands() const
Definition User.h:229
op_iterator op_end()
Definition User.h:261
VAArgInst(Value *List, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
LLVM_ABI VAArgInst * cloneImpl() const
LLVM Value Representation.
Definition Value.h:75
Type * getType() const
All values are typed, get the type of this value.
Definition Value.h:256
LLVM_ABI Value(Type *Ty, unsigned scid)
Definition Value.cpp:53
unsigned char SubclassOptionalData
Hold subclass data that can be dropped.
Definition Value.h:85
LLVM_ABI void setName(const Twine &Name)
Change the name of the value.
Definition Value.cpp:397
LLVM_ABI void replaceAllUsesWith(Value *V)
Change all uses of this to point to a new Value.
Definition Value.cpp:553
LLVMContext & getContext() const
All values hold a context through their type.
Definition Value.h:259
unsigned NumUserOperands
Definition Value.h:109
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
Definition Value.cpp:322
Base class of all SIMD vector types.
ElementCount getElementCount() const
Return an ElementCount instance to represent the (possibly scalable) number of elements in the vector...
static LLVM_ABI VectorType * get(Type *ElementType, ElementCount EC)
This static method is the primary way to construct an VectorType.
This class represents zero extension of integer types.
LLVM_ABI ZExtInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics.
LLVM_ABI ZExtInst * cloneImpl() const
Clone an identical ZExtInst.
constexpr ScalarTy getFixedValue() const
Definition TypeSize.h:200
constexpr ScalarTy getKnownMinValue() const
Returns the minimum value this quantity can represent.
Definition TypeSize.h:165
An efficient, type-erasing, non-owning reference to a callable.
typename base_list_type::iterator iterator
Definition ilist.h:121
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition raw_ostream.h:53
CallInst * Call
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr char Attrs[]
Key for Kernel::Metadata::mAttrs.
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
Definition CallingConv.h:24
@ C
The default llvm calling convention, compatible with C.
Definition CallingConv.h:34
bool match(Val *V, const Pattern &P)
cstfp_pred_ty< is_non_zero_not_denormal_fp > m_NonZeroNotDenormalFP()
Match a floating-point non-zero that is not a denormal.
initializer< Ty > init(const Ty &Val)
@ Switch
The "resume-switch" lowering, where there are separate resume and destroy functions that are shared b...
Definition CoroShape.h:31
std::enable_if_t< detail::IsValidPointer< X, Y >::value, X * > extract(Y &&MD)
Extract a Value from Metadata.
Definition Metadata.h:668
NodeAddr< UseNode * > Use
Definition RDFGraph.h:385
Context & getContext() const
Definition BasicBlock.h:99
This is an optimization pass for GlobalISel generic memory operations.
Definition Types.h:26
@ Offset
Definition DWP.cpp:532
auto seq_inclusive(T Begin, T End)
Iterate over an integral type from Begin to End inclusive.
Definition Sequence.h:325
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1737
auto size(R &&Range, std::enable_if_t< std::is_base_of< std::random_access_iterator_tag, typename std::iterator_traits< decltype(Range.begin())>::iterator_category >::value, void > *=nullptr)
Get the size of a range.
Definition STLExtras.h:1667
unsigned getPointerAddressSpace(const Type *T)
Definition SPIRVUtils.h:367
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:643
FunctionAddr VTableAddr uintptr_t uintptr_t Int32Ty
Definition InstrProf.h:296
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
LLVM_ABI MDNode * getBranchWeightMDNode(const Instruction &I)
Get the branch weights metadata node.
MemoryEffectsBase< IRMemLocation > MemoryEffects
Summary of how a function affects memory in the program.
Definition ModRef.h:301
constexpr auto equal_to(T &&Arg)
Functor variant of std::equal_to that can be used as a UnaryPredicate in functional algorithms like a...
Definition STLExtras.h:2163
std::enable_if_t< std::is_unsigned_v< T >, std::optional< T > > checkedMulUnsigned(T LHS, T RHS)
Multiply two unsigned integers LHS and RHS.
auto dyn_cast_or_null(const Y &Val)
Definition Casting.h:753
auto reverse(ContainerTy &&C)
Definition STLExtras.h:406
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
Definition MathExtras.h:279
decltype(auto) get(const PointerIntPair< PointerTy, IntBits, IntType, PtrTraits, Info > &Pair)
FPClassTest
Floating-point class tests, supported by 'is_fpclass' intrinsic.
LLVM_ABI bool NullPointerIsDefined(const Function *F, unsigned AS=0)
Check whether null pointer dereferencing is considered undefined behavior for a given function or an ...
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition Debug.cpp:207
bool isPointerTy(const Type *T)
Definition SPIRVUtils.h:361
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
Definition Casting.h:547
constexpr int PoisonMaskElem
LLVM_ABI unsigned getNumBranchWeights(const MDNode &ProfileData)
AtomicOrdering
Atomic ordering for LLVM's memory model.
OperandBundleDefT< Value * > OperandBundleDef
Definition AutoUpgrade.h:34
@ Mul
Product of integers.
@ Xor
Bitwise or logical XOR of integers.
@ FMul
Product of floats.
@ Sub
Subtraction of integers.
@ Add
Sum of integers.
@ FAdd
Sum of floats.
DWARFExpression::Operation Op
raw_ostream & operator<<(raw_ostream &OS, const APFixedPoint &FX)
OutputIt copy(R &&Range, OutputIt Out)
Definition STLExtras.h:1883
constexpr unsigned BitWidth
LLVM_ABI bool extractBranchWeights(const MDNode *ProfileData, SmallVectorImpl< uint32_t > &Weights)
Extract branch weights from MD_prof metadata.
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:559
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Definition STLExtras.h:1945
bool capturesAnything(CaptureComponents CC)
Definition ModRef.h:324
bool all_equal(std::initializer_list< T > Values)
Returns true if all Values in the initializer lists are equal or the list.
Definition STLExtras.h:2156
auto seq(T Begin, T End)
Iterate over an integral type from Begin up to - but not including - End.
Definition Sequence.h:305
@ Default
The result values are uniform if and only if all operands are uniform.
Definition Uniformity.h:20
LLVM_ABI void scaleProfData(Instruction &I, uint64_t S, uint64_t T)
Scaling the profile data attached to 'I' using the ratio of S/T.
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition Alignment.h:39
Summary of memprof metadata on allocations.
Used to keep track of an operand bundle.
uint32_t End
The index in the Use& vector where operands for this operand bundle ends.
uint32_t Begin
The index in the Use& vector where operands for this operand bundle starts.
Incoming for lane maks phi as machine instruction, incoming register Reg and incoming block Block are...
static LLVM_ABI std::optional< bool > eq(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_EQ result.
static LLVM_ABI std::optional< bool > ne(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_NE result.
static LLVM_ABI std::optional< bool > sge(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_SGE result.
static LLVM_ABI std::optional< bool > ugt(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_UGT result.
static LLVM_ABI std::optional< bool > slt(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_SLT result.
static LLVM_ABI std::optional< bool > ult(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_ULT result.
static LLVM_ABI std::optional< bool > ule(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_ULE result.
static LLVM_ABI std::optional< bool > sle(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_SLE result.
static LLVM_ABI std::optional< bool > sgt(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_SGT result.
static LLVM_ABI std::optional< bool > uge(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_UGE result.
Matching combinators.
A MapVector that performs no allocations if smaller than a certain size.
Definition MapVector.h:276
Indicates this User has operands co-allocated.
Definition User.h:60
Indicates this User has operands and a descriptor co-allocated .
Definition User.h:66