LLVM 22.0.0git
Instructions.cpp
Go to the documentation of this file.
1//===- Instructions.cpp - Implement the LLVM instructions -----------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file implements all of the non-inline methods for the LLVM instruction
10// classes.
11//
12//===----------------------------------------------------------------------===//
13
15#include "LLVMContextImpl.h"
18#include "llvm/ADT/Twine.h"
19#include "llvm/IR/Attributes.h"
20#include "llvm/IR/BasicBlock.h"
21#include "llvm/IR/Constant.h"
23#include "llvm/IR/Constants.h"
24#include "llvm/IR/DataLayout.h"
26#include "llvm/IR/Function.h"
27#include "llvm/IR/InstrTypes.h"
28#include "llvm/IR/Instruction.h"
29#include "llvm/IR/Intrinsics.h"
30#include "llvm/IR/LLVMContext.h"
31#include "llvm/IR/MDBuilder.h"
32#include "llvm/IR/Metadata.h"
33#include "llvm/IR/Module.h"
34#include "llvm/IR/Operator.h"
37#include "llvm/IR/Type.h"
38#include "llvm/IR/Value.h"
46#include "llvm/Support/ModRef.h"
48#include <algorithm>
49#include <cassert>
50#include <cstdint>
51#include <optional>
52#include <vector>
53
54using namespace llvm;
55
// Command-line knob that disables folding of inttoptr(ptrtoint X) -> X.
// NOTE(review): the declarator line (orig. 56 — presumably a static
// cl::opt<bool> named DisableI2pP2iOpt) was lost in extraction.
57 "disable-i2p-p2i-opt", cl::init(false),
58 cl::desc("Disables inttoptr/ptrtoint roundtrip optimization"));
59
60//===----------------------------------------------------------------------===//
61// AllocaInst Class
62//===----------------------------------------------------------------------===//
63
// AllocaInst::getAllocationSize — total byte size of the allocation; for an
// array alloca the element size is multiplied (overflow-checked) by the
// constant array count, returning nullopt on non-constant count or overflow.
// NOTE(review): the signature line (orig. 65) and the line obtaining the
// array size as a ConstantInt `C` (orig. 68) were lost in extraction.
64std::optional<TypeSize>
66 TypeSize Size = DL.getTypeAllocSize(getAllocatedType());
67 if (isArrayAllocation()) {
69 if (!C)
70 return std::nullopt;
71 assert(!Size.isScalable() && "Array elements cannot have a scalable size");
72 auto CheckedProd =
73 checkedMulUnsigned(Size.getKnownMinValue(), C->getZExtValue());
74 if (!CheckedProd)
75 return std::nullopt;
76 return TypeSize::getFixed(*CheckedProd);
77 }
78 return Size;
79}
80
// AllocaInst::getAllocationSizeInBits — byte size scaled to bits, with the
// multiply-by-8 also overflow-checked; preserves scalability of the size.
// NOTE(review): the signature line (orig. 82) was lost in extraction.
81std::optional<TypeSize>
83 std::optional<TypeSize> Size = getAllocationSize(DL);
84 if (!Size)
85 return std::nullopt;
86 auto CheckedProd = checkedMulUnsigned(Size->getKnownMinValue(),
87 static_cast<TypeSize::ScalarTy>(8));
88 if (!CheckedProd)
89 return std::nullopt;
90 return TypeSize::get(*CheckedProd, Size->isScalable());
91}
92
93//===----------------------------------------------------------------------===//
94// SelectInst Class
95//===----------------------------------------------------------------------===//
96
97/// areInvalidOperands - Return a string if the specified operands are invalid
98/// for a select operation, otherwise return null.
// Validates: value types match and are not token; for a vector condition the
// selected values must be vectors of the same element count.
// NOTE(review): the line defining `ET` (orig. 110, presumably
// dyn_cast<VectorType>(Op1->getType())) was lost in extraction.
99const char *SelectInst::areInvalidOperands(Value *Op0, Value *Op1, Value *Op2) {
100 if (Op1->getType() != Op2->getType())
101 return "both values to select must have same type";
102
103 if (Op1->getType()->isTokenTy())
104 return "select values cannot have token type";
105
106 if (VectorType *VT = dyn_cast<VectorType>(Op0->getType())) {
107 // Vector select.
108 if (VT->getElementType() != Type::getInt1Ty(Op0->getContext()))
109 return "vector select condition element type must be i1";
111 if (!ET)
112 return "selected values for vector select must be vectors";
113 if (ET->getElementCount() != VT->getElementCount())
114 return "vector select requires selected vectors to have "
115 "the same vector length as select condition";
116 } else if (Op0->getType() != Type::getInt1Ty(Op0->getContext())) {
117 return "select condition must be i1 or <n x i1>";
118 }
119 return nullptr;
120}
121
122//===----------------------------------------------------------------------===//
123// PHINode Class
124//===----------------------------------------------------------------------===//
125
// Copy constructor: duplicates operands and incoming-block list of PN.
// NOTE(review): lines orig. 129-130 and 133 (operand allocation and
// SubclassOptionalData copy, presumably) were lost in extraction.
126PHINode::PHINode(const PHINode &PN)
127 : Instruction(PN.getType(), Instruction::PHI, AllocMarker),
128 ReservedSpace(PN.getNumOperands()) {
131 std::copy(PN.op_begin(), PN.op_end(), op_begin());
132 copyIncomingBlocks(make_range(PN.block_begin(), PN.block_end()));
134}
135
136// removeIncomingValue - Remove an incoming value. This is useful if a
137// predecessor basic block is deleted.
// Removes entry Idx by swapping it with the last entry, then shrinking.
// NOTE(review): lines orig. 143-144 (the swap of value/block), 149
// (operand-count decrement), and 154-155 (RAUW + eraseFromParent for the
// empty-PHI case) were lost in extraction.
138Value *PHINode::removeIncomingValue(unsigned Idx, bool DeletePHIIfEmpty) {
139 Value *Removed = getIncomingValue(Idx);
140 // Swap with the end of the list.
141 unsigned Last = getNumOperands() - 1;
142 if (Idx != Last) {
145 }
146
147 // Nuke the last value.
148 Op<-1>().set(nullptr);
150
151 // If the PHI node is dead, because it has zero entries, nuke it now.
152 if (getNumOperands() == 0 && DeletePHIIfEmpty) {
153 // If anyone is using this PHI, make them use a dummy value instead...
156 }
157 return Removed;
158}
159
// Bulk-removes every incoming entry whose index satisfies Predicate,
// compacting the survivors in place, then nulling the tail operands.
// NOTE(review): lines orig. 187-188 (RAUW with a dummy value and
// eraseFromParent for the empty-PHI case, presumably) were lost in
// extraction.
160void PHINode::removeIncomingValueIf(function_ref<bool(unsigned)> Predicate,
161 bool DeletePHIIfEmpty) {
162 unsigned NumOps = getNumIncomingValues();
163 unsigned NewNumOps = 0;
164 for (unsigned Idx = 0; Idx < NumOps; ++Idx) {
165 if (Predicate(Idx))
166 continue;
167
// Compact: move kept entry down into the next free slot.
168 if (Idx != NewNumOps) {
169 setIncomingValue(NewNumOps, getIncomingValue(Idx));
170 setIncomingBlock(NewNumOps, getIncomingBlock(Idx));
171 }
172 ++NewNumOps;
173 }
174
175 if (NewNumOps == NumOps)
176 return;
177
178 // Remove operands.
179 for (unsigned Idx = NewNumOps; Idx < NumOps; ++Idx)
180 getOperandUse(Idx).set(nullptr);
181
182 setNumHungOffUseOperands(NewNumOps);
183
184 // If the PHI node is dead, because it has zero entries, nuke it now.
185 if (getNumOperands() == 0 && DeletePHIIfEmpty) {
186 // If anyone is using this PHI, make them use a dummy value instead...
189 }
190}
191
192/// growOperands - grow operands - This grows the operand list in response
193/// to a push_back style of operation. This grows the number of ops by 1.5
194/// times.
195///
196void PHINode::growOperands() {
197 unsigned e = getNumOperands();
198 unsigned NumOps = e + e / 2;
199 if (NumOps < 2) NumOps = 2; // 2 op PHI nodes are VERY common.
200
201 ReservedSpace = NumOps;
202 growHungoffUses(ReservedSpace, /*WithExtraValues=*/true);
203}
204
205/// hasConstantValue - If the specified PHI node always merges together the same
206/// value, return the value, otherwise return null.
// Scans entries 1..e, treating self-references as neutral; returns poison
// when every incoming value is this PHI itself.
// NOTE(review): the signature line (orig. 207) was lost in extraction.
209 Value *ConstantValue = getIncomingValue(0);
210 for (unsigned i = 1, e = getNumIncomingValues(); i != e; ++i)
211 if (getIncomingValue(i) != ConstantValue && getIncomingValue(i) != this) {
212 if (ConstantValue != this)
213 return nullptr; // Incoming values not all the same.
214 // The case where the first value is this PHI.
215 ConstantValue = getIncomingValue(i);
216 }
217 if (ConstantValue == this)
218 return PoisonValue::get(getType());
219 return ConstantValue;
220}
221
222/// hasConstantOrUndefValue - Whether the specified PHI node always merges
223/// together the same value, assuming that undefs result in the same value as
224/// non-undefs.
225/// Unlike \ref hasConstantValue, this does not return a value because the
226/// unique non-undef incoming value need not dominate the PHI node.
// NOTE(review): the signature line (orig. 227) and the line defining
// `Incoming` (orig. 230, presumably getIncomingValue(i)) were lost in
// extraction.
228 Value *ConstantValue = nullptr;
229 for (unsigned i = 0, e = getNumIncomingValues(); i != e; ++i) {
231 if (Incoming != this && !isa<UndefValue>(Incoming)) {
232 if (ConstantValue && ConstantValue != Incoming)
233 return false;
234 ConstantValue = Incoming;
235 }
236 }
237 return true;
238}
239
240//===----------------------------------------------------------------------===//
241// LandingPadInst Implementation
242//===----------------------------------------------------------------------===//
243
// Constructs a landingpad with space reserved for NumReservedValues clauses;
// all real setup is delegated to init().
244LandingPadInst::LandingPadInst(Type *RetTy, unsigned NumReservedValues,
245 const Twine &NameStr,
246 InsertPosition InsertBefore)
247 : Instruction(RetTy, Instruction::LandingPad, AllocMarker, InsertBefore) {
248 init(NumReservedValues, NameStr);
249}
250
// Copy constructor: clones LP's clause operands and its cleanup flag.
// NOTE(review): lines orig. 254-255 (operand-count setup and hung-off use
// allocation, presumably) were lost in extraction.
251LandingPadInst::LandingPadInst(const LandingPadInst &LP)
252 : Instruction(LP.getType(), Instruction::LandingPad, AllocMarker),
253 ReservedSpace(LP.getNumOperands()) {
256 Use *OL = getOperandList();
257 const Use *InOL = LP.getOperandList();
258 for (unsigned I = 0, E = ReservedSpace; I != E; ++I)
259 OL[I] = InOL[I];
260
261 setCleanup(LP.isCleanup());
262}
263
// Factory wrapper around the reserving constructor above.
264LandingPadInst *LandingPadInst::Create(Type *RetTy, unsigned NumReservedClauses,
265 const Twine &NameStr,
266 InsertPosition InsertBefore) {
267 return new LandingPadInst(RetTy, NumReservedClauses, NameStr, InsertBefore);
268}
269
// Reserves clause slots, names the instruction, and defaults to non-cleanup.
// NOTE(review): line orig. 272 (presumably setNumHungOffUseOperands(0)) was
// lost in extraction.
270void LandingPadInst::init(unsigned NumReservedValues, const Twine &NameStr) {
271 ReservedSpace = NumReservedValues;
273 allocHungoffUses(ReservedSpace);
274 setName(NameStr);
275 setCleanup(false);
276}
277
278/// growOperands - grow operands - This grows the operand list in response to a
279/// push_back style of operation. This grows the number of ops by 2 times.
280void LandingPadInst::growOperands(unsigned Size) {
281 unsigned e = getNumOperands();
282 if (ReservedSpace >= e + Size) return;
283 ReservedSpace = (std::max(e, 1U) + Size / 2) * 2;
284 growHungoffUses(ReservedSpace);
285}
286
// Appends a catch/filter clause, growing storage as needed.
// NOTE(review): the signature line (orig. 287, presumably
// void LandingPadInst::addClause(Constant *Val)) and the operand-count
// increment line (orig. 291) were lost in extraction.
288 unsigned OpNo = getNumOperands();
289 growOperands(1);
290 assert(OpNo < ReservedSpace && "Growing didn't work!");
292 getOperandList()[OpNo] = Val;
293}
294
295//===----------------------------------------------------------------------===//
296// CallBase Implementation
297//===----------------------------------------------------------------------===//
298
// CallBase::Create — clone CB as the matching subclass (call/invoke/callbr)
// with the given bundle set. NOTE(review): the signature line (orig. 299)
// was lost in extraction.
300 InsertPosition InsertPt) {
301 switch (CB->getOpcode()) {
302 case Instruction::Call:
303 return CallInst::Create(cast<CallInst>(CB), Bundles, InsertPt);
304 case Instruction::Invoke:
305 return InvokeInst::Create(cast<InvokeInst>(CB), Bundles, InsertPt);
306 case Instruction::CallBr:
307 return CallBrInst::Create(cast<CallBrInst>(CB), Bundles, InsertPt);
308 default:
309 llvm_unreachable("Unknown CallBase sub-class!");
310 }
311}
312
// Overload that replaces/appends a single bundle: keeps every existing
// bundle with a different tag, then adds OpB last.
// NOTE(review): lines orig. 313 (signature) and 315 (the OpDefs vector
// declaration) were lost in extraction.
314 InsertPosition InsertPt) {
316 for (unsigned i = 0, e = CI->getNumOperandBundles(); i < e; ++i) {
317 auto ChildOB = CI->getOperandBundleAt(i);
318 if (ChildOB.getTagName() != OpB.getTag())
319 OpDefs.emplace_back(ChildOB);
320 }
321 OpDefs.emplace_back(OpB);
322 return CallBase::Create(CI, OpDefs, InsertPt);
323}
324
// The following region is heavily fragmented by extraction; each fragment is
// annotated with its apparent function. Signature lines are mostly missing.
326
// getNumSubclassExtraOperandsDynamic (signature orig. 327 lost): callbr
// carries one extra operand per indirect destination plus the default dest.
328 assert(getOpcode() == Instruction::CallBr && "Unexpected opcode!");
329 return cast<CallBrInst>(this)->getNumIndirectDests() + 1;
330}
331
// isIndirectCall (signature orig. 332 lost): direct callees (Function or
// other Constant) and inline asm are not indirect.
333 const Value *V = getCalledOperand();
334 if (isa<Function>(V) || isa<Constant>(V))
335 return false;
336 return !isInlineAsm();
337}
338
339/// Tests if this call site must be tail call optimized. Only a CallInst can
340/// be tail call optimized.
// (signature orig. 341 lost)
342 if (auto *CI = dyn_cast<CallInst>(this))
343 return CI->isMustTailCall();
344 return false;
345}
346
347/// Tests if this call site is marked as a tail call.
// (signature orig. 348 lost)
349 if (auto *CI = dyn_cast<CallInst>(this))
350 return CI->isTailCall();
351 return false;
352}
353
// getIntrinsicID fragment (orig. 354-355, 357 lost): presumably returns
// F->getIntrinsicID() for a direct Function callee, not_intrinsic otherwise.
356 return F->getIntrinsicID();
358}
359
// getRetNoFPClass (signature orig. 360 lost): union of the call-site and
// callee nofpclass return masks.
361 FPClassTest Mask = Attrs.getRetNoFPClass();
362
363 if (const Function *F = getCalledFunction())
364 Mask |= F->getAttributes().getRetNoFPClass();
365 return Mask;
366}
367
// getParamNoFPClass (signature orig. 368 lost): same union for parameter i.
369 FPClassTest Mask = Attrs.getParamNoFPClass(i);
370
371 if (const Function *F = getCalledFunction())
372 Mask |= F->getAttributes().getParamNoFPClass(i);
373 return Mask;
374}
375
// Range attribute from call-site and callee, intersected when both exist.
// NOTE(review): line orig. 378 (declaration of FnAttr, presumably
// `Attribute FnAttr;`) was lost in extraction.
376std::optional<ConstantRange> CallBase::getRange() const {
377 Attribute CallAttr = Attrs.getRetAttr(Attribute::Range);
379 if (const Function *F = getCalledFunction())
380 FnAttr = F->getRetAttribute(Attribute::Range);
381
382 if (CallAttr.isValid() && FnAttr.isValid())
383 return CallAttr.getRange().intersectWith(FnAttr.getRange());
384 if (CallAttr.isValid())
385 return CallAttr.getRange();
386 if (FnAttr.isValid())
387 return FnAttr.getRange();
388 return std::nullopt;
389}
390
// isReturnNonNull (signature orig. 391 lost): nonnull attribute, or
// dereferenceable bytes in an address space where null is not a valid
// pointer. NOTE(review): the condition line orig. 396 was lost.
392 if (hasRetAttr(Attribute::NonNull))
393 return true;
394
395 if (getRetDereferenceableBytes() > 0 &&
397 return true;
398
399 return false;
400}
401
// Attribute-carrying-argument lookup (signature orig. 402 lost): finds the
// argument operand that carries Kind on either the call site or the callee.
403 unsigned Index;
404
405 if (Attrs.hasAttrSomewhere(Kind, &Index))
406 return getArgOperand(Index - AttributeList::FirstArgIndex);
407 if (const Function *F = getCalledFunction())
408 if (F->getAttributes().hasAttrSomewhere(Kind, &Index))
409 return getArgOperand(Index - AttributeList::FirstArgIndex);
410
411 return nullptr;
412}
413
414/// Determine whether the argument or parameter has the given attribute.
415bool CallBase::paramHasAttr(unsigned ArgNo, Attribute::AttrKind Kind) const {
416 assert(ArgNo < arg_size() && "Param index out of bounds!");
417
418 if (Attrs.hasParamAttr(ArgNo, Kind))
419 return true;
420
421 const Function *F = getCalledFunction();
422 if (!F)
423 return false;
424
425 if (!F->getAttributes().hasParamAttr(ArgNo, Kind))
426 return false;
427
428 // Take into account mod/ref by operand bundles.
429 switch (Kind) {
430 case Attribute::ReadNone:
432 case Attribute::ReadOnly:
434 case Attribute::WriteOnly:
435 return !hasReadingOperandBundles();
436 default:
437 return true;
438 }
439}
440
442 bool AllowUndefOrPoison) const {
444 "Argument must be a pointer");
445 if (paramHasAttr(ArgNo, Attribute::NonNull) &&
446 (AllowUndefOrPoison || paramHasAttr(ArgNo, Attribute::NoUndef)))
447 return true;
448
449 if (paramHasAttr(ArgNo, Attribute::Dereferenceable) &&
451 getCaller(),
453 return true;
454
455 return false;
456}
457
// Enum-kind overload: forwards to the callee's function attributes.
// NOTE(review): line orig. 459 (the `if (const Function *F = ...)` guard)
// was lost in extraction.
458bool CallBase::hasFnAttrOnCalledFunction(Attribute::AttrKind Kind) const {
460 return F->getAttributes().hasFnAttr(Kind);
461
462 return false;
463}
464
// String-kind overload; same shape. NOTE(review): line orig. 466 (the
// Function guard) was lost in extraction.
465bool CallBase::hasFnAttrOnCalledFunction(StringRef Kind) const {
467 return F->getAttributes().hasFnAttr(Kind);
468
469 return false;
470}
471
// Fetches a function attribute from the direct callee, if any.
// NOTE(review): line orig. 480 (the Function guard) was lost in extraction.
472template <typename AK>
473Attribute CallBase::getFnAttrOnCalledFunction(AK Kind) const {
474 if constexpr (std::is_same_v<AK, Attribute::AttrKind>) {
475 // getMemoryEffects() correctly combines memory effects from the call-site,
476 // operand bundles and function.
477 assert(Kind != Attribute::Memory && "Use getMemoryEffects() instead");
478 }
479
481 return F->getAttributes().getFnAttr(Kind);
482
483 return Attribute();
484}
485
// Explicit instantiations for the two attribute-kind flavors.
486template LLVM_ABI Attribute
487CallBase::getFnAttrOnCalledFunction(Attribute::AttrKind Kind) const;
488template LLVM_ABI Attribute
489CallBase::getFnAttrOnCalledFunction(StringRef Kind) const;
490
// Parameter-attribute analogue. NOTE(review): line orig. 494 (presumably
// `Value *V = getCalledOperand();`) was lost in extraction.
491template <typename AK>
492Attribute CallBase::getParamAttrOnCalledFunction(unsigned ArgNo,
493 AK Kind) const {
495
496 if (auto *F = dyn_cast<Function>(V))
497 return F->getAttributes().getParamAttr(ArgNo, Kind);
498
499 return Attribute();
500}
501template LLVM_ABI Attribute CallBase::getParamAttrOnCalledFunction(
502 unsigned ArgNo, Attribute::AttrKind Kind) const;
503template LLVM_ABI Attribute
504CallBase::getParamAttrOnCalledFunction(unsigned ArgNo, StringRef Kind) const;
505
// getOperandBundlesAsDefs fragment (signature orig. 506-507 and loop body
// orig. 509 lost): copies each attached bundle out as an OperandBundleDef.
508 for (unsigned i = 0, e = getNumOperandBundles(); i != e; ++i)
510}
511
// populateBundleOperandInfos: copies bundle inputs into the operand list
// starting at BeginIndex and records per-bundle [Begin, End) index ranges
// plus interned tags. Returns the iterator one past the last bundle input.
// NOTE(review): the signature lines (orig. 512-513) were lost in extraction.
514 const unsigned BeginIndex) {
515 auto It = op_begin() + BeginIndex;
516 for (auto &B : Bundles)
517 It = std::copy(B.input_begin(), B.input_end(), It);
518
519 auto *ContextImpl = getContext().pImpl;
520 auto BI = Bundles.begin();
521 unsigned CurrentIndex = BeginIndex;
522
523 for (auto &BOI : bundle_op_infos()) {
524 assert(BI != Bundles.end() && "Incorrect allocation?");
525
526 BOI.Tag = ContextImpl->getOrInsertBundleTag(BI->getTag());
527 BOI.Begin = CurrentIndex;
528 BOI.End = CurrentIndex + BI->input_size();
529 CurrentIndex = BOI.End;
530 BI++;
531 }
532
533 assert(BI == Bundles.end() && "Incorrect allocation?");
534
535 return It;
536}
537
// getBundleOpInfoForOperand (signature orig. 538 lost): maps an operand
// index to its owning bundle's BundleOpInfo.
539 /// When there isn't many bundles, we do a simple linear search.
540 /// Else fallback to a binary-search that use the fact that bundles usually
541 /// have similar number of argument to get faster convergence.
// NOTE(review): the small-bundle-count guard (orig. 542) was lost in
// extraction; the following loop is the linear-search arm.
543 for (auto &BOI : bundle_op_infos())
544 if (BOI.Begin <= OpIdx && OpIdx < BOI.End)
545 return BOI;
546
547 llvm_unreachable("Did not find operand bundle for operand!");
548 }
549
550 assert(OpIdx >= arg_size() && "the Idx is not in the operand bundles");
552 OpIdx < std::prev(bundle_op_info_end())->End &&
553 "The Idx isn't in the operand bundle");
554
555 /// We need a decimal number below and to prevent using floating point numbers
556 /// we use an intergal value multiplied by this constant.
557 constexpr unsigned NumberScaling = 1024;
558
// Interpolation search over the bundle-info array.
// NOTE(review): lines orig. 559-560 (Begin/End iterator initialization from
// bundle_op_info_begin()/end(), presumably) were lost in extraction.
561 bundle_op_iterator Current = Begin;
562
563 while (Begin != End) {
564 unsigned ScaledOperandPerBundle =
565 NumberScaling * (std::prev(End)->End - Begin->Begin) / (End - Begin);
566 Current = Begin + (((OpIdx - Begin->Begin) * NumberScaling) /
567 ScaledOperandPerBundle);
568 if (Current >= End)
569 Current = std::prev(End);
570 assert(Current < End && Current >= Begin &&
571 "the operand bundle doesn't cover every value in the range");
572 if (OpIdx >= Current->Begin && OpIdx < Current->End)
573 break;
574 if (OpIdx >= Current->End)
575 Begin = Current + 1;
576 else
577 End = Current;
578 }
579
580 assert(OpIdx >= Current->Begin && OpIdx < Current->End &&
581 "the operand bundle doesn't cover every value in the range");
582 return *Current;
583}
584
// addOperandBundle (signature orig. 585-586 and bundle-declaration line
// orig. 591 lost): returns CB unchanged when the bundle is already present;
// otherwise rebuilds the call with OB appended.
587 InsertPosition InsertPt) {
588 if (CB->getOperandBundle(ID))
589 return CB;
590
592 CB->getOperandBundlesAsDefs(Bundles);
593 Bundles.push_back(OB);
594 return Create(CB, Bundles, InsertPt);
595}
596
// removeOperandBundle (signature orig. 597 and vector declaration orig. 599
// lost): drops every bundle tagged ID, rebuilding only if one was found.
598 InsertPosition InsertPt) {
600 bool CreateNew = false;
601
602 for (unsigned I = 0, E = CB->getNumOperandBundles(); I != E; ++I) {
603 auto Bundle = CB->getOperandBundleAt(I);
604 if (Bundle.getTagID() == ID) {
605 CreateNew = true;
606 continue;
607 }
608 Bundles.emplace_back(Bundle);
609 }
610
611 return CreateNew ? Create(CB, Bundles, InsertPt) : CB;
612}
613
// hasReadingOperandBundles fragment (opening lines orig. 614, 618-621 lost).
615 // Implementation note: this is a conservative implementation of operand
616 // bundle semantics, where *any* non-assume operand bundle (other than
617 // ptrauth) forces a callsite to be at least readonly.
622 getIntrinsicID() != Intrinsic::assume;
623}
// hasClobberingOperandBundles — body (orig. 624-632) lost in extraction.
633
// getMemoryEffects (signature orig. 634 lost): intersects call-site effects
// with the callee's, widened for operand bundles and volatility.
// NOTE(review): the condition lines orig. 640/642 (hasReadingOperandBundles/
// hasClobberingOperandBundles guards, presumably) and orig. 647 (the
// inaccessible-memory OR-in) were lost in extraction.
635 MemoryEffects ME = getAttributes().getMemoryEffects();
636 if (auto *Fn = dyn_cast<Function>(getCalledOperand())) {
637 MemoryEffects FnME = Fn->getMemoryEffects();
638 if (hasOperandBundles()) {
639 // TODO: Add a method to get memory effects for operand bundles instead.
641 FnME |= MemoryEffects::readOnly();
643 FnME |= MemoryEffects::writeOnly();
644 }
645 if (isVolatile()) {
646 // Volatile operations also access inaccessible memory.
648 }
649 ME &= FnME;
650 }
651 return ME;
652}
// NOTE(review): the bodies of the doesNotAccessMemory/onlyReadsMemory/
// onlyWritesMemory/onlyAccessesArgMemory/onlyAccessesInaccessibleMemory
// getter+setter pairs were lost in extraction — only their doc comments
// survive below. Restore the one-line wrappers from upstream.
656
657/// Determine if the function does not access memory.
664
665/// Determine if the function does not access or only reads memory.
672
673/// Determine if the function does not access or only writes memory.
680
681/// Determine if the call can access memory only using pointers based
682/// on its arguments.
689
690/// Determine if the function may only access memory that is
691/// inaccessible from the IR.
698
699/// Determine if the function may only access memory that is
700/// either inaccessible from the IR or pointed to by its arguments.
708
// getCaptureInfo (signature orig. 709 lost): capture semantics for operand
// OpNo — byval args and assume/deopt bundle operands never capture.
// NOTE(review): line orig. 716 (initialization of CI from the call-site
// param attributes, presumably) was lost in extraction.
710 if (OpNo < arg_size()) {
711 // If the argument is passed byval, the callee does not have access to the
712 // original pointer and thus cannot capture it.
713 if (isByValArgument(OpNo))
714 return CaptureInfo::none();
715
717 if (auto *Fn = dyn_cast<Function>(getCalledOperand()))
718 CI &= Fn->getAttributes().getParamAttrs(OpNo).getCaptureInfo();
719 return CI;
720 }
721
722 // Bundles on assumes are captures(none).
723 if (getIntrinsicID() == Intrinsic::assume)
724 return CaptureInfo::none();
725
726 // deopt operand bundles are captures(none)
727 auto &BOI = getBundleOpInfoForOperand(OpNo);
728 auto OBU = operandBundleFromBundleOpInfo(BOI);
729 return OBU.isDeoptOperandBundle() ? CaptureInfo::none() : CaptureInfo::all();
730}
731
// Argument scan for extra return-capture components (signature orig. 732,
// the per-arg pointer/type guard orig. 734, CI declaration orig. 737, and
// the final condition orig. 740 were lost in extraction).
733 for (unsigned I = 0, E = arg_size(); I < E; ++I) {
735 continue;
736
738 if (auto *Fn = dyn_cast<Function>(getCalledOperand()))
739 CI &= Fn->getAttributes().getParamAttrs(I).getCaptureInfo();
741 return true;
742 }
743 return false;
744}
745
746//===----------------------------------------------------------------------===//
747// CallInst Implementation
748//===----------------------------------------------------------------------===//
749
// Initializes a call whose operand storage was already allocated: operands
// are laid out as [args..., bundle inputs..., callee], and the arg count
// must match FTy (exactly, or at least for vararg callees).
750void CallInst::init(FunctionType *FTy, Value *Func, ArrayRef<Value *> Args,
751 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr) {
752 this->FTy = FTy;
753 assert(getNumOperands() == Args.size() + CountBundleInputs(Bundles) + 1 &&
754 "NumOperands not set up?");
755
756#ifndef NDEBUG
757 assert((Args.size() == FTy->getNumParams() ||
758 (FTy->isVarArg() && Args.size() > FTy->getNumParams())) &&
759 "Calling a function with bad signature!");
760
761 for (unsigned i = 0; i != Args.size(); ++i)
762 assert((i >= FTy->getNumParams() ||
763 FTy->getParamType(i) == Args[i]->getType()) &&
764 "Calling a function with a bad signature!");
765#endif
766
767 // Set operands in order of their index to match use-list-order
768 // prediction.
769 llvm::copy(Args, op_begin());
770 setCalledOperand(Func);
771
// Bundle inputs sit between the args and the callee; the returned iterator
// must land exactly on the callee slot.
772 auto It = populateBundleOperandInfos(Bundles, Args.size());
773 (void)It;
774 assert(It + 1 == op_end() && "Should add up!");
775
776 setName(NameStr);
777}
778
// Zero-argument initializer: the only operand is the callee itself.
779void CallInst::init(FunctionType *FTy, Value *Func, const Twine &NameStr) {
780 this->FTy = FTy;
781 assert(getNumOperands() == 1 && "NumOperands not set up?");
782 setCalledOperand(Func);
783
784 assert(FTy->getNumParams() == 0 && "Calling a function with bad signature");
785
786 setName(NameStr);
787}
788
// No-argument call constructor; delegates to the initializer above.
789CallInst::CallInst(FunctionType *Ty, Value *Func, const Twine &Name,
790 AllocInfo AllocInfo, InsertPosition InsertBefore)
791 : CallBase(Ty->getReturnType(), Instruction::Call, AllocInfo,
792 InsertBefore) {
793 init(Ty, Func, Name);
794}
795
// Copy constructor: duplicates operands, bundle metadata, tail-call kind.
// NOTE(review): lines orig. 798 (operand-count assert half), 801
// (setCallingConv, presumably), and 805-806 (bundle_op_info destination and
// SubclassOptionalData copy) were lost in extraction.
796CallInst::CallInst(const CallInst &CI, AllocInfo AllocInfo)
797 : CallBase(CI.Attrs, CI.FTy, CI.getType(), Instruction::Call, AllocInfo) {
799 "Wrong number of operands allocated");
800 setTailCallKind(CI.getTailCallKind());
802
803 std::copy(CI.op_begin(), CI.op_end(), op_begin());
804 std::copy(CI.bundle_op_info_begin(), CI.bundle_op_info_end(),
807}
808
// Clone-with-bundles factory (signature orig. 809 lost): rebuilds the call
// with operand bundles OpB and copies all per-call state from CI.
810 InsertPosition InsertPt) {
811 std::vector<Value *> Args(CI->arg_begin(), CI->arg_end());
812
813 auto *NewCI = CallInst::Create(CI->getFunctionType(), CI->getCalledOperand(),
814 Args, OpB, CI->getName(), InsertPt);
815 NewCI->setTailCallKind(CI->getTailCallKind());
816 NewCI->setCallingConv(CI->getCallingConv());
817 NewCI->SubclassOptionalData = CI->SubclassOptionalData;
818 NewCI->setAttributes(CI->getAttributes());
819 NewCI->setDebugLoc(CI->getDebugLoc());
820 return NewCI;
821}
822
823// Update profile weight for call instruction by scaling it using the ratio
824// of S/T. The meaning of "branch_weights" meta data for call instruction is
825// transfered to represent call count.
// NOTE(review): the signature line (orig. 826) was lost in extraction.
827 if (T == 0) {
// Guard: a zero denominator would divide by zero; leave weights untouched.
828 LLVM_DEBUG(dbgs() << "Attempting to update profile weights will result in "
829 "div by 0. Ignoring. Likely the function "
830 << getParent()->getParent()->getName()
831 << " has 0 entry count, and contains call instructions "
832 "with non-zero prof info.");
833 return;
834 }
835 scaleProfData(*this, S, T);
836}
837
838//===----------------------------------------------------------------------===//
839// InvokeInst Implementation
840//===----------------------------------------------------------------------===//
841
// Initializes an invoke: args, bundle inputs, then callee + two destination
// blocks at the operand-list tail (hence the `It + 3` check).
// NOTE(review): lines orig. 844 (the Bundles parameter), 848 (assert head),
// and 868 (setCalledOperand(Fn), presumably) were lost in extraction.
842void InvokeInst::init(FunctionType *FTy, Value *Fn, BasicBlock *IfNormal,
843 BasicBlock *IfException, ArrayRef<Value *> Args,
845 const Twine &NameStr) {
846 this->FTy = FTy;
847
849 ComputeNumOperands(Args.size(), CountBundleInputs(Bundles)) &&
850 "NumOperands not set up?");
851
852#ifndef NDEBUG
853 assert(((Args.size() == FTy->getNumParams()) ||
854 (FTy->isVarArg() && Args.size() > FTy->getNumParams())) &&
855 "Invoking a function with bad signature");
856
857 for (unsigned i = 0, e = Args.size(); i != e; i++)
858 assert((i >= FTy->getNumParams() ||
859 FTy->getParamType(i) == Args[i]->getType()) &&
860 "Invoking a function with a bad signature!");
861#endif
862
863 // Set operands in order of their index to match use-list-order
864 // prediction.
865 llvm::copy(Args, op_begin());
866 setNormalDest(IfNormal);
867 setUnwindDest(IfException);
869
870 auto It = populateBundleOperandInfos(Bundles, Args.size());
871 (void)It;
872 assert(It + 3 == op_end() && "Should add up!");
873
874 setName(NameStr);
875}
876
// Copy constructor: duplicates operands and bundle info from II.
// NOTE(review): line orig. 884 (the bundle_op_info destination iterator,
// presumably bundle_op_info_begin()) was lost in extraction.
877InvokeInst::InvokeInst(const InvokeInst &II, AllocInfo AllocInfo)
878 : CallBase(II.Attrs, II.FTy, II.getType(), Instruction::Invoke, AllocInfo) {
879 assert(getNumOperands() == II.getNumOperands() &&
880 "Wrong number of operands allocated");
881 setCallingConv(II.getCallingConv());
882 std::copy(II.op_begin(), II.op_end(), op_begin());
883 std::copy(II.bundle_op_info_begin(), II.bundle_op_info_end(),
885 SubclassOptionalData = II.SubclassOptionalData;
886}
887
// Clone-with-bundles factory (signature orig. 888 lost): rebuilds II with
// bundle set OpB and copies all per-invoke state.
889 InsertPosition InsertPt) {
890 std::vector<Value *> Args(II->arg_begin(), II->arg_end());
891
892 auto *NewII = InvokeInst::Create(
893 II->getFunctionType(), II->getCalledOperand(), II->getNormalDest(),
894 II->getUnwindDest(), Args, OpB, II->getName(), InsertPt);
895 NewII->setCallingConv(II->getCallingConv());
896 NewII->SubclassOptionalData = II->SubclassOptionalData;
897 NewII->setAttributes(II->getAttributes());
898 NewII->setDebugLoc(II->getDebugLoc());
899 return NewII;
900}
901
// getLandingPadInst (signature orig. 902 lost): the landingpad is the first
// non-PHI instruction of the unwind destination.
903 return cast<LandingPadInst>(getUnwindDest()->getFirstNonPHIIt());
904}
905
// updateProfWeight (signature orig. 906 lost): scales profile metadata by
// S/T; bails out when T is zero to avoid dividing by zero.
907 if (T == 0) {
908 LLVM_DEBUG(dbgs() << "Attempting to update profile weights will result in "
909 "div by 0. Ignoring. Likely the function "
910 << getParent()->getParent()->getName()
911 << " has 0 entry count, and contains call instructions "
912 "with non-zero prof info.");
913 return;
914 }
915 scaleProfData(*this, S, T);
916}
917
918//===----------------------------------------------------------------------===//
919// CallBrInst Implementation
920//===----------------------------------------------------------------------===//
921
// Initializes a callbr: args, bundle inputs, then callee plus the default
// destination and the indirect destinations at the tail (hence the
// `It + 2 + IndirectDests.size()` check).
// NOTE(review): lines orig. 924-925 (the Args and Bundles parameters) and
// 952 (setCalledOperand(Fn), presumably) were lost in extraction.
922void CallBrInst::init(FunctionType *FTy, Value *Fn, BasicBlock *Fallthrough,
923 ArrayRef<BasicBlock *> IndirectDests,
926 const Twine &NameStr) {
927 this->FTy = FTy;
928
929 assert(getNumOperands() == ComputeNumOperands(Args.size(),
930 IndirectDests.size(),
931 CountBundleInputs(Bundles)) &&
932 "NumOperands not set up?");
933
934#ifndef NDEBUG
935 assert(((Args.size() == FTy->getNumParams()) ||
936 (FTy->isVarArg() && Args.size() > FTy->getNumParams())) &&
937 "Calling a function with bad signature");
938
939 for (unsigned i = 0, e = Args.size(); i != e; i++)
940 assert((i >= FTy->getNumParams() ||
941 FTy->getParamType(i) == Args[i]->getType()) &&
942 "Calling a function with a bad signature!");
943#endif
944
945 // Set operands in order of their index to match use-list-order
946 // prediction.
947 llvm::copy(Args, op_begin());
948 NumIndirectDests = IndirectDests.size();
949 setDefaultDest(Fallthrough);
950 for (unsigned i = 0; i != NumIndirectDests; ++i)
951 setIndirectDest(i, IndirectDests[i]);
953
954 auto It = populateBundleOperandInfos(Bundles, Args.size());
955 (void)It;
956 assert(It + 2 + IndirectDests.size() == op_end() && "Should add up!");
957
958 setName(NameStr);
959}
960
// Copy constructor: duplicates operands, bundle info, and indirect-dest
// count. NOTE(review): lines orig. 964, 966, and 969-970 (operand-count
// assert, setCallingConv, bundle destination, SubclassOptionalData copy,
// presumably) were lost in extraction.
961CallBrInst::CallBrInst(const CallBrInst &CBI, AllocInfo AllocInfo)
962 : CallBase(CBI.Attrs, CBI.FTy, CBI.getType(), Instruction::CallBr,
963 AllocInfo) {
965 "Wrong number of operands allocated");
967 std::copy(CBI.op_begin(), CBI.op_end(), op_begin());
968 std::copy(CBI.bundle_op_info_begin(), CBI.bundle_op_info_end(),
971 NumIndirectDests = CBI.NumIndirectDests;
972}
973
974CallBrInst *CallBrInst::Create(CallBrInst *CBI, ArrayRef<OperandBundleDef> OpB,
975 InsertPosition InsertPt) {
976 std::vector<Value *> Args(CBI->arg_begin(), CBI->arg_end());
977
978 auto *NewCBI = CallBrInst::Create(
979 CBI->getFunctionType(), CBI->getCalledOperand(), CBI->getDefaultDest(),
980 CBI->getIndirectDests(), Args, OpB, CBI->getName(), InsertPt);
981 NewCBI->setCallingConv(CBI->getCallingConv());
982 NewCBI->SubclassOptionalData = CBI->SubclassOptionalData;
983 NewCBI->setAttributes(CBI->getAttributes());
984 NewCBI->setDebugLoc(CBI->getDebugLoc());
985 NewCBI->NumIndirectDests = CBI->NumIndirectDests;
986 return NewCBI;
987}
988
989//===----------------------------------------------------------------------===//
990// ReturnInst Implementation
991//===----------------------------------------------------------------------===//
992
// Copy constructor: ret is void-typed; the optional operand is the returned
// value. NOTE(review): lines orig. 996 and 1000 (operand-count assert half
// and SubclassOptionalData copy, presumably) were lost in extraction.
993ReturnInst::ReturnInst(const ReturnInst &RI, AllocInfo AllocInfo)
994 : Instruction(Type::getVoidTy(RI.getContext()), Instruction::Ret,
995 AllocInfo) {
997 "Wrong number of operands allocated");
998 if (RI.getNumOperands())
999 Op<0>() = RI.Op<0>();
1001}
1002
// Primary constructor: retVal may be null for a `ret void`.
1003ReturnInst::ReturnInst(LLVMContext &C, Value *retVal, AllocInfo AllocInfo,
1004 InsertPosition InsertBefore)
1005 : Instruction(Type::getVoidTy(C), Instruction::Ret, AllocInfo,
1006 InsertBefore) {
1007 if (retVal)
1008 Op<0>() = retVal;
1009}
1010
1011//===----------------------------------------------------------------------===//
1012// ResumeInst Implementation
1013//===----------------------------------------------------------------------===//
1014
// Copy constructor: a resume has exactly one operand, the exception value.
1015ResumeInst::ResumeInst(const ResumeInst &RI)
1016 : Instruction(Type::getVoidTy(RI.getContext()), Instruction::Resume,
1017 AllocMarker) {
1018 Op<0>() = RI.Op<0>();
1019}
1020
// Primary constructor: resumes propagation of the exception value Exn.
1021ResumeInst::ResumeInst(Value *Exn, InsertPosition InsertBefore)
1022 : Instruction(Type::getVoidTy(Exn->getContext()), Instruction::Resume,
1023 AllocMarker, InsertBefore) {
1024 Op<0>() = Exn;
1025}
1026
1027//===----------------------------------------------------------------------===//
1028// CleanupReturnInst Implementation
1029//===----------------------------------------------------------------------===//
1030
// Copy constructor: clones the cleanuppad operand, the optional unwind-dest
// operand, and the opaque subclass bits.
// NOTE(review): lines orig. 1032 (AllocInfo parameter), 1034 (operand-count
// assert half), and 1037 (the subclass-data source expression) were lost in
// extraction.
1031CleanupReturnInst::CleanupReturnInst(const CleanupReturnInst &CRI,
1033 : Instruction(CRI.getType(), Instruction::CleanupRet, AllocInfo) {
1035 "Wrong number of operands allocated");
1036 setSubclassData<Instruction::OpaqueField>(
1038 Op<0>() = CRI.Op<0>();
1039 if (CRI.hasUnwindDest())
1040 Op<1>() = CRI.Op<1>();
1041}
1042
1043void CleanupReturnInst::init(Value *CleanupPad, BasicBlock *UnwindBB) {
1044 if (UnwindBB)
1045 setSubclassData<UnwindDestField>(true);
1046
1047 Op<0>() = CleanupPad;
1048 if (UnwindBB)
1049 Op<1>() = UnwindBB;
1050}
1051
// Primary constructor; UnwindBB may be null for `cleanupret ... unwind to
// caller`. NOTE(review): line orig. 1053 (the AllocInfo parameter,
// presumably) was lost in extraction.
1052CleanupReturnInst::CleanupReturnInst(Value *CleanupPad, BasicBlock *UnwindBB,
1054 InsertPosition InsertBefore)
1055 : Instruction(Type::getVoidTy(CleanupPad->getContext()),
1056 Instruction::CleanupRet, AllocInfo, InsertBefore) {
1057 init(CleanupPad, UnwindBB);
1058}
1059
1060//===----------------------------------------------------------------------===//
1061// CatchReturnInst Implementation
1062//===----------------------------------------------------------------------===//
// init: operand 0 is the catchpad token, operand 1 the successor block.
1063void CatchReturnInst::init(Value *CatchPad, BasicBlock *BB) {
1064 Op<0>() = CatchPad;
1065 Op<1>() = BB;
1066}
1067
// Copy constructor: duplicates both operands.
1068CatchReturnInst::CatchReturnInst(const CatchReturnInst &CRI)
1069 : Instruction(Type::getVoidTy(CRI.getContext()), Instruction::CatchRet,
1070 AllocMarker) {
1071 Op<0>() = CRI.Op<0>();
1072 Op<1>() = CRI.Op<1>();
1073}
1074
// Primary constructor; catchret is void-typed.
1075CatchReturnInst::CatchReturnInst(Value *CatchPad, BasicBlock *BB,
1076 InsertPosition InsertBefore)
1077 : Instruction(Type::getVoidTy(BB->getContext()), Instruction::CatchRet,
1078 AllocMarker, InsertBefore) {
1079 init(CatchPad, BB);
1080}
1081
1082//===----------------------------------------------------------------------===//
1083// CatchSwitchInst Implementation
1084//===----------------------------------------------------------------------===//
1085
1086CatchSwitchInst::CatchSwitchInst(Value *ParentPad, BasicBlock *UnwindDest,
1087 unsigned NumReservedValues,
1088 const Twine &NameStr,
1089 InsertPosition InsertBefore)
1090 : Instruction(ParentPad->getType(), Instruction::CatchSwitch, AllocMarker,
1091 InsertBefore) {
1092 if (UnwindDest)
1093 ++NumReservedValues;
1094 init(ParentPad, UnwindDest, NumReservedValues + 1);
1095 setName(NameStr);
1096}
1097
// Copy constructor: re-runs init with CSI's shape, then copies handler
// operands starting at index 1 (index 0, the parent pad, is set by init).
// NOTE(review): line orig. 1100 (ReservedSpace initialization, presumably)
// was lost in extraction.
1098CatchSwitchInst::CatchSwitchInst(const CatchSwitchInst &CSI)
1099 : Instruction(CSI.getType(), Instruction::CatchSwitch, AllocMarker) {
1101 init(CSI.getParentPad(), CSI.getUnwindDest(), CSI.getNumOperands());
1102 setNumHungOffUseOperands(ReservedSpace);
1103 Use *OL = getOperandList();
1104 const Use *InOL = CSI.getOperandList();
1105 for (unsigned I = 1, E = ReservedSpace; I != E; ++I)
1106 OL[I] = InOL[I];
1107}
1108
// init: allocates hung-off uses; operand 0 is the parent pad and, when an
// unwind destination exists, operand 1 holds it.
// NOTE(review): line orig. 1119 (inside the UnwindDest branch, presumably
// setSubclassData<UnwindDestField>(true)) was lost in extraction.
1109void CatchSwitchInst::init(Value *ParentPad, BasicBlock *UnwindDest,
1110 unsigned NumReservedValues) {
1111 assert(ParentPad && NumReservedValues);
1112
1113 ReservedSpace = NumReservedValues;
1114 setNumHungOffUseOperands(UnwindDest ? 2 : 1);
1115 allocHungoffUses(ReservedSpace);
1116
1117 Op<0>() = ParentPad;
1118 if (UnwindDest) {
1120 setUnwindDest(UnwindDest);
1121 }
1122}
1123
/// growOperands - grow operands - This grows the operand list in response to a
/// push_back style of operation. This grows the number of ops by 2 times.
void CatchSwitchInst::growOperands(unsigned Size) {
  unsigned NumOperands = getNumOperands();
  assert(NumOperands >= 1);
  // Fast path: the reserved hung-off use list already has room.
  if (ReservedSpace >= NumOperands + Size)
    return;
  // NOTE(review): precedence here is (NumOperands + Size / 2) * 2 — Size is
  // halved before the addition. This matches the growth policy used by the
  // analogous grow routines in this file; confirm against upstream if the
  // doubling behaviour ever looks off for large Size.
  ReservedSpace = (NumOperands + Size / 2) * 2;
  growHungoffUses(ReservedSpace);
}
1134
1136 unsigned OpNo = getNumOperands();
1137 growOperands(1);
1138 assert(OpNo < ReservedSpace && "Growing didn't work!");
1140 getOperandList()[OpNo] = Handler;
1141}
1142
1144 // Move all subsequent handlers up one.
1145 Use *EndDst = op_end() - 1;
1146 for (Use *CurDst = HI.getCurrent(); CurDst != EndDst; ++CurDst)
1147 *CurDst = *(CurDst + 1);
1148 // Null out the last handler use.
1149 *EndDst = nullptr;
1150
1152}
1153
1154//===----------------------------------------------------------------------===//
1155// FuncletPadInst Implementation
1156//===----------------------------------------------------------------------===//
1157void FuncletPadInst::init(Value *ParentPad, ArrayRef<Value *> Args,
1158 const Twine &NameStr) {
1159 assert(getNumOperands() == 1 + Args.size() && "NumOperands not set up?");
1160 llvm::copy(Args, op_begin());
1161 setParentPad(ParentPad);
1162 setName(NameStr);
1163}
1164
1165FuncletPadInst::FuncletPadInst(const FuncletPadInst &FPI, AllocInfo AllocInfo)
1166 : Instruction(FPI.getType(), FPI.getOpcode(), AllocInfo) {
1168 "Wrong number of operands allocated");
1169 std::copy(FPI.op_begin(), FPI.op_end(), op_begin());
1171}
1172
1173FuncletPadInst::FuncletPadInst(Instruction::FuncletPadOps Op, Value *ParentPad,
1175 const Twine &NameStr,
1176 InsertPosition InsertBefore)
1177 : Instruction(ParentPad->getType(), Op, AllocInfo, InsertBefore) {
1178 init(ParentPad, Args, NameStr);
1179}
1180
1181//===----------------------------------------------------------------------===//
1182// UnreachableInst Implementation
1183//===----------------------------------------------------------------------===//
1184
1186 InsertPosition InsertBefore)
1187 : Instruction(Type::getVoidTy(Context), Instruction::Unreachable,
1188 AllocMarker, InsertBefore) {}
1189
1190//===----------------------------------------------------------------------===//
1191// BranchInst Implementation
1192//===----------------------------------------------------------------------===//
1193
// Debug-only sanity check: a conditional branch must branch on an i1 value.
// Unconditional branches have nothing to verify.
void BranchInst::AssertOK() {
  if (isConditional())
    assert(getCondition()->getType()->isIntegerTy(1) &&
           "May only branch on boolean predicates!");
}
1199
1200BranchInst::BranchInst(BasicBlock *IfTrue, AllocInfo AllocInfo,
1201 InsertPosition InsertBefore)
1202 : Instruction(Type::getVoidTy(IfTrue->getContext()), Instruction::Br,
1203 AllocInfo, InsertBefore) {
1204 assert(IfTrue && "Branch destination may not be null!");
1205 Op<-1>() = IfTrue;
1206}
1207
// Construct a conditional branch. Branch operands live at negative offsets
// from the end of the use list: <-3> = condition, <-2> = false successor,
// <-1> = true successor.
BranchInst::BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond,
                       AllocInfo AllocInfo, InsertPosition InsertBefore)
    : Instruction(Type::getVoidTy(IfTrue->getContext()), Instruction::Br,
                  AllocInfo, InsertBefore) {
  // Assign in order of operand index to make use-list order predictable.
  Op<-3>() = Cond;
  Op<-2>() = IfFalse;
  Op<-1>() = IfTrue;
#ifndef NDEBUG
  AssertOK();
#endif
}
1220
1221BranchInst::BranchInst(const BranchInst &BI, AllocInfo AllocInfo)
1222 : Instruction(Type::getVoidTy(BI.getContext()), Instruction::Br,
1223 AllocInfo) {
1225 "Wrong number of operands allocated");
1226 // Assign in order of operand index to make use-list order predictable.
1227 if (BI.getNumOperands() != 1) {
1228 assert(BI.getNumOperands() == 3 && "BR can have 1 or 3 operands!");
1229 Op<-3>() = BI.Op<-3>();
1230 Op<-2>() = BI.Op<-2>();
1231 }
1232 Op<-1>() = BI.Op<-1>();
1234}
1235
1238 "Cannot swap successors of an unconditional branch");
1239 Op<-1>().swap(Op<-2>());
1240
1241 // Update profile metadata if present and it matches our structural
1242 // expectations.
1244}
1245
1246//===----------------------------------------------------------------------===//
1247// AllocaInst Implementation
1248//===----------------------------------------------------------------------===//
1249
1250static Value *getAISize(LLVMContext &Context, Value *Amt) {
1251 if (!Amt)
1252 Amt = ConstantInt::get(Type::getInt32Ty(Context), 1);
1253 else {
1254 assert(!isa<BasicBlock>(Amt) &&
1255 "Passed basic block into allocation size parameter! Use other ctor");
1256 assert(Amt->getType()->isIntegerTy() &&
1257 "Allocation array size is not an integer!");
1258 }
1259 return Amt;
1260}
1261
1263 assert(Pos.isValid() &&
1264 "Insertion position cannot be null when alignment not provided!");
1265 BasicBlock *BB = Pos.getBasicBlock();
1266 assert(BB->getParent() &&
1267 "BB must be in a Function when alignment not provided!");
1268 const DataLayout &DL = BB->getDataLayout();
1269 return DL.getPrefTypeAlign(Ty);
1270}
1271
1272AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, const Twine &Name,
1273 InsertPosition InsertBefore)
1274 : AllocaInst(Ty, AddrSpace, /*ArraySize=*/nullptr, Name, InsertBefore) {}
1275
1276AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
1277 const Twine &Name, InsertPosition InsertBefore)
1278 : AllocaInst(Ty, AddrSpace, ArraySize,
1279 computeAllocaDefaultAlign(Ty, InsertBefore), Name,
1280 InsertBefore) {}
1281
1282AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
1283 Align Align, const Twine &Name,
1284 InsertPosition InsertBefore)
1285 : UnaryInstruction(PointerType::get(Ty->getContext(), AddrSpace), Alloca,
1286 getAISize(Ty->getContext(), ArraySize), InsertBefore),
1287 AllocatedType(Ty) {
1289 assert(!Ty->isVoidTy() && "Cannot allocate void!");
1290 setName(Name);
1291}
1292
1295 return !CI->isOne();
1296 return true;
1297}
1298
1299/// isStaticAlloca - Return true if this alloca is in the entry block of the
1300/// function and is a constant size. If so, the code generator will fold it
1301/// into the prolog/epilog code, so it is basically free.
1303 // Must be constant size.
1304 if (!isa<ConstantInt>(getArraySize())) return false;
1305
1306 // Must be in the entry block.
1307 const BasicBlock *Parent = getParent();
1308 return Parent->isEntryBlock() && !isUsedWithInAlloca();
1309}
1310
1311//===----------------------------------------------------------------------===//
1312// LoadInst Implementation
1313//===----------------------------------------------------------------------===//
1314
1315void LoadInst::AssertOK() {
1317 "Ptr must have pointer type.");
1318}
1319
1321 assert(Pos.isValid() &&
1322 "Insertion position cannot be null when alignment not provided!");
1323 BasicBlock *BB = Pos.getBasicBlock();
1324 assert(BB->getParent() &&
1325 "BB must be in a Function when alignment not provided!");
1326 const DataLayout &DL = BB->getDataLayout();
1327 return DL.getABITypeAlign(Ty);
1328}
1329
1330LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name,
1331 InsertPosition InsertBef)
1332 : LoadInst(Ty, Ptr, Name, /*isVolatile=*/false, InsertBef) {}
1333
1334LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
1335 InsertPosition InsertBef)
1336 : LoadInst(Ty, Ptr, Name, isVolatile,
1337 computeLoadStoreDefaultAlign(Ty, InsertBef), InsertBef) {}
1338
1339LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
1340 Align Align, InsertPosition InsertBef)
1341 : LoadInst(Ty, Ptr, Name, isVolatile, Align, AtomicOrdering::NotAtomic,
1342 SyncScope::System, InsertBef) {}
1343
1344LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
1346 InsertPosition InsertBef)
1347 : UnaryInstruction(Ty, Load, Ptr, InsertBef) {
1350 setAtomic(Order, SSID);
1351 AssertOK();
1352 setName(Name);
1353}
1354
1355//===----------------------------------------------------------------------===//
1356// StoreInst Implementation
1357//===----------------------------------------------------------------------===//
1358
1359void StoreInst::AssertOK() {
1360 assert(getOperand(0) && getOperand(1) && "Both operands must be non-null!");
1362 "Ptr must have pointer type!");
1363}
1364
1366 : StoreInst(val, addr, /*isVolatile=*/false, InsertBefore) {}
1367
1369 InsertPosition InsertBefore)
1370 : StoreInst(val, addr, isVolatile,
1371 computeLoadStoreDefaultAlign(val->getType(), InsertBefore),
1372 InsertBefore) {}
1373
1375 InsertPosition InsertBefore)
1377 SyncScope::System, InsertBefore) {}
1378
1380 AtomicOrdering Order, SyncScope::ID SSID,
1381 InsertPosition InsertBefore)
1382 : Instruction(Type::getVoidTy(val->getContext()), Store, AllocMarker,
1383 InsertBefore) {
1384 Op<0>() = val;
1385 Op<1>() = addr;
1388 setAtomic(Order, SSID);
1389 AssertOK();
1390}
1391
1392//===----------------------------------------------------------------------===//
1393// AtomicCmpXchgInst Implementation
1394//===----------------------------------------------------------------------===//
1395
1396void AtomicCmpXchgInst::Init(Value *Ptr, Value *Cmp, Value *NewVal,
1397 Align Alignment, AtomicOrdering SuccessOrdering,
1398 AtomicOrdering FailureOrdering,
1399 SyncScope::ID SSID) {
1400 Op<0>() = Ptr;
1401 Op<1>() = Cmp;
1402 Op<2>() = NewVal;
1403 setSuccessOrdering(SuccessOrdering);
1404 setFailureOrdering(FailureOrdering);
1405 setSyncScopeID(SSID);
1406 setAlignment(Alignment);
1407
1408 assert(getOperand(0) && getOperand(1) && getOperand(2) &&
1409 "All operands must be non-null!");
1411 "Ptr must have pointer type!");
1412 assert(getOperand(1)->getType() == getOperand(2)->getType() &&
1413 "Cmp type and NewVal type must be same!");
1414}
1415
1417 Align Alignment,
1418 AtomicOrdering SuccessOrdering,
1419 AtomicOrdering FailureOrdering,
1420 SyncScope::ID SSID,
1421 InsertPosition InsertBefore)
1422 : Instruction(
1423 StructType::get(Cmp->getType(), Type::getInt1Ty(Cmp->getContext())),
1424 AtomicCmpXchg, AllocMarker, InsertBefore) {
1425 Init(Ptr, Cmp, NewVal, Alignment, SuccessOrdering, FailureOrdering, SSID);
1426}
1427
1428//===----------------------------------------------------------------------===//
1429// AtomicRMWInst Implementation
1430//===----------------------------------------------------------------------===//
1431
1432void AtomicRMWInst::Init(BinOp Operation, Value *Ptr, Value *Val,
1433 Align Alignment, AtomicOrdering Ordering,
1434 SyncScope::ID SSID) {
1435 assert(Ordering != AtomicOrdering::NotAtomic &&
1436 "atomicrmw instructions can only be atomic.");
1437 assert(Ordering != AtomicOrdering::Unordered &&
1438 "atomicrmw instructions cannot be unordered.");
1439 Op<0>() = Ptr;
1440 Op<1>() = Val;
1442 setOrdering(Ordering);
1443 setSyncScopeID(SSID);
1444 setAlignment(Alignment);
1445
1446 assert(getOperand(0) && getOperand(1) && "All operands must be non-null!");
1448 "Ptr must have pointer type!");
1449 assert(Ordering != AtomicOrdering::NotAtomic &&
1450 "AtomicRMW instructions must be atomic!");
1451}
1452
1454 Align Alignment, AtomicOrdering Ordering,
1455 SyncScope::ID SSID, InsertPosition InsertBefore)
1456 : Instruction(Val->getType(), AtomicRMW, AllocMarker, InsertBefore) {
1457 Init(Operation, Ptr, Val, Alignment, Ordering, SSID);
1458}
1459
1461 switch (Op) {
1463 return "xchg";
1464 case AtomicRMWInst::Add:
1465 return "add";
1466 case AtomicRMWInst::Sub:
1467 return "sub";
1468 case AtomicRMWInst::And:
1469 return "and";
1471 return "nand";
1472 case AtomicRMWInst::Or:
1473 return "or";
1474 case AtomicRMWInst::Xor:
1475 return "xor";
1476 case AtomicRMWInst::Max:
1477 return "max";
1478 case AtomicRMWInst::Min:
1479 return "min";
1481 return "umax";
1483 return "umin";
1485 return "fadd";
1487 return "fsub";
1489 return "fmax";
1491 return "fmin";
1493 return "fmaximum";
1495 return "fminimum";
1497 return "uinc_wrap";
1499 return "udec_wrap";
1501 return "usub_cond";
1503 return "usub_sat";
1505 return "<invalid operation>";
1506 }
1507
1508 llvm_unreachable("invalid atomicrmw operation");
1509}
1510
1511//===----------------------------------------------------------------------===//
1512// FenceInst Implementation
1513//===----------------------------------------------------------------------===//
1514
1516 SyncScope::ID SSID, InsertPosition InsertBefore)
1517 : Instruction(Type::getVoidTy(C), Fence, AllocMarker, InsertBefore) {
1518 setOrdering(Ordering);
1519 setSyncScopeID(SSID);
1520}
1521
1522//===----------------------------------------------------------------------===//
1523// GetElementPtrInst Implementation
1524//===----------------------------------------------------------------------===//
1525
1526void GetElementPtrInst::init(Value *Ptr, ArrayRef<Value *> IdxList,
1527 const Twine &Name) {
1528 assert(getNumOperands() == 1 + IdxList.size() &&
1529 "NumOperands not initialized?");
1530 Op<0>() = Ptr;
1531 llvm::copy(IdxList, op_begin() + 1);
1532 setName(Name);
1533}
1534
1535GetElementPtrInst::GetElementPtrInst(const GetElementPtrInst &GEPI,
1537 : Instruction(GEPI.getType(), GetElementPtr, AllocInfo),
1538 SourceElementType(GEPI.SourceElementType),
1539 ResultElementType(GEPI.ResultElementType) {
1540 assert(getNumOperands() == GEPI.getNumOperands() &&
1541 "Wrong number of operands allocated");
1542 std::copy(GEPI.op_begin(), GEPI.op_end(), op_begin());
1544}
1545
1547 if (auto *Struct = dyn_cast<StructType>(Ty)) {
1548 if (!Struct->indexValid(Idx))
1549 return nullptr;
1550 return Struct->getTypeAtIndex(Idx);
1551 }
1552 if (!Idx->getType()->isIntOrIntVectorTy())
1553 return nullptr;
1554 if (auto *Array = dyn_cast<ArrayType>(Ty))
1555 return Array->getElementType();
1556 if (auto *Vector = dyn_cast<VectorType>(Ty))
1557 return Vector->getElementType();
1558 return nullptr;
1559}
1560
1562 if (auto *Struct = dyn_cast<StructType>(Ty)) {
1563 if (Idx >= Struct->getNumElements())
1564 return nullptr;
1565 return Struct->getElementType(Idx);
1566 }
1567 if (auto *Array = dyn_cast<ArrayType>(Ty))
1568 return Array->getElementType();
1569 if (auto *Vector = dyn_cast<VectorType>(Ty))
1570 return Vector->getElementType();
1571 return nullptr;
1572}
1573
1574template <typename IndexTy>
1576 if (IdxList.empty())
1577 return Ty;
1578 for (IndexTy V : IdxList.slice(1)) {
1580 if (!Ty)
1581 return Ty;
1582 }
1583 return Ty;
1584}
1585
1589
1591 ArrayRef<Constant *> IdxList) {
1592 return getIndexedTypeInternal(Ty, IdxList);
1593}
1594
1598
1599/// hasAllZeroIndices - Return true if all of the indices of this GEP are
1600/// zeros. If so, the result pointer and the first operand have the same
1601/// value, just potentially different types.
1603 for (unsigned i = 1, e = getNumOperands(); i != e; ++i) {
1605 if (!CI->isZero()) return false;
1606 } else {
1607 return false;
1608 }
1609 }
1610 return true;
1611}
1612
1613/// hasAllConstantIndices - Return true if all of the indices of this GEP are
1614/// constant integers. If so, the result pointer and the first operand have
1615/// a constant offset between them.
1617 for (unsigned i = 1, e = getNumOperands(); i != e; ++i) {
1619 return false;
1620 }
1621 return true;
1622}
1623
1627
1629 GEPNoWrapFlags NW = cast<GEPOperator>(this)->getNoWrapFlags();
1630 if (B)
1632 else
1633 NW = NW.withoutInBounds();
1634 setNoWrapFlags(NW);
1635}
1636
1638 return cast<GEPOperator>(this)->getNoWrapFlags();
1639}
1640
1642 return cast<GEPOperator>(this)->isInBounds();
1643}
1644
1646 return cast<GEPOperator>(this)->hasNoUnsignedSignedWrap();
1647}
1648
1650 return cast<GEPOperator>(this)->hasNoUnsignedWrap();
1651}
1652
1654 APInt &Offset) const {
1655 // Delegate to the generic GEPOperator implementation.
1656 return cast<GEPOperator>(this)->accumulateConstantOffset(DL, Offset);
1657}
1658
1660 const DataLayout &DL, unsigned BitWidth,
1661 SmallMapVector<Value *, APInt, 4> &VariableOffsets,
1662 APInt &ConstantOffset) const {
1663 // Delegate to the generic GEPOperator implementation.
1664 return cast<GEPOperator>(this)->collectOffset(DL, BitWidth, VariableOffsets,
1665 ConstantOffset);
1666}
1667
1668//===----------------------------------------------------------------------===//
1669// ExtractElementInst Implementation
1670//===----------------------------------------------------------------------===//
1671
// Construct an extractelement instruction. The result type is the element
// type of the source vector; operands are <vector, index>.
ExtractElementInst::ExtractElementInst(Value *Val, Value *Index,
                                       const Twine &Name,
                                       InsertPosition InsertBef)
    : Instruction(cast<VectorType>(Val->getType())->getElementType(),
                  ExtractElement, AllocMarker, InsertBef) {
  assert(isValidOperands(Val, Index) &&
         "Invalid extractelement instruction operands!");
  Op<0>() = Val;
  Op<1>() = Index;
  setName(Name);
}
1683
1684bool ExtractElementInst::isValidOperands(const Value *Val, const Value *Index) {
1685 if (!Val->getType()->isVectorTy() || !Index->getType()->isIntegerTy())
1686 return false;
1687 return true;
1688}
1689
1690//===----------------------------------------------------------------------===//
1691// InsertElementInst Implementation
1692//===----------------------------------------------------------------------===//
1693
// Construct an insertelement instruction. The result type equals the input
// vector type; operands are <vector, element, index>.
InsertElementInst::InsertElementInst(Value *Vec, Value *Elt, Value *Index,
                                     const Twine &Name,
                                     InsertPosition InsertBef)
    : Instruction(Vec->getType(), InsertElement, AllocMarker, InsertBef) {
  assert(isValidOperands(Vec, Elt, Index) &&
         "Invalid insertelement instruction operands!");
  Op<0>() = Vec;
  Op<1>() = Elt;
  Op<2>() = Index;
  setName(Name);
}
1705
1707 const Value *Index) {
1708 if (!Vec->getType()->isVectorTy())
1709 return false; // First operand of insertelement must be vector type.
1710
1711 if (Elt->getType() != cast<VectorType>(Vec->getType())->getElementType())
1712 return false;// Second operand of insertelement must be vector element type.
1713
1714 if (!Index->getType()->isIntegerTy())
1715 return false; // Third operand of insertelement must be i32.
1716 return true;
1717}
1718
1719//===----------------------------------------------------------------------===//
1720// ShuffleVectorInst Implementation
1721//===----------------------------------------------------------------------===//
1722
1724 assert(V && "Cannot create placeholder of nullptr V");
1725 return PoisonValue::get(V->getType());
1726}
1727
1729 InsertPosition InsertBefore)
1731 InsertBefore) {}
1732
1734 const Twine &Name,
1735 InsertPosition InsertBefore)
1737 InsertBefore) {}
1738
1740 const Twine &Name,
1741 InsertPosition InsertBefore)
1742 : Instruction(
1743 VectorType::get(cast<VectorType>(V1->getType())->getElementType(),
1744 cast<VectorType>(Mask->getType())->getElementCount()),
1745 ShuffleVector, AllocMarker, InsertBefore) {
1746 assert(isValidOperands(V1, V2, Mask) &&
1747 "Invalid shuffle vector instruction operands!");
1748
1749 Op<0>() = V1;
1750 Op<1>() = V2;
1751 SmallVector<int, 16> MaskArr;
1752 getShuffleMask(cast<Constant>(Mask), MaskArr);
1753 setShuffleMask(MaskArr);
1754 setName(Name);
1755}
1756
1758 const Twine &Name,
1759 InsertPosition InsertBefore)
1760 : Instruction(
1761 VectorType::get(cast<VectorType>(V1->getType())->getElementType(),
1762 Mask.size(), isa<ScalableVectorType>(V1->getType())),
1763 ShuffleVector, AllocMarker, InsertBefore) {
1764 assert(isValidOperands(V1, V2, Mask) &&
1765 "Invalid shuffle vector instruction operands!");
1766 Op<0>() = V1;
1767 Op<1>() = V2;
1768 setShuffleMask(Mask);
1769 setName(Name);
1770}
1771
1773 int NumOpElts = cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
1774 int NumMaskElts = ShuffleMask.size();
1775 SmallVector<int, 16> NewMask(NumMaskElts);
1776 for (int i = 0; i != NumMaskElts; ++i) {
1777 int MaskElt = getMaskValue(i);
1778 if (MaskElt == PoisonMaskElem) {
1779 NewMask[i] = PoisonMaskElem;
1780 continue;
1781 }
1782 assert(MaskElt >= 0 && MaskElt < 2 * NumOpElts && "Out-of-range mask");
1783 MaskElt = (MaskElt < NumOpElts) ? MaskElt + NumOpElts : MaskElt - NumOpElts;
1784 NewMask[i] = MaskElt;
1785 }
1786 setShuffleMask(NewMask);
1787 Op<0>().swap(Op<1>());
1788}
1789
1791 ArrayRef<int> Mask) {
1792 // V1 and V2 must be vectors of the same type.
1793 if (!isa<VectorType>(V1->getType()) || V1->getType() != V2->getType())
1794 return false;
1795
1796 // Make sure the mask elements make sense.
1797 int V1Size =
1798 cast<VectorType>(V1->getType())->getElementCount().getKnownMinValue();
1799 for (int Elem : Mask)
1800 if (Elem != PoisonMaskElem && Elem >= V1Size * 2)
1801 return false;
1802
1804 if ((Mask[0] != 0 && Mask[0] != PoisonMaskElem) || !all_equal(Mask))
1805 return false;
1806
1807 return true;
1808}
1809
1811 const Value *Mask) {
1812 // V1 and V2 must be vectors of the same type.
1813 if (!V1->getType()->isVectorTy() || V1->getType() != V2->getType())
1814 return false;
1815
1816 // Mask must be vector of i32, and must be the same kind of vector as the
1817 // input vectors
1818 auto *MaskTy = dyn_cast<VectorType>(Mask->getType());
1819 if (!MaskTy || !MaskTy->getElementType()->isIntegerTy(32) ||
1821 return false;
1822
1823 // Check to see if Mask is valid.
1825 return true;
1826
1827 // NOTE: Through vector ConstantInt we have the potential to support more
1828 // than just zero splat masks but that requires a LangRef change.
1829 if (isa<ScalableVectorType>(MaskTy))
1830 return false;
1831
1832 unsigned V1Size = cast<FixedVectorType>(V1->getType())->getNumElements();
1833
1834 if (const auto *CI = dyn_cast<ConstantInt>(Mask))
1835 return !CI->uge(V1Size * 2);
1836
1837 if (const auto *MV = dyn_cast<ConstantVector>(Mask)) {
1838 for (Value *Op : MV->operands()) {
1839 if (auto *CI = dyn_cast<ConstantInt>(Op)) {
1840 if (CI->uge(V1Size*2))
1841 return false;
1842 } else if (!isa<UndefValue>(Op)) {
1843 return false;
1844 }
1845 }
1846 return true;
1847 }
1848
1849 if (const auto *CDS = dyn_cast<ConstantDataSequential>(Mask)) {
1850 for (unsigned i = 0, e = cast<FixedVectorType>(MaskTy)->getNumElements();
1851 i != e; ++i)
1852 if (CDS->getElementAsInteger(i) >= V1Size*2)
1853 return false;
1854 return true;
1855 }
1856
1857 return false;
1858}
1859
1861 SmallVectorImpl<int> &Result) {
1862 ElementCount EC = cast<VectorType>(Mask->getType())->getElementCount();
1863
1864 if (isa<ConstantAggregateZero>(Mask) || isa<UndefValue>(Mask)) {
1865 int MaskVal = isa<UndefValue>(Mask) ? -1 : 0;
1866 Result.append(EC.getKnownMinValue(), MaskVal);
1867 return;
1868 }
1869
1870 assert(!EC.isScalable() &&
1871 "Scalable vector shuffle mask must be undef or zeroinitializer");
1872
1873 unsigned NumElts = EC.getFixedValue();
1874
1875 Result.reserve(NumElts);
1876
1877 if (auto *CDS = dyn_cast<ConstantDataSequential>(Mask)) {
1878 for (unsigned i = 0; i != NumElts; ++i)
1879 Result.push_back(CDS->getElementAsInteger(i));
1880 return;
1881 }
1882 for (unsigned i = 0; i != NumElts; ++i) {
1883 Constant *C = Mask->getAggregateElement(i);
1884 Result.push_back(isa<UndefValue>(C) ? -1 :
1885 cast<ConstantInt>(C)->getZExtValue());
1886 }
1887}
1888
1890 ShuffleMask.assign(Mask.begin(), Mask.end());
1891 ShuffleMaskForBitcode = convertShuffleMaskForBitcode(Mask, getType());
1892}
1893
1895 Type *ResultTy) {
1896 Type *Int32Ty = Type::getInt32Ty(ResultTy->getContext());
1897 if (isa<ScalableVectorType>(ResultTy)) {
1898 assert(all_equal(Mask) && "Unexpected shuffle");
1899 Type *VecTy = VectorType::get(Int32Ty, Mask.size(), true);
1900 if (Mask[0] == 0)
1901 return Constant::getNullValue(VecTy);
1902 return PoisonValue::get(VecTy);
1903 }
1905 for (int Elem : Mask) {
1906 if (Elem == PoisonMaskElem)
1908 else
1909 MaskConst.push_back(ConstantInt::get(Int32Ty, Elem));
1910 }
1911 return ConstantVector::get(MaskConst);
1912}
1913
1914static bool isSingleSourceMaskImpl(ArrayRef<int> Mask, int NumOpElts) {
1915 assert(!Mask.empty() && "Shuffle mask must contain elements");
1916 bool UsesLHS = false;
1917 bool UsesRHS = false;
1918 for (int I : Mask) {
1919 if (I == -1)
1920 continue;
1921 assert(I >= 0 && I < (NumOpElts * 2) &&
1922 "Out-of-bounds shuffle mask element");
1923 UsesLHS |= (I < NumOpElts);
1924 UsesRHS |= (I >= NumOpElts);
1925 if (UsesLHS && UsesRHS)
1926 return false;
1927 }
1928 // Allow for degenerate case: completely undef mask means neither source is used.
1929 return UsesLHS || UsesRHS;
1930}
1931
1933 // We don't have vector operand size information, so assume operands are the
1934 // same size as the mask.
1935 return isSingleSourceMaskImpl(Mask, NumSrcElts);
1936}
1937
1938static bool isIdentityMaskImpl(ArrayRef<int> Mask, int NumOpElts) {
1939 if (!isSingleSourceMaskImpl(Mask, NumOpElts))
1940 return false;
1941 for (int i = 0, NumMaskElts = Mask.size(); i < NumMaskElts; ++i) {
1942 if (Mask[i] == -1)
1943 continue;
1944 if (Mask[i] != i && Mask[i] != (NumOpElts + i))
1945 return false;
1946 }
1947 return true;
1948}
1949
1951 if (Mask.size() != static_cast<unsigned>(NumSrcElts))
1952 return false;
1953 // We don't have vector operand size information, so assume operands are the
1954 // same size as the mask.
1955 return isIdentityMaskImpl(Mask, NumSrcElts);
1956}
1957
1959 if (Mask.size() != static_cast<unsigned>(NumSrcElts))
1960 return false;
1961 if (!isSingleSourceMask(Mask, NumSrcElts))
1962 return false;
1963
1964 // The number of elements in the mask must be at least 2.
1965 if (NumSrcElts < 2)
1966 return false;
1967
1968 for (int I = 0, E = Mask.size(); I < E; ++I) {
1969 if (Mask[I] == -1)
1970 continue;
1971 if (Mask[I] != (NumSrcElts - 1 - I) &&
1972 Mask[I] != (NumSrcElts + NumSrcElts - 1 - I))
1973 return false;
1974 }
1975 return true;
1976}
1977
1979 if (Mask.size() != static_cast<unsigned>(NumSrcElts))
1980 return false;
1981 if (!isSingleSourceMask(Mask, NumSrcElts))
1982 return false;
1983 for (int I = 0, E = Mask.size(); I < E; ++I) {
1984 if (Mask[I] == -1)
1985 continue;
1986 if (Mask[I] != 0 && Mask[I] != NumSrcElts)
1987 return false;
1988 }
1989 return true;
1990}
1991
1993 if (Mask.size() != static_cast<unsigned>(NumSrcElts))
1994 return false;
1995 // Select is differentiated from identity. It requires using both sources.
1996 if (isSingleSourceMask(Mask, NumSrcElts))
1997 return false;
1998 for (int I = 0, E = Mask.size(); I < E; ++I) {
1999 if (Mask[I] == -1)
2000 continue;
2001 if (Mask[I] != I && Mask[I] != (NumSrcElts + I))
2002 return false;
2003 }
2004 return true;
2005}
2006
2008 // Example masks that will return true:
2009 // v1 = <a, b, c, d>
2010 // v2 = <e, f, g, h>
2011 // trn1 = shufflevector v1, v2 <0, 4, 2, 6> = <a, e, c, g>
2012 // trn2 = shufflevector v1, v2 <1, 5, 3, 7> = <b, f, d, h>
2013
2014 if (Mask.size() != static_cast<unsigned>(NumSrcElts))
2015 return false;
2016 // 1. The number of elements in the mask must be a power-of-2 and at least 2.
2017 int Sz = Mask.size();
2018 if (Sz < 2 || !isPowerOf2_32(Sz))
2019 return false;
2020
2021 // 2. The first element of the mask must be either a 0 or a 1.
2022 if (Mask[0] != 0 && Mask[0] != 1)
2023 return false;
2024
2025 // 3. The difference between the first 2 elements must be equal to the
2026 // number of elements in the mask.
2027 if ((Mask[1] - Mask[0]) != NumSrcElts)
2028 return false;
2029
2030 // 4. The difference between consecutive even-numbered and odd-numbered
2031 // elements must be equal to 2.
2032 for (int I = 2; I < Sz; ++I) {
2033 int MaskEltVal = Mask[I];
2034 if (MaskEltVal == -1)
2035 return false;
2036 int MaskEltPrevVal = Mask[I - 2];
2037 if (MaskEltVal - MaskEltPrevVal != 2)
2038 return false;
2039 }
2040 return true;
2041}
2042
2044 int &Index) {
2045 if (Mask.size() != static_cast<unsigned>(NumSrcElts))
2046 return false;
2047 // Example: shufflevector <4 x n> A, <4 x n> B, <1,2,3,4>
2048 int StartIndex = -1;
2049 for (int I = 0, E = Mask.size(); I != E; ++I) {
2050 int MaskEltVal = Mask[I];
2051 if (MaskEltVal == -1)
2052 continue;
2053
2054 if (StartIndex == -1) {
2055 // Don't support a StartIndex that begins in the second input, or if the
2056 // first non-undef index would access below the StartIndex.
2057 if (MaskEltVal < I || NumSrcElts <= (MaskEltVal - I))
2058 return false;
2059
2060 StartIndex = MaskEltVal - I;
2061 continue;
2062 }
2063
2064 // Splice is sequential starting from StartIndex.
2065 if (MaskEltVal != (StartIndex + I))
2066 return false;
2067 }
2068
2069 if (StartIndex == -1)
2070 return false;
2071
2072 // NOTE: This accepts StartIndex == 0 (COPY).
2073 Index = StartIndex;
2074 return true;
2075}
2076
2078 int NumSrcElts, int &Index) {
2079 // Must extract from a single source.
2080 if (!isSingleSourceMaskImpl(Mask, NumSrcElts))
2081 return false;
2082
2083 // Must be smaller (else this is an Identity shuffle).
2084 if (NumSrcElts <= (int)Mask.size())
2085 return false;
2086
2087 // Find start of extraction, accounting that we may start with an UNDEF.
2088 int SubIndex = -1;
2089 for (int i = 0, e = Mask.size(); i != e; ++i) {
2090 int M = Mask[i];
2091 if (M < 0)
2092 continue;
2093 int Offset = (M % NumSrcElts) - i;
2094 if (0 <= SubIndex && SubIndex != Offset)
2095 return false;
2096 SubIndex = Offset;
2097 }
2098
2099 if (0 <= SubIndex && SubIndex + (int)Mask.size() <= NumSrcElts) {
2100 Index = SubIndex;
2101 return true;
2102 }
2103 return false;
2104}
2105
2107 int NumSrcElts, int &NumSubElts,
2108 int &Index) {
2109 int NumMaskElts = Mask.size();
2110
2111 // Don't try to match if we're shuffling to a smaller size.
2112 if (NumMaskElts < NumSrcElts)
2113 return false;
2114
2115 // TODO: We don't recognize self-insertion/widening.
2116 if (isSingleSourceMaskImpl(Mask, NumSrcElts))
2117 return false;
2118
2119 // Determine which mask elements are attributed to which source.
2120 APInt UndefElts = APInt::getZero(NumMaskElts);
2121 APInt Src0Elts = APInt::getZero(NumMaskElts);
2122 APInt Src1Elts = APInt::getZero(NumMaskElts);
2123 bool Src0Identity = true;
2124 bool Src1Identity = true;
2125
2126 for (int i = 0; i != NumMaskElts; ++i) {
2127 int M = Mask[i];
2128 if (M < 0) {
2129 UndefElts.setBit(i);
2130 continue;
2131 }
2132 if (M < NumSrcElts) {
2133 Src0Elts.setBit(i);
2134 Src0Identity &= (M == i);
2135 continue;
2136 }
2137 Src1Elts.setBit(i);
2138 Src1Identity &= (M == (i + NumSrcElts));
2139 }
2140 assert((Src0Elts | Src1Elts | UndefElts).isAllOnes() &&
2141 "unknown shuffle elements");
2142 assert(!Src0Elts.isZero() && !Src1Elts.isZero() &&
2143 "2-source shuffle not found");
2144
2145 // Determine lo/hi span ranges.
2146 // TODO: How should we handle undefs at the start of subvector insertions?
2147 int Src0Lo = Src0Elts.countr_zero();
2148 int Src1Lo = Src1Elts.countr_zero();
2149 int Src0Hi = NumMaskElts - Src0Elts.countl_zero();
2150 int Src1Hi = NumMaskElts - Src1Elts.countl_zero();
2151
2152 // If src0 is in place, see if the src1 elements is inplace within its own
2153 // span.
2154 if (Src0Identity) {
2155 int NumSub1Elts = Src1Hi - Src1Lo;
2156 ArrayRef<int> Sub1Mask = Mask.slice(Src1Lo, NumSub1Elts);
2157 if (isIdentityMaskImpl(Sub1Mask, NumSrcElts)) {
2158 NumSubElts = NumSub1Elts;
2159 Index = Src1Lo;
2160 return true;
2161 }
2162 }
2163
2164 // If src1 is in place, see if the src0 elements is inplace within its own
2165 // span.
2166 if (Src1Identity) {
2167 int NumSub0Elts = Src0Hi - Src0Lo;
2168 ArrayRef<int> Sub0Mask = Mask.slice(Src0Lo, NumSub0Elts);
2169 if (isIdentityMaskImpl(Sub0Mask, NumSrcElts)) {
2170 NumSubElts = NumSub0Elts;
2171 Index = Src0Lo;
2172 return true;
2173 }
2174 }
2175
2176 return false;
2177}
2178
// NOTE(review): appears to be ShuffleVectorInst::isIdentityWithPadding() —
// the declaration line and two interior lines (presumably a scalable-vector
// guard before the first `return false` and the `Mask` local before the
// isIdentityMaskImpl call) are missing from this extract; TODO restore from
// upstream before compiling.
2180 // FIXME: Not currently possible to express a shuffle mask for a scalable
2181 // vector for this case.
2183 return false;
2184 
2185 int NumOpElts = cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
2186 int NumMaskElts = cast<FixedVectorType>(getType())->getNumElements();
// Padding requires the result to be strictly longer than the source operand.
2187 if (NumMaskElts <= NumOpElts)
2188 return false;
2189 
2190 // The first part of the mask must choose elements from exactly 1 source op.
2192 if (!isIdentityMaskImpl(Mask, NumOpElts))
2193 return false;
2194 
2195 // All extending must be with undef elements.
2196 for (int i = NumOpElts; i < NumMaskElts; ++i)
2197 if (Mask[i] != -1)
2198 return false;
2199 
2200 return true;
2201}
2202
// NOTE(review): appears to be ShuffleVectorInst::isIdentityWithExtract() —
// the declaration line and the scalable-vector guard are missing from this
// extract; TODO restore from upstream.
2204 // FIXME: Not currently possible to express a shuffle mask for a scalable
2205 // vector for this case.
2207 return false;
2208 
2209 int NumOpElts = cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
2210 int NumMaskElts = cast<FixedVectorType>(getType())->getNumElements();
// Extraction requires the result to be strictly shorter than the source.
2211 if (NumMaskElts >= NumOpElts)
2212 return false;
2213 
2214 return isIdentityMaskImpl(getShuffleMask(), NumOpElts);
2215}
2216
// NOTE(review): appears to be ShuffleVectorInst::isConcat() — the declaration
// line and the guard conditions before each early `return false` (presumably
// undef-operand and scalable-vector checks) are missing from this extract;
// TODO restore from upstream.
2218 // Vector concatenation is differentiated from identity with padding.
2220 return false;
2221 
2222 // FIXME: Not currently possible to express a shuffle mask for a scalable
2223 // vector for this case.
2225 return false;
2226 
2227 int NumOpElts = cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
2228 int NumMaskElts = cast<FixedVectorType>(getType())->getNumElements();
2229 if (NumMaskElts != NumOpElts * 2)
2230 return false;
2231 
2232 // Use the mask length rather than the operands' vector lengths here. We
2233 // already know that the shuffle returns a vector twice as long as the inputs,
2234 // and neither of the inputs are undef vectors. If the mask picks consecutive
2235 // elements from both inputs, then this is a concatenation of the inputs.
2236 return isIdentityMaskImpl(getShuffleMask(), NumMaskElts);
2237}
2238
// NOTE(review): the first line of this declaration (presumably
// `static bool isReplicationMaskWithParams(ArrayRef<int> Mask,`) is missing
// from this extract. Checks that Mask replicates each of VF elements
// ReplicationFactor times in order, treating poison lanes as wildcards.
2240 int ReplicationFactor, int VF) {
2241 assert(Mask.size() == (unsigned)ReplicationFactor * VF &&
2242 "Unexpected mask size.");
2243 
// Consume the mask one ReplicationFactor-sized chunk per source element.
2244 for (int CurrElt : seq(VF)) {
2245 ArrayRef<int> CurrSubMask = Mask.take_front(ReplicationFactor);
2246 assert(CurrSubMask.size() == (unsigned)ReplicationFactor &&
2247 "Run out of mask?");
2248 Mask = Mask.drop_front(ReplicationFactor);
// Every lane of the chunk must be poison or the current source element.
2249 if (!all_of(CurrSubMask, [CurrElt](int MaskElt) {
2250 return MaskElt == PoisonMaskElem || MaskElt == CurrElt;
2251 }))
2252 return false;
2253 }
2254 assert(Mask.empty() && "Did not consume the whole mask?");
2255 
2256 return true;
2257}
2258
// NOTE(review): the first line of this declaration (presumably
// `bool ShuffleVectorInst::isReplicationMask(ArrayRef<int> Mask,`) is missing
// from this extract. Detects a replication mask and reports the deduced
// replication factor and vector factor via the out-parameters.
2260 int &ReplicationFactor, int &VF) {
2261 // undef-less case is trivial.
2262 if (!llvm::is_contained(Mask, PoisonMaskElem)) {
// With no poison lanes the factor is simply the length of the leading run
// of zeros (element 0 must be replicated first).
2263 ReplicationFactor =
2264 Mask.take_while([](int MaskElt) { return MaskElt == 0; }).size();
2265 if (ReplicationFactor == 0 || Mask.size() % ReplicationFactor != 0)
2266 return false;
2267 VF = Mask.size() / ReplicationFactor;
2268 return isReplicationMaskWithParams(Mask, ReplicationFactor, VF);
2269 }
2270 
2271 // However, if the mask contains undef's, we have to enumerate possible tuples
2272 // and pick one. There are bounds on replication factor: [1, mask size]
2273 // (where RF=1 is an identity shuffle, RF=mask size is a broadcast shuffle)
2274 // Additionally, mask size is a replication factor multiplied by vector size,
2275 // which further significantly reduces the search space.
2276 
2277 // Before doing that, let's perform basic correctness checking first.
2278 int Largest = -1;
2279 for (int MaskElt : Mask) {
2280 if (MaskElt == PoisonMaskElem)
2281 continue;
2282 // Elements must be in non-decreasing order.
2283 if (MaskElt < Largest)
2284 return false;
2285 Largest = std::max(Largest, MaskElt);
2286 }
2287 
2288 // Prefer larger replication factor if all else equal.
2289 for (int PossibleReplicationFactor :
2290 reverse(seq_inclusive<unsigned>(1, Mask.size()))) {
2291 if (Mask.size() % PossibleReplicationFactor != 0)
2292 continue;
2293 int PossibleVF = Mask.size() / PossibleReplicationFactor;
2294 if (!isReplicationMaskWithParams(Mask, PossibleReplicationFactor,
2295 PossibleVF))
2296 continue;
2297 ReplicationFactor = PossibleReplicationFactor;
2298 VF = PossibleVF;
2299 return true;
2300 }
2301 
2302 return false;
2303}
2304
// NOTE(review): the guard condition before the first `return false`
// (presumably an `isa<ScalableVectorType>` check) is missing from this
// extract; TODO restore from upstream.
2305bool ShuffleVectorInst::isReplicationMask(int &ReplicationFactor,
2306 int &VF) const {
2307 // Not possible to express a shuffle mask for a scalable vector for this
2308 // case.
2310 return false;
2311 
// The vector factor is fixed by the source operand's element count, so no
// search over factors is needed here (unlike the static overload).
2312 VF = cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
2313 if (ShuffleMask.size() % VF != 0)
2314 return false;
2315 ReplicationFactor = ShuffleMask.size() / VF;
2316 
2317 return isReplicationMaskWithParams(ShuffleMask, ReplicationFactor, VF);
2318}
2319
// NOTE(review): the declaration line (presumably
// `bool ShuffleVectorInst::isOneUseSingleSourceMask(ArrayRef<int> Mask, int VF)`)
// is missing from this extract. Verifies that, per VF-sized group, every
// non-poison in-range source lane index is used.
2321 if (VF <= 0 || Mask.size() < static_cast<unsigned>(VF) ||
2322 Mask.size() % VF != 0)
2323 return false;
2324 for (unsigned K = 0, Sz = Mask.size(); K < Sz; K += VF) {
2325 ArrayRef<int> SubMask = Mask.slice(K, VF);
// A fully-poison group imposes no constraint.
2326 if (all_of(SubMask, [](int Idx) { return Idx == PoisonMaskElem; }))
2327 continue;
2328 SmallBitVector Used(VF, false);
2329 for (int Idx : SubMask) {
2330 if (Idx != PoisonMaskElem && Idx < VF)
2331 Used.set(Idx);
2332 }
// Every index in [0, VF) must appear in this group.
2333 if (!Used.all())
2334 return false;
2335 }
2336 return true;
2337}
2338
// NOTE(review): the declaration line (presumably
// `bool ShuffleVectorInst::isOneUseSingleSourceMask(int VF) const`) and the
// scalable-vector guard condition are missing from this extract. The ///
// comment below looks misplaced — the body checks a single-source/one-use
// mask, not a replication mask; verify against upstream.
2339/// Return true if this shuffle mask is a replication mask.
2341 // Not possible to express a shuffle mask for a scalable vector for this
2342 // case.
2344 return false;
2345 if (!isSingleSourceMask(ShuffleMask, VF))
2346 return false;
2347 
2348 return isOneUseSingleSourceMask(ShuffleMask, VF);
2349}
2350
// NOTE(review): the line defining `OpTy` (presumably a
// `dyn_cast<FixedVectorType>` of the first operand's type) is missing from
// this extract; TODO restore from upstream.
2351bool ShuffleVectorInst::isInterleave(unsigned Factor) {
2353 // shuffle_vector can only interleave fixed length vectors - for scalable
2354 // vectors, see the @llvm.vector.interleave2 intrinsic
2355 if (!OpTy)
2356 return false;
2357 unsigned OpNumElts = OpTy->getNumElements();
2358 
// Both shuffle operands feed the interleave, hence OpNumElts * 2 inputs.
2359 return isInterleaveMask(ShuffleMask, Factor, OpNumElts * 2);
2360}
2361
// NOTE(review): the first line of this declaration (presumably
// `bool ShuffleVectorInst::isInterleaveMask(`) is missing from this extract.
// Checks whether Mask interleaves `Factor` lanes of length NumElts/Factor,
// tolerating undef elements, and records each lane's starting source index
// in StartIndexes.
2363 ArrayRef<int> Mask, unsigned Factor, unsigned NumInputElts,
2364 SmallVectorImpl<unsigned> &StartIndexes) {
2365 unsigned NumElts = Mask.size();
2366 if (NumElts % Factor)
2367 return false;
2368 
2369 unsigned LaneLen = NumElts / Factor;
2370 if (!isPowerOf2_32(LaneLen))
2371 return false;
2372 
2373 StartIndexes.resize(Factor);
2374 
2375 // Check whether each element matches the general interleaved rule.
2376 // Ignore undef elements, as long as the defined elements match the rule.
2377 // Outer loop processes all factors (x, y, z in the above example)
2378 unsigned I = 0, J;
2379 for (; I < Factor; I++) {
2380 unsigned SavedLaneValue;
2381 unsigned SavedNoUndefs = 0;
2382 
2383 // Inner loop processes consecutive accesses (x, x+1... in the example)
2384 for (J = 0; J < LaneLen - 1; J++) {
2385 // Lane computes x's position in the Mask
2386 unsigned Lane = J * Factor + I;
2387 unsigned NextLane = Lane + Factor;
2388 int LaneValue = Mask[Lane];
2389 int NextLaneValue = Mask[NextLane];
2390 
2391 // If both are defined, values must be sequential
2392 if (LaneValue >= 0 && NextLaneValue >= 0 &&
2393 LaneValue + 1 != NextLaneValue)
2394 break;
2395 
2396 // If the next value is undef, save the current one as reference
2397 if (LaneValue >= 0 && NextLaneValue < 0) {
2398 SavedLaneValue = LaneValue;
2399 SavedNoUndefs = 1;
2400 }
2401 
2402 // Undefs are allowed, but defined elements must still be consecutive:
2403 // i.e.: x,..., undef,..., x + 2,..., undef,..., undef,..., x + 5, ....
2404 // Verify this by storing the last non-undef followed by an undef
2405 // Check that following non-undef masks are incremented with the
2406 // corresponding distance.
2407 if (SavedNoUndefs > 0 && LaneValue < 0) {
2408 SavedNoUndefs++;
2409 if (NextLaneValue >= 0 &&
2410 SavedLaneValue + SavedNoUndefs != (unsigned)NextLaneValue)
2411 break;
2412 }
2413 }
2414 
// An early `break` above means this lane violated the interleave pattern.
2415 if (J < LaneLen - 1)
2416 return false;
2417 
2418 int StartMask = 0;
2419 if (Mask[I] >= 0) {
2420 // Check that the start of the I range (J=0) is greater than 0
2421 StartMask = Mask[I];
2422 } else if (Mask[(LaneLen - 1) * Factor + I] >= 0) {
2423 // StartMask defined by the last value in lane
2424 StartMask = Mask[(LaneLen - 1) * Factor + I] - J;
2425 } else if (SavedNoUndefs > 0) {
2426 // StartMask defined by some non-zero value in the j loop
2427 StartMask = SavedLaneValue - (LaneLen - 1 - SavedNoUndefs);
2428 }
2429 // else StartMask remains set to 0, i.e. all elements are undefs
2430 
2431 if (StartMask < 0)
2432 return false;
2433 // We must stay within the vectors; This case can happen with undefs.
2434 if (StartMask + LaneLen > NumInputElts)
2435 return false;
2436 
2437 StartIndexes[I] = StartMask;
2438 }
2439 
2440 return true;
2441}
2442
// NOTE(review): the first line of this declaration (presumably
// `bool ShuffleVectorInst::isDeInterleaveMaskOfFactor(ArrayRef<int> Mask,`)
// is missing from this extract.
2443/// Check if the mask is a DE-interleave mask of the given factor
2444/// \p Factor like:
2445/// <Index, Index+Factor, ..., Index+(NumElts-1)*Factor>
2447 unsigned Factor,
2448 unsigned &Index) {
2449 // Check all potential start indices from 0 to (Factor - 1).
2450 for (unsigned Idx = 0; Idx < Factor; Idx++) {
2451 unsigned I = 0;
2452 
2453 // Check that elements are in ascending order by Factor. Ignore undef
2454 // elements.
2455 for (; I < Mask.size(); I++)
2456 if (Mask[I] >= 0 && static_cast<unsigned>(Mask[I]) != Idx + I * Factor)
2457 break;
2458 
// A full scan without mismatch means Idx is the de-interleave start index.
2459 if (I == Mask.size()) {
2460 Index = Idx;
2461 return true;
2462 }
2463 }
2464 
2465 return false;
2466}
2467
2468/// Try to lower a vector shuffle as a bit rotation.
2469///
2470/// Look for a repeated rotation pattern in each sub group.
2471/// Returns an element-wise left bit rotation amount or -1 if failed.
2472static int matchShuffleAsBitRotate(ArrayRef<int> Mask, int NumSubElts) {
2473 int NumElts = Mask.size();
2474 assert((NumElts % NumSubElts) == 0 && "Illegal shuffle mask");
2475
2476 int RotateAmt = -1;
2477 for (int i = 0; i != NumElts; i += NumSubElts) {
2478 for (int j = 0; j != NumSubElts; ++j) {
2479 int M = Mask[i + j];
2480 if (M < 0)
2481 continue;
2482 if (M < i || M >= i + NumSubElts)
2483 return -1;
2484 int Offset = (NumSubElts - (M - (i + j))) % NumSubElts;
2485 if (0 <= RotateAmt && Offset != RotateAmt)
2486 return -1;
2487 RotateAmt = Offset;
2488 }
2489 }
2490 return RotateAmt;
2491}
2492
// NOTE(review): the first line of this declaration (presumably
// `bool ShuffleVectorInst::isBitRotateMask(`) is missing from this extract.
// Searches power-of-two sub-group widths in [MinSubElts, MaxSubElts] for a
// matching element rotation and converts it to a bit-rotation amount.
2494 ArrayRef<int> Mask, unsigned EltSizeInBits, unsigned MinSubElts,
2495 unsigned MaxSubElts, unsigned &NumSubElts, unsigned &RotateAmt) {
2496 for (NumSubElts = MinSubElts; NumSubElts <= MaxSubElts; NumSubElts *= 2) {
2497 int EltRotateAmt = matchShuffleAsBitRotate(Mask, NumSubElts);
2498 if (EltRotateAmt < 0)
2499 continue;
// Scale the element rotation into bits for the caller.
2500 RotateAmt = EltRotateAmt * EltSizeInBits;
2501 return true;
2502 }
2503 
2504 return false;
2505}
2506
2507//===----------------------------------------------------------------------===//
2508// InsertValueInst Class
2509//===----------------------------------------------------------------------===//
2510
// NOTE(review): the first line of the type-check assert (presumably
// `assert(ExtractValueInst::getIndexedType(Agg->getType(), Idxs) ==`) is
// missing from this extract; TODO restore from upstream.
2511void InsertValueInst::init(Value *Agg, Value *Val, ArrayRef<unsigned> Idxs,
2512 const Twine &Name) {
2513 assert(getNumOperands() == 2 && "NumOperands not initialized?");
2514 
2515 // There's no fundamental reason why we require at least one index
2516 // (other than weirdness with &*IdxBegin being invalid; see
2517 // getelementptr's init routine for example). But there's no
2518 // present need to support it.
2519 assert(!Idxs.empty() && "InsertValueInst must have at least one index");
2520 
2522 Val->getType() && "Inserted value must match indexed type!");
2523 Op<0>() = Agg;
2524 Op<1>() = Val;
2525 
2526 Indices.append(Idxs.begin(), Idxs.end());
2527 setName(Name);
2528}
2529
// Copy constructor: clones operands and index list.
// NOTE(review): a trailing line before the closing brace (presumably
// `SubclassOptionalData = IVI.SubclassOptionalData;`) is missing from this
// extract; TODO confirm against upstream.
2530InsertValueInst::InsertValueInst(const InsertValueInst &IVI)
2531 : Instruction(IVI.getType(), InsertValue, AllocMarker),
2532 Indices(IVI.Indices) {
2533 Op<0>() = IVI.getOperand(0);
2534 Op<1>() = IVI.getOperand(1);
2536}
2537
2538//===----------------------------------------------------------------------===//
2539// ExtractValueInst Class
2540//===----------------------------------------------------------------------===//
2541
2542void ExtractValueInst::init(ArrayRef<unsigned> Idxs, const Twine &Name) {
2543 assert(getNumOperands() == 1 && "NumOperands not initialized?");
2544
2545 // There's no fundamental reason why we require at least one index.
2546 // But there's no present need to support it.
2547 assert(!Idxs.empty() && "ExtractValueInst must have at least one index");
2548
2549 Indices.append(Idxs.begin(), Idxs.end());
2550 setName(Name);
2551}
2552
// Copy constructor: clones the aggregate operand and index list.
// NOTE(review): a trailing line before the closing brace (presumably
// `SubclassOptionalData = EVI.SubclassOptionalData;`) is missing from this
// extract; TODO confirm against upstream.
2553ExtractValueInst::ExtractValueInst(const ExtractValueInst &EVI)
2554 : UnaryInstruction(EVI.getType(), ExtractValue, EVI.getOperand(0),
2555 (BasicBlock *)nullptr),
2556 Indices(EVI.Indices) {
2558}
2559
// NOTE(review): the first line of this declaration (presumably
// `Type *ExtractValueInst::getIndexedType(Type *Agg,`) is missing from this
// extract.
2560// getIndexedType - Returns the type of the element that would be extracted
2561// with an extractvalue instruction with the specified parameters.
2562//
2563// A null type is returned if the indices are invalid for the specified
2564// pointer type.
2565//
2567 ArrayRef<unsigned> Idxs) {
// Walk each index, stepping Agg down one aggregate level per iteration.
2568 for (unsigned Index : Idxs) {
2569 // We can't use CompositeType::indexValid(Index) here.
2570 // indexValid() always returns true for arrays because getelementptr allows
2571 // out-of-bounds indices. Since we don't allow those for extractvalue and
2572 // insertvalue we need to check array indexing manually.
2573 // Since the only other types we can index into are struct types it's just
2574 // as easy to check those manually as well.
2575 if (ArrayType *AT = dyn_cast<ArrayType>(Agg)) {
2576 if (Index >= AT->getNumElements())
2577 return nullptr;
2578 Agg = AT->getElementType();
2579 } else if (StructType *ST = dyn_cast<StructType>(Agg)) {
2580 if (Index >= ST->getNumElements())
2581 return nullptr;
2582 Agg = ST->getElementType(Index);
2583 } else {
2584 // Not a valid type to index into.
2585 return nullptr;
2586 }
2587 }
2588 return Agg;
2589}
2590
2591//===----------------------------------------------------------------------===//
2592// UnaryOperator Class
2593//===----------------------------------------------------------------------===//
2594
2596 const Twine &Name, InsertPosition InsertBefore)
2597 : UnaryInstruction(Ty, iType, S, InsertBefore) {
2598 Op<0>() = S;
2599 setName(Name);
2600 AssertOK();
2601}
2602
// Factory: result type is taken from the operand's type.
// NOTE(review): the first line of this declaration (presumably
// `UnaryOperator *UnaryOperator::Create(UnaryOps Op, Value *S, const Twine &Name,`)
// is missing from this extract.
2604 InsertPosition InsertBefore) {
2605 return new UnaryOperator(Op, S, S->getType(), Name, InsertBefore);
2606}
2607
2608void UnaryOperator::AssertOK() {
2609 Value *LHS = getOperand(0);
2610 (void)LHS; // Silence warnings.
2611#ifndef NDEBUG
2612 switch (getOpcode()) {
2613 case FNeg:
2614 assert(getType() == LHS->getType() &&
2615 "Unary operation should return same type as operand!");
2616 assert(getType()->isFPOrFPVectorTy() &&
2617 "Tried to create a floating-point operation on a "
2618 "non-floating-point type!");
2619 break;
2620 default: llvm_unreachable("Invalid opcode provided");
2621 }
2622#endif
2623}
2624
2625//===----------------------------------------------------------------------===//
2626// BinaryOperator Class
2627//===----------------------------------------------------------------------===//
2628
// Constructor: sets both operands, names the result and sanity-checks.
// NOTE(review): the first line of this declaration (presumably
// `BinaryOperator::BinaryOperator(BinaryOps iType, Value *S1, Value *S2, Type *Ty,`)
// is missing from this extract.
2630 const Twine &Name, InsertPosition InsertBefore)
2631 : Instruction(Ty, iType, AllocMarker, InsertBefore) {
2632 Op<0>() = S1;
2633 Op<1>() = S2;
2634 setName(Name);
2635 AssertOK();
2636}
2637
2638void BinaryOperator::AssertOK() {
2639 Value *LHS = getOperand(0), *RHS = getOperand(1);
2640 (void)LHS; (void)RHS; // Silence warnings.
2641 assert(LHS->getType() == RHS->getType() &&
2642 "Binary operator operand types must match!");
2643#ifndef NDEBUG
2644 switch (getOpcode()) {
2645 case Add: case Sub:
2646 case Mul:
2647 assert(getType() == LHS->getType() &&
2648 "Arithmetic operation should return same type as operands!");
2649 assert(getType()->isIntOrIntVectorTy() &&
2650 "Tried to create an integer operation on a non-integer type!");
2651 break;
2652 case FAdd: case FSub:
2653 case FMul:
2654 assert(getType() == LHS->getType() &&
2655 "Arithmetic operation should return same type as operands!");
2656 assert(getType()->isFPOrFPVectorTy() &&
2657 "Tried to create a floating-point operation on a "
2658 "non-floating-point type!");
2659 break;
2660 case UDiv:
2661 case SDiv:
2662 assert(getType() == LHS->getType() &&
2663 "Arithmetic operation should return same type as operands!");
2664 assert(getType()->isIntOrIntVectorTy() &&
2665 "Incorrect operand type (not integer) for S/UDIV");
2666 break;
2667 case FDiv:
2668 assert(getType() == LHS->getType() &&
2669 "Arithmetic operation should return same type as operands!");
2670 assert(getType()->isFPOrFPVectorTy() &&
2671 "Incorrect operand type (not floating point) for FDIV");
2672 break;
2673 case URem:
2674 case SRem:
2675 assert(getType() == LHS->getType() &&
2676 "Arithmetic operation should return same type as operands!");
2677 assert(getType()->isIntOrIntVectorTy() &&
2678 "Incorrect operand type (not integer) for S/UREM");
2679 break;
2680 case FRem:
2681 assert(getType() == LHS->getType() &&
2682 "Arithmetic operation should return same type as operands!");
2683 assert(getType()->isFPOrFPVectorTy() &&
2684 "Incorrect operand type (not floating point) for FREM");
2685 break;
2686 case Shl:
2687 case LShr:
2688 case AShr:
2689 assert(getType() == LHS->getType() &&
2690 "Shift operation should return same type as operands!");
2691 assert(getType()->isIntOrIntVectorTy() &&
2692 "Tried to create a shift operation on a non-integral type!");
2693 break;
2694 case And: case Or:
2695 case Xor:
2696 assert(getType() == LHS->getType() &&
2697 "Logical operation should return same type as operands!");
2698 assert(getType()->isIntOrIntVectorTy() &&
2699 "Tried to create a logical operation on a non-integral type!");
2700 break;
2701 default: llvm_unreachable("Invalid opcode provided");
2702 }
2703#endif
2704}
2705
// Factory: result type is taken from the (matching) operand types.
// NOTE(review): the first line of this declaration (presumably
// `BinaryOperator *BinaryOperator::Create(BinaryOps Op, Value *S1, Value *S2,`)
// is missing from this extract.
2707 const Twine &Name,
2708 InsertPosition InsertBefore) {
2709 assert(S1->getType() == S2->getType() &&
2710 "Cannot create binary operator with two operands of differing type!");
2711 return new BinaryOperator(Op, S1, S2, S1->getType(), Name, InsertBefore);
2712}
2713
// Integer negation is built as `sub 0, Op`.
// NOTE(review): the first line of this declaration (presumably
// `BinaryOperator *BinaryOperator::CreateNeg(Value *Op, const Twine &Name,`)
// is missing from this extract.
2715 InsertPosition InsertBefore) {
2716 Value *Zero = ConstantInt::get(Op->getType(), 0);
2717 return new BinaryOperator(Instruction::Sub, Zero, Op, Op->getType(), Name,
2718 InsertBefore);
2719}
2720
// NSW negation is built as `sub nsw 0, Op`.
// NOTE(review): the first line of this declaration (presumably
// `BinaryOperator *BinaryOperator::CreateNSWNeg(Value *Op, const Twine &Name,`)
// is missing from this extract.
2722 InsertPosition InsertBefore) {
2723 Value *Zero = ConstantInt::get(Op->getType(), 0);
2724 return BinaryOperator::CreateNSWSub(Zero, Op, Name, InsertBefore);
2725}
2726
// Bitwise NOT is built as `xor Op, -1`.
// NOTE(review): the first line of this declaration (presumably
// `BinaryOperator *BinaryOperator::CreateNot(Value *Op, const Twine &Name,`)
// is missing from this extract.
2728 InsertPosition InsertBefore) {
2729 Constant *C = Constant::getAllOnesValue(Op->getType());
2730 return new BinaryOperator(Instruction::Xor, Op, C,
2731 Op->getType(), Name, InsertBefore);
2732}
2733
// NOTE(review): the declaration line (presumably
// `bool BinaryOperator::swapOperands() {`) is missing from this extract.
// Note the inverted convention: returns true on FAILURE (non-commutative
// opcode), false on success.
2734// Exchange the two operands to this instruction. This instruction is safe to
2735// use on any binary instruction and does not modify the semantics of the
2736// instruction.
2738 if (!isCommutative())
2739 return true; // Can't commute operands
2740 Op<0>().swap(Op<1>());
2741 return false;
2742}
2743
2744//===----------------------------------------------------------------------===//
2745// FPMathOperator Class
2746//===----------------------------------------------------------------------===//
2747
// Returns the requested floating-point accuracy from !fpmath metadata, or
// 0.0 when no such metadata is attached.
// NOTE(review): the declaration line (presumably
// `float FPMathOperator::getFPAccuracy() const {`) and the line extracting
// `Accuracy` from the metadata are missing from this extract; TODO restore
// from upstream.
2749 const MDNode *MD =
2750 cast<Instruction>(this)->getMetadata(LLVMContext::MD_fpmath);
2751 if (!MD)
2752 return 0.0;
2754 return Accuracy->getValueAPF().convertToFloat();
2755}
2756
2757//===----------------------------------------------------------------------===//
2758// CastInst Class
2759//===----------------------------------------------------------------------===//
2760
// NOTE(review): the declaration line (presumably
// `bool CastInst::isIntegerCast() const {`) is missing from this extract.
2761// Just determine if this cast only deals with integral->integral conversion.
2763 switch (getOpcode()) {
2764 default: return false;
2765 case Instruction::ZExt:
2766 case Instruction::SExt:
2767 case Instruction::Trunc:
2768 return true;
// A bitcast counts only when both sides are (scalar) integers.
2769 case Instruction::BitCast:
2770 return getOperand(0)->getType()->isIntegerTy() &&
2771 getType()->isIntegerTy();
2772 }
2773}
2774
// NOTE(review): the first line of this declaration (presumably
// `bool CastInst::isNoopCast(Instruction::CastOps Opcode,`) is missing from
// this extract.
2775/// This function determines if the CastInst does not require any bits to be
2776/// changed in order to effect the cast. Essentially, it identifies cases where
2777/// no code gen is necessary for the cast, hence the name no-op cast. For
2778/// example, the following are all no-op casts:
2779/// # bitcast i32* %x to i8*
2780/// # bitcast <2 x i32> %x to <4 x i16>
2781/// # ptrtoint i32* %x to i32 ; on 32-bit platforms only
2782/// Determine if the described cast is a no-op.
2784 Type *SrcTy,
2785 Type *DestTy,
2786 const DataLayout &DL) {
2787 assert(castIsValid(Opcode, SrcTy, DestTy) && "method precondition");
2788 switch (Opcode) {
2789 default: llvm_unreachable("Invalid CastOp");
2790 case Instruction::Trunc:
2791 case Instruction::ZExt:
2792 case Instruction::SExt:
2793 case Instruction::FPTrunc:
2794 case Instruction::FPExt:
2795 case Instruction::UIToFP:
2796 case Instruction::SIToFP:
2797 case Instruction::FPToUI:
2798 case Instruction::FPToSI:
2799 case Instruction::AddrSpaceCast:
2800 // TODO: Target informations may give a more accurate answer here.
2801 return false;
2802 case Instruction::BitCast:
2803 return true; // BitCast never modifies bits.
// Pointer<->integer conversions are no-ops only when the integer width
// matches the pointer width given by the data layout.
2804 case Instruction::PtrToAddr:
2805 case Instruction::PtrToInt:
2806 return DL.getIntPtrType(SrcTy)->getScalarSizeInBits() ==
2807 DestTy->getScalarSizeInBits();
2808 case Instruction::IntToPtr:
2809 return DL.getIntPtrType(DestTy)->getScalarSizeInBits() ==
2810 SrcTy->getScalarSizeInBits();
2811 }
2812}
2813
// Member convenience overload forwarding to the static isNoopCast above.
// NOTE(review): the declaration line (presumably
// `bool CastInst::isNoopCast(const DataLayout &DL) const {`) is missing from
// this extract.
2815 return isNoopCast(getOpcode(), getOperand(0)->getType(), getType(), DL);
2816}
2817
// NOTE(review): the first line of this declaration (presumably
// `unsigned CastInst::isEliminableCastPair(Instruction::CastOps firstOp,`) is
// missing from this extract.
2818/// This function determines if a pair of casts can be eliminated and what
2819/// opcode should be used in the elimination. This assumes that there are two
2820/// instructions like this:
2821/// * %F = firstOpcode SrcTy %x to MidTy
2822/// * %S = secondOpcode MidTy %F to DstTy
2823/// The function returns a resultOpcode so these two casts can be replaced with:
2824/// * %Replacement = resultOpcode %SrcTy %x to DstTy
2825/// If no such cast is permitted, the function returns 0.
2827 Instruction::CastOps secondOp,
2828 Type *SrcTy, Type *MidTy, Type *DstTy,
2829 const DataLayout *DL) {
2830 // Define the 144 possibilities for these two cast instructions. The values
2831 // in this matrix determine what to do in a given situation and select the
2832 // case in the switch below. The rows correspond to firstOp, the columns
2833 // correspond to secondOp. In looking at the table below, keep in mind
2834 // the following cast properties:
2835 //
2836 // Size Compare Source Destination
2837 // Operator Src ? Size Type Sign Type Sign
2838 // -------- ------------ ------------------- ---------------------
2839 // TRUNC > Integer Any Integral Any
2840 // ZEXT < Integral Unsigned Integer Any
2841 // SEXT < Integral Signed Integer Any
2842 // FPTOUI n/a FloatPt n/a Integral Unsigned
2843 // FPTOSI n/a FloatPt n/a Integral Signed
2844 // UITOFP n/a Integral Unsigned FloatPt n/a
2845 // SITOFP n/a Integral Signed FloatPt n/a
2846 // FPTRUNC > FloatPt n/a FloatPt n/a
2847 // FPEXT < FloatPt n/a FloatPt n/a
2848 // PTRTOINT n/a Pointer n/a Integral Unsigned
2849 // PTRTOADDR n/a Pointer n/a Integral Unsigned
2850 // INTTOPTR n/a Integral Unsigned Pointer n/a
2851 // BITCAST = FirstClass n/a FirstClass n/a
2852 // ADDRSPCST n/a Pointer n/a Pointer n/a
2853 //
2854 // NOTE: some transforms are safe, but we consider them to be non-profitable.
2855 // For example, we could merge "fptoui double to i32" + "zext i32 to i64",
2856 // into "fptoui double to i64", but this loses information about the range
2857 // of the produced value (we no longer know the top-part is all zeros).
2858 // Further this conversion is often much more expensive for typical hardware,
2859 // and causes issues when building libgcc. We disallow fptosi+sext for the
2860 // same reason.
2861 const unsigned numCastOps =
2862 Instruction::CastOpsEnd - Instruction::CastOpsBegin;
2863 // clang-format off
2864 static const uint8_t CastResults[numCastOps][numCastOps] = {
2865 // T F F U S F F P P I B A -+
2866 // R Z S P P I I T P 2 2 N T S |
2867 // U E E 2 2 2 2 R E I A T C C +- secondOp
2868 // N X X U S F F N X N D 2 V V |
2869 // C T T I I P P C T T R P T T -+
2870 { 1, 0, 0,99,99, 0, 0,99,99,99,99, 0, 3, 0}, // Trunc -+
2871 { 8, 1, 9,99,99, 2,17,99,99,99,99, 2, 3, 0}, // ZExt |
2872 { 8, 0, 1,99,99, 0, 2,99,99,99,99, 0, 3, 0}, // SExt |
2873 { 0, 0, 0,99,99, 0, 0,99,99,99,99, 0, 3, 0}, // FPToUI |
2874 { 0, 0, 0,99,99, 0, 0,99,99,99,99, 0, 3, 0}, // FPToSI |
2875 { 99,99,99, 0, 0,99,99, 0, 0,99,99,99, 4, 0}, // UIToFP +- firstOp
2876 { 99,99,99, 0, 0,99,99, 0, 0,99,99,99, 4, 0}, // SIToFP |
2877 { 99,99,99, 0, 0,99,99, 0, 0,99,99,99, 4, 0}, // FPTrunc |
2878 { 99,99,99, 2, 2,99,99, 8, 2,99,99,99, 4, 0}, // FPExt |
2879 { 1, 0, 0,99,99, 0, 0,99,99,99,99, 7, 3, 0}, // PtrToInt |
2880 { 0, 0, 0,99,99, 0, 0,99,99,99,99, 0, 3, 0}, // PtrToAddr |
2881 { 99,99,99,99,99,99,99,99,99,11,11,99,15, 0}, // IntToPtr |
2882 { 5, 5, 5, 0, 0, 5, 5, 0, 0,16,16, 5, 1,14}, // BitCast |
2883 { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,13,12}, // AddrSpaceCast -+
2884 };
2885 // clang-format on
2886 
2887 // TODO: This logic could be encoded into the table above and handled in the
2888 // switch below.
2889 // If either of the casts are a bitcast from scalar to vector, disallow the
2890 // merging. However, any pair of bitcasts are allowed.
2891 bool IsFirstBitcast = (firstOp == Instruction::BitCast);
2892 bool IsSecondBitcast = (secondOp == Instruction::BitCast);
2893 bool AreBothBitcasts = IsFirstBitcast && IsSecondBitcast;
2894 
2895 // Check if any of the casts convert scalars <-> vectors.
2896 if ((IsFirstBitcast && isa<VectorType>(SrcTy) != isa<VectorType>(MidTy)) ||
2897 (IsSecondBitcast && isa<VectorType>(MidTy) != isa<VectorType>(DstTy)))
2898 if (!AreBothBitcasts)
2899 return 0;
2900 
2901 int ElimCase = CastResults[firstOp-Instruction::CastOpsBegin]
2902 [secondOp-Instruction::CastOpsBegin];
2903 switch (ElimCase) {
2904 case 0:
2905 // Categorically disallowed.
2906 return 0;
2907 case 1:
2908 // Allowed, use first cast's opcode.
2909 return firstOp;
2910 case 2:
2911 // Allowed, use second cast's opcode.
2912 return secondOp;
2913 case 3:
2914 // No-op cast in second op implies firstOp as long as the DestTy
2915 // is integer and we are not converting between a vector and a
2916 // non-vector type.
2917 if (!SrcTy->isVectorTy() && DstTy->isIntegerTy())
2918 return firstOp;
2919 return 0;
2920 case 4:
2921 // No-op cast in second op implies firstOp as long as the DestTy
2922 // matches MidTy.
2923 if (DstTy == MidTy)
2924 return firstOp;
2925 return 0;
2926 case 5:
2927 // No-op cast in first op implies secondOp as long as the SrcTy
2928 // is an integer.
2929 if (SrcTy->isIntegerTy())
2930 return secondOp;
2931 return 0;
2932 case 7: {
2933 // Disable inttoptr/ptrtoint optimization if enabled.
2934 if (DisableI2pP2iOpt)
2935 return 0;
2936 
2937 // Cannot simplify if address spaces are different!
2938 if (SrcTy != DstTy)
2939 return 0;
2940 
2941 // Cannot simplify if the intermediate integer size is smaller than the
2942 // pointer size.
2943 unsigned MidSize = MidTy->getScalarSizeInBits();
2944 if (!DL || MidSize < DL->getPointerTypeSizeInBits(SrcTy))
2945 return 0;
2946 
2947 return Instruction::BitCast;
2948 }
2949 case 8: {
2950 // ext, trunc -> bitcast, if the SrcTy and DstTy are the same
2951 // ext, trunc -> ext, if sizeof(SrcTy) < sizeof(DstTy)
2952 // ext, trunc -> trunc, if sizeof(SrcTy) > sizeof(DstTy)
2953 unsigned SrcSize = SrcTy->getScalarSizeInBits();
2954 unsigned DstSize = DstTy->getScalarSizeInBits();
2955 if (SrcTy == DstTy)
2956 return Instruction::BitCast;
2957 if (SrcSize < DstSize)
2958 return firstOp;
2959 if (SrcSize > DstSize)
2960 return secondOp;
2961 return 0;
2962 }
2963 case 9:
2964 // zext, sext -> zext, because sext can't sign extend after zext
2965 return Instruction::ZExt;
2966 case 11: {
2967 // inttoptr, ptrtoint/ptrtoaddr -> integer cast
2968 if (!DL)
2969 return 0;
2970 unsigned MidSize = secondOp == Instruction::PtrToAddr
2971 ? DL->getAddressSizeInBits(MidTy)
2972 : DL->getPointerTypeSizeInBits(MidTy);
2973 unsigned SrcSize = SrcTy->getScalarSizeInBits();
2974 unsigned DstSize = DstTy->getScalarSizeInBits();
2975 // If the middle size is smaller than both source and destination,
2976 // an additional masking operation would be required.
2977 if (MidSize < SrcSize && MidSize < DstSize)
2978 return 0;
2979 if (DstSize < SrcSize)
2980 return Instruction::Trunc;
2981 if (DstSize > SrcSize)
2982 return Instruction::ZExt;
2983 return Instruction::BitCast;
2984 }
2985 case 12:
2986 // addrspacecast, addrspacecast -> bitcast, if SrcAS == DstAS
2987 // addrspacecast, addrspacecast -> addrspacecast, if SrcAS != DstAS
2988 if (SrcTy->getPointerAddressSpace() != DstTy->getPointerAddressSpace())
2989 return Instruction::AddrSpaceCast;
2990 return Instruction::BitCast;
2991 case 13:
2992 // FIXME: this state can be merged with (1), but the following assert
2993 // is useful to check the correctness of the sequence due to semantic
2994 // change of bitcast.
2995 assert(
2996 SrcTy->isPtrOrPtrVectorTy() &&
2997 MidTy->isPtrOrPtrVectorTy() &&
2998 DstTy->isPtrOrPtrVectorTy() &&
2999 SrcTy->getPointerAddressSpace() != MidTy->getPointerAddressSpace() &&
3000 MidTy->getPointerAddressSpace() == DstTy->getPointerAddressSpace() &&
3001 "Illegal addrspacecast, bitcast sequence!");
3002 // Allowed, use first cast's opcode
3003 return firstOp;
3004 case 14:
3005 // bitcast, addrspacecast -> addrspacecast
3006 return Instruction::AddrSpaceCast;
3007 case 15:
3008 // FIXME: this state can be merged with (1), but the following assert
3009 // is useful to check the correctness of the sequence due to semantic
3010 // change of bitcast.
3011 assert(
3012 SrcTy->isIntOrIntVectorTy() &&
3013 MidTy->isPtrOrPtrVectorTy() &&
3014 DstTy->isPtrOrPtrVectorTy() &&
3015 MidTy->getPointerAddressSpace() == DstTy->getPointerAddressSpace() &&
3016 "Illegal inttoptr, bitcast sequence!");
3017 // Allowed, use first cast's opcode
3018 return firstOp;
3019 case 16:
3020 // FIXME: this state can be merged with (2), but the following assert
3021 // is useful to check the correctness of the sequence due to semantic
3022 // change of bitcast.
3023 assert(
3024 SrcTy->isPtrOrPtrVectorTy() &&
3025 MidTy->isPtrOrPtrVectorTy() &&
3026 DstTy->isIntOrIntVectorTy() &&
3027 SrcTy->getPointerAddressSpace() == MidTy->getPointerAddressSpace() &&
3028 "Illegal bitcast, ptrtoint sequence!");
3029 // Allowed, use second cast's opcode
3030 return secondOp;
3031 case 17:
3032 // (sitofp (zext x)) -> (uitofp x)
3033 return Instruction::UIToFP;
3034 case 99:
3035 // Cast combination can't happen (error in input). This is for all cases
3036 // where the MidTy is not the same for the two cast instructions.
3037 llvm_unreachable("Invalid Cast Combination");
3038 default:
3039 llvm_unreachable("Error in CastResults table!!!");
3040 }
3041}
3042
// Factory dispatching on the cast opcode to the matching CastInst subclass.
// NOTE(review): the first line of this declaration (presumably
// `CastInst *CastInst::Create(Instruction::CastOps op, Value *S, Type *Ty,`)
// is missing from this extract.
3044 const Twine &Name, InsertPosition InsertBefore) {
3045 assert(castIsValid(op, S, Ty) && "Invalid cast!");
3046 // Construct and return the appropriate CastInst subclass
3047 switch (op) {
3048 case Trunc: return new TruncInst (S, Ty, Name, InsertBefore);
3049 case ZExt: return new ZExtInst (S, Ty, Name, InsertBefore);
3050 case SExt: return new SExtInst (S, Ty, Name, InsertBefore);
3051 case FPTrunc: return new FPTruncInst (S, Ty, Name, InsertBefore);
3052 case FPExt: return new FPExtInst (S, Ty, Name, InsertBefore);
3053 case UIToFP: return new UIToFPInst (S, Ty, Name, InsertBefore);
3054 case SIToFP: return new SIToFPInst (S, Ty, Name, InsertBefore);
3055 case FPToUI: return new FPToUIInst (S, Ty, Name, InsertBefore);
3056 case FPToSI: return new FPToSIInst (S, Ty, Name, InsertBefore);
3057 case PtrToAddr: return new PtrToAddrInst (S, Ty, Name, InsertBefore);
3058 case PtrToInt: return new PtrToIntInst (S, Ty, Name, InsertBefore);
3059 case IntToPtr: return new IntToPtrInst (S, Ty, Name, InsertBefore);
3060 case BitCast:
3061 return new BitCastInst(S, Ty, Name, InsertBefore);
3062 case AddrSpaceCast:
3063 return new AddrSpaceCastInst(S, Ty, Name, InsertBefore);
3064 default:
3065 llvm_unreachable("Invalid opcode provided");
3066 }
3067}
3068
3070 InsertPosition InsertBefore) {
3071 if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
3072 return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
3073 return Create(Instruction::ZExt, S, Ty, Name, InsertBefore);
3074}
3075
3077 InsertPosition InsertBefore) {
3078 if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
3079 return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
3080 return Create(Instruction::SExt, S, Ty, Name, InsertBefore);
3081}
3082
3084 InsertPosition InsertBefore) {
3085 if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
3086 return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
3087 return Create(Instruction::Trunc, S, Ty, Name, InsertBefore);
3088}
3089
3090/// Create a BitCast or a PtrToInt cast instruction
3092 InsertPosition InsertBefore) {
3093 assert(S->getType()->isPtrOrPtrVectorTy() && "Invalid cast");
3094 assert((Ty->isIntOrIntVectorTy() || Ty->isPtrOrPtrVectorTy()) &&
3095 "Invalid cast");
3096 assert(Ty->isVectorTy() == S->getType()->isVectorTy() && "Invalid cast");
3097 assert((!Ty->isVectorTy() ||
3098 cast<VectorType>(Ty)->getElementCount() ==
3099 cast<VectorType>(S->getType())->getElementCount()) &&
3100 "Invalid cast");
3101
3102 if (Ty->isIntOrIntVectorTy())
3103 return Create(Instruction::PtrToInt, S, Ty, Name, InsertBefore);
3104
3105 return CreatePointerBitCastOrAddrSpaceCast(S, Ty, Name, InsertBefore);
3106}
3107
3109 Value *S, Type *Ty, const Twine &Name, InsertPosition InsertBefore) {
3110 assert(S->getType()->isPtrOrPtrVectorTy() && "Invalid cast");
3111 assert(Ty->isPtrOrPtrVectorTy() && "Invalid cast");
3112
3113 if (S->getType()->getPointerAddressSpace() != Ty->getPointerAddressSpace())
3114 return Create(Instruction::AddrSpaceCast, S, Ty, Name, InsertBefore);
3115
3116 return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
3117}
3118
3120 const Twine &Name,
3121 InsertPosition InsertBefore) {
3122 if (S->getType()->isPointerTy() && Ty->isIntegerTy())
3123 return Create(Instruction::PtrToInt, S, Ty, Name, InsertBefore);
3124 if (S->getType()->isIntegerTy() && Ty->isPointerTy())
3125 return Create(Instruction::IntToPtr, S, Ty, Name, InsertBefore);
3126
3127 return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
3128}
3129
3131 const Twine &Name,
3132 InsertPosition InsertBefore) {
3133 assert(C->getType()->isIntOrIntVectorTy() && Ty->isIntOrIntVectorTy() &&
3134 "Invalid integer cast");
3135 unsigned SrcBits = C->getType()->getScalarSizeInBits();
3136 unsigned DstBits = Ty->getScalarSizeInBits();
3137 Instruction::CastOps opcode =
3138 (SrcBits == DstBits ? Instruction::BitCast :
3139 (SrcBits > DstBits ? Instruction::Trunc :
3140 (isSigned ? Instruction::SExt : Instruction::ZExt)));
3141 return Create(opcode, C, Ty, Name, InsertBefore);
3142}
3143
3145 InsertPosition InsertBefore) {
3146 assert(C->getType()->isFPOrFPVectorTy() && Ty->isFPOrFPVectorTy() &&
3147 "Invalid cast");
3148 unsigned SrcBits = C->getType()->getScalarSizeInBits();
3149 unsigned DstBits = Ty->getScalarSizeInBits();
3150 assert((C->getType() == Ty || SrcBits != DstBits) && "Invalid cast");
3151 Instruction::CastOps opcode =
3152 (SrcBits == DstBits ? Instruction::BitCast :
3153 (SrcBits > DstBits ? Instruction::FPTrunc : Instruction::FPExt));
3154 return Create(opcode, C, Ty, Name, InsertBefore);
3155}
3156
3157bool CastInst::isBitCastable(Type *SrcTy, Type *DestTy) {
3158 if (!SrcTy->isFirstClassType() || !DestTy->isFirstClassType())
3159 return false;
3160
3161 if (SrcTy == DestTy)
3162 return true;
3163
3164 if (VectorType *SrcVecTy = dyn_cast<VectorType>(SrcTy)) {
3165 if (VectorType *DestVecTy = dyn_cast<VectorType>(DestTy)) {
3166 if (SrcVecTy->getElementCount() == DestVecTy->getElementCount()) {
3167 // An element by element cast. Valid if casting the elements is valid.
3168 SrcTy = SrcVecTy->getElementType();
3169 DestTy = DestVecTy->getElementType();
3170 }
3171 }
3172 }
3173
3174 if (PointerType *DestPtrTy = dyn_cast<PointerType>(DestTy)) {
3175 if (PointerType *SrcPtrTy = dyn_cast<PointerType>(SrcTy)) {
3176 return SrcPtrTy->getAddressSpace() == DestPtrTy->getAddressSpace();
3177 }
3178 }
3179
3180 TypeSize SrcBits = SrcTy->getPrimitiveSizeInBits(); // 0 for ptr
3181 TypeSize DestBits = DestTy->getPrimitiveSizeInBits(); // 0 for ptr
3182
3183 // Could still have vectors of pointers if the number of elements doesn't
3184 // match
3185 if (SrcBits.getKnownMinValue() == 0 || DestBits.getKnownMinValue() == 0)
3186 return false;
3187
3188 if (SrcBits != DestBits)
3189 return false;
3190
3191 return true;
3192}
3193
3195 const DataLayout &DL) {
3196 // ptrtoint and inttoptr are not allowed on non-integral pointers
3197 if (auto *PtrTy = dyn_cast<PointerType>(SrcTy))
3198 if (auto *IntTy = dyn_cast<IntegerType>(DestTy))
3199 return (IntTy->getBitWidth() == DL.getPointerTypeSizeInBits(PtrTy) &&
3200 !DL.isNonIntegralPointerType(PtrTy));
3201 if (auto *PtrTy = dyn_cast<PointerType>(DestTy))
3202 if (auto *IntTy = dyn_cast<IntegerType>(SrcTy))
3203 return (IntTy->getBitWidth() == DL.getPointerTypeSizeInBits(PtrTy) &&
3204 !DL.isNonIntegralPointerType(PtrTy));
3205
3206 return isBitCastable(SrcTy, DestTy);
3207}
3208
3209// Provide a way to get a "cast" where the cast opcode is inferred from the
3210// types and size of the operand. This, basically, is a parallel of the
3211// logic in the castIsValid function below. This axiom should hold:
3212// castIsValid( getCastOpcode(Val, Ty), Val, Ty)
3213// should not assert in castIsValid. In other words, this produces a "correct"
3214// casting opcode for the arguments passed to it.
3217 const Value *Src, bool SrcIsSigned, Type *DestTy, bool DestIsSigned) {
3218 Type *SrcTy = Src->getType();
3219
3220 assert(SrcTy->isFirstClassType() && DestTy->isFirstClassType() &&
3221 "Only first class types are castable!");
3222
3223 if (SrcTy == DestTy)
3224 return BitCast;
3225
3226 // FIXME: Check address space sizes here
3227 if (VectorType *SrcVecTy = dyn_cast<VectorType>(SrcTy))
3228 if (VectorType *DestVecTy = dyn_cast<VectorType>(DestTy))
3229 if (SrcVecTy->getElementCount() == DestVecTy->getElementCount()) {
3230 // An element by element cast. Find the appropriate opcode based on the
3231 // element types.
3232 SrcTy = SrcVecTy->getElementType();
3233 DestTy = DestVecTy->getElementType();
3234 }
3235
3236 // Get the bit sizes, we'll need these
3237 // FIXME: This doesn't work for scalable vector types with different element
3238 // counts that don't call getElementType above.
3239 unsigned SrcBits =
3240 SrcTy->getPrimitiveSizeInBits().getFixedValue(); // 0 for ptr
3241 unsigned DestBits =
3242 DestTy->getPrimitiveSizeInBits().getFixedValue(); // 0 for ptr
3243
3244 // Run through the possibilities ...
3245 if (DestTy->isIntegerTy()) { // Casting to integral
3246 if (SrcTy->isIntegerTy()) { // Casting from integral
3247 if (DestBits < SrcBits)
3248 return Trunc; // int -> smaller int
3249 else if (DestBits > SrcBits) { // its an extension
3250 if (SrcIsSigned)
3251 return SExt; // signed -> SEXT
3252 else
3253 return ZExt; // unsigned -> ZEXT
3254 } else {
3255 return BitCast; // Same size, No-op cast
3256 }
3257 } else if (SrcTy->isFloatingPointTy()) { // Casting from floating pt
3258 if (DestIsSigned)
3259 return FPToSI; // FP -> sint
3260 else
3261 return FPToUI; // FP -> uint
3262 } else if (SrcTy->isVectorTy()) {
3263 assert(DestBits == SrcBits &&
3264 "Casting vector to integer of different width");
3265 return BitCast; // Same size, no-op cast
3266 } else {
3267 assert(SrcTy->isPointerTy() &&
3268 "Casting from a value that is not first-class type");
3269 return PtrToInt; // ptr -> int
3270 }
3271 } else if (DestTy->isFloatingPointTy()) { // Casting to floating pt
3272 if (SrcTy->isIntegerTy()) { // Casting from integral
3273 if (SrcIsSigned)
3274 return SIToFP; // sint -> FP
3275 else
3276 return UIToFP; // uint -> FP
3277 } else if (SrcTy->isFloatingPointTy()) { // Casting from floating pt
3278 if (DestBits < SrcBits) {
3279 return FPTrunc; // FP -> smaller FP
3280 } else if (DestBits > SrcBits) {
3281 return FPExt; // FP -> larger FP
3282 } else {
3283 return BitCast; // same size, no-op cast
3284 }
3285 } else if (SrcTy->isVectorTy()) {
3286 assert(DestBits == SrcBits &&
3287 "Casting vector to floating point of different width");
3288 return BitCast; // same size, no-op cast
3289 }
3290 llvm_unreachable("Casting pointer or non-first class to float");
3291 } else if (DestTy->isVectorTy()) {
3292 assert(DestBits == SrcBits &&
3293 "Illegal cast to vector (wrong type or size)");
3294 return BitCast;
3295 } else if (DestTy->isPointerTy()) {
3296 if (SrcTy->isPointerTy()) {
3297 if (DestTy->getPointerAddressSpace() != SrcTy->getPointerAddressSpace())
3298 return AddrSpaceCast;
3299 return BitCast; // ptr -> ptr
3300 } else if (SrcTy->isIntegerTy()) {
3301 return IntToPtr; // int -> ptr
3302 }
3303 llvm_unreachable("Casting pointer to other than pointer or int");
3304 }
3305 llvm_unreachable("Casting to type that is not first-class");
3306}
3307
3308//===----------------------------------------------------------------------===//
3309// CastInst SubClass Constructors
3310//===----------------------------------------------------------------------===//
3311
3312/// Check that the construction parameters for a CastInst are correct. This
3313/// could be broken out into the separate constructors but it is useful to have
3314/// it in one place and to eliminate the redundant code for getting the sizes
3315/// of the types involved.
3316bool
3318 if (!SrcTy->isFirstClassType() || !DstTy->isFirstClassType() ||
3319 SrcTy->isAggregateType() || DstTy->isAggregateType())
3320 return false;
3321
3322 // Get the size of the types in bits, and whether we are dealing
3323 // with vector types, we'll need this later.
3324 bool SrcIsVec = isa<VectorType>(SrcTy);
3325 bool DstIsVec = isa<VectorType>(DstTy);
3326 unsigned SrcScalarBitSize = SrcTy->getScalarSizeInBits();
3327 unsigned DstScalarBitSize = DstTy->getScalarSizeInBits();
3328
3329 // If these are vector types, get the lengths of the vectors (using zero for
3330 // scalar types means that checking that vector lengths match also checks that
3331 // scalars are not being converted to vectors or vectors to scalars).
3332 ElementCount SrcEC = SrcIsVec ? cast<VectorType>(SrcTy)->getElementCount()
3334 ElementCount DstEC = DstIsVec ? cast<VectorType>(DstTy)->getElementCount()
3336
3337 // Switch on the opcode provided
3338 switch (op) {
3339 default: return false; // This is an input error
3340 case Instruction::Trunc:
3341 return SrcTy->isIntOrIntVectorTy() && DstTy->isIntOrIntVectorTy() &&
3342 SrcEC == DstEC && SrcScalarBitSize > DstScalarBitSize;
3343 case Instruction::ZExt:
3344 return SrcTy->isIntOrIntVectorTy() && DstTy->isIntOrIntVectorTy() &&
3345 SrcEC == DstEC && SrcScalarBitSize < DstScalarBitSize;
3346 case Instruction::SExt:
3347 return SrcTy->isIntOrIntVectorTy() && DstTy->isIntOrIntVectorTy() &&
3348 SrcEC == DstEC && SrcScalarBitSize < DstScalarBitSize;
3349 case Instruction::FPTrunc:
3350 return SrcTy->isFPOrFPVectorTy() && DstTy->isFPOrFPVectorTy() &&
3351 SrcEC == DstEC && SrcScalarBitSize > DstScalarBitSize;
3352 case Instruction::FPExt:
3353 return SrcTy->isFPOrFPVectorTy() && DstTy->isFPOrFPVectorTy() &&
3354 SrcEC == DstEC && SrcScalarBitSize < DstScalarBitSize;
3355 case Instruction::UIToFP:
3356 case Instruction::SIToFP:
3357 return SrcTy->isIntOrIntVectorTy() && DstTy->isFPOrFPVectorTy() &&
3358 SrcEC == DstEC;
3359 case Instruction::FPToUI:
3360 case Instruction::FPToSI:
3361 return SrcTy->isFPOrFPVectorTy() && DstTy->isIntOrIntVectorTy() &&
3362 SrcEC == DstEC;
3363 case Instruction::PtrToAddr:
3364 case Instruction::PtrToInt:
3365 if (SrcEC != DstEC)
3366 return false;
3367 return SrcTy->isPtrOrPtrVectorTy() && DstTy->isIntOrIntVectorTy();
3368 case Instruction::IntToPtr:
3369 if (SrcEC != DstEC)
3370 return false;
3371 return SrcTy->isIntOrIntVectorTy() && DstTy->isPtrOrPtrVectorTy();
3372 case Instruction::BitCast: {
3373 PointerType *SrcPtrTy = dyn_cast<PointerType>(SrcTy->getScalarType());
3374 PointerType *DstPtrTy = dyn_cast<PointerType>(DstTy->getScalarType());
3375
3376 // BitCast implies a no-op cast of type only. No bits change.
3377 // However, you can't cast pointers to anything but pointers.
3378 if (!SrcPtrTy != !DstPtrTy)
3379 return false;
3380
3381 // For non-pointer cases, the cast is okay if the source and destination bit
3382 // widths are identical.
3383 if (!SrcPtrTy)
3384 return SrcTy->getPrimitiveSizeInBits() == DstTy->getPrimitiveSizeInBits();
3385
3386 // If both are pointers then the address spaces must match.
3387 if (SrcPtrTy->getAddressSpace() != DstPtrTy->getAddressSpace())
3388 return false;
3389
3390 // A vector of pointers must have the same number of elements.
3391 if (SrcIsVec && DstIsVec)
3392 return SrcEC == DstEC;
3393 if (SrcIsVec)
3394 return SrcEC == ElementCount::getFixed(1);
3395 if (DstIsVec)
3396 return DstEC == ElementCount::getFixed(1);
3397
3398 return true;
3399 }
3400 case Instruction::AddrSpaceCast: {
3401 PointerType *SrcPtrTy = dyn_cast<PointerType>(SrcTy->getScalarType());
3402 if (!SrcPtrTy)
3403 return false;
3404
3405 PointerType *DstPtrTy = dyn_cast<PointerType>(DstTy->getScalarType());
3406 if (!DstPtrTy)
3407 return false;
3408
3409 if (SrcPtrTy->getAddressSpace() == DstPtrTy->getAddressSpace())
3410 return false;
3411
3412 return SrcEC == DstEC;
3413 }
3414 }
3415}
3416
3418 InsertPosition InsertBefore)
3419 : CastInst(Ty, Trunc, S, Name, InsertBefore) {
3420 assert(castIsValid(getOpcode(), S, Ty) && "Illegal Trunc");
3421}
3422
/// Construct a zero-extension of \p S to type \p Ty (integer or integer
/// vector, per castIsValid), optionally inserting before \p InsertBefore.
ZExtInst::ZExtInst(Value *S, Type *Ty, const Twine &Name,
                   InsertPosition InsertBefore)
    : CastInst(Ty, ZExt, S, Name, InsertBefore) {
  // Validity (types, widths, vector lengths) is enforced in assert builds.
  assert(castIsValid(getOpcode(), S, Ty) && "Illegal ZExt");
}
3428
/// Construct a sign-extension of \p S to type \p Ty (integer or integer
/// vector, per castIsValid), optionally inserting before \p InsertBefore.
SExtInst::SExtInst(Value *S, Type *Ty, const Twine &Name,
                   InsertPosition InsertBefore)
    : CastInst(Ty, SExt, S, Name, InsertBefore) {
  // Validity (types, widths, vector lengths) is enforced in assert builds.
  assert(castIsValid(getOpcode(), S, Ty) && "Illegal SExt");
}
3434
3436 InsertPosition InsertBefore)
3437 : CastInst(Ty, FPTrunc, S, Name, InsertBefore) {
3438 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPTrunc");
3439}
3440
3442 InsertPosition InsertBefore)
3443 : CastInst(Ty, FPExt, S, Name, InsertBefore) {
3444 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPExt");
3445}
3446
3448 InsertPosition InsertBefore)
3449 : CastInst(Ty, UIToFP, S, Name, InsertBefore) {
3450 assert(castIsValid(getOpcode(), S, Ty) && "Illegal UIToFP");
3451}
3452
3454 InsertPosition InsertBefore)
3455 : CastInst(Ty, SIToFP, S, Name, InsertBefore) {
3456 assert(castIsValid(getOpcode(), S, Ty) && "Illegal SIToFP");
3457}
3458
3460 InsertPosition InsertBefore)
3461 : CastInst(Ty, FPToUI, S, Name, InsertBefore) {
3462 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPToUI");
3463}
3464
3466 InsertPosition InsertBefore)
3467 : CastInst(Ty, FPToSI, S, Name, InsertBefore) {
3468 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPToSI");
3469}
3470
3472 InsertPosition InsertBefore)
3473 : CastInst(Ty, PtrToInt, S, Name, InsertBefore) {
3474 assert(castIsValid(getOpcode(), S, Ty) && "Illegal PtrToInt");
3475}
3476
3478 InsertPosition InsertBefore)
3479 : CastInst(Ty, PtrToAddr, S, Name, InsertBefore) {
3480 assert(castIsValid(getOpcode(), S, Ty) && "Illegal PtrToAddr");
3481}
3482
3484 InsertPosition InsertBefore)
3485 : CastInst(Ty, IntToPtr, S, Name, InsertBefore) {
3486 assert(castIsValid(getOpcode(), S, Ty) && "Illegal IntToPtr");
3487}
3488
3490 InsertPosition InsertBefore)
3491 : CastInst(Ty, BitCast, S, Name, InsertBefore) {
3492 assert(castIsValid(getOpcode(), S, Ty) && "Illegal BitCast");
3493}
3494
3496 InsertPosition InsertBefore)
3497 : CastInst(Ty, AddrSpaceCast, S, Name, InsertBefore) {
3498 assert(castIsValid(getOpcode(), S, Ty) && "Illegal AddrSpaceCast");
3499}
3500
3501//===----------------------------------------------------------------------===//
3502// CmpInst Classes
3503//===----------------------------------------------------------------------===//
3504
3506 Value *RHS, const Twine &Name, InsertPosition InsertBefore,
3507 Instruction *FlagsSource)
3508 : Instruction(ty, op, AllocMarker, InsertBefore) {
3509 Op<0>() = LHS;
3510 Op<1>() = RHS;
3511 setPredicate(predicate);
3512 setName(Name);
3513 if (FlagsSource)
3514 copyIRFlags(FlagsSource);
3515}
3516
3518 const Twine &Name, InsertPosition InsertBefore) {
3519 if (Op == Instruction::ICmp) {
3520 if (InsertBefore.isValid())
3521 return new ICmpInst(InsertBefore, CmpInst::Predicate(predicate),
3522 S1, S2, Name);
3523 else
3524 return new ICmpInst(CmpInst::Predicate(predicate),
3525 S1, S2, Name);
3526 }
3527
3528 if (InsertBefore.isValid())
3529 return new FCmpInst(InsertBefore, CmpInst::Predicate(predicate),
3530 S1, S2, Name);
3531 else
3532 return new FCmpInst(CmpInst::Predicate(predicate),
3533 S1, S2, Name);
3534}
3535
3537 Value *S2,
3538 const Instruction *FlagsSource,
3539 const Twine &Name,
3540 InsertPosition InsertBefore) {
3541 CmpInst *Inst = Create(Op, Pred, S1, S2, Name, InsertBefore);
3542 Inst->copyIRFlags(FlagsSource);
3543 return Inst;
3544}
3545
3547 if (ICmpInst *IC = dyn_cast<ICmpInst>(this))
3548 IC->swapOperands();
3549 else
3550 cast<FCmpInst>(this)->swapOperands();
3551}
3552
3554 if (const ICmpInst *IC = dyn_cast<ICmpInst>(this))
3555 return IC->isCommutative();
3556 return cast<FCmpInst>(this)->isCommutative();
3557}
3558
3561 return ICmpInst::isEquality(P);
3563 return FCmpInst::isEquality(P);
3564 llvm_unreachable("Unsupported predicate kind");
3565}
3566
3567// Returns true if either operand of CmpInst is a provably non-zero
3568// floating-point constant.
3569static bool hasNonZeroFPOperands(const CmpInst *Cmp) {
3570 auto *LHS = dyn_cast<Constant>(Cmp->getOperand(0));
3571 auto *RHS = dyn_cast<Constant>(Cmp->getOperand(1));
3572 if (auto *Const = LHS ? LHS : RHS) {
3573 using namespace llvm::PatternMatch;
3574 return match(Const, m_NonZeroNotDenormalFP());
3575 }
3576 return false;
3577}
3578
3579// Floating-point equality is not an equivalence when comparing +0.0 with
3580// -0.0, when comparing NaN with another value, or when flushing
3581// denormals-to-zero.
3582bool CmpInst::isEquivalence(bool Invert) const {
3583 switch (Invert ? getInversePredicate() : getPredicate()) {
3585 return true;
3587 if (!hasNoNaNs())
3588 return false;
3589 [[fallthrough]];
3591 return hasNonZeroFPOperands(this);
3592 default:
3593 return false;
3594 }
3595}
3596
3598 switch (pred) {
3599 default: llvm_unreachable("Unknown cmp predicate!");
3600 case ICMP_EQ: return ICMP_NE;
3601 case ICMP_NE: return ICMP_EQ;
3602 case ICMP_UGT: return ICMP_ULE;
3603 case ICMP_ULT: return ICMP_UGE;
3604 case ICMP_UGE: return ICMP_ULT;
3605 case ICMP_ULE: return ICMP_UGT;
3606 case ICMP_SGT: return ICMP_SLE;
3607 case ICMP_SLT: return ICMP_SGE;
3608 case ICMP_SGE: return ICMP_SLT;
3609 case ICMP_SLE: return ICMP_SGT;
3610
3611 case FCMP_OEQ: return FCMP_UNE;
3612 case FCMP_ONE: return FCMP_UEQ;
3613 case FCMP_OGT: return FCMP_ULE;
3614 case FCMP_OLT: return FCMP_UGE;
3615 case FCMP_OGE: return FCMP_ULT;
3616 case FCMP_OLE: return FCMP_UGT;
3617 case FCMP_UEQ: return FCMP_ONE;
3618 case FCMP_UNE: return FCMP_OEQ;
3619 case FCMP_UGT: return FCMP_OLE;
3620 case FCMP_ULT: return FCMP_OGE;
3621 case FCMP_UGE: return FCMP_OLT;
3622 case FCMP_ULE: return FCMP_OGT;
3623 case FCMP_ORD: return FCMP_UNO;
3624 case FCMP_UNO: return FCMP_ORD;
3625 case FCMP_TRUE: return FCMP_FALSE;
3626 case FCMP_FALSE: return FCMP_TRUE;
3627 }
3628}
3629
3631 switch (Pred) {
3632 default: return "unknown";
3633 case FCmpInst::FCMP_FALSE: return "false";
3634 case FCmpInst::FCMP_OEQ: return "oeq";
3635 case FCmpInst::FCMP_OGT: return "ogt";
3636 case FCmpInst::FCMP_OGE: return "oge";
3637 case FCmpInst::FCMP_OLT: return "olt";
3638 case FCmpInst::FCMP_OLE: return "ole";
3639 case FCmpInst::FCMP_ONE: return "one";
3640 case FCmpInst::FCMP_ORD: return "ord";
3641 case FCmpInst::FCMP_UNO: return "uno";
3642 case FCmpInst::FCMP_UEQ: return "ueq";
3643 case FCmpInst::FCMP_UGT: return "ugt";
3644 case FCmpInst::FCMP_UGE: return "uge";
3645 case FCmpInst::FCMP_ULT: return "ult";
3646 case FCmpInst::FCMP_ULE: return "ule";
3647 case FCmpInst::FCMP_UNE: return "une";
3648 case FCmpInst::FCMP_TRUE: return "true";
3649 case ICmpInst::ICMP_EQ: return "eq";
3650 case ICmpInst::ICMP_NE: return "ne";
3651 case ICmpInst::ICMP_SGT: return "sgt";
3652 case ICmpInst::ICMP_SGE: return "sge";
3653 case ICmpInst::ICMP_SLT: return "slt";
3654 case ICmpInst::ICMP_SLE: return "sle";
3655 case ICmpInst::ICMP_UGT: return "ugt";
3656 case ICmpInst::ICMP_UGE: return "uge";
3657 case ICmpInst::ICMP_ULT: return "ult";
3658 case ICmpInst::ICMP_ULE: return "ule";
3659 }
3660}
3661
3663 OS << CmpInst::getPredicateName(Pred);
3664 return OS;
3665}
3666
3668 switch (pred) {
3669 default: llvm_unreachable("Unknown icmp predicate!");
3670 case ICMP_EQ: case ICMP_NE:
3671 case ICMP_SGT: case ICMP_SLT: case ICMP_SGE: case ICMP_SLE:
3672 return pred;
3673 case ICMP_UGT: return ICMP_SGT;
3674 case ICMP_ULT: return ICMP_SLT;
3675 case ICMP_UGE: return ICMP_SGE;
3676 case ICMP_ULE: return ICMP_SLE;
3677 }
3678}
3679
3681 switch (pred) {
3682 default: llvm_unreachable("Unknown icmp predicate!");
3683 case ICMP_EQ: case ICMP_NE:
3684 case ICMP_UGT: case ICMP_ULT: case ICMP_UGE: case ICMP_ULE:
3685 return pred;
3686 case ICMP_SGT: return ICMP_UGT;
3687 case ICMP_SLT: return ICMP_ULT;
3688 case ICMP_SGE: return ICMP_UGE;
3689 case ICMP_SLE: return ICMP_ULE;
3690 }
3691}
3692
3694 switch (pred) {
3695 default: llvm_unreachable("Unknown cmp predicate!");
3696 case ICMP_EQ: case ICMP_NE:
3697 return pred;
3698 case ICMP_SGT: return ICMP_SLT;
3699 case ICMP_SLT: return ICMP_SGT;
3700 case ICMP_SGE: return ICMP_SLE;
3701 case ICMP_SLE: return ICMP_SGE;
3702 case ICMP_UGT: return ICMP_ULT;
3703 case ICMP_ULT: return ICMP_UGT;
3704 case ICMP_UGE: return ICMP_ULE;
3705 case ICMP_ULE: return ICMP_UGE;
3706
3707 case FCMP_FALSE: case FCMP_TRUE:
3708 case FCMP_OEQ: case FCMP_ONE:
3709 case FCMP_UEQ: case FCMP_UNE:
3710 case FCMP_ORD: case FCMP_UNO:
3711 return pred;
3712 case FCMP_OGT: return FCMP_OLT;
3713 case FCMP_OLT: return FCMP_OGT;
3714 case FCMP_OGE: return FCMP_OLE;
3715 case FCMP_OLE: return FCMP_OGE;
3716 case FCMP_UGT: return FCMP_ULT;
3717 case FCMP_ULT: return FCMP_UGT;
3718 case FCMP_UGE: return FCMP_ULE;
3719 case FCMP_ULE: return FCMP_UGE;
3720 }
3721}
3722
3724 switch (pred) {
3725 case ICMP_SGE:
3726 case ICMP_SLE:
3727 case ICMP_UGE:
3728 case ICMP_ULE:
3729 case FCMP_OGE:
3730 case FCMP_OLE:
3731 case FCMP_UGE:
3732 case FCMP_ULE:
3733 return true;
3734 default:
3735 return false;
3736 }
3737}
3738
3740 switch (pred) {
3741 case ICMP_SGT:
3742 case ICMP_SLT:
3743 case ICMP_UGT:
3744 case ICMP_ULT:
3745 case FCMP_OGT:
3746 case FCMP_OLT:
3747 case FCMP_UGT:
3748 case FCMP_ULT:
3749 return true;
3750 default:
3751 return false;
3752 }
3753}
3754
3756 switch (pred) {
3757 case ICMP_SGE:
3758 return ICMP_SGT;
3759 case ICMP_SLE:
3760 return ICMP_SLT;
3761 case ICMP_UGE:
3762 return ICMP_UGT;
3763 case ICMP_ULE:
3764 return ICMP_ULT;
3765 case FCMP_OGE:
3766 return FCMP_OGT;
3767 case FCMP_OLE:
3768 return FCMP_OLT;
3769 case FCMP_UGE:
3770 return FCMP_UGT;
3771 case FCMP_ULE:
3772 return FCMP_ULT;
3773 default:
3774 return pred;
3775 }
3776}
3777
3779 switch (pred) {
3780 case ICMP_SGT:
3781 return ICMP_SGE;
3782 case ICMP_SLT:
3783 return ICMP_SLE;
3784 case ICMP_UGT:
3785 return ICMP_UGE;
3786 case ICMP_ULT:
3787 return ICMP_ULE;
3788 case FCMP_OGT:
3789 return FCMP_OGE;
3790 case FCMP_OLT:
3791 return FCMP_OLE;
3792 case FCMP_UGT:
3793 return FCMP_UGE;
3794 case FCMP_ULT:
3795 return FCMP_ULE;
3796 default:
3797 return pred;
3798 }
3799}
3800
3802 assert(CmpInst::isRelational(pred) && "Call only with relational predicate!");
3803
3804 if (isStrictPredicate(pred))
3805 return getNonStrictPredicate(pred);
3806 if (isNonStrictPredicate(pred))
3807 return getStrictPredicate(pred);
3808
3809 llvm_unreachable("Unknown predicate!");
3810}
3811
3813 switch (predicate) {
3814 default: return false;
3816 case ICmpInst::ICMP_UGE: return true;
3817 }
3818}
3819
3821 switch (predicate) {
3822 default: return false;
3824 case ICmpInst::ICMP_SGE: return true;
3825 }
3826}
3827
3828bool ICmpInst::compare(const APInt &LHS, const APInt &RHS,
3829 ICmpInst::Predicate Pred) {
3830 assert(ICmpInst::isIntPredicate(Pred) && "Only for integer predicates!");
3831 switch (Pred) {
3833 return LHS.eq(RHS);
3835 return LHS.ne(RHS);
3837 return LHS.ugt(RHS);
3839 return LHS.uge(RHS);
3841 return LHS.ult(RHS);
3843 return LHS.ule(RHS);
3845 return LHS.sgt(RHS);
3847 return LHS.sge(RHS);
3849 return LHS.slt(RHS);
3851 return LHS.sle(RHS);
3852 default:
3853 llvm_unreachable("Unexpected non-integer predicate.");
3854 };
3855}
3856
3857bool FCmpInst::compare(const APFloat &LHS, const APFloat &RHS,
3858 FCmpInst::Predicate Pred) {
3859 APFloat::cmpResult R = LHS.compare(RHS);
3860 switch (Pred) {
3861 default:
3862 llvm_unreachable("Invalid FCmp Predicate");
3864 return false;
3866 return true;
3867 case FCmpInst::FCMP_UNO:
3868 return R == APFloat::cmpUnordered;
3869 case FCmpInst::FCMP_ORD:
3870 return R != APFloat::cmpUnordered;
3871 case FCmpInst::FCMP_UEQ:
3872 return R == APFloat::cmpUnordered || R == APFloat::cmpEqual;
3873 case FCmpInst::FCMP_OEQ:
3874 return R == APFloat::cmpEqual;
3875 case FCmpInst::FCMP_UNE:
3876 return R != APFloat::cmpEqual;
3877 case FCmpInst::FCMP_ONE:
3879 case FCmpInst::FCMP_ULT:
3880 return R == APFloat::cmpUnordered || R == APFloat::cmpLessThan;
3881 case FCmpInst::FCMP_OLT:
3882 return R == APFloat::cmpLessThan;
3883 case FCmpInst::FCMP_UGT:
3885 case FCmpInst::FCMP_OGT:
3886 return R == APFloat::cmpGreaterThan;
3887 case FCmpInst::FCMP_ULE:
3888 return R != APFloat::cmpGreaterThan;
3889 case FCmpInst::FCMP_OLE:
3890 return R == APFloat::cmpLessThan || R == APFloat::cmpEqual;
3891 case FCmpInst::FCMP_UGE:
3892 return R != APFloat::cmpLessThan;
3893 case FCmpInst::FCMP_OGE:
3894 return R == APFloat::cmpGreaterThan || R == APFloat::cmpEqual;
3895 }
3896}
3897
/// Evaluate integer predicate \p Pred over two KnownBits operands. Each case
/// forwards to the matching KnownBits comparison helper, which yields
/// true/false when the known bits decide the comparison and std::nullopt
/// otherwise. Non-integer predicates are a programming error.
std::optional<bool> ICmpInst::compare(const KnownBits &LHS,
                                      const KnownBits &RHS,
                                      ICmpInst::Predicate Pred) {
  switch (Pred) {
  case ICmpInst::ICMP_EQ:
    return KnownBits::eq(LHS, RHS);
  case ICmpInst::ICMP_NE:
    return KnownBits::ne(LHS, RHS);
  case ICmpInst::ICMP_UGE:
    return KnownBits::uge(LHS, RHS);
  case ICmpInst::ICMP_UGT:
    return KnownBits::ugt(LHS, RHS);
  case ICmpInst::ICMP_ULE:
    return KnownBits::ule(LHS, RHS);
  case ICmpInst::ICMP_ULT:
    return KnownBits::ult(LHS, RHS);
  case ICmpInst::ICMP_SGE:
    return KnownBits::sge(LHS, RHS);
  case ICmpInst::ICMP_SGT:
    return KnownBits::sgt(LHS, RHS);
  case ICmpInst::ICMP_SLE:
    return KnownBits::sle(LHS, RHS);
  case ICmpInst::ICMP_SLT:
    return KnownBits::slt(LHS, RHS);
  default:
    llvm_unreachable("Unexpected non-integer predicate.");
  }
}
3926
3928 if (CmpInst::isEquality(pred))
3929 return pred;
3930 if (isSigned(pred))
3931 return getUnsignedPredicate(pred);
3932 if (isUnsigned(pred))
3933 return getSignedPredicate(pred);
3934
3935 llvm_unreachable("Unknown predicate!");
3936}
3937
3939 switch (predicate) {
3940 default: return false;
3943 case FCmpInst::FCMP_ORD: return true;
3944 }
3945}
3946
3948 switch (predicate) {
3949 default: return false;
3952 case FCmpInst::FCMP_UNO: return true;
3953 }
3954}
3955
3957 switch(predicate) {
3958 default: return false;
3959 case ICMP_EQ: case ICMP_UGE: case ICMP_ULE: case ICMP_SGE: case ICMP_SLE:
3960 case FCMP_TRUE: case FCMP_UEQ: case FCMP_UGE: case FCMP_ULE: return true;
3961 }
3962}
3963
3965 switch(predicate) {
3966 case ICMP_NE: case ICMP_UGT: case ICMP_ULT: case ICMP_SGT: case ICMP_SLT:
3967 case FCMP_FALSE: case FCMP_ONE: case FCMP_OGT: case FCMP_OLT: return true;
3968 default: return false;
3969 }
3970}
3971
3973 // If the predicates match, then we know the first condition implies the
3974 // second is true.
3975 if (CmpPredicate::getMatching(Pred1, Pred2))
3976 return true;
3977
3978 if (Pred1.hasSameSign() && CmpInst::isSigned(Pred2))
3980 else if (Pred2.hasSameSign() && CmpInst::isSigned(Pred1))
3982
3983 switch (Pred1) {
3984 default:
3985 break;
3986 case CmpInst::ICMP_EQ:
3987 // A == B implies A >=u B, A <=u B, A >=s B, and A <=s B are true.
3988 return Pred2 == CmpInst::ICMP_UGE || Pred2 == CmpInst::ICMP_ULE ||
3989 Pred2 == CmpInst::ICMP_SGE || Pred2 == CmpInst::ICMP_SLE;
3990 case CmpInst::ICMP_UGT: // A >u B implies A != B and A >=u B are true.
3991 return Pred2 == CmpInst::ICMP_NE || Pred2 == CmpInst::ICMP_UGE;
3992 case CmpInst::ICMP_ULT: // A <u B implies A != B and A <=u B are true.
3993 return Pred2 == CmpInst::ICMP_NE || Pred2 == CmpInst::ICMP_ULE;
3994 case CmpInst::ICMP_SGT: // A >s B implies A != B and A >=s B are true.
3995 return Pred2 == CmpInst::ICMP_NE || Pred2 == CmpInst::ICMP_SGE;
3996 case CmpInst::ICMP_SLT: // A <s B implies A != B and A <=s B are true.
3997 return Pred2 == CmpInst::ICMP_NE || Pred2 == CmpInst::ICMP_SLE;
3998 }
3999 return false;
4000}
4001
4003 CmpPredicate Pred2) {
4004 return isImpliedTrueByMatchingCmp(Pred1,
4006}
4007
4009 CmpPredicate Pred2) {
4010 if (isImpliedTrueByMatchingCmp(Pred1, Pred2))
4011 return true;
4012 if (isImpliedFalseByMatchingCmp(Pred1, Pred2))
4013 return false;
4014 return std::nullopt;
4015}
4016
4017//===----------------------------------------------------------------------===//
4018// CmpPredicate Implementation
4019//===----------------------------------------------------------------------===//
4020
4021std::optional<CmpPredicate> CmpPredicate::getMatching(CmpPredicate A,
4022 CmpPredicate B) {
4023 if (A.Pred == B.Pred)
4024 return A.HasSameSign == B.HasSameSign ? A : CmpPredicate(A.Pred);
4026 return {};
4027 if (A.HasSameSign &&
4029 return B.Pred;
4030 if (B.HasSameSign &&
4032 return A.Pred;
4033 return {};
4034}
4035
4039
4041 if (auto *ICI = dyn_cast<ICmpInst>(Cmp))
4042 return ICI->getCmpPredicate();
4043 return Cmp->getPredicate();
4044}
4045
4049
4051 return getSwapped(get(Cmp));
4052}
4053
4054//===----------------------------------------------------------------------===//
4055// SwitchInst Implementation
4056//===----------------------------------------------------------------------===//
4057
4058void SwitchInst::init(Value *Value, BasicBlock *Default, unsigned NumReserved) {
4059 assert(Value && Default && NumReserved);
4060 ReservedSpace = NumReserved;
4062 allocHungoffUses(ReservedSpace);
4063
4064 Op<0>() = Value;
4065 Op<1>() = Default;
4066}
4067
4068/// SwitchInst ctor - Create a new switch instruction, specifying a value to
4069/// switch on and a default destination. The number of additional cases can
4070/// be specified here to make memory allocation more efficient. This
4071/// constructor can also autoinsert before another instruction.
SwitchInst::SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases,
                       InsertPosition InsertBefore)
    : Instruction(Type::getVoidTy(Value->getContext()), Instruction::Switch,
                  AllocMarker, InsertBefore) {
  // Operands 0 and 1 hold the condition and default destination; each case
  // added later consumes one more slot, so reserve 2 + NumCases up front.
  init(Value, Default, 2 + NumCases);
}
4078
/// Copy constructor: clones the condition, default destination, case
/// successors, and case values of \p SI into a new, uninserted switch.
SwitchInst::SwitchInst(const SwitchInst &SI)
    : Instruction(SI.getType(), Instruction::Switch, AllocMarker) {
  init(SI.getCondition(), SI.getDefaultDest(), SI.getNumOperands());
  setNumHungOffUseOperands(SI.getNumOperands());
  Use *OL = getOperandList();
  ConstantInt **VL = case_values();
  const Use *InOL = SI.getOperandList();
  ConstantInt *const *InVL = SI.case_values();
  // init() already set operands 0 and 1 (condition and default). Copy the
  // remaining case successors; the case-value array runs parallel to the
  // operand list but starts at index 0, hence the i - 2 offset.
  for (unsigned i = 2, E = SI.getNumOperands(); i != E; ++i) {
    OL[i] = InOL[i];
    VL[i - 2] = InVL[i - 2];
  }
  SubclassOptionalData = SI.SubclassOptionalData;
}
4093
4094/// addCase - Add an entry to the switch instruction...
4095///
4097 unsigned NewCaseIdx = getNumCases();
4098 unsigned OpNo = getNumOperands();
4099 if (OpNo + 1 > ReservedSpace)
4100 growOperands(); // Get more space!
4101 // Initialize some new operands.
4102 assert(OpNo < ReservedSpace && "Growing didn't work!");
4103 setNumHungOffUseOperands(OpNo + 1);
4104 CaseHandle Case(this, NewCaseIdx);
4105 Case.setValue(OnVal);
4106 Case.setSuccessor(Dest);
4107}
4108
4109/// removeCase - This method removes the specified case and its successor
4110/// from the switch instruction.
4112 unsigned idx = I->getCaseIndex();
4113
4114 assert(2 + idx < getNumOperands() && "Case index out of range!!!");
4115
4116 unsigned NumOps = getNumOperands();
4117 Use *OL = getOperandList();
4118 ConstantInt **VL = case_values();
4119
4120 // Overwrite this case with the end of the list.
4121 if (2 + idx + 1 != NumOps) {
4122 OL[2 + idx] = OL[NumOps - 1];
4123 VL[idx] = VL[NumOps - 2 - 1];
4124 }
4125
4126 // Nuke the last value.
4127 OL[NumOps - 1].set(nullptr);
4128 VL[NumOps - 2 - 1] = nullptr;
4130
4131 return CaseIt(this, idx);
4132}
4133
4134/// growOperands - grow operands - This grows the operand list in response
4135/// to a push_back style of operation. This grows the number of ops by 3 times.
4136///
4137void SwitchInst::growOperands() {
4138 unsigned e = getNumOperands();
4139 unsigned NumOps = e*3;
4140
4141 ReservedSpace = NumOps;
4142 growHungoffUses(ReservedSpace, /*WithExtraValues=*/true);
4143}
4144
4146 MDNode *ProfileData = getBranchWeightMDNode(SI);
4147 if (!ProfileData)
4148 return;
4149
4150 if (getNumBranchWeights(*ProfileData) != SI.getNumSuccessors()) {
4151 llvm_unreachable("number of prof branch_weights metadata operands does "
4152 "not correspond to number of succesors");
4153 }
4154
4156 if (!extractBranchWeights(ProfileData, Weights))
4157 return;
4158 this->Weights = std::move(Weights);
4159}
4160
4163 if (Weights) {
4164 assert(SI.getNumSuccessors() == Weights->size() &&
4165 "num of prof branch_weights must accord with num of successors");
4166 Changed = true;
4167 // Copy the last case to the place of the removed one and shrink.
4168 // This is tightly coupled with the way SwitchInst::removeCase() removes
4169 // the cases in SwitchInst::removeCase(CaseIt).
4170 (*Weights)[I->getCaseIndex() + 1] = Weights->back();
4171 Weights->pop_back();
4172 }
4173 return SI.removeCase(I);
4174}
4175
4177 auto *DestBlock = I->getCaseSuccessor();
4178 if (Weights) {
4179 auto Weight = getSuccessorWeight(I->getCaseIndex() + 1);
4180 (*Weights)[0] = Weight.value();
4181 }
4182
4183 SI.setDefaultDest(DestBlock);
4184}
4185
4187 ConstantInt *OnVal, BasicBlock *Dest,
4189 SI.addCase(OnVal, Dest);
4190
4191 if (!Weights && W && *W) {
4192 Changed = true;
4193 Weights = SmallVector<uint32_t, 8>(SI.getNumSuccessors(), 0);
4194 (*Weights)[SI.getNumSuccessors() - 1] = *W;
4195 } else if (Weights) {
4196 Changed = true;
4197 Weights->push_back(W.value_or(0));
4198 }
4199 if (Weights)
4200 assert(SI.getNumSuccessors() == Weights->size() &&
4201 "num of prof branch_weights must accord with num of successors");
4202}
4203
4206 // Instruction is erased. Mark as unchanged to not touch it in the destructor.
4207 Changed = false;
4208 if (Weights)
4209 Weights->resize(0);
4210 return SI.eraseFromParent();
4211}
4212
4215 if (!Weights)
4216 return std::nullopt;
4217 return (*Weights)[idx];
4218}
4219
4222 if (!W)
4223 return;
4224
4225 if (!Weights && *W)
4226 Weights = SmallVector<uint32_t, 8>(SI.getNumSuccessors(), 0);
4227
4228 if (Weights) {
4229 auto &OldW = (*Weights)[idx];
4230 if (*W != OldW) {
4231 Changed = true;
4232 OldW = *W;
4233 }
4234 }
4235}
4236
4239 unsigned idx) {
4240 if (MDNode *ProfileData = getBranchWeightMDNode(SI))
4241 if (ProfileData->getNumOperands() == SI.getNumSuccessors() + 1)
4242 return mdconst::extract<ConstantInt>(ProfileData->getOperand(idx + 1))
4243 ->getValue()
4244 .getZExtValue();
4245
4246 return std::nullopt;
4247}
4248
4249//===----------------------------------------------------------------------===//
4250// IndirectBrInst Implementation
4251//===----------------------------------------------------------------------===//
4252
4253void IndirectBrInst::init(Value *Address, unsigned NumDests) {
4254 assert(Address && Address->getType()->isPointerTy() &&
4255 "Address of indirectbr must be a pointer");
4256 ReservedSpace = 1+NumDests;
4258 allocHungoffUses(ReservedSpace);
4259
4260 Op<0>() = Address;
4261}
4262
4263
4264/// growOperands - grow operands - This grows the operand list in response
4265/// to a push_back style of operation. This grows the number of ops by 2 times.
4266///
4267void IndirectBrInst::growOperands() {
4268 unsigned e = getNumOperands();
4269 unsigned NumOps = e*2;
4270
4271 ReservedSpace = NumOps;
4272 growHungoffUses(ReservedSpace);
4273}
4274
// Create an indirectbr on Address, reserving room for NumCases destinations.
// Destinations themselves are added afterwards via addDestination().
IndirectBrInst::IndirectBrInst(Value *Address, unsigned NumCases,
                               InsertPosition InsertBefore)
    : Instruction(Type::getVoidTy(Address->getContext()),
                  Instruction::IndirectBr, AllocMarker, InsertBefore) {
  init(Address, NumCases);
}
4281
4282IndirectBrInst::IndirectBrInst(const IndirectBrInst &IBI)
4283 : Instruction(Type::getVoidTy(IBI.getContext()), Instruction::IndirectBr,
4284 AllocMarker) {
4285 NumUserOperands = IBI.NumUserOperands;
4286 allocHungoffUses(IBI.getNumOperands());
4287 Use *OL = getOperandList();
4288 const Use *InOL = IBI.getOperandList();
4289 for (unsigned i = 0, E = IBI.getNumOperands(); i != E; ++i)
4290 OL[i] = InOL[i];
4291 SubclassOptionalData = IBI.SubclassOptionalData;
4292}
4293
4294/// addDestination - Add a destination.
4295///
4297 unsigned OpNo = getNumOperands();
4298 if (OpNo+1 > ReservedSpace)
4299 growOperands(); // Get more space!
4300 // Initialize some new operands.
4301 assert(OpNo < ReservedSpace && "Growing didn't work!");
4303 getOperandList()[OpNo] = DestBB;
4304}
4305
4306/// removeDestination - This method removes the specified successor from the
4307/// indirectbr instruction.
4309 assert(idx < getNumOperands()-1 && "Successor index out of range!");
4310
4311 unsigned NumOps = getNumOperands();
4312 Use *OL = getOperandList();
4313
4314 // Replace this value with the last one.
4315 OL[idx+1] = OL[NumOps-1];
4316
4317 // Nuke the last value.
4318 OL[NumOps-1].set(nullptr);
4320}
4321
4322//===----------------------------------------------------------------------===//
4323// FreezeInst Implementation
4324//===----------------------------------------------------------------------===//
4325
// Freeze takes a single operand; the result type matches the operand's type.
FreezeInst::FreezeInst(Value *S, const Twine &Name, InsertPosition InsertBefore)
    : UnaryInstruction(S->getType(), Freeze, S, InsertBefore) {
  setName(Name);
}
4330
4331//===----------------------------------------------------------------------===//
4332// cloneImpl() implementations
4333//===----------------------------------------------------------------------===//
4334
4335// Define these methods here so vtables don't get emitted into every translation
4336// unit that uses these classes.
4337
4338GetElementPtrInst *GetElementPtrInst::cloneImpl() const {
4340 return new (AllocMarker) GetElementPtrInst(*this, AllocMarker);
4341}
4342
4346
4350
4352 return new FCmpInst(getPredicate(), Op<0>(), Op<1>());
4353}
4354
4356 return new ICmpInst(getPredicate(), Op<0>(), Op<1>());
4357}
4358
// Clone via the copy constructor.
ExtractValueInst *ExtractValueInst::cloneImpl() const {
  return new ExtractValueInst(*this);
}
4362
// Clone via the copy constructor.
InsertValueInst *InsertValueInst::cloneImpl() const {
  return new InsertValueInst(*this);
}
4366
4369 getOperand(0), getAlign());
4370 Result->setUsedWithInAlloca(isUsedWithInAlloca());
4371 Result->setSwiftError(isSwiftError());
4372 return Result;
4373}
4374
4376 return new LoadInst(getType(), getOperand(0), Twine(), isVolatile(),
4378}
4379
4384
4389 Result->setVolatile(isVolatile());
4390 Result->setWeak(isWeak());
4391 return Result;
4392}
4393
4395 AtomicRMWInst *Result =
4398 Result->setVolatile(isVolatile());
4399 return Result;
4400}
4401
4405
4407 return new TruncInst(getOperand(0), getType());
4408}
4409
4411 return new ZExtInst(getOperand(0), getType());
4412}
4413
4415 return new SExtInst(getOperand(0), getType());
4416}
4417
4419 return new FPTruncInst(getOperand(0), getType());
4420}
4421
4423 return new FPExtInst(getOperand(0), getType());
4424}
4425
4427 return new UIToFPInst(getOperand(0), getType());
4428}
4429
4431 return new SIToFPInst(getOperand(0), getType());
4432}
4433
4435 return new FPToUIInst(getOperand(0), getType());
4436}
4437
4439 return new FPToSIInst(getOperand(0), getType());
4440}
4441
4443 return new PtrToIntInst(getOperand(0), getType());
4444}
4445
4449
4451 return new IntToPtrInst(getOperand(0), getType());
4452}
4453
4455 return new BitCastInst(getOperand(0), getType());
4456}
4457
4461
4462CallInst *CallInst::cloneImpl() const {
4463 if (hasOperandBundles()) {
4467 return new (AllocMarker) CallInst(*this, AllocMarker);
4468 }
4470 return new (AllocMarker) CallInst(*this, AllocMarker);
4471}
4472
4473SelectInst *SelectInst::cloneImpl() const {
4475}
4476
4478 return new VAArgInst(getOperand(0), getType());
4479}
4480
4481ExtractElementInst *ExtractElementInst::cloneImpl() const {
4483}
4484
4485InsertElementInst *InsertElementInst::cloneImpl() const {
4487}
4488
4492
// Clone via the copy constructor.
PHINode *PHINode::cloneImpl() const { return new (AllocMarker) PHINode(*this); }
4494
// Clone via the copy constructor.
LandingPadInst *LandingPadInst::cloneImpl() const {
  return new LandingPadInst(*this);
}
4498
4499ReturnInst *ReturnInst::cloneImpl() const {
4501 return new (AllocMarker) ReturnInst(*this, AllocMarker);
4502}
4503
4504BranchInst *BranchInst::cloneImpl() const {
4506 return new (AllocMarker) BranchInst(*this, AllocMarker);
4507}
4508
// Clone via the copy constructor; operands are hung off, so no marker needed.
SwitchInst *SwitchInst::cloneImpl() const { return new SwitchInst(*this); }
4510
// Clone via the copy constructor; operands are hung off, so no marker needed.
IndirectBrInst *IndirectBrInst::cloneImpl() const {
  return new IndirectBrInst(*this);
}
4514
4515InvokeInst *InvokeInst::cloneImpl() const {
4516 if (hasOperandBundles()) {
4520 return new (AllocMarker) InvokeInst(*this, AllocMarker);
4521 }
4523 return new (AllocMarker) InvokeInst(*this, AllocMarker);
4524}
4525
4526CallBrInst *CallBrInst::cloneImpl() const {
4527 if (hasOperandBundles()) {
4531 return new (AllocMarker) CallBrInst(*this, AllocMarker);
4532 }
4534 return new (AllocMarker) CallBrInst(*this, AllocMarker);
4535}
4536
// Clone via the copy constructor.
ResumeInst *ResumeInst::cloneImpl() const {
  return new (AllocMarker) ResumeInst(*this);
}
4540
4541CleanupReturnInst *CleanupReturnInst::cloneImpl() const {
4543 return new (AllocMarker) CleanupReturnInst(*this, AllocMarker);
4544}
4545
// Clone via the copy constructor.
CatchReturnInst *CatchReturnInst::cloneImpl() const {
  return new (AllocMarker) CatchReturnInst(*this);
}
4549
// Clone via the copy constructor; operands are hung off, so no marker needed.
CatchSwitchInst *CatchSwitchInst::cloneImpl() const {
  return new CatchSwitchInst(*this);
}
4553
4554FuncletPadInst *FuncletPadInst::cloneImpl() const {
4556 return new (AllocMarker) FuncletPadInst(*this, AllocMarker);
4557}
4558
4560 LLVMContext &Context = getContext();
4561 return new UnreachableInst(Context);
4562}
4563
4564bool UnreachableInst::shouldLowerToTrap(bool TrapUnreachable,
4565 bool NoTrapAfterNoreturn) const {
4566 if (!TrapUnreachable)
4567 return false;
4568
4569 // We may be able to ignore unreachable behind a noreturn call.
4571 Call && Call->doesNotReturn()) {
4572 if (NoTrapAfterNoreturn)
4573 return false;
4574 // Do not emit an additional trap instruction.
4575 if (Call->isNonContinuableTrap())
4576 return false;
4577 }
4578
4579 if (getFunction()->hasFnAttribute(Attribute::Naked))
4580 return false;
4581
4582 return true;
4583}
4584
4586 return new FreezeInst(getOperand(0));
4587}
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
constexpr LLT S1
Rewrite undef for PHI
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Atomic ordering constants.
@ FnAttr
This file contains the simple types necessary to represent the attributes associated with functions a...
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
#define LLVM_ABI
Definition Compiler.h:213
This file contains the declarations for the subclasses of Constant, which represent the different fla...
@ Default
static bool isSigned(unsigned int Opcode)
#define op(i)
Module.h This file contains the declarations for the Module class.
static Align computeLoadStoreDefaultAlign(Type *Ty, InsertPosition Pos)
static bool isImpliedFalseByMatchingCmp(CmpPredicate Pred1, CmpPredicate Pred2)
static Value * createPlaceholderForShuffleVector(Value *V)
static Align computeAllocaDefaultAlign(Type *Ty, InsertPosition Pos)
static cl::opt< bool > DisableI2pP2iOpt("disable-i2p-p2i-opt", cl::init(false), cl::desc("Disables inttoptr/ptrtoint roundtrip optimization"))
static bool hasNonZeroFPOperands(const CmpInst *Cmp)
static int matchShuffleAsBitRotate(ArrayRef< int > Mask, int NumSubElts)
Try to lower a vector shuffle as a bit rotation.
static Type * getIndexedTypeInternal(Type *Ty, ArrayRef< IndexTy > IdxList)
static bool isReplicationMaskWithParams(ArrayRef< int > Mask, int ReplicationFactor, int VF)
static bool isIdentityMaskImpl(ArrayRef< int > Mask, int NumOpElts)
static bool isSingleSourceMaskImpl(ArrayRef< int > Mask, int NumOpElts)
static Value * getAISize(LLVMContext &Context, Value *Amt)
static bool isImpliedTrueByMatchingCmp(CmpPredicate Pred1, CmpPredicate Pred2)
const size_t AbstractManglingParser< Derived, Alloc >::NumOps
#define F(x, y, z)
Definition MD5.cpp:54
#define I(x, y, z)
Definition MD5.cpp:57
This file contains the declarations for metadata subclasses.
#define T
MachineInstr unsigned OpIdx
uint64_t IntrinsicInst * II
#define P(N)
PowerPC Reduce CR logical Operation
This file contains the declarations for profiling metadata utility functions.
const SmallVectorImpl< MachineOperand > & Cond
static unsigned getNumElements(Type *Ty)
This file implements the SmallBitVector class.
This file defines the SmallVector class.
#define LLVM_DEBUG(...)
Definition Debug.h:114
static SymbolRef::Type getType(const Symbol *Sym)
Definition TapiFile.cpp:39
static std::optional< unsigned > getOpcode(ArrayRef< VPValue * > Values)
Returns the opcode of Values or ~0 if they do not all agree.
Definition VPlanSLP.cpp:247
Value * RHS
Value * LHS
cmpResult
IEEE-754R 5.11: Floating Point Comparison Relations.
Definition APFloat.h:334
LLVM_ABI float convertToFloat() const
Converts this APFloat to host float value.
Definition APFloat.cpp:6143
Class for arbitrary precision integers.
Definition APInt.h:78
void setBit(unsigned BitPosition)
Set the given bit to 1 whose position is given as "bitPosition".
Definition APInt.h:1331
bool isZero() const
Determine if this value is zero, i.e. all bits are clear.
Definition APInt.h:381
unsigned countr_zero() const
Count the number of trailing zero bits.
Definition APInt.h:1640
unsigned countl_zero() const
The APInt version of std::countl_zero.
Definition APInt.h:1599
static APInt getZero(unsigned numBits)
Get the '0' value for the specified bit-width.
Definition APInt.h:201
This class represents a conversion between pointers from one address space to another.
LLVM_ABI AddrSpaceCastInst * cloneImpl() const
Clone an identical AddrSpaceCastInst.
LLVM_ABI AddrSpaceCastInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics.
LLVM_ABI std::optional< TypeSize > getAllocationSizeInBits(const DataLayout &DL) const
Get allocation size in bits.
bool isSwiftError() const
Return true if this alloca is used as a swifterror argument to a call.
LLVM_ABI bool isStaticAlloca() const
Return true if this alloca is in the entry block of the function and is a constant size.
Align getAlign() const
Return the alignment of the memory that is being allocated by the instruction.
LLVM_ABI AllocaInst * cloneImpl() const
Type * getAllocatedType() const
Return the type that is being allocated by the instruction.
bool isUsedWithInAlloca() const
Return true if this alloca is used as an inalloca argument to a call.
unsigned getAddressSpace() const
Return the address space for the allocation.
LLVM_ABI std::optional< TypeSize > getAllocationSize(const DataLayout &DL) const
Get allocation size in bytes.
LLVM_ABI bool isArrayAllocation() const
Return true if there is an allocation size parameter to the allocation instruction that is not 1.
void setAlignment(Align Align)
const Value * getArraySize() const
Get the number of elements allocated.
LLVM_ABI AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize, const Twine &Name, InsertPosition InsertBefore)
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition ArrayRef.h:40
iterator end() const
Definition ArrayRef.h:131
size_t size() const
size - Get the array size.
Definition ArrayRef.h:142
iterator begin() const
Definition ArrayRef.h:130
bool empty() const
empty - Check if the array is empty.
Definition ArrayRef.h:137
ArrayRef< T > slice(size_t N, size_t M) const
slice(n, m) - Chop off the first N elements of the array, and keep M elements in the array.
Definition ArrayRef.h:186
Class to represent array types.
void setSyncScopeID(SyncScope::ID SSID)
Sets the synchronization scope ID of this cmpxchg instruction.
bool isVolatile() const
Return true if this is a cmpxchg from a volatile memory location.
void setFailureOrdering(AtomicOrdering Ordering)
Sets the failure ordering constraint of this cmpxchg instruction.
AtomicOrdering getFailureOrdering() const
Returns the failure ordering constraint of this cmpxchg instruction.
void setSuccessOrdering(AtomicOrdering Ordering)
Sets the success ordering constraint of this cmpxchg instruction.
LLVM_ABI AtomicCmpXchgInst * cloneImpl() const
Align getAlign() const
Return the alignment of the memory that is being allocated by the instruction.
friend class Instruction
Iterator for Instructions in a `BasicBlock.
bool isWeak() const
Return true if this cmpxchg may spuriously fail.
void setAlignment(Align Align)
AtomicOrdering getSuccessOrdering() const
Returns the success ordering constraint of this cmpxchg instruction.
SyncScope::ID getSyncScopeID() const
Returns the synchronization scope ID of this cmpxchg instruction.
LLVM_ABI AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal, Align Alignment, AtomicOrdering SuccessOrdering, AtomicOrdering FailureOrdering, SyncScope::ID SSID, InsertPosition InsertBefore=nullptr)
Align getAlign() const
Return the alignment of the memory that is being allocated by the instruction.
LLVM_ABI AtomicRMWInst * cloneImpl() const
bool isVolatile() const
Return true if this is a RMW on a volatile memory location.
BinOp
This enumeration lists the possible modifications atomicrmw can make.
@ Add
*p = old + v
@ FAdd
*p = old + v
@ USubCond
Subtract only if no unsigned overflow.
@ FMinimum
*p = minimum(old, v) minimum matches the behavior of llvm.minimum.
@ Min
*p = old <signed v ? old : v
@ Sub
*p = old - v
@ And
*p = old & v
@ Xor
*p = old ^ v
@ USubSat
*p = usub.sat(old, v) usub.sat matches the behavior of llvm.usub.sat.
@ FMaximum
*p = maximum(old, v) maximum matches the behavior of llvm.maximum.
@ FSub
*p = old - v
@ UIncWrap
Increment one up to a maximum value.
@ Max
*p = old >signed v ? old : v
@ UMin
*p = old <unsigned v ? old : v
@ FMin
*p = minnum(old, v) minnum matches the behavior of llvm.minnum.
@ UMax
*p = old >unsigned v ? old : v
@ FMax
*p = maxnum(old, v) maxnum matches the behavior of llvm.maxnum.
@ UDecWrap
Decrement one until a minimum value or zero.
@ Nand
*p = ~(old & v)
void setSyncScopeID(SyncScope::ID SSID)
Sets the synchronization scope ID of this rmw instruction.
void setOrdering(AtomicOrdering Ordering)
Sets the ordering constraint of this rmw instruction.
void setOperation(BinOp Operation)
friend class Instruction
Iterator for Instructions in a `BasicBlock.
BinOp getOperation() const
LLVM_ABI AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val, Align Alignment, AtomicOrdering Ordering, SyncScope::ID SSID, InsertPosition InsertBefore=nullptr)
SyncScope::ID getSyncScopeID() const
Returns the synchronization scope ID of this rmw instruction.
void setAlignment(Align Align)
static LLVM_ABI StringRef getOperationName(BinOp Op)
AtomicOrdering getOrdering() const
Returns the ordering constraint of this rmw instruction.
LLVM_ABI CaptureInfo getCaptureInfo() const
Functions, function parameters, and return types can have attributes to indicate how they should be t...
Definition Attributes.h:69
LLVM_ABI const ConstantRange & getRange() const
Returns the value of the range attribute.
AttrKind
This enumeration lists the attributes that can be associated with parameters, function results,...
Definition Attributes.h:88
static LLVM_ABI Attribute getWithMemoryEffects(LLVMContext &Context, MemoryEffects ME)
bool isValid() const
Return true if the attribute is any kind of attribute.
Definition Attributes.h:223
LLVM Basic Block Representation.
Definition BasicBlock.h:62
const Function * getParent() const
Return the enclosing method, or null if none.
Definition BasicBlock.h:213
LLVM_ABI const DataLayout & getDataLayout() const
Get the data layout of the module this basic block belongs to.
static LLVM_ABI BinaryOperator * CreateNeg(Value *Op, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Helper functions to construct and inspect unary operations (NEG and NOT) via binary operators SUB and...
BinaryOps getOpcode() const
Definition InstrTypes.h:374
LLVM_ABI bool swapOperands()
Exchange the two operands to this instruction.
static LLVM_ABI BinaryOperator * CreateNot(Value *Op, const Twine &Name="", InsertPosition InsertBefore=nullptr)
friend class Instruction
Iterator for Instructions in a `BasicBlock.
Definition InstrTypes.h:181
static LLVM_ABI BinaryOperator * Create(BinaryOps Op, Value *S1, Value *S2, const Twine &Name=Twine(), InsertPosition InsertBefore=nullptr)
Construct a binary instruction, given the opcode and the two operands.
LLVM_ABI BinaryOperator(BinaryOps iType, Value *S1, Value *S2, Type *Ty, const Twine &Name, InsertPosition InsertBefore)
static LLVM_ABI BinaryOperator * CreateNSWNeg(Value *Op, const Twine &Name="", InsertPosition InsertBefore=nullptr)
LLVM_ABI BinaryOperator * cloneImpl() const
This class represents a no-op cast from one type to another.
LLVM_ABI BitCastInst * cloneImpl() const
Clone an identical BitCastInst.
LLVM_ABI BitCastInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics.
Conditional or Unconditional Branch instruction.
LLVM_ABI void swapSuccessors()
Swap the successors of this branch instruction.
LLVM_ABI BranchInst * cloneImpl() const
bool isConditional() const
Value * getCondition() const
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
LLVM_ABI FPClassTest getParamNoFPClass(unsigned i) const
Extract a test mask for disallowed floating-point value classes for the parameter.
bool isInlineAsm() const
Check if this call is an inline asm statement.
LLVM_ABI BundleOpInfo & getBundleOpInfoForOperand(unsigned OpIdx)
Return the BundleOpInfo for the operand at index OpIdx.
void setCallingConv(CallingConv::ID CC)
LLVM_ABI FPClassTest getRetNoFPClass() const
Extract a test mask for disallowed floating-point value classes for the return value.
bundle_op_iterator bundle_op_info_begin()
Return the start of the list of BundleOpInfo instances associated with this OperandBundleUser.
LLVM_ABI bool paramHasNonNullAttr(unsigned ArgNo, bool AllowUndefOrPoison) const
Return true if this argument has the nonnull attribute on either the CallBase instruction or the call...
LLVM_ABI MemoryEffects getMemoryEffects() const
void addFnAttr(Attribute::AttrKind Kind)
Adds the attribute to the function.
LLVM_ABI bool doesNotAccessMemory() const
Determine if the call does not access memory.
LLVM_ABI void getOperandBundlesAsDefs(SmallVectorImpl< OperandBundleDef > &Defs) const
Return the list of operand bundles attached to this instruction as a vector of OperandBundleDefs.
LLVM_ABI void setOnlyAccessesArgMemory()
OperandBundleUse getOperandBundleAt(unsigned Index) const
Return the operand bundle at a specific index.
OperandBundleUse operandBundleFromBundleOpInfo(const BundleOpInfo &BOI) const
Simple helper function to map a BundleOpInfo to an OperandBundleUse.
LLVM_ABI void setOnlyAccessesInaccessibleMemOrArgMem()
std::optional< OperandBundleUse > getOperandBundle(StringRef Name) const
Return an operand bundle by name, if present.
Function * getCalledFunction() const
Returns the function called, or null if this is an indirect function invocation or the function signa...
LLVM_ABI void setDoesNotAccessMemory()
AttributeSet getParamAttributes(unsigned ArgNo) const
Return the param attributes for this call.
bool hasRetAttr(Attribute::AttrKind Kind) const
Determine whether the return value has the given attribute.
LLVM_ABI bool onlyAccessesInaccessibleMemory() const
Determine if the function may only access memory that is inaccessible from the IR.
unsigned getNumOperandBundles() const
Return the number of operand bundles associated with this User.
CallingConv::ID getCallingConv() const
bundle_op_iterator bundle_op_info_end()
Return the end of the list of BundleOpInfo instances associated with this OperandBundleUser.
LLVM_ABI unsigned getNumSubclassExtraOperandsDynamic() const
Get the number of extra operands for instructions that don't have a fixed number of extra operands.
BundleOpInfo * bundle_op_iterator
LLVM_ABI bool paramHasAttr(unsigned ArgNo, Attribute::AttrKind Kind) const
Determine whether the argument or parameter has the given attribute.
User::op_iterator arg_begin()
Return the iterator pointing to the beginning of the argument list.
LLVM_ABI bool isMustTailCall() const
Tests if this call site must be tail call optimized.
LLVM_ABI bool isIndirectCall() const
Return true if the callsite is an indirect call.
LLVM_ABI bool onlyReadsMemory() const
Determine if the call does not access or only reads memory.
bool isByValArgument(unsigned ArgNo) const
Determine whether this argument is passed by value.
iterator_range< bundle_op_iterator > bundle_op_infos()
Return the range [bundle_op_info_begin, bundle_op_info_end).
LLVM_ABI void setOnlyReadsMemory()
static LLVM_ABI CallBase * addOperandBundle(CallBase *CB, uint32_t ID, OperandBundleDef OB, InsertPosition InsertPt=nullptr)
Create a clone of CB with operand bundle OB added.
LLVM_ABI bool onlyAccessesInaccessibleMemOrArgMem() const
Determine if the function may only access memory that is either inaccessible from the IR or pointed t...
LLVM_ABI CaptureInfo getCaptureInfo(unsigned OpNo) const
Return which pointer components this operand may capture.
LLVM_ABI bool hasArgumentWithAdditionalReturnCaptureComponents() const
Returns whether the call has an argument that has an attribute like captures(ret: address,...
CallBase(AttributeList const &A, FunctionType *FT, ArgsTy &&... Args)
Value * getCalledOperand() const
LLVM_ABI void setOnlyWritesMemory()
LLVM_ABI op_iterator populateBundleOperandInfos(ArrayRef< OperandBundleDef > Bundles, const unsigned BeginIndex)
Populate the BundleOpInfo instances and the Use& vector from Bundles.
AttributeList Attrs
parameter attributes for callable
bool hasOperandBundlesOtherThan(ArrayRef< uint32_t > IDs) const
Return true if this operand bundle user contains operand bundles with tags other than those specified...
LLVM_ABI std::optional< ConstantRange > getRange() const
If this return value has a range attribute, return the value range of the argument.
LLVM_ABI bool isReturnNonNull() const
Return true if the return value is known to be not null.
Value * getArgOperand(unsigned i) const
FunctionType * FTy
uint64_t getRetDereferenceableBytes() const
Extract the number of dereferenceable bytes for a call or parameter (0=unknown).
User::op_iterator arg_end()
Return the iterator pointing to the end of the argument list.
FunctionType * getFunctionType() const
LLVM_ABI Intrinsic::ID getIntrinsicID() const
Returns the intrinsic ID of the intrinsic called or Intrinsic::not_intrinsic if the called function i...
static unsigned CountBundleInputs(ArrayRef< OperandBundleDef > Bundles)
Return the total number of values used in Bundles.
LLVM_ABI Value * getArgOperandWithAttribute(Attribute::AttrKind Kind) const
If one of the arguments has the specified attribute, returns its operand value.
LLVM_ABI void setOnlyAccessesInaccessibleMemory()
static LLVM_ABI CallBase * Create(CallBase *CB, ArrayRef< OperandBundleDef > Bundles, InsertPosition InsertPt=nullptr)
Create a clone of CB with a different set of operand bundles and insert it before InsertPt.
LLVM_ABI bool onlyWritesMemory() const
Determine if the call does not access or only writes memory.
LLVM_ABI bool hasClobberingOperandBundles() const
Return true if this operand bundle user has operand bundles that may write to the heap.
void setCalledOperand(Value *V)
static LLVM_ABI CallBase * removeOperandBundle(CallBase *CB, uint32_t ID, InsertPosition InsertPt=nullptr)
Create a clone of CB with operand bundle ID removed.
LLVM_ABI bool hasReadingOperandBundles() const
Return true if this operand bundle user has operand bundles that may read from the heap.
LLVM_ABI bool onlyAccessesArgMemory() const
Determine if the call can access memmory only using pointers based on its arguments.
unsigned arg_size() const
AttributeList getAttributes() const
Return the attributes for this call.
LLVM_ABI void setMemoryEffects(MemoryEffects ME)
bool hasOperandBundles() const
Return true if this User has any operand bundles.
LLVM_ABI bool isTailCall() const
Tests if this call site is marked as a tail call.
LLVM_ABI Function * getCaller()
Helper to get the caller (the parent function).
CallBr instruction, tracking function calls that may not return control but instead transfer it to a ...
SmallVector< BasicBlock *, 16 > getIndirectDests() const
void setDefaultDest(BasicBlock *B)
void setIndirectDest(unsigned i, BasicBlock *B)
BasicBlock * getDefaultDest() const
static CallBrInst * Create(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest, ArrayRef< BasicBlock * > IndirectDests, ArrayRef< Value * > Args, const Twine &NameStr, InsertPosition InsertBefore=nullptr)
LLVM_ABI CallBrInst * cloneImpl() const
This class represents a function call, abstracting a target machine's calling convention.
LLVM_ABI void updateProfWeight(uint64_t S, uint64_t T)
Updates profile metadata by scaling it by S / T.
TailCallKind getTailCallKind() const
LLVM_ABI CallInst * cloneImpl() const
static CallInst * Create(FunctionType *Ty, Value *F, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Represents which components of the pointer may be captured in which location.
Definition ModRef.h:359
CaptureComponents getOtherComponents() const
Get components potentially captured through locations other than the return value.
Definition ModRef.h:391
static CaptureInfo none()
Create CaptureInfo that does not capture any components of the pointer.
Definition ModRef.h:372
static CaptureInfo all()
Create CaptureInfo that may capture all components of the pointer.
Definition ModRef.h:375
CaptureComponents getRetComponents() const
Get components potentially captured by the return value.
Definition ModRef.h:387
static LLVM_ABI Instruction::CastOps getCastOpcode(const Value *Val, bool SrcIsSigned, Type *Ty, bool DstIsSigned)
Returns the opcode necessary to cast Val into Ty using usual casting rules.
static LLVM_ABI CastInst * CreatePointerBitCastOrAddrSpaceCast(Value *S, Type *Ty, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Create a BitCast or an AddrSpaceCast cast instruction.
Instruction::CastOps getOpcode() const
Return the opcode of this CastInst.
Definition InstrTypes.h:610
static LLVM_ABI unsigned isEliminableCastPair(Instruction::CastOps firstOpcode, Instruction::CastOps secondOpcode, Type *SrcTy, Type *MidTy, Type *DstTy, const DataLayout *DL)
Determine how a pair of casts can be eliminated, if they can be at all.
static LLVM_ABI CastInst * CreateIntegerCast(Value *S, Type *Ty, bool isSigned, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Create a ZExt, BitCast, or Trunc for int -> int casts.
static LLVM_ABI CastInst * CreateFPCast(Value *S, Type *Ty, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Create an FPExt, BitCast, or FPTrunc for fp -> fp casts.
CastInst(Type *Ty, unsigned iType, Value *S, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics for subclasses.
Definition InstrTypes.h:451
static LLVM_ABI bool isBitOrNoopPointerCastable(Type *SrcTy, Type *DestTy, const DataLayout &DL)
Check whether a bitcast, inttoptr, or ptrtoint cast between these types is valid and a no-op.
static LLVM_ABI bool isBitCastable(Type *SrcTy, Type *DestTy)
Check whether a bitcast between these types is valid.
static LLVM_ABI CastInst * CreateTruncOrBitCast(Value *S, Type *Ty, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Create a Trunc or BitCast cast instruction.
static LLVM_ABI CastInst * CreatePointerCast(Value *S, Type *Ty, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Create a BitCast, AddrSpaceCast or a PtrToInt cast instruction.
static LLVM_ABI CastInst * CreateBitOrPointerCast(Value *S, Type *Ty, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Create a BitCast, a PtrToInt, or an IntToPTr cast instruction.
static LLVM_ABI bool isNoopCast(Instruction::CastOps Opcode, Type *SrcTy, Type *DstTy, const DataLayout &DL)
A no-op cast is one that can be effected without changing any bits.
static LLVM_ABI CastInst * CreateZExtOrBitCast(Value *S, Type *Ty, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Create a ZExt or BitCast cast instruction.
static LLVM_ABI CastInst * Create(Instruction::CastOps, Value *S, Type *Ty, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Provides a way to construct any of the CastInst subclasses using an opcode instead of the subclass's ...
LLVM_ABI bool isIntegerCast() const
There are several places where we need to know if a cast instruction only deals with integer source a...
static LLVM_ABI CastInst * CreateSExtOrBitCast(Value *S, Type *Ty, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Create a SExt or BitCast cast instruction.
static LLVM_ABI bool castIsValid(Instruction::CastOps op, Type *SrcTy, Type *DstTy)
This method can be used to determine if a cast from SrcTy to DstTy using Opcode op is valid or not.
LLVM_ABI CatchReturnInst * cloneImpl() const
void setUnwindDest(BasicBlock *UnwindDest)
LLVM_ABI void addHandler(BasicBlock *Dest)
Add an entry to the switch instruction... Note: This action invalidates handler_end().
LLVM_ABI CatchSwitchInst * cloneImpl() const
mapped_iterator< op_iterator, DerefFnTy > handler_iterator
Value * getParentPad() const
void setParentPad(Value *ParentPad)
BasicBlock * getUnwindDest() const
LLVM_ABI void removeHandler(handler_iterator HI)
LLVM_ABI CleanupReturnInst * cloneImpl() const
This class is the base class for the comparison instructions.
Definition InstrTypes.h:664
Predicate getStrictPredicate() const
For example, SGE -> SGT, SLE -> SLT, ULE -> ULT, UGE -> UGT.
Definition InstrTypes.h:858
bool isEquality() const
Determine if this is an equals/not equals predicate.
Definition InstrTypes.h:915
void setPredicate(Predicate P)
Set the predicate for this instruction to the specified value.
Definition InstrTypes.h:768
bool isFalseWhenEqual() const
This is just a convenience.
Definition InstrTypes.h:948
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
Definition InstrTypes.h:676
@ FCMP_OEQ
0 0 0 1 True if ordered and equal
Definition InstrTypes.h:679
@ FCMP_TRUE
1 1 1 1 Always true (always folded)
Definition InstrTypes.h:693
@ ICMP_SLT
signed less than
Definition InstrTypes.h:705
@ ICMP_SLE
signed less or equal
Definition InstrTypes.h:706
@ FCMP_OLT
0 1 0 0 True if ordered and less than
Definition InstrTypes.h:682
@ FCMP_ULE
1 1 0 1 True if unordered, less than, or equal
Definition InstrTypes.h:691
@ FCMP_OGT
0 0 1 0 True if ordered and greater than
Definition InstrTypes.h:680
@ FCMP_OGE
0 0 1 1 True if ordered and greater than or equal
Definition InstrTypes.h:681
@ ICMP_UGE
unsigned greater or equal
Definition InstrTypes.h:700
@ ICMP_UGT
unsigned greater than
Definition InstrTypes.h:699
@ ICMP_SGT
signed greater than
Definition InstrTypes.h:703
@ FCMP_ULT
1 1 0 0 True if unordered or less than
Definition InstrTypes.h:690
@ FCMP_ONE
0 1 1 0 True if ordered and operands are unequal
Definition InstrTypes.h:684
@ FCMP_UEQ
1 0 0 1 True if unordered or equal
Definition InstrTypes.h:687
@ ICMP_ULT
unsigned less than
Definition InstrTypes.h:701
@ FCMP_UGT
1 0 1 0 True if unordered or greater than
Definition InstrTypes.h:688
@ FCMP_OLE
0 1 0 1 True if ordered and less than or equal
Definition InstrTypes.h:683
@ FCMP_ORD
0 1 1 1 True if ordered (no nans)
Definition InstrTypes.h:685
@ ICMP_NE
not equal
Definition InstrTypes.h:698
@ ICMP_SGE
signed greater or equal
Definition InstrTypes.h:704
@ FCMP_UNE
1 1 1 0 True if unordered or not equal
Definition InstrTypes.h:692
@ ICMP_ULE
unsigned less or equal
Definition InstrTypes.h:702
@ FCMP_UGE
1 0 1 1 True if unordered, greater than, or equal
Definition InstrTypes.h:689
@ FCMP_FALSE
0 0 0 0 Always false (always folded)
Definition InstrTypes.h:678
@ FCMP_UNO
1 0 0 0 True if unordered: isnan(X) | isnan(Y)
Definition InstrTypes.h:686
LLVM_ABI bool isEquivalence(bool Invert=false) const
Determine if one operand of this compare can always be replaced by the other operand,...
bool isSigned() const
Definition InstrTypes.h:930
static LLVM_ABI bool isEquality(Predicate pred)
Determine if this is an equals/not equals predicate.
Predicate getSwappedPredicate() const
For example, EQ->EQ, SLE->SGE, ULT->UGT, OEQ->OEQ, ULE->UGE, OLT->OGT, etc.
Definition InstrTypes.h:827
bool isTrueWhenEqual() const
This is just a convenience.
Definition InstrTypes.h:942
static LLVM_ABI CmpInst * Create(OtherOps Op, Predicate Pred, Value *S1, Value *S2, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Construct a compare instruction, given the opcode, the predicate and the two operands.
static bool isFPPredicate(Predicate P)
Definition InstrTypes.h:770
Predicate getNonStrictPredicate() const
For example, SGT -> SGE, SLT -> SLE, ULT -> ULE, UGT -> UGE.
Definition InstrTypes.h:871
static LLVM_ABI CmpInst * CreateWithCopiedFlags(OtherOps Op, Predicate Pred, Value *S1, Value *S2, const Instruction *FlagsSource, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Construct a compare instruction, given the opcode, the predicate, the two operands and the instructio...
bool isNonStrictPredicate() const
Definition InstrTypes.h:852
LLVM_ABI void swapOperands()
This is just a convenience that dispatches to the subclasses.
static bool isRelational(Predicate P)
Return true if the predicate is relational (not EQ or NE).
Definition InstrTypes.h:923
Predicate getInversePredicate() const
For example, EQ -> NE, UGT -> ULE, SLT -> SGE, OEQ -> UNE, UGT -> OLE, OLT -> UGE,...
Definition InstrTypes.h:789
static LLVM_ABI StringRef getPredicateName(Predicate P)
Predicate getPredicate() const
Return the predicate for this instruction.
Definition InstrTypes.h:765
bool isStrictPredicate() const
Definition InstrTypes.h:843
static LLVM_ABI bool isUnordered(Predicate predicate)
Determine if the predicate is an unordered operation.
Predicate getFlippedStrictnessPredicate() const
For predicate of kind "is X or equal to 0" returns the predicate "is X".
Definition InstrTypes.h:893
static bool isIntPredicate(Predicate P)
Definition InstrTypes.h:776
static LLVM_ABI bool isOrdered(Predicate predicate)
Determine if the predicate is an ordered operation.
LLVM_ABI CmpInst(Type *ty, Instruction::OtherOps op, Predicate pred, Value *LHS, Value *RHS, const Twine &Name="", InsertPosition InsertBefore=nullptr, Instruction *FlagsSource=nullptr)
bool isUnsigned() const
Definition InstrTypes.h:936
LLVM_ABI bool isCommutative() const
This is just a convenience that dispatches to the subclasses.
An abstraction over a floating-point predicate, and a pack of an integer predicate with samesign info...
static LLVM_ABI std::optional< CmpPredicate > getMatching(CmpPredicate A, CmpPredicate B)
Compares two CmpPredicates taking samesign into account and returns the canonicalized CmpPredicate if...
CmpPredicate()
Default constructor.
static LLVM_ABI CmpPredicate get(const CmpInst *Cmp)
Do a ICmpInst::getCmpPredicate() or CmpInst::getPredicate(), as appropriate.
LLVM_ABI CmpInst::Predicate getPreferredSignedPredicate() const
Attempts to return a signed CmpInst::Predicate from the CmpPredicate.
bool hasSameSign() const
Query samesign information, for optimizations.
static LLVM_ABI CmpPredicate getSwapped(CmpPredicate P)
Get the swapped predicate of a CmpPredicate.
ConstantFP - Floating Point Values [float, double].
Definition Constants.h:282
const APFloat & getValueAPF() const
Definition Constants.h:325
This is the shared class of boolean and integer constants.
Definition Constants.h:87
LLVM_ABI ConstantRange intersectWith(const ConstantRange &CR, PreferredRangeType Type=Smallest) const
Return the range that results from the intersection of this range with another range.
static LLVM_ABI Constant * get(ArrayRef< Constant * > V)
This is an important base class in LLVM.
Definition Constant.h:43
static LLVM_ABI Constant * getAllOnesValue(Type *Ty)
static LLVM_ABI Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
A parsed version of the target data layout string in and methods for querying it.
Definition DataLayout.h:64
static constexpr ElementCount getFixed(ScalarTy MinVal)
Definition TypeSize.h:309
LLVM_ABI ExtractElementInst * cloneImpl() const
static ExtractElementInst * Create(Value *Vec, Value *Idx, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
static LLVM_ABI bool isValidOperands(const Value *Vec, const Value *Idx)
Return true if an extractelement instruction can be formed with the specified operands.
This instruction extracts a struct member or array element value from an aggregate value.
static LLVM_ABI Type * getIndexedType(Type *Agg, ArrayRef< unsigned > Idxs)
Returns the type of the element that would be extracted with an extractvalue instruction with the spe...
LLVM_ABI ExtractValueInst * cloneImpl() const
This instruction compares its operands according to the predicate given to the constructor.
bool isEquality() const
static LLVM_ABI bool compare(const APFloat &LHS, const APFloat &RHS, FCmpInst::Predicate Pred)
Return result of LHS Pred RHS comparison.
LLVM_ABI FCmpInst * cloneImpl() const
Clone an identical FCmpInst.
FCmpInst(InsertPosition InsertBefore, Predicate pred, Value *LHS, Value *RHS, const Twine &NameStr="")
Constructor with insertion semantics.
This class represents an extension of floating point types.
LLVM_ABI FPExtInst * cloneImpl() const
Clone an identical FPExtInst.
LLVM_ABI FPExtInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics.
LLVM_ABI float getFPAccuracy() const
Get the maximum error permitted by this operation in ULPs.
This class represents a cast from floating point to signed integer.
LLVM_ABI FPToSIInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics.
LLVM_ABI FPToSIInst * cloneImpl() const
Clone an identical FPToSIInst.
This class represents a cast from floating point to unsigned integer.
LLVM_ABI FPToUIInst * cloneImpl() const
Clone an identical FPToUIInst.
LLVM_ABI FPToUIInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics.
This class represents a truncation of floating point types.
LLVM_ABI FPTruncInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics.
LLVM_ABI FPTruncInst * cloneImpl() const
Clone an identical FPTruncInst.
LLVM_ABI FenceInst(LLVMContext &C, AtomicOrdering Ordering, SyncScope::ID SSID=SyncScope::System, InsertPosition InsertBefore=nullptr)
SyncScope::ID getSyncScopeID() const
Returns the synchronization scope ID of this fence instruction.
void setSyncScopeID(SyncScope::ID SSID)
Sets the synchronization scope ID of this fence instruction.
LLVM_ABI FenceInst * cloneImpl() const
friend class Instruction
Iterator for Instructions in a `BasicBlock.
void setOrdering(AtomicOrdering Ordering)
Sets the ordering constraint of this fence instruction.
AtomicOrdering getOrdering() const
Returns the ordering constraint of this fence instruction.
Class to represent fixed width SIMD vectors.
unsigned getNumElements() const
LLVM_ABI FreezeInst(Value *S, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
LLVM_ABI FreezeInst * cloneImpl() const
Clone an identical FreezeInst.
void setParentPad(Value *ParentPad)
Value * getParentPad() const
Convenience accessors.
LLVM_ABI FuncletPadInst * cloneImpl() const
Class to represent function types.
unsigned getNumParams() const
Return the number of fixed parameters this function type requires.
Type * getParamType(unsigned i) const
Parameter type accessors.
bool isVarArg() const
Represents flags for the getelementptr instruction/expression.
static GEPNoWrapFlags inBounds()
GEPNoWrapFlags withoutInBounds() const
unsigned getRaw() const
an instruction for type-safe pointer arithmetic to access elements of arrays and structs
LLVM_ABI bool isInBounds() const
Determine whether the GEP has the inbounds flag.
LLVM_ABI bool hasNoUnsignedSignedWrap() const
Determine whether the GEP has the nusw flag.
static LLVM_ABI Type * getTypeAtIndex(Type *Ty, Value *Idx)
Return the type of the element at the given index of an indexable type.
LLVM_ABI bool hasAllZeroIndices() const
Return true if all of the indices of this GEP are zeros.
LLVM_ABI bool hasNoUnsignedWrap() const
Determine whether the GEP has the nuw flag.
LLVM_ABI bool hasAllConstantIndices() const
Return true if all of the indices of this GEP are constant integers.
LLVM_ABI void setIsInBounds(bool b=true)
Set or clear the inbounds flag on this GEP instruction.
static LLVM_ABI Type * getIndexedType(Type *Ty, ArrayRef< Value * > IdxList)
Returns the result type of a getelementptr with the given source element type and indexes.
LLVM_ABI bool accumulateConstantOffset(const DataLayout &DL, APInt &Offset) const
Accumulate the constant address offset of this GEP if possible.
LLVM_ABI GetElementPtrInst * cloneImpl() const
LLVM_ABI bool collectOffset(const DataLayout &DL, unsigned BitWidth, SmallMapVector< Value *, APInt, 4 > &VariableOffsets, APInt &ConstantOffset) const
LLVM_ABI void setNoWrapFlags(GEPNoWrapFlags NW)
Set nowrap flags for GEP instruction.
LLVM_ABI GEPNoWrapFlags getNoWrapFlags() const
Get the nowrap flags for the GEP instruction.
Module * getParent()
Get the module that this global value is contained inside of...
This instruction compares its operands according to the predicate given to the constructor.
ICmpInst(InsertPosition InsertBefore, Predicate pred, Value *LHS, Value *RHS, const Twine &NameStr="")
Constructor with insertion semantics.
static LLVM_ABI bool compare(const APInt &LHS, const APInt &RHS, ICmpInst::Predicate Pred)
Return result of LHS Pred RHS comparison.
LLVM_ABI ICmpInst * cloneImpl() const
Clone an identical ICmpInst.
Predicate getFlippedSignednessPredicate() const
For example, SLT->ULT, ULT->SLT, SLE->ULE, ULE->SLE, EQ->EQ.
Predicate getSignedPredicate() const
For example, EQ->EQ, SLE->SLE, UGT->SGT, etc.
static CmpPredicate getInverseCmpPredicate(CmpPredicate Pred)
bool isEquality() const
Return true if this predicate is either EQ or NE.
static LLVM_ABI Predicate getFlippedSignednessPredicate(Predicate Pred)
For example, SLT->ULT, ULT->SLT, SLE->ULE, ULE->SLE, EQ->EQ.
static LLVM_ABI std::optional< bool > isImpliedByMatchingCmp(CmpPredicate Pred1, CmpPredicate Pred2)
Determine if Pred1 implies Pred2 is true, false, or if nothing can be inferred about the implication,...
Predicate getUnsignedPredicate() const
For example, EQ->EQ, SLE->ULE, UGT->UGT, etc.
Indirect Branch Instruction.
LLVM_ABI void addDestination(BasicBlock *Dest)
Add a destination.
LLVM_ABI void removeDestination(unsigned i)
This method removes the specified successor from the indirectbr instruction.
LLVM_ABI IndirectBrInst * cloneImpl() const
LLVM_ABI InsertElementInst * cloneImpl() const
static InsertElementInst * Create(Value *Vec, Value *NewElt, Value *Idx, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
static LLVM_ABI bool isValidOperands(const Value *Vec, const Value *NewElt, const Value *Idx)
Return true if an insertelement instruction can be formed with the specified operands.
bool isValid() const
Definition Instruction.h:62
BasicBlock * getBasicBlock()
Definition Instruction.h:63
This instruction inserts a struct field of array element value into an aggregate value.
LLVM_ABI InsertValueInst * cloneImpl() const
BitfieldElement::Type getSubclassData() const
LLVM_ABI bool hasNoNaNs() const LLVM_READONLY
Determine whether the no-NaNs flag is set.
LLVM_ABI void copyIRFlags(const Value *V, bool IncludeWrapFlags=true)
Convenience method to copy supported exact, fast-math, and (optionally) wrapping flags from V to this...
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
LLVM_ABI bool isCommutative() const LLVM_READONLY
Return true if the instruction is commutative:
LLVM_ABI InstListType::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
LLVM_ABI const Function * getFunction() const
Return the function this instruction belongs to.
LLVM_ABI void swapProfMetadata()
If the instruction has "branch_weights" MD_prof metadata and the MDNode has three operands (including...
LLVM_ABI bool isVolatile() const LLVM_READONLY
Return true if this instruction has a volatile memory access.
unsigned getOpcode() const
Returns a member of one of the enums like Instruction::Add.
Bitfield::Element< uint16_t, 0, 15 > OpaqueField
Instruction(const Instruction &)=delete
friend class BasicBlock
Various leaf nodes.
void setSubclassData(typename BitfieldElement::Type Value)
This class represents a cast from an integer to a pointer.
LLVM_ABI IntToPtrInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics.
LLVM_ABI IntToPtrInst * cloneImpl() const
Clone an identical IntToPtrInst.
Invoke instruction.
BasicBlock * getUnwindDest() const
void setNormalDest(BasicBlock *B)
LLVM_ABI InvokeInst * cloneImpl() const
LLVM_ABI LandingPadInst * getLandingPadInst() const
Get the landingpad instruction from the landing pad block (the unwind destination).
void setUnwindDest(BasicBlock *B)
LLVM_ABI void updateProfWeight(uint64_t S, uint64_t T)
Updates profile metadata by scaling it by S / T.
static InvokeInst * Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal, BasicBlock *IfException, ArrayRef< Value * > Args, const Twine &NameStr, InsertPosition InsertBefore=nullptr)
This is an important class for using LLVM in a threaded context.
Definition LLVMContext.h:68
LLVMContextImpl *const pImpl
Definition LLVMContext.h:70
The landingpad instruction holds all of the information necessary to generate correct exception handl...
bool isCleanup() const
Return 'true' if this landingpad instruction is a cleanup.
LLVM_ABI LandingPadInst * cloneImpl() const
static LLVM_ABI LandingPadInst * Create(Type *RetTy, unsigned NumReservedClauses, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructors - NumReservedClauses is a hint for the number of incoming clauses that this landingpad w...
LLVM_ABI void addClause(Constant *ClauseVal)
Add a catch or filter clause to the landing pad.
void setCleanup(bool V)
Indicate that this landingpad instruction is a cleanup.
void setAlignment(Align Align)
bool isVolatile() const
Return true if this is a load from a volatile memory location.
void setAtomic(AtomicOrdering Ordering, SyncScope::ID SSID=SyncScope::System)
Sets the ordering constraint and the synchronization scope ID of this load instruction.
LLVM_ABI LoadInst * cloneImpl() const
AtomicOrdering getOrdering() const
Returns the ordering constraint of this load instruction.
void setVolatile(bool V)
Specify whether this is a volatile load or not.
SyncScope::ID getSyncScopeID() const
Returns the synchronization scope ID of this load instruction.
LLVM_ABI LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, InsertPosition InsertBefore)
Align getAlign() const
Return the alignment of the access that is being performed.
Metadata node.
Definition Metadata.h:1078
const MDOperand & getOperand(unsigned I) const
Definition Metadata.h:1442
static MemoryEffectsBase readOnly()
Definition ModRef.h:130
bool onlyWritesMemory() const
Whether this function only (at most) writes memory.
Definition ModRef.h:226
bool doesNotAccessMemory() const
Whether this function accesses no memory.
Definition ModRef.h:220
static MemoryEffectsBase argMemOnly(ModRefInfo MR=ModRefInfo::ModRef)
Definition ModRef.h:140
static MemoryEffectsBase inaccessibleMemOnly(ModRefInfo MR=ModRefInfo::ModRef)
Definition ModRef.h:146
bool onlyAccessesInaccessibleMem() const
Whether this function only (at most) accesses inaccessible memory.
Definition ModRef.h:239
bool onlyAccessesArgPointees() const
Whether this function only (at most) accesses argument memory.
Definition ModRef.h:229
bool onlyReadsMemory() const
Whether this function only (at most) reads memory.
Definition ModRef.h:223
static MemoryEffectsBase writeOnly()
Definition ModRef.h:135
static MemoryEffectsBase inaccessibleOrArgMemOnly(ModRefInfo MR=ModRefInfo::ModRef)
Definition ModRef.h:163
static MemoryEffectsBase none()
Definition ModRef.h:125
bool onlyAccessesInaccessibleOrArgMem() const
Whether this function only (at most) accesses argument and inaccessible memory.
Definition ModRef.h:250
StringRef getTag() const
void allocHungoffUses(unsigned N)
const_block_iterator block_begin() const
LLVM_ABI void removeIncomingValueIf(function_ref< bool(unsigned)> Predicate, bool DeletePHIIfEmpty=true)
Remove all incoming values for which the predicate returns true.
void setIncomingBlock(unsigned i, BasicBlock *BB)
LLVM_ABI Value * removeIncomingValue(unsigned Idx, bool DeletePHIIfEmpty=true)
Remove an incoming value.
LLVM_ABI bool hasConstantOrUndefValue() const
Whether the specified PHI node always merges together the same value, assuming undefs are equal to a ...
void setIncomingValue(unsigned i, Value *V)
const_block_iterator block_end() const
BasicBlock * getIncomingBlock(unsigned i) const
Return incoming basic block number i.
Value * getIncomingValue(unsigned i) const
Return incoming value number x.
LLVM_ABI Value * hasConstantValue() const
If the specified PHI node always merges together the same value, return the value,...
LLVM_ABI PHINode * cloneImpl() const
unsigned getNumIncomingValues() const
Return the number of incoming edges.
Class to represent pointers.
unsigned getAddressSpace() const
Return the address space of the Pointer type.
static LLVM_ABI PoisonValue * get(Type *T)
Static factory methods - Return an 'poison' object of the specified type.
This class represents a cast from a pointer to an address (non-capturing ptrtoint).
PtrToAddrInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics.
PtrToAddrInst * cloneImpl() const
Clone an identical PtrToAddrInst.
This class represents a cast from a pointer to an integer.
LLVM_ABI PtrToIntInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics.
LLVM_ABI PtrToIntInst * cloneImpl() const
Clone an identical PtrToIntInst.
Resume the propagation of an exception.
LLVM_ABI ResumeInst * cloneImpl() const
Return a value (possibly void), from a function.
LLVM_ABI ReturnInst * cloneImpl() const
This class represents a sign extension of integer types.
LLVM_ABI SExtInst * cloneImpl() const
Clone an identical SExtInst.
LLVM_ABI SExtInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics.
This class represents a cast from signed integer to floating point.
LLVM_ABI SIToFPInst * cloneImpl() const
Clone an identical SIToFPInst.
LLVM_ABI SIToFPInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics.
Class to represent scalable SIMD vectors.
LLVM_ABI SelectInst * cloneImpl() const
static LLVM_ABI const char * areInvalidOperands(Value *Cond, Value *True, Value *False)
Return a string if the specified operands are invalid for a select operation, otherwise return null.
static SelectInst * Create(Value *C, Value *S1, Value *S2, const Twine &NameStr="", InsertPosition InsertBefore=nullptr, const Instruction *MDFrom=nullptr)
static LLVM_ABI bool isZeroEltSplatMask(ArrayRef< int > Mask, int NumSrcElts)
Return true if this shuffle mask chooses all elements with the same value as the first element of exa...
ArrayRef< int > getShuffleMask() const
static LLVM_ABI bool isSpliceMask(ArrayRef< int > Mask, int NumSrcElts, int &Index)
Return true if this shuffle mask is a splice mask, concatenating the two inputs together and then ext...
int getMaskValue(unsigned Elt) const
Return the shuffle mask value of this instruction for the given element index.
LLVM_ABI ShuffleVectorInst(Value *V1, Value *Mask, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
static LLVM_ABI bool isValidOperands(const Value *V1, const Value *V2, const Value *Mask)
Return true if a shufflevector instruction can be formed with the specified operands.
static LLVM_ABI bool isSelectMask(ArrayRef< int > Mask, int NumSrcElts)
Return true if this shuffle mask chooses elements from its source vectors without lane crossings.
static LLVM_ABI bool isBitRotateMask(ArrayRef< int > Mask, unsigned EltSizeInBits, unsigned MinSubElts, unsigned MaxSubElts, unsigned &NumSubElts, unsigned &RotateAmt)
Checks if the shuffle is a bit rotation of the first operand across multiple subelements,...
VectorType * getType() const
Overload to return most specific vector type.
LLVM_ABI bool isIdentityWithExtract() const
Return true if this shuffle extracts the first N elements of exactly one source vector.
static LLVM_ABI bool isOneUseSingleSourceMask(ArrayRef< int > Mask, int VF)
Return true if this shuffle mask represents "clustered" mask of size VF, i.e.
LLVM_ABI bool isIdentityWithPadding() const
Return true if this shuffle lengthens exactly one source vector with undefs in the high elements.
static LLVM_ABI bool isSingleSourceMask(ArrayRef< int > Mask, int NumSrcElts)
Return true if this shuffle mask chooses elements from exactly one source vector.
LLVM_ABI bool isConcat() const
Return true if this shuffle concatenates its 2 source vectors.
static LLVM_ABI bool isDeInterleaveMaskOfFactor(ArrayRef< int > Mask, unsigned Factor, unsigned &Index)
Check if the mask is a DE-interleave mask of the given factor Factor like: <Index,...
LLVM_ABI ShuffleVectorInst * cloneImpl() const
static LLVM_ABI bool isIdentityMask(ArrayRef< int > Mask, int NumSrcElts)
Return true if this shuffle mask chooses elements from exactly one source vector without lane crossin...
static LLVM_ABI bool isExtractSubvectorMask(ArrayRef< int > Mask, int NumSrcElts, int &Index)
Return true if this shuffle mask is an extract subvector mask.
LLVM_ABI void setShuffleMask(ArrayRef< int > Mask)
friend class Instruction
Iterator for Instructions in a `BasicBlock.
LLVM_ABI bool isInterleave(unsigned Factor)
Return if this shuffle interleaves its two input vectors together.
static LLVM_ABI bool isReverseMask(ArrayRef< int > Mask, int NumSrcElts)
Return true if this shuffle mask swaps the order of elements from exactly one source vector.
static LLVM_ABI bool isTransposeMask(ArrayRef< int > Mask, int NumSrcElts)
Return true if this shuffle mask is a transpose mask.
LLVM_ABI void commute()
Swap the operands and adjust the mask to preserve the semantics of the instruction.
static LLVM_ABI bool isInsertSubvectorMask(ArrayRef< int > Mask, int NumSrcElts, int &NumSubElts, int &Index)
Return true if this shuffle mask is an insert subvector mask.
static LLVM_ABI Constant * convertShuffleMaskForBitcode(ArrayRef< int > Mask, Type *ResultTy)
static LLVM_ABI bool isReplicationMask(ArrayRef< int > Mask, int &ReplicationFactor, int &VF)
Return true if this shuffle mask replicates each of the VF elements in a vector ReplicationFactor tim...
static LLVM_ABI bool isInterleaveMask(ArrayRef< int > Mask, unsigned Factor, unsigned NumInputElts, SmallVectorImpl< unsigned > &StartIndexes)
Return true if the mask interleaves one or more input vectors together.
This is a 'bitvector' (really, a variable-sized bit array), optimized for the case when the array is ...
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
reference emplace_back(ArgTypes &&... Args)
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
void resize(size_type N)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
AtomicOrdering getOrdering() const
Returns the ordering constraint of this store instruction.
Align getAlign() const
void setVolatile(bool V)
Specify whether this is a volatile store or not.
void setAlignment(Align Align)
friend class Instruction
Iterator for Instructions in a `BasicBlock.
LLVM_ABI StoreInst * cloneImpl() const
LLVM_ABI StoreInst(Value *Val, Value *Ptr, InsertPosition InsertBefore)
SyncScope::ID getSyncScopeID() const
Returns the synchronization scope ID of this store instruction.
bool isVolatile() const
Return true if this is a store to a volatile memory location.
void setAtomic(AtomicOrdering Ordering, SyncScope::ID SSID=SyncScope::System)
Sets the ordering constraint and the synchronization scope ID of this store instruction.
StringRef - Represent a constant reference to a string, i.e.
Definition StringRef.h:55
Class to represent struct types.
LLVM_ABI void setSuccessorWeight(unsigned idx, CaseWeightOpt W)
LLVM_ABI Instruction::InstListType::iterator eraseFromParent()
Delegate the call to the underlying SwitchInst::eraseFromParent() and mark this object to not touch t...
LLVM_ABI void addCase(ConstantInt *OnVal, BasicBlock *Dest, CaseWeightOpt W)
Delegate the call to the underlying SwitchInst::addCase() and set the specified branch weight for the...
LLVM_ABI CaseWeightOpt getSuccessorWeight(unsigned idx)
LLVM_ABI void replaceDefaultDest(SwitchInst::CaseIt I)
Replace the default destination by given case.
std::optional< uint32_t > CaseWeightOpt
LLVM_ABI SwitchInst::CaseIt removeCase(SwitchInst::CaseIt I)
Delegate the call to the underlying SwitchInst::removeCase() and remove correspondent branch weight.
void setValue(ConstantInt *V) const
Sets the new value for current case.
void setSuccessor(BasicBlock *S) const
Sets the new successor for current case.
Multiway switch.
void allocHungoffUses(unsigned N)
LLVM_ABI SwitchInst * cloneImpl() const
LLVM_ABI void addCase(ConstantInt *OnVal, BasicBlock *Dest)
Add an entry to the switch instruction.
CaseIteratorImpl< CaseHandle > CaseIt
ConstantInt *const * case_values() const
unsigned getNumCases() const
Return the number of 'cases' in this switch instruction, excluding the default case.
LLVM_ABI CaseIt removeCase(CaseIt I)
This method removes the specified case and its successor from the switch instruction.
This class represents a truncation of integer types.
LLVM_ABI TruncInst * cloneImpl() const
Clone an identical TruncInst.
LLVM_ABI TruncInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition Twine.h:82
static constexpr TypeSize getFixed(ScalarTy ExactSize)
Definition TypeSize.h:343
static constexpr TypeSize get(ScalarTy Quantity, bool Scalable)
Definition TypeSize.h:340
The instances of the Type class are immutable: once they are created, they are never changed.
Definition Type.h:45
bool isVectorTy() const
True if this is an instance of VectorType.
Definition Type.h:273
static LLVM_ABI IntegerType * getInt32Ty(LLVMContext &C)
Definition Type.cpp:296
bool isIntOrIntVectorTy() const
Return true if this is an integer type or a vector of integer types.
Definition Type.h:246
bool isPointerTy() const
True if this is an instance of PointerType.
Definition Type.h:267
LLVM_ABI unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
LLVM_ABI bool isFirstClassType() const
Return true if the type is "first class", meaning it is a valid type for a Value.
Definition Type.cpp:249
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
Definition Type.h:352
LLVM_ABI TypeSize getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
Definition Type.cpp:197
bool isAggregateType() const
Return true if the type is an aggregate type.
Definition Type.h:304
LLVMContext & getContext() const
Return the LLVMContext in which this type was uniqued.
Definition Type.h:128
LLVM_ABI unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
Definition Type.cpp:230
static LLVM_ABI IntegerType * getInt1Ty(LLVMContext &C)
Definition Type.cpp:293
bool isFloatingPointTy() const
Return true if this is one of the floating-point types.
Definition Type.h:184
bool isPtrOrPtrVectorTy() const
Return true if this is a pointer type or a vector of pointer types.
Definition Type.h:270
bool isIntegerTy() const
True if this is an instance of IntegerType.
Definition Type.h:240
bool isTokenTy() const
Return true if this is 'token'.
Definition Type.h:234
bool isFPOrFPVectorTy() const
Return true if this is a FP type or a vector of FP.
Definition Type.h:225
This class represents a cast unsigned integer to floating point.
LLVM_ABI UIToFPInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics.
LLVM_ABI UIToFPInst * cloneImpl() const
Clone an identical UIToFPInst.
UnaryInstruction(Type *Ty, unsigned iType, Value *V, InsertPosition InsertBefore=nullptr)
Definition InstrTypes.h:62
static LLVM_ABI UnaryOperator * Create(UnaryOps Op, Value *S, const Twine &Name=Twine(), InsertPosition InsertBefore=nullptr)
Construct a unary instruction, given the opcode and an operand.
LLVM_ABI UnaryOperator(UnaryOps iType, Value *S, Type *Ty, const Twine &Name, InsertPosition InsertBefore)
LLVM_ABI UnaryOperator * cloneImpl() const
UnaryOps getOpcode() const
Definition InstrTypes.h:154
LLVM_ABI UnreachableInst(LLVMContext &C, InsertPosition InsertBefore=nullptr)
LLVM_ABI bool shouldLowerToTrap(bool TrapUnreachable, bool NoTrapAfterNoreturn) const
friend class Instruction
Iterator for Instructions in a BasicBlock.
LLVM_ABI UnreachableInst * cloneImpl() const
A Use represents the edge between a Value definition and its users.
Definition Use.h:35
LLVM_ABI void set(Value *Val)
Definition Value.h:905
Use * op_iterator
Definition User.h:279
const Use * getOperandList() const
Definition User.h:225
op_iterator op_begin()
Definition User.h:284
LLVM_ABI void allocHungoffUses(unsigned N, bool WithExtraValues=false)
Allocate the array of Uses, followed by a pointer (with bottom bit set) to the User.
Definition User.cpp:53
const Use & getOperandUse(unsigned i) const
Definition User.h:245
void setNumHungOffUseOperands(unsigned NumOps)
Subclasses with hung off uses need to manage the operand count themselves.
Definition User.h:265
Use & Op()
Definition User.h:196
LLVM_ABI void growHungoffUses(unsigned N, bool WithExtraValues=false)
Grow the number of hung off uses.
Definition User.cpp:70
Value * getOperand(unsigned i) const
Definition User.h:232
unsigned getNumOperands() const
Definition User.h:254
op_iterator op_end()
Definition User.h:286
VAArgInst(Value *List, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
LLVM_ABI VAArgInst * cloneImpl() const
LLVM Value Representation.
Definition Value.h:75
Type * getType() const
All values are typed, get the type of this value.
Definition Value.h:256
LLVM_ABI Value(Type *Ty, unsigned scid)
Definition Value.cpp:53
unsigned char SubclassOptionalData
Hold subclass data that can be dropped.
Definition Value.h:85
LLVM_ABI void setName(const Twine &Name)
Change the name of the value.
Definition Value.cpp:390
LLVM_ABI void replaceAllUsesWith(Value *V)
Change all uses of this to point to a new Value.
Definition Value.cpp:546
LLVM_ABI LLVMContext & getContext() const
All values hold a context through their type.
Definition Value.cpp:1099
unsigned NumUserOperands
Definition Value.h:109
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
Definition Value.cpp:322
Base class of all SIMD vector types.
ElementCount getElementCount() const
Return an ElementCount instance to represent the (possibly scalable) number of elements in the vector...
static LLVM_ABI VectorType * get(Type *ElementType, ElementCount EC)
This static method is the primary way to construct an VectorType.
This class represents zero extension of integer types.
LLVM_ABI ZExtInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics.
LLVM_ABI ZExtInst * cloneImpl() const
Clone an identical ZExtInst.
constexpr ScalarTy getFixedValue() const
Definition TypeSize.h:200
constexpr ScalarTy getKnownMinValue() const
Returns the minimum value this quantity can represent.
Definition TypeSize.h:165
An efficient, type-erasing, non-owning reference to a callable.
typename base_list_type::iterator iterator
Definition ilist.h:121
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition raw_ostream.h:53
CallInst * Call
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr char Attrs[]
Key for Kernel::Metadata::mAttrs.
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
Definition CallingConv.h:24
@ C
The default llvm calling convention, compatible with C.
Definition CallingConv.h:34
bool match(Val *V, const Pattern &P)
cstfp_pred_ty< is_non_zero_not_denormal_fp > m_NonZeroNotDenormalFP()
Match a floating-point non-zero that is not a denormal.
initializer< Ty > init(const Ty &Val)
@ Switch
The "resume-switch" lowering, where there are separate resume and destroy functions that are shared b...
Definition CoroShape.h:31
std::enable_if_t< detail::IsValidPointer< X, Y >::value, X * > extract(Y &&MD)
Extract a Value from Metadata.
Definition Metadata.h:667
NodeAddr< UseNode * > Use
Definition RDFGraph.h:385
Context & getContext() const
Definition BasicBlock.h:99
This is an optimization pass for GlobalISel generic memory operations.
@ Offset
Definition DWP.cpp:532
auto seq_inclusive(T Begin, T End)
Iterate over an integral type from Begin to End inclusive.
Definition Sequence.h:325
auto size(R &&Range, std::enable_if_t< std::is_base_of< std::random_access_iterator_tag, typename std::iterator_traits< decltype(Range.begin())>::iterator_category >::value, void > *=nullptr)
Get the size of a range.
Definition STLExtras.h:1667
unsigned getPointerAddressSpace(const Type *T)
Definition SPIRVUtils.h:365
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:643
FunctionAddr VTableAddr uintptr_t uintptr_t Int32Ty
Definition InstrProf.h:296
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
LLVM_ABI MDNode * getBranchWeightMDNode(const Instruction &I)
Get the branch weights metadata node.
MemoryEffectsBase< IRMemLocation > MemoryEffects
Summary of how a function affects memory in the program.
Definition ModRef.h:301
constexpr bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1737
std::enable_if_t< std::is_unsigned_v< T >, std::optional< T > > checkedMulUnsigned(T LHS, T RHS)
Multiply two unsigned integers LHS and RHS.
auto dyn_cast_or_null(const Y &Val)
Definition Casting.h:753
auto reverse(ContainerTy &&C)
Definition STLExtras.h:406
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
Definition MathExtras.h:279
decltype(auto) get(const PointerIntPair< PointerTy, IntBits, IntType, PtrTraits, Info > &Pair)
FPClassTest
Floating-point class tests, supported by 'is_fpclass' intrinsic.
LLVM_ABI bool NullPointerIsDefined(const Function *F, unsigned AS=0)
Check whether null pointer dereferencing is considered undefined behavior for a given function or an ...
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition Debug.cpp:207
bool isPointerTy(const Type *T)
Definition SPIRVUtils.h:359
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
Definition Casting.h:547
constexpr int PoisonMaskElem
LLVM_ABI unsigned getNumBranchWeights(const MDNode &ProfileData)
AtomicOrdering
Atomic ordering for LLVM's memory model.
OperandBundleDefT< Value * > OperandBundleDef
Definition AutoUpgrade.h:34
@ Mul
Product of integers.
@ Xor
Bitwise or logical XOR of integers.
@ FMul
Product of floats.
@ Sub
Subtraction of integers.
@ Add
Sum of integers.
@ FAdd
Sum of floats.
DWARFExpression::Operation Op
raw_ostream & operator<<(raw_ostream &OS, const APFixedPoint &FX)
OutputIt copy(R &&Range, OutputIt Out)
Definition STLExtras.h:1856
constexpr unsigned BitWidth
LLVM_ABI bool extractBranchWeights(const MDNode *ProfileData, SmallVectorImpl< uint32_t > &Weights)
Extract branch weights from MD_prof metadata.
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:559
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Definition STLExtras.h:1918
bool capturesAnything(CaptureComponents CC)
Definition ModRef.h:324
bool all_equal(std::initializer_list< T > Values)
Returns true if all Values in the initializer lists are equal or the list is empty.
Definition STLExtras.h:2129
auto seq(T Begin, T End)
Iterate over an integral type from Begin up to - but not including - End.
Definition Sequence.h:305
@ Default
The result values are uniform if and only if all operands are uniform.
Definition Uniformity.h:20
LLVM_ABI void scaleProfData(Instruction &I, uint64_t S, uint64_t T)
Scaling the profile data attached to 'I' using the ratio of S/T.
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition Alignment.h:39
Summary of memprof metadata on allocations.
Used to keep track of an operand bundle.
uint32_t End
The index in the Use& vector where operands for this operand bundle ends.
uint32_t Begin
The index in the Use& vector where operands for this operand bundle starts.
Incoming for lane mask phi as machine instruction, incoming register Reg and incoming block Block are...
static LLVM_ABI std::optional< bool > eq(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_EQ result.
static LLVM_ABI std::optional< bool > ne(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_NE result.
static LLVM_ABI std::optional< bool > sge(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_SGE result.
static LLVM_ABI std::optional< bool > ugt(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_UGT result.
static LLVM_ABI std::optional< bool > slt(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_SLT result.
static LLVM_ABI std::optional< bool > ult(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_ULT result.
static LLVM_ABI std::optional< bool > ule(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_ULE result.
static LLVM_ABI std::optional< bool > sle(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_SLE result.
static LLVM_ABI std::optional< bool > sgt(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_SGT result.
static LLVM_ABI std::optional< bool > uge(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_UGE result.
Matching combinators.
A MapVector that performs no allocations if smaller than a certain size.
Definition MapVector.h:276
Indicates this User has operands co-allocated.
Definition User.h:60
Indicates this User has operands and a descriptor co-allocated.
Definition User.h:66