LLVM 18.0.0git
Instructions.cpp
Go to the documentation of this file.
1//===- Instructions.cpp - Implement the LLVM instructions -----------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file implements all of the non-inline methods for the LLVM instruction
10// classes.
11//
12//===----------------------------------------------------------------------===//
13
15#include "LLVMContextImpl.h"
18#include "llvm/ADT/Twine.h"
19#include "llvm/IR/Attributes.h"
20#include "llvm/IR/BasicBlock.h"
21#include "llvm/IR/Constant.h"
22#include "llvm/IR/Constants.h"
23#include "llvm/IR/DataLayout.h"
25#include "llvm/IR/Function.h"
26#include "llvm/IR/InstrTypes.h"
27#include "llvm/IR/Instruction.h"
28#include "llvm/IR/Intrinsics.h"
29#include "llvm/IR/LLVMContext.h"
30#include "llvm/IR/MDBuilder.h"
31#include "llvm/IR/Metadata.h"
32#include "llvm/IR/Module.h"
33#include "llvm/IR/Operator.h"
35#include "llvm/IR/Type.h"
36#include "llvm/IR/Value.h"
41#include "llvm/Support/ModRef.h"
43#include <algorithm>
44#include <cassert>
45#include <cstdint>
46#include <optional>
47#include <vector>
48
49using namespace llvm;
50
52 "disable-i2p-p2i-opt", cl::init(false),
53 cl::desc("Disables inttoptr/ptrtoint roundtrip optimization"));
54
55//===----------------------------------------------------------------------===//
56// AllocaInst Class
57//===----------------------------------------------------------------------===//
58
59std::optional<TypeSize>
61 TypeSize Size = DL.getTypeAllocSize(getAllocatedType());
62 if (isArrayAllocation()) {
63 auto *C = dyn_cast<ConstantInt>(getArraySize());
64 if (!C)
65 return std::nullopt;
66 assert(!Size.isScalable() && "Array elements cannot have a scalable size");
67 Size *= C->getZExtValue();
68 }
69 return Size;
70}
71
72std::optional<TypeSize>
74 std::optional<TypeSize> Size = getAllocationSize(DL);
75 if (Size)
76 return *Size * 8;
77 return std::nullopt;
78}
79
80//===----------------------------------------------------------------------===//
81// SelectInst Class
82//===----------------------------------------------------------------------===//
83
84/// areInvalidOperands - Return a string if the specified operands are invalid
85/// for a select operation, otherwise return null.
86const char *SelectInst::areInvalidOperands(Value *Op0, Value *Op1, Value *Op2) {
87 if (Op1->getType() != Op2->getType())
88 return "both values to select must have same type";
89
90 if (Op1->getType()->isTokenTy())
91 return "select values cannot have token type";
92
93 if (VectorType *VT = dyn_cast<VectorType>(Op0->getType())) {
94 // Vector select.
95 if (VT->getElementType() != Type::getInt1Ty(Op0->getContext()))
96 return "vector select condition element type must be i1";
97 VectorType *ET = dyn_cast<VectorType>(Op1->getType());
98 if (!ET)
99 return "selected values for vector select must be vectors";
100 if (ET->getElementCount() != VT->getElementCount())
101 return "vector select requires selected vectors to have "
102 "the same vector length as select condition";
103 } else if (Op0->getType() != Type::getInt1Ty(Op0->getContext())) {
104 return "select condition must be i1 or <n x i1>";
105 }
106 return nullptr;
107}
108
109//===----------------------------------------------------------------------===//
110// PHINode Class
111//===----------------------------------------------------------------------===//
112
113PHINode::PHINode(const PHINode &PN)
114 : Instruction(PN.getType(), Instruction::PHI, nullptr, PN.getNumOperands()),
115 ReservedSpace(PN.getNumOperands()) {
117 std::copy(PN.op_begin(), PN.op_end(), op_begin());
118 copyIncomingBlocks(make_range(PN.block_begin(), PN.block_end()));
120}
121
122// removeIncomingValue - Remove an incoming value. This is useful if a
123// predecessor basic block is deleted.
124Value *PHINode::removeIncomingValue(unsigned Idx, bool DeletePHIIfEmpty) {
125 Value *Removed = getIncomingValue(Idx);
126
127 // Move everything after this operand down.
128 //
129 // FIXME: we could just swap with the end of the list, then erase. However,
130 // clients might not expect this to happen. The code as it is thrashes the
131 // use/def lists, which is kinda lame.
132 std::copy(op_begin() + Idx + 1, op_end(), op_begin() + Idx);
134
135 // Nuke the last value.
136 Op<-1>().set(nullptr);
138
139 // If the PHI node is dead, because it has zero entries, nuke it now.
140 if (getNumOperands() == 0 && DeletePHIIfEmpty) {
141 // If anyone is using this PHI, make them use a dummy value instead...
144 }
145 return Removed;
146}
147
148void PHINode::removeIncomingValueIf(function_ref<bool(unsigned)> Predicate,
149 bool DeletePHIIfEmpty) {
150 SmallDenseSet<unsigned> RemoveIndices;
151 for (unsigned Idx = 0; Idx < getNumIncomingValues(); ++Idx)
152 if (Predicate(Idx))
153 RemoveIndices.insert(Idx);
154
155 if (RemoveIndices.empty())
156 return;
157
158 // Remove operands.
159 auto NewOpEnd = remove_if(operands(), [&](Use &U) {
160 return RemoveIndices.contains(U.getOperandNo());
161 });
162 for (Use &U : make_range(NewOpEnd, op_end()))
163 U.set(nullptr);
164
165 // Remove incoming blocks.
166 (void)std::remove_if(const_cast<block_iterator>(block_begin()),
167 const_cast<block_iterator>(block_end()), [&](BasicBlock *&BB) {
168 return RemoveIndices.contains(&BB - block_begin());
169 });
170
171 setNumHungOffUseOperands(getNumOperands() - RemoveIndices.size());
172
173 // If the PHI node is dead, because it has zero entries, nuke it now.
174 if (getNumOperands() == 0 && DeletePHIIfEmpty) {
175 // If anyone is using this PHI, make them use a dummy value instead...
178 }
179}
180
181/// growOperands - grow operands - This grows the operand list in response
182/// to a push_back style of operation. This grows the number of ops by 1.5
183/// times.
184///
185void PHINode::growOperands() {
186 unsigned e = getNumOperands();
187 unsigned NumOps = e + e / 2;
188 if (NumOps < 2) NumOps = 2; // 2 op PHI nodes are VERY common.
189
190 ReservedSpace = NumOps;
191 growHungoffUses(ReservedSpace, /* IsPhi */ true);
192}
193
194/// hasConstantValue - If the specified PHI node always merges together the same
195/// value, return the value, otherwise return null.
197 // Exploit the fact that phi nodes always have at least one entry.
198 Value *ConstantValue = getIncomingValue(0);
199 for (unsigned i = 1, e = getNumIncomingValues(); i != e; ++i)
200 if (getIncomingValue(i) != ConstantValue && getIncomingValue(i) != this) {
201 if (ConstantValue != this)
202 return nullptr; // Incoming values not all the same.
203 // The case where the first value is this PHI.
204 ConstantValue = getIncomingValue(i);
205 }
206 if (ConstantValue == this)
207 return UndefValue::get(getType());
208 return ConstantValue;
209}
210
211/// hasConstantOrUndefValue - Whether the specified PHI node always merges
212/// together the same value, assuming that undefs result in the same value as
213/// non-undefs.
214/// Unlike \ref hasConstantValue, this does not return a value because the
215/// unique non-undef incoming value need not dominate the PHI node.
217 Value *ConstantValue = nullptr;
218 for (unsigned i = 0, e = getNumIncomingValues(); i != e; ++i) {
219 Value *Incoming = getIncomingValue(i);
220 if (Incoming != this && !isa<UndefValue>(Incoming)) {
221 if (ConstantValue && ConstantValue != Incoming)
222 return false;
223 ConstantValue = Incoming;
224 }
225 }
226 return true;
227}
228
229//===----------------------------------------------------------------------===//
230// LandingPadInst Implementation
231//===----------------------------------------------------------------------===//
232
233LandingPadInst::LandingPadInst(Type *RetTy, unsigned NumReservedValues,
234 const Twine &NameStr, Instruction *InsertBefore)
235 : Instruction(RetTy, Instruction::LandingPad, nullptr, 0, InsertBefore) {
236 init(NumReservedValues, NameStr);
237}
238
239LandingPadInst::LandingPadInst(Type *RetTy, unsigned NumReservedValues,
240 const Twine &NameStr, BasicBlock *InsertAtEnd)
241 : Instruction(RetTy, Instruction::LandingPad, nullptr, 0, InsertAtEnd) {
242 init(NumReservedValues, NameStr);
243}
244
245LandingPadInst::LandingPadInst(const LandingPadInst &LP)
246 : Instruction(LP.getType(), Instruction::LandingPad, nullptr,
247 LP.getNumOperands()),
248 ReservedSpace(LP.getNumOperands()) {
250 Use *OL = getOperandList();
251 const Use *InOL = LP.getOperandList();
252 for (unsigned I = 0, E = ReservedSpace; I != E; ++I)
253 OL[I] = InOL[I];
254
255 setCleanup(LP.isCleanup());
256}
257
258LandingPadInst *LandingPadInst::Create(Type *RetTy, unsigned NumReservedClauses,
259 const Twine &NameStr,
260 Instruction *InsertBefore) {
261 return new LandingPadInst(RetTy, NumReservedClauses, NameStr, InsertBefore);
262}
263
264LandingPadInst *LandingPadInst::Create(Type *RetTy, unsigned NumReservedClauses,
265 const Twine &NameStr,
266 BasicBlock *InsertAtEnd) {
267 return new LandingPadInst(RetTy, NumReservedClauses, NameStr, InsertAtEnd);
268}
269
270void LandingPadInst::init(unsigned NumReservedValues, const Twine &NameStr) {
271 ReservedSpace = NumReservedValues;
273 allocHungoffUses(ReservedSpace);
274 setName(NameStr);
275 setCleanup(false);
276}
277
278/// growOperands - grow operands - This grows the operand list in response to a
279/// push_back style of operation. This grows the number of ops by 2 times.
280void LandingPadInst::growOperands(unsigned Size) {
281 unsigned e = getNumOperands();
282 if (ReservedSpace >= e + Size) return;
283 ReservedSpace = (std::max(e, 1U) + Size / 2) * 2;
284 growHungoffUses(ReservedSpace);
285}
286
288 unsigned OpNo = getNumOperands();
289 growOperands(1);
290 assert(OpNo < ReservedSpace && "Growing didn't work!");
292 getOperandList()[OpNo] = Val;
293}
294
295//===----------------------------------------------------------------------===//
296// CallBase Implementation
297//===----------------------------------------------------------------------===//
298
300 Instruction *InsertPt) {
301 switch (CB->getOpcode()) {
302 case Instruction::Call:
303 return CallInst::Create(cast<CallInst>(CB), Bundles, InsertPt);
304 case Instruction::Invoke:
305 return InvokeInst::Create(cast<InvokeInst>(CB), Bundles, InsertPt);
306 case Instruction::CallBr:
307 return CallBrInst::Create(cast<CallBrInst>(CB), Bundles, InsertPt);
308 default:
309 llvm_unreachable("Unknown CallBase sub-class!");
310 }
311}
312
314 Instruction *InsertPt) {
316 for (unsigned i = 0, e = CI->getNumOperandBundles(); i < e; ++i) {
317 auto ChildOB = CI->getOperandBundleAt(i);
318 if (ChildOB.getTagName() != OpB.getTag())
319 OpDefs.emplace_back(ChildOB);
320 }
321 OpDefs.emplace_back(OpB);
322 return CallBase::Create(CI, OpDefs, InsertPt);
323}
324
325
327
329 assert(getOpcode() == Instruction::CallBr && "Unexpected opcode!");
330 return cast<CallBrInst>(this)->getNumIndirectDests() + 1;
331}
332
334 const Value *V = getCalledOperand();
335 if (isa<Function>(V) || isa<Constant>(V))
336 return false;
337 return !isInlineAsm();
338}
339
340/// Tests if this call site must be tail call optimized. Only a CallInst can
341/// be tail call optimized.
343 if (auto *CI = dyn_cast<CallInst>(this))
344 return CI->isMustTailCall();
345 return false;
346}
347
348/// Tests if this call site is marked as a tail call.
350 if (auto *CI = dyn_cast<CallInst>(this))
351 return CI->isTailCall();
352 return false;
353}
354
356 if (auto *F = getCalledFunction())
357 return F->getIntrinsicID();
359}
360
363
364 if (const Function *F = getCalledFunction())
365 Mask |= F->getAttributes().getRetNoFPClass();
366 return Mask;
367}
368
371
372 if (const Function *F = getCalledFunction())
373 Mask |= F->getAttributes().getParamNoFPClass(i);
374 return Mask;
375}
376
378 if (hasRetAttr(Attribute::NonNull))
379 return true;
380
381 if (getRetDereferenceableBytes() > 0 &&
382 !NullPointerIsDefined(getCaller(), getType()->getPointerAddressSpace()))
383 return true;
384
385 return false;
386}
387
389 unsigned Index;
390
391 if (Attrs.hasAttrSomewhere(Kind, &Index))
393 if (const Function *F = getCalledFunction())
394 if (F->getAttributes().hasAttrSomewhere(Kind, &Index))
396
397 return nullptr;
398}
399
400/// Determine whether the argument or parameter has the given attribute.
401bool CallBase::paramHasAttr(unsigned ArgNo, Attribute::AttrKind Kind) const {
402 assert(ArgNo < arg_size() && "Param index out of bounds!");
403
404 if (Attrs.hasParamAttr(ArgNo, Kind))
405 return true;
406
407 const Function *F = getCalledFunction();
408 if (!F)
409 return false;
410
411 if (!F->getAttributes().hasParamAttr(ArgNo, Kind))
412 return false;
413
414 // Take into account mod/ref by operand bundles.
415 switch (Kind) {
416 case Attribute::ReadNone:
418 case Attribute::ReadOnly:
420 case Attribute::WriteOnly:
421 return !hasReadingOperandBundles();
422 default:
423 return true;
424 }
425}
426
427bool CallBase::hasFnAttrOnCalledFunction(Attribute::AttrKind Kind) const {
428 Value *V = getCalledOperand();
429 if (auto *CE = dyn_cast<ConstantExpr>(V))
430 if (CE->getOpcode() == BitCast)
431 V = CE->getOperand(0);
432
433 if (auto *F = dyn_cast<Function>(V))
434 return F->getAttributes().hasFnAttr(Kind);
435
436 return false;
437}
438
439bool CallBase::hasFnAttrOnCalledFunction(StringRef Kind) const {
440 Value *V = getCalledOperand();
441 if (auto *CE = dyn_cast<ConstantExpr>(V))
442 if (CE->getOpcode() == BitCast)
443 V = CE->getOperand(0);
444
445 if (auto *F = dyn_cast<Function>(V))
446 return F->getAttributes().hasFnAttr(Kind);
447
448 return false;
449}
450
451template <typename AK>
452Attribute CallBase::getFnAttrOnCalledFunction(AK Kind) const {
453 if constexpr (std::is_same_v<AK, Attribute::AttrKind>) {
454 // getMemoryEffects() correctly combines memory effects from the call-site,
455 // operand bundles and function.
456 assert(Kind != Attribute::Memory && "Use getMemoryEffects() instead");
457 }
458
460 if (auto *CE = dyn_cast<ConstantExpr>(V))
461 if (CE->getOpcode() == BitCast)
462 V = CE->getOperand(0);
463
464 if (auto *F = dyn_cast<Function>(V))
465 return F->getAttributes().getFnAttr(Kind);
466
467 return Attribute();
468}
469
470template Attribute
471CallBase::getFnAttrOnCalledFunction(Attribute::AttrKind Kind) const;
472template Attribute CallBase::getFnAttrOnCalledFunction(StringRef Kind) const;
473
476 for (unsigned i = 0, e = getNumOperandBundles(); i != e; ++i)
478}
479
482 const unsigned BeginIndex) {
483 auto It = op_begin() + BeginIndex;
484 for (auto &B : Bundles)
485 It = std::copy(B.input_begin(), B.input_end(), It);
486
487 auto *ContextImpl = getContext().pImpl;
488 auto BI = Bundles.begin();
489 unsigned CurrentIndex = BeginIndex;
490
491 for (auto &BOI : bundle_op_infos()) {
492 assert(BI != Bundles.end() && "Incorrect allocation?");
493
494 BOI.Tag = ContextImpl->getOrInsertBundleTag(BI->getTag());
495 BOI.Begin = CurrentIndex;
496 BOI.End = CurrentIndex + BI->input_size();
497 CurrentIndex = BOI.End;
498 BI++;
499 }
500
501 assert(BI == Bundles.end() && "Incorrect allocation?");
502
503 return It;
504}
505
507 /// When there isn't many bundles, we do a simple linear search.
508 /// Else fallback to a binary-search that use the fact that bundles usually
509 /// have similar number of argument to get faster convergence.
511 for (auto &BOI : bundle_op_infos())
512 if (BOI.Begin <= OpIdx && OpIdx < BOI.End)
513 return BOI;
514
515 llvm_unreachable("Did not find operand bundle for operand!");
516 }
517
518 assert(OpIdx >= arg_size() && "the Idx is not in the operand bundles");
520 OpIdx < std::prev(bundle_op_info_end())->End &&
521 "The Idx isn't in the operand bundle");
522
523 /// We need a decimal number below and to prevent using floating point numbers
524 /// we use an intergal value multiplied by this constant.
525 constexpr unsigned NumberScaling = 1024;
526
529 bundle_op_iterator Current = Begin;
530
531 while (Begin != End) {
532 unsigned ScaledOperandPerBundle =
533 NumberScaling * (std::prev(End)->End - Begin->Begin) / (End - Begin);
534 Current = Begin + (((OpIdx - Begin->Begin) * NumberScaling) /
535 ScaledOperandPerBundle);
536 if (Current >= End)
537 Current = std::prev(End);
538 assert(Current < End && Current >= Begin &&
539 "the operand bundle doesn't cover every value in the range");
540 if (OpIdx >= Current->Begin && OpIdx < Current->End)
541 break;
542 if (OpIdx >= Current->End)
543 Begin = Current + 1;
544 else
545 End = Current;
546 }
547
548 assert(OpIdx >= Current->Begin && OpIdx < Current->End &&
549 "the operand bundle doesn't cover every value in the range");
550 return *Current;
551}
552
555 Instruction *InsertPt) {
556 if (CB->getOperandBundle(ID))
557 return CB;
558
560 CB->getOperandBundlesAsDefs(Bundles);
561 Bundles.push_back(OB);
562 return Create(CB, Bundles, InsertPt);
563}
564
566 Instruction *InsertPt) {
568 bool CreateNew = false;
569
570 for (unsigned I = 0, E = CB->getNumOperandBundles(); I != E; ++I) {
571 auto Bundle = CB->getOperandBundleAt(I);
572 if (Bundle.getTagID() == ID) {
573 CreateNew = true;
574 continue;
575 }
576 Bundles.emplace_back(Bundle);
577 }
578
579 return CreateNew ? Create(CB, Bundles, InsertPt) : CB;
580}
581
583 // Implementation note: this is a conservative implementation of operand
584 // bundle semantics, where *any* non-assume operand bundle (other than
585 // ptrauth) forces a callsite to be at least readonly.
588 getIntrinsicID() != Intrinsic::assume;
589}
590
595 getIntrinsicID() != Intrinsic::assume;
596}
597
600 if (auto *Fn = dyn_cast<Function>(getCalledOperand())) {
601 MemoryEffects FnME = Fn->getMemoryEffects();
602 if (hasOperandBundles()) {
603 // TODO: Add a method to get memory effects for operand bundles instead.
605 FnME |= MemoryEffects::readOnly();
607 FnME |= MemoryEffects::writeOnly();
608 }
609 ME &= FnME;
610 }
611 return ME;
612}
615}
616
617/// Determine if the function does not access memory.
620}
623}
624
625/// Determine if the function does not access or only reads memory.
628}
631}
632
633/// Determine if the function does not access or only writes memory.
636}
639}
640
641/// Determine if the call can access memmory only using pointers based
642/// on its arguments.
645}
648}
649
650/// Determine if the function may only access memory that is
651/// inaccessible from the IR.
654}
657}
658
659/// Determine if the function may only access memory that is
660/// either inaccessible from the IR or pointed to by its arguments.
663}
667}
668
669//===----------------------------------------------------------------------===//
670// CallInst Implementation
671//===----------------------------------------------------------------------===//
672
673void CallInst::init(FunctionType *FTy, Value *Func, ArrayRef<Value *> Args,
674 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr) {
675 this->FTy = FTy;
676 assert(getNumOperands() == Args.size() + CountBundleInputs(Bundles) + 1 &&
677 "NumOperands not set up?");
678
679#ifndef NDEBUG
680 assert((Args.size() == FTy->getNumParams() ||
681 (FTy->isVarArg() && Args.size() > FTy->getNumParams())) &&
682 "Calling a function with bad signature!");
683
684 for (unsigned i = 0; i != Args.size(); ++i)
685 assert((i >= FTy->getNumParams() ||
686 FTy->getParamType(i) == Args[i]->getType()) &&
687 "Calling a function with a bad signature!");
688#endif
689
690 // Set operands in order of their index to match use-list-order
691 // prediction.
692 llvm::copy(Args, op_begin());
693 setCalledOperand(Func);
694
695 auto It = populateBundleOperandInfos(Bundles, Args.size());
696 (void)It;
697 assert(It + 1 == op_end() && "Should add up!");
698
699 setName(NameStr);
700}
701
702void CallInst::init(FunctionType *FTy, Value *Func, const Twine &NameStr) {
703 this->FTy = FTy;
704 assert(getNumOperands() == 1 && "NumOperands not set up?");
705 setCalledOperand(Func);
706
707 assert(FTy->getNumParams() == 0 && "Calling a function with bad signature");
708
709 setName(NameStr);
710}
711
712CallInst::CallInst(FunctionType *Ty, Value *Func, const Twine &Name,
713 Instruction *InsertBefore)
714 : CallBase(Ty->getReturnType(), Instruction::Call,
715 OperandTraits<CallBase>::op_end(this) - 1, 1, InsertBefore) {
716 init(Ty, Func, Name);
717}
718
719CallInst::CallInst(FunctionType *Ty, Value *Func, const Twine &Name,
720 BasicBlock *InsertAtEnd)
721 : CallBase(Ty->getReturnType(), Instruction::Call,
722 OperandTraits<CallBase>::op_end(this) - 1, 1, InsertAtEnd) {
723 init(Ty, Func, Name);
724}
725
726CallInst::CallInst(const CallInst &CI)
727 : CallBase(CI.Attrs, CI.FTy, CI.getType(), Instruction::Call,
728 OperandTraits<CallBase>::op_end(this) - CI.getNumOperands(),
729 CI.getNumOperands()) {
730 setTailCallKind(CI.getTailCallKind());
732
733 std::copy(CI.op_begin(), CI.op_end(), op_begin());
734 std::copy(CI.bundle_op_info_begin(), CI.bundle_op_info_end(),
737}
738
740 Instruction *InsertPt) {
741 std::vector<Value *> Args(CI->arg_begin(), CI->arg_end());
742
743 auto *NewCI = CallInst::Create(CI->getFunctionType(), CI->getCalledOperand(),
744 Args, OpB, CI->getName(), InsertPt);
745 NewCI->setTailCallKind(CI->getTailCallKind());
746 NewCI->setCallingConv(CI->getCallingConv());
747 NewCI->SubclassOptionalData = CI->SubclassOptionalData;
748 NewCI->setAttributes(CI->getAttributes());
749 NewCI->setDebugLoc(CI->getDebugLoc());
750 return NewCI;
751}
752
753// Update profile weight for call instruction by scaling it using the ratio
754// of S/T. The meaning of "branch_weights" meta data for call instruction is
755// transfered to represent call count.
757 auto *ProfileData = getMetadata(LLVMContext::MD_prof);
758 if (ProfileData == nullptr)
759 return;
760
761 auto *ProfDataName = dyn_cast<MDString>(ProfileData->getOperand(0));
762 if (!ProfDataName || (!ProfDataName->getString().equals("branch_weights") &&
763 !ProfDataName->getString().equals("VP")))
764 return;
765
766 if (T == 0) {
767 LLVM_DEBUG(dbgs() << "Attempting to update profile weights will result in "
768 "div by 0. Ignoring. Likely the function "
769 << getParent()->getParent()->getName()
770 << " has 0 entry count, and contains call instructions "
771 "with non-zero prof info.");
772 return;
773 }
774
775 MDBuilder MDB(getContext());
777 Vals.push_back(ProfileData->getOperand(0));
778 APInt APS(128, S), APT(128, T);
779 if (ProfDataName->getString().equals("branch_weights") &&
780 ProfileData->getNumOperands() > 0) {
781 // Using APInt::div may be expensive, but most cases should fit 64 bits.
782 APInt Val(128, mdconst::dyn_extract<ConstantInt>(ProfileData->getOperand(1))
783 ->getValue()
784 .getZExtValue());
785 Val *= APS;
786 Vals.push_back(MDB.createConstant(
788 Val.udiv(APT).getLimitedValue(UINT32_MAX))));
789 } else if (ProfDataName->getString().equals("VP"))
790 for (unsigned i = 1; i < ProfileData->getNumOperands(); i += 2) {
791 // The first value is the key of the value profile, which will not change.
792 Vals.push_back(ProfileData->getOperand(i));
793 uint64_t Count =
794 mdconst::dyn_extract<ConstantInt>(ProfileData->getOperand(i + 1))
795 ->getValue()
796 .getZExtValue();
797 // Don't scale the magic number.
798 if (Count == NOMORE_ICP_MAGICNUM) {
799 Vals.push_back(ProfileData->getOperand(i + 1));
800 continue;
801 }
802 // Using APInt::div may be expensive, but most cases should fit 64 bits.
803 APInt Val(128, Count);
804 Val *= APS;
805 Vals.push_back(MDB.createConstant(
807 Val.udiv(APT).getLimitedValue())));
808 }
809 setMetadata(LLVMContext::MD_prof, MDNode::get(getContext(), Vals));
810}
811
812//===----------------------------------------------------------------------===//
813// InvokeInst Implementation
814//===----------------------------------------------------------------------===//
815
816void InvokeInst::init(FunctionType *FTy, Value *Fn, BasicBlock *IfNormal,
817 BasicBlock *IfException, ArrayRef<Value *> Args,
819 const Twine &NameStr) {
820 this->FTy = FTy;
821
822 assert((int)getNumOperands() ==
823 ComputeNumOperands(Args.size(), CountBundleInputs(Bundles)) &&
824 "NumOperands not set up?");
825
826#ifndef NDEBUG
827 assert(((Args.size() == FTy->getNumParams()) ||
828 (FTy->isVarArg() && Args.size() > FTy->getNumParams())) &&
829 "Invoking a function with bad signature");
830
831 for (unsigned i = 0, e = Args.size(); i != e; i++)
832 assert((i >= FTy->getNumParams() ||
833 FTy->getParamType(i) == Args[i]->getType()) &&
834 "Invoking a function with a bad signature!");
835#endif
836
837 // Set operands in order of their index to match use-list-order
838 // prediction.
839 llvm::copy(Args, op_begin());
840 setNormalDest(IfNormal);
841 setUnwindDest(IfException);
843
844 auto It = populateBundleOperandInfos(Bundles, Args.size());
845 (void)It;
846 assert(It + 3 == op_end() && "Should add up!");
847
848 setName(NameStr);
849}
850
851InvokeInst::InvokeInst(const InvokeInst &II)
852 : CallBase(II.Attrs, II.FTy, II.getType(), Instruction::Invoke,
853 OperandTraits<CallBase>::op_end(this) - II.getNumOperands(),
854 II.getNumOperands()) {
856 std::copy(II.op_begin(), II.op_end(), op_begin());
857 std::copy(II.bundle_op_info_begin(), II.bundle_op_info_end(),
860}
861
863 Instruction *InsertPt) {
864 std::vector<Value *> Args(II->arg_begin(), II->arg_end());
865
866 auto *NewII = InvokeInst::Create(
868 II->getUnwindDest(), Args, OpB, II->getName(), InsertPt);
869 NewII->setCallingConv(II->getCallingConv());
870 NewII->SubclassOptionalData = II->SubclassOptionalData;
871 NewII->setAttributes(II->getAttributes());
872 NewII->setDebugLoc(II->getDebugLoc());
873 return NewII;
874}
875
877 return cast<LandingPadInst>(getUnwindDest()->getFirstNonPHI());
878}
879
880//===----------------------------------------------------------------------===//
881// CallBrInst Implementation
882//===----------------------------------------------------------------------===//
883
884void CallBrInst::init(FunctionType *FTy, Value *Fn, BasicBlock *Fallthrough,
885 ArrayRef<BasicBlock *> IndirectDests,
888 const Twine &NameStr) {
889 this->FTy = FTy;
890
891 assert((int)getNumOperands() ==
892 ComputeNumOperands(Args.size(), IndirectDests.size(),
893 CountBundleInputs(Bundles)) &&
894 "NumOperands not set up?");
895
896#ifndef NDEBUG
897 assert(((Args.size() == FTy->getNumParams()) ||
898 (FTy->isVarArg() && Args.size() > FTy->getNumParams())) &&
899 "Calling a function with bad signature");
900
901 for (unsigned i = 0, e = Args.size(); i != e; i++)
902 assert((i >= FTy->getNumParams() ||
903 FTy->getParamType(i) == Args[i]->getType()) &&
904 "Calling a function with a bad signature!");
905#endif
906
907 // Set operands in order of their index to match use-list-order
908 // prediction.
909 std::copy(Args.begin(), Args.end(), op_begin());
910 NumIndirectDests = IndirectDests.size();
911 setDefaultDest(Fallthrough);
912 for (unsigned i = 0; i != NumIndirectDests; ++i)
913 setIndirectDest(i, IndirectDests[i]);
915
916 auto It = populateBundleOperandInfos(Bundles, Args.size());
917 (void)It;
918 assert(It + 2 + IndirectDests.size() == op_end() && "Should add up!");
919
920 setName(NameStr);
921}
922
923CallBrInst::CallBrInst(const CallBrInst &CBI)
924 : CallBase(CBI.Attrs, CBI.FTy, CBI.getType(), Instruction::CallBr,
925 OperandTraits<CallBase>::op_end(this) - CBI.getNumOperands(),
926 CBI.getNumOperands()) {
928 std::copy(CBI.op_begin(), CBI.op_end(), op_begin());
929 std::copy(CBI.bundle_op_info_begin(), CBI.bundle_op_info_end(),
932 NumIndirectDests = CBI.NumIndirectDests;
933}
934
936 Instruction *InsertPt) {
937 std::vector<Value *> Args(CBI->arg_begin(), CBI->arg_end());
938
939 auto *NewCBI = CallBrInst::Create(
940 CBI->getFunctionType(), CBI->getCalledOperand(), CBI->getDefaultDest(),
941 CBI->getIndirectDests(), Args, OpB, CBI->getName(), InsertPt);
942 NewCBI->setCallingConv(CBI->getCallingConv());
943 NewCBI->SubclassOptionalData = CBI->SubclassOptionalData;
944 NewCBI->setAttributes(CBI->getAttributes());
945 NewCBI->setDebugLoc(CBI->getDebugLoc());
946 NewCBI->NumIndirectDests = CBI->NumIndirectDests;
947 return NewCBI;
948}
949
950//===----------------------------------------------------------------------===//
951// ReturnInst Implementation
952//===----------------------------------------------------------------------===//
953
954ReturnInst::ReturnInst(const ReturnInst &RI)
955 : Instruction(Type::getVoidTy(RI.getContext()), Instruction::Ret,
956 OperandTraits<ReturnInst>::op_end(this) - RI.getNumOperands(),
957 RI.getNumOperands()) {
958 if (RI.getNumOperands())
959 Op<0>() = RI.Op<0>();
961}
962
963ReturnInst::ReturnInst(LLVMContext &C, Value *retVal, Instruction *InsertBefore)
964 : Instruction(Type::getVoidTy(C), Instruction::Ret,
965 OperandTraits<ReturnInst>::op_end(this) - !!retVal, !!retVal,
966 InsertBefore) {
967 if (retVal)
968 Op<0>() = retVal;
969}
970
971ReturnInst::ReturnInst(LLVMContext &C, Value *retVal, BasicBlock *InsertAtEnd)
972 : Instruction(Type::getVoidTy(C), Instruction::Ret,
973 OperandTraits<ReturnInst>::op_end(this) - !!retVal, !!retVal,
974 InsertAtEnd) {
975 if (retVal)
976 Op<0>() = retVal;
977}
978
979ReturnInst::ReturnInst(LLVMContext &Context, BasicBlock *InsertAtEnd)
980 : Instruction(Type::getVoidTy(Context), Instruction::Ret,
981 OperandTraits<ReturnInst>::op_end(this), 0, InsertAtEnd) {}
982
983//===----------------------------------------------------------------------===//
984// ResumeInst Implementation
985//===----------------------------------------------------------------------===//
986
987ResumeInst::ResumeInst(const ResumeInst &RI)
988 : Instruction(Type::getVoidTy(RI.getContext()), Instruction::Resume,
989 OperandTraits<ResumeInst>::op_begin(this), 1) {
990 Op<0>() = RI.Op<0>();
991}
992
993ResumeInst::ResumeInst(Value *Exn, Instruction *InsertBefore)
994 : Instruction(Type::getVoidTy(Exn->getContext()), Instruction::Resume,
995 OperandTraits<ResumeInst>::op_begin(this), 1, InsertBefore) {
996 Op<0>() = Exn;
997}
998
999ResumeInst::ResumeInst(Value *Exn, BasicBlock *InsertAtEnd)
1000 : Instruction(Type::getVoidTy(Exn->getContext()), Instruction::Resume,
1001 OperandTraits<ResumeInst>::op_begin(this), 1, InsertAtEnd) {
1002 Op<0>() = Exn;
1003}
1004
1005//===----------------------------------------------------------------------===//
1006// CleanupReturnInst Implementation
1007//===----------------------------------------------------------------------===//
1008
1009CleanupReturnInst::CleanupReturnInst(const CleanupReturnInst &CRI)
1010 : Instruction(CRI.getType(), Instruction::CleanupRet,
1011 OperandTraits<CleanupReturnInst>::op_end(this) -
1012 CRI.getNumOperands(),
1013 CRI.getNumOperands()) {
1014 setSubclassData<Instruction::OpaqueField>(
1016 Op<0>() = CRI.Op<0>();
1017 if (CRI.hasUnwindDest())
1018 Op<1>() = CRI.Op<1>();
1019}
1020
1021void CleanupReturnInst::init(Value *CleanupPad, BasicBlock *UnwindBB) {
1022 if (UnwindBB)
1023 setSubclassData<UnwindDestField>(true);
1024
1025 Op<0>() = CleanupPad;
1026 if (UnwindBB)
1027 Op<1>() = UnwindBB;
1028}
1029
1030CleanupReturnInst::CleanupReturnInst(Value *CleanupPad, BasicBlock *UnwindBB,
1031 unsigned Values, Instruction *InsertBefore)
1032 : Instruction(Type::getVoidTy(CleanupPad->getContext()),
1033 Instruction::CleanupRet,
1034 OperandTraits<CleanupReturnInst>::op_end(this) - Values,
1035 Values, InsertBefore) {
1036 init(CleanupPad, UnwindBB);
1037}
1038
1039CleanupReturnInst::CleanupReturnInst(Value *CleanupPad, BasicBlock *UnwindBB,
1040 unsigned Values, BasicBlock *InsertAtEnd)
1041 : Instruction(Type::getVoidTy(CleanupPad->getContext()),
1042 Instruction::CleanupRet,
1043 OperandTraits<CleanupReturnInst>::op_end(this) - Values,
1044 Values, InsertAtEnd) {
1045 init(CleanupPad, UnwindBB);
1046}
1047
1048//===----------------------------------------------------------------------===//
1049// CatchReturnInst Implementation
1050//===----------------------------------------------------------------------===//
// Wire up a catchret's two operands: the catchpad token being exited and
// the successor block control transfers to.
void CatchReturnInst::init(Value *CatchPad, BasicBlock *BB) {
  Op<0>() = CatchPad;
  Op<1>() = BB;
}
1055
// Copy constructor: a catchret always has exactly two operands
// (catchpad, successor), copied directly from CRI.
CatchReturnInst::CatchReturnInst(const CatchReturnInst &CRI)
    : Instruction(Type::getVoidTy(CRI.getContext()), Instruction::CatchRet,
                  OperandTraits<CatchReturnInst>::op_begin(this), 2) {
  Op<0>() = CRI.Op<0>();
  Op<1>() = CRI.Op<1>();
}

// Create a catchret inserted before InsertBefore.
CatchReturnInst::CatchReturnInst(Value *CatchPad, BasicBlock *BB,
                                 Instruction *InsertBefore)
    : Instruction(Type::getVoidTy(BB->getContext()), Instruction::CatchRet,
                  OperandTraits<CatchReturnInst>::op_begin(this), 2,
                  InsertBefore) {
  init(CatchPad, BB);
}

// Create a catchret appended to the end of InsertAtEnd.
CatchReturnInst::CatchReturnInst(Value *CatchPad, BasicBlock *BB,
                                 BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(BB->getContext()), Instruction::CatchRet,
                  OperandTraits<CatchReturnInst>::op_begin(this), 2,
                  InsertAtEnd) {
  init(CatchPad, BB);
}
1078
1079//===----------------------------------------------------------------------===//
1080// CatchSwitchInst Implementation
1081//===----------------------------------------------------------------------===//
1082
// Create a catchswitch inserted before InsertBefore. NumReservedValues is
// the number of handler slots to pre-reserve; one extra slot is reserved
// when there is an unwind destination, and init() is passed +1 more to
// account for the parent-pad operand. The result type is the parent pad's
// token type.
CatchSwitchInst::CatchSwitchInst(Value *ParentPad, BasicBlock *UnwindDest,
                                 unsigned NumReservedValues,
                                 const Twine &NameStr,
                                 Instruction *InsertBefore)
    : Instruction(ParentPad->getType(), Instruction::CatchSwitch, nullptr, 0,
                  InsertBefore) {
  if (UnwindDest)
    ++NumReservedValues;
  init(ParentPad, UnwindDest, NumReservedValues + 1);
  setName(NameStr);
}

// Same as above, but appended to the end of InsertAtEnd.
CatchSwitchInst::CatchSwitchInst(Value *ParentPad, BasicBlock *UnwindDest,
                                 unsigned NumReservedValues,
                                 const Twine &NameStr, BasicBlock *InsertAtEnd)
    : Instruction(ParentPad->getType(), Instruction::CatchSwitch, nullptr, 0,
                  InsertAtEnd) {
  if (UnwindDest)
    ++NumReservedValues;
  init(ParentPad, UnwindDest, NumReservedValues + 1);
  setName(NameStr);
}
1105
// Copy constructor: init() re-establishes operand 0 (parent pad) and the
// optional unwind destination, then the remaining operands — the handler
// list — are copied starting at index 1.
CatchSwitchInst::CatchSwitchInst(const CatchSwitchInst &CSI)
    : Instruction(CSI.getType(), Instruction::CatchSwitch, nullptr,
                  CSI.getNumOperands()) {
  init(CSI.getParentPad(), CSI.getUnwindDest(), CSI.getNumOperands());
  setNumHungOffUseOperands(ReservedSpace);
  Use *OL = getOperandList();
  const Use *InOL = CSI.getOperandList();
  for (unsigned I = 1, E = ReservedSpace; I != E; ++I)
    OL[I] = InOL[I];
}
1116
// Shared initialization for catchswitch. The handler list is variable
// length, so operands are "hung off" (heap-allocated) rather than
// co-allocated with the instruction. The initial operand count is 1 (just
// the parent pad) or 2 when there is an unwind destination.
void CatchSwitchInst::init(Value *ParentPad, BasicBlock *UnwindDest,
                           unsigned NumReservedValues) {
  assert(ParentPad && NumReservedValues);

  ReservedSpace = NumReservedValues;
  setNumHungOffUseOperands(UnwindDest ? 2 : 1);
  allocHungoffUses(ReservedSpace);

  Op<0>() = ParentPad;
  if (UnwindDest) {
    // Record that operand 1 is an unwind destination before storing it.
    setSubclassData<UnwindDestField>(true);
    setUnwindDest(UnwindDest);
  }
}
1131
/// growOperands - grow operands - This grows the operand list in response to a
/// push_back style of operation. This grows the number of ops by 2 times.
void CatchSwitchInst::growOperands(unsigned Size) {
  unsigned NumOperands = getNumOperands();
  assert(NumOperands >= 1);
  // Already enough reserved space — nothing to do.
  if (ReservedSpace >= NumOperands + Size)
    return;
  // NOTE(review): 'Size / 2' binds tighter than '+', so for the sole caller
  // (Size == 1) this is simply 2 * NumOperands, i.e. capacity doubling —
  // TODO confirm the precedence here is intentional.
  ReservedSpace = (NumOperands + Size / 2) * 2;
  growHungoffUses(ReservedSpace);
}
1142
// NOTE(review): the signatures of CatchSwitchInst::addHandler and
// CatchSwitchInst::removeHandler, and their setNumHungOffUseOperands
// update lines, appear to be missing from this copy of the file
// (extraction artifact) — verify against upstream before editing.
// Body of addHandler: append Handler as a new trailing operand, growing
// the hung-off operand list if needed.
  unsigned OpNo = getNumOperands();
  growOperands(1);
  assert(OpNo < ReservedSpace && "Growing didn't work!");
  getOperandList()[OpNo] = Handler;
}

// Body of removeHandler: erase the handler at HI by shifting the
// remaining handlers down one slot.
  // Move all subsequent handlers up one.
  Use *EndDst = op_end() - 1;
  for (Use *CurDst = HI.getCurrent(); CurDst != EndDst; ++CurDst)
    *CurDst = *(CurDst + 1);
  // Null out the last handler use.
  *EndDst = nullptr;

}
1161
1162//===----------------------------------------------------------------------===//
1163// FuncletPadInst Implementation
1164//===----------------------------------------------------------------------===//
1165void FuncletPadInst::init(Value *ParentPad, ArrayRef<Value *> Args,
1166 const Twine &NameStr) {
1167 assert(getNumOperands() == 1 + Args.size() && "NumOperands not set up?");
1168 llvm::copy(Args, op_begin());
1169 setParentPad(ParentPad);
1170 setName(NameStr);
1171}
1172
// Copy constructor: duplicates FPI's opcode (catchpad or cleanuppad) and
// copies all of its operands.
// NOTE(review): a trailing statement appears to be missing from this copy
// of the file (extraction artifact) — verify against upstream.
FuncletPadInst::FuncletPadInst(const FuncletPadInst &FPI)
    : Instruction(FPI.getType(), FPI.getOpcode(),
                  OperandTraits<FuncletPadInst>::op_end(this) -
                      FPI.getNumOperands(),
                  FPI.getNumOperands()) {
  std::copy(FPI.op_begin(), FPI.op_end(), op_begin());
}
1181
// Create a funclet pad (opcode Op is CatchPad or CleanupPad) inserted
// before InsertBefore. Values is the total operand count (args + parent
// pad); operands are co-allocated just before the instruction.
FuncletPadInst::FuncletPadInst(Instruction::FuncletPadOps Op, Value *ParentPad,
                               ArrayRef<Value *> Args, unsigned Values,
                               const Twine &NameStr, Instruction *InsertBefore)
    : Instruction(ParentPad->getType(), Op,
                  OperandTraits<FuncletPadInst>::op_end(this) - Values, Values,
                  InsertBefore) {
  init(ParentPad, Args, NameStr);
}

// Same as above, but appended to the end of InsertAtEnd.
FuncletPadInst::FuncletPadInst(Instruction::FuncletPadOps Op, Value *ParentPad,
                               ArrayRef<Value *> Args, unsigned Values,
                               const Twine &NameStr, BasicBlock *InsertAtEnd)
    : Instruction(ParentPad->getType(), Op,
                  OperandTraits<FuncletPadInst>::op_end(this) - Values, Values,
                  InsertAtEnd) {
  init(ParentPad, Args, NameStr);
}
1199
1200//===----------------------------------------------------------------------===//
1201// UnreachableInst Implementation
1202//===----------------------------------------------------------------------===//
1203
// NOTE(review): the leading signature lines of both UnreachableInst
// constructors are missing from this copy of the file (extraction
// artifact) — verify against upstream. 'unreachable' has void type and
// zero operands; the two overloads differ only in insertion position.
                                 Instruction *InsertBefore)
    : Instruction(Type::getVoidTy(Context), Instruction::Unreachable, nullptr,
                  0, InsertBefore) {}
    : Instruction(Type::getVoidTy(Context), Instruction::Unreachable, nullptr,
                  0, InsertAtEnd) {}
1211
1212//===----------------------------------------------------------------------===//
1213// BranchInst Implementation
1214//===----------------------------------------------------------------------===//
1215
1216void BranchInst::AssertOK() {
1217 if (isConditional())
1218 assert(getCondition()->getType()->isIntegerTy(1) &&
1219 "May only branch on boolean predicates!");
1220}
1221
// Create an unconditional branch to IfTrue, inserted before InsertBefore.
// Branch operands are stored at the *end* of the co-allocated operand
// array, hence the negative Op<> indices.
BranchInst::BranchInst(BasicBlock *IfTrue, Instruction *InsertBefore)
    : Instruction(Type::getVoidTy(IfTrue->getContext()), Instruction::Br,
                  OperandTraits<BranchInst>::op_end(this) - 1, 1,
                  InsertBefore) {
  assert(IfTrue && "Branch destination may not be null!");
  Op<-1>() = IfTrue;
}
1229
// Create a conditional branch (cond, false-dest, true-dest), inserted
// before InsertBefore. Operand layout (from the end): -3 = condition,
// -2 = false successor, -1 = true successor.
BranchInst::BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond,
                       Instruction *InsertBefore)
    : Instruction(Type::getVoidTy(IfTrue->getContext()), Instruction::Br,
                  OperandTraits<BranchInst>::op_end(this) - 3, 3,
                  InsertBefore) {
  // Assign in order of operand index to make use-list order predictable.
  Op<-3>() = Cond;
  Op<-2>() = IfFalse;
  Op<-1>() = IfTrue;
#ifndef NDEBUG
  AssertOK();
#endif
}
1243
// Create an unconditional branch to IfTrue, appended to InsertAtEnd.
BranchInst::BranchInst(BasicBlock *IfTrue, BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(IfTrue->getContext()), Instruction::Br,
                  OperandTraits<BranchInst>::op_end(this) - 1, 1, InsertAtEnd) {
  assert(IfTrue && "Branch destination may not be null!");
  Op<-1>() = IfTrue;
}

// Create a conditional branch appended to InsertAtEnd; operand layout is
// the same as the insert-before overload above.
BranchInst::BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond,
                       BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(IfTrue->getContext()), Instruction::Br,
                  OperandTraits<BranchInst>::op_end(this) - 3, 3, InsertAtEnd) {
  // Assign in order of operand index to make use-list order predictable.
  Op<-3>() = Cond;
  Op<-2>() = IfFalse;
  Op<-1>() = IfTrue;
#ifndef NDEBUG
  AssertOK();
#endif
}
1263
// Copy constructor: replicates BI's 1 (unconditional) or 3 (conditional)
// operands.
// NOTE(review): a trailing statement appears to be missing from this copy
// of the file (extraction artifact) — verify against upstream.
BranchInst::BranchInst(const BranchInst &BI)
    : Instruction(Type::getVoidTy(BI.getContext()), Instruction::Br,
                  OperandTraits<BranchInst>::op_end(this) - BI.getNumOperands(),
                  BI.getNumOperands()) {
  // Assign in order of operand index to make use-list order predictable.
  if (BI.getNumOperands() != 1) {
    assert(BI.getNumOperands() == 3 && "BR can have 1 or 3 operands!");
    Op<-3>() = BI.Op<-3>();
    Op<-2>() = BI.Op<-2>();
  }
  Op<-1>() = BI.Op<-1>();
}
1277
1280 "Cannot swap successors of an unconditional branch");
1281 Op<-1>().swap(Op<-2>());
1282
1283 // Update profile metadata if present and it matches our structural
1284 // expectations.
1286}
1287
1288//===----------------------------------------------------------------------===//
1289// AllocaInst Implementation
1290//===----------------------------------------------------------------------===//
1291
// Normalize an alloca's array-size operand. A null Amt means "one element";
// otherwise Amt must be an integer (and not, e.g., a mistakenly-passed
// basic block from the wrong constructor overload).
// NOTE(review): the statement in the '!Amt' branch (which should supply the
// default size of 1) appears to be missing from this copy of the file
// (extraction artifact) — verify against upstream.
static Value *getAISize(LLVMContext &Context, Value *Amt) {
  if (!Amt)
  else {
    assert(!isa<BasicBlock>(Amt) &&
           "Passed basic block into allocation size parameter! Use other ctor");
    assert(Amt->getType()->isIntegerTy() &&
           "Allocation array size is not an integer!");
  }
  return Amt;
}
1303
// NOTE(review): the signatures of the two computeAllocaDefaultAlign helper
// overloads are missing from this copy of the file (extraction artifact) —
// verify against upstream. They derive the default alignment for an alloca
// of type Ty from the enclosing module's DataLayout (preferred alignment),
// which is why the insertion point must already be inside a Function.
  assert(BB && "Insertion BB cannot be null when alignment not provided!");
  assert(BB->getParent() &&
         "BB must be in a Function when alignment not provided!");
  const DataLayout &DL = BB->getModule()->getDataLayout();
  return DL.getPrefTypeAlign(Ty);
}

// Overload taking an instruction: delegate via its parent block.
  assert(I && "Insertion position cannot be null when alignment not provided!");
  return computeAllocaDefaultAlign(Ty, I->getParent());
}
1316
// Convenience constructors: default the array size to null (one element)
// and/or the alignment to the DataLayout's preferred alignment, then
// delegate to the full constructor.
AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, const Twine &Name,
                       Instruction *InsertBefore)
    : AllocaInst(Ty, AddrSpace, /*ArraySize=*/nullptr, Name, InsertBefore) {}

AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, const Twine &Name,
                       BasicBlock *InsertAtEnd)
    : AllocaInst(Ty, AddrSpace, /*ArraySize=*/nullptr, Name, InsertAtEnd) {}

AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
                       const Twine &Name, Instruction *InsertBefore)
    : AllocaInst(Ty, AddrSpace, ArraySize,
                 computeAllocaDefaultAlign(Ty, InsertBefore), Name,
                 InsertBefore) {}

AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
                       const Twine &Name, BasicBlock *InsertAtEnd)
    : AllocaInst(Ty, AddrSpace, ArraySize,
                 computeAllocaDefaultAlign(Ty, InsertAtEnd), Name,
                 InsertAtEnd) {}
1336
// Full constructors: the result type is a pointer into AddrSpace; the
// allocated element type Ty is stored separately since the pointer type is
// opaque. The single operand is the (normalized) array size.
// NOTE(review): the statement applying 'Align' appears to be missing from
// both bodies in this copy of the file (extraction artifact) — verify
// against upstream.
AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
                       Align Align, const Twine &Name,
                       Instruction *InsertBefore)
    : UnaryInstruction(PointerType::get(Ty, AddrSpace), Alloca,
                       getAISize(Ty->getContext(), ArraySize), InsertBefore),
      AllocatedType(Ty) {
  assert(!Ty->isVoidTy() && "Cannot allocate void!");
  setName(Name);
}

AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
                       Align Align, const Twine &Name, BasicBlock *InsertAtEnd)
    : UnaryInstruction(PointerType::get(Ty, AddrSpace), Alloca,
                       getAISize(Ty->getContext(), ArraySize), InsertAtEnd),
      AllocatedType(Ty) {
  assert(!Ty->isVoidTy() && "Cannot allocate void!");
  setName(Name);
}
1357
1358
// NOTE(review): the signature of AllocaInst::isArrayAllocation appears to
// be missing from this copy of the file (extraction artifact). The body
// reports whether the alloca allocates other than exactly one element: a
// non-constant size is conservatively treated as an array allocation.
  if (ConstantInt *CI = dyn_cast<ConstantInt>(getOperand(0)))
    return !CI->isOne();
  return true;
}

/// isStaticAlloca - Return true if this alloca is in the entry block of the
/// function and is a constant size. If so, the code generator will fold it
/// into the prolog/epilog code, so it is basically free.
// NOTE(review): this function's signature also appears to be missing here.
  // Must be constant size.
  if (!isa<ConstantInt>(getArraySize())) return false;

  // Must be in the entry block.
  const BasicBlock *Parent = getParent();
  return Parent->isEntryBlock() && !isUsedWithInAlloca();
}
1376
1377//===----------------------------------------------------------------------===//
1378// LoadInst Implementation
1379//===----------------------------------------------------------------------===//
1380
// Debug-build sanity check for loads: the single operand must be a pointer.
void LoadInst::AssertOK() {
  assert(getOperand(0)->getType()->isPointerTy() &&
         "Ptr must have pointer type.");
}
1385
// NOTE(review): the signatures of the two computeLoadStoreDefaultAlign
// helper overloads are missing from this copy of the file (extraction
// artifact) — verify against upstream. They derive the default alignment
// for a load/store of type Ty from the module's DataLayout (ABI alignment,
// unlike allocas which use the preferred alignment).
  assert(BB && "Insertion BB cannot be null when alignment not provided!");
  assert(BB->getParent() &&
         "BB must be in a Function when alignment not provided!");
  const DataLayout &DL = BB->getModule()->getDataLayout();
  return DL.getABITypeAlign(Ty);
}

// Overload taking an instruction: delegate via its parent block.
  assert(I && "Insertion position cannot be null when alignment not provided!");
  return computeLoadStoreDefaultAlign(Ty, I->getParent());
}
1398
// Convenience constructors: progressively default volatility (false),
// alignment (ABI default from DataLayout), and atomicity (NotAtomic /
// system scope), delegating to the next-fuller overload.
// NOTE(review): the leading signature lines of the first two overloads are
// missing from this copy of the file (extraction artifact).
                   Instruction *InsertBef)
    : LoadInst(Ty, Ptr, Name, /*isVolatile=*/false, InsertBef) {}

                   BasicBlock *InsertAE)
    : LoadInst(Ty, Ptr, Name, /*isVolatile=*/false, InsertAE) {}

LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
                   Instruction *InsertBef)
    : LoadInst(Ty, Ptr, Name, isVolatile,
               computeLoadStoreDefaultAlign(Ty, InsertBef), InsertBef) {}

LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
                   BasicBlock *InsertAE)
    : LoadInst(Ty, Ptr, Name, isVolatile,
               computeLoadStoreDefaultAlign(Ty, InsertAE), InsertAE) {}

LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
                   Align Align, Instruction *InsertBef)
    : LoadInst(Ty, Ptr, Name, isVolatile, Align, AtomicOrdering::NotAtomic,
               SyncScope::System, InsertBef) {}

LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
                   Align Align, BasicBlock *InsertAE)
    : LoadInst(Ty, Ptr, Name, isVolatile, Align, AtomicOrdering::NotAtomic,
               SyncScope::System, InsertAE) {}
1426
// Full constructors: explicit type, volatility, alignment, ordering and
// sync scope. The loaded type Ty is the instruction's result type; Ptr is
// the single operand.
// NOTE(review): part of each signature and the statements applying
// 'isVolatile' and 'Align' appear to be missing from this copy of the file
// (extraction artifact) — verify against upstream.
LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
                   Instruction *InsertBef)
    : UnaryInstruction(Ty, Load, Ptr, InsertBef) {
  setAtomic(Order, SSID);
  AssertOK();
  setName(Name);
}

LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
                   BasicBlock *InsertAE)
    : UnaryInstruction(Ty, Load, Ptr, InsertAE) {
  setAtomic(Order, SSID);
  AssertOK();
  setName(Name);
}
1448
1449//===----------------------------------------------------------------------===//
1450// StoreInst Implementation
1451//===----------------------------------------------------------------------===//
1452
// Debug-build sanity check for stores: both operands present and operand 1
// (the address) is a pointer.
void StoreInst::AssertOK() {
  assert(getOperand(0) && getOperand(1) && "Both operands must be non-null!");
  assert(getOperand(1)->getType()->isPointerTy() &&
         "Ptr must have pointer type!");
}
1458
// Convenience constructors: progressively default volatility (false),
// alignment (ABI default for the stored value's type), and atomicity
// (NotAtomic / system scope), delegating to the next-fuller overload.
StoreInst::StoreInst(Value *val, Value *addr, Instruction *InsertBefore)
    : StoreInst(val, addr, /*isVolatile=*/false, InsertBefore) {}

StoreInst::StoreInst(Value *val, Value *addr, BasicBlock *InsertAtEnd)
    : StoreInst(val, addr, /*isVolatile=*/false, InsertAtEnd) {}

// NOTE(review): this overload's signature line (taking a
// BasicBlock::iterator) appears to be missing from this copy of the file
// (extraction artifact).
    : StoreInst(val, addr, /*isVolatile=*/false, InsertBefore) {}

StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile,
                     Instruction *InsertBefore)
    : StoreInst(val, addr, isVolatile,
                computeLoadStoreDefaultAlign(val->getType(), InsertBefore),
                InsertBefore) {}

StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile,
                     BasicBlock *InsertAtEnd)
    : StoreInst(val, addr, isVolatile,
                computeLoadStoreDefaultAlign(val->getType(), InsertAtEnd),
                InsertAtEnd) {}

StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile,
                     BasicBlock::iterator InsertBefore)
    : StoreInst(val, addr, isVolatile,
                computeLoadStoreDefaultAlign(val->getType(), &*InsertBefore),
                InsertBefore) {}

StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, Align Align,
                     Instruction *InsertBefore)
    : StoreInst(val, addr, isVolatile, Align, AtomicOrdering::NotAtomic,
                SyncScope::System, InsertBefore) {}

StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, Align Align,
                     BasicBlock *InsertAtEnd)
    : StoreInst(val, addr, isVolatile, Align, AtomicOrdering::NotAtomic,
                SyncScope::System, InsertAtEnd) {}

StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, Align Align,
                     BasicBlock::iterator InsertBefore)
    : StoreInst(val, addr, isVolatile, Align, AtomicOrdering::NotAtomic,
                SyncScope::System, InsertBefore) {}
1500
// Full constructors: a store has void type and two co-allocated operands
// (value, address).
// NOTE(review): the statements applying 'isVolatile' and 'Align' appear to
// be missing from all three bodies in this copy of the file (extraction
// artifact) — verify against upstream.
StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, Align Align,
                     AtomicOrdering Order, SyncScope::ID SSID,
                     Instruction *InsertBefore)
    : Instruction(Type::getVoidTy(val->getContext()), Store,
                  OperandTraits<StoreInst>::op_begin(this),
                  OperandTraits<StoreInst>::operands(this), InsertBefore) {
  Op<0>() = val;
  Op<1>() = addr;
  setAtomic(Order, SSID);
  AssertOK();
}

StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, Align Align,
                     AtomicOrdering Order, SyncScope::ID SSID,
                     BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(val->getContext()), Store,
                  OperandTraits<StoreInst>::op_begin(this),
                  OperandTraits<StoreInst>::operands(this), InsertAtEnd) {
  Op<0>() = val;
  Op<1>() = addr;
  setAtomic(Order, SSID);
  AssertOK();
}

// Iterator overload: constructed detached, then inserted explicitly before
// the given position.
StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, Align Align,
                     AtomicOrdering Order, SyncScope::ID SSID,
                     BasicBlock::iterator InsertBefore)
    : Instruction(Type::getVoidTy(val->getContext()), Store,
                  OperandTraits<StoreInst>::op_begin(this),
                  OperandTraits<StoreInst>::operands(this)) {
  Op<0>() = val;
  Op<1>() = addr;
  setAtomic(Order, SSID);
  insertBefore(*InsertBefore->getParent(), InsertBefore);
  AssertOK();
}
1543
1544//===----------------------------------------------------------------------===//
1545// AtomicCmpXchgInst Implementation
1546//===----------------------------------------------------------------------===//
1547
// Shared initialization for cmpxchg: operands are (pointer, expected value,
// replacement value), followed by ordering/scope/alignment bookkeeping.
// The asserts validate the operand contract after everything is wired up.
void AtomicCmpXchgInst::Init(Value *Ptr, Value *Cmp, Value *NewVal,
                             Align Alignment, AtomicOrdering SuccessOrdering,
                             AtomicOrdering FailureOrdering,
                             SyncScope::ID SSID) {
  Op<0>() = Ptr;
  Op<1>() = Cmp;
  Op<2>() = NewVal;
  setSuccessOrdering(SuccessOrdering);
  setFailureOrdering(FailureOrdering);
  setSyncScopeID(SSID);
  setAlignment(Alignment);

  assert(getOperand(0) && getOperand(1) && getOperand(2) &&
         "All operands must be non-null!");
  assert(getOperand(0)->getType()->isPointerTy() &&
         "Ptr must have pointer type!");
  assert(getOperand(1)->getType() == getOperand(2)->getType() &&
         "Cmp type and NewVal type must be same!");
}
1567
// Constructors: cmpxchg's result is the anonymous struct { valueTy, i1 }
// (loaded value + success flag).
// NOTE(review): the leading signature line of each overload (declaring
// Ptr/Cmp/NewVal) appears to be missing from this copy of the file
// (extraction artifact) — verify against upstream.
                                     Align Alignment,
                                     AtomicOrdering SuccessOrdering,
                                     AtomicOrdering FailureOrdering,
                                     SyncScope::ID SSID,
                                     Instruction *InsertBefore)
    : Instruction(
          StructType::get(Cmp->getType(), Type::getInt1Ty(Cmp->getContext())),
          AtomicCmpXchg, OperandTraits<AtomicCmpXchgInst>::op_begin(this),
          OperandTraits<AtomicCmpXchgInst>::operands(this), InsertBefore) {
  Init(Ptr, Cmp, NewVal, Alignment, SuccessOrdering, FailureOrdering, SSID);
}

                                     Align Alignment,
                                     AtomicOrdering SuccessOrdering,
                                     AtomicOrdering FailureOrdering,
                                     SyncScope::ID SSID,
                                     BasicBlock *InsertAtEnd)
    : Instruction(
          StructType::get(Cmp->getType(), Type::getInt1Ty(Cmp->getContext())),
          AtomicCmpXchg, OperandTraits<AtomicCmpXchgInst>::op_begin(this),
          OperandTraits<AtomicCmpXchgInst>::operands(this), InsertAtEnd) {
  Init(Ptr, Cmp, NewVal, Alignment, SuccessOrdering, FailureOrdering, SSID);
}
1593
1594//===----------------------------------------------------------------------===//
1595// AtomicRMWInst Implementation
1596//===----------------------------------------------------------------------===//
1597
// Shared initialization for atomicrmw: operands are (pointer, value), plus
// ordering/scope/alignment bookkeeping. atomicrmw must always be atomic
// and cannot be 'unordered'.
// NOTE(review): the statement recording 'Operation' appears to be missing
// from this copy of the file (extraction artifact) — verify against
// upstream.
void AtomicRMWInst::Init(BinOp Operation, Value *Ptr, Value *Val,
                         Align Alignment, AtomicOrdering Ordering,
                         SyncScope::ID SSID) {
  assert(Ordering != AtomicOrdering::NotAtomic &&
         "atomicrmw instructions can only be atomic.");
  assert(Ordering != AtomicOrdering::Unordered &&
         "atomicrmw instructions cannot be unordered.");
  Op<0>() = Ptr;
  Op<1>() = Val;
  setOrdering(Ordering);
  setSyncScopeID(SSID);
  setAlignment(Alignment);

  assert(getOperand(0) && getOperand(1) &&
         "All operands must be non-null!");
  assert(getOperand(0)->getType()->isPointerTy() &&
         "Ptr must have pointer type!");
  assert(Ordering != AtomicOrdering::NotAtomic &&
         "AtomicRMW instructions must be atomic!");
}
1619
// Constructors: atomicrmw's result type is the operand value's type.
// NOTE(review): the leading signature line of each overload (declaring
// Operation/Ptr/Val) appears to be missing from this copy of the file
// (extraction artifact) — verify against upstream.
                             Align Alignment, AtomicOrdering Ordering,
                             SyncScope::ID SSID, Instruction *InsertBefore)
    : Instruction(Val->getType(), AtomicRMW,
                  OperandTraits<AtomicRMWInst>::op_begin(this),
                  OperandTraits<AtomicRMWInst>::operands(this), InsertBefore) {
  Init(Operation, Ptr, Val, Alignment, Ordering, SSID);
}

                             Align Alignment, AtomicOrdering Ordering,
                             SyncScope::ID SSID, BasicBlock *InsertAtEnd)
    : Instruction(Val->getType(), AtomicRMW,
                  OperandTraits<AtomicRMWInst>::op_begin(this),
                  OperandTraits<AtomicRMWInst>::operands(this), InsertAtEnd) {
  Init(Operation, Ptr, Val, Alignment, Ordering, SSID);
}
1637
// Map an atomicrmw BinOp to its textual IR keyword.
// NOTE(review): the function signature and several 'case' labels (those
// whose returns below have no label, e.g. for "xchg", "nand", "umax",
// "umin", the floating-point ops and the wrap ops) appear to be missing
// from this copy of the file (extraction artifact) — verify against
// upstream before editing.
  switch (Op) {
    return "xchg";
  case AtomicRMWInst::Add:
    return "add";
  case AtomicRMWInst::Sub:
    return "sub";
  case AtomicRMWInst::And:
    return "and";
    return "nand";
  case AtomicRMWInst::Or:
    return "or";
  case AtomicRMWInst::Xor:
    return "xor";
  case AtomicRMWInst::Max:
    return "max";
  case AtomicRMWInst::Min:
    return "min";
    return "umax";
    return "umin";
    return "fadd";
    return "fsub";
    return "fmax";
    return "fmin";
    return "uinc_wrap";
    return "udec_wrap";
    return "<invalid operation>";
  }

  llvm_unreachable("invalid atomicrmw operation");
}
1680
1681//===----------------------------------------------------------------------===//
1682// FenceInst Implementation
1683//===----------------------------------------------------------------------===//
1684
// Fence constructors: 'fence' has void type and no operands; it carries
// only an atomic ordering and a synchronization scope.
// NOTE(review): the leading signature line of each overload (declaring the
// LLVMContext and AtomicOrdering parameters) appears to be missing from
// this copy of the file (extraction artifact) — verify against upstream.
                     SyncScope::ID SSID,
                     Instruction *InsertBefore)
    : Instruction(Type::getVoidTy(C), Fence, nullptr, 0, InsertBefore) {
  setOrdering(Ordering);
  setSyncScopeID(SSID);
}

                     SyncScope::ID SSID,
                     BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(C), Fence, nullptr, 0, InsertAtEnd) {
  setOrdering(Ordering);
  setSyncScopeID(SSID);
}
1700
1701//===----------------------------------------------------------------------===//
1702// GetElementPtrInst Implementation
1703//===----------------------------------------------------------------------===//
1704
1705void GetElementPtrInst::init(Value *Ptr, ArrayRef<Value *> IdxList,
1706 const Twine &Name) {
1707 assert(getNumOperands() == 1 + IdxList.size() &&
1708 "NumOperands not initialized?");
1709 Op<0>() = Ptr;
1710 llvm::copy(IdxList, op_begin() + 1);
1711 setName(Name);
1712}
1713
// Copy constructor: copies all operands (pointer + indices) and the cached
// source/result element types.
// NOTE(review): a trailing statement appears to be missing from this copy
// of the file (extraction artifact) — verify against upstream.
GetElementPtrInst::GetElementPtrInst(const GetElementPtrInst &GEPI)
    : Instruction(GEPI.getType(), GetElementPtr,
                  OperandTraits<GetElementPtrInst>::op_end(this) -
                      GEPI.getNumOperands(),
                  GEPI.getNumOperands()),
      SourceElementType(GEPI.SourceElementType),
      ResultElementType(GEPI.ResultElementType) {
  std::copy(GEPI.op_begin(), GEPI.op_end(), op_begin());
}
1724
// NOTE(review): the signature of getTypeAtIndex(Type *, Value *) appears to
// be missing from this copy of the file (extraction artifact). The body
// resolves the type reached by applying one GEP index to Ty: structs
// require a valid (constant) field index, while arrays and vectors accept
// any integer index and yield their element type. Returns null for an
// invalid index or a non-indexable type.
  if (auto *Struct = dyn_cast<StructType>(Ty)) {
    if (!Struct->indexValid(Idx))
      return nullptr;
    return Struct->getTypeAtIndex(Idx);
  }
  if (!Idx->getType()->isIntOrIntVectorTy())
    return nullptr;
  if (auto *Array = dyn_cast<ArrayType>(Ty))
    return Array->getElementType();
  if (auto *Vector = dyn_cast<VectorType>(Ty))
    return Vector->getElementType();
  return nullptr;
}
1739
// NOTE(review): the signature of the constant-index overload of
// getTypeAtIndex appears to be missing from this copy of the file
// (extraction artifact). Same contract as the Value* overload, but the
// index is already a plain integer, so struct bounds are checked directly.
  if (auto *Struct = dyn_cast<StructType>(Ty)) {
    if (Idx >= Struct->getNumElements())
      return nullptr;
    return Struct->getElementType(Idx);
  }
  if (auto *Array = dyn_cast<ArrayType>(Ty))
    return Array->getElementType();
  if (auto *Vector = dyn_cast<VectorType>(Ty))
    return Vector->getElementType();
  return nullptr;
}
1752
// Walk a full GEP index list, applying one index at a time starting from
// the second entry (the first index steps through the base pointer and
// does not change the element type). Returns null as soon as an index is
// invalid.
// NOTE(review): the function's declarator line and the loop statement that
// advances Ty appear to be missing from this copy of the file (extraction
// artifact) — verify against upstream.
template <typename IndexTy>
  if (IdxList.empty())
    return Ty;
  for (IndexTy V : IdxList.slice(1)) {
    if (!Ty)
      return Ty;
  }
  return Ty;
}
1764
// NOTE(review): the signatures of the three public getIndexedType overloads
// (taking ArrayRef<Value *>, ArrayRef<Constant *>, and ArrayRef<uint64_t>)
// appear to be partially missing from this copy of the file (extraction
// artifact). Each simply forwards to the shared template implementation.
  return getIndexedTypeInternal(Ty, IdxList);
}

                                        ArrayRef<Constant *> IdxList) {
  return getIndexedTypeInternal(Ty, IdxList);
}

  return getIndexedTypeInternal(Ty, IdxList);
}
1777
/// hasAllZeroIndices - Return true if all of the indices of this GEP are
/// zeros. If so, the result pointer and the first operand have the same
/// value, just potentially different types.
// NOTE(review): the function signature appears to be missing from this
// copy of the file (extraction artifact). Operand 0 is the base pointer,
// so the scan starts at index 1; a non-constant index fails immediately.
  for (unsigned i = 1, e = getNumOperands(); i != e; ++i) {
    if (ConstantInt *CI = dyn_cast<ConstantInt>(getOperand(i))) {
      if (!CI->isZero()) return false;
    } else {
      return false;
    }
  }
  return true;
}
1791
/// hasAllConstantIndices - Return true if all of the indices of this GEP are
/// constant integers. If so, the result pointer and the first operand have
/// a constant offset between them.
// NOTE(review): the function signature appears to be missing from this
// copy of the file (extraction artifact); the scan skips operand 0 (the
// base pointer).
  for (unsigned i = 1, e = getNumOperands(); i != e; ++i) {
    if (!isa<ConstantInt>(getOperand(i)))
      return false;
  }
  return true;
}
1802
// NOTE(review): the signatures of setIsInBounds, isInBounds,
// accumulateConstantOffset and collectOffset appear to be missing from
// this copy of the file (extraction artifact). All four simply delegate to
// the shared GEPOperator implementation (a GEP instruction is also a
// GEPOperator).
  cast<GEPOperator>(this)->setIsInBounds(B);
}

  return cast<GEPOperator>(this)->isInBounds();
}

                                                 APInt &Offset) const {
  // Delegate to the generic GEPOperator implementation.
  return cast<GEPOperator>(this)->accumulateConstantOffset(DL, Offset);
}

    const DataLayout &DL, unsigned BitWidth,
    MapVector<Value *, APInt> &VariableOffsets,
    APInt &ConstantOffset) const {
  // Delegate to the generic GEPOperator implementation.
  return cast<GEPOperator>(this)->collectOffset(DL, BitWidth, VariableOffsets,
                                                ConstantOffset);
}
1825
1826//===----------------------------------------------------------------------===//
1827// ExtractElementInst Implementation
1828//===----------------------------------------------------------------------===//
1829
// Extractelement constructors: the result type is the vector's element
// type; operands are (vector, index).
ExtractElementInst::ExtractElementInst(Value *Val, Value *Index,
                                       const Twine &Name,
                                       Instruction *InsertBef)
  : Instruction(cast<VectorType>(Val->getType())->getElementType(),
                ExtractElement,
                OperandTraits<ExtractElementInst>::op_begin(this),
                2, InsertBef) {
  assert(isValidOperands(Val, Index) &&
         "Invalid extractelement instruction operands!");
  Op<0>() = Val;
  Op<1>() = Index;
  setName(Name);
}

// Same as above, but appended to the end of InsertAE.
ExtractElementInst::ExtractElementInst(Value *Val, Value *Index,
                                       const Twine &Name,
                                       BasicBlock *InsertAE)
  : Instruction(cast<VectorType>(Val->getType())->getElementType(),
                ExtractElement,
                OperandTraits<ExtractElementInst>::op_begin(this),
                2, InsertAE) {
  assert(isValidOperands(Val, Index) &&
         "Invalid extractelement instruction operands!");

  Op<0>() = Val;
  Op<1>() = Index;
  setName(Name);
}

// NOTE(review): the signature of ExtractElementInst::isValidOperands
// appears to be missing from this copy of the file (extraction artifact).
// Valid operands: a vector first operand and an integer index.
  if (!Val->getType()->isVectorTy() || !Index->getType()->isIntegerTy())
    return false;
  return true;
}
1864
1865//===----------------------------------------------------------------------===//
1866// InsertElementInst Implementation
1867//===----------------------------------------------------------------------===//
1868
// Insertelement constructors: the result type equals the input vector's
// type; operands are (vector, scalar element, index).
InsertElementInst::InsertElementInst(Value *Vec, Value *Elt, Value *Index,
                                     const Twine &Name,
                                     Instruction *InsertBef)
  : Instruction(Vec->getType(), InsertElement,
                OperandTraits<InsertElementInst>::op_begin(this),
                3, InsertBef) {
  assert(isValidOperands(Vec, Elt, Index) &&
         "Invalid insertelement instruction operands!");
  Op<0>() = Vec;
  Op<1>() = Elt;
  Op<2>() = Index;
  setName(Name);
}

// Same as above, but appended to the end of InsertAE.
InsertElementInst::InsertElementInst(Value *Vec, Value *Elt, Value *Index,
                                     const Twine &Name,
                                     BasicBlock *InsertAE)
  : Instruction(Vec->getType(), InsertElement,
                OperandTraits<InsertElementInst>::op_begin(this),
                3, InsertAE) {
  assert(isValidOperands(Vec, Elt, Index) &&
         "Invalid insertelement instruction operands!");

  Op<0>() = Vec;
  Op<1>() = Elt;
  Op<2>() = Index;
  setName(Name);
}

// NOTE(review): the first signature line of isValidOperands appears to be
// missing from this copy of the file (extraction artifact). Valid
// operands: vector, matching element type, integer index.
                                        const Value *Index) {
  if (!Vec->getType()->isVectorTy())
    return false;  // First operand of insertelement must be vector type.

  if (Elt->getType() != cast<VectorType>(Vec->getType())->getElementType())
    return false;// Second operand of insertelement must be vector element type.

  if (!Index->getType()->isIntegerTy())
    return false;  // Third operand of insertelement must be i32.
  return true;
}
1910
1911//===----------------------------------------------------------------------===//
1912// ShuffleVectorInst Implementation
1913//===----------------------------------------------------------------------===//
1914
// NOTE(review): this helper's signature appears to be missing from this
// copy of the file (extraction artifact). It builds a poison value of V's
// type, used as the placeholder second operand for single-input shuffles.
  assert(V && "Cannot create placeholder of nullptr V");
  return PoisonValue::get(V->getType());
}
1919
// NOTE(review): the signature lines of these four single-input
// ShuffleVectorInst convenience constructors appear to be largely missing
// from this copy of the file (extraction artifact) — verify against
// upstream. They delegate to the two-input constructors, supplying a
// placeholder (poison) second vector.
                                     Instruction *InsertBefore)
                        InsertBefore) {}

                                     BasicBlock *InsertAtEnd)
                        InsertAtEnd) {}

                                     const Twine &Name,
                                     Instruction *InsertBefore)
                        InsertBefore) {}

                                     const Twine &Name, BasicBlock *InsertAtEnd)
                        InsertAtEnd) {}
1940
// Two-input constructors taking a constant-vector mask. The result type is
// a vector of V1's element type with the mask's element count. The mask is
// decoded into the internal small-int ShuffleMask representation.
// NOTE(review): the leading signature line of each overload appears to be
// missing from this copy of the file (extraction artifact).
                                     const Twine &Name,
                                     Instruction *InsertBefore)
    : Instruction(
          VectorType::get(cast<VectorType>(V1->getType())->getElementType(),
                          cast<VectorType>(Mask->getType())->getElementCount()),
          ShuffleVector, OperandTraits<ShuffleVectorInst>::op_begin(this),
          OperandTraits<ShuffleVectorInst>::operands(this), InsertBefore) {
  assert(isValidOperands(V1, V2, Mask) &&
         "Invalid shuffle vector instruction operands!");

  Op<0>() = V1;
  Op<1>() = V2;
  SmallVector<int, 16> MaskArr;
  getShuffleMask(cast<Constant>(Mask), MaskArr);
  setShuffleMask(MaskArr);
  setName(Name);
}

                                     const Twine &Name, BasicBlock *InsertAtEnd)
    : Instruction(
          VectorType::get(cast<VectorType>(V1->getType())->getElementType(),
                          cast<VectorType>(Mask->getType())->getElementCount()),
          ShuffleVector, OperandTraits<ShuffleVectorInst>::op_begin(this),
          OperandTraits<ShuffleVectorInst>::operands(this), InsertAtEnd) {
  assert(isValidOperands(V1, V2, Mask) &&
         "Invalid shuffle vector instruction operands!");

  Op<0>() = V1;
  Op<1>() = V2;
  SmallVector<int, 16> MaskArr;
  getShuffleMask(cast<Constant>(Mask), MaskArr);
  setShuffleMask(MaskArr);
  setName(Name);
}
1977
// Two-input constructors taking the mask directly as ArrayRef<int>. The
// result vector's scalability follows V1's type.
// NOTE(review): the leading signature line of each overload appears to be
// missing from this copy of the file (extraction artifact).
                                     const Twine &Name,
                                     Instruction *InsertBefore)
    : Instruction(
          VectorType::get(cast<VectorType>(V1->getType())->getElementType(),
                          Mask.size(), isa<ScalableVectorType>(V1->getType())),
          ShuffleVector, OperandTraits<ShuffleVectorInst>::op_begin(this),
          OperandTraits<ShuffleVectorInst>::operands(this), InsertBefore) {
  assert(isValidOperands(V1, V2, Mask) &&
         "Invalid shuffle vector instruction operands!");
  Op<0>() = V1;
  Op<1>() = V2;
  setShuffleMask(Mask);
  setName(Name);
}

                                     const Twine &Name, BasicBlock *InsertAtEnd)
    : Instruction(
          VectorType::get(cast<VectorType>(V1->getType())->getElementType(),
                          Mask.size(), isa<ScalableVectorType>(V1->getType())),
          ShuffleVector, OperandTraits<ShuffleVectorInst>::op_begin(this),
          OperandTraits<ShuffleVectorInst>::operands(this), InsertAtEnd) {
  assert(isValidOperands(V1, V2, Mask) &&
         "Invalid shuffle vector instruction operands!");

  Op<0>() = V1;
  Op<1>() = V2;
  setShuffleMask(Mask);
  setName(Name);
}
2009
// NOTE(review): the signature of ShuffleVectorInst::commute appears to be
// missing from this copy of the file (extraction artifact). The body swaps
// the two source vectors and rewrites each mask element to keep selecting
// the same lanes: elements referring to operand 0 are shifted into
// operand 1's range and vice versa; poison lanes stay poison.
  int NumOpElts = cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
  int NumMaskElts = ShuffleMask.size();
  SmallVector<int, 16> NewMask(NumMaskElts);
  for (int i = 0; i != NumMaskElts; ++i) {
    int MaskElt = getMaskValue(i);
    if (MaskElt == PoisonMaskElem) {
      NewMask[i] = PoisonMaskElem;
      continue;
    }
    assert(MaskElt >= 0 && MaskElt < 2 * NumOpElts && "Out-of-range mask");
    MaskElt = (MaskElt < NumOpElts) ? MaskElt + NumOpElts : MaskElt - NumOpElts;
    NewMask[i] = MaskElt;
  }
  setShuffleMask(NewMask);
  Op<0>().swap(Op<1>());
}
2027
// Validate (V1, V2, integer mask): operands must be same-typed vectors, every
// defined mask element must index into the concatenation of V1 and V2, and a
// scalable-vector shuffle may only be a zero splat or all-poison mask.
// NOTE(review): signature line (original 2028) missing from this listing.
2029                                        ArrayRef<int> Mask) {
2030   // V1 and V2 must be vectors of the same type.
2031   if (!isa<VectorType>(V1->getType()) || V1->getType() != V2->getType())
2032     return false;
2033
2034   // Make sure the mask elements make sense.
2035   int V1Size =
2036       cast<VectorType>(V1->getType())->getElementCount().getKnownMinValue();
2037   for (int Elem : Mask)
2038     if (Elem != PoisonMaskElem && Elem >= V1Size * 2)
2039       return false;
2040
  // Scalable vectors cannot express an arbitrary constant mask: only splat-
  // of-element-0 (all zeros) or an all-poison mask is representable.
2041   if (isa<ScalableVectorType>(V1->getType()))
2042     if ((Mask[0] != 0 && Mask[0] != PoisonMaskElem) || !all_equal(Mask))
2043       return false;
2044
2045   return true;
2046 }
2047
// Validate (V1, V2, constant mask Value): the mask must be a vector of i32 of
// the same vector kind (fixed vs scalable) as the inputs, and every defined
// constant element must index into the two concatenated sources.
// NOTE(review): signature line (original 2048) missing from this listing.
2049                                        const Value *Mask) {
2050   // V1 and V2 must be vectors of the same type.
2051   if (!V1->getType()->isVectorTy() || V1->getType() != V2->getType())
2052     return false;
2053
2054   // Mask must be vector of i32, and must be the same kind of vector as the
2055   // input vectors
2056   auto *MaskTy = dyn_cast<VectorType>(Mask->getType());
2057   if (!MaskTy || !MaskTy->getElementType()->isIntegerTy(32) ||
2058       isa<ScalableVectorType>(MaskTy) != isa<ScalableVectorType>(V1->getType()))
2059     return false;
2060
2061   // Check to see if Mask is valid.
  // Undef/poison and zeroinitializer masks are always in range.
2062   if (isa<UndefValue>(Mask) || isa<ConstantAggregateZero>(Mask))
2063     return true;
2064
  // A ConstantVector mask: each operand must be an in-range i32 or undef.
2065   if (const auto *MV = dyn_cast<ConstantVector>(Mask)) {
2066     unsigned V1Size = cast<FixedVectorType>(V1->getType())->getNumElements();
2067     for (Value *Op : MV->operands()) {
2068       if (auto *CI = dyn_cast<ConstantInt>(Op)) {
2069         if (CI->uge(V1Size*2))
2070           return false;
2071       } else if (!isa<UndefValue>(Op)) {
2072         return false;
2073       }
2074     }
2075     return true;
2076   }
2077
  // A ConstantDataSequential mask: every packed element must be in range.
2078   if (const auto *CDS = dyn_cast<ConstantDataSequential>(Mask)) {
2079     unsigned V1Size = cast<FixedVectorType>(V1->getType())->getNumElements();
2080     for (unsigned i = 0, e = cast<FixedVectorType>(MaskTy)->getNumElements();
2081          i != e; ++i)
2082       if (CDS->getElementAsInteger(i) >= V1Size*2)
2083         return false;
2084     return true;
2085   }
2086
  // Any other constant form is not a valid shuffle mask.
2087   return false;
2088 }
2089
// Decode a constant shuffle mask into a vector of ints, using -1 for
// undef/poison lanes. Handles zeroinitializer, scalable splats, packed
// constant data, and generic aggregate constants.
// NOTE(review): signature line (original 2090) missing from this listing.
2091                                        SmallVectorImpl<int> &Result) {
2092   ElementCount EC = cast<VectorType>(Mask->getType())->getElementCount();
2093
2094   if (isa<ConstantAggregateZero>(Mask)) {
2095     Result.resize(EC.getKnownMinValue(), 0);
2096     return;
2097   }
2098
2099   Result.reserve(EC.getKnownMinValue());
2100
  // Scalable masks can only be all-zero or all-undef (see isValidOperands),
  // so one representative value is replicated across the known-min lanes.
2101   if (EC.isScalable()) {
2102     assert((isa<ConstantAggregateZero>(Mask) || isa<UndefValue>(Mask)) &&
2103            "Scalable vector shuffle mask must be undef or zeroinitializer");
2104     int MaskVal = isa<UndefValue>(Mask) ? -1 : 0;
2105     for (unsigned I = 0; I < EC.getKnownMinValue(); ++I)
2106       Result.emplace_back(MaskVal);
2107     return;
2108   }
2109
2110   unsigned NumElts = EC.getKnownMinValue();
2111
2112   if (auto *CDS = dyn_cast<ConstantDataSequential>(Mask)) {
2113     for (unsigned i = 0; i != NumElts; ++i)
2114       Result.push_back(CDS->getElementAsInteger(i));
2115     return;
2116   }
  // Generic path: each aggregate element is either undef (-1) or a
  // ConstantInt lane index.
2117   for (unsigned i = 0; i != NumElts; ++i) {
2118     Constant *C = Mask->getAggregateElement(i);
2119     Result.push_back(isa<UndefValue>(C) ? -1 :
2120                      cast<ConstantInt>(C)->getZExtValue());
2121   }
2122 }
2123
// Record the integer mask and cache its constant (bitcode) representation.
// NOTE(review): signature line (original 2124) missing from this listing.
2125   ShuffleMask.assign(Mask.begin(), Mask.end());
2126   ShuffleMaskForBitcode = convertShuffleMaskForBitcode(Mask, getType());
2127 }
2128
// Re-encode an integer shuffle mask as a constant <N x i32> vector for
// serialization. Scalable results are restricted to zeroinitializer / undef.
// NOTE(review): the signature line (original 2129) and two body lines
// (original 2139: the MaskConst SmallVector declaration; original 2142: the
// poison-element push_back) are missing from this listing — confirm upstream.
2130                                                           Type *ResultTy) {
2131   Type *Int32Ty = Type::getInt32Ty(ResultTy->getContext());
2132   if (isa<ScalableVectorType>(ResultTy)) {
2133     assert(all_equal(Mask) && "Unexpected shuffle");
2134     Type *VecTy = VectorType::get(Int32Ty, Mask.size(), true);
2135     if (Mask[0] == 0)
2136       return Constant::getNullValue(VecTy);
2137     return UndefValue::get(VecTy);
2138   }
2140   for (int Elem : Mask) {
2141     if (Elem == PoisonMaskElem)
2143     else
2144       MaskConst.push_back(ConstantInt::get(Int32Ty, Elem));
2145   }
2146   return ConstantVector::get(MaskConst);
2147 }
2148
2149static bool isSingleSourceMaskImpl(ArrayRef<int> Mask, int NumOpElts) {
2150 assert(!Mask.empty() && "Shuffle mask must contain elements");
2151 bool UsesLHS = false;
2152 bool UsesRHS = false;
2153 for (int I : Mask) {
2154 if (I == -1)
2155 continue;
2156 assert(I >= 0 && I < (NumOpElts * 2) &&
2157 "Out-of-bounds shuffle mask element");
2158 UsesLHS |= (I < NumOpElts);
2159 UsesRHS |= (I >= NumOpElts);
2160 if (UsesLHS && UsesRHS)
2161 return false;
2162 }
2163 // Allow for degenerate case: completely undef mask means neither source is used.
2164 return UsesLHS || UsesRHS;
2165}
2166
// Public wrapper: test single-sourced-ness assuming the operands have the
// same element count as the mask (no operand type is available here).
// NOTE(review): signature line (original 2167) missing from this listing.
2168   // We don't have vector operand size information, so assume operands are the
2169   // same size as the mask.
2170   return isSingleSourceMaskImpl(Mask, NumSrcElts);
2171 }
2172
2173static bool isIdentityMaskImpl(ArrayRef<int> Mask, int NumOpElts) {
2174 if (!isSingleSourceMaskImpl(Mask, NumOpElts))
2175 return false;
2176 for (int i = 0, NumMaskElts = Mask.size(); i < NumMaskElts; ++i) {
2177 if (Mask[i] == -1)
2178 continue;
2179 if (Mask[i] != i && Mask[i] != (NumOpElts + i))
2180 return false;
2181 }
2182 return true;
2183}
2184
// Public wrapper: the mask must match the source width exactly, then defer to
// the identity-shuffle check assuming operands sized like the mask.
// NOTE(review): signature line (original 2185) missing from this listing.
2186   if (Mask.size() != static_cast<unsigned>(NumSrcElts))
2187     return false;
2188   // We don't have vector operand size information, so assume operands are the
2189   // same size as the mask.
2190   return isIdentityMaskImpl(Mask, NumSrcElts);
2191 }
2192
// Return true if the mask reverses the elements of one source: each defined
// element I selects lane (NumSrcElts - 1 - I) from either operand.
// NOTE(review): signature line (original 2193) missing from this listing.
2194   if (Mask.size() != static_cast<unsigned>(NumSrcElts))
2195     return false;
2196   if (!isSingleSourceMask(Mask, NumSrcElts))
2197     return false;
2198
2199   // The number of elements in the mask must be at least 2.
2200   if (NumSrcElts < 2)
2201     return false;
2202
2203   for (int I = 0, E = Mask.size(); I < E; ++I) {
2204     if (Mask[I] == -1)
2205       continue;
    // Accept the reversed lane from either the first or second operand.
2206     if (Mask[I] != (NumSrcElts - 1 - I) &&
2207         Mask[I] != (NumSrcElts + NumSrcElts - 1 - I))
2208       return false;
2209   }
2210   return true;
2211 }
2212
// Return true if the mask broadcasts element 0 of one source: every defined
// element is 0 (first operand) or NumSrcElts (element 0 of the second).
// NOTE(review): signature line (original 2213) missing from this listing.
2214   if (Mask.size() != static_cast<unsigned>(NumSrcElts))
2215     return false;
2216   if (!isSingleSourceMask(Mask, NumSrcElts))
2217     return false;
2218   for (int I = 0, E = Mask.size(); I < E; ++I) {
2219     if (Mask[I] == -1)
2220       continue;
2221     if (Mask[I] != 0 && Mask[I] != NumSrcElts)
2222       return false;
2223   }
2224   return true;
2225 }
2226
// Return true if the mask chooses, lane by lane, between the two sources
// (like a vector select): element I is I or NumSrcElts + I, and both sources
// are actually referenced.
// NOTE(review): signature line (original 2227) missing from this listing.
2228   if (Mask.size() != static_cast<unsigned>(NumSrcElts))
2229     return false;
2230   // Select is differentiated from identity. It requires using both sources.
2231   if (isSingleSourceMask(Mask, NumSrcElts))
2232     return false;
2233   for (int I = 0, E = Mask.size(); I < E; ++I) {
2234     if (Mask[I] == -1)
2235       continue;
2236     if (Mask[I] != I && Mask[I] != (NumSrcElts + I))
2237       return false;
2238   }
2239   return true;
2240 }
2241
// Return true if the mask performs one half of a 2-input transpose (AArch64
// trn1/trn2-style interleave of even or odd lanes).
// NOTE(review): signature line (original 2242) missing from this listing.
2243   // Example masks that will return true:
2244   // v1 = <a, b, c, d>
2245   // v2 = <e, f, g, h>
2246   // trn1 = shufflevector v1, v2 <0, 4, 2, 6> = <a, e, c, g>
2247   // trn2 = shufflevector v1, v2 <1, 5, 3, 7> = <b, f, d, h>
2248
2249   if (Mask.size() != static_cast<unsigned>(NumSrcElts))
2250     return false;
2251   // 1. The number of elements in the mask must be a power-of-2 and at least 2.
2252   int Sz = Mask.size();
2253   if (Sz < 2 || !isPowerOf2_32(Sz))
2254     return false;
2255
2256   // 2. The first element of the mask must be either a 0 or a 1.
2257   if (Mask[0] != 0 && Mask[0] != 1)
2258     return false;
2259
2260   // 3. The difference between the first 2 elements must be equal to the
2261   // number of elements in the mask.
2262   if ((Mask[1] - Mask[0]) != NumSrcElts)
2263     return false;
2264
2265   // 4. The difference between consecutive even-numbered and odd-numbered
2266   // elements must be equal to 2.
  // Note: undef elements are NOT tolerated here — the stride check needs
  // every element defined.
2267   for (int I = 2; I < Sz; ++I) {
2268     int MaskEltVal = Mask[I];
2269     if (MaskEltVal == -1)
2270       return false;
2271     int MaskEltPrevVal = Mask[I - 2];
2272     if (MaskEltVal - MaskEltPrevVal != 2)
2273       return false;
2274   }
2275   return true;
2276 }
2277
// Return true if the mask extracts a contiguous run starting at some index in
// the first source and wrapping into the second (a vector splice). On
// success, \p Index receives the start offset (0 is allowed, i.e. a copy).
// NOTE(review): signature line (original 2278) missing from this listing.
2279                                      int &Index) {
2280   if (Mask.size() != static_cast<unsigned>(NumSrcElts))
2281     return false;
2282   // Example: shufflevector <4 x n> A, <4 x n> B, <1,2,3,4>
2283   int StartIndex = -1;
2284   for (int I = 0, E = Mask.size(); I != E; ++I) {
2285     int MaskEltVal = Mask[I];
2286     if (MaskEltVal == -1)
2287       continue;
2288
2289     if (StartIndex == -1) {
2290       // Don't support a StartIndex that begins in the second input, or if the
2291       // first non-undef index would access below the StartIndex.
2292       if (MaskEltVal < I || NumSrcElts <= (MaskEltVal - I))
2293         return false;
2294
2295       StartIndex = MaskEltVal - I;
2296       continue;
2297     }
2298
2299     // Splice is sequential starting from StartIndex.
2300     if (MaskEltVal != (StartIndex + I))
2301       return false;
2302   }
2303
  // An all-undef mask fixes no start index and is rejected.
2304   if (StartIndex == -1)
2305     return false;
2306
2307   // NOTE: This accepts StartIndex == 0 (COPY).
2308   Index = StartIndex;
2309   return true;
2310 }
2311
// Return true if the mask extracts a contiguous, strictly smaller subvector
// from one source; \p Index receives the extraction offset on success.
// NOTE(review): signature line (original 2312) missing from this listing.
2313                                                int NumSrcElts, int &Index) {
2314   // Must extract from a single source.
2315   if (!isSingleSourceMaskImpl(Mask, NumSrcElts))
2316     return false;
2317
2318   // Must be smaller (else this is an Identity shuffle).
2319   if (NumSrcElts <= (int)Mask.size())
2320     return false;
2321
2322   // Find start of extraction, accounting that we may start with an UNDEF.
2323   int SubIndex = -1;
2324   for (int i = 0, e = Mask.size(); i != e; ++i) {
2325     int M = Mask[i];
2326     if (M < 0)
2327       continue;
    // Normalize second-source indices (M % NumSrcElts) — single-source was
    // already verified, so the modulo just strips the operand selector.
2328     int Offset = (M % NumSrcElts) - i;
2329     if (0 <= SubIndex && SubIndex != Offset)
2330       return false;
2331     SubIndex = Offset;
2332   }
2333
  // The extracted run must lie entirely within the source vector.
2334   if (0 <= SubIndex && SubIndex + (int)Mask.size() <= NumSrcElts) {
2335     Index = SubIndex;
2336     return true;
2337   }
2338   return false;
2339 }
2340
// Return true if the mask inserts a contiguous subvector of one source into
// an identity copy of the other; \p NumSubElts / \p Index receive the
// inserted span's length and position on success.
// NOTE(review): signature line (original 2341) missing from this listing.
2342                                               int NumSrcElts, int &NumSubElts,
2343                                               int &Index) {
2344   int NumMaskElts = Mask.size();
2345
2346   // Don't try to match if we're shuffling to a smaller size.
2347   if (NumMaskElts < NumSrcElts)
2348     return false;
2349
2350   // TODO: We don't recognize self-insertion/widening.
2351   if (isSingleSourceMaskImpl(Mask, NumSrcElts))
2352     return false;
2353
2354   // Determine which mask elements are attributed to which source.
2355   APInt UndefElts = APInt::getZero(NumMaskElts);
2356   APInt Src0Elts = APInt::getZero(NumMaskElts);
2357   APInt Src1Elts = APInt::getZero(NumMaskElts);
2358   bool Src0Identity = true;
2359   bool Src1Identity = true;
2360
2361   for (int i = 0; i != NumMaskElts; ++i) {
2362     int M = Mask[i];
2363     if (M < 0) {
2364       UndefElts.setBit(i);
2365       continue;
2366     }
2367     if (M < NumSrcElts) {
2368       Src0Elts.setBit(i);
2369       Src0Identity &= (M == i);
2370       continue;
2371     }
2372     Src1Elts.setBit(i);
2373     Src1Identity &= (M == (i + NumSrcElts));
2374   }
2375   assert((Src0Elts | Src1Elts | UndefElts).isAllOnes() &&
2376          "unknown shuffle elements");
2377   assert(!Src0Elts.isZero() && !Src1Elts.isZero() &&
2378          "2-source shuffle not found");
2379
2380   // Determine lo/hi span ranges.
2381   // TODO: How should we handle undefs at the start of subvector insertions?
2382   int Src0Lo = Src0Elts.countr_zero();
2383   int Src1Lo = Src1Elts.countr_zero();
2384   int Src0Hi = NumMaskElts - Src0Elts.countl_zero();
2385   int Src1Hi = NumMaskElts - Src1Elts.countl_zero();
2386
2387   // If src0 is in place, see if the src1 elements is inplace within its own
2388   // span.
2389   if (Src0Identity) {
2390     int NumSub1Elts = Src1Hi - Src1Lo;
2391     ArrayRef<int> Sub1Mask = Mask.slice(Src1Lo, NumSub1Elts);
2392     if (isIdentityMaskImpl(Sub1Mask, NumSrcElts)) {
2393       NumSubElts = NumSub1Elts;
2394       Index = Src1Lo;
2395       return true;
2396     }
2397   }
2398
2399   // If src1 is in place, see if the src0 elements is inplace within its own
2400   // span.
2401   if (Src1Identity) {
2402     int NumSub0Elts = Src0Hi - Src0Lo;
2403     ArrayRef<int> Sub0Mask = Mask.slice(Src0Lo, NumSub0Elts);
2404     if (isIdentityMaskImpl(Sub0Mask, NumSrcElts)) {
2405       NumSubElts = NumSub0Elts;
2406       Index = Src0Lo;
2407       return true;
2408     }
2409   }
2410
2411   return false;
2412 }
2413
// Return true if this shuffle is an identity copy of one operand widened with
// trailing undef elements.
// NOTE(review): the signature line (original 2414) and the local Mask
// declaration line (original 2429, presumably
// `ArrayRef<int> Mask = getShuffleMask();`) are missing from this listing —
// confirm upstream.
2415   if (isa<UndefValue>(Op<2>()))
2416     return false;
2417
2418   // FIXME: Not currently possible to express a shuffle mask for a scalable
2419   // vector for this case.
2420   if (isa<ScalableVectorType>(getType()))
2421     return false;
2422
2423   int NumOpElts = cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
2424   int NumMaskElts = cast<FixedVectorType>(getType())->getNumElements();
  // Must actually widen, otherwise this is a plain identity.
2425   if (NumMaskElts <= NumOpElts)
2426     return false;
2427
2428   // The first part of the mask must choose elements from exactly 1 source op.
2430   if (!isIdentityMaskImpl(Mask, NumOpElts))
2431     return false;
2432
2433   // All extending must be with undef elements.
2434   for (int i = NumOpElts; i < NumMaskElts; ++i)
2435     if (Mask[i] != -1)
2436       return false;
2437
2438   return true;
2439 }
2440
// Return true if this shuffle is an identity copy of a leading subvector of
// one operand (the result is strictly narrower than the operand).
// NOTE(review): signature line (original 2441) missing from this listing.
2442   if (isa<UndefValue>(Op<2>()))
2443     return false;
2444
2445   // FIXME: Not currently possible to express a shuffle mask for a scalable
2446   // vector for this case.
2447   if (isa<ScalableVectorType>(getType()))
2448     return false;
2449
2450   int NumOpElts = cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
2451   int NumMaskElts = cast<FixedVectorType>(getType())->getNumElements();
2452   if (NumMaskElts >= NumOpElts)
2453     return false;
2454
2455   return isIdentityMaskImpl(getShuffleMask(), NumOpElts);
2456 }
2457
// Return true if this shuffle concatenates its two (non-undef) operands into
// a result exactly twice as wide.
// NOTE(review): signature line (original 2458) missing from this listing.
2459   // Vector concatenation is differentiated from identity with padding.
2460   if (isa<UndefValue>(Op<0>()) || isa<UndefValue>(Op<1>()) ||
2461       isa<UndefValue>(Op<2>()))
2462     return false;
2463
2464   // FIXME: Not currently possible to express a shuffle mask for a scalable
2465   // vector for this case.
2466   if (isa<ScalableVectorType>(getType()))
2467     return false;
2468
2469   int NumOpElts = cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
2470   int NumMaskElts = cast<FixedVectorType>(getType())->getNumElements();
2471   if (NumMaskElts != NumOpElts * 2)
2472     return false;
2473
2474   // Use the mask length rather than the operands' vector lengths here. We
2475   // already know that the shuffle returns a vector twice as long as the inputs,
2476   // and neither of the inputs are undef vectors. If the mask picks consecutive
2477   // elements from both inputs, then this is a concatenation of the inputs.
2478   return isIdentityMaskImpl(getShuffleMask(), NumMaskElts);
2479 }
2480
// Check that the mask replicates each of VF source elements
// ReplicationFactor times in order: <0,...,0, 1,...,1, ..., VF-1,...,VF-1>,
// with poison lanes permitted anywhere.
// NOTE(review): signature line (original 2481) missing from this listing.
2482                                                    int ReplicationFactor, int VF) {
2483   assert(Mask.size() == (unsigned)ReplicationFactor * VF &&
2484          "Unexpected mask size.");
2485
2486   for (int CurrElt : seq(VF)) {
    // Consume the next ReplicationFactor-sized chunk; every defined lane in
    // it must equal the current source element.
2487     ArrayRef<int> CurrSubMask = Mask.take_front(ReplicationFactor);
2488     assert(CurrSubMask.size() == (unsigned)ReplicationFactor &&
2489            "Run out of mask?");
2490     Mask = Mask.drop_front(ReplicationFactor);
2491     if (!all_of(CurrSubMask, [CurrElt](int MaskElt) {
2492           return MaskElt == PoisonMaskElem || MaskElt == CurrElt;
2493         }))
2494       return false;
2495   }
2496   assert(Mask.empty() && "Did not consume the whole mask?");
2497
2498   return true;
2499 }
2500
// Deduce (ReplicationFactor, VF) such that the mask is a replication mask.
// Without poison lanes the factor is read off directly; with poison lanes all
// consistent factorizations of the mask size are tried, largest factor first.
// NOTE(review): signature line (original 2501) missing from this listing.
2502                                          int &ReplicationFactor, int &VF) {
2503   // undef-less case is trivial.
2504   if (!llvm::is_contained(Mask, PoisonMaskElem)) {
    // The run of leading zeros is exactly the replication factor.
2505     ReplicationFactor =
2506         Mask.take_while([](int MaskElt) { return MaskElt == 0; }).size();
2507     if (ReplicationFactor == 0 || Mask.size() % ReplicationFactor != 0)
2508       return false;
2509     VF = Mask.size() / ReplicationFactor;
2510     return isReplicationMaskWithParams(Mask, ReplicationFactor, VF);
2511   }
2512
2513   // However, if the mask contains undef's, we have to enumerate possible tuples
2514   // and pick one. There are bounds on replication factor: [1, mask size]
2515   // (where RF=1 is an identity shuffle, RF=mask size is a broadcast shuffle)
2516   // Additionally, mask size is a replication factor multiplied by vector size,
2517   // which further significantly reduces the search space.
2518
2519   // Before doing that, let's perform basic correctness checking first.
2520   int Largest = -1;
2521   for (int MaskElt : Mask) {
2522     if (MaskElt == PoisonMaskElem)
2523       continue;
2524     // Elements must be in non-decreasing order.
2525     if (MaskElt < Largest)
2526       return false;
2527     Largest = std::max(Largest, MaskElt);
2528   }
2529
2530   // Prefer larger replication factor if all else equal.
2531   for (int PossibleReplicationFactor :
2532        reverse(seq_inclusive<unsigned>(1, Mask.size()))) {
2533     if (Mask.size() % PossibleReplicationFactor != 0)
2534       continue;
2535     int PossibleVF = Mask.size() / PossibleReplicationFactor;
2536     if (!isReplicationMaskWithParams(Mask, PossibleReplicationFactor,
2537                                      PossibleVF))
2538       continue;
2539     ReplicationFactor = PossibleReplicationFactor;
2540     VF = PossibleVF;
2541     return true;
2542   }
2543
2544   return false;
2545 }
2546
2547 bool ShuffleVectorInst::isReplicationMask(int &ReplicationFactor,
2548                                           int &VF) const {
2549   // Not possible to express a shuffle mask for a scalable vector for this
2550   // case.
2551   if (isa<ScalableVectorType>(getType()))
2552     return false;
2553
  // Note: VF is derived from the operand type and written to the out-param
  // even when the subsequent size check fails and we return false.
2554   VF = cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
2555   if (ShuffleMask.size() % VF != 0)
2556     return false;
2557   ReplicationFactor = ShuffleMask.size() / VF;
2558
2559   return isReplicationMaskWithParams(ShuffleMask, ReplicationFactor, VF);
2560 }
2561
// Check that, VF elements at a time, the mask uses every first-source lane of
// its sub-group exactly once (or the whole sub-group is poison) — i.e. each
// source lane has a single use per group.
// NOTE(review): signature line (original 2562) missing from this listing.
2563   if (VF <= 0 || Mask.size() < static_cast<unsigned>(VF) ||
2564       Mask.size() % VF != 0)
2565     return false;
2566   for (unsigned K = 0, Sz = Mask.size(); K < Sz; K += VF) {
2567     ArrayRef<int> SubMask = Mask.slice(K, VF);
    // A fully-poison sub-group is trivially fine.
2568     if (all_of(SubMask, [](int Idx) { return Idx == PoisonMaskElem; }))
2569       continue;
2570     SmallBitVector Used(VF, false);
2571     for (int Idx : SubMask) {
2572       if (Idx != PoisonMaskElem && Idx < VF)
2573         Used.set(Idx);
2574     }
    // Every lane of the group must be covered exactly once.
2575     if (!Used.all())
2576       return false;
2577   }
2578   return true;
2579 }
2580
2581/// Return true if this shuffle mask is a replication mask.
// Member form: require a fixed vector and a single-source mask, then defer to
// the static per-group one-use check.
// NOTE(review): signature line (original 2582) missing from this listing.
2583   // Not possible to express a shuffle mask for a scalable vector for this
2584   // case.
2585   if (isa<ScalableVectorType>(getType()))
2586     return false;
2587   if (!isSingleSourceMask(ShuffleMask, VF))
2588     return false;
2589
2590   return isOneUseSingleSourceMask(ShuffleMask, VF);
2591 }
2592
2593bool ShuffleVectorInst::isInterleave(unsigned Factor) {
2594 FixedVectorType *OpTy = dyn_cast<FixedVectorType>(getOperand(0)->getType());
2595 // shuffle_vector can only interleave fixed length vectors - for scalable
2596 // vectors, see the @llvm.experimental.vector.interleave2 intrinsic
2597 if (!OpTy)
2598 return false;
2599 unsigned OpNumElts = OpTy->getNumElements();
2600
2601 return isInterleaveMask(ShuffleMask, Factor, OpNumElts * 2);
2602}
2603
// Match an interleave pattern: the mask is split into Factor lanes of
// LaneLen consecutive accesses; each lane must read sequential elements
// starting at some in-bounds index (recorded in StartIndexes). Undef lanes
// are tolerated as long as the defined elements remain consecutive.
// NOTE(review): signature line (original 2604) missing from this listing.
2605     ArrayRef<int> Mask, unsigned Factor, unsigned NumInputElts,
2606     SmallVectorImpl<unsigned> &StartIndexes) {
2607   unsigned NumElts = Mask.size();
2608   if (NumElts % Factor)
2609     return false;
2610
2611   unsigned LaneLen = NumElts / Factor;
2612   if (!isPowerOf2_32(LaneLen))
2613     return false;
2614
2615   StartIndexes.resize(Factor);
2616
2617   // Check whether each element matches the general interleaved rule.
2618   // Ignore undef elements, as long as the defined elements match the rule.
2619   // Outer loop processes all factors (x, y, z in the above example)
2620   unsigned I = 0, J;
2621   for (; I < Factor; I++) {
2622     unsigned SavedLaneValue;
2623     unsigned SavedNoUndefs = 0;
2624
2625     // Inner loop processes consecutive accesses (x, x+1... in the example)
2626     for (J = 0; J < LaneLen - 1; J++) {
2627       // Lane computes x's position in the Mask
2628       unsigned Lane = J * Factor + I;
2629       unsigned NextLane = Lane + Factor;
2630       int LaneValue = Mask[Lane];
2631       int NextLaneValue = Mask[NextLane];
2632
2633       // If both are defined, values must be sequential
2634       if (LaneValue >= 0 && NextLaneValue >= 0 &&
2635           LaneValue + 1 != NextLaneValue)
2636         break;
2637
2638       // If the next value is undef, save the current one as reference
2639       if (LaneValue >= 0 && NextLaneValue < 0) {
2640         SavedLaneValue = LaneValue;
2641         SavedNoUndefs = 1;
2642       }
2643
2644       // Undefs are allowed, but defined elements must still be consecutive:
2645       // i.e.: x,..., undef,..., x + 2,..., undef,..., undef,..., x + 5, ....
2646       // Verify this by storing the last non-undef followed by an undef
2647       // Check that following non-undef masks are incremented with the
2648       // corresponding distance.
2649       if (SavedNoUndefs > 0 && LaneValue < 0) {
2650         SavedNoUndefs++;
2651         if (NextLaneValue >= 0 &&
2652             SavedLaneValue + SavedNoUndefs != (unsigned)NextLaneValue)
2653           break;
2654       }
2655     }
2656
    // An early `break` above means this lane violated the pattern.
2657     if (J < LaneLen - 1)
2658       return false;
2659
2660     int StartMask = 0;
2661     if (Mask[I] >= 0) {
2662       // Check that the start of the I range (J=0) is greater than 0
2663       StartMask = Mask[I];
2664     } else if (Mask[(LaneLen - 1) * Factor + I] >= 0) {
2665       // StartMask defined by the last value in lane
2666       StartMask = Mask[(LaneLen - 1) * Factor + I] - J;
2667     } else if (SavedNoUndefs > 0) {
2668       // StartMask defined by some non-zero value in the j loop
2669       StartMask = SavedLaneValue - (LaneLen - 1 - SavedNoUndefs);
2670     }
2671     // else StartMask remains set to 0, i.e. all elements are undefs
2672
2673     if (StartMask < 0)
2674       return false;
2675     // We must stay within the vectors; This case can happen with undefs.
2676     if (StartMask + LaneLen > NumInputElts)
2677       return false;
2678
2679     StartIndexes[I] = StartMask;
2680   }
2681
2682   return true;
2683 }
2684
2685/// Try to lower a vector shuffle as a bit rotation.
2686///
2687/// Look for a repeated rotation pattern in each sub group.
2688/// Returns an element-wise left bit rotation amount or -1 if failed.
2689static int matchShuffleAsBitRotate(ArrayRef<int> Mask, int NumSubElts) {
2690 int NumElts = Mask.size();
2691 assert((NumElts % NumSubElts) == 0 && "Illegal shuffle mask");
2692
2693 int RotateAmt = -1;
2694 for (int i = 0; i != NumElts; i += NumSubElts) {
2695 for (int j = 0; j != NumSubElts; ++j) {
2696 int M = Mask[i + j];
2697 if (M < 0)
2698 continue;
2699 if (M < i || M >= i + NumSubElts)
2700 return -1;
2701 int Offset = (NumSubElts - (M - (i + j))) % NumSubElts;
2702 if (0 <= RotateAmt && Offset != RotateAmt)
2703 return -1;
2704 RotateAmt = Offset;
2705 }
2706 }
2707 return RotateAmt;
2708}
2709
// Try sub-group widths from MinSubElts up to MaxSubElts (doubling each step)
// until the mask matches an element rotation; on success report the rotation
// in bits and the matching sub-group width.
// NOTE(review): signature line (original 2710) missing from this listing.
2711     ArrayRef<int> Mask, unsigned EltSizeInBits, unsigned MinSubElts,
2712     unsigned MaxSubElts, unsigned &NumSubElts, unsigned &RotateAmt) {
2713   for (NumSubElts = MinSubElts; NumSubElts <= MaxSubElts; NumSubElts *= 2) {
2714     int EltRotateAmt = matchShuffleAsBitRotate(Mask, NumSubElts);
2715     if (EltRotateAmt < 0)
2716       continue;
    // Convert the element rotation into a bit rotation.
2717     RotateAmt = EltRotateAmt * EltSizeInBits;
2718     return true;
2719   }
2720
2721   return false;
2722 }
2723
2724//===----------------------------------------------------------------------===//
2725// InsertValueInst Class
2726//===----------------------------------------------------------------------===//
2727
2728 void InsertValueInst::init(Value *Agg, Value *Val, ArrayRef<unsigned> Idxs,
2729                            const Twine &Name) {
2730   assert(getNumOperands() == 2 && "NumOperands not initialized?");
2731
2732   // There's no fundamental reason why we require at least one index
2733   // (other than weirdness with &*IdxBegin being invalid; see
2734   // getelementptr's init routine for example). But there's no
2735   // present need to support it.
2736   assert(!Idxs.empty() && "InsertValueInst must have at least one index");
2737
  // NOTE(review): the first line of this assert (original line 2738,
  // presumably `assert(ExtractValueInst::getIndexedType(Agg->getType(), Idxs)
  // ==`) was dropped by the listing extraction — confirm upstream.
2739          Val->getType() && "Inserted value must match indexed type!");
2740   Op<0>() = Agg;
2741   Op<1>() = Val;
2742
2743   Indices.append(Idxs.begin(), Idxs.end());
2744   setName(Name);
2745 }
2746
// Copy constructor: clones operands and the index list of IVI.
// NOTE(review): one body line (original 2753, presumably the
// SubclassOptionalData copy) is missing from this listing — confirm upstream.
2747 InsertValueInst::InsertValueInst(const InsertValueInst &IVI)
2748     : Instruction(IVI.getType(), InsertValue,
2749                   OperandTraits<InsertValueInst>::op_begin(this), 2),
2750       Indices(IVI.Indices) {
2751   Op<0>() = IVI.getOperand(0);
2752   Op<1>() = IVI.getOperand(1);
2754 }
2755
2756//===----------------------------------------------------------------------===//
2757// ExtractValueInst Class
2758//===----------------------------------------------------------------------===//
2759
2760void ExtractValueInst::init(ArrayRef<unsigned> Idxs, const Twine &Name) {
2761 assert(getNumOperands() == 1 && "NumOperands not initialized?");
2762
2763 // There's no fundamental reason why we require at least one index.
2764 // But there's no present need to support it.
2765 assert(!Idxs.empty() && "ExtractValueInst must have at least one index");
2766
2767 Indices.append(Idxs.begin(), Idxs.end());
2768 setName(Name);
2769}
2770
// Copy constructor: clones the aggregate operand and index list of EVI.
// NOTE(review): one body line (original 2774, presumably the
// SubclassOptionalData copy) is missing from this listing — confirm upstream.
2771 ExtractValueInst::ExtractValueInst(const ExtractValueInst &EVI)
2772     : UnaryInstruction(EVI.getType(), ExtractValue, EVI.getOperand(0)),
2773       Indices(EVI.Indices) {
2775 }
2776
2777// getIndexedType - Returns the type of the element that would be extracted
2778// with an extractvalue instruction with the specified parameters.
2779//
2780// A null type is returned if the indices are invalid for the specified
2781// pointer type.
2782//
// Walk the index list through nested array/struct types, returning the
// indexed element type or null if any index is out of bounds or applied to a
// non-aggregate type.
// NOTE(review): signature line (original 2783) missing from this listing.
2784                                              ArrayRef<unsigned> Idxs) {
2785   for (unsigned Index : Idxs) {
2786     // We can't use CompositeType::indexValid(Index) here.
2787     // indexValid() always returns true for arrays because getelementptr allows
2788     // out-of-bounds indices. Since we don't allow those for extractvalue and
2789     // insertvalue we need to check array indexing manually.
2790     // Since the only other types we can index into are struct types it's just
2791     // as easy to check those manually as well.
2792     if (ArrayType *AT = dyn_cast<ArrayType>(Agg)) {
2793       if (Index >= AT->getNumElements())
2794         return nullptr;
2795       Agg = AT->getElementType();
2796     } else if (StructType *ST = dyn_cast<StructType>(Agg)) {
2797       if (Index >= ST->getNumElements())
2798         return nullptr;
2799       Agg = ST->getElementType(Index);
2800     } else {
2801       // Not a valid type to index into.
2802       return nullptr;
2803     }
2804   }
2805   return const_cast<Type*>(Agg);
2806 }
2807
2808//===----------------------------------------------------------------------===//
2809// UnaryOperator Class
2810//===----------------------------------------------------------------------===//
2811
// UnaryOperator constructor, insert-before form.
// NOTE(review): signature line (original 2812) missing from this listing.
2813                              Type *Ty, const Twine &Name,
2814                              Instruction *InsertBefore)
2815     : UnaryInstruction(Ty, iType, S, InsertBefore) {
2816   Op<0>() = S;
2817   setName(Name);
2818   AssertOK();
2819 }
2820
// UnaryOperator constructor, insert-at-block-end form.
// NOTE(review): signature line (original 2821) missing from this listing.
2822                              Type *Ty, const Twine &Name,
2823                              BasicBlock *InsertAtEnd)
2824     : UnaryInstruction(Ty, iType, S, InsertAtEnd) {
2825   Op<0>() = S;
2826   setName(Name);
2827   AssertOK();
2828 }
2829
// Factory: result type equals the operand type.
// NOTE(review): signature line (original 2830) missing from this listing.
2831                                      const Twine &Name,
2832                                      Instruction *InsertBefore) {
2833   return new UnaryOperator(Op, S, S->getType(), Name, InsertBefore);
2834 }
2835
// Factory, insert-at-block-end form: builds detached, then appends.
// NOTE(review): signature line (original 2836) missing from this listing.
2837                                      const Twine &Name,
2838                                      BasicBlock *InsertAtEnd) {
2839   UnaryOperator *Res = Create(Op, S, Name);
2840   Res->insertInto(InsertAtEnd, InsertAtEnd->end());
2841   return Res;
2842 }
2843
// Debug-build sanity checks for a freshly constructed UnaryOperator: FNeg
// must return its operand's type and operate on floating-point (vectors).
2844 void UnaryOperator::AssertOK() {
2845   Value *LHS = getOperand(0);
2846   (void)LHS; // Silence warnings.
2847 #ifndef NDEBUG
2848   switch (getOpcode()) {
2849   case FNeg:
2850     assert(getType() == LHS->getType() &&
2851            "Unary operation should return same type as operand!");
2852     assert(getType()->isFPOrFPVectorTy() &&
2853            "Tried to create a floating-point operation on a "
2854            "non-floating-point type!");
2855     break;
  // FNeg is currently the only unary opcode.
2856   default: llvm_unreachable("Invalid opcode provided");
2857   }
2858 #endif
2859 }
2860
2861//===----------------------------------------------------------------------===//
2862// BinaryOperator Class
2863//===----------------------------------------------------------------------===//
2864
// BinaryOperator constructor, insert-before form.
// NOTE(review): signature line (original 2865) missing from this listing.
2866                                Type *Ty, const Twine &Name,
2867                                Instruction *InsertBefore)
2868   : Instruction(Ty, iType,
2869                 OperandTraits<BinaryOperator>::op_begin(this),
2870                 OperandTraits<BinaryOperator>::operands(this),
2871                 InsertBefore) {
2872   Op<0>() = S1;
2873   Op<1>() = S2;
2874   setName(Name);
2875   AssertOK();
2876 }
2877
// BinaryOperator constructor, insert-at-block-end form.
// NOTE(review): signature line (original 2878) missing from this listing.
2879                                Type *Ty, const Twine &Name,
2880                                BasicBlock *InsertAtEnd)
2881   : Instruction(Ty, iType,
2882                 OperandTraits<BinaryOperator>::op_begin(this),
2883                 OperandTraits<BinaryOperator>::operands(this),
2884                 InsertAtEnd) {
2885   Op<0>() = S1;
2886   Op<1>() = S2;
2887   setName(Name);
2888   AssertOK();
2889 }
2890
// Debug-build sanity checks for a freshly constructed BinaryOperator: both
// operands share a type, the result type matches, and the operand category
// (integer vs floating-point vectors/scalars) fits the opcode.
2891 void BinaryOperator::AssertOK() {
2892   Value *LHS = getOperand(0), *RHS = getOperand(1);
2893   (void)LHS; (void)RHS; // Silence warnings.
2894   assert(LHS->getType() == RHS->getType() &&
2895          "Binary operator operand types must match!");
2896 #ifndef NDEBUG
2897   switch (getOpcode()) {
2898   case Add: case Sub:
2899   case Mul:
2900     assert(getType() == LHS->getType() &&
2901            "Arithmetic operation should return same type as operands!");
2902     assert(getType()->isIntOrIntVectorTy() &&
2903            "Tried to create an integer operation on a non-integer type!");
2904     break;
2905   case FAdd: case FSub:
2906   case FMul:
2907     assert(getType() == LHS->getType() &&
2908            "Arithmetic operation should return same type as operands!");
2909     assert(getType()->isFPOrFPVectorTy() &&
2910            "Tried to create a floating-point operation on a "
2911            "non-floating-point type!");
2912     break;
2913   case UDiv:
2914   case SDiv:
2915     assert(getType() == LHS->getType() &&
2916            "Arithmetic operation should return same type as operands!");
2917     assert(getType()->isIntOrIntVectorTy() &&
2918            "Incorrect operand type (not integer) for S/UDIV");
2919     break;
2920   case FDiv:
2921     assert(getType() == LHS->getType() &&
2922            "Arithmetic operation should return same type as operands!");
2923     assert(getType()->isFPOrFPVectorTy() &&
2924            "Incorrect operand type (not floating point) for FDIV");
2925     break;
2926   case URem:
2927   case SRem:
2928     assert(getType() == LHS->getType() &&
2929            "Arithmetic operation should return same type as operands!");
2930     assert(getType()->isIntOrIntVectorTy() &&
2931            "Incorrect operand type (not integer) for S/UREM");
2932     break;
2933   case FRem:
2934     assert(getType() == LHS->getType() &&
2935            "Arithmetic operation should return same type as operands!");
2936     assert(getType()->isFPOrFPVectorTy() &&
2937            "Incorrect operand type (not floating point) for FREM");
2938     break;
2939   case Shl:
2940   case LShr:
2941   case AShr:
2942     assert(getType() == LHS->getType() &&
2943            "Shift operation should return same type as operands!");
2944     assert(getType()->isIntOrIntVectorTy() &&
2945            "Tried to create a shift operation on a non-integral type!");
2946     break;
2947   case And: case Or:
2948   case Xor:
2949     assert(getType() == LHS->getType() &&
2950            "Logical operation should return same type as operands!");
2951     assert(getType()->isIntOrIntVectorTy() &&
2952            "Tried to create a logical operation on a non-integral type!");
2953     break;
2954   default: llvm_unreachable("Invalid opcode provided");
2955   }
2956 #endif
2957 }
2958
// Factory: result type equals the (matching) operand type.
// NOTE(review): signature line (original 2959) missing from this listing.
2960                                        const Twine &Name,
2961                                        Instruction *InsertBefore) {
2962   assert(S1->getType() == S2->getType() &&
2963          "Cannot create binary operator with two operands of differing type!");
2964   return new BinaryOperator(Op, S1, S2, S1->getType(), Name, InsertBefore);
2965 }
2966
// Factory, insert-at-block-end form: builds detached, then appends.
// NOTE(review): signature line (original 2967) missing from this listing.
2968                                        const Twine &Name,
2969                                        BasicBlock *InsertAtEnd) {
2970   BinaryOperator *Res = Create(Op, S1, S2, Name);
2971   Res->insertInto(InsertAtEnd, InsertAtEnd->end());
2972   return Res;
2973 }
2974
// Negation lowered as `0 - Op`, insert-before form.
// NOTE(review): signature line (original 2975) missing from this listing.
2976                                           Instruction *InsertBefore) {
2977   Value *Zero = ConstantInt::get(Op->getType(), 0);
2978   return new BinaryOperator(Instruction::Sub,
2979                             Zero, Op,
2980                             Op->getType(), Name, InsertBefore);
2981 }
2982
// Negation lowered as `0 - Op`, insert-at-block-end form.
// NOTE(review): signature line (original 2983) missing from this listing.
2984                                           BasicBlock *InsertAtEnd) {
2985   Value *Zero = ConstantInt::get(Op->getType(), 0);
2986   return new BinaryOperator(Instruction::Sub,
2987                             Zero, Op,
2988                             Op->getType(), Name, InsertAtEnd);
2989 }
2990
// Negation with the no-signed-wrap flag, insert-before form.
// NOTE(review): signature line (original 2991) missing from this listing.
2992                                              Instruction *InsertBefore) {
2993   Value *Zero = ConstantInt::get(Op->getType(), 0);
2994   return BinaryOperator::CreateNSWSub(Zero, Op, Name, InsertBefore);
2995 }
2996
// Negation with the no-signed-wrap flag, insert-at-block-end form.
// NOTE(review): signature line (original 2997) missing from this listing.
2998                                              BasicBlock *InsertAtEnd) {
2999   Value *Zero = ConstantInt::get(Op->getType(), 0);
3000   return BinaryOperator::CreateNSWSub(Zero, Op, Name, InsertAtEnd);
3001 }
3002
// Negation with the no-unsigned-wrap flag, insert-before form.
// NOTE(review): signature line (original 3003) missing from this listing.
3004                                              Instruction *InsertBefore) {
3005   Value *Zero = ConstantInt::get(Op->getType(), 0);
3006   return BinaryOperator::CreateNUWSub(Zero, Op, Name, InsertBefore);
3007 }
3008
// Negation with the no-unsigned-wrap flag, insert-at-block-end form.
// NOTE(review): signature line (original 3009) missing from this listing.
3010                                              BasicBlock *InsertAtEnd) {
3011   Value *Zero = ConstantInt::get(Op->getType(), 0);
3012   return BinaryOperator::CreateNUWSub(Zero, Op, Name, InsertAtEnd);
3013 }
3014
// Bitwise NOT lowered as `Op ^ all-ones`, insert-before form.
// NOTE(review): signature line (original 3015) missing from this listing.
3016                                           Instruction *InsertBefore) {
3017   Constant *C = Constant::getAllOnesValue(Op->getType());
3018   return new BinaryOperator(Instruction::Xor, Op, C,
3019                             Op->getType(), Name, InsertBefore);
3020 }
3021
// Bitwise NOT lowered as `Op ^ all-ones`, insert-at-block-end form.
// NOTE(review): the signature line (original 3022) and the AllOnes
// declaration line (original 3024, presumably
// `Constant *AllOnes = Constant::getAllOnesValue(Op->getType());`) are
// missing from this listing — confirm upstream.
3023                                           BasicBlock *InsertAtEnd) {
3025   return new BinaryOperator(Instruction::Xor, Op, AllOnes,
3026                             Op->getType(), Name, InsertAtEnd);
3027 }
3028
3029// Exchange the two operands to this instruction. This instruction is safe to
3030// use on any binary instruction and does not modify the semantics of the
3031// instruction. If the instruction is order-dependent (SetLT f.e.), the opcode
3032// is changed.
3034 if (!isCommutative())
3035 return true; // Can't commute operands
3036 Op<0>().swap(Op<1>());
3037 return false;
3038}
3039
3040//===----------------------------------------------------------------------===//
3041// FPMathOperator Class
3042//===----------------------------------------------------------------------===//
3043
3045 const MDNode *MD =
3046 cast<Instruction>(this)->getMetadata(LLVMContext::MD_fpmath);
3047 if (!MD)
3048 return 0.0;
3049 ConstantFP *Accuracy = mdconst::extract<ConstantFP>(MD->getOperand(0));
3050 return Accuracy->getValueAPF().convertToFloat();
3051}
3052
3053//===----------------------------------------------------------------------===//
3054// CastInst Class
3055//===----------------------------------------------------------------------===//
3056
3057// Just determine if this cast only deals with integral->integral conversion.
3059 switch (getOpcode()) {
3060 default: return false;
3061 case Instruction::ZExt:
3062 case Instruction::SExt:
3063 case Instruction::Trunc:
3064 return true;
3065 case Instruction::BitCast:
3066 return getOperand(0)->getType()->isIntegerTy() &&
3067 getType()->isIntegerTy();
3068 }
3069}
3070
3071/// This function determines if the CastInst does not require any bits to be
3072/// changed in order to effect the cast. Essentially, it identifies cases where
3073/// no code gen is necessary for the cast, hence the name no-op cast. For
3074/// example, the following are all no-op casts:
3075/// # bitcast i32* %x to i8*
3076/// # bitcast <2 x i32> %x to <4 x i16>
3077/// # ptrtoint i32* %x to i32 ; on 32-bit plaforms only
3078/// Determine if the described cast is a no-op.
3080 Type *SrcTy,
3081 Type *DestTy,
3082 const DataLayout &DL) {
3083 assert(castIsValid(Opcode, SrcTy, DestTy) && "method precondition");
3084 switch (Opcode) {
3085 default: llvm_unreachable("Invalid CastOp");
3086 case Instruction::Trunc:
3087 case Instruction::ZExt:
3088 case Instruction::SExt:
3089 case Instruction::FPTrunc:
3090 case Instruction::FPExt:
3091 case Instruction::UIToFP:
3092 case Instruction::SIToFP:
3093 case Instruction::FPToUI:
3094 case Instruction::FPToSI:
3095 case Instruction::AddrSpaceCast:
3096 // TODO: Target informations may give a more accurate answer here.
3097 return false;
3098 case Instruction::BitCast:
3099 return true; // BitCast never modifies bits.
3100 case Instruction::PtrToInt:
3101 return DL.getIntPtrType(SrcTy)->getScalarSizeInBits() ==
3102 DestTy->getScalarSizeInBits();
3103 case Instruction::IntToPtr:
3104 return DL.getIntPtrType(DestTy)->getScalarSizeInBits() ==
3105 SrcTy->getScalarSizeInBits();
3106 }
3107}
3108
3110 return isNoopCast(getOpcode(), getOperand(0)->getType(), getType(), DL);
3111}
3112
3113/// This function determines if a pair of casts can be eliminated and what
3114/// opcode should be used in the elimination. This assumes that there are two
3115/// instructions like this:
3116/// * %F = firstOpcode SrcTy %x to MidTy
3117/// * %S = secondOpcode MidTy %F to DstTy
3118/// The function returns a resultOpcode so these two casts can be replaced with:
3119/// * %Replacement = resultOpcode %SrcTy %x to DstTy
3120/// If no such cast is permitted, the function returns 0.
3123 Type *SrcTy, Type *MidTy, Type *DstTy, Type *SrcIntPtrTy, Type *MidIntPtrTy,
3124 Type *DstIntPtrTy) {
3125 // Define the 144 possibilities for these two cast instructions. The values
3126 // in this matrix determine what to do in a given situation and select the
3127 // case in the switch below. The rows correspond to firstOp, the columns
3128 // correspond to secondOp. In looking at the table below, keep in mind
3129 // the following cast properties:
3130 //
3131 // Size Compare Source Destination
3132 // Operator Src ? Size Type Sign Type Sign
3133 // -------- ------------ ------------------- ---------------------
3134 // TRUNC > Integer Any Integral Any
3135 // ZEXT < Integral Unsigned Integer Any
3136 // SEXT < Integral Signed Integer Any
3137 // FPTOUI n/a FloatPt n/a Integral Unsigned
3138 // FPTOSI n/a FloatPt n/a Integral Signed
3139 // UITOFP n/a Integral Unsigned FloatPt n/a
3140 // SITOFP n/a Integral Signed FloatPt n/a
3141 // FPTRUNC > FloatPt n/a FloatPt n/a
3142 // FPEXT < FloatPt n/a FloatPt n/a
3143 // PTRTOINT n/a Pointer n/a Integral Unsigned
3144 // INTTOPTR n/a Integral Unsigned Pointer n/a
3145 // BITCAST = FirstClass n/a FirstClass n/a
3146 // ADDRSPCST n/a Pointer n/a Pointer n/a
3147 //
3148 // NOTE: some transforms are safe, but we consider them to be non-profitable.
3149 // For example, we could merge "fptoui double to i32" + "zext i32 to i64",
3150 // into "fptoui double to i64", but this loses information about the range
3151 // of the produced value (we no longer know the top-part is all zeros).
3152 // Further this conversion is often much more expensive for typical hardware,
3153 // and causes issues when building libgcc. We disallow fptosi+sext for the
3154 // same reason.
3155 const unsigned numCastOps =
3156 Instruction::CastOpsEnd - Instruction::CastOpsBegin;
3157 static const uint8_t CastResults[numCastOps][numCastOps] = {
3158 // T F F U S F F P I B A -+
3159 // R Z S P P I I T P 2 N T S |
3160 // U E E 2 2 2 2 R E I T C C +- secondOp
3161 // N X X U S F F N X N 2 V V |
3162 // C T T I I P P C T T P T T -+
3163 { 1, 0, 0,99,99, 0, 0,99,99,99, 0, 3, 0}, // Trunc -+
3164 { 8, 1, 9,99,99, 2,17,99,99,99, 2, 3, 0}, // ZExt |
3165 { 8, 0, 1,99,99, 0, 2,99,99,99, 0, 3, 0}, // SExt |
3166 { 0, 0, 0,99,99, 0, 0,99,99,99, 0, 3, 0}, // FPToUI |
3167 { 0, 0, 0,99,99, 0, 0,99,99,99, 0, 3, 0}, // FPToSI |
3168 { 99,99,99, 0, 0,99,99, 0, 0,99,99, 4, 0}, // UIToFP +- firstOp
3169 { 99,99,99, 0, 0,99,99, 0, 0,99,99, 4, 0}, // SIToFP |
3170 { 99,99,99, 0, 0,99,99, 0, 0,99,99, 4, 0}, // FPTrunc |
3171 { 99,99,99, 2, 2,99,99, 8, 2,99,99, 4, 0}, // FPExt |
3172 { 1, 0, 0,99,99, 0, 0,99,99,99, 7, 3, 0}, // PtrToInt |
3173 { 99,99,99,99,99,99,99,99,99,11,99,15, 0}, // IntToPtr |
3174 { 5, 5, 5, 6, 6, 5, 5, 6, 6,16, 5, 1,14}, // BitCast |
3175 { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,13,12}, // AddrSpaceCast -+
3176 };
3177
3178 // TODO: This logic could be encoded into the table above and handled in the
3179 // switch below.
3180 // If either of the casts are a bitcast from scalar to vector, disallow the
3181 // merging. However, any pair of bitcasts are allowed.
3182 bool IsFirstBitcast = (firstOp == Instruction::BitCast);
3183 bool IsSecondBitcast = (secondOp == Instruction::BitCast);
3184 bool AreBothBitcasts = IsFirstBitcast && IsSecondBitcast;
3185
3186 // Check if any of the casts convert scalars <-> vectors.
3187 if ((IsFirstBitcast && isa<VectorType>(SrcTy) != isa<VectorType>(MidTy)) ||
3188 (IsSecondBitcast && isa<VectorType>(MidTy) != isa<VectorType>(DstTy)))
3189 if (!AreBothBitcasts)
3190 return 0;
3191
3192 int ElimCase = CastResults[firstOp-Instruction::CastOpsBegin]
3193 [secondOp-Instruction::CastOpsBegin];
3194 switch (ElimCase) {
3195 case 0:
3196 // Categorically disallowed.
3197 return 0;
3198 case 1:
3199 // Allowed, use first cast's opcode.
3200 return firstOp;
3201 case 2:
3202 // Allowed, use second cast's opcode.
3203 return secondOp;
3204 case 3:
3205 // No-op cast in second op implies firstOp as long as the DestTy
3206 // is integer and we are not converting between a vector and a
3207 // non-vector type.
3208 if (!SrcTy->isVectorTy() && DstTy->isIntegerTy())
3209 return firstOp;
3210 return 0;
3211 case 4:
3212 // No-op cast in second op implies firstOp as long as the DestTy
3213 // is floating point.
3214 if (DstTy->isFloatingPointTy())
3215 return firstOp;
3216 return 0;
3217 case 5:
3218 // No-op cast in first op implies secondOp as long as the SrcTy
3219 // is an integer.
3220 if (SrcTy->isIntegerTy())
3221 return secondOp;
3222 return 0;
3223 case 6:
3224 // No-op cast in first op implies secondOp as long as the SrcTy
3225 // is a floating point.
3226 if (SrcTy->isFloatingPointTy())
3227 return secondOp;
3228 return 0;
3229 case 7: {
3230 // Disable inttoptr/ptrtoint optimization if enabled.
3231 if (DisableI2pP2iOpt)
3232 return 0;
3233
3234 // Cannot simplify if address spaces are different!
3235 if (SrcTy->getPointerAddressSpace() != DstTy->getPointerAddressSpace())
3236 return 0;
3237
3238 unsigned MidSize = MidTy->getScalarSizeInBits();
3239 // We can still fold this without knowing the actual sizes as long we
3240 // know that the intermediate pointer is the largest possible
3241 // pointer size.
3242 // FIXME: Is this always true?
3243 if (MidSize == 64)
3244 return Instruction::BitCast;
3245
3246 // ptrtoint, inttoptr -> bitcast (ptr -> ptr) if int size is >= ptr size.
3247 if (!SrcIntPtrTy || DstIntPtrTy != SrcIntPtrTy)
3248 return 0;
3249 unsigned PtrSize = SrcIntPtrTy->getScalarSizeInBits();
3250 if (MidSize >= PtrSize)
3251 return Instruction::BitCast;
3252 return 0;
3253 }
3254 case 8: {
3255 // ext, trunc -> bitcast, if the SrcTy and DstTy are the same
3256 // ext, trunc -> ext, if sizeof(SrcTy) < sizeof(DstTy)
3257 // ext, trunc -> trunc, if sizeof(SrcTy) > sizeof(DstTy)
3258 unsigned SrcSize = SrcTy->getScalarSizeInBits();
3259 unsigned DstSize = DstTy->getScalarSizeInBits();
3260 if (SrcTy == DstTy)
3261 return Instruction::BitCast;
3262 if (SrcSize < DstSize)
3263 return firstOp;
3264 if (SrcSize > DstSize)
3265 return secondOp;
3266 return 0;
3267 }
3268 case 9:
3269 // zext, sext -> zext, because sext can't sign extend after zext
3270 return Instruction::ZExt;
3271 case 11: {
3272 // inttoptr, ptrtoint -> bitcast if SrcSize<=PtrSize and SrcSize==DstSize
3273 if (!MidIntPtrTy)
3274 return 0;
3275 unsigned PtrSize = MidIntPtrTy->getScalarSizeInBits();
3276 unsigned SrcSize = SrcTy->getScalarSizeInBits();
3277 unsigned DstSize = DstTy->getScalarSizeInBits();
3278 if (SrcSize <= PtrSize && SrcSize == DstSize)
3279 return Instruction::BitCast;
3280 return 0;
3281 }
3282 case 12:
3283 // addrspacecast, addrspacecast -> bitcast, if SrcAS == DstAS
3284 // addrspacecast, addrspacecast -> addrspacecast, if SrcAS != DstAS
3285 if (SrcTy->getPointerAddressSpace() != DstTy->getPointerAddressSpace())
3286 return Instruction::AddrSpaceCast;
3287 return Instruction::BitCast;
3288 case 13:
3289 // FIXME: this state can be merged with (1), but the following assert
3290 // is useful to check the correcteness of the sequence due to semantic
3291 // change of bitcast.
3292 assert(
3293 SrcTy->isPtrOrPtrVectorTy() &&
3294 MidTy->isPtrOrPtrVectorTy() &&
3295 DstTy->isPtrOrPtrVectorTy() &&
3296 SrcTy->getPointerAddressSpace() != MidTy->getPointerAddressSpace() &&
3297 MidTy->getPointerAddressSpace() == DstTy->getPointerAddressSpace() &&
3298 "Illegal addrspacecast, bitcast sequence!");
3299 // Allowed, use first cast's opcode
3300 return firstOp;
3301 case 14:
3302 // bitcast, addrspacecast -> addrspacecast
3303 return Instruction::AddrSpaceCast;
3304 case 15:
3305 // FIXME: this state can be merged with (1), but the following assert
3306 // is useful to check the correcteness of the sequence due to semantic
3307 // change of bitcast.
3308 assert(
3309 SrcTy->isIntOrIntVectorTy() &&
3310 MidTy->isPtrOrPtrVectorTy() &&
3311 DstTy->isPtrOrPtrVectorTy() &&
3312 MidTy->getPointerAddressSpace() == DstTy->getPointerAddressSpace() &&
3313 "Illegal inttoptr, bitcast sequence!");
3314 // Allowed, use first cast's opcode
3315 return firstOp;
3316 case 16:
3317 // FIXME: this state can be merged with (2), but the following assert
3318 // is useful to check the correcteness of the sequence due to semantic
3319 // change of bitcast.
3320 assert(
3321 SrcTy->isPtrOrPtrVectorTy() &&
3322 MidTy->isPtrOrPtrVectorTy() &&
3323 DstTy->isIntOrIntVectorTy() &&
3324 SrcTy->getPointerAddressSpace() == MidTy->getPointerAddressSpace() &&
3325 "Illegal bitcast, ptrtoint sequence!");
3326 // Allowed, use second cast's opcode
3327 return secondOp;
3328 case 17:
3329 // (sitofp (zext x)) -> (uitofp x)
3330 return Instruction::UIToFP;
3331 case 99:
3332 // Cast combination can't happen (error in input). This is for all cases
3333 // where the MidTy is not the same for the two cast instructions.
3334 llvm_unreachable("Invalid Cast Combination");
3335 default:
3336 llvm_unreachable("Error in CastResults table!!!");
3337 }
3338}
3339
3341 const Twine &Name, Instruction *InsertBefore) {
3342 assert(castIsValid(op, S, Ty) && "Invalid cast!");
3343 // Construct and return the appropriate CastInst subclass
3344 switch (op) {
3345 case Trunc: return new TruncInst (S, Ty, Name, InsertBefore);
3346 case ZExt: return new ZExtInst (S, Ty, Name, InsertBefore);
3347 case SExt: return new SExtInst (S, Ty, Name, InsertBefore);
3348 case FPTrunc: return new FPTruncInst (S, Ty, Name, InsertBefore);
3349 case FPExt: return new FPExtInst (S, Ty, Name, InsertBefore);
3350 case UIToFP: return new UIToFPInst (S, Ty, Name, InsertBefore);
3351 case SIToFP: return new SIToFPInst (S, Ty, Name, InsertBefore);
3352 case FPToUI: return new FPToUIInst (S, Ty, Name, InsertBefore);
3353 case FPToSI: return new FPToSIInst (S, Ty, Name, InsertBefore);
3354 case PtrToInt: return new PtrToIntInst (S, Ty, Name, InsertBefore);
3355 case IntToPtr: return new IntToPtrInst (S, Ty, Name, InsertBefore);
3356 case BitCast: return new BitCastInst (S, Ty, Name, InsertBefore);
3357 case AddrSpaceCast: return new AddrSpaceCastInst (S, Ty, Name, InsertBefore);
3358 default: llvm_unreachable("Invalid opcode provided");
3359 }
3360}
3361
3363 const Twine &Name, BasicBlock *InsertAtEnd) {
3364 assert(castIsValid(op, S, Ty) && "Invalid cast!");
3365 // Construct and return the appropriate CastInst subclass
3366 switch (op) {
3367 case Trunc: return new TruncInst (S, Ty, Name, InsertAtEnd);
3368 case ZExt: return new ZExtInst (S, Ty, Name, InsertAtEnd);
3369 case SExt: return new SExtInst (S, Ty, Name, InsertAtEnd);
3370 case FPTrunc: return new FPTruncInst (S, Ty, Name, InsertAtEnd);
3371 case FPExt: return new FPExtInst (S, Ty, Name, InsertAtEnd);
3372 case UIToFP: return new UIToFPInst (S, Ty, Name, InsertAtEnd);
3373 case SIToFP: return new SIToFPInst (S, Ty, Name, InsertAtEnd);
3374 case FPToUI: return new FPToUIInst (S, Ty, Name, InsertAtEnd);
3375 case FPToSI: return new FPToSIInst (S, Ty, Name, InsertAtEnd);
3376 case PtrToInt: return new PtrToIntInst (S, Ty, Name, InsertAtEnd);
3377 case IntToPtr: return new IntToPtrInst (S, Ty, Name, InsertAtEnd);
3378 case BitCast: return new BitCastInst (S, Ty, Name, InsertAtEnd);
3379 case AddrSpaceCast: return new AddrSpaceCastInst (S, Ty, Name, InsertAtEnd);
3380 default: llvm_unreachable("Invalid opcode provided");
3381 }
3382}
3383
3385 const Twine &Name,
3386 Instruction *InsertBefore) {
3387 if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
3388 return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
3389 return Create(Instruction::ZExt, S, Ty, Name, InsertBefore);
3390}
3391
3393 const Twine &Name,
3394 BasicBlock *InsertAtEnd) {
3395 if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
3396 return Create(Instruction::BitCast, S, Ty, Name, InsertAtEnd);
3397 return Create(Instruction::ZExt, S, Ty, Name, InsertAtEnd);
3398}
3399
3401 const Twine &Name,
3402 Instruction *InsertBefore) {
3403 if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
3404 return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
3405 return Create(Instruction::SExt, S, Ty, Name, InsertBefore);
3406}
3407
3409 const Twine &Name,
3410 BasicBlock *InsertAtEnd) {
3411 if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
3412 return Create(Instruction::BitCast, S, Ty, Name, InsertAtEnd);
3413 return Create(Instruction::SExt, S, Ty, Name, InsertAtEnd);
3414}
3415
3417 const Twine &Name,
3418 Instruction *InsertBefore) {
3419 if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
3420 return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
3421 return Create(Instruction::Trunc, S, Ty, Name, InsertBefore);
3422}
3423
3425 const Twine &Name,
3426 BasicBlock *InsertAtEnd) {
3427 if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
3428 return Create(Instruction::BitCast, S, Ty, Name, InsertAtEnd);
3429 return Create(Instruction::Trunc, S, Ty, Name, InsertAtEnd);
3430}
3431
3433 const Twine &Name,
3434 BasicBlock *InsertAtEnd) {
3435 assert(S->getType()->isPtrOrPtrVectorTy() && "Invalid cast");
3436 assert((Ty->isIntOrIntVectorTy() || Ty->isPtrOrPtrVectorTy()) &&
3437 "Invalid cast");
3438 assert(Ty->isVectorTy() == S->getType()->isVectorTy() && "Invalid cast");
3439 assert((!Ty->isVectorTy() ||
3440 cast<VectorType>(Ty)->getElementCount() ==
3441 cast<VectorType>(S->getType())->getElementCount()) &&
3442 "Invalid cast");
3443
3444 if (Ty->isIntOrIntVectorTy())
3445 return Create(Instruction::PtrToInt, S, Ty, Name, InsertAtEnd);
3446
3447 return CreatePointerBitCastOrAddrSpaceCast(S, Ty, Name, InsertAtEnd);
3448}
3449
3450/// Create a BitCast or a PtrToInt cast instruction
3452 const Twine &Name,
3453 Instruction *InsertBefore) {
3454 assert(S->getType()->isPtrOrPtrVectorTy() && "Invalid cast");
3455 assert((Ty->isIntOrIntVectorTy() || Ty->isPtrOrPtrVectorTy()) &&
3456 "Invalid cast");
3457 assert(Ty->isVectorTy() == S->getType()->isVectorTy() && "Invalid cast");
3458 assert((!Ty->isVectorTy() ||
3459 cast<VectorType>(Ty)->getElementCount() ==
3460 cast<VectorType>(S->getType())->getElementCount()) &&
3461 "Invalid cast");
3462
3463 if (Ty->isIntOrIntVectorTy())
3464 return Create(Instruction::PtrToInt, S, Ty, Name, InsertBefore);
3465
3466 return CreatePointerBitCastOrAddrSpaceCast(S, Ty, Name, InsertBefore);
3467}
3468
3470 Value *S, Type *Ty,
3471 const Twine &Name,
3472 BasicBlock *InsertAtEnd) {
3473 assert(S->getType()->isPtrOrPtrVectorTy() && "Invalid cast");
3474 assert(Ty->isPtrOrPtrVectorTy() && "Invalid cast");
3475
3477 return Create(Instruction::AddrSpaceCast, S, Ty, Name, InsertAtEnd);
3478
3479 return Create(Instruction::BitCast, S, Ty, Name, InsertAtEnd);
3480}
3481
3483 Value *S, Type *Ty,
3484 const Twine &Name,
3485 Instruction *InsertBefore) {
3486 assert(S->getType()->isPtrOrPtrVectorTy() && "Invalid cast");
3487 assert(Ty->isPtrOrPtrVectorTy() && "Invalid cast");
3488
3490 return Create(Instruction::AddrSpaceCast, S, Ty, Name, InsertBefore);
3491
3492 return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
3493}
3494
3496 const Twine &Name,
3497 Instruction *InsertBefore) {
3498 if (S->getType()->isPointerTy() && Ty->isIntegerTy())
3499 return Create(Instruction::PtrToInt, S, Ty, Name, InsertBefore);
3500 if (S->getType()->isIntegerTy() && Ty->isPointerTy())
3501 return Create(Instruction::IntToPtr, S, Ty, Name, InsertBefore);
3502
3503 return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
3504}
3505
3507 bool isSigned, const Twine &Name,
3508 Instruction *InsertBefore) {
3509 assert(C->getType()->isIntOrIntVectorTy() && Ty->isIntOrIntVectorTy() &&
3510 "Invalid integer cast");
3511 unsigned SrcBits = C->getType()->getScalarSizeInBits();
3512 unsigned DstBits = Ty->getScalarSizeInBits();
3513 Instruction::CastOps opcode =
3514 (SrcBits == DstBits ? Instruction::BitCast :
3515 (SrcBits > DstBits ? Instruction::Trunc :
3516 (isSigned ? Instruction::SExt : Instruction::ZExt)));
3517 return Create(opcode, C, Ty, Name, InsertBefore);
3518}
3519
3521 bool isSigned, const Twine &Name,
3522 BasicBlock *InsertAtEnd) {
3523 assert(C->getType()->isIntOrIntVectorTy() && Ty->isIntOrIntVectorTy() &&
3524 "Invalid cast");
3525 unsigned SrcBits = C->getType()->getScalarSizeInBits();
3526 unsigned DstBits = Ty->getScalarSizeInBits();
3527 Instruction::CastOps opcode =
3528 (SrcBits == DstBits ? Instruction::BitCast :
3529 (SrcBits > DstBits ? Instruction::Trunc :
3530 (isSigned ? Instruction::SExt : Instruction::ZExt)));
3531 return Create(opcode, C, Ty, Name, InsertAtEnd);
3532}
3533
3535 const Twine &Name,
3536 Instruction *InsertBefore) {
3537 assert(C->getType()->isFPOrFPVectorTy() && Ty->isFPOrFPVectorTy() &&
3538 "Invalid cast");
3539 unsigned SrcBits = C->getType()->getScalarSizeInBits();
3540 unsigned DstBits = Ty->getScalarSizeInBits();
3541 Instruction::CastOps opcode =
3542 (SrcBits == DstBits ? Instruction::BitCast :
3543 (SrcBits > DstBits ? Instruction::FPTrunc : Instruction::FPExt));
3544 return Create(opcode, C, Ty, Name, InsertBefore);
3545}
3546
3548 const Twine &Name,
3549 BasicBlock *InsertAtEnd) {
3550 assert(C->getType()->isFPOrFPVectorTy() && Ty->isFPOrFPVectorTy() &&
3551 "Invalid cast");
3552 unsigned SrcBits = C->getType()->getScalarSizeInBits();
3553 unsigned DstBits = Ty->getScalarSizeInBits();
3554 Instruction::CastOps opcode =
3555 (SrcBits == DstBits ? Instruction::BitCast :
3556 (SrcBits > DstBits ? Instruction::FPTrunc : Instruction::FPExt));
3557 return Create(opcode, C, Ty, Name, InsertAtEnd);
3558}
3559
3560bool CastInst::isBitCastable(Type *SrcTy, Type *DestTy) {
3561 if (!SrcTy->isFirstClassType() || !DestTy->isFirstClassType())
3562 return false;
3563
3564 if (SrcTy == DestTy)
3565 return true;
3566
3567 if (VectorType *SrcVecTy = dyn_cast<VectorType>(SrcTy)) {
3568 if (VectorType *DestVecTy = dyn_cast<VectorType>(DestTy)) {
3569 if (SrcVecTy->getElementCount() == DestVecTy->getElementCount()) {
3570 // An element by element cast. Valid if casting the elements is valid.
3571 SrcTy = SrcVecTy->getElementType();
3572 DestTy = DestVecTy->getElementType();
3573 }
3574 }
3575 }
3576
3577 if (PointerType *DestPtrTy = dyn_cast<PointerType>(DestTy)) {
3578 if (PointerType *SrcPtrTy = dyn_cast<PointerType>(SrcTy)) {
3579 return SrcPtrTy->getAddressSpace() == DestPtrTy->getAddressSpace();
3580 }
3581 }
3582
3583 TypeSize SrcBits = SrcTy->getPrimitiveSizeInBits(); // 0 for ptr
3584 TypeSize DestBits = DestTy->getPrimitiveSizeInBits(); // 0 for ptr
3585
3586 // Could still have vectors of pointers if the number of elements doesn't
3587 // match
3588 if (SrcBits.getKnownMinValue() == 0 || DestBits.getKnownMinValue() == 0)
3589 return false;
3590
3591 if (SrcBits != DestBits)
3592 return false;
3593
3594 if (DestTy->isX86_MMXTy() || SrcTy->isX86_MMXTy())
3595 return false;
3596
3597 return true;
3598}
3599
3601 const DataLayout &DL) {
3602 // ptrtoint and inttoptr are not allowed on non-integral pointers
3603 if (auto *PtrTy = dyn_cast<PointerType>(SrcTy))
3604 if (auto *IntTy = dyn_cast<IntegerType>(DestTy))
3605 return (IntTy->getBitWidth() == DL.getPointerTypeSizeInBits(PtrTy) &&
3606 !DL.isNonIntegralPointerType(PtrTy));
3607 if (auto *PtrTy = dyn_cast<PointerType>(DestTy))
3608 if (auto *IntTy = dyn_cast<IntegerType>(SrcTy))
3609 return (IntTy->getBitWidth() == DL.getPointerTypeSizeInBits(PtrTy) &&
3610 !DL.isNonIntegralPointerType(PtrTy));
3611
3612 return isBitCastable(SrcTy, DestTy);
3613}
3614
3615// Provide a way to get a "cast" where the cast opcode is inferred from the
3616// types and size of the operand. This, basically, is a parallel of the
3617// logic in the castIsValid function below. This axiom should hold:
3618// castIsValid( getCastOpcode(Val, Ty), Val, Ty)
3619// should not assert in castIsValid. In other words, this produces a "correct"
3620// casting opcode for the arguments passed to it.
3623 const Value *Src, bool SrcIsSigned, Type *DestTy, bool DestIsSigned) {
3624 Type *SrcTy = Src->getType();
3625
3626 assert(SrcTy->isFirstClassType() && DestTy->isFirstClassType() &&
3627 "Only first class types are castable!");
3628
3629 if (SrcTy == DestTy)
3630 return BitCast;
3631
3632 // FIXME: Check address space sizes here
3633 if (VectorType *SrcVecTy = dyn_cast<VectorType>(SrcTy))
3634 if (VectorType *DestVecTy = dyn_cast<VectorType>(DestTy))
3635 if (SrcVecTy->getElementCount() == DestVecTy->getElementCount()) {
3636 // An element by element cast. Find the appropriate opcode based on the
3637 // element types.
3638 SrcTy = SrcVecTy->getElementType();
3639 DestTy = DestVecTy->getElementType();
3640 }
3641
3642 // Get the bit sizes, we'll need these
3643 unsigned SrcBits = SrcTy->getPrimitiveSizeInBits(); // 0 for ptr
3644 unsigned DestBits = DestTy->getPrimitiveSizeInBits(); // 0 for ptr
3645
3646 // Run through the possibilities ...
3647 if (DestTy->isIntegerTy()) { // Casting to integral
3648 if (SrcTy->isIntegerTy()) { // Casting from integral
3649 if (DestBits < SrcBits)
3650 return Trunc; // int -> smaller int
3651 else if (DestBits > SrcBits) { // its an extension
3652 if (SrcIsSigned)
3653 return SExt; // signed -> SEXT
3654 else
3655 return ZExt; // unsigned -> ZEXT
3656 } else {
3657 return BitCast; // Same size, No-op cast
3658 }
3659 } else if (SrcTy->isFloatingPointTy()) { // Casting from floating pt
3660 if (DestIsSigned)
3661 return FPToSI; // FP -> sint
3662 else
3663 return FPToUI; // FP -> uint
3664 } else if (SrcTy->isVectorTy()) {
3665 assert(DestBits == SrcBits &&
3666 "Casting vector to integer of different width");
3667 return BitCast; // Same size, no-op cast
3668 } else {
3669 assert(SrcTy->isPointerTy() &&
3670 "Casting from a value that is not first-class type");
3671 return PtrToInt; // ptr -> int
3672 }
3673 } else if (DestTy->isFloatingPointTy()) { // Casting to floating pt
3674 if (SrcTy->isIntegerTy()) { // Casting from integral
3675 if (SrcIsSigned)
3676 return SIToFP; // sint -> FP
3677 else
3678 return UIToFP; // uint -> FP
3679 } else if (SrcTy->isFloatingPointTy()) { // Casting from floating pt
3680 if (DestBits < SrcBits) {
3681 return FPTrunc; // FP -> smaller FP
3682 } else if (DestBits > SrcBits) {
3683 return FPExt; // FP -> larger FP
3684 } else {
3685 return BitCast; // same size, no-op cast
3686 }
3687 } else if (SrcTy->isVectorTy()) {
3688 assert(DestBits == SrcBits &&
3689 "Casting vector to floating point of different width");
3690 return BitCast; // same size, no-op cast
3691 }
3692 llvm_unreachable("Casting pointer or non-first class to float");
3693 } else if (DestTy->isVectorTy()) {
3694 assert(DestBits == SrcBits &&
3695 "Illegal cast to vector (wrong type or size)");
3696 return BitCast;
3697 } else if (DestTy->isPointerTy()) {
3698 if (SrcTy->isPointerTy()) {
3699 if (DestTy->getPointerAddressSpace() != SrcTy->getPointerAddressSpace())
3700 return AddrSpaceCast;
3701 return BitCast; // ptr -> ptr
3702 } else if (SrcTy->isIntegerTy()) {
3703 return IntToPtr; // int -> ptr
3704 }
3705 llvm_unreachable("Casting pointer to other than pointer or int");
3706 } else if (DestTy->isX86_MMXTy()) {
3707 if (SrcTy->isVectorTy()) {
3708 assert(DestBits == SrcBits && "Casting vector of wrong width to X86_MMX");
3709 return BitCast; // 64-bit vector to MMX
3710 }
3711 llvm_unreachable("Illegal cast to X86_MMX");
3712 }
3713 llvm_unreachable("Casting to type that is not first-class");
3714}
3715
3716//===----------------------------------------------------------------------===//
3717// CastInst SubClass Constructors
3718//===----------------------------------------------------------------------===//
3719
3720/// Check that the construction parameters for a CastInst are correct. This
3721/// could be broken out into the separate constructors but it is useful to have
3722/// it in one place and to eliminate the redundant code for getting the sizes
3723/// of the types involved.
3724bool
3726 if (!SrcTy->isFirstClassType() || !DstTy->isFirstClassType() ||
3727 SrcTy->isAggregateType() || DstTy->isAggregateType())
3728 return false;
3729
3730 // Get the size of the types in bits, and whether we are dealing
3731 // with vector types, we'll need this later.
3732 bool SrcIsVec = isa<VectorType>(SrcTy);
3733 bool DstIsVec = isa<VectorType>(DstTy);
3734 unsigned SrcScalarBitSize = SrcTy->getScalarSizeInBits();
3735 unsigned DstScalarBitSize = DstTy->getScalarSizeInBits();
3736
3737 // If these are vector types, get the lengths of the vectors (using zero for
3738 // scalar types means that checking that vector lengths match also checks that
3739 // scalars are not being converted to vectors or vectors to scalars).
3740 ElementCount SrcEC = SrcIsVec ? cast<VectorType>(SrcTy)->getElementCount()
3742 ElementCount DstEC = DstIsVec ? cast<VectorType>(DstTy)->getElementCount()
3744
3745 // Switch on the opcode provided
3746 switch (op) {
3747 default: return false; // This is an input error
3748 case Instruction::Trunc:
3749 return SrcTy->isIntOrIntVectorTy() && DstTy->isIntOrIntVectorTy() &&
3750 SrcEC == DstEC && SrcScalarBitSize > DstScalarBitSize;
3751 case Instruction::ZExt:
3752 return SrcTy->isIntOrIntVectorTy() && DstTy->isIntOrIntVectorTy() &&
3753 SrcEC == DstEC && SrcScalarBitSize < DstScalarBitSize;
3754 case Instruction::SExt:
3755 return SrcTy->isIntOrIntVectorTy() && DstTy->isIntOrIntVectorTy() &&
3756 SrcEC == DstEC && SrcScalarBitSize < DstScalarBitSize;
3757 case Instruction::FPTrunc:
3758 return SrcTy->isFPOrFPVectorTy() && DstTy->isFPOrFPVectorTy() &&
3759 SrcEC == DstEC && SrcScalarBitSize > DstScalarBitSize;
3760 case Instruction::FPExt:
3761 return SrcTy->isFPOrFPVectorTy() && DstTy->isFPOrFPVectorTy() &&
3762 SrcEC == DstEC && SrcScalarBitSize < DstScalarBitSize;
3763 case Instruction::UIToFP:
3764 case Instruction::SIToFP:
3765 return SrcTy->isIntOrIntVectorTy() && DstTy->isFPOrFPVectorTy() &&
3766 SrcEC == DstEC;
3767 case Instruction::FPToUI:
3768 case Instruction::FPToSI:
3769 return SrcTy->isFPOrFPVectorTy() && DstTy->isIntOrIntVectorTy() &&
3770 SrcEC == DstEC;
3771 case Instruction::PtrToInt:
3772 if (SrcEC != DstEC)
3773 return false;
3774 return SrcTy->isPtrOrPtrVectorTy() && DstTy->isIntOrIntVectorTy();
3775 case Instruction::IntToPtr:
3776 if (SrcEC != DstEC)
3777 return false;
3778 return SrcTy->isIntOrIntVectorTy() && DstTy->isPtrOrPtrVectorTy();
3779 case Instruction::BitCast: {
3780 PointerType *SrcPtrTy = dyn_cast<PointerType>(SrcTy->getScalarType());
3781 PointerType *DstPtrTy = dyn_cast<PointerType>(DstTy->getScalarType());
3782
3783 // BitCast implies a no-op cast of type only. No bits change.
3784 // However, you can't cast pointers to anything but pointers.
3785 if (!SrcPtrTy != !DstPtrTy)
3786 return false;
3787
3788 // For non-pointer cases, the cast is okay if the source and destination bit
3789 // widths are identical.
3790 if (!SrcPtrTy)
3791 return SrcTy->getPrimitiveSizeInBits() == DstTy->getPrimitiveSizeInBits();
3792
3793 // If both are pointers then the address spaces must match.
3794 if (SrcPtrTy->getAddressSpace() != DstPtrTy->getAddressSpace())
3795 return false;
3796
3797 // A vector of pointers must have the same number of elements.
3798 if (SrcIsVec && DstIsVec)
3799 return SrcEC == DstEC;
3800 if (SrcIsVec)
3801 return SrcEC == ElementCount::getFixed(1);
3802 if (DstIsVec)
3803 return DstEC == ElementCount::getFixed(1);
3804
3805 return true;
3806 }
3807 case Instruction::AddrSpaceCast: {
3808 PointerType *SrcPtrTy = dyn_cast<PointerType>(SrcTy->getScalarType());
3809 if (!SrcPtrTy)
3810 return false;
3811
3812 PointerType *DstPtrTy = dyn_cast<PointerType>(DstTy->getScalarType());
3813 if (!DstPtrTy)
3814 return false;
3815
3816 if (SrcPtrTy->getAddressSpace() == DstPtrTy->getAddressSpace())
3817 return false;
3818
3819 return SrcEC == DstEC;
3820 }
3821 }
3822}
3823
3825 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
3826) : CastInst(Ty, Trunc, S, Name, InsertBefore) {
3827 assert(castIsValid(getOpcode(), S, Ty) && "Illegal Trunc");
3828}
3829
3831 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
3832) : CastInst(Ty, Trunc, S, Name, InsertAtEnd) {
3833 assert(castIsValid(getOpcode(), S, Ty) && "Illegal Trunc");
3834}
3835
3837 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
3838) : CastInst(Ty, ZExt, S, Name, InsertBefore) {
3839 assert(castIsValid(getOpcode(), S, Ty) && "Illegal ZExt");
3840}
3841
3843 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
3844) : CastInst(Ty, ZExt, S, Name, InsertAtEnd) {
3845 assert(castIsValid(getOpcode(), S, Ty) && "Illegal ZExt");
3846}
3848 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
3849) : CastInst(Ty, SExt, S, Name, InsertBefore) {
3850 assert(castIsValid(getOpcode(), S, Ty) && "Illegal SExt");
3851}
3852
3854 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
3855) : CastInst(Ty, SExt, S, Name, InsertAtEnd) {
3856 assert(castIsValid(getOpcode(), S, Ty) && "Illegal SExt");
3857}
3858
3860 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
3861) : CastInst(Ty, FPTrunc, S, Name, InsertBefore) {
3862 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPTrunc");
3863}
3864
3866 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
3867) : CastInst(Ty, FPTrunc, S, Name, InsertAtEnd) {
3868 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPTrunc");
3869}
3870
3872 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
3873) : CastInst(Ty, FPExt, S, Name, InsertBefore) {
3874 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPExt");
3875}
3876
3878 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
3879) : CastInst(Ty, FPExt, S, Name, InsertAtEnd) {
3880 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPExt");
3881}
3882
3884 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
3885) : CastInst(Ty, UIToFP, S, Name, InsertBefore) {
3886 assert(castIsValid(getOpcode(), S, Ty) && "Illegal UIToFP");
3887}
3888
3890 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
3891) : CastInst(Ty, UIToFP, S, Name, InsertAtEnd) {
3892 assert(castIsValid(getOpcode(), S, Ty) && "Illegal UIToFP");
3893}
3894
3896 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
3897) : CastInst(Ty, SIToFP, S, Name, InsertBefore) {
3898 assert(castIsValid(getOpcode(), S, Ty) && "Illegal SIToFP");
3899}
3900
3902 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
3903) : CastInst(Ty, SIToFP, S, Name, InsertAtEnd) {
3904 assert(castIsValid(getOpcode(), S, Ty) && "Illegal SIToFP");
3905}
3906
3908 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
3909) : CastInst(Ty, FPToUI, S, Name, InsertBefore) {
3910 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPToUI");
3911}
3912
3914 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
3915) : CastInst(Ty, FPToUI, S, Name, InsertAtEnd) {
3916 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPToUI");
3917}
3918
3920 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
3921) : CastInst(Ty, FPToSI, S, Name, InsertBefore) {
3922 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPToSI");
3923}
3924
3926 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
3927) : CastInst(Ty, FPToSI, S, Name, InsertAtEnd) {
3928 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPToSI");
3929}
3930
3932 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
3933) : CastInst(Ty, PtrToInt, S, Name, InsertBefore) {
3934 assert(castIsValid(getOpcode(), S, Ty) && "Illegal PtrToInt");
3935}
3936
3938 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
3939) : CastInst(Ty, PtrToInt, S, Name, InsertAtEnd) {
3940 assert(castIsValid(getOpcode(), S, Ty) && "Illegal PtrToInt");
3941}
3942
3944 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
3945) : CastInst(Ty, IntToPtr, S, Name, InsertBefore) {
3946 assert(castIsValid(getOpcode(), S, Ty) && "Illegal IntToPtr");
3947}
3948
3950 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
3951) : CastInst(Ty, IntToPtr, S, Name, InsertAtEnd) {
3952 assert(castIsValid(getOpcode(), S, Ty) && "Illegal IntToPtr");
3953}
3954
3956 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
3957) : CastInst(Ty, BitCast, S, Name, InsertBefore) {
3958 assert(castIsValid(getOpcode(), S, Ty) && "Illegal BitCast");
3959}
3960
3962 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
3963) : CastInst(Ty, BitCast, S, Name, InsertAtEnd) {
3964 assert(castIsValid(getOpcode(), S, Ty) && "Illegal BitCast");
3965}
3966
3968 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
3969) : CastInst(Ty, AddrSpaceCast, S, Name, InsertBefore) {
3970 assert(castIsValid(getOpcode(), S, Ty) && "Illegal AddrSpaceCast");
3971}
3972
3974 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
3975) : CastInst(Ty, AddrSpaceCast, S, Name, InsertAtEnd) {
3976 assert(castIsValid(getOpcode(), S, Ty) && "Illegal AddrSpaceCast");
3977}
3978
3979//===----------------------------------------------------------------------===//
3980// CmpInst Classes
3981//===----------------------------------------------------------------------===//
3982
3984 Value *RHS, const Twine &Name, Instruction *InsertBefore,
3985 Instruction *FlagsSource)
3986 : Instruction(ty, op,
3987 OperandTraits<CmpInst>::op_begin(this),
3988 OperandTraits<CmpInst>::operands(this),
3989 InsertBefore) {
3990 Op<0>() = LHS;
3991 Op<1>() = RHS;
3992 setPredicate((Predicate)predicate);
3993 setName(Name);
3994 if (FlagsSource)
3995 copyIRFlags(FlagsSource);
3996}
3997
3999 Value *RHS, const Twine &Name, BasicBlock *InsertAtEnd)
4000 : Instruction(ty, op,
4001 OperandTraits<CmpInst>::op_begin(this),
4002 OperandTraits<CmpInst>::operands(this),
4003 InsertAtEnd) {
4004 Op<0>() = LHS;
4005 Op<1>() = RHS;
4006 setPredicate((Predicate)predicate);
4007 setName(Name);
4008}
4009
4010CmpInst *
4012 const Twine &Name, Instruction *InsertBefore) {
4013 if (Op == Instruction::ICmp) {
4014 if (InsertBefore)
4015 return new ICmpInst(InsertBefore, CmpInst::Predicate(predicate),
4016 S1, S2, Name);
4017 else
4018 return new ICmpInst(CmpInst::Predicate(predicate),
4019 S1, S2, Name);
4020 }
4021
4022 if (InsertBefore)
4023 return new FCmpInst(InsertBefore, CmpInst::Predicate(predicate),
4024 S1, S2, Name);
4025 else
4026 return new FCmpInst(CmpInst::Predicate(predicate),
4027 S1, S2, Name);
4028}
4029
4030CmpInst *
4032 const Twine &Name, BasicBlock *InsertAtEnd) {
4033 if (Op == Instruction::ICmp) {
4034 return new ICmpInst(*InsertAtEnd, CmpInst::Predicate(predicate),
4035 S1, S2, Name);
4036 }
4037 return new FCmpInst(*InsertAtEnd, CmpInst::Predicate(predicate),
4038 S1, S2, Name);
4039}
4040
4042 if (ICmpInst *IC = dyn_cast<ICmpInst>(this))
4043 IC->swapOperands();
4044 else
4045 cast<FCmpInst>(this)->swapOperands();
4046}
4047
4049 if (const ICmpInst *IC = dyn_cast<ICmpInst>(this))
4050 return IC->isCommutative();
4051 return cast<FCmpInst>(this)->isCommutative();
4052}
4053
4056 return ICmpInst::isEquality(P);
4058 return FCmpInst::isEquality(P);
4059 llvm_unreachable("Unsupported predicate kind");
4060}
4061
4063 switch (pred) {
4064 default: llvm_unreachable("Unknown cmp predicate!");
4065 case ICMP_EQ: return ICMP_NE;
4066 case ICMP_NE: return ICMP_EQ;
4067 case ICMP_UGT: return ICMP_ULE;
4068 case ICMP_ULT: return ICMP_UGE;
4069 case ICMP_UGE: return ICMP_ULT;
4070 case ICMP_ULE: return ICMP_UGT;
4071 case ICMP_SGT: return ICMP_SLE;
4072 case ICMP_SLT: return ICMP_SGE;
4073 case ICMP_SGE: return ICMP_SLT;
4074 case ICMP_SLE: return ICMP_SGT;
4075
4076 case FCMP_OEQ: return FCMP_UNE;
4077 case FCMP_ONE: return FCMP_UEQ;
4078 case FCMP_OGT: return FCMP_ULE;
4079 case FCMP_OLT: return FCMP_UGE;
4080 case FCMP_OGE: return FCMP_ULT;
4081 case FCMP_OLE: return FCMP_UGT;
4082 case FCMP_UEQ: return FCMP_ONE;
4083 case FCMP_UNE: return FCMP_OEQ;
4084 case FCMP_UGT: return FCMP_OLE;
4085 case FCMP_ULT: return FCMP_OGE;
4086 case FCMP_UGE: return FCMP_OLT;
4087 case FCMP_ULE: return FCMP_OGT;
4088 case FCMP_ORD: return FCMP_UNO;
4089 case FCMP_UNO: return FCMP_ORD;
4090 case FCMP_TRUE: return FCMP_FALSE;
4091 case FCMP_FALSE: return FCMP_TRUE;
4092 }
4093}
4094
4096 switch (Pred) {
4097 default: return "unknown";
4098 case FCmpInst::FCMP_FALSE: return "false";
4099 case FCmpInst::FCMP_OEQ: return "oeq";
4100 case FCmpInst::FCMP_OGT: return "ogt";
4101 case FCmpInst::FCMP_OGE: return "oge";
4102 case FCmpInst::FCMP_OLT: return "olt";
4103 case FCmpInst::FCMP_OLE: return "ole";
4104 case FCmpInst::FCMP_ONE: return "one";
4105 case FCmpInst::FCMP_ORD: return "ord";
4106 case FCmpInst::FCMP_UNO: return "uno";
4107 case FCmpInst::FCMP_UEQ: return "ueq";
4108 case FCmpInst::FCMP_UGT: return "ugt";
4109 case FCmpInst::FCMP_UGE: return "uge";
4110 case FCmpInst::FCMP_ULT: return "ult";
4111 case FCmpInst::FCMP_ULE: return "ule";
4112 case FCmpInst::FCMP_UNE: return "une";
4113 case FCmpInst::FCMP_TRUE: return "true";
4114 case ICmpInst::ICMP_EQ: return "eq";
4115 case ICmpInst::ICMP_NE: return "ne";
4116 case ICmpInst::ICMP_SGT: return "sgt";
4117 case ICmpInst::ICMP_SGE: return "sge";
4118 case ICmpInst::ICMP_SLT: return "slt";
4119 case ICmpInst::ICMP_SLE: return "sle";
4120 case ICmpInst::ICMP_UGT: return "ugt";
4121 case ICmpInst::ICMP_UGE: return "uge";
4122 case ICmpInst::ICMP_ULT: return "ult";
4123 case ICmpInst::ICMP_ULE: return "ule";
4124 }
4125}
4126
4129 return OS;
4130}
4131
4133 switch (pred) {
4134 default: llvm_unreachable("Unknown icmp predicate!");
4135 case ICMP_EQ: case ICMP_NE:
4136 case ICMP_SGT: case ICMP_SLT: case ICMP_SGE: case ICMP_SLE:
4137 return pred;
4138 case ICMP_UGT: return ICMP_SGT;
4139 case ICMP_ULT: return ICMP_SLT;
4140 case ICMP_UGE: return ICMP_SGE;
4141 case ICMP_ULE: return ICMP_SLE;
4142 }
4143}
4144
4146 switch (pred) {
4147 default: llvm_unreachable("Unknown icmp predicate!");
4148 case ICMP_EQ: case ICMP_NE:
4149 case ICMP_UGT: case ICMP_ULT: case ICMP_UGE: case ICMP_ULE:
4150 return pred;
4151 case ICMP_SGT: return ICMP_UGT;
4152 case ICMP_SLT: return ICMP_ULT;
4153 case ICMP_SGE: return ICMP_UGE;
4154 case ICMP_SLE: return ICMP_ULE;
4155 }
4156}
4157
4159 switch (pred) {
4160 default: llvm_unreachable("Unknown cmp predicate!");
4161 case ICMP_EQ: case ICMP_NE:
4162 return pred;
4163 case ICMP_SGT: return ICMP_SLT;
4164 case ICMP_SLT: return ICMP_SGT;
4165 case ICMP_SGE: return ICMP_SLE;
4166 case ICMP_SLE: return ICMP_SGE;
4167 case ICMP_UGT: return ICMP_ULT;
4168 case ICMP_ULT: return ICMP_UGT;
4169 case ICMP_UGE: return ICMP_ULE;
4170 case ICMP_ULE: return ICMP_UGE;
4171
4172 case FCMP_FALSE: case FCMP_TRUE:
4173 case FCMP_OEQ: case FCMP_ONE:
4174 case FCMP_UEQ: case FCMP_UNE:
4175 case FCMP_ORD: case FCMP_UNO:
4176 return pred;
4177 case FCMP_OGT: return FCMP_OLT;
4178 case FCMP_OLT: return FCMP_OGT;
4179 case FCMP_OGE: return FCMP_OLE;
4180 case FCMP_OLE: return FCMP_OGE;
4181 case FCMP_UGT: return FCMP_ULT;
4182 case FCMP_ULT: return FCMP_UGT;
4183 case FCMP_UGE: return FCMP_ULE;
4184 case FCMP_ULE: return FCMP_UGE;
4185 }
4186}
4187
4189 switch (pred) {
4190 case ICMP_SGE:
4191 case ICMP_SLE:
4192 case ICMP_UGE:
4193 case ICMP_ULE:
4194 case FCMP_OGE:
4195 case FCMP_OLE:
4196 case FCMP_UGE:
4197 case FCMP_ULE:
4198 return true;
4199 default:
4200 return false;
4201 }
4202}
4203
4205 switch (pred) {
4206 case ICMP_SGT:
4207 case ICMP_SLT:
4208 case ICMP_UGT:
4209 case ICMP_ULT:
4210 case FCMP_OGT:
4211 case FCMP_OLT:
4212 case FCMP_UGT:
4213 case FCMP_ULT:
4214 return true;
4215 default:
4216 return false;
4217 }
4218}
4219
4221 switch (pred) {
4222 case ICMP_SGE:
4223 return ICMP_SGT;
4224 case ICMP_SLE:
4225 return ICMP_SLT;
4226 case ICMP_UGE:
4227 return ICMP_UGT;
4228 case ICMP_ULE:
4229 return ICMP_ULT;
4230 case FCMP_OGE:
4231 return FCMP_OGT;
4232 case FCMP_OLE:
4233 return FCMP_OLT;
4234 case FCMP_UGE:
4235 return FCMP_UGT;
4236 case FCMP_ULE:
4237 return FCMP_ULT;
4238 default:
4239 return pred;
4240 }
4241}
4242
4244 switch (pred) {
4245 case ICMP_SGT:
4246 return ICMP_SGE;
4247 case ICMP_SLT:
4248 return ICMP_SLE;
4249 case ICMP_UGT:
4250 return ICMP_UGE;
4251 case ICMP_ULT:
4252 return ICMP_ULE;
4253 case FCMP_OGT:
4254 return FCMP_OGE;
4255 case FCMP_OLT:
4256 return FCMP_OLE;
4257 case FCMP_UGT:
4258 return FCMP_UGE;
4259 case FCMP_ULT:
4260 return FCMP_ULE;
4261 default:
4262 return pred;
4263 }
4264}
4265
4267 assert(CmpInst::isRelational(pred) && "Call only with relational predicate!");
4268
4272 return getStrictPredicate(pred);
4273
4274 llvm_unreachable("Unknown predicate!");
4275}
4276
4278 assert(CmpInst::isUnsigned(pred) && "Call only with unsigned predicates!");
4279
4280 switch (pred) {
4281 default:
4282 llvm_unreachable("Unknown predicate!");
4283 case CmpInst::ICMP_ULT:
4284 return CmpInst::ICMP_SLT;
4285 case CmpInst::ICMP_ULE:
4286 return CmpInst::ICMP_SLE;
4287 case CmpInst::ICMP_UGT:
4288 return CmpInst::ICMP_SGT;
4289 case CmpInst::ICMP_UGE:
4290 return CmpInst::ICMP_SGE;
4291 }
4292}
4293
4295 assert(CmpInst::isSigned(pred) && "Call only with signed predicates!");
4296
4297 switch (pred) {
4298 default:
4299 llvm_unreachable("Unknown predicate!");
4300 case CmpInst::ICMP_SLT:
4301 return CmpInst::ICMP_ULT;
4302 case CmpInst::ICMP_SLE:
4303 return CmpInst::ICMP_ULE;
4304 case CmpInst::ICMP_SGT:
4305 return CmpInst::ICMP_UGT;
4306 case CmpInst::ICMP_SGE:
4307 return CmpInst::ICMP_UGE;
4308 }
4309}
4310
4312 switch (predicate) {
4313 default: return false;
4315 case ICmpInst::ICMP_UGE: return true;
4316 }
4317}
4318
4320 switch (predicate) {
4321 default: return false;
4323 case ICmpInst::ICMP_SGE: return true;
4324 }
4325}
4326
4327bool ICmpInst::compare(const APInt &LHS, const APInt &RHS,
4328 ICmpInst::Predicate Pred) {
4329 assert(ICmpInst::isIntPredicate(Pred) && "Only for integer predicates!");
4330 switch (Pred) {
4332 return LHS.eq(RHS);
4334 return LHS.ne(RHS);
4336 return LHS.ugt(RHS);
4338 return LHS.uge(RHS);
4340 return LHS.ult(RHS);
4342 return LHS.ule(RHS);
4344 return LHS.sgt(RHS);
4346 return LHS.sge(RHS);
4348 return LHS.slt(RHS);
4350 return LHS.sle(RHS);
4351 default:
4352 llvm_unreachable("Unexpected non-integer predicate.");
4353 };
4354}
4355
4356bool FCmpInst::compare(const APFloat &LHS, const APFloat &RHS,
4357 FCmpInst::Predicate Pred) {
4358 APFloat::cmpResult R = LHS.compare(RHS);
4359 switch (Pred) {
4360 default:
4361 llvm_unreachable("Invalid FCmp Predicate");
4363 return false;
4365 return true;
4366 case FCmpInst::FCMP_UNO:
4367 return R == APFloat::cmpUnordered;
4368 case FCmpInst::FCMP_ORD:
4369 return R != APFloat::cmpUnordered;
4370 case FCmpInst::FCMP_UEQ:
4371 return R == APFloat::cmpUnordered || R == APFloat::cmpEqual;
4372 case FCmpInst::FCMP_OEQ:
4373 return R == APFloat::cmpEqual;
4374 case FCmpInst::FCMP_UNE:
4375 return R != APFloat::cmpEqual;
4376 case FCmpInst::FCMP_ONE:
4378 case FCmpInst::FCMP_ULT: