Verifier.cpp
1//===-- Verifier.cpp - Implement the Module Verifier -----------------------==//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file defines the function verifier interface, which can be used for
10// some basic correctness checking of input to the system.
11//
12// Note that this does not provide full `Java style' security and verification;
13// instead, it just tries to ensure that code is well-formed.
14//
15// * Both of a binary operator's parameters are of the same type
16// * Verify that the indices of mem access instructions match other operands
17// * Verify that arithmetic and other things are only performed on first-class
18// types. Verify that shifts & logicals only happen on integral types, for example.
19// * All of the constants in a switch statement are of the correct type
20// * The code is in valid SSA form
21// * It should be illegal to put a label into any other type (like a structure)
22// or to return one. [except constant arrays!]
23// * Only phi nodes can be self referential: 'add i32 %0, %0 ; <int>:0' is bad
24// * PHI nodes must have an entry for each predecessor, with no extras.
25// * PHI nodes must be the first thing in a basic block, all grouped together
26// * All basic blocks should only end with terminator insts, not contain them
27// * The entry node to a function must not have predecessors
28// * All Instructions must be embedded into a basic block
29// * Functions cannot take a void-typed parameter
30// * Verify that a function's argument list agrees with its declared type.
31// * It is illegal to specify a name for a void value.
32// * It is illegal to have an internal global value with no initializer
33// * It is illegal to have a ret instruction that returns a value that does not
34// agree with the function return value type.
35// * Function call argument types match the function prototype
36// * A landing pad is defined by a landingpad instruction, and can be jumped to
37// only by the unwind edge of an invoke instruction.
38// * A landingpad instruction must be the first non-PHI instruction in the
39// block.
40// * Landingpad instructions must be in a function with a personality function.
41// * Convergence control intrinsics are introduced in ConvergentOperations.rst.
42// The applied restrictions are too numerous to list here.
43// * The convergence entry intrinsic and the loop heart must be the first
44// non-PHI instruction in their respective block. This does not conflict with
45// the landing pads, since these two kinds cannot occur in the same block.
46// * All other things that are tested by asserts spread about the code...
47//
48//===----------------------------------------------------------------------===//
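//
// A minimal usage sketch (an illustration, not part of this file): clients
// normally drive the verifier through the entry points declared in
// llvm/IR/Verifier.h rather than using the Verifier class below directly. The
// helper name `reportBrokenModule` is hypothetical.
//
//   #include "llvm/IR/Verifier.h"
//   #include "llvm/Support/raw_ostream.h"
//
//   // verifyModule() returns true when M is broken; diagnostics go to errs().
//   static bool reportBrokenModule(const llvm::Module &M) {
//     return llvm::verifyModule(M, &llvm::errs());
//   }
//
// verifyFunction(F, &llvm::errs()) works the same way for a single function.
//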
49
50#include "llvm/IR/Verifier.h"
51#include "llvm/ADT/APFloat.h"
52#include "llvm/ADT/APInt.h"
53#include "llvm/ADT/ArrayRef.h"
54#include "llvm/ADT/DenseMap.h"
55#include "llvm/ADT/MapVector.h"
57#include "llvm/ADT/STLExtras.h"
59#include "llvm/ADT/SmallSet.h"
62#include "llvm/ADT/StringMap.h"
63#include "llvm/ADT/StringRef.h"
64#include "llvm/ADT/Twine.h"
66#include "llvm/IR/Argument.h"
68#include "llvm/IR/Attributes.h"
69#include "llvm/IR/BasicBlock.h"
70#include "llvm/IR/CFG.h"
71#include "llvm/IR/CallingConv.h"
72#include "llvm/IR/Comdat.h"
73#include "llvm/IR/Constant.h"
75#include "llvm/IR/Constants.h"
77#include "llvm/IR/DataLayout.h"
78#include "llvm/IR/DebugInfo.h"
80#include "llvm/IR/DebugLoc.h"
82#include "llvm/IR/Dominators.h"
84#include "llvm/IR/Function.h"
85#include "llvm/IR/GCStrategy.h"
86#include "llvm/IR/GlobalAlias.h"
87#include "llvm/IR/GlobalValue.h"
89#include "llvm/IR/InlineAsm.h"
90#include "llvm/IR/InstVisitor.h"
91#include "llvm/IR/InstrTypes.h"
92#include "llvm/IR/Instruction.h"
95#include "llvm/IR/Intrinsics.h"
96#include "llvm/IR/IntrinsicsAArch64.h"
97#include "llvm/IR/IntrinsicsAMDGPU.h"
98#include "llvm/IR/IntrinsicsARM.h"
99#include "llvm/IR/IntrinsicsNVPTX.h"
100#include "llvm/IR/IntrinsicsWebAssembly.h"
101#include "llvm/IR/LLVMContext.h"
103#include "llvm/IR/Metadata.h"
104#include "llvm/IR/Module.h"
106#include "llvm/IR/PassManager.h"
107#include "llvm/IR/Statepoint.h"
108#include "llvm/IR/Type.h"
109#include "llvm/IR/Use.h"
110#include "llvm/IR/User.h"
112#include "llvm/IR/Value.h"
114#include "llvm/Pass.h"
116#include "llvm/Support/Casting.h"
120#include "llvm/Support/ModRef.h"
122#include <algorithm>
123#include <cassert>
124#include <cstdint>
125#include <memory>
126#include <optional>
127#include <string>
128#include <utility>
129
130using namespace llvm;
131
132static cl::opt<bool> VerifyNoAliasScopeDomination(
133 "verify-noalias-scope-decl-dom", cl::Hidden, cl::init(false),
134 cl::desc("Ensure that llvm.experimental.noalias.scope.decl for identical "
135 "scopes are not dominating"));
136
137namespace llvm {
138
139struct VerifierSupport {
140 raw_ostream *OS;
141 const Module &M;
142 ModuleSlotTracker MST;
143 Triple TT;
144 const DataLayout &DL;
145 LLVMContext &Context;
146
147 /// Track the brokenness of the module while recursively visiting.
148 bool Broken = false;
149 /// Broken debug info can be "recovered" from by stripping the debug info.
150 bool BrokenDebugInfo = false;
151 /// Whether to treat broken debug info as an error.
152 bool TreatBrokenDebugInfoAsError = true;
153
154 explicit VerifierSupport(raw_ostream *OS, const Module &M)
155 : OS(OS), M(M), MST(&M), TT(M.getTargetTriple()), DL(M.getDataLayout()),
156 Context(M.getContext()) {}
157
158private:
159 void Write(const Module *M) {
160 *OS << "; ModuleID = '" << M->getModuleIdentifier() << "'\n";
161 }
162
163 void Write(const Value *V) {
164 if (V)
165 Write(*V);
166 }
167
168 void Write(const Value &V) {
169 if (isa<Instruction>(V)) {
170 V.print(*OS, MST);
171 *OS << '\n';
172 } else {
173 V.printAsOperand(*OS, true, MST);
174 *OS << '\n';
175 }
176 }
177
178 void Write(const DbgRecord *DR) {
179 if (DR) {
180 DR->print(*OS, MST, false);
181 *OS << '\n';
182 }
183 }
184
185 void Write(DbgVariableRecord::LocationType Type) {
186 switch (Type) {
187 case DbgVariableRecord::LocationType::Value:
188 *OS << "value";
189 break;
190 case DbgVariableRecord::LocationType::Declare:
191 *OS << "declare";
192 break;
193 case DbgVariableRecord::LocationType::Assign:
194 *OS << "assign";
195 break;
196 case DbgVariableRecord::LocationType::End:
197 *OS << "end";
198 break;
199 case DbgVariableRecord::LocationType::Any:
200 *OS << "any";
201 break;
202 };
203 }
204
205 void Write(const Metadata *MD) {
206 if (!MD)
207 return;
208 MD->print(*OS, MST, &M);
209 *OS << '\n';
210 }
211
212 template <class T> void Write(const MDTupleTypedArrayWrapper<T> &MD) {
213 Write(MD.get());
214 }
215
216 void Write(const NamedMDNode *NMD) {
217 if (!NMD)
218 return;
219 NMD->print(*OS, MST);
220 *OS << '\n';
221 }
222
223 void Write(Type *T) {
224 if (!T)
225 return;
226 *OS << ' ' << *T;
227 }
228
229 void Write(const Comdat *C) {
230 if (!C)
231 return;
232 *OS << *C;
233 }
234
235 void Write(const APInt *AI) {
236 if (!AI)
237 return;
238 *OS << *AI << '\n';
239 }
240
241 void Write(const unsigned i) { *OS << i << '\n'; }
242
243 // NOLINTNEXTLINE(readability-identifier-naming)
244 void Write(const Attribute *A) {
245 if (!A)
246 return;
247 *OS << A->getAsString() << '\n';
248 }
249
250 // NOLINTNEXTLINE(readability-identifier-naming)
251 void Write(const AttributeSet *AS) {
252 if (!AS)
253 return;
254 *OS << AS->getAsString() << '\n';
255 }
256
257 // NOLINTNEXTLINE(readability-identifier-naming)
258 void Write(const AttributeList *AL) {
259 if (!AL)
260 return;
261 AL->print(*OS);
262 }
263
264 void Write(Printable P) { *OS << P << '\n'; }
265
266 template <typename T> void Write(ArrayRef<T> Vs) {
267 for (const T &V : Vs)
268 Write(V);
269 }
270
271 template <typename T1, typename... Ts>
272 void WriteTs(const T1 &V1, const Ts &... Vs) {
273 Write(V1);
274 WriteTs(Vs...);
275 }
276
277 template <typename... Ts> void WriteTs() {}
278
279public:
280 /// A check failed, so print out the condition and the message.
281 ///
282 /// This provides a nice place to put a breakpoint if you want to see why
283 /// something is not correct.
284 void CheckFailed(const Twine &Message) {
285 if (OS)
286 *OS << Message << '\n';
287 Broken = true;
288 }
289
290 /// A check failed (with values to print).
291 ///
292 /// This calls the Message-only version so that the above is easier to set a
293 /// breakpoint on.
294 template <typename T1, typename... Ts>
295 void CheckFailed(const Twine &Message, const T1 &V1, const Ts &... Vs) {
296 CheckFailed(Message);
297 if (OS)
298 WriteTs(V1, Vs...);
299 }
300
301 /// A debug info check failed.
302 void DebugInfoCheckFailed(const Twine &Message) {
303 if (OS)
304 *OS << Message << '\n';
305 Broken |= TreatBrokenDebugInfoAsError;
306 BrokenDebugInfo = true;
307 }
308
309 /// A debug info check failed (with values to print).
310 template <typename T1, typename... Ts>
311 void DebugInfoCheckFailed(const Twine &Message, const T1 &V1,
312 const Ts &... Vs) {
313 DebugInfoCheckFailed(Message);
314 if (OS)
315 WriteTs(V1, Vs...);
316 }
317};
318
319} // namespace llvm
320
321namespace {
322
323class Verifier : public InstVisitor<Verifier>, VerifierSupport {
324 friend class InstVisitor<Verifier>;
325
326 // ISD::ArgFlagsTy::MemAlign only has 4 bits for the alignment, so the
327 // raw shift amount cannot exceed 15 (an alignment of 2^15). Since
328 // encode(Align) stores the shift amount plus 1, the alignment must in
329 // fact not exceed 2^14; anything larger cannot be lowered properly in
330 // the backend.
331 static constexpr unsigned ParamMaxAlignment = 1 << 14;
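 // For example, given the encoding described above: Align(1 << 14) encodes
 // as 14 + 1 == 15, the largest value a 4-bit field can hold, while
 // Align(1 << 15) would need 16 and cannot be encoded, which is why
 // ParamMaxAlignment is 1 << 14.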
332 DominatorTree DT;
333
334 /// When verifying a basic block, keep track of all of the
335 /// instructions we have seen so far.
336 ///
337 /// This allows us to do efficient dominance checks for the case when an
338 /// instruction has an operand that is an instruction in the same block.
339 SmallPtrSet<Instruction *, 16> InstsInThisBlock;
340
341 /// Keep track of the metadata nodes that have been checked already.
343
344 /// Keep track which DISubprogram is attached to which function.
346
347 /// Track all DICompileUnits visited.
349
350 /// The result type for a landingpad.
351 Type *LandingPadResultTy;
352
353 /// Whether we've seen a call to @llvm.localescape in this function
354 /// already.
355 bool SawFrameEscape;
356
357 /// Whether the current function has a DISubprogram attached to it.
358 bool HasDebugInfo = false;
359
360 /// The current source language.
362
363 /// Stores the count of how many objects were passed to llvm.localescape for a
364 /// given function and the largest index passed to llvm.localrecover.
366
367 // Maps catchswitches and cleanuppads that unwind to siblings to the
368 // terminators that indicate the unwind, used to detect cycles therein.
370
371 /// Cache which blocks are in which funclet, if an EH funclet personality is
372 /// in use. Otherwise empty.
373 DenseMap<BasicBlock *, ColorVector> BlockEHFuncletColors;
374
375 /// Cache of constants visited in search of ConstantExprs.
376 SmallPtrSet<const Constant *, 32> ConstantExprVisited;
377
378 /// Cache of declarations of the llvm.experimental.deoptimize.<ty> intrinsic.
379 SmallVector<const Function *, 4> DeoptimizeDeclarations;
380
381 /// Cache of attribute lists verified.
382 SmallPtrSet<const void *, 32> AttributeListsVisited;
383
384 // Verify that this GlobalValue is only used in this module.
385 // This map is used to avoid visiting uses twice. We can arrive at a user
386 // twice if it has multiple operands. In particular, for very large
387 // constant expressions, we can arrive at a particular user many times.
388 SmallPtrSet<const Value *, 32> GlobalValueVisited;
389
390 // Keeps track of duplicate function argument debug info.
392
393 TBAAVerifier TBAAVerifyHelper;
394 ConvergenceVerifier ConvergenceVerifyHelper;
395
396 SmallVector<IntrinsicInst *, 4> NoAliasScopeDecls;
397
398 void checkAtomicMemAccessSize(Type *Ty, const Instruction *I);
399
400public:
401 explicit Verifier(raw_ostream *OS, bool ShouldTreatBrokenDebugInfoAsError,
402 const Module &M)
403 : VerifierSupport(OS, M), LandingPadResultTy(nullptr),
404 SawFrameEscape(false), TBAAVerifyHelper(this) {
405 TreatBrokenDebugInfoAsError = ShouldTreatBrokenDebugInfoAsError;
406 }
407
408 bool hasBrokenDebugInfo() const { return BrokenDebugInfo; }
409
410 bool verify(const Function &F) {
411 assert(F.getParent() == &M &&
412 "An instance of this class only works with a specific module!");
413
414 // First ensure the function is well-enough formed to compute dominance
415 // information, and directly compute a dominance tree. We don't rely on the
416 // pass manager to provide this as it isolates us from a potentially
417 // out-of-date dominator tree and makes it significantly more complex to run
418 // this code outside of a pass manager.
419 // FIXME: It's really gross that we have to cast away constness here.
420 if (!F.empty())
421 DT.recalculate(const_cast<Function &>(F));
422
423 for (const BasicBlock &BB : F) {
424 if (!BB.empty() && BB.back().isTerminator())
425 continue;
426
427 if (OS) {
428 *OS << "Basic Block in function '" << F.getName()
429 << "' does not have terminator!\n";
430 BB.printAsOperand(*OS, true, MST);
431 *OS << "\n";
432 }
433 return false;
434 }
435
436 auto FailureCB = [this](const Twine &Message) {
437 this->CheckFailed(Message);
438 };
439 ConvergenceVerifyHelper.initialize(OS, FailureCB, F);
440
441 Broken = false;
442 // FIXME: We strip const here because the inst visitor strips const.
443 visit(const_cast<Function &>(F));
444 verifySiblingFuncletUnwinds();
445
446 if (ConvergenceVerifyHelper.sawTokens())
447 ConvergenceVerifyHelper.verify(DT);
448
449 InstsInThisBlock.clear();
450 DebugFnArgs.clear();
451 LandingPadResultTy = nullptr;
452 SawFrameEscape = false;
453 SiblingFuncletInfo.clear();
454 verifyNoAliasScopeDecl();
455 NoAliasScopeDecls.clear();
456
457 return !Broken;
458 }
459
460 /// Verify the module that this instance of \c Verifier was initialized with.
461 bool verify() {
462 Broken = false;
463
464 // Collect all declarations of the llvm.experimental.deoptimize intrinsic.
465 for (const Function &F : M)
466 if (F.getIntrinsicID() == Intrinsic::experimental_deoptimize)
467 DeoptimizeDeclarations.push_back(&F);
468
469 // Now that we've visited every function, verify that we never asked to
470 // recover a frame index that wasn't escaped.
471 verifyFrameRecoverIndices();
472 for (const GlobalVariable &GV : M.globals())
473 visitGlobalVariable(GV);
474
475 for (const GlobalAlias &GA : M.aliases())
476 visitGlobalAlias(GA);
477
478 for (const GlobalIFunc &GI : M.ifuncs())
479 visitGlobalIFunc(GI);
480
481 for (const NamedMDNode &NMD : M.named_metadata())
482 visitNamedMDNode(NMD);
483
484 for (const StringMapEntry<Comdat> &SMEC : M.getComdatSymbolTable())
485 visitComdat(SMEC.getValue());
486
487 visitModuleFlags();
488 visitModuleIdents();
489 visitModuleCommandLines();
490
491 verifyCompileUnits();
492
493 verifyDeoptimizeCallingConvs();
494 DISubprogramAttachments.clear();
495 return !Broken;
496 }
497
498private:
499 /// Whether a metadata node is allowed to be, or contain, a DILocation.
500 enum class AreDebugLocsAllowed { No, Yes };
501
502 // Verification methods...
503 void visitGlobalValue(const GlobalValue &GV);
504 void visitGlobalVariable(const GlobalVariable &GV);
505 void visitGlobalAlias(const GlobalAlias &GA);
506 void visitGlobalIFunc(const GlobalIFunc &GI);
507 void visitAliaseeSubExpr(const GlobalAlias &A, const Constant &C);
508 void visitAliaseeSubExpr(SmallPtrSetImpl<const GlobalAlias *> &Visited,
509 const GlobalAlias &A, const Constant &C);
510 void visitNamedMDNode(const NamedMDNode &NMD);
511 void visitMDNode(const MDNode &MD, AreDebugLocsAllowed AllowLocs);
512 void visitMetadataAsValue(const MetadataAsValue &MD, Function *F);
513 void visitValueAsMetadata(const ValueAsMetadata &MD, Function *F);
514 void visitDIArgList(const DIArgList &AL, Function *F);
515 void visitComdat(const Comdat &C);
516 void visitModuleIdents();
517 void visitModuleCommandLines();
518 void visitModuleFlags();
519 void visitModuleFlag(const MDNode *Op,
521 SmallVectorImpl<const MDNode *> &Requirements);
522 void visitModuleFlagCGProfileEntry(const MDOperand &MDO);
523 void visitFunction(const Function &F);
524 void visitBasicBlock(BasicBlock &BB);
525 void verifyRangeMetadata(const Value &V, const MDNode *Range, Type *Ty,
526 bool IsAbsoluteSymbol);
527 void visitRangeMetadata(Instruction &I, MDNode *Range, Type *Ty);
528 void visitDereferenceableMetadata(Instruction &I, MDNode *MD);
529 void visitProfMetadata(Instruction &I, MDNode *MD);
530 void visitCallStackMetadata(MDNode *MD);
531 void visitMemProfMetadata(Instruction &I, MDNode *MD);
532 void visitCallsiteMetadata(Instruction &I, MDNode *MD);
533 void visitDIAssignIDMetadata(Instruction &I, MDNode *MD);
534 void visitMMRAMetadata(Instruction &I, MDNode *MD);
535 void visitAnnotationMetadata(MDNode *Annotation);
536 void visitAliasScopeMetadata(const MDNode *MD);
537 void visitAliasScopeListMetadata(const MDNode *MD);
538 void visitAccessGroupMetadata(const MDNode *MD);
539
540 template <class Ty> bool isValidMetadataArray(const MDTuple &N);
541#define HANDLE_SPECIALIZED_MDNODE_LEAF(CLASS) void visit##CLASS(const CLASS &N);
542#include "llvm/IR/Metadata.def"
543 void visitDIScope(const DIScope &N);
544 void visitDIVariable(const DIVariable &N);
545 void visitDILexicalBlockBase(const DILexicalBlockBase &N);
546 void visitDITemplateParameter(const DITemplateParameter &N);
547
548 void visitTemplateParams(const MDNode &N, const Metadata &RawParams);
549
550 void visit(DbgLabelRecord &DLR);
551 void visit(DbgVariableRecord &DVR);
552 // InstVisitor overrides...
554 void visitDbgRecords(Instruction &I);
555 void visit(Instruction &I);
556
557 void visitTruncInst(TruncInst &I);
558 void visitZExtInst(ZExtInst &I);
559 void visitSExtInst(SExtInst &I);
560 void visitFPTruncInst(FPTruncInst &I);
561 void visitFPExtInst(FPExtInst &I);
562 void visitFPToUIInst(FPToUIInst &I);
563 void visitFPToSIInst(FPToSIInst &I);
564 void visitUIToFPInst(UIToFPInst &I);
565 void visitSIToFPInst(SIToFPInst &I);
566 void visitIntToPtrInst(IntToPtrInst &I);
567 void visitPtrToIntInst(PtrToIntInst &I);
568 void visitBitCastInst(BitCastInst &I);
569 void visitAddrSpaceCastInst(AddrSpaceCastInst &I);
570 void visitPHINode(PHINode &PN);
571 void visitCallBase(CallBase &Call);
572 void visitUnaryOperator(UnaryOperator &U);
573 void visitBinaryOperator(BinaryOperator &B);
574 void visitICmpInst(ICmpInst &IC);
575 void visitFCmpInst(FCmpInst &FC);
576 void visitExtractElementInst(ExtractElementInst &EI);
577 void visitInsertElementInst(InsertElementInst &EI);
578 void visitShuffleVectorInst(ShuffleVectorInst &EI);
579 void visitVAArgInst(VAArgInst &VAA) { visitInstruction(VAA); }
580 void visitCallInst(CallInst &CI);
581 void visitInvokeInst(InvokeInst &II);
582 void visitGetElementPtrInst(GetElementPtrInst &GEP);
583 void visitLoadInst(LoadInst &LI);
584 void visitStoreInst(StoreInst &SI);
585 void verifyDominatesUse(Instruction &I, unsigned i);
586 void visitInstruction(Instruction &I);
587 void visitTerminator(Instruction &I);
588 void visitBranchInst(BranchInst &BI);
589 void visitReturnInst(ReturnInst &RI);
590 void visitSwitchInst(SwitchInst &SI);
591 void visitIndirectBrInst(IndirectBrInst &BI);
592 void visitCallBrInst(CallBrInst &CBI);
593 void visitSelectInst(SelectInst &SI);
594 void visitUserOp1(Instruction &I);
595 void visitUserOp2(Instruction &I) { visitUserOp1(I); }
596 void visitIntrinsicCall(Intrinsic::ID ID, CallBase &Call);
597 void visitConstrainedFPIntrinsic(ConstrainedFPIntrinsic &FPI);
598 void visitVPIntrinsic(VPIntrinsic &VPI);
599 void visitDbgIntrinsic(StringRef Kind, DbgVariableIntrinsic &DII);
600 void visitDbgLabelIntrinsic(StringRef Kind, DbgLabelInst &DLI);
601 void visitAtomicCmpXchgInst(AtomicCmpXchgInst &CXI);
602 void visitAtomicRMWInst(AtomicRMWInst &RMWI);
603 void visitFenceInst(FenceInst &FI);
604 void visitAllocaInst(AllocaInst &AI);
605 void visitExtractValueInst(ExtractValueInst &EVI);
606 void visitInsertValueInst(InsertValueInst &IVI);
607 void visitEHPadPredecessors(Instruction &I);
608 void visitLandingPadInst(LandingPadInst &LPI);
609 void visitResumeInst(ResumeInst &RI);
610 void visitCatchPadInst(CatchPadInst &CPI);
611 void visitCatchReturnInst(CatchReturnInst &CatchReturn);
612 void visitCleanupPadInst(CleanupPadInst &CPI);
613 void visitFuncletPadInst(FuncletPadInst &FPI);
614 void visitCatchSwitchInst(CatchSwitchInst &CatchSwitch);
615 void visitCleanupReturnInst(CleanupReturnInst &CRI);
616
617 void verifySwiftErrorCall(CallBase &Call, const Value *SwiftErrorVal);
618 void verifySwiftErrorValue(const Value *SwiftErrorVal);
619 void verifyTailCCMustTailAttrs(const AttrBuilder &Attrs, StringRef Context);
620 void verifyMustTailCall(CallInst &CI);
621 bool verifyAttributeCount(AttributeList Attrs, unsigned Params);
622 void verifyAttributeTypes(AttributeSet Attrs, const Value *V);
623 void verifyParameterAttrs(AttributeSet Attrs, Type *Ty, const Value *V);
624 void checkUnsignedBaseTenFuncAttr(AttributeList Attrs, StringRef Attr,
625 const Value *V);
626 void verifyFunctionAttrs(FunctionType *FT, AttributeList Attrs,
627 const Value *V, bool IsIntrinsic, bool IsInlineAsm);
628 void verifyFunctionMetadata(ArrayRef<std::pair<unsigned, MDNode *>> MDs);
629
630 void visitConstantExprsRecursively(const Constant *EntryC);
631 void visitConstantExpr(const ConstantExpr *CE);
632 void verifyInlineAsmCall(const CallBase &Call);
633 void verifyStatepoint(const CallBase &Call);
634 void verifyFrameRecoverIndices();
635 void verifySiblingFuncletUnwinds();
636
637 void verifyFragmentExpression(const DbgVariableIntrinsic &I);
638 void verifyFragmentExpression(const DbgVariableRecord &I);
639 template <typename ValueOrMetadata>
640 void verifyFragmentExpression(const DIVariable &V,
642 ValueOrMetadata *Desc);
643 void verifyFnArgs(const DbgVariableIntrinsic &I);
644 void verifyFnArgs(const DbgVariableRecord &DVR);
645 void verifyNotEntryValue(const DbgVariableIntrinsic &I);
646 void verifyNotEntryValue(const DbgVariableRecord &I);
647
648 /// Module-level debug info verification...
649 void verifyCompileUnits();
650
651 /// Module-level verification that all @llvm.experimental.deoptimize
652 /// declarations share the same calling convention.
653 void verifyDeoptimizeCallingConvs();
654
655 void verifyAttachedCallBundle(const CallBase &Call,
656 const OperandBundleUse &BU);
657
658 /// Verify the llvm.experimental.noalias.scope.decl declarations
659 void verifyNoAliasScopeDecl();
660};
661
662} // end anonymous namespace
663
664/// We know that cond should be true, if not print an error message.
665#define Check(C, ...) \
666 do { \
667 if (!(C)) { \
668 CheckFailed(__VA_ARGS__); \
669 return; \
670 } \
671 } while (false)
672
673/// We know that a debug info condition should be true, if not print
674/// an error message.
675#define CheckDI(C, ...) \
676 do { \
677 if (!(C)) { \
678 DebugInfoCheckFailed(__VA_ARGS__); \
679 return; \
680 } \
681 } while (false)
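
// For illustration, a sketch of how the visitors below use these macros: pass
// a condition, a message, and optional values to dump on failure. The first
// line is a hypothetical check; the second mirrors a real one later in this
// file.
//
//   Check(Call.arg_size() == 2, "expected exactly two arguments", &Call);
//   CheckDI(N.isDistinct(), "compile units must be distinct", &N);
//
// On failure, CheckFailed()/DebugInfoCheckFailed() record the problem and the
// macro returns early from the calling method.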
682
683void Verifier::visitDbgRecords(Instruction &I) {
684 if (!I.DebugMarker)
685 return;
686 CheckDI(I.DebugMarker->MarkedInstr == &I,
687 "Instruction has invalid DebugMarker", &I);
688 CheckDI(!isa<PHINode>(&I) || !I.hasDbgRecords(),
689 "PHI Node must not have any attached DbgRecords", &I);
690 for (DbgRecord &DR : I.getDbgRecordRange()) {
691 CheckDI(DR.getMarker() == I.DebugMarker,
692 "DbgRecord had invalid DebugMarker", &I, &DR);
693 if (auto *Loc =
694 dyn_cast_or_null<DILocation>(DR.getDebugLoc().getAsMDNode()))
695 visitMDNode(*Loc, AreDebugLocsAllowed::Yes);
696 if (auto *DVR = dyn_cast<DbgVariableRecord>(&DR)) {
697 visit(*DVR);
698 // These have to appear after `visit` for consistency with existing
699 // intrinsic behaviour.
700 verifyFragmentExpression(*DVR);
701 verifyNotEntryValue(*DVR);
702 } else if (auto *DLR = dyn_cast<DbgLabelRecord>(&DR)) {
703 visit(*DLR);
704 }
705 }
706}
707
708void Verifier::visit(Instruction &I) {
709 visitDbgRecords(I);
710 for (unsigned i = 0, e = I.getNumOperands(); i != e; ++i)
711 Check(I.getOperand(i) != nullptr, "Operand is null", &I);
713}
714
715// Helper to iterate over indirect users. By returning false, the callback can ask to stop traversing further.
716static void forEachUser(const Value *User,
717 SmallPtrSet<const Value *, 32> &Visited,
718 llvm::function_ref<bool(const Value *)> Callback) {
719 if (!Visited.insert(User).second)
720 return;
721
722 SmallVector<const Value *> WorkList;
723 append_range(WorkList, User->materialized_users());
724 while (!WorkList.empty()) {
725 const Value *Cur = WorkList.pop_back_val();
726 if (!Visited.insert(Cur).second)
727 continue;
728 if (Callback(Cur))
729 append_range(WorkList, Cur->materialized_users());
730 }
731}
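
// Usage sketch (hypothetical; the real caller in visitGlobalValue below passes
// GlobalValueVisited): walk the transitive users of a value, descending only
// through non-instruction users such as constant expressions.
//
//   SmallPtrSet<const Value *, 32> Visited;
//   forEachUser(V, Visited, [](const Value *U) {
//     return !isa<Instruction>(U); // returning false stops at instructions
//   });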
732
733void Verifier::visitGlobalValue(const GlobalValue &GV) {
734 Check(!GV.isDeclaration() || GV.hasValidDeclarationLinkage(),
735 "Global is external, but doesn't have external or weak linkage!", &GV);
736
737 if (const GlobalObject *GO = dyn_cast<GlobalObject>(&GV)) {
738
739 if (MaybeAlign A = GO->getAlign()) {
740 Check(A->value() <= Value::MaximumAlignment,
741 "huge alignment values are unsupported", GO);
742 }
743
744 if (const MDNode *Associated =
745 GO->getMetadata(LLVMContext::MD_associated)) {
746 Check(Associated->getNumOperands() == 1,
747 "associated metadata must have one operand", &GV, Associated);
748 const Metadata *Op = Associated->getOperand(0).get();
749 Check(Op, "associated metadata must have a global value", GO, Associated);
750
751 const auto *VM = dyn_cast_or_null<ValueAsMetadata>(Op);
752 Check(VM, "associated metadata must be ValueAsMetadata", GO, Associated);
753 if (VM) {
754 Check(isa<PointerType>(VM->getValue()->getType()),
755 "associated value must be pointer typed", GV, Associated);
756
757 const Value *Stripped = VM->getValue()->stripPointerCastsAndAliases();
758 Check(isa<GlobalObject>(Stripped) || isa<Constant>(Stripped),
759 "associated metadata must point to a GlobalObject", GO, Stripped);
760 Check(Stripped != GO,
761 "global values should not associate to themselves", GO,
762 Associated);
763 }
764 }
765
766 // FIXME: Why is getMetadata on GlobalValue protected?
767 if (const MDNode *AbsoluteSymbol =
768 GO->getMetadata(LLVMContext::MD_absolute_symbol)) {
769 verifyRangeMetadata(*GO, AbsoluteSymbol, DL.getIntPtrType(GO->getType()),
770 true);
771 }
772 }
773
774 Check(!GV.hasAppendingLinkage() || isa<GlobalVariable>(GV),
775 "Only global variables can have appending linkage!", &GV);
776
777 if (GV.hasAppendingLinkage()) {
778 const GlobalVariable *GVar = dyn_cast<GlobalVariable>(&GV);
779 Check(GVar && GVar->getValueType()->isArrayTy(),
780 "Only global arrays can have appending linkage!", GVar);
781 }
782
783 if (GV.isDeclarationForLinker())
784 Check(!GV.hasComdat(), "Declaration may not be in a Comdat!", &GV);
785
786 if (GV.hasDLLExportStorageClass()) {
788 "dllexport GlobalValue must have default or protected visibility",
789 &GV);
790 }
791 if (GV.hasDLLImportStorageClass()) {
793 "dllimport GlobalValue must have default visibility", &GV);
794 Check(!GV.isDSOLocal(), "GlobalValue with DLLImport Storage is dso_local!",
795 &GV);
796
797 Check((GV.isDeclaration() &&
800 "Global is marked as dllimport, but not external", &GV);
801 }
802
803 if (GV.isImplicitDSOLocal())
804 Check(GV.isDSOLocal(),
805 "GlobalValue with local linkage or non-default "
806 "visibility must be dso_local!",
807 &GV);
808
809 forEachUser(&GV, GlobalValueVisited, [&](const Value *V) -> bool {
810 if (const Instruction *I = dyn_cast<Instruction>(V)) {
811 if (!I->getParent() || !I->getParent()->getParent())
812 CheckFailed("Global is referenced by parentless instruction!", &GV, &M,
813 I);
814 else if (I->getParent()->getParent()->getParent() != &M)
815 CheckFailed("Global is referenced in a different module!", &GV, &M, I,
816 I->getParent()->getParent(),
817 I->getParent()->getParent()->getParent());
818 return false;
819 } else if (const Function *F = dyn_cast<Function>(V)) {
820 if (F->getParent() != &M)
821 CheckFailed("Global is used by function in a different module", &GV, &M,
822 F, F->getParent());
823 return false;
824 }
825 return true;
826 });
827}
828
829void Verifier::visitGlobalVariable(const GlobalVariable &GV) {
830 if (GV.hasInitializer()) {
832 "Global variable initializer type does not match global "
833 "variable type!",
834 &GV);
835 // If the global has common linkage, it must have a zero initializer and
836 // cannot be constant.
837 if (GV.hasCommonLinkage()) {
839 "'common' global must have a zero initializer!", &GV);
840 Check(!GV.isConstant(), "'common' global may not be marked constant!",
841 &GV);
842 Check(!GV.hasComdat(), "'common' global may not be in a Comdat!", &GV);
843 }
844 }
845
846 if (GV.hasName() && (GV.getName() == "llvm.global_ctors" ||
847 GV.getName() == "llvm.global_dtors")) {
849 "invalid linkage for intrinsic global variable", &GV);
851 "invalid uses of intrinsic global variable", &GV);
852
853 // Don't worry about emitting an error for it not being an array,
854 // visitGlobalValue will complain on appending non-array.
855 if (ArrayType *ATy = dyn_cast<ArrayType>(GV.getValueType())) {
856 StructType *STy = dyn_cast<StructType>(ATy->getElementType());
857 PointerType *FuncPtrTy =
858 PointerType::get(Context, DL.getProgramAddressSpace());
859 Check(STy && (STy->getNumElements() == 2 || STy->getNumElements() == 3) &&
860 STy->getTypeAtIndex(0u)->isIntegerTy(32) &&
861 STy->getTypeAtIndex(1) == FuncPtrTy,
862 "wrong type for intrinsic global variable", &GV);
863 Check(STy->getNumElements() == 3,
864 "the third field of the element type is mandatory, "
865 "specify ptr null to migrate from the obsoleted 2-field form");
866 Type *ETy = STy->getTypeAtIndex(2);
867 Check(ETy->isPointerTy(), "wrong type for intrinsic global variable",
868 &GV);
869 }
870 }
871
872 if (GV.hasName() && (GV.getName() == "llvm.used" ||
873 GV.getName() == "llvm.compiler.used")) {
875 "invalid linkage for intrinsic global variable", &GV);
877 "invalid uses of intrinsic global variable", &GV);
878
879 Type *GVType = GV.getValueType();
880 if (ArrayType *ATy = dyn_cast<ArrayType>(GVType)) {
881 PointerType *PTy = dyn_cast<PointerType>(ATy->getElementType());
882 Check(PTy, "wrong type for intrinsic global variable", &GV);
883 if (GV.hasInitializer()) {
884 const Constant *Init = GV.getInitializer();
885 const ConstantArray *InitArray = dyn_cast<ConstantArray>(Init);
886 Check(InitArray, "wrong initializer for intrinsic global variable",
887 Init);
888 for (Value *Op : InitArray->operands()) {
889 Value *V = Op->stripPointerCasts();
890 Check(isa<GlobalVariable>(V) || isa<Function>(V) ||
891 isa<GlobalAlias>(V),
892 Twine("invalid ") + GV.getName() + " member", V);
893 Check(V->hasName(),
894 Twine("members of ") + GV.getName() + " must be named", V);
895 }
896 }
897 }
898 }
899
900 // Visit any debug info attachments.
902 GV.getMetadata(LLVMContext::MD_dbg, MDs);
903 for (auto *MD : MDs) {
904 if (auto *GVE = dyn_cast<DIGlobalVariableExpression>(MD))
905 visitDIGlobalVariableExpression(*GVE);
906 else
907 CheckDI(false, "!dbg attachment of global variable must be a "
908 "DIGlobalVariableExpression");
909 }
910
911 // Scalable vectors cannot be global variables, since we don't know
912 // the runtime size.
914 "Globals cannot contain scalable types", &GV);
915
916 // Check if it's a target extension type that disallows being used as a
917 // global.
918 if (auto *TTy = dyn_cast<TargetExtType>(GV.getValueType()))
919 Check(TTy->hasProperty(TargetExtType::CanBeGlobal),
920 "Global @" + GV.getName() + " has illegal target extension type",
921 TTy);
922
923 if (!GV.hasInitializer()) {
924 visitGlobalValue(GV);
925 return;
926 }
927
928 // Walk any aggregate initializers looking for bitcasts between address spaces
929 visitConstantExprsRecursively(GV.getInitializer());
930
931 visitGlobalValue(GV);
932}
933
934void Verifier::visitAliaseeSubExpr(const GlobalAlias &GA, const Constant &C) {
936 Visited.insert(&GA);
937 visitAliaseeSubExpr(Visited, GA, C);
938}
939
940void Verifier::visitAliaseeSubExpr(SmallPtrSetImpl<const GlobalAlias*> &Visited,
941 const GlobalAlias &GA, const Constant &C) {
943 Check(isa<GlobalValue>(C) &&
944 cast<GlobalValue>(C).hasAvailableExternallyLinkage(),
945 "available_externally alias must point to available_externally "
946 "global value",
947 &GA);
948 }
949 if (const auto *GV = dyn_cast<GlobalValue>(&C)) {
951 Check(!GV->isDeclarationForLinker(), "Alias must point to a definition",
952 &GA);
953 }
954
955 if (const auto *GA2 = dyn_cast<GlobalAlias>(GV)) {
956 Check(Visited.insert(GA2).second, "Aliases cannot form a cycle", &GA);
957
958 Check(!GA2->isInterposable(),
959 "Alias cannot point to an interposable alias", &GA);
960 } else {
961 // Only continue verifying subexpressions of GlobalAliases.
962 // Do not recurse into global initializers.
963 return;
964 }
965 }
966
967 if (const auto *CE = dyn_cast<ConstantExpr>(&C))
968 visitConstantExprsRecursively(CE);
969
970 for (const Use &U : C.operands()) {
971 Value *V = &*U;
972 if (const auto *GA2 = dyn_cast<GlobalAlias>(V))
973 visitAliaseeSubExpr(Visited, GA, *GA2->getAliasee());
974 else if (const auto *C2 = dyn_cast<Constant>(V))
975 visitAliaseeSubExpr(Visited, GA, *C2);
976 }
977}
978
979void Verifier::visitGlobalAlias(const GlobalAlias &GA) {
981 "Alias should have private, internal, linkonce, weak, linkonce_odr, "
982 "weak_odr, external, or available_externally linkage!",
983 &GA);
984 const Constant *Aliasee = GA.getAliasee();
985 Check(Aliasee, "Aliasee cannot be NULL!", &GA);
986 Check(GA.getType() == Aliasee->getType(),
987 "Alias and aliasee types should match!", &GA);
988
989 Check(isa<GlobalValue>(Aliasee) || isa<ConstantExpr>(Aliasee),
990 "Aliasee should be either GlobalValue or ConstantExpr", &GA);
991
992 visitAliaseeSubExpr(GA, *Aliasee);
993
994 visitGlobalValue(GA);
995}
996
997void Verifier::visitGlobalIFunc(const GlobalIFunc &GI) {
999 "IFunc should have private, internal, linkonce, weak, linkonce_odr, "
1000 "weak_odr, or external linkage!",
1001 &GI);
1002 // Pierce through ConstantExprs and GlobalAliases and check that the resolver
1003 // is a Function definition.
1005 Check(Resolver, "IFunc must have a Function resolver", &GI);
1006 Check(!Resolver->isDeclarationForLinker(),
1007 "IFunc resolver must be a definition", &GI);
1008
1009 // Check that the immediate resolver operand (prior to any bitcasts) has the
1010 // correct type.
1011 const Type *ResolverTy = GI.getResolver()->getType();
1012
1013 Check(isa<PointerType>(Resolver->getFunctionType()->getReturnType()),
1014 "IFunc resolver must return a pointer", &GI);
1015
1016 const Type *ResolverFuncTy =
1018 Check(ResolverTy == ResolverFuncTy->getPointerTo(GI.getAddressSpace()),
1019 "IFunc resolver has incorrect type", &GI);
1020}
1021
1022void Verifier::visitNamedMDNode(const NamedMDNode &NMD) {
1023 // There used to be various other llvm.dbg.* nodes, but we don't support
1024 // upgrading them and we want to reserve the namespace for future uses.
1025 if (NMD.getName().starts_with("llvm.dbg."))
1026 CheckDI(NMD.getName() == "llvm.dbg.cu",
1027 "unrecognized named metadata node in the llvm.dbg namespace", &NMD);
1028 for (const MDNode *MD : NMD.operands()) {
1029 if (NMD.getName() == "llvm.dbg.cu")
1030 CheckDI(MD && isa<DICompileUnit>(MD), "invalid compile unit", &NMD, MD);
1031
1032 if (!MD)
1033 continue;
1034
1035 visitMDNode(*MD, AreDebugLocsAllowed::Yes);
1036 }
1037}
1038
1039void Verifier::visitMDNode(const MDNode &MD, AreDebugLocsAllowed AllowLocs) {
1040 // Only visit each node once. Metadata can be mutually recursive, so this
1041 // avoids infinite recursion here, as well as being an optimization.
1042 if (!MDNodes.insert(&MD).second)
1043 return;
1044
1045 Check(&MD.getContext() == &Context,
1046 "MDNode context does not match Module context!", &MD);
1047
1048 switch (MD.getMetadataID()) {
1049 default:
1050 llvm_unreachable("Invalid MDNode subclass");
1051 case Metadata::MDTupleKind:
1052 break;
1053#define HANDLE_SPECIALIZED_MDNODE_LEAF(CLASS) \
1054 case Metadata::CLASS##Kind: \
1055 visit##CLASS(cast<CLASS>(MD)); \
1056 break;
1057#include "llvm/IR/Metadata.def"
1058 }
1059
1060 for (const Metadata *Op : MD.operands()) {
1061 if (!Op)
1062 continue;
1063 Check(!isa<LocalAsMetadata>(Op), "Invalid operand for global metadata!",
1064 &MD, Op);
1065 CheckDI(!isa<DILocation>(Op) || AllowLocs == AreDebugLocsAllowed::Yes,
1066 "DILocation not allowed within this metadata node", &MD, Op);
1067 if (auto *N = dyn_cast<MDNode>(Op)) {
1068 visitMDNode(*N, AllowLocs);
1069 continue;
1070 }
1071 if (auto *V = dyn_cast<ValueAsMetadata>(Op)) {
1072 visitValueAsMetadata(*V, nullptr);
1073 continue;
1074 }
1075 }
1076
1077 // Check these last, so we diagnose problems in operands first.
1078 Check(!MD.isTemporary(), "Expected no forward declarations!", &MD);
1079 Check(MD.isResolved(), "All nodes should be resolved!", &MD);
1080}
1081
1082void Verifier::visitValueAsMetadata(const ValueAsMetadata &MD, Function *F) {
1083 Check(MD.getValue(), "Expected valid value", &MD);
1084 Check(!MD.getValue()->getType()->isMetadataTy(),
1085 "Unexpected metadata round-trip through values", &MD, MD.getValue());
1086
1087 auto *L = dyn_cast<LocalAsMetadata>(&MD);
1088 if (!L)
1089 return;
1090
1091 Check(F, "function-local metadata used outside a function", L);
1092
1093 // If this was an instruction, bb, or argument, verify that it is in the
1094 // function that we expect.
1095 Function *ActualF = nullptr;
1096 if (Instruction *I = dyn_cast<Instruction>(L->getValue())) {
1097 Check(I->getParent(), "function-local metadata not in basic block", L, I);
1098 ActualF = I->getParent()->getParent();
1099 } else if (BasicBlock *BB = dyn_cast<BasicBlock>(L->getValue()))
1100 ActualF = BB->getParent();
1101 else if (Argument *A = dyn_cast<Argument>(L->getValue()))
1102 ActualF = A->getParent();
1103 assert(ActualF && "Unimplemented function local metadata case!");
1104
1105 Check(ActualF == F, "function-local metadata used in wrong function", L);
1106}
1107
1108void Verifier::visitDIArgList(const DIArgList &AL, Function *F) {
1109 for (const ValueAsMetadata *VAM : AL.getArgs())
1110 visitValueAsMetadata(*VAM, F);
1111}
1112
1113void Verifier::visitMetadataAsValue(const MetadataAsValue &MDV, Function *F) {
1114 Metadata *MD = MDV.getMetadata();
1115 if (auto *N = dyn_cast<MDNode>(MD)) {
1116 visitMDNode(*N, AreDebugLocsAllowed::No);
1117 return;
1118 }
1119
1120 // Only visit each node once. Metadata can be mutually recursive, so this
1121 // avoids infinite recursion here, as well as being an optimization.
1122 if (!MDNodes.insert(MD).second)
1123 return;
1124
1125 if (auto *V = dyn_cast<ValueAsMetadata>(MD))
1126 visitValueAsMetadata(*V, F);
1127
1128 if (auto *AL = dyn_cast<DIArgList>(MD))
1129 visitDIArgList(*AL, F);
1130}
1131
1132static bool isType(const Metadata *MD) { return !MD || isa<DIType>(MD); }
1133static bool isScope(const Metadata *MD) { return !MD || isa<DIScope>(MD); }
1134static bool isDINode(const Metadata *MD) { return !MD || isa<DINode>(MD); }
1135
1136void Verifier::visitDILocation(const DILocation &N) {
1137 CheckDI(N.getRawScope() && isa<DILocalScope>(N.getRawScope()),
1138 "location requires a valid scope", &N, N.getRawScope());
1139 if (auto *IA = N.getRawInlinedAt())
1140 CheckDI(isa<DILocation>(IA), "inlined-at should be a location", &N, IA);
1141 if (auto *SP = dyn_cast<DISubprogram>(N.getRawScope()))
1142 CheckDI(SP->isDefinition(), "scope points into the type hierarchy", &N);
1143}
1144
1145void Verifier::visitGenericDINode(const GenericDINode &N) {
1146 CheckDI(N.getTag(), "invalid tag", &N);
1147}
1148
1149void Verifier::visitDIScope(const DIScope &N) {
1150 if (auto *F = N.getRawFile())
1151 CheckDI(isa<DIFile>(F), "invalid file", &N, F);
1152}
1153
1154void Verifier::visitDISubrange(const DISubrange &N) {
1155 CheckDI(N.getTag() == dwarf::DW_TAG_subrange_type, "invalid tag", &N);
1156 bool HasAssumedSizedArraySupport = dwarf::isFortran(CurrentSourceLang);
1157 CheckDI(HasAssumedSizedArraySupport || N.getRawCountNode() ||
1158 N.getRawUpperBound(),
1159 "Subrange must contain count or upperBound", &N);
1160 CheckDI(!N.getRawCountNode() || !N.getRawUpperBound(),
1161 "Subrange can have any one of count or upperBound", &N);
1162 auto *CBound = N.getRawCountNode();
1163 CheckDI(!CBound || isa<ConstantAsMetadata>(CBound) ||
1164 isa<DIVariable>(CBound) || isa<DIExpression>(CBound),
1165 "Count must be signed constant or DIVariable or DIExpression", &N);
1166 auto Count = N.getCount();
1167 CheckDI(!Count || !isa<ConstantInt *>(Count) ||
1168 cast<ConstantInt *>(Count)->getSExtValue() >= -1,
1169 "invalid subrange count", &N);
1170 auto *LBound = N.getRawLowerBound();
1171 CheckDI(!LBound || isa<ConstantAsMetadata>(LBound) ||
1172 isa<DIVariable>(LBound) || isa<DIExpression>(LBound),
1173 "LowerBound must be signed constant or DIVariable or DIExpression",
1174 &N);
1175 auto *UBound = N.getRawUpperBound();
1176 CheckDI(!UBound || isa<ConstantAsMetadata>(UBound) ||
1177 isa<DIVariable>(UBound) || isa<DIExpression>(UBound),
1178 "UpperBound must be signed constant or DIVariable or DIExpression",
1179 &N);
1180 auto *Stride = N.getRawStride();
1181 CheckDI(!Stride || isa<ConstantAsMetadata>(Stride) ||
1182 isa<DIVariable>(Stride) || isa<DIExpression>(Stride),
1183 "Stride must be signed constant or DIVariable or DIExpression", &N);
1184}
1185
1186void Verifier::visitDIGenericSubrange(const DIGenericSubrange &N) {
1187 CheckDI(N.getTag() == dwarf::DW_TAG_generic_subrange, "invalid tag", &N);
1188 CheckDI(N.getRawCountNode() || N.getRawUpperBound(),
1189 "GenericSubrange must contain count or upperBound", &N);
1190 CheckDI(!N.getRawCountNode() || !N.getRawUpperBound(),
1191 "GenericSubrange can have any one of count or upperBound", &N);
1192 auto *CBound = N.getRawCountNode();
1193 CheckDI(!CBound || isa<DIVariable>(CBound) || isa<DIExpression>(CBound),
1194 "Count must be signed constant or DIVariable or DIExpression", &N);
1195 auto *LBound = N.getRawLowerBound();
1196 CheckDI(LBound, "GenericSubrange must contain lowerBound", &N);
1197 CheckDI(isa<DIVariable>(LBound) || isa<DIExpression>(LBound),
1198 "LowerBound must be signed constant or DIVariable or DIExpression",
1199 &N);
1200 auto *UBound = N.getRawUpperBound();
1201 CheckDI(!UBound || isa<DIVariable>(UBound) || isa<DIExpression>(UBound),
1202 "UpperBound must be signed constant or DIVariable or DIExpression",
1203 &N);
1204 auto *Stride = N.getRawStride();
1205 CheckDI(Stride, "GenericSubrange must contain stride", &N);
1206 CheckDI(isa<DIVariable>(Stride) || isa<DIExpression>(Stride),
1207 "Stride must be signed constant or DIVariable or DIExpression", &N);
1208}
1209
1210void Verifier::visitDIEnumerator(const DIEnumerator &N) {
1211 CheckDI(N.getTag() == dwarf::DW_TAG_enumerator, "invalid tag", &N);
1212}
1213
1214void Verifier::visitDIBasicType(const DIBasicType &N) {
1215 CheckDI(N.getTag() == dwarf::DW_TAG_base_type ||
1216 N.getTag() == dwarf::DW_TAG_unspecified_type ||
1217 N.getTag() == dwarf::DW_TAG_string_type,
1218 "invalid tag", &N);
1219}
1220
1221void Verifier::visitDIStringType(const DIStringType &N) {
1222 CheckDI(N.getTag() == dwarf::DW_TAG_string_type, "invalid tag", &N);
1223 CheckDI(!(N.isBigEndian() && N.isLittleEndian()), "has conflicting flags",
1224 &N);
1225}
1226
1227void Verifier::visitDIDerivedType(const DIDerivedType &N) {
1228 // Common scope checks.
1229 visitDIScope(N);
1230
1231 CheckDI(N.getTag() == dwarf::DW_TAG_typedef ||
1232 N.getTag() == dwarf::DW_TAG_pointer_type ||
1233 N.getTag() == dwarf::DW_TAG_ptr_to_member_type ||
1234 N.getTag() == dwarf::DW_TAG_reference_type ||
1235 N.getTag() == dwarf::DW_TAG_rvalue_reference_type ||
1236 N.getTag() == dwarf::DW_TAG_const_type ||
1237 N.getTag() == dwarf::DW_TAG_immutable_type ||
1238 N.getTag() == dwarf::DW_TAG_volatile_type ||
1239 N.getTag() == dwarf::DW_TAG_restrict_type ||
1240 N.getTag() == dwarf::DW_TAG_atomic_type ||
1241 N.getTag() == dwarf::DW_TAG_LLVM_ptrauth_type ||
1242 N.getTag() == dwarf::DW_TAG_member ||
1243 (N.getTag() == dwarf::DW_TAG_variable && N.isStaticMember()) ||
1244 N.getTag() == dwarf::DW_TAG_inheritance ||
1245 N.getTag() == dwarf::DW_TAG_friend ||
1246 N.getTag() == dwarf::DW_TAG_set_type ||
1247 N.getTag() == dwarf::DW_TAG_template_alias,
1248 "invalid tag", &N);
1249 if (N.getTag() == dwarf::DW_TAG_ptr_to_member_type) {
1250 CheckDI(isType(N.getRawExtraData()), "invalid pointer to member type", &N,
1251 N.getRawExtraData());
1252 }
1253
1254 if (N.getTag() == dwarf::DW_TAG_set_type) {
1255 if (auto *T = N.getRawBaseType()) {
1256 auto *Enum = dyn_cast_or_null<DICompositeType>(T);
1257 auto *Basic = dyn_cast_or_null<DIBasicType>(T);
1258 CheckDI(
1259 (Enum && Enum->getTag() == dwarf::DW_TAG_enumeration_type) ||
1260 (Basic && (Basic->getEncoding() == dwarf::DW_ATE_unsigned ||
1261 Basic->getEncoding() == dwarf::DW_ATE_signed ||
1262 Basic->getEncoding() == dwarf::DW_ATE_unsigned_char ||
1263 Basic->getEncoding() == dwarf::DW_ATE_signed_char ||
1264 Basic->getEncoding() == dwarf::DW_ATE_boolean)),
1265 "invalid set base type", &N, T);
1266 }
1267 }
1268
1269 CheckDI(isScope(N.getRawScope()), "invalid scope", &N, N.getRawScope());
1270 CheckDI(isType(N.getRawBaseType()), "invalid base type", &N,
1271 N.getRawBaseType());
1272
1273 if (N.getDWARFAddressSpace()) {
1274 CheckDI(N.getTag() == dwarf::DW_TAG_pointer_type ||
1275 N.getTag() == dwarf::DW_TAG_reference_type ||
1276 N.getTag() == dwarf::DW_TAG_rvalue_reference_type,
1277 "DWARF address space only applies to pointer or reference types",
1278 &N);
1279 }
1280}
1281
1282/// Detect mutually exclusive flags.
1283static bool hasConflictingReferenceFlags(unsigned Flags) {
1284 return ((Flags & DINode::FlagLValueReference) &&
1285 (Flags & DINode::FlagRValueReference)) ||
1286 ((Flags & DINode::FlagTypePassByValue) &&
1287 (Flags & DINode::FlagTypePassByReference));
1288}
1289
1290void Verifier::visitTemplateParams(const MDNode &N, const Metadata &RawParams) {
1291 auto *Params = dyn_cast<MDTuple>(&RawParams);
1292 CheckDI(Params, "invalid template params", &N, &RawParams);
1293 for (Metadata *Op : Params->operands()) {
1294 CheckDI(Op && isa<DITemplateParameter>(Op), "invalid template parameter",
1295 &N, Params, Op);
1296 }
1297}
1298
1299void Verifier::visitDICompositeType(const DICompositeType &N) {
1300 // Common scope checks.
1301 visitDIScope(N);
1302
1303 CheckDI(N.getTag() == dwarf::DW_TAG_array_type ||
1304 N.getTag() == dwarf::DW_TAG_structure_type ||
1305 N.getTag() == dwarf::DW_TAG_union_type ||
1306 N.getTag() == dwarf::DW_TAG_enumeration_type ||
1307 N.getTag() == dwarf::DW_TAG_class_type ||
1308 N.getTag() == dwarf::DW_TAG_variant_part ||
1309 N.getTag() == dwarf::DW_TAG_namelist,
1310 "invalid tag", &N);
1311
1312 CheckDI(isScope(N.getRawScope()), "invalid scope", &N, N.getRawScope());
1313 CheckDI(isType(N.getRawBaseType()), "invalid base type", &N,
1314 N.getRawBaseType());
1315
1316 CheckDI(!N.getRawElements() || isa<MDTuple>(N.getRawElements()),
1317 "invalid composite elements", &N, N.getRawElements());
1318 CheckDI(isType(N.getRawVTableHolder()), "invalid vtable holder", &N,
1319 N.getRawVTableHolder());
1321 "invalid reference flags", &N);
1322 unsigned DIBlockByRefStruct = 1 << 4;
1323 CheckDI((N.getFlags() & DIBlockByRefStruct) == 0,
1324 "DIBlockByRefStruct on DICompositeType is no longer supported", &N);
1325
1326 if (N.isVector()) {
1327 const DINodeArray Elements = N.getElements();
1328 CheckDI(Elements.size() == 1 &&
1329 Elements[0]->getTag() == dwarf::DW_TAG_subrange_type,
1330 "invalid vector, expected one element of type subrange", &N);
1331 }
1332
1333 if (auto *Params = N.getRawTemplateParams())
1334 visitTemplateParams(N, *Params);
1335
1336 if (auto *D = N.getRawDiscriminator()) {
1337 CheckDI(isa<DIDerivedType>(D) && N.getTag() == dwarf::DW_TAG_variant_part,
1338 "discriminator can only appear on variant part");
1339 }
1340
1341 if (N.getRawDataLocation()) {
1342 CheckDI(N.getTag() == dwarf::DW_TAG_array_type,
1343 "dataLocation can only appear in array type");
1344 }
1345
1346 if (N.getRawAssociated()) {
1347 CheckDI(N.getTag() == dwarf::DW_TAG_array_type,
1348 "associated can only appear in array type");
1349 }
1350
1351 if (N.getRawAllocated()) {
1352 CheckDI(N.getTag() == dwarf::DW_TAG_array_type,
1353 "allocated can only appear in array type");
1354 }
1355
1356 if (N.getRawRank()) {
1357 CheckDI(N.getTag() == dwarf::DW_TAG_array_type,
1358 "rank can only appear in array type");
1359 }
1360
1361 if (N.getTag() == dwarf::DW_TAG_array_type) {
1362 CheckDI(N.getRawBaseType(), "array types must have a base type", &N);
1363 }
1364}
1365
1366void Verifier::visitDISubroutineType(const DISubroutineType &N) {
1367 CheckDI(N.getTag() == dwarf::DW_TAG_subroutine_type, "invalid tag", &N);
1368 if (auto *Types = N.getRawTypeArray()) {
1369 CheckDI(isa<MDTuple>(Types), "invalid composite elements", &N, Types);
1370 for (Metadata *Ty : N.getTypeArray()->operands()) {
1371 CheckDI(isType(Ty), "invalid subroutine type ref", &N, Types, Ty);
1372 }
1373 }
1375 "invalid reference flags", &N);
1376}
1377
1378void Verifier::visitDIFile(const DIFile &N) {
1379 CheckDI(N.getTag() == dwarf::DW_TAG_file_type, "invalid tag", &N);
1380 std::optional<DIFile::ChecksumInfo<StringRef>> Checksum = N.getChecksum();
1381 if (Checksum) {
1382 CheckDI(Checksum->Kind <= DIFile::ChecksumKind::CSK_Last,
1383 "invalid checksum kind", &N);
1384 size_t Size;
1385 switch (Checksum->Kind) {
1386 case DIFile::CSK_MD5:
1387 Size = 32;
1388 break;
1389 case DIFile::CSK_SHA1:
1390 Size = 40;
1391 break;
1392 case DIFile::CSK_SHA256:
1393 Size = 64;
1394 break;
1395 }
1396 CheckDI(Checksum->Value.size() == Size, "invalid checksum length", &N);
1397 CheckDI(Checksum->Value.find_if_not(llvm::isHexDigit) == StringRef::npos,
1398 "invalid checksum", &N);
1399 }
1400}
1401
1402void Verifier::visitDICompileUnit(const DICompileUnit &N) {
1403 CheckDI(N.isDistinct(), "compile units must be distinct", &N);
1404 CheckDI(N.getTag() == dwarf::DW_TAG_compile_unit, "invalid tag", &N);
1405
1406 // Don't bother verifying the compilation directory or producer string
1407 // as those could be empty.
1408 CheckDI(N.getRawFile() && isa<DIFile>(N.getRawFile()), "invalid file", &N,
1409 N.getRawFile());
1410 CheckDI(!N.getFile()->getFilename().empty(), "invalid filename", &N,
1411 N.getFile());
1412
1413 CurrentSourceLang = (dwarf::SourceLanguage)N.getSourceLanguage();
1414
1415 CheckDI((N.getEmissionKind() <= DICompileUnit::LastEmissionKind),
1416 "invalid emission kind", &N);
1417
1418 if (auto *Array = N.getRawEnumTypes()) {
1419 CheckDI(isa<MDTuple>(Array), "invalid enum list", &N, Array);
1420 for (Metadata *Op : N.getEnumTypes()->operands()) {
1421 auto *Enum = dyn_cast_or_null<DICompositeType>(Op);
1422 CheckDI(Enum && Enum->getTag() == dwarf::DW_TAG_enumeration_type,
1423 "invalid enum type", &N, N.getEnumTypes(), Op);
1424 }
1425 }
1426 if (auto *Array = N.getRawRetainedTypes()) {
1427 CheckDI(isa<MDTuple>(Array), "invalid retained type list", &N, Array);
1428 for (Metadata *Op : N.getRetainedTypes()->operands()) {
1429 CheckDI(
1430 Op && (isa<DIType>(Op) || (isa<DISubprogram>(Op) &&
1431 !cast<DISubprogram>(Op)->isDefinition())),
1432 "invalid retained type", &N, Op);
1433 }
1434 }
1435 if (auto *Array = N.getRawGlobalVariables()) {
1436 CheckDI(isa<MDTuple>(Array), "invalid global variable list", &N, Array);
1437 for (Metadata *Op : N.getGlobalVariables()->operands()) {
1438 CheckDI(Op && (isa<DIGlobalVariableExpression>(Op)),
1439 "invalid global variable ref", &N, Op);
1440 }
1441 }
1442 if (auto *Array = N.getRawImportedEntities()) {
1443 CheckDI(isa<MDTuple>(Array), "invalid imported entity list", &N, Array);
1444 for (Metadata *Op : N.getImportedEntities()->operands()) {
1445 CheckDI(Op && isa<DIImportedEntity>(Op), "invalid imported entity ref",
1446 &N, Op);
1447 }
1448 }
1449 if (auto *Array = N.getRawMacros()) {
1450 CheckDI(isa<MDTuple>(Array), "invalid macro list", &N, Array);
1451 for (Metadata *Op : N.getMacros()->operands()) {
1452 CheckDI(Op && isa<DIMacroNode>(Op), "invalid macro ref", &N, Op);
1453 }
1454 }
1455 CUVisited.insert(&N);
1456}
1457
1458void Verifier::visitDISubprogram(const DISubprogram &N) {
1459 CheckDI(N.getTag() == dwarf::DW_TAG_subprogram, "invalid tag", &N);
1460 CheckDI(isScope(N.getRawScope()), "invalid scope", &N, N.getRawScope());
1461 if (auto *F = N.getRawFile())
1462 CheckDI(isa<DIFile>(F), "invalid file", &N, F);
1463 else
1464 CheckDI(N.getLine() == 0, "line specified with no file", &N, N.getLine());
1465 if (auto *T = N.getRawType())
1466 CheckDI(isa<DISubroutineType>(T), "invalid subroutine type", &N, T);
1467 CheckDI(isType(N.getRawContainingType()), "invalid containing type", &N,
1468 N.getRawContainingType());
1469 if (auto *Params = N.getRawTemplateParams())
1470 visitTemplateParams(N, *Params);
1471 if (auto *S = N.getRawDeclaration())
1472 CheckDI(isa<DISubprogram>(S) && !cast<DISubprogram>(S)->isDefinition(),
1473 "invalid subprogram declaration", &N, S);
1474 if (auto *RawNode = N.getRawRetainedNodes()) {
1475 auto *Node = dyn_cast<MDTuple>(RawNode);
1476 CheckDI(Node, "invalid retained nodes list", &N, RawNode);
1477 for (Metadata *Op : Node->operands()) {
1478 CheckDI(Op && (isa<DILocalVariable>(Op) || isa<DILabel>(Op) ||
1479 isa<DIImportedEntity>(Op)),
1480 "invalid retained nodes, expected DILocalVariable, DILabel or "
1481 "DIImportedEntity",
1482 &N, Node, Op);
1483 }
1484 }
1486 "invalid reference flags", &N);
1487
1488 auto *Unit = N.getRawUnit();
1489 if (N.isDefinition()) {
1490 // Subprogram definitions (not part of the type hierarchy).
1491 CheckDI(N.isDistinct(), "subprogram definitions must be distinct", &N);
1492 CheckDI(Unit, "subprogram definitions must have a compile unit", &N);
1493 CheckDI(isa<DICompileUnit>(Unit), "invalid unit type", &N, Unit);
1494 // There's no good way to cross the CU boundary to insert a nested
1495 // DISubprogram definition in one CU into a type defined in another CU.
1496 auto *CT = dyn_cast_or_null<DICompositeType>(N.getRawScope());
1497 if (CT && CT->getRawIdentifier() &&
1498 M.getContext().isODRUniquingDebugTypes())
1499 CheckDI(N.getDeclaration(),
1500 "definition subprograms cannot be nested within DICompositeType "
1501 "when enabling ODR",
1502 &N);
1503 } else {
1504 // Subprogram declarations (part of the type hierarchy).
1505 CheckDI(!Unit, "subprogram declarations must not have a compile unit", &N);
1506 CheckDI(!N.getRawDeclaration(),
1507 "subprogram declaration must not have a declaration field");
1508 }
1509
1510 if (auto *RawThrownTypes = N.getRawThrownTypes()) {
1511 auto *ThrownTypes = dyn_cast<MDTuple>(RawThrownTypes);
1512 CheckDI(ThrownTypes, "invalid thrown types list", &N, RawThrownTypes);
1513 for (Metadata *Op : ThrownTypes->operands())
1514 CheckDI(Op && isa<DIType>(Op), "invalid thrown type", &N, ThrownTypes,
1515 Op);
1516 }
1517
1518 if (N.areAllCallsDescribed())
1519 CheckDI(N.isDefinition(),
1520 "DIFlagAllCallsDescribed must be attached to a definition");
1521}
1522
1523void Verifier::visitDILexicalBlockBase(const DILexicalBlockBase &N) {
1524 CheckDI(N.getTag() == dwarf::DW_TAG_lexical_block, "invalid tag", &N);
1525 CheckDI(N.getRawScope() && isa<DILocalScope>(N.getRawScope()),
1526 "invalid local scope", &N, N.getRawScope());
1527 if (auto *SP = dyn_cast<DISubprogram>(N.getRawScope()))
1528 CheckDI(SP->isDefinition(), "scope points into the type hierarchy", &N);
1529}
1530
1531void Verifier::visitDILexicalBlock(const DILexicalBlock &N) {
1532 visitDILexicalBlockBase(N);
1533
1534 CheckDI(N.getLine() || !N.getColumn(),
1535 "cannot have column info without line info", &N);
1536}
1537
1538void Verifier::visitDILexicalBlockFile(const DILexicalBlockFile &N) {
1539 visitDILexicalBlockBase(N);
1540}
1541
1542void Verifier::visitDICommonBlock(const DICommonBlock &N) {
1543 CheckDI(N.getTag() == dwarf::DW_TAG_common_block, "invalid tag", &N);
1544 if (auto *S = N.getRawScope())
1545 CheckDI(isa<DIScope>(S), "invalid scope ref", &N, S);
1546 if (auto *S = N.getRawDecl())
1547 CheckDI(isa<DIGlobalVariable>(S), "invalid declaration", &N, S);
1548}
1549
1550void Verifier::visitDINamespace(const DINamespace &N) {
1551 CheckDI(N.getTag() == dwarf::DW_TAG_namespace, "invalid tag", &N);
1552 if (auto *S = N.getRawScope())
1553 CheckDI(isa<DIScope>(S), "invalid scope ref", &N, S);
1554}
1555
1556void Verifier::visitDIMacro(const DIMacro &N) {
1557 CheckDI(N.getMacinfoType() == dwarf::DW_MACINFO_define ||
1558 N.getMacinfoType() == dwarf::DW_MACINFO_undef,
1559 "invalid macinfo type", &N);
1560 CheckDI(!N.getName().empty(), "anonymous macro", &N);
1561 if (!N.getValue().empty()) {
1562 assert(N.getValue().data()[0] != ' ' && "Macro value has a space prefix");
1563 }
1564}
1565
1566void Verifier::visitDIMacroFile(const DIMacroFile &N) {
1567 CheckDI(N.getMacinfoType() == dwarf::DW_MACINFO_start_file,
1568 "invalid macinfo type", &N);
1569 if (auto *F = N.getRawFile())
1570 CheckDI(isa<DIFile>(F), "invalid file", &N, F);
1571
1572 if (auto *Array = N.getRawElements()) {
1573 CheckDI(isa<MDTuple>(Array), "invalid macro list", &N, Array);
1574 for (Metadata *Op : N.getElements()->operands()) {
1575 CheckDI(Op && isa<DIMacroNode>(Op), "invalid macro ref", &N, Op);
1576 }
1577 }
1578}
1579
1580void Verifier::visitDIModule(const DIModule &N) {
1581 CheckDI(N.getTag() == dwarf::DW_TAG_module, "invalid tag", &N);
1582 CheckDI(!N.getName().empty(), "anonymous module", &N);
1583}
1584
1585void Verifier::visitDITemplateParameter(const DITemplateParameter &N) {
1586 CheckDI(isType(N.getRawType()), "invalid type ref", &N, N.getRawType());
1587}
1588
1589void Verifier::visitDITemplateTypeParameter(const DITemplateTypeParameter &N) {
1590 visitDITemplateParameter(N);
1591
1592 CheckDI(N.getTag() == dwarf::DW_TAG_template_type_parameter, "invalid tag",
1593 &N);
1594}
1595
1596void Verifier::visitDITemplateValueParameter(
1597 const DITemplateValueParameter &N) {
1598 visitDITemplateParameter(N);
1599
1600 CheckDI(N.getTag() == dwarf::DW_TAG_template_value_parameter ||
1601 N.getTag() == dwarf::DW_TAG_GNU_template_template_param ||
1602 N.getTag() == dwarf::DW_TAG_GNU_template_parameter_pack,
1603 "invalid tag", &N);
1604}
1605
1606void Verifier::visitDIVariable(const DIVariable &N) {
1607 if (auto *S = N.getRawScope())
1608 CheckDI(isa<DIScope>(S), "invalid scope", &N, S);
1609 if (auto *F = N.getRawFile())
1610 CheckDI(isa<DIFile>(F), "invalid file", &N, F);
1611}
1612
1613void Verifier::visitDIGlobalVariable(const DIGlobalVariable &N) {
1614 // Checks common to all variables.
1615 visitDIVariable(N);
1616
1617 CheckDI(N.getTag() == dwarf::DW_TAG_variable, "invalid tag", &N);
1618 CheckDI(isType(N.getRawType()), "invalid type ref", &N, N.getRawType());
1619 // Check only if the global variable is not an extern
1620 if (N.isDefinition())
1621 CheckDI(N.getType(), "missing global variable type", &N);
1622 if (auto *Member = N.getRawStaticDataMemberDeclaration()) {
1623 CheckDI(isa<DIDerivedType>(Member),
1624 "invalid static data member declaration", &N, Member);
1625 }
1626}
1627
1628void Verifier::visitDILocalVariable(const DILocalVariable &N) {
1629 // Checks common to all variables.
1630 visitDIVariable(N);
1631
1632 CheckDI(isType(N.getRawType()), "invalid type ref", &N, N.getRawType());
1633 CheckDI(N.getTag() == dwarf::DW_TAG_variable, "invalid tag", &N);
1634 CheckDI(N.getRawScope() && isa<DILocalScope>(N.getRawScope()),
1635 "local variable requires a valid scope", &N, N.getRawScope());
1636 if (auto Ty = N.getType())
1637 CheckDI(!isa<DISubroutineType>(Ty), "invalid type", &N, N.getType());
1638}
1639
1640void Verifier::visitDIAssignID(const DIAssignID &N) {
1641 CheckDI(!N.getNumOperands(), "DIAssignID has no arguments", &N);
1642 CheckDI(N.isDistinct(), "DIAssignID must be distinct", &N);
1643}
1644
1645void Verifier::visitDILabel(const DILabel &N) {
1646 if (auto *S = N.getRawScope())
1647 CheckDI(isa<DIScope>(S), "invalid scope", &N, S);
1648 if (auto *F = N.getRawFile())
1649 CheckDI(isa<DIFile>(F), "invalid file", &N, F);
1650
1651 CheckDI(N.getTag() == dwarf::DW_TAG_label, "invalid tag", &N);
1652 CheckDI(N.getRawScope() && isa<DILocalScope>(N.getRawScope()),
1653 "label requires a valid scope", &N, N.getRawScope());
1654}
1655
1656void Verifier::visitDIExpression(const DIExpression &N) {
1657 CheckDI(N.isValid(), "invalid expression", &N);
1658}
1659
1660void Verifier::visitDIGlobalVariableExpression(
1661 const DIGlobalVariableExpression &GVE) {
1662 CheckDI(GVE.getVariable(), "missing variable");
1663 if (auto *Var = GVE.getVariable())
1664 visitDIGlobalVariable(*Var);
1665 if (auto *Expr = GVE.getExpression()) {
1666 visitDIExpression(*Expr);
1667 if (auto Fragment = Expr->getFragmentInfo())
1668 verifyFragmentExpression(*GVE.getVariable(), *Fragment, &GVE);
1669 }
1670}
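// Illustrative IR (example only, not from this file): the usual pairing
// attached to a global variable via !dbg, e.g.
//   !2 = !DIGlobalVariableExpression(var: !3, expr: !DIExpression())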
1671
1672void Verifier::visitDIObjCProperty(const DIObjCProperty &N) {
1673 CheckDI(N.getTag() == dwarf::DW_TAG_APPLE_property, "invalid tag", &N);
1674 if (auto *T = N.getRawType())
1675 CheckDI(isType(T), "invalid type ref", &N, T);
1676 if (auto *F = N.getRawFile())
1677 CheckDI(isa<DIFile>(F), "invalid file", &N, F);
1678}
1679
1680void Verifier::visitDIImportedEntity(const DIImportedEntity &N) {
1681 CheckDI(N.getTag() == dwarf::DW_TAG_imported_module ||
1682 N.getTag() == dwarf::DW_TAG_imported_declaration,
1683 "invalid tag", &N);
1684 if (auto *S = N.getRawScope())
1685 CheckDI(isa<DIScope>(S), "invalid scope for imported entity", &N, S);
1686 CheckDI(isDINode(N.getRawEntity()), "invalid imported entity", &N,
1687 N.getRawEntity());
1688}
1689
1690void Verifier::visitComdat(const Comdat &C) {
1691 // In COFF the Module is invalid if the GlobalValue has private linkage.
1692 // Entities with private linkage don't have entries in the symbol table.
1693 if (TT.isOSBinFormatCOFF())
1694 if (const GlobalValue *GV = M.getNamedValue(C.getName()))
1695 Check(!GV->hasPrivateLinkage(), "comdat global value has private linkage",
1696 GV);
1697}
1698
1699void Verifier::visitModuleIdents() {
1700 const NamedMDNode *Idents = M.getNamedMetadata("llvm.ident");
1701 if (!Idents)
1702 return;
1703
1704 // llvm.ident takes a list of metadata entries. Each entry has only one string.
1705 // Scan each llvm.ident entry and make sure that this requirement is met.
1706 for (const MDNode *N : Idents->operands()) {
1707 Check(N->getNumOperands() == 1,
1708 "incorrect number of operands in llvm.ident metadata", N);
1709 Check(dyn_cast_or_null<MDString>(N->getOperand(0)),
1710 ("invalid value for llvm.ident metadata entry operand"
1711 "(the operand should be a string)"),
1712 N->getOperand(0));
1713 }
1714}
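// Illustrative IR (example only, not from this file): each llvm.ident operand
// is a tuple holding a single string, e.g.
//   !llvm.ident = !{!0}
//   !0 = !{!"clang version 19.0.0"}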
1715
1716void Verifier::visitModuleCommandLines() {
1717 const NamedMDNode *CommandLines = M.getNamedMetadata("llvm.commandline");
1718 if (!CommandLines)
1719 return;
1720
1721 // llvm.commandline takes a list of metadata entries. Each entry has only one
1722 // string. Scan each llvm.commandline entry and make sure that this
1723 // requirement is met.
1724 for (const MDNode *N : CommandLines->operands()) {
1725 Check(N->getNumOperands() == 1,
1726 "incorrect number of operands in llvm.commandline metadata", N);
1727 Check(dyn_cast_or_null<MDString>(N->getOperand(0)),
1728 ("invalid value for llvm.commandline metadata entry operand"
1729 "(the operand should be a string)"),
1730 N->getOperand(0));
1731 }
1732}
1733
1734void Verifier::visitModuleFlags() {
1735 const NamedMDNode *Flags = M.getModuleFlagsMetadata();
1736 if (!Flags) return;
1737
1738 // Scan each flag, and track the flags and requirements.
1739 DenseMap<const MDString*, const MDNode*> SeenIDs;
1740 SmallVector<const MDNode*, 16> Requirements;
1741 uint64_t PAuthABIPlatform = -1;
1742 uint64_t PAuthABIVersion = -1;
1743 for (const MDNode *MDN : Flags->operands()) {
1744 visitModuleFlag(MDN, SeenIDs, Requirements);
1745 if (MDN->getNumOperands() != 3)
1746 continue;
1747 if (const auto *FlagName = dyn_cast_or_null<MDString>(MDN->getOperand(1))) {
1748 if (FlagName->getString() == "aarch64-elf-pauthabi-platform") {
1749 if (const auto *PAP =
1750 mdconst::dyn_extract_or_null<ConstantInt>(MDN->getOperand(2)))
1751 PAuthABIPlatform = PAP->getZExtValue();
1752 } else if (FlagName->getString() == "aarch64-elf-pauthabi-version") {
1753 if (const auto *PAV =
1754 mdconst::dyn_extract_or_null<ConstantInt>(MDN->getOperand(2)))
1755 PAuthABIVersion = PAV->getZExtValue();
1756 }
1757 }
1758 }
1759
1760 if ((PAuthABIPlatform == uint64_t(-1)) != (PAuthABIVersion == uint64_t(-1)))
1761 CheckFailed("either both or no 'aarch64-elf-pauthabi-platform' and "
1762 "'aarch64-elf-pauthabi-version' module flags must be present");
1763
1764 // Validate that the requirements in the module are valid.
1765 for (const MDNode *Requirement : Requirements) {
1766 const MDString *Flag = cast<MDString>(Requirement->getOperand(0));
1767 const Metadata *ReqValue = Requirement->getOperand(1);
1768
1769 const MDNode *Op = SeenIDs.lookup(Flag);
1770 if (!Op) {
1771 CheckFailed("invalid requirement on flag, flag is not present in module",
1772 Flag);
1773 continue;
1774 }
1775
1776 if (Op->getOperand(2) != ReqValue) {
1777 CheckFailed(("invalid requirement on flag, "
1778 "flag does not have the required value"),
1779 Flag);
1780 continue;
1781 }
1782 }
1783}
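// Illustrative IR (example only, not from this file; the flag names are made
// up): a 'require' flag (behavior 3) is validated here against the flag it
// names, e.g.
//   !llvm.module.flags = !{!0, !1}
//   !0 = !{i32 1, !"wchar_size", i32 4}
//   !1 = !{i32 3, !"wchar_req", !{!"wchar_size", i32 4}}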
1784
1785void
1786Verifier::visitModuleFlag(const MDNode *Op,
1787 DenseMap<const MDString *, const MDNode *> &SeenIDs,
1788 SmallVectorImpl<const MDNode *> &Requirements) {
1789 // Each module flag should have three arguments, the merge behavior (a
1790 // constant int), the flag ID (an MDString), and the value.
1791 Check(Op->getNumOperands() == 3,
1792 "incorrect number of operands in module flag", Op);
1793 Module::ModFlagBehavior MFB;
1794 if (!Module::isValidModFlagBehavior(Op->getOperand(0), MFB)) {
1795 Check(mdconst::dyn_extract_or_null<ConstantInt>(Op->getOperand(0)),
1796 "invalid behavior operand in module flag (expected constant integer)",
1797 Op->getOperand(0));
1798 Check(false,
1799 "invalid behavior operand in module flag (unexpected constant)",
1800 Op->getOperand(0));
1801 }
1802 MDString *ID = dyn_cast_or_null<MDString>(Op->getOperand(1));
1803 Check(ID, "invalid ID operand in module flag (expected metadata string)",
1804 Op->getOperand(1));
1805
1806 // Check the values for behaviors with additional requirements.
1807 switch (MFB) {
1808 case Module::Error:
1809 case Module::Warning:
1810 case Module::Override:
1811 // These behavior types accept any value.
1812 break;
1813
1814 case Module::Min: {
1815 auto *V = mdconst::dyn_extract_or_null<ConstantInt>(Op->getOperand(2));
1816 Check(V && V->getValue().isNonNegative(),
1817 "invalid value for 'min' module flag (expected constant non-negative "
1818 "integer)",
1819 Op->getOperand(2));
1820 break;
1821 }
1822
1823 case Module::Max: {
1824 Check(mdconst::dyn_extract_or_null<ConstantInt>(Op->getOperand(2)),
1825 "invalid value for 'max' module flag (expected constant integer)",
1826 Op->getOperand(2));
1827 break;
1828 }
1829
1830 case Module::Require: {
1831 // The value should itself be an MDNode with two operands, a flag ID (an
1832 // MDString), and a value.
1833 MDNode *Value = dyn_cast<MDNode>(Op->getOperand(2));
1834 Check(Value && Value->getNumOperands() == 2,
1835 "invalid value for 'require' module flag (expected metadata pair)",
1836 Op->getOperand(2));
1837 Check(isa<MDString>(Value->getOperand(0)),
1838 ("invalid value for 'require' module flag "
1839 "(first value operand should be a string)"),
1840 Value->getOperand(0));
1841
1842 // Append it to the list of requirements, to check once all module flags are
1843 // scanned.
1844 Requirements.push_back(Value);
1845 break;
1846 }
1847
1848 case Module::Append:
1849 case Module::AppendUnique: {
1850 // These behavior types require the operand be an MDNode.
1851 Check(isa<MDNode>(Op->getOperand(2)),
1852 "invalid value for 'append'-type module flag "
1853 "(expected a metadata node)",
1854 Op->getOperand(2));
1855 break;
1856 }
1857 }
1858
1859 // Unless this is a "requires" flag, check the ID is unique.
1860 if (MFB != Module::Require) {
1861 bool Inserted = SeenIDs.insert(std::make_pair(ID, Op)).second;
1862 Check(Inserted,
1863 "module flag identifiers must be unique (or of 'require' type)", ID);
1864 }
1865
1866 if (ID->getString() == "wchar_size") {
1867 ConstantInt *Value
1868 = mdconst::dyn_extract_or_null<ConstantInt>(Op->getOperand(2));
1869 Check(Value, "wchar_size metadata requires constant integer argument");
1870 }
1871
1872 if (ID->getString() == "Linker Options") {
1873 // If the llvm.linker.options named metadata exists, we assume that the
1874 // bitcode reader has upgraded the module flag. Otherwise the flag might
1875 // have been created by a client directly.
1876 Check(M.getNamedMetadata("llvm.linker.options"),
1877 "'Linker Options' named metadata no longer supported");
1878 }
1879
1880 if (ID->getString() == "SemanticInterposition") {
1881 ConstantInt *Value =
1882 mdconst::dyn_extract_or_null<ConstantInt>(Op->getOperand(2));
1883 Check(Value,
1884 "SemanticInterposition metadata requires constant integer argument");
1885 }
1886
1887 if (ID->getString() == "CG Profile") {
1888 for (const MDOperand &MDO : cast<MDNode>(Op->getOperand(2))->operands())
1889 visitModuleFlagCGProfileEntry(MDO);
1890 }
1891}
1892
1893void Verifier::visitModuleFlagCGProfileEntry(const MDOperand &MDO) {
1894 auto CheckFunction = [&](const MDOperand &FuncMDO) {
1895 if (!FuncMDO)
1896 return;
1897 auto F = dyn_cast<ValueAsMetadata>(FuncMDO);
1898 Check(F && isa<Function>(F->getValue()->stripPointerCasts()),
1899 "expected a Function or null", FuncMDO);
1900 };
1901 auto Node = dyn_cast_or_null<MDNode>(MDO);
1902 Check(Node && Node->getNumOperands() == 3, "expected a MDNode triple", MDO);
1903 CheckFunction(Node->getOperand(0));
1904 CheckFunction(Node->getOperand(1));
1905 auto Count = dyn_cast_or_null<ConstantAsMetadata>(Node->getOperand(2));
1906 Check(Count && Count->getType()->isIntegerTy(),
1907 "expected an integer constant", Node->getOperand(2));
1908}
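// Illustrative IR (example only, not from this file; symbol names are made
// up): each "CG Profile" entry is a (caller, callee, count) triple, e.g.
//   !0 = !{i32 5, !"CG Profile", !1}
//   !1 = !{!2}
//   !2 = !{ptr @caller, ptr @callee, i64 1024}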
1909
1910void Verifier::verifyAttributeTypes(AttributeSet Attrs, const Value *V) {
1911 for (Attribute A : Attrs) {
1912
1913 if (A.isStringAttribute()) {
1914#define GET_ATTR_NAMES
1915#define ATTRIBUTE_ENUM(ENUM_NAME, DISPLAY_NAME)
1916#define ATTRIBUTE_STRBOOL(ENUM_NAME, DISPLAY_NAME) \
1917 if (A.getKindAsString() == #DISPLAY_NAME) { \
1918 auto V = A.getValueAsString(); \
1919 if (!(V.empty() || V == "true" || V == "false")) \
1920 CheckFailed("invalid value for '" #DISPLAY_NAME "' attribute: " + V + \
1921 ""); \
1922 }
1923
1924#include "llvm/IR/Attributes.inc"
1925 continue;
1926 }
1927
1928 if (A.isIntAttribute() != Attribute::isIntAttrKind(A.getKindAsEnum())) {
1929 CheckFailed("Attribute '" + A.getAsString() + "' should have an Argument",
1930 V);
1931 return;
1932 }
1933 }
1934}
1935
1936// VerifyParameterAttrs - Check the given attributes for an argument or return
1937// value of the specified type. The value V is printed in error messages.
1938void Verifier::verifyParameterAttrs(AttributeSet Attrs, Type *Ty,
1939 const Value *V) {
1940 if (!Attrs.hasAttributes())
1941 return;
1942
1943 verifyAttributeTypes(Attrs, V);
1944
1945 for (Attribute Attr : Attrs)
1946 Check(Attr.isStringAttribute() ||
1947 Attribute::canUseAsParamAttr(Attr.getKindAsEnum()),
1948 "Attribute '" + Attr.getAsString() + "' does not apply to parameters",
1949 V);
1950
1951 if (Attrs.hasAttribute(Attribute::ImmArg)) {
1952 Check(Attrs.getNumAttributes() == 1,
1953 "Attribute 'immarg' is incompatible with other attributes", V);
1954 }
1955
1956 // Check for mutually incompatible attributes. Only inreg is compatible with
1957 // sret.
1958 unsigned AttrCount = 0;
1959 AttrCount += Attrs.hasAttribute(Attribute::ByVal);
1960 AttrCount += Attrs.hasAttribute(Attribute::InAlloca);
1961 AttrCount += Attrs.hasAttribute(Attribute::Preallocated);
1962 AttrCount += Attrs.hasAttribute(Attribute::StructRet) ||
1963 Attrs.hasAttribute(Attribute::InReg);
1964 AttrCount += Attrs.hasAttribute(Attribute::Nest);
1965 AttrCount += Attrs.hasAttribute(Attribute::ByRef);
1966 Check(AttrCount <= 1,
1967 "Attributes 'byval', 'inalloca', 'preallocated', 'inreg', 'nest', "
1968 "'byref', and 'sret' are incompatible!",
1969 V);
1970
1971 Check(!(Attrs.hasAttribute(Attribute::InAlloca) &&
1972 Attrs.hasAttribute(Attribute::ReadOnly)),
1973 "Attributes "
1974 "'inalloca and readonly' are incompatible!",
1975 V);
1976
1977 Check(!(Attrs.hasAttribute(Attribute::StructRet) &&
1978 Attrs.hasAttribute(Attribute::Returned)),
1979 "Attributes "
1980 "'sret and returned' are incompatible!",
1981 V);
1982
1983 Check(!(Attrs.hasAttribute(Attribute::ZExt) &&
1984 Attrs.hasAttribute(Attribute::SExt)),
1985 "Attributes "
1986 "'zeroext and signext' are incompatible!",
1987 V);
1988
1989 Check(!(Attrs.hasAttribute(Attribute::ReadNone) &&
1990 Attrs.hasAttribute(Attribute::ReadOnly)),
1991 "Attributes "
1992 "'readnone and readonly' are incompatible!",
1993 V);
1994
1995 Check(!(Attrs.hasAttribute(Attribute::ReadNone) &&
1996 Attrs.hasAttribute(Attribute::WriteOnly)),
1997 "Attributes "
1998 "'readnone and writeonly' are incompatible!",
1999 V);
2000
2001 Check(!(Attrs.hasAttribute(Attribute::ReadOnly) &&
2002 Attrs.hasAttribute(Attribute::WriteOnly)),
2003 "Attributes "
2004 "'readonly and writeonly' are incompatible!",
2005 V);
2006
2007 Check(!(Attrs.hasAttribute(Attribute::NoInline) &&
2008 Attrs.hasAttribute(Attribute::AlwaysInline)),
2009 "Attributes "
2010 "'noinline and alwaysinline' are incompatible!",
2011 V);
2012
2013 Check(!(Attrs.hasAttribute(Attribute::Writable) &&
2014 Attrs.hasAttribute(Attribute::ReadNone)),
2015 "Attributes writable and readnone are incompatible!", V);
2016
2017 Check(!(Attrs.hasAttribute(Attribute::Writable) &&
2018 Attrs.hasAttribute(Attribute::ReadOnly)),
2019 "Attributes writable and readonly are incompatible!", V);
2020
2021 AttributeMask IncompatibleAttrs = AttributeFuncs::typeIncompatible(Ty);
2022 for (Attribute Attr : Attrs) {
2023 if (!Attr.isStringAttribute() &&
2024 IncompatibleAttrs.contains(Attr.getKindAsEnum())) {
2025 CheckFailed("Attribute '" + Attr.getAsString() +
2026 "' applied to incompatible type!", V);
2027 return;
2028 }
2029 }
2030
2031 if (isa<PointerType>(Ty)) {
2032 if (Attrs.hasAttribute(Attribute::ByVal)) {
2033 if (Attrs.hasAttribute(Attribute::Alignment)) {
2034 Align AttrAlign = Attrs.getAlignment().valueOrOne();
2035 Align MaxAlign(ParamMaxAlignment);
2036 Check(AttrAlign <= MaxAlign,
2037 "Attribute 'align' exceed the max size 2^14", V);
2038 }
2039 SmallPtrSet<Type *, 4> Visited;
2040 Check(Attrs.getByValType()->isSized(&Visited),
2041 "Attribute 'byval' does not support unsized types!", V);
2042 }
2043 if (Attrs.hasAttribute(Attribute::ByRef)) {
2044 SmallPtrSet<Type *, 4> Visited;
2045 Check(Attrs.getByRefType()->isSized(&Visited),
2046 "Attribute 'byref' does not support unsized types!", V);
2047 }
2048 if (Attrs.hasAttribute(Attribute::InAlloca)) {
2049 SmallPtrSet<Type *, 4> Visited;
2050 Check(Attrs.getInAllocaType()->isSized(&Visited),
2051 "Attribute 'inalloca' does not support unsized types!", V);
2052 }
2053 if (Attrs.hasAttribute(Attribute::Preallocated)) {
2054 SmallPtrSet<Type *, 4> Visited;
2055 Check(Attrs.getPreallocatedType()->isSized(&Visited),
2056 "Attribute 'preallocated' does not support unsized types!", V);
2057 }
2058 }
2059
2060 if (Attrs.hasAttribute(Attribute::NoFPClass)) {
2061 uint64_t Val = Attrs.getAttribute(Attribute::NoFPClass).getValueAsInt();
2062 Check(Val != 0, "Attribute 'nofpclass' must have at least one test bit set",
2063 V);
2064 Check((Val & ~static_cast<unsigned>(fcAllFlags)) == 0,
2065 "Invalid value for 'nofpclass' test mask", V);
2066 }
2067 if (Attrs.hasAttribute(Attribute::Range)) {
2068 auto CR = Attrs.getAttribute(Attribute::Range).getValueAsConstantRange();
2069 Check(Ty->isIntOrIntVectorTy(CR.getBitWidth()),
2070 "Range bit width must match type bit width!", V);
2071 }
2072}
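// Illustrative declarations (examples only, not from this file) for the
// exclusivity rules above: only 'inreg' may accompany 'sret', so the first
// line is accepted and the second is rejected by the verifier.
//   declare void @ok(ptr inreg sret(i32))
//   declare void @bad(ptr byval(i32) inalloca(i32))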
2073
2074void Verifier::checkUnsignedBaseTenFuncAttr(AttributeList Attrs, StringRef Attr,
2075 const Value *V) {
2076 if (Attrs.hasFnAttr(Attr)) {
2077 StringRef S = Attrs.getFnAttr(Attr).getValueAsString();
2078 unsigned N;
2079 if (S.getAsInteger(10, N))
2080 CheckFailed("\"" + Attr + "\" takes an unsigned integer: " + S, V);
2081 }
2082}
2083
2084// Check parameter attributes against a function type.
2085// The value V is printed in error messages.
2086void Verifier::verifyFunctionAttrs(FunctionType *FT, AttributeList Attrs,
2087 const Value *V, bool IsIntrinsic,
2088 bool IsInlineAsm) {
2089 if (Attrs.isEmpty())
2090 return;
2091
2092 if (AttributeListsVisited.insert(Attrs.getRawPointer()).second) {
2093 Check(Attrs.hasParentContext(Context),
2094 "Attribute list does not match Module context!", &Attrs, V);
2095 for (const auto &AttrSet : Attrs) {
2096 Check(!AttrSet.hasAttributes() || AttrSet.hasParentContext(Context),
2097 "Attribute set does not match Module context!", &AttrSet, V);
2098 for (const auto &A : AttrSet) {
2099 Check(A.hasParentContext(Context),
2100 "Attribute does not match Module context!", &A, V);
2101 }
2102 }
2103 }
2104
2105 bool SawNest = false;
2106 bool SawReturned = false;
2107 bool SawSRet = false;
2108 bool SawSwiftSelf = false;
2109 bool SawSwiftAsync = false;
2110 bool SawSwiftError = false;
2111
2112 // Verify return value attributes.
2113 AttributeSet RetAttrs = Attrs.getRetAttrs();
2114 for (Attribute RetAttr : RetAttrs)
2115 Check(RetAttr.isStringAttribute() ||
2116 Attribute::canUseAsRetAttr(RetAttr.getKindAsEnum()),
2117 "Attribute '" + RetAttr.getAsString() +
2118 "' does not apply to function return values",
2119 V);
2120
2121 unsigned MaxParameterWidth = 0;
2122 auto GetMaxParameterWidth = [&MaxParameterWidth](Type *Ty) {
2123 if (Ty->isVectorTy()) {
2124 if (auto *VT = dyn_cast<FixedVectorType>(Ty)) {
2125 unsigned Size = VT->getPrimitiveSizeInBits().getFixedValue();
2126 if (Size > MaxParameterWidth)
2127 MaxParameterWidth = Size;
2128 }
2129 }
2130 };
2131 GetMaxParameterWidth(FT->getReturnType());
2132 verifyParameterAttrs(RetAttrs, FT->getReturnType(), V);
2133
2134 // Verify parameter attributes.
2135 for (unsigned i = 0, e = FT->getNumParams(); i != e; ++i) {
2136 Type *Ty = FT->getParamType(i);
2137 AttributeSet ArgAttrs = Attrs.getParamAttrs(i);
2138
2139 if (!IsIntrinsic) {
2140 Check(!ArgAttrs.hasAttribute(Attribute::ImmArg),
2141 "immarg attribute only applies to intrinsics", V);
2142 if (!IsInlineAsm)
2143 Check(!ArgAttrs.hasAttribute(Attribute::ElementType),
2144 "Attribute 'elementtype' can only be applied to intrinsics"
2145 " and inline asm.",
2146 V);
2147 }
2148
2149 verifyParameterAttrs(ArgAttrs, Ty, V);
2150 GetMaxParameterWidth(Ty);
2151
2152 if (ArgAttrs.hasAttribute(Attribute::Nest)) {
2153 Check(!SawNest, "More than one parameter has attribute nest!", V);
2154 SawNest = true;
2155 }
2156
2157 if (ArgAttrs.hasAttribute(Attribute::Returned)) {
2158 Check(!SawReturned, "More than one parameter has attribute returned!", V);
2159 Check(Ty->canLosslesslyBitCastTo(FT->getReturnType()),
2160 "Incompatible argument and return types for 'returned' attribute",
2161 V);
2162 SawReturned = true;
2163 }
2164
2165 if (ArgAttrs.hasAttribute(Attribute::StructRet)) {
2166 Check(!SawSRet, "Cannot have multiple 'sret' parameters!", V);
2167 Check(i == 0 || i == 1,
2168 "Attribute 'sret' is not on first or second parameter!", V);
2169 SawSRet = true;
2170 }
2171
2172 if (ArgAttrs.hasAttribute(Attribute::SwiftSelf)) {
2173 Check(!SawSwiftSelf, "Cannot have multiple 'swiftself' parameters!", V);
2174 SawSwiftSelf = true;
2175 }
2176
2177 if (ArgAttrs.hasAttribute(Attribute::SwiftAsync)) {
2178 Check(!SawSwiftAsync, "Cannot have multiple 'swiftasync' parameters!", V);
2179 SawSwiftAsync = true;
2180 }
2181
2182 if (ArgAttrs.hasAttribute(Attribute::SwiftError)) {
2183 Check(!SawSwiftError, "Cannot have multiple 'swifterror' parameters!", V);
2184 SawSwiftError = true;
2185 }
2186
2187 if (ArgAttrs.hasAttribute(Attribute::InAlloca)) {
2188 Check(i == FT->getNumParams() - 1,
2189 "inalloca isn't on the last parameter!", V);
2190 }
2191 }
2192
2193 if (!Attrs.hasFnAttrs())
2194 return;
2195
2196 verifyAttributeTypes(Attrs.getFnAttrs(), V);
2197 for (Attribute FnAttr : Attrs.getFnAttrs())
2198 Check(FnAttr.isStringAttribute() ||
2199 Attribute::canUseAsFnAttr(FnAttr.getKindAsEnum()),
2200 "Attribute '" + FnAttr.getAsString() +
2201 "' does not apply to functions!",
2202 V);
2203
2204 Check(!(Attrs.hasFnAttr(Attribute::NoInline) &&
2205 Attrs.hasFnAttr(Attribute::AlwaysInline)),
2206 "Attributes 'noinline and alwaysinline' are incompatible!", V);
2207
2208 if (Attrs.hasFnAttr(Attribute::OptimizeNone)) {
2209 Check(Attrs.hasFnAttr(Attribute::NoInline),
2210 "Attribute 'optnone' requires 'noinline'!", V);
2211
2212 Check(!Attrs.hasFnAttr(Attribute::OptimizeForSize),
2213 "Attributes 'optsize and optnone' are incompatible!", V);
2214
2215 Check(!Attrs.hasFnAttr(Attribute::MinSize),
2216 "Attributes 'minsize and optnone' are incompatible!", V);
2217
2218 Check(!Attrs.hasFnAttr(Attribute::OptimizeForDebugging),
2219 "Attributes 'optdebug and optnone' are incompatible!", V);
2220 }
2221
2222 if (Attrs.hasFnAttr(Attribute::OptimizeForDebugging)) {
2223 Check(!Attrs.hasFnAttr(Attribute::OptimizeForSize),
2224 "Attributes 'optsize and optdebug' are incompatible!", V);
2225
2226 Check(!Attrs.hasFnAttr(Attribute::MinSize),
2227 "Attributes 'minsize and optdebug' are incompatible!", V);
2228 }
2229
2230 Check(!Attrs.hasAttrSomewhere(Attribute::Writable) ||
2231 isModSet(Attrs.getMemoryEffects().getModRef(IRMemLocation::ArgMem)),
2232 "Attribute writable and memory without argmem: write are incompatible!",
2233 V);
2234
2235 if (Attrs.hasFnAttr("aarch64_pstate_sm_enabled")) {
2236 Check(!Attrs.hasFnAttr("aarch64_pstate_sm_compatible"),
2237 "Attributes 'aarch64_pstate_sm_enabled and "
2238 "aarch64_pstate_sm_compatible' are incompatible!",
2239 V);
2240 }
2241
2242 Check((Attrs.hasFnAttr("aarch64_new_za") + Attrs.hasFnAttr("aarch64_in_za") +
2243 Attrs.hasFnAttr("aarch64_inout_za") +
2244 Attrs.hasFnAttr("aarch64_out_za") +
2245 Attrs.hasFnAttr("aarch64_preserves_za")) <= 1,
2246 "Attributes 'aarch64_new_za', 'aarch64_in_za', 'aarch64_out_za', "
2247 "'aarch64_inout_za' and 'aarch64_preserves_za' are mutually exclusive",
2248 V);
2249
2250 Check(
2251 (Attrs.hasFnAttr("aarch64_new_zt0") + Attrs.hasFnAttr("aarch64_in_zt0") +
2252 Attrs.hasFnAttr("aarch64_inout_zt0") +
2253 Attrs.hasFnAttr("aarch64_out_zt0") +
2254 Attrs.hasFnAttr("aarch64_preserves_zt0")) <= 1,
2255 "Attributes 'aarch64_new_zt0', 'aarch64_in_zt0', 'aarch64_out_zt0', "
2256 "'aarch64_inout_zt0' and 'aarch64_preserves_zt0' are mutually exclusive",
2257 V);
2258
2259 if (Attrs.hasFnAttr(Attribute::JumpTable)) {
2260 const GlobalValue *GV = cast<GlobalValue>(V);
2262 "Attribute 'jumptable' requires 'unnamed_addr'", V);
2263 }
2264
2265 if (auto Args = Attrs.getFnAttrs().getAllocSizeArgs()) {
2266 auto CheckParam = [&](StringRef Name, unsigned ParamNo) {
2267 if (ParamNo >= FT->getNumParams()) {
2268 CheckFailed("'allocsize' " + Name + " argument is out of bounds", V);
2269 return false;
2270 }
2271
2272 if (!FT->getParamType(ParamNo)->isIntegerTy()) {
2273 CheckFailed("'allocsize' " + Name +
2274 " argument must refer to an integer parameter",
2275 V);
2276 return false;
2277 }
2278
2279 return true;
2280 };
2281
2282 if (!CheckParam("element size", Args->first))
2283 return;
2284
2285 if (Args->second && !CheckParam("number of elements", *Args->second))
2286 return;
2287 }
2288
2289 if (Attrs.hasFnAttr(Attribute::AllocKind)) {
2290 AllocFnKind K = Attrs.getAllocKind();
2291 AllocFnKind Type =
2292 K & (AllocFnKind::Alloc | AllocFnKind::Realloc | AllocFnKind::Free);
2293 if (!is_contained(
2294 {AllocFnKind::Alloc, AllocFnKind::Realloc, AllocFnKind::Free},
2295 Type))
2296 CheckFailed(
2297 "'allockind()' requires exactly one of alloc, realloc, and free");
2298 if ((Type == AllocFnKind::Free) &&
2299 ((K & (AllocFnKind::Uninitialized | AllocFnKind::Zeroed |
2300 AllocFnKind::Aligned)) != AllocFnKind::Unknown))
2301 CheckFailed("'allockind(\"free\")' doesn't allow uninitialized, zeroed, "
2302 "or aligned modifiers.");
2303 AllocFnKind ZeroedUninit = AllocFnKind::Uninitialized | AllocFnKind::Zeroed;
2304 if ((K & ZeroedUninit) == ZeroedUninit)
2305 CheckFailed("'allockind()' can't be both zeroed and uninitialized");
2306 }
2307
2308 if (Attrs.hasFnAttr(Attribute::VScaleRange)) {
2309 unsigned VScaleMin = Attrs.getFnAttrs().getVScaleRangeMin();
2310 if (VScaleMin == 0)
2311 CheckFailed("'vscale_range' minimum must be greater than 0", V);
2312 else if (!isPowerOf2_32(VScaleMin))
2313 CheckFailed("'vscale_range' minimum must be power-of-two value", V);
2314 std::optional<unsigned> VScaleMax = Attrs.getFnAttrs().getVScaleRangeMax();
2315 if (VScaleMax && VScaleMin > VScaleMax)
2316 CheckFailed("'vscale_range' minimum cannot be greater than maximum", V);
2317 else if (VScaleMax && !isPowerOf2_32(*VScaleMax))
2318 CheckFailed("'vscale_range' maximum must be power-of-two value", V);
2319 }
2320
2321 if (Attrs.hasFnAttr("frame-pointer")) {
2322 StringRef FP = Attrs.getFnAttr("frame-pointer").getValueAsString();
2323 if (FP != "all" && FP != "non-leaf" && FP != "none")
2324 CheckFailed("invalid value for 'frame-pointer' attribute: " + FP, V);
2325 }
2326
2327 // Check EVEX512 feature.
2328 if (MaxParameterWidth >= 512 && Attrs.hasFnAttr("target-features") &&
2329 TT.isX86()) {
2330 StringRef TF = Attrs.getFnAttr("target-features").getValueAsString();
2331 Check(!TF.contains("+avx512f") || !TF.contains("-evex512"),
2332 "512-bit vector arguments require 'evex512' for AVX512", V);
2333 }
2334
2335 checkUnsignedBaseTenFuncAttr(Attrs, "patchable-function-prefix", V);
2336 checkUnsignedBaseTenFuncAttr(Attrs, "patchable-function-entry", V);
2337 checkUnsignedBaseTenFuncAttr(Attrs, "warn-stack-size", V);
2338
2339 if (auto A = Attrs.getFnAttr("sign-return-address"); A.isValid()) {
2340 StringRef S = A.getValueAsString();
2341 if (S != "none" && S != "all" && S != "non-leaf")
2342 CheckFailed("invalid value for 'sign-return-address' attribute: " + S, V);
2343 }
2344
2345 if (auto A = Attrs.getFnAttr("sign-return-address-key"); A.isValid()) {
2346 StringRef S = A.getValueAsString();
2347 if (S != "a_key" && S != "b_key")
2348 CheckFailed("invalid value for 'sign-return-address-key' attribute: " + S,
2349 V);
2350 }
2351
2352 if (auto A = Attrs.getFnAttr("branch-target-enforcement"); A.isValid()) {
2353 StringRef S = A.getValueAsString();
2354 if (S != "true" && S != "false")
2355 CheckFailed(
2356 "invalid value for 'branch-target-enforcement' attribute: " + S, V);
2357 }
2358
2359 if (auto A = Attrs.getFnAttr("vector-function-abi-variant"); A.isValid()) {
2360 StringRef S = A.getValueAsString();
2361 const std::optional<VFInfo> Info = VFABI::tryDemangleForVFABI(S, FT);
2362 if (!Info)
2363 CheckFailed("invalid name for a VFABI variant: " + S, V);
2364 }
2365}
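// Illustrative declarations (examples only, not from this file; function
// names are made up) for two of the attribute checks above:
//   declare ptr @my_alloc(i64) allocsize(0)      ; index 0 is an integer param
//   declare void @my_kernel() vscale_range(2,16) ; both bounds are powers of two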
2366
2367void Verifier::verifyFunctionMetadata(
2368 ArrayRef<std::pair<unsigned, MDNode *>> MDs) {
2369 for (const auto &Pair : MDs) {
2370 if (Pair.first == LLVMContext::MD_prof) {
2371 MDNode *MD = Pair.second;
2372 Check(MD->getNumOperands() >= 2,
2373 "!prof annotations should have no less than 2 operands", MD);
2374
2375 // Check first operand.
2376 Check(MD->getOperand(0) != nullptr, "first operand should not be null",
2377 MD);
2378 Check(isa<MDString>(MD->getOperand(0)),
2379 "expected string with name of the !prof annotation", MD);
2380 MDString *MDS = cast<MDString>(MD->getOperand(0));
2381 StringRef ProfName = MDS->getString();
2382 Check(ProfName.equals("function_entry_count") ||
2383 ProfName.equals("synthetic_function_entry_count"),
2384 "first operand should be 'function_entry_count'"
2385 " or 'synthetic_function_entry_count'",
2386 MD);
2387
2388 // Check second operand.
2389 Check(MD->getOperand(1) != nullptr, "second operand should not be null",
2390 MD);
2391 Check(isa<ConstantAsMetadata>(MD->getOperand(1)),
2392 "expected integer argument to function_entry_count", MD);
2393 } else if (Pair.first == LLVMContext::MD_kcfi_type) {
2394 MDNode *MD = Pair.second;
2395 Check(MD->getNumOperands() == 1,
2396 "!kcfi_type must have exactly one operand", MD);
2397 Check(MD->getOperand(0) != nullptr, "!kcfi_type operand must not be null",
2398 MD);
2399 Check(isa<ConstantAsMetadata>(MD->getOperand(0)),
2400 "expected a constant operand for !kcfi_type", MD);
2401 Constant *C = cast<ConstantAsMetadata>(MD->getOperand(0))->getValue();
2402 Check(isa<ConstantInt>(C) && isa<IntegerType>(C->getType()),
2403 "expected a constant integer operand for !kcfi_type", MD);
2404 Check(cast<ConstantInt>(C)->getBitWidth() == 32,
2405 "expected a 32-bit integer constant operand for !kcfi_type", MD);
2406 }
2407 }
2408}
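// Illustrative IR (example only, not from this file) for the attachments
// checked above:
//   define void @f() !prof !0 !kcfi_type !1 {
//     ret void
//   }
//   !0 = !{!"function_entry_count", i64 100}
//   !1 = !{i32 12345678}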
2409
2410void Verifier::visitConstantExprsRecursively(const Constant *EntryC) {
2411 if (!ConstantExprVisited.insert(EntryC).second)
2412 return;
2413
2414 SmallVector<const Constant *, 16> Stack;
2415 Stack.push_back(EntryC);
2416
2417 while (!Stack.empty()) {
2418 const Constant *C = Stack.pop_back_val();
2419
2420 // Check this constant expression.
2421 if (const auto *CE = dyn_cast<ConstantExpr>(C))
2422 visitConstantExpr(CE);
2423
2424 if (const auto *GV = dyn_cast<GlobalValue>(C)) {
2425 // Global Values get visited separately, but we do need to make sure
2426 // that the global value is in the correct module
2427 Check(GV->getParent() == &M, "Referencing global in another module!",
2428 EntryC, &M, GV, GV->getParent());
2429 continue;
2430 }
2431
2432 // Visit all sub-expressions.
2433 for (const Use &U : C->operands()) {
2434 const auto *OpC = dyn_cast<Constant>(U);
2435 if (!OpC)
2436 continue;
2437 if (!ConstantExprVisited.insert(OpC).second)
2438 continue;
2439 Stack.push_back(OpC);
2440 }
2441 }
2442}
2443
2444void Verifier::visitConstantExpr(const ConstantExpr *CE) {
2445 if (CE->getOpcode() == Instruction::BitCast)
2446 Check(CastInst::castIsValid(Instruction::BitCast, CE->getOperand(0),
2447 CE->getType()),
2448 "Invalid bitcast", CE);
2449}
2450
2451bool Verifier::verifyAttributeCount(AttributeList Attrs, unsigned Params) {
2452 // There shouldn't be more attribute sets than there are parameters plus the
2453 // function and return value.
2454 return Attrs.getNumAttrSets() <= Params + 2;
2455}
2456
2457void Verifier::verifyInlineAsmCall(const CallBase &Call) {
2458 const InlineAsm *IA = cast<InlineAsm>(Call.getCalledOperand());
2459 unsigned ArgNo = 0;
2460 unsigned LabelNo = 0;
2461 for (const InlineAsm::ConstraintInfo &CI : IA->ParseConstraints()) {
2462 if (CI.Type == InlineAsm::isLabel) {
2463 ++LabelNo;
2464 continue;
2465 }
2466
2467 // Only deal with constraints that correspond to call arguments.
2468 if (!CI.hasArg())
2469 continue;
2470
2471 if (CI.isIndirect) {
2472 const Value *Arg = Call.getArgOperand(ArgNo);
2473 Check(Arg->getType()->isPointerTy(),
2474 "Operand for indirect constraint must have pointer type", &Call);
2475
2476 Check(Call.getParamElementType(ArgNo),
2477 "Operand for indirect constraint must have elementtype attribute",
2478 &Call);
2479 } else {
2480 Check(!Call.paramHasAttr(ArgNo, Attribute::ElementType),
2481 "Elementtype attribute can only be applied for indirect "
2482 "constraints",
2483 &Call);
2484 }
2485
2486 ArgNo++;
2487 }
2488
2489 if (auto *CallBr = dyn_cast<CallBrInst>(&Call)) {
2490 Check(LabelNo == CallBr->getNumIndirectDests(),
2491 "Number of label constraints does not match number of callbr dests",
2492 &Call);
2493 } else {
2494 Check(LabelNo == 0, "Label constraints can only be used with callbr",
2495 &Call);
2496 }
2497}
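// Illustrative IR (example only, not from this file): an indirect ("*m")
// constraint takes a pointer operand that must carry 'elementtype', e.g.
//   call void asm "movl $$1, $0", "=*m"(ptr elementtype(i32) %p)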
2498
2499/// Verify that statepoint intrinsic is well formed.
2500void Verifier::verifyStatepoint(const CallBase &Call) {
2501 assert(Call.getCalledFunction() &&
2502 Call.getCalledFunction()->getIntrinsicID() ==
2503 Intrinsic::experimental_gc_statepoint);
2504
2505 Check(!Call.doesNotAccessMemory() && !Call.onlyReadsMemory() &&
2506 !Call.onlyAccessesArgMemory(),
2507 "gc.statepoint must read and write all memory to preserve "
2508 "reordering restrictions required by safepoint semantics",
2509 Call);
2510
2511 const int64_t NumPatchBytes =
2512 cast<ConstantInt>(Call.getArgOperand(1))->getSExtValue();
2513 assert(isInt<32>(NumPatchBytes) && "NumPatchBytesV is an i32!");
2514 Check(NumPatchBytes >= 0,
2515 "gc.statepoint number of patchable bytes must be "
2516 "positive",
2517 Call);
2518
2519 Type *TargetElemType = Call.getParamElementType(2);
2520 Check(TargetElemType,
2521 "gc.statepoint callee argument must have elementtype attribute", Call);
2522 FunctionType *TargetFuncType = dyn_cast<FunctionType>(TargetElemType);
2523 Check(TargetFuncType,
2524 "gc.statepoint callee elementtype must be function type", Call);
2525
2526 const int NumCallArgs = cast<ConstantInt>(Call.getArgOperand(3))->getZExtValue();
2527 Check(NumCallArgs >= 0,
2528 "gc.statepoint number of arguments to underlying call "
2529 "must be positive",
2530 Call);
2531 const int NumParams = (int)TargetFuncType->getNumParams();
2532 if (TargetFuncType->isVarArg()) {
2533 Check(NumCallArgs >= NumParams,
2534 "gc.statepoint mismatch in number of vararg call args", Call);
2535
2536 // TODO: Remove this limitation
2537 Check(TargetFuncType->getReturnType()->isVoidTy(),
2538 "gc.statepoint doesn't support wrapping non-void "
2539 "vararg functions yet",
2540 Call);
2541 } else
2542 Check(NumCallArgs == NumParams,
2543 "gc.statepoint mismatch in number of call args", Call);
2544
2545 const uint64_t Flags
2546 = cast<ConstantInt>(Call.getArgOperand(4))->getZExtValue();
2547 Check((Flags & ~(uint64_t)StatepointFlags::MaskAll) == 0,
2548 "unknown flag used in gc.statepoint flags argument", Call);
2549
2550 // Verify that the types of the call parameter arguments match
2551 // the type of the wrapped callee.
2552 AttributeList Attrs = Call.getAttributes();
2553 for (int i = 0; i < NumParams; i++) {
2554 Type *ParamType = TargetFuncType->getParamType(i);
2555 Type *ArgType = Call.getArgOperand(5 + i)->getType();
2556 Check(ArgType == ParamType,
2557 "gc.statepoint call argument does not match wrapped "
2558 "function type",
2559 Call);
2560
2561 if (TargetFuncType->isVarArg()) {
2562 AttributeSet ArgAttrs = Attrs.getParamAttrs(5 + i);
2563 Check(!ArgAttrs.hasAttribute(Attribute::StructRet),
2564 "Attribute 'sret' cannot be used for vararg call arguments!", Call);
2565 }
2566 }
2567
2568 const int EndCallArgsInx = 4 + NumCallArgs;
2569
2570 const Value *NumTransitionArgsV = Call.getArgOperand(EndCallArgsInx + 1);
2571 Check(isa<ConstantInt>(NumTransitionArgsV),
2572 "gc.statepoint number of transition arguments "
2573 "must be constant integer",
2574 Call);
2575 const int NumTransitionArgs =
2576 cast<ConstantInt>(NumTransitionArgsV)->getZExtValue();
2577 Check(NumTransitionArgs == 0,
2578 "gc.statepoint w/inline transition bundle is deprecated", Call);
2579 const int EndTransitionArgsInx = EndCallArgsInx + 1 + NumTransitionArgs;
2580
2581 const Value *NumDeoptArgsV = Call.getArgOperand(EndTransitionArgsInx + 1);
2582 Check(isa<ConstantInt>(NumDeoptArgsV),
2583 "gc.statepoint number of deoptimization arguments "
2584 "must be constant integer",
2585 Call);
2586 const int NumDeoptArgs = cast<ConstantInt>(NumDeoptArgsV)->getZExtValue();
2587 Check(NumDeoptArgs == 0,
2588 "gc.statepoint w/inline deopt operands is deprecated", Call);
2589
2590 const int ExpectedNumArgs = 7 + NumCallArgs;
2591 Check(ExpectedNumArgs == (int)Call.arg_size(),
2592 "gc.statepoint too many arguments", Call);
2593
2594 // Check that the only uses of this gc.statepoint are gc.result or
2595 // gc.relocate calls which are tied to this statepoint and thus part
2596 // of the same statepoint sequence
2597 for (const User *U : Call.users()) {
2598 const CallInst *UserCall = dyn_cast<const CallInst>(U);
2599 Check(UserCall, "illegal use of statepoint token", Call, U);
2600 if (!UserCall)
2601 continue;
2602 Check(isa<GCRelocateInst>(UserCall) || isa<GCResultInst>(UserCall),
2603 "gc.result or gc.relocate are the only value uses "
2604 "of a gc.statepoint",
2605 Call, U);
2606 if (isa<GCResultInst>(UserCall)) {
2607 Check(UserCall->getArgOperand(0) == &Call,
2608 "gc.result connected to wrong gc.statepoint", Call, UserCall);
2609 } else if (isa<GCRelocateInst>(Call)) {
2610 Check(UserCall->getArgOperand(0) == &Call,
2611 "gc.relocate connected to wrong gc.statepoint", Call, UserCall);
2612 }
2613 }
2614
2615 // Note: It is legal for a single derived pointer to be listed multiple
2616 // times. It's non-optimal, but it is legal. It can also happen after
2617 // insertion if we strip a bitcast away.
2618 // Note: It is really tempting to check that each base is relocated and
2619 // that a derived pointer is never reused as a base pointer. This turns
2620 // out to be problematic since optimizations run after safepoint insertion
2621 // can recognize equality properties that the insertion logic doesn't know
2622 // about. See example statepoint.ll in the verifier subdirectory
2623}
2624
2625void Verifier::verifyFrameRecoverIndices() {
2626 for (auto &Counts : FrameEscapeInfo) {
2627 Function *F = Counts.first;
2628 unsigned EscapedObjectCount = Counts.second.first;
2629 unsigned MaxRecoveredIndex = Counts.second.second;
2630 Check(MaxRecoveredIndex <= EscapedObjectCount,
2631 "all indices passed to llvm.localrecover must be less than the "
2632 "number of arguments passed to llvm.localescape in the parent "
2633 "function",
2634 F);
2635 }
2636}
2637
2638static Instruction *getSuccPad(Instruction *Terminator) {
2639 BasicBlock *UnwindDest;
2640 if (auto *II = dyn_cast<InvokeInst>(Terminator))
2641 UnwindDest = II->getUnwindDest();
2642 else if (auto *CSI = dyn_cast<CatchSwitchInst>(Terminator))
2643 UnwindDest = CSI->getUnwindDest();
2644 else
2645 UnwindDest = cast<CleanupReturnInst>(Terminator)->getUnwindDest();
2646 return UnwindDest->getFirstNonPHI();
2647}
2648
2649void Verifier::verifySiblingFuncletUnwinds() {
2650 SmallPtrSet<Instruction *, 8> Visited;
2651 SmallPtrSet<Instruction *, 8> Active;
2652 for (const auto &Pair : SiblingFuncletInfo) {
2653 Instruction *PredPad = Pair.first;
2654 if (Visited.count(PredPad))
2655 continue;
2656 Active.insert(PredPad);
2657 Instruction *Terminator = Pair.second;
2658 do {
2659 Instruction *SuccPad = getSuccPad(Terminator);
2660 if (Active.count(SuccPad)) {
2661 // Found a cycle; report error
2662 Instruction *CyclePad = SuccPad;
2663 SmallVector<Instruction *, 8> CycleNodes;
2664 do {
2665 CycleNodes.push_back(CyclePad);
2666 Instruction *CycleTerminator = SiblingFuncletInfo[CyclePad];
2667 if (CycleTerminator != CyclePad)
2668 CycleNodes.push_back(CycleTerminator);
2669 CyclePad = getSuccPad(CycleTerminator);
2670 } while (CyclePad != SuccPad);
2671 Check(false, "EH pads can't handle each other's exceptions",
2672 ArrayRef<Instruction *>(CycleNodes));
2673 }
2674 // Don't re-walk a node we've already checked
2675 if (!Visited.insert(SuccPad).second)
2676 break;
2677 // Walk to this successor if it has a map entry.
2678 PredPad = SuccPad;
2679 auto TermI = SiblingFuncletInfo.find(PredPad);
2680 if (TermI == SiblingFuncletInfo.end())
2681 break;
2682 Terminator = TermI->second;
2683 Active.insert(PredPad);
2684 } while (true);
2685 // Each node only has one successor, so we've walked all the active
2686 // nodes' successors.
2687 Active.clear();
2688 }
2689}
2690
2691// visitFunction - Verify that a function is ok.
2692//
2693void Verifier::visitFunction(const Function &F) {
2694 visitGlobalValue(F);
2695
2696 // Check function arguments.
2697 FunctionType *FT = F.getFunctionType();
2698 unsigned NumArgs = F.arg_size();
2699
2700 Check(&Context == &F.getContext(),
2701 "Function context does not match Module context!", &F);
2702
2703 Check(!F.hasCommonLinkage(), "Functions may not have common linkage", &F);
2704 Check(FT->getNumParams() == NumArgs,
2705 "# formal arguments must match # of arguments for function type!", &F,
2706 FT);
2707 Check(F.getReturnType()->isFirstClassType() ||
2708 F.getReturnType()->isVoidTy() || F.getReturnType()->isStructTy(),
2709 "Functions cannot return aggregate values!", &F);
2710
2711 Check(!F.hasStructRetAttr() || F.getReturnType()->isVoidTy(),
2712 "Invalid struct return type!", &F);
2713
2714 AttributeList Attrs = F.getAttributes();
2715
2716 Check(verifyAttributeCount(Attrs, FT->getNumParams()),
2717 "Attribute after last parameter!", &F);
2718
2719 CheckDI(F.IsNewDbgInfoFormat == F.getParent()->IsNewDbgInfoFormat,
2720 "Function debug format should match parent module", &F,
2721 F.IsNewDbgInfoFormat, F.getParent(),
2722 F.getParent()->IsNewDbgInfoFormat);
2723
2724 bool IsIntrinsic = F.isIntrinsic();
2725
2726 // Check function attributes.
2727 verifyFunctionAttrs(FT, Attrs, &F, IsIntrinsic, /* IsInlineAsm */ false);
2728
2729 // On function declarations/definitions, we do not support the builtin
2730 // attribute. We do not check this in VerifyFunctionAttrs since that is
2731 // checking for Attributes that can/can not ever be on functions.
2732 Check(!Attrs.hasFnAttr(Attribute::Builtin),
2733 "Attribute 'builtin' can only be applied to a callsite.", &F);
2734
2735 Check(!Attrs.hasAttrSomewhere(Attribute::ElementType),
2736 "Attribute 'elementtype' can only be applied to a callsite.", &F);
2737
2738 // Check that this function meets the restrictions on this calling convention.
2739 // Sometimes varargs is used for perfectly forwarding thunks, so some of these
2740 // restrictions can be lifted.
2741 switch (F.getCallingConv()) {
2742 default:
2743 case CallingConv::C:
2744 break;
2745 case CallingConv::X86_INTR: {
2746 Check(F.arg_empty() || Attrs.hasParamAttr(0, Attribute::ByVal),
2747 "Calling convention parameter requires byval", &F);
2748 break;
2749 }
2750 case CallingConv::AMDGPU_KERNEL:
2751 case CallingConv::SPIR_KERNEL:
2752 case CallingConv::AMDGPU_CS_Chain:
2753 case CallingConv::AMDGPU_CS_ChainPreserve:
2754 Check(F.getReturnType()->isVoidTy(),
2755 "Calling convention requires void return type", &F);
2756 [[fallthrough]];
2757 case CallingConv::AMDGPU_VS:
2758 case CallingConv::AMDGPU_HS:
2759 case CallingConv::AMDGPU_GS:
2760 case CallingConv::AMDGPU_PS:
2761 case CallingConv::AMDGPU_CS:
2762 Check(!F.hasStructRetAttr(), "Calling convention does not allow sret", &F);
2763 if (F.getCallingConv() != CallingConv::SPIR_KERNEL) {
2764 const unsigned StackAS = DL.getAllocaAddrSpace();
2765 unsigned i = 0;
2766 for (const Argument &Arg : F.args()) {
2767 Check(!Attrs.hasParamAttr(i, Attribute::ByVal),
2768 "Calling convention disallows byval", &F);
2769 Check(!Attrs.hasParamAttr(i, Attribute::Preallocated),
2770 "Calling convention disallows preallocated", &F);
2771 Check(!Attrs.hasParamAttr(i, Attribute::InAlloca),
2772 "Calling convention disallows inalloca", &F);
2773
2774 if (Attrs.hasParamAttr(i, Attribute::ByRef)) {
2775 // FIXME: Should also disallow LDS and GDS, but we don't have the enum
2776 // value here.
2777 Check(Arg.getType()->getPointerAddressSpace() != StackAS,
2778 "Calling convention disallows stack byref", &F);
2779 }
2780
2781 ++i;
2782 }
2783 }
2784
2785 [[fallthrough]];
2786 case CallingConv::Fast:
2787 case CallingConv::Cold:
2788 case CallingConv::Intel_OCL_BI:
2789 case CallingConv::PTX_Kernel:
2790 case CallingConv::PTX_Device:
2791 Check(!F.isVarArg(),
2792 "Calling convention does not support varargs or "
2793 "perfect forwarding!",
2794 &F);
2795 break;
2796 }
2797
2798 // Check that the argument values match the function type for this function...
2799 unsigned i = 0;
2800 for (const Argument &Arg : F.args()) {
2801 Check(Arg.getType() == FT->getParamType(i),
2802 "Argument value does not match function argument type!", &Arg,
2803 FT->getParamType(i));
2804 Check(Arg.getType()->isFirstClassType(),
2805 "Function arguments must have first-class types!", &Arg);
2806 if (!IsIntrinsic) {
2807 Check(!Arg.getType()->isMetadataTy(),
2808 "Function takes metadata but isn't an intrinsic", &Arg, &F);
2809 Check(!Arg.getType()->isTokenTy(),
2810 "Function takes token but isn't an intrinsic", &Arg, &F);
2811 Check(!Arg.getType()->isX86_AMXTy(),
2812 "Function takes x86_amx but isn't an intrinsic", &Arg, &F);
2813 }
2814
2815 // Check that swifterror argument is only used by loads and stores.
2816 if (Attrs.hasParamAttr(i, Attribute::SwiftError)) {
2817 verifySwiftErrorValue(&Arg);
2818 }
2819 ++i;
2820 }
2821
2822 if (!IsIntrinsic) {
2823 Check(!F.getReturnType()->isTokenTy(),
2824 "Function returns a token but isn't an intrinsic", &F);
2825 Check(!F.getReturnType()->isX86_AMXTy(),
2826 "Function returns a x86_amx but isn't an intrinsic", &F);
2827 }
2828
2829 // Get the function metadata attachments.
2830 SmallVector<std::pair<unsigned, MDNode *>, 4> MDs;
2831 F.getAllMetadata(MDs);
2832 assert(F.hasMetadata() != MDs.empty() && "Bit out-of-sync");
2833 verifyFunctionMetadata(MDs);
2834
2835 // Check validity of the personality function
2836 if (F.hasPersonalityFn()) {
2837 auto *Per = dyn_cast<Function>(F.getPersonalityFn()->stripPointerCasts());
2838 if (Per)
2839 Check(Per->getParent() == F.getParent(),
2840 "Referencing personality function in another module!", &F,
2841 F.getParent(), Per, Per->getParent());
2842 }
2843
2844 // EH funclet coloring can be expensive, recompute on-demand
2845 BlockEHFuncletColors.clear();
2846
2847 if (F.isMaterializable()) {
2848 // Function has a body somewhere we can't see.
2849 Check(MDs.empty(), "unmaterialized function cannot have metadata", &F,
2850 MDs.empty() ? nullptr : MDs.front().second);
2851 } else if (F.isDeclaration()) {
2852 for (const auto &I : MDs) {
2853 // This is used for call site debug information.
2854 CheckDI(I.first != LLVMContext::MD_dbg ||
2855 !cast<DISubprogram>(I.second)->isDistinct(),
2856 "function declaration may only have a unique !dbg attachment",
2857 &F);
2858 Check(I.first != LLVMContext::MD_prof,
2859 "function declaration may not have a !prof attachment", &F);
2860
2861 // Verify the metadata itself.
2862 visitMDNode(*I.second, AreDebugLocsAllowed::Yes);
2863 }
2864 Check(!F.hasPersonalityFn(),
2865 "Function declaration shouldn't have a personality routine", &F);
2866 } else {
2867 // Verify that this function (which has a body) is not named "llvm.*". It
2868 // is not legal to define intrinsics.
2869 Check(!IsIntrinsic, "llvm intrinsics cannot be defined!", &F);
2870
2871 // Check the entry node
2872 const BasicBlock *Entry = &F.getEntryBlock();
2873 Check(pred_empty(Entry),
2874 "Entry block to function must not have predecessors!", Entry);
2875
2876 // The address of the entry block cannot be taken, unless it is dead.
2877 if (Entry->hasAddressTaken()) {
2878 Check(!BlockAddress::lookup(Entry)->isConstantUsed(),
2879 "blockaddress may not be used with the entry block!", Entry);
2880 }
2881
2882 unsigned NumDebugAttachments = 0, NumProfAttachments = 0,
2883 NumKCFIAttachments = 0;
2884 // Visit metadata attachments.
2885 for (const auto &I : MDs) {
2886 // Verify that the attachment is legal.
2887 auto AllowLocs = AreDebugLocsAllowed::No;
2888 switch (I.first) {
2889 default:
2890 break;
2891 case LLVMContext::MD_dbg: {
2892 ++NumDebugAttachments;
2893 CheckDI(NumDebugAttachments == 1,
2894 "function must have a single !dbg attachment", &F, I.second);
2895 CheckDI(isa<DISubprogram>(I.second),
2896 "function !dbg attachment must be a subprogram", &F, I.second);
2897 CheckDI(cast<DISubprogram>(I.second)->isDistinct(),
2898 "function definition may only have a distinct !dbg attachment",
2899 &F);
2900
2901 auto *SP = cast<DISubprogram>(I.second);
2902 const Function *&AttachedTo = DISubprogramAttachments[SP];
2903 CheckDI(!AttachedTo || AttachedTo == &F,
2904 "DISubprogram attached to more than one function", SP, &F);
2905 AttachedTo = &F;
2906 AllowLocs = AreDebugLocsAllowed::Yes;
2907 break;
2908 }
2909 case LLVMContext::MD_prof:
2910 ++NumProfAttachments;
2911 Check(NumProfAttachments == 1,
2912 "function must have a single !prof attachment", &F, I.second);
2913 break;
2914 case LLVMContext::MD_kcfi_type:
2915 ++NumKCFIAttachments;
2916 Check(NumKCFIAttachments == 1,
2917 "function must have a single !kcfi_type attachment", &F,
2918 I.second);
2919 break;
2920 }
2921
2922 // Verify the metadata itself.
2923 visitMDNode(*I.second, AllowLocs);
2924 }
2925 }
2926
2927 // If this function is actually an intrinsic, verify that it is only used in
2928 // direct call/invokes, never having its "address taken".
2929 // Only do this if the module is materialized, otherwise we don't have all the
2930 // uses.
2931 if (F.isIntrinsic() && F.getParent()->isMaterialized()) {
2932 const User *U;
2933 if (F.hasAddressTaken(&U, false, true, false,
2934 /*IgnoreARCAttachedCall=*/true))
2935 Check(false, "Invalid user of intrinsic instruction!", U);
2936 }
2937
2938 // Check intrinsics' signatures.
2939 switch (F.getIntrinsicID()) {
2940 case Intrinsic::experimental_gc_get_pointer_base: {
2941 FunctionType *FT = F.getFunctionType();
2942 Check(FT->getNumParams() == 1, "wrong number of parameters", F);
2943 Check(isa<PointerType>(F.getReturnType()),
2944 "gc.get.pointer.base must return a pointer", F);
2945 Check(FT->getParamType(0) == F.getReturnType(),
2946 "gc.get.pointer.base operand and result must be of the same type", F);
2947 break;
2948 }
2949 case Intrinsic::experimental_gc_get_pointer_offset: {
2950 FunctionType *FT = F.getFunctionType();
2951 Check(FT->getNumParams() == 1, "wrong number of parameters", F);
2952 Check(isa<PointerType>(FT->getParamType(0)),
2953 "gc.get.pointer.offset operand must be a pointer", F);
2954 Check(F.getReturnType()->isIntegerTy(),
2955 "gc.get.pointer.offset must return integer", F);
2956 break;
2957 }
2958 }
2959
2960 auto *N = F.getSubprogram();
2961 HasDebugInfo = (N != nullptr);
2962 if (!HasDebugInfo)
2963 return;
2964
2965 // Check that all !dbg attachments lead back to N.
2966 //
2967 // FIXME: Check this incrementally while visiting !dbg attachments.
2968 // FIXME: Only check when N is the canonical subprogram for F.
2969 SmallPtrSet<const MDNode *, 32> Seen;
2970 auto VisitDebugLoc = [&](const Instruction &I, const MDNode *Node) {
2971 // Be careful about using DILocation here since we might be dealing with
2972 // broken code (this is the Verifier after all).
2973 const DILocation *DL = dyn_cast_or_null<DILocation>(Node);
2974 if (!DL)
2975 return;
2976 if (!Seen.insert(DL).second)
2977 return;
2978
2979 Metadata *Parent = DL->getRawScope();
2980 CheckDI(Parent && isa<DILocalScope>(Parent),
2981 "DILocation's scope must be a DILocalScope", N, &F, &I, DL, Parent);
2982
2983 DILocalScope *Scope = DL->getInlinedAtScope();
2984 Check(Scope, "Failed to find DILocalScope", DL);
2985
2986 if (!Seen.insert(Scope).second)
2987 return;
2988
2989 DISubprogram *SP = Scope->getSubprogram();
2990
2991 // Scope and SP could be the same MDNode and we don't want to skip
2992 // validation in that case
2993 if (SP && ((Scope != SP) && !Seen.insert(SP).second))
2994 return;
2995
2996 CheckDI(SP->describes(&F),
2997 "!dbg attachment points at wrong subprogram for function", N, &F,
2998 &I, DL, Scope, SP);
2999 };
3000 for (auto &BB : F)
3001 for (auto &I : BB) {
3002 VisitDebugLoc(I, I.getDebugLoc().getAsMDNode());
3003 // The llvm.loop annotations also contain two DILocations.
3004 if (auto MD = I.getMetadata(LLVMContext::MD_loop))
3005 for (unsigned i = 1; i < MD->getNumOperands(); ++i)
3006 VisitDebugLoc(I, dyn_cast_or_null<MDNode>(MD->getOperand(i)));
3007 if (BrokenDebugInfo)
3008 return;
3009 }
3010}
3011
3012// verifyBasicBlock - Verify that a basic block is well formed...
3013//
3014void Verifier::visitBasicBlock(BasicBlock &BB) {
3015 InstsInThisBlock.clear();
3016 ConvergenceVerifyHelper.visit(BB);
3017
3018 // Ensure that basic blocks have terminators!
3019 Check(BB.getTerminator(), "Basic Block does not have terminator!", &BB);
3020
3021 // Check constraints that this basic block imposes on all of the PHI nodes in
3022 // it.
3023 if (isa<PHINode>(BB.front())) {
3024 SmallVector<BasicBlock *, 8> Preds(predecessors(&BB));
3025 SmallVector<std::pair<BasicBlock *, Value *>, 8> Values;
3026 llvm::sort(Preds);
3027 for (const PHINode &PN : BB.phis()) {
3028 Check(PN.getNumIncomingValues() == Preds.size(),
3029 "PHINode should have one entry for each predecessor of its "
3030 "parent basic block!",
3031 &PN);
3032
3033 // Get and sort all incoming values in the PHI node...
3034 Values.clear();
3035 Values.reserve(PN.getNumIncomingValues());
3036 for (unsigned i = 0, e = PN.getNumIncomingValues(); i != e; ++i)
3037 Values.push_back(
3038 std::make_pair(PN.getIncomingBlock(i), PN.getIncomingValue(i)));
3039 llvm::sort(Values);
3040
3041 for (unsigned i = 0, e = Values.size(); i != e; ++i) {
3042 // Check to make sure that if there is more than one entry for a
3043 // particular basic block in this PHI node, that the incoming values are
3044 // all identical.
3045 //
3046 Check(i == 0 || Values[i].first != Values[i - 1].first ||
3047 Values[i].second == Values[i - 1].second,
3048 "PHI node has multiple entries for the same basic block with "
3049 "different incoming values!",
3050 &PN, Values[i].first, Values[i].second, Values[i - 1].second);
3051
3052 // Check to make sure that the predecessors and PHI node entries are
3053 // matched up.
3054 Check(Values[i].first == Preds[i],
3055 "PHI node entries do not match predecessors!", &PN,
3056 Values[i].first, Preds[i]);
3057 }
3058 }
3059 }
3060
3061 // Check that all instructions have their parent pointers set up correctly.
3062 for (auto &I : BB)
3063 {
3064 Check(I.getParent() == &BB, "Instruction has bogus parent pointer!");
3065 }
3066
3067 CheckDI(BB.IsNewDbgInfoFormat == BB.getParent()->IsNewDbgInfoFormat,
3068 "BB debug format should match parent function", &BB,
3069 BB.IsNewDbgInfoFormat, BB.getParent(),
3070 BB.getParent()->IsNewDbgInfoFormat);
3071
3072 // Confirm that no issues arise from the debug program.
3073 if (BB.IsNewDbgInfoFormat)
3074 CheckDI(!BB.getTrailingDbgRecords(), "Basic Block has trailing DbgRecords!",
3075 &BB);
3076}
3077
3078void Verifier::visitTerminator(Instruction &I) {
3079 // Ensure that terminators only exist at the end of the basic block.
3080 Check(&I == I.getParent()->getTerminator(),
3081 "Terminator found in the middle of a basic block!", I.getParent());
3082 visitInstruction(I);
3083}
3084
3085void Verifier::visitBranchInst(BranchInst &BI) {
3086 if (BI.isConditional()) {
3088 "Branch condition is not 'i1' type!", &BI, BI.getCondition());
3089 }
3090 visitTerminator(BI);
3091}
3092
3093void Verifier::visitReturnInst(ReturnInst &RI) {
3094 Function *F = RI.getParent()->getParent();
3095 unsigned N = RI.getNumOperands();
3096 if (F->getReturnType()->isVoidTy())
3097 Check(N == 0,
3098 "Found return instr that returns non-void in Function of void "
3099 "return type!",
3100 &RI, F->getReturnType());
3101 else
3102 Check(N == 1 && F->getReturnType() == RI.getOperand(0)->getType(),
3103 "Function return type does not match operand "
3104 "type of return inst!",
3105 &RI, F->getReturnType());
3106
3107 // Check to make sure that the return value has necessary properties for
3108 // terminators...
3109 visitTerminator(RI);
3110}
3111
3112void Verifier::visitSwitchInst(SwitchInst &SI) {
3113 Check(SI.getType()->isVoidTy(), "Switch must have void result type!", &SI);
3114 // Check to make sure that all of the constants in the switch instruction
3115 // have the same type as the switched-on value.
3116 Type *SwitchTy = SI.getCondition()->getType();
3117 SmallPtrSet<ConstantInt *, 32> Constants;
3118 for (auto &Case : SI.cases()) {
3119 Check(isa<ConstantInt>(SI.getOperand(Case.getCaseIndex() * 2 + 2)),
3120 "Case value is not a constant integer.", &SI);
3121 Check(Case.getCaseValue()->getType() == SwitchTy,
3122 "Switch constants must all be same type as switch value!", &SI);
3123 Check(Constants.insert(Case.getCaseValue()).second,
3124 "Duplicate integer as switch case", &SI, Case.getCaseValue());
3125 }
3126
3127 visitTerminator(SI);
3128}
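// Example (illustrative sketch; %x and the labels are hypothetical): a switch
// accepted above -- each case constant has the switched value's type and no
// constant repeats:
//
//   switch i32 %x, label %default [ i32 0, label %a
//                                   i32 1, label %b ]
//
// A case written with the wrong type (e.g. i64 0) or a second i32 0 case is
// rejected.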
3129
3130void Verifier::visitIndirectBrInst(IndirectBrInst &BI) {
3132 "Indirectbr operand must have pointer type!", &BI);
3133 for (unsigned i = 0, e = BI.getNumDestinations(); i != e; ++i)
3135 "Indirectbr destinations must all have pointer type!", &BI);
3136
3137 visitTerminator(BI);
3138}
3139
3140void Verifier::visitCallBrInst(CallBrInst &CBI) {
3141 Check(CBI.isInlineAsm(), "Callbr is currently only used for asm-goto!", &CBI);
3142 const InlineAsm *IA = cast<InlineAsm>(CBI.getCalledOperand());
3143 Check(!IA->canThrow(), "Unwinding from Callbr is not allowed");
3144
3145 verifyInlineAsmCall(CBI);
3146 visitTerminator(CBI);
3147}
3148
3149void Verifier::visitSelectInst(SelectInst &SI) {
3150 Check(!SelectInst::areInvalidOperands(SI.getOperand(0), SI.getOperand(1),
3151 SI.getOperand(2)),
3152 "Invalid operands for select instruction!", &SI);
3153
3154 Check(SI.getTrueValue()->getType() == SI.getType(),
3155 "Select values must have same type as select instruction!", &SI);
3156 visitInstruction(SI);
3157}
3158
3159/// visitUserOp1 - User-defined operators shouldn't live beyond the lifetime
3160/// of a pass; if any exist, it's an error.
3161///
3162void Verifier::visitUserOp1(Instruction &I) {
3163 Check(false, "User-defined operators should not live outside of a pass!", &I);
3164}
3165
3166void Verifier::visitTruncInst(TruncInst &I) {
3167 // Get the source and destination types
3168 Type *SrcTy = I.getOperand(0)->getType();
3169 Type *DestTy = I.getType();
3170
3171 // Get the size of the types in bits, we'll need this later
3172 unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
3173 unsigned DestBitSize = DestTy->getScalarSizeInBits();
3174
3175 Check(SrcTy->isIntOrIntVectorTy(), "Trunc only operates on integer", &I);
3176 Check(DestTy->isIntOrIntVectorTy(), "Trunc only produces integer", &I);
3177 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(),
3178 "trunc source and destination must both be a vector or neither", &I);
3179 Check(SrcBitSize > DestBitSize, "DestTy too big for Trunc", &I);
3180
3181 visitInstruction(I);
3182}
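// Example (illustrative sketch; %x and %y are hypothetical): the integer cast
// size rules enforced here and in the zext/sext visitors below:
//
//   %a = trunc i32 %x to i8       ; ok: the source is strictly wider
//   %b = zext i8 %y to i32        ; ok: the destination is strictly wider
//
// 'trunc i8 %y to i32' (destination not smaller) or a cast between a vector
// and a scalar is rejected.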
3183
3184void Verifier::visitZExtInst(ZExtInst &I) {
3185 // Get the source and destination types
3186 Type *SrcTy = I.getOperand(0)->getType();
3187 Type *DestTy = I.getType();
3188
3189 // Get the size of the types in bits, we'll need this later
3190 Check(SrcTy->isIntOrIntVectorTy(), "ZExt only operates on integer", &I);
3191 Check(DestTy->isIntOrIntVectorTy(), "ZExt only produces an integer", &I);
3192 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(),
3193 "zext source and destination must both be a vector or neither", &I);
3194 unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
3195 unsigned DestBitSize = DestTy->getScalarSizeInBits();
3196
3197 Check(SrcBitSize < DestBitSize, "Type too small for ZExt", &I);
3198
3199 visitInstruction(I);
3200}
3201
3202void Verifier::visitSExtInst(SExtInst &I) {
3203 // Get the source and destination types
3204 Type *SrcTy = I.getOperand(0)->getType();
3205 Type *DestTy = I.getType();
3206
3207 // Get the size of the types in bits, we'll need this later
3208 unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
3209 unsigned DestBitSize = DestTy->getScalarSizeInBits();
3210
3211 Check(SrcTy->isIntOrIntVectorTy(), "SExt only operates on integer", &I);
3212 Check(DestTy->isIntOrIntVectorTy(), "SExt only produces an integer", &I);
3213 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(),
3214 "sext source and destination must both be a vector or neither", &I);
3215 Check(SrcBitSize < DestBitSize, "Type too small for SExt", &I);
3216
3217 visitInstruction(I);
3218}
3219
3220void Verifier::visitFPTruncInst(FPTruncInst &I) {
3221 // Get the source and destination types
3222 Type *SrcTy = I.getOperand(0)->getType();
3223 Type *DestTy = I.getType();
3224 // Get the size of the types in bits, we'll need this later
3225 unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
3226 unsigned DestBitSize = DestTy->getScalarSizeInBits();
3227
3228 Check(SrcTy->isFPOrFPVectorTy(), "FPTrunc only operates on FP", &I);
3229 Check(DestTy->isFPOrFPVectorTy(), "FPTrunc only produces an FP", &I);
3230 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(),
3231 "fptrunc source and destination must both be a vector or neither", &I);
3232 Check(SrcBitSize > DestBitSize, "DestTy too big for FPTrunc", &I);
3233
3234 visitInstruction(I);
3235}
3236
3237void Verifier::visitFPExtInst(FPExtInst &I) {
3238 // Get the source and destination types
3239 Type *SrcTy = I.getOperand(0)->getType();
3240 Type *DestTy = I.getType();
3241
3242 // Get the size of the types in bits, we'll need this later
3243 unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
3244 unsigned DestBitSize = DestTy->getScalarSizeInBits();
3245
3246 Check(SrcTy->isFPOrFPVectorTy(), "FPExt only operates on FP", &I);
3247 Check(DestTy->isFPOrFPVectorTy(), "FPExt only produces an FP", &I);
3248 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(),
3249 "fpext source and destination must both be a vector or neither", &I);
3250 Check(SrcBitSize < DestBitSize, "DestTy too small for FPExt", &I);
3251
3252 visitInstruction(I);
3253}
3254
3255void Verifier::visitUIToFPInst(UIToFPInst &I) {
3256 // Get the source and destination types
3257 Type *SrcTy = I.getOperand(0)->getType();
3258 Type *DestTy = I.getType();
3259
3260 bool SrcVec = SrcTy->isVectorTy();
3261 bool DstVec = DestTy->isVectorTy();
3262
3263 Check(SrcVec == DstVec,
3264 "UIToFP source and dest must both be vector or scalar", &I);
3265 Check(SrcTy->isIntOrIntVectorTy(),
3266 "UIToFP source must be integer or integer vector", &I);
3267 Check(DestTy->isFPOrFPVectorTy(), "UIToFP result must be FP or FP vector",
3268 &I);
3269
3270 if (SrcVec && DstVec)
3271 Check(cast<VectorType>(SrcTy)->getElementCount() ==
3272 cast<VectorType>(DestTy)->getElementCount(),
3273 "UIToFP source and dest vector length mismatch", &I);
3274
3275 visitInstruction(I);
3276}
3277
3278void Verifier::visitSIToFPInst(SIToFPInst &I) {
3279 // Get the source and destination types
3280 Type *SrcTy = I.getOperand(0)->getType();
3281 Type *DestTy = I.getType();
3282
3283 bool SrcVec = SrcTy->isVectorTy();
3284 bool DstVec = DestTy->isVectorTy();
3285
3286 Check(SrcVec == DstVec,
3287 "SIToFP source and dest must both be vector or scalar", &I);
3288 Check(SrcTy->isIntOrIntVectorTy(),
3289 "SIToFP source must be integer or integer vector", &I);
3290 Check(DestTy->isFPOrFPVectorTy(), "SIToFP result must be FP or FP vector",
3291 &I);
3292
3293 if (SrcVec && DstVec)
3294 Check(cast<VectorType>(SrcTy)->getElementCount() ==
3295 cast<VectorType>(DestTy)->getElementCount(),
3296 "SIToFP source and dest vector length mismatch", &I);
3297
3298 visitInstruction(I);
3299}
3300
3301void Verifier::visitFPToUIInst(FPToUIInst &I) {
3302 // Get the source and destination types
3303 Type *SrcTy = I.getOperand(0)->getType();
3304 Type *DestTy = I.getType();
3305
3306 bool SrcVec = SrcTy->isVectorTy();
3307 bool DstVec = DestTy->isVectorTy();
3308
3309 Check(SrcVec == DstVec,
3310 "FPToUI source and dest must both be vector or scalar", &I);
3311 Check(SrcTy->isFPOrFPVectorTy(), "FPToUI source must be FP or FP vector", &I);
3312 Check(DestTy->isIntOrIntVectorTy(),
3313 "FPToUI result must be integer or integer vector", &I);
3314
3315 if (SrcVec && DstVec)
3316 Check(cast<VectorType>(SrcTy)->getElementCount() ==
3317 cast<VectorType>(DestTy)->getElementCount(),
3318 "FPToUI source and dest vector length mismatch", &I);
3319
3320 visitInstruction(I);
3321}
3322
3323void Verifier::visitFPToSIInst(FPToSIInst &I) {
3324 // Get the source and destination types
3325 Type *SrcTy = I.getOperand(0)->getType();
3326 Type *DestTy = I.getType();
3327
3328 bool SrcVec = SrcTy->isVectorTy();
3329 bool DstVec = DestTy->isVectorTy();
3330
3331 Check(SrcVec == DstVec,
3332 "FPToSI source and dest must both be vector or scalar", &I);
3333 Check(SrcTy->isFPOrFPVectorTy(), "FPToSI source must be FP or FP vector", &I);
3334 Check(DestTy->isIntOrIntVectorTy(),
3335 "FPToSI result must be integer or integer vector", &I);
3336
3337 if (SrcVec && DstVec)
3338 Check(cast<VectorType>(SrcTy)->getElementCount() ==
3339 cast<VectorType>(DestTy)->getElementCount(),
3340 "FPToSI source and dest vector length mismatch", &I);
3341
3342 visitInstruction(I);
3343}
3344
3345void Verifier::visitPtrToIntInst(PtrToIntInst &I) {
3346 // Get the source and destination types
3347 Type *SrcTy = I.getOperand(0)->getType();
3348 Type *DestTy = I.getType();
3349
3350 Check(SrcTy->isPtrOrPtrVectorTy(), "PtrToInt source must be pointer", &I);
3351
3352 Check(DestTy->isIntOrIntVectorTy(), "PtrToInt result must be integral", &I);
3353 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(), "PtrToInt type mismatch",
3354 &I);
3355
3356 if (SrcTy->isVectorTy()) {
3357 auto *VSrc = cast<VectorType>(SrcTy);
3358 auto *VDest = cast<VectorType>(DestTy);
3359 Check(VSrc->getElementCount() == VDest->getElementCount(),
3360 "PtrToInt Vector width mismatch", &I);
3361 }
3362
3363 visitInstruction(I);
3364}
3365
3366void Verifier::visitIntToPtrInst(IntToPtrInst &I) {
3367 // Get the source and destination types
3368 Type *SrcTy = I.getOperand(0)->getType();
3369 Type *DestTy = I.getType();
3370
3371 Check(SrcTy->isIntOrIntVectorTy(), "IntToPtr source must be an integral", &I);
3372 Check(DestTy->isPtrOrPtrVectorTy(), "IntToPtr result must be a pointer", &I);
3373
3374 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(), "IntToPtr type mismatch",
3375 &I);
3376 if (SrcTy->isVectorTy()) {
3377 auto *VSrc = cast<VectorType>(SrcTy);
3378 auto *VDest = cast<VectorType>(DestTy);
3379 Check(VSrc->getElementCount() == VDest->getElementCount(),
3380 "IntToPtr Vector width mismatch", &I);
3381 }
3382 visitInstruction(I);
3383}
3384
3385void Verifier::visitBitCastInst(BitCastInst &I) {
3386 Check(
3387 CastInst::castIsValid(Instruction::BitCast, I.getOperand(0), I.getType()),
3388 "Invalid bitcast", &I);
3389 visitInstruction(I);
3390}
3391
3392void Verifier::visitAddrSpaceCastInst(AddrSpaceCastInst &I) {
3393 Type *SrcTy = I.getOperand(0)->getType();
3394 Type *DestTy = I.getType();
3395
3396 Check(SrcTy->isPtrOrPtrVectorTy(), "AddrSpaceCast source must be a pointer",
3397 &I);
3398 Check(DestTy->isPtrOrPtrVectorTy(), "AddrSpaceCast result must be a pointer",
3399 &I);
3401 "AddrSpaceCast must be between different address spaces", &I);
3402 if (auto *SrcVTy = dyn_cast<VectorType>(SrcTy))
3403 Check(SrcVTy->getElementCount() ==
3404 cast<VectorType>(DestTy)->getElementCount(),
3405 "AddrSpaceCast vector pointer number of elements mismatch", &I);
3406 visitInstruction(I);
3407}
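// Example (illustrative sketch; %p is hypothetical): pointer casts accepted
// by the ptrtoint/inttoptr/addrspacecast visitors above:
//
//   %i = ptrtoint ptr %p to i64
//   %q = addrspacecast ptr %p to ptr addrspace(1)
//
// 'addrspacecast ptr %p to ptr' is rejected because the source and
// destination share address space 0.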
3408
3409/// visitPHINode - Ensure that a PHI node is well formed.
3410///
3411void Verifier::visitPHINode(PHINode &PN) {
3412 // Ensure that the PHI nodes are all grouped together at the top of the block.
3413 // This can be tested by checking whether the instruction before this is
3414 // either nonexistent (because this is begin()) or is a PHI node. If not,
3415 // then there is some other instruction before a PHI.
3416 Check(&PN == &PN.getParent()->front() ||
3417 isa<PHINode>(--BasicBlock::iterator(&PN)),
3418 "PHI nodes not grouped at top of basic block!", &PN, PN.getParent());
3419
3420 // Check that a PHI doesn't yield a Token.
3421 Check(!PN.getType()->isTokenTy(), "PHI nodes cannot have token type!");
3422
3423 // Check that all of the values of the PHI node have the same type as the
3424 // result.
3425 for (Value *IncValue : PN.incoming_values()) {
3426 Check(PN.getType() == IncValue->getType(),
3427 "PHI node operands are not the same type as the result!", &PN);
3428 }
3429
3430 // All other PHI node constraints are checked in the visitBasicBlock method.
3431
3432 visitInstruction(PN);
3433}
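// Example (illustrative sketch; names are hypothetical): phis must form the
// leading group of their block, every incoming value must have the phi's
// type, and a phi may not produce a token:
//
//   join:
//     %a = phi i32 [ 0, %left ], [ 1, %right ]
//     %b = phi i32 [ 7, %left ], [ 9, %right ]
//     %s = add i32 %a, %b
//
// Placing another phi after %s, or giving %a an i64 incoming value, is
// rejected.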
3434
3435void Verifier::visitCallBase(CallBase &Call) {
3436 Check(Call.getCalledOperand()->getType()->isPointerTy(),
3437 "Called function must be a pointer!", Call);
3438 FunctionType *FTy = Call.getFunctionType();
3439
3440 // Verify that the correct number of arguments are being passed
3441 if (FTy->isVarArg())
3442 Check(Call.arg_size() >= FTy->getNumParams(),
3443 "Called function requires more parameters than were provided!", Call);
3444 else
3445 Check(Call.arg_size() == FTy->getNumParams(),
3446 "Incorrect number of arguments passed to called function!", Call);
3447
3448 // Verify that all arguments to the call match the function type.
3449 for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i)
3450 Check(Call.getArgOperand(i)->getType() == FTy->getParamType(i),
3451 "Call parameter type does not match function signature!",
3452 Call.getArgOperand(i), FTy->getParamType(i), Call);
3453
3454 AttributeList Attrs = Call.getAttributes();
3455
3456 Check(verifyAttributeCount(Attrs, Call.arg_size()),
3457 "Attribute after last parameter!", Call);
3458
3459 Function *Callee =
3460 dyn_cast<Function>(Call.getCalledOperand()->stripPointerCasts());
3461 bool IsIntrinsic = Callee && Callee->isIntrinsic();
3462 if (IsIntrinsic)
3463 Check(Callee->getValueType() == FTy,
3464 "Intrinsic called with incompatible signature", Call);
3465
3466 // Disallow calls to functions with the amdgpu_cs_chain[_preserve] calling
3467 // convention.
3468 auto CC = Call.getCallingConv();
3471 "Direct calls to amdgpu_cs_chain/amdgpu_cs_chain_preserve functions "
3472 "not allowed. Please use the @llvm.amdgpu.cs.chain intrinsic instead.",
3473 Call);
3474
3475 auto VerifyTypeAlign = [&](Type *Ty, const Twine &Message) {
3476 if (!Ty->isSized())
3477 return;
3478 Align ABIAlign = DL.getABITypeAlign(Ty);
3479 Align MaxAlign(ParamMaxAlignment);
3480 Check(ABIAlign <= MaxAlign,
3481 "Incorrect alignment of " + Message + " to called function!", Call);
3482 };
3483
3484 if (!IsIntrinsic) {
3485 VerifyTypeAlign(FTy->getReturnType(), "return type");
3486 for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i) {
3487 Type *Ty = FTy->getParamType(i);
3488 VerifyTypeAlign(Ty, "argument passed");
3489 }
3490 }
3491
3492 if (Attrs.hasFnAttr(Attribute::Speculatable)) {
3493 // Don't allow speculatable on call sites, unless the underlying function
3494 // declaration is also speculatable.
3495 Check(Callee && Callee->isSpeculatable(),
3496 "speculatable attribute may not apply to call sites", Call);
3497 }
3498
3499 if (Attrs.hasFnAttr(Attribute::Preallocated)) {
3500 Check(Call.getCalledFunction()->getIntrinsicID() ==
3501 Intrinsic::call_preallocated_arg,
3502 "preallocated as a call site attribute can only be on "
3503 "llvm.call.preallocated.arg");
3504 }
3505
3506 // Verify call attributes.
3507 verifyFunctionAttrs(FTy, Attrs, &Call, IsIntrinsic, Call.isInlineAsm());
3508
3509 // Conservatively check the inalloca argument.
3510 // We have a bug if we can find that there is an underlying alloca without
3511 // inalloca.
3512 if (Call.hasInAllocaArgument()) {
3513 Value *InAllocaArg = Call.getArgOperand(FTy->getNumParams() - 1);
3514 if (auto AI = dyn_cast<AllocaInst>(InAllocaArg->stripInBoundsOffsets()))
3515 Check(AI->isUsedWithInAlloca(),
3516 "inalloca argument for call has mismatched alloca", AI, Call);
3517 }
3518
3519 // For each argument of the callsite, if it has the swifterror attribute,
3520 // make sure the underlying alloca or parameter it comes from has swifterror
3521 // as well.
3522 for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i) {
3523 if (Call.paramHasAttr(i, Attribute::SwiftError)) {
3524 Value *SwiftErrorArg = Call.getArgOperand(i);
3525 if (auto AI = dyn_cast<AllocaInst>(SwiftErrorArg->stripInBoundsOffsets())) {
3526 Check(AI->isSwiftError(),
3527 "swifterror argument for call has mismatched alloca", AI, Call);
3528 continue;
3529 }
3530 auto ArgI = dyn_cast<Argument>(SwiftErrorArg);
3531 Check(ArgI, "swifterror argument should come from an alloca or parameter",
3532 SwiftErrorArg, Call);
3533 Check(ArgI->hasSwiftErrorAttr(),
3534 "swifterror argument for call has mismatched parameter", ArgI,
3535 Call);
3536 }
3537
3538 if (Attrs.hasParamAttr(i, Attribute::ImmArg)) {
3539 // Don't allow immarg on call sites, unless the underlying declaration
3540 // also has the matching immarg.
3541 Check(Callee && Callee->hasParamAttribute(i, Attribute::ImmArg),
3542 "immarg may not apply only to call sites", Call.getArgOperand(i),
3543 Call);
3544 }
3545
3546 if (Call.paramHasAttr(i, Attribute::ImmArg)) {
3547 Value *ArgVal = Call.getArgOperand(i);
3548 Check(isa<ConstantInt>(ArgVal) || isa<ConstantFP>(ArgVal),
3549 "immarg operand has non-immediate parameter", ArgVal, Call);
3550 }
3551
3552 if (Call.paramHasAttr(i, Attribute::Preallocated)) {
3553 Value *ArgVal = Call.getArgOperand(i);
3554 bool hasOB =
3555 Call.countOperandBundlesOfType(LLVMContext::OB_preallocated) != 0;
3556 bool isMustTail = Call.isMustTailCall();
3557 Check(hasOB != isMustTail,
3558 "preallocated operand either requires a preallocated bundle or "
3559 "the call to be musttail (but not both)",
3560 ArgVal, Call);
3561 }
3562 }
3563
3564 if (FTy->isVarArg()) {
3565 // FIXME? is 'nest' even legal here?
3566 bool SawNest = false;
3567 bool SawReturned = false;
3568
3569 for (unsigned Idx = 0; Idx < FTy->getNumParams(); ++Idx) {
3570 if (Attrs.hasParamAttr(Idx, Attribute::Nest))
3571 SawNest = true;
3572 if (Attrs.hasParamAttr(Idx, Attribute::Returned))
3573 SawReturned = true;
3574 }
3575
3576 // Check attributes on the varargs part.
3577 for (unsigned Idx = FTy->getNumParams(); Idx < Call.arg_size(); ++Idx) {
3578 Type *Ty = Call.getArgOperand(Idx)->getType();
3579 AttributeSet ArgAttrs = Attrs.getParamAttrs(Idx);
3580 verifyParameterAttrs(ArgAttrs, Ty, &Call);
3581
3582 if (ArgAttrs.hasAttribute(Attribute::Nest)) {
3583 Check(!SawNest, "More than one parameter has attribute nest!", Call);
3584 SawNest = true;
3585 }
3586
3587 if (ArgAttrs.hasAttribute(Attribute::Returned)) {
3588 Check(!SawReturned, "More than one parameter has attribute returned!",
3589 Call);
3590 Check(Ty->canLosslesslyBitCastTo(FTy->getReturnType()),
3591 "Incompatible argument and return types for 'returned' "
3592 "attribute",
3593 Call);
3594 SawReturned = true;
3595 }
3596
3597 // The statepoint intrinsic is vararg, but the wrapped function may not be.
3598 // Allow sret here and check the wrapped function in verifyStatepoint.
3599 if (!Call.getCalledFunction() ||
3600 Call.getCalledFunction()->getIntrinsicID() !=
3601 Intrinsic::experimental_gc_statepoint)
3602 Check(!ArgAttrs.hasAttribute(Attribute::StructRet),
3603 "Attribute 'sret' cannot be used for vararg call arguments!",
3604 Call);
3605
3606 if (ArgAttrs.hasAttribute(Attribute::InAlloca))
3607 Check(Idx == Call.arg_size() - 1,
3608 "inalloca isn't on the last argument!", Call);
3609 }
3610 }
3611
3612 // Verify that there's no metadata unless it's a direct call to an intrinsic.
3613 if (!IsIntrinsic) {
3614 for (Type *ParamTy : FTy->params()) {
3615 Check(!ParamTy->isMetadataTy(),
3616 "Function has metadata parameter but isn't an intrinsic", Call);
3617 Check(!ParamTy->isTokenTy(),
3618 "Function has token parameter but isn't an intrinsic", Call);
3619 }
3620 }
3621
3622 // Verify that indirect calls don't return tokens.
3623 if (!Call.getCalledFunction()) {
3624 Check(!FTy->getReturnType()->isTokenTy(),
3625 "Return type cannot be token for indirect call!");
3626 Check(!FTy->getReturnType()->isX86_AMXTy(),
3627 "Return type cannot be x86_amx for indirect call!");
3628 }
3629
3630 if (Function *F = Call.getCalledFunction())
3631 if (Intrinsic::ID ID = (Intrinsic::ID)F->getIntrinsicID())
3632 visitIntrinsicCall(ID, Call);
3633
3634 // Verify that a callsite has at most one operand bundle of each kind it
3635 // accepts: "deopt", "funclet", "gc-transition", "cfguardtarget",
3636 // "preallocated", "ptrauth", "kcfi", "gc-live", and "clang.arc.attachedcall".
3637 bool FoundDeoptBundle = false, FoundFuncletBundle = false,
3638 FoundGCTransitionBundle = false, FoundCFGuardTargetBundle = false,
3639 FoundPreallocatedBundle = false, FoundGCLiveBundle = false,
3640 FoundPtrauthBundle = false, FoundKCFIBundle = false,
3641 FoundAttachedCallBundle = false;
3642 for (unsigned i = 0, e = Call.getNumOperandBundles(); i < e; ++i) {
3643 OperandBundleUse BU = Call.getOperandBundleAt(i);
3644 uint32_t Tag = BU.getTagID();
3645 if (Tag == LLVMContext::OB_deopt) {
3646 Check(!FoundDeoptBundle, "Multiple deopt operand bundles", Call);
3647 FoundDeoptBundle = true;
3648 } else if (Tag == LLVMContext::OB_gc_transition) {
3649 Check(!FoundGCTransitionBundle, "Multiple gc-transition operand bundles",
3650 Call);
3651 FoundGCTransitionBundle = true;
3652 } else if (Tag == LLVMContext::OB_funclet) {
3653 Check(!FoundFuncletBundle, "Multiple funclet operand bundles", Call);
3654 FoundFuncletBundle = true;
3655 Check(BU.Inputs.size() == 1,
3656 "Expected exactly one funclet bundle operand", Call);
3657 Check(isa<FuncletPadInst>(BU.Inputs.front()),
3658 "Funclet bundle operands should correspond to a FuncletPadInst",
3659 Call);
3660 } else if (Tag == LLVMContext::OB_cfguardtarget) {
3661 Check(!FoundCFGuardTargetBundle, "Multiple CFGuardTarget operand bundles",
3662 Call);
3663 FoundCFGuardTargetBundle = true;
3664 Check(BU.Inputs.size() == 1,
3665 "Expected exactly one cfguardtarget bundle operand", Call);
3666 } else if (Tag == LLVMContext::OB_ptrauth) {
3667 Check(!FoundPtrauthBundle, "Multiple ptrauth operand bundles", Call);
3668 FoundPtrauthBundle = true;
3669 Check(BU.Inputs.size() == 2,
3670 "Expected exactly two ptrauth bundle operands", Call);
3671 Check(isa<ConstantInt>(BU.Inputs[0]) &&
3672 BU.Inputs[0]->getType()->isIntegerTy(32),
3673 "Ptrauth bundle key operand must be an i32 constant", Call);
3674 Check(BU.Inputs[1]->getType()->isIntegerTy(64),
3675 "Ptrauth bundle discriminator operand must be an i64", Call);
3676 } else if (Tag == LLVMContext::OB_kcfi) {
3677 Check(!FoundKCFIBundle, "Multiple kcfi operand bundles", Call);
3678 FoundKCFIBundle = true;
3679 Check(BU.Inputs.size() == 1, "Expected exactly one kcfi bundle operand",
3680 Call);
3681 Check(isa<ConstantInt>(BU.Inputs[0]) &&
3682 BU.Inputs[0]->getType()->isIntegerTy(32),
3683 "Kcfi bundle operand must be an i32 constant", Call);
3684 } else if (Tag == LLVMContext::OB_preallocated) {
3685 Check(!FoundPreallocatedBundle, "Multiple preallocated operand bundles",
3686 Call);
3687 FoundPreallocatedBundle = true;
3688 Check(BU.Inputs.size() == 1,
3689 "Expected exactly one preallocated bundle operand", Call);
3690 auto Input = dyn_cast<IntrinsicInst>(BU.Inputs.front());
3691 Check(Input &&
3692 Input->getIntrinsicID() == Intrinsic::call_preallocated_setup,
3693 "\"preallocated\" argument must be a token from "
3694 "llvm.call.preallocated.setup",
3695 Call);
3696 } else if (Tag == LLVMContext::OB_gc_live) {
3697 Check(!FoundGCLiveBundle, "Multiple gc-live operand bundles", Call);
3698 FoundGCLiveBundle = true;
3699 } else if (Tag == LLVMContext::OB_clang_arc_attachedcall) {
3700 Check(!FoundAttachedCallBundle,
3701 "Multiple \"clang.arc.attachedcall\" operand bundles", Call);
3702 FoundAttachedCallBundle = true;
3703 verifyAttachedCallBundle(Call, BU);
3704 }
3705 }
3706
3707 // Verify that callee and callsite agree on whether to use pointer auth.
3708 Check(!(Call.getCalledFunction() && FoundPtrauthBundle),
3709 "Direct call cannot have a ptrauth bundle", Call);
3710
3711 // Verify that each inlinable callsite of a debug-info-bearing function in a
3712 // debug-info-bearing function has a debug location attached to it. Failure to
3713 // do so causes assertion failures when the inliner sets up inline scope info.
3714 // (Interposable functions are not inlinable, nor are functions without
3715 // definitions.)
3716 if (Call.getFunction()->getSubprogram() && Call.getCalledFunction() &&
3717 !Call.getCalledFunction()->isInterposable() &&
3718 !Call.getCalledFunction()->isDeclaration() &&
3719 Call.getCalledFunction()->getSubprogram())
3720 CheckDI(Call.getDebugLoc(),
3721 "inlinable function call in a function with "
3722 "debug info must have a !dbg location",
3723 Call);
3724
3725 if (Call.isInlineAsm())
3726 verifyInlineAsmCall(Call);
3727
3728 ConvergenceVerifyHelper.visit(Call);
3729
3730 visitInstruction(Call);
3731}
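// Example (illustrative sketch; @callee and %pad are hypothetical): operand
// bundles on call sites, each of which may appear at most once per the loop
// above:
//
//   call void @callee() [ "deopt"(i32 0) ]
//   call void @callee() [ "funclet"(token %pad) ]
//
// Repeating a bundle tag on a single call, or passing a "funclet" operand
// that is not a funclet pad, is rejected.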
3732
3733void Verifier::verifyTailCCMustTailAttrs(const AttrBuilder &Attrs,
3734 StringRef Context) {
3735 Check(!Attrs.contains(Attribute::InAlloca),
3736 Twine("inalloca attribute not allowed in ") + Context);
3737 Check(!Attrs.contains(Attribute::InReg),
3738 Twine("inreg attribute not allowed in ") + Context);
3739 Check(!Attrs.contains(Attribute::SwiftError),
3740 Twine("swifterror attribute not allowed in ") + Context);
3741 Check(!Attrs.contains(Attribute::Preallocated),
3742 Twine("preallocated attribute not allowed in ") + Context);
3743 Check(!Attrs.contains(Attribute::ByRef),
3744 Twine("byref attribute not allowed in ") + Context);
3745}
3746
3747/// Two types are "congruent" if they are identical, or if they are both pointer
3748/// types with different pointee types and the same address space.
3749static bool isTypeCongruent(Type *L, Type *R) {
3750 if (L == R)
3751 return true;
3752 PointerType *PL = dyn_cast<PointerType>(L);
3753 PointerType *PR = dyn_cast<PointerType>(R);
3754 if (!PL || !PR)
3755 return false;
3756 return PL->getAddressSpace() == PR->getAddressSpace();
3757}
3758
3759 static AttrBuilder getParameterABIAttributes(LLVMContext &C, unsigned I, AttributeList Attrs) {
3760 static const Attribute::AttrKind ABIAttrs[] = {
3761 Attribute::StructRet, Attribute::ByVal, Attribute::InAlloca,
3762 Attribute::InReg, Attribute::StackAlignment, Attribute::SwiftSelf,
3763 Attribute::SwiftAsync, Attribute::SwiftError, Attribute::Preallocated,
3764 Attribute::ByRef};
3765 AttrBuilder Copy(C);
3766 for (auto AK : ABIAttrs) {
3767 Attribute Attr = Attrs.getParamAttrs(I).getAttribute(AK);
3768 if (Attr.isValid())
3769 Copy.addAttribute(Attr);
3770 }
3771
3772 // `align` is ABI-affecting only in combination with `byval` or `byref`.
3773 if (Attrs.hasParamAttr(I, Attribute::Alignment) &&
3774 (Attrs.hasParamAttr(I, Attribute::ByVal) ||
3775 Attrs.hasParamAttr(I, Attribute::ByRef)))
3776 Copy.addAlignmentAttr(Attrs.getParamAlignment(I));
3777 return Copy;
3778}
3779
3780void Verifier::verifyMustTailCall(CallInst &CI) {
3781 Check(!CI.isInlineAsm(), "cannot use musttail call with inline asm", &CI);
3782
3783 Function *F = CI.getParent()->getParent();
3784 FunctionType *CallerTy = F->getFunctionType();
3785 FunctionType *CalleeTy = CI.getFunctionType();
3786 Check(CallerTy->isVarArg() == CalleeTy->isVarArg(),
3787 "cannot guarantee tail call due to mismatched varargs", &CI);
3788 Check(isTypeCongruent(CallerTy->getReturnType(), CalleeTy->getReturnType()),
3789 "cannot guarantee tail call due to mismatched return types", &CI);
3790
3791 // - The calling conventions of the caller and callee must match.
3792 Check(F->getCallingConv() == CI.getCallingConv(),
3793 "cannot guarantee tail call due to mismatched calling conv", &CI);
3794
3795 // - The call must immediately precede a :ref:`ret <i_ret>` instruction,
3796 // or a pointer bitcast followed by a ret instruction.
3797 // - The ret instruction must return the (possibly bitcasted) value
3798 // produced by the call or void.
3799 Value *RetVal = &CI;
3800 Instruction *Next = CI.getNextNode();
3801
3802 // Handle the optional bitcast.
3803 if (BitCastInst *BI = dyn_cast_or_null<BitCastInst>(Next)) {
3804 Check(BI->getOperand(0) == RetVal,
3805 "bitcast following musttail call must use the call", BI);
3806 RetVal = BI;
3807 Next = BI->getNextNode();
3808 }
3809
3810 // Check the return.
3811 ReturnInst *Ret = dyn_cast_or_null<ReturnInst>(Next);
3812 Check(Ret, "musttail call must precede a ret with an optional bitcast", &CI);
3813 Check(!Ret->getReturnValue() || Ret->getReturnValue() == RetVal ||
3814 isa<UndefValue>(Ret->getReturnValue()),
3815 "musttail call result must be returned", Ret);
3816
3817 AttributeList CallerAttrs = F->getAttributes();
3818 AttributeList CalleeAttrs = CI.getAttributes();
3819 if (CI.getCallingConv() == CallingConv::SwiftTail ||
3820 CI.getCallingConv() == CallingConv::Tail) {
3821 StringRef CCName =
3822 CI.getCallingConv() == CallingConv::Tail ? "tailcc" : "swifttailcc";
3823
3824 // - Only sret, byval, swiftself, and swiftasync ABI-impacting attributes
3825 // are allowed in swifttailcc call
3826 for (unsigned I = 0, E = CallerTy->getNumParams(); I != E; ++I) {
3827 AttrBuilder ABIAttrs = getParameterABIAttributes(F->getContext(), I, CallerAttrs);
3828 SmallString<32> Context{CCName, StringRef(" musttail caller")};
3829 verifyTailCCMustTailAttrs(ABIAttrs, Context);
3830 }
3831 for (unsigned I = 0, E = CalleeTy->getNumParams(); I != E; ++I) {
3832 AttrBuilder ABIAttrs = getParameterABIAttributes(F->getContext(), I, CalleeAttrs);
3833 SmallString<32> Context{CCName, StringRef(" musttail callee")};
3834 verifyTailCCMustTailAttrs(ABIAttrs, Context);
3835 }
3836 // - Varargs functions are not allowed
3837 Check(!CallerTy->isVarArg(), Twine("cannot guarantee ") + CCName +
3838 " tail call for varargs function");
3839 return;
3840 }
3841
3842 // - The caller and callee prototypes must match. Pointer types of
3843 // parameters or return types may differ in pointee type, but not
3844 // address space.
3845 if (!CI.getCalledFunction() || !CI.getCalledFunction()->isIntrinsic()) {
3846 Check(CallerTy->getNumParams() == CalleeTy->getNumParams(),
3847 "cannot guarantee tail call due to mismatched parameter counts", &CI);
3848 for (unsigned I = 0, E = CallerTy->getNumParams(); I != E; ++I) {
3849 Check(
3850 isTypeCongruent(CallerTy->getParamType(I), CalleeTy->getParamType(I)),
3851 "cannot guarantee tail call due to mismatched parameter types", &CI);
3852 }
3853 }
3854
3855 // - All ABI-impacting function attributes, such as sret, byval, inreg,
3856 // returned, preallocated, and inalloca, must match.
3857 for (unsigned I = 0, E = CallerTy->getNumParams(); I != E; ++I) {
3858 AttrBuilder CallerABIAttrs = getParameterABIAttributes(F->getContext(), I, CallerAttrs);
3859 AttrBuilder CalleeABIAttrs = getParameterABIAttributes(F->getContext(), I, CalleeAttrs);
3860 Check(CallerABIAttrs == CalleeABIAttrs,
3861 "cannot guarantee tail call due to mismatched ABI impacting "
3862 "function attributes",
3863 &CI, CI.getOperand(I));
3864 }
3865}
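// Example (illustrative sketch; @inner is hypothetical): the musttail shape
// accepted above -- the call immediately precedes a ret that returns the
// call's result, and the caller and callee prototypes are congruent:
//
//   define i32 @outer(i32 %x) {
//     %r = musttail call i32 @inner(i32 %x)
//     ret i32 %r
//   }
//
// Inserting another instruction between the call and the ret, or returning a
// different value, is rejected.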
3866
3867void Verifier::visitCallInst(CallInst &CI) {
3868 visitCallBase(CI);
3869
3870 if (CI.isMustTailCall())
3871 verifyMustTailCall(CI);
3872}
3873
3874void Verifier::visitInvokeInst(InvokeInst &II) {
3875 visitCallBase(II);
3876
3877 // Verify that the first non-PHI instruction of the unwind destination is an
3878 // exception handling instruction.
3879 Check(
3880 II.getUnwindDest()->isEHPad(),
3881 "The unwind destination does not have an exception handling instruction!",
3882 &II);
3883
3884 visitTerminator(II);
3885}
3886
3887/// visitUnaryOperator - Check the argument to the unary operator.
3888///
3889void Verifier::visitUnaryOperator(UnaryOperator &U) {
3890 Check(U.getType() == U.getOperand(0)->getType(),
3891 "Unary operators must have same type for"
3892 "operands and result!",
3893 &U);
3894
3895 switch (U.getOpcode()) {
3896 // Check that floating-point arithmetic operators are only used with
3897 // floating-point operands.
3898 case Instruction::FNeg:
3899 Check(U.getType()->isFPOrFPVectorTy(),
3900 "FNeg operator only works with float types!", &U);
3901 break;
3902 default:
3903 llvm_unreachable("Unknown UnaryOperator opcode!");
3904 }
3905
3906 visitInstruction(U);
3907}
3908
3909/// visitBinaryOperator - Check that both arguments to the binary operator are
3910/// of the same type!
3911///
3912void Verifier::visitBinaryOperator(BinaryOperator &B) {
3913 Check(B.getOperand(0)->getType() == B.getOperand(1)->getType(),
3914 "Both operands to a binary operator are not of the same type!", &B);
3915
3916 switch (B.getOpcode()) {
3917 // Check that integer arithmetic operators are only used with
3918 // integral operands.
3919 case Instruction::Add:
3920 case Instruction::Sub:
3921 case Instruction::Mul:
3922 case Instruction::SDiv:
3923 case Instruction::UDiv:
3924 case Instruction::SRem:
3925 case Instruction::URem:
3926 Check(B.getType()->isIntOrIntVectorTy(),
3927 "Integer arithmetic operators only work with integral types!", &B);
3928 Check(B.getType() == B.getOperand(0)->getType(),
3929 "Integer arithmetic operators must have same type "
3930 "for operands and result!",
3931 &B);
3932 break;
3933 // Check that floating-point arithmetic operators are only used with
3934 // floating-point operands.
3935 case Instruction::FAdd:
3936 case Instruction::FSub:
3937 case Instruction::FMul:
3938 case Instruction::FDiv:
3939 case Instruction::FRem:
3940 Check(B.getType()->isFPOrFPVectorTy(),
3941 "Floating-point arithmetic operators only work with "
3942 "floating-point types!",
3943 &B);
3944 Check(B.getType() == B.getOperand(0)->getType(),
3945 "Floating-point arithmetic operators must have same type "
3946 "for operands and result!",
3947 &B);
3948 break;
3949 // Check that logical operators are only used with integral operands.
3950 case Instruction::And:
3951 case Instruction::Or:
3952 case Instruction::Xor:
3953 Check(B.getType()->isIntOrIntVectorTy(),
3954 "Logical operators only work with integral types!", &B);
3955 Check(B.getType() == B.getOperand(0)->getType(),
3956 "Logical operators must have same type for operands and result!", &B);
3957 break;
3958 case Instruction::Shl:
3959 case Instruction::LShr:
3960 case Instruction::AShr:
3961 Check(B.getType()->isIntOrIntVectorTy(),
3962 "Shifts only work with integral types!", &B);
3963 Check(B.getType() == B.getOperand(0)->getType(),
3964 "Shift return type must be same as operands!", &B);
3965 break;
3966 default:
3967 llvm_unreachable("Unknown BinaryOperator opcode!");
3968 }
3969
3970 visitInstruction(B);
3971}
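// Example (illustrative sketch; the operand names are hypothetical): the
// operand/result type pairings enforced above:
//
//   %a = add i32 %x, %y         ; integer arithmetic on integers
//   %b = fadd float %f, %g      ; FP arithmetic on FP values
//   %c = shl <4 x i32> %v, %w   ; shifts on integers or integer vectors
//
// 'add float %f, %g' or 'and double %f, %g' is rejected.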
3972
3973void Verifier::visitICmpInst(ICmpInst &IC) {
3974 // Check that the operands are the same type
3975 Type *Op0Ty = IC.getOperand(0)->getType();
3976 Type *Op1Ty = IC.getOperand(1)->getType();
3977 Check(Op0Ty == Op1Ty,
3978 "Both operands to ICmp instruction are not of the same type!", &IC);
3979 // Check that the operands are the right type
3980 Check(Op0Ty->isIntOrIntVectorTy() || Op0Ty->isPtrOrPtrVectorTy(),
3981 "Invalid operand types for ICmp instruction", &IC);
3982 // Check that the predicate is valid.
3983 Check(IC.isIntPredicate(), "Invalid predicate in ICmp instruction!", &IC);
3984
3985 visitInstruction(IC);
3986}
3987
3988void Verifier::visitFCmpInst(FCmpInst &FC) {
3989 // Check that the operands are the same type
3990 Type *Op0Ty = FC.getOperand(0)->getType();
3991 Type *Op1Ty = FC.getOperand(1)->getType();
3992 Check(Op0Ty == Op1Ty,
3993 "Both operands to FCmp instruction are not of the same type!", &FC);
3994 // Check that the operands are the right type
3995 Check(Op0Ty->isFPOrFPVectorTy(), "Invalid operand types for FCmp instruction",
3996 &FC);
3997 // Check that the predicate is valid.
3998 Check(FC.isFPPredicate(), "Invalid predicate in FCmp instruction!", &FC);
3999
4000 visitInstruction(FC);
4001}
4002
4003void Verifier::visitExtractElementInst(ExtractElementInst &EI) {
4005 "Invalid extractelement operands!", &EI);
4006 visitInstruction(EI);
4007}
4008
4009void Verifier::visitInsertElementInst(InsertElementInst &IE) {
4010 Check(InsertElementInst::isValidOperands(IE.getOperand(0), IE.getOperand(1),
4011 IE.getOperand(2)),
4012 "Invalid insertelement operands!", &IE);
4013 visitInstruction(IE);
4014}
4015
4016void Verifier::visitShuffleVectorInst(ShuffleVectorInst &SV) {
4017 Check(ShuffleVectorInst::isValidOperands(SV.getOperand(0), SV.getOperand(1),
4018 SV.getShuffleMask()),
4019 "Invalid shufflevector operands!", &SV);
4020 visitInstruction(SV);
4021}
4022
4023void Verifier::visitGetElementPtrInst(GetElementPtrInst &GEP) {
4024 Type *TargetTy = GEP.getPointerOperandType()->getScalarType();
4025
4026 Check(isa<PointerType>(TargetTy),
4027 "GEP base pointer is not a vector or a vector of pointers", &GEP);
4028 Check(GEP.getSourceElementType()->isSized(), "GEP into unsized type!", &GEP);
4029
4030 if (auto *STy = dyn_cast<StructType>(GEP.getSourceElementType())) {
4031 SmallPtrSet<Type *, 4> Visited;
4032 Check(!STy->containsScalableVectorType(&Visited),
4033 "getelementptr cannot target structure that contains scalable vector"
4034 "type",
4035 &GEP);
4036 }
4037
4038 SmallVector<Value *, 16> Idxs(GEP.indices());
4039 Check(
4040 all_of(Idxs, [](Value *V) { return V->getType()->isIntOrIntVectorTy(); }),
4041 "GEP indexes must be integers", &GEP);
4042 Type *ElTy =
4043 GetElementPtrInst::getIndexedType(GEP.getSourceElementType(), Idxs);
4044 Check(ElTy, "Invalid indices for GEP pointer type!", &GEP);
4045
4046 Check(GEP.getType()->isPtrOrPtrVectorTy() &&
4047 GEP.getResultElementType() == ElTy,
4048 "GEP is not of right type for indices!", &GEP, ElTy);
4049
4050 if (auto *GEPVTy = dyn_cast<VectorType>(GEP.getType())) {
4051 // Additional checks for vector GEPs.
4052 ElementCount GEPWidth = GEPVTy->getElementCount();
4053 if (GEP.getPointerOperandType()->isVectorTy())
4054 Check(
4055 GEPWidth ==
4056 cast<VectorType>(GEP.getPointerOperandType())->getElementCount(),
4057 "Vector GEP result width doesn't match operand's", &GEP);
4058 for (Value *Idx : Idxs) {
4059 Type *IndexTy = Idx->getType();
4060 if (auto *IndexVTy = dyn_cast<VectorType>(IndexTy)) {
4061 ElementCount IndexWidth = IndexVTy->getElementCount();
4062 Check(IndexWidth == GEPWidth, "Invalid GEP index vector width", &GEP);
4063 }
4064 Check(IndexTy->isIntOrIntVectorTy(),
4065 "All GEP indices should be of integer type");
4066 }
4067 }
4068
4069 if (auto *PTy = dyn_cast<PointerType>(GEP.getType())) {
4070 Check(GEP.getAddressSpace() == PTy->getAddressSpace(),
4071 "GEP address space doesn't match type", &GEP);
4072 }
4073
4074 visitInstruction(GEP);
4075}
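// Example (illustrative sketch; %base, %bases, and %offs are hypothetical):
// GEPs accepted above, including the vector-width rule:
//
//   %p = getelementptr i32, ptr %base, i64 %i
//   %v = getelementptr i32, <4 x ptr> %bases, <4 x i64> %offs
//
// A vector GEP whose index vector width differs from its pointer vector
// width, or a GEP with a non-integer index, is rejected.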
4076
4077static bool isContiguous(const ConstantRange &A, const ConstantRange &B) {
4078 return A.getUpper() == B.getLower() || A.getLower() == B.getUpper();
4079}
4080
4081/// Verify !range and !absolute_symbol metadata. These have the same
4082/// restrictions, except !absolute_symbol allows the full set.
4083void Verifier::verifyRangeMetadata(const Value &I, const MDNode *Range,
4084 Type *Ty, bool IsAbsoluteSymbol) {
4085 unsigned NumOperands = Range->getNumOperands();
4086 Check(NumOperands % 2 == 0, "Unfinished range!", Range);
4087 unsigned NumRanges = NumOperands / 2;
4088 Check(NumRanges >= 1, "It should have at least one range!", Range);
4089
4090 ConstantRange LastRange(1, true); // Dummy initial value
4091 for (unsigned i = 0; i < NumRanges; ++i) {
4092 ConstantInt *Low =
4093 mdconst::dyn_extract<ConstantInt>(Range->getOperand(2 * i));
4094 Check(Low, "The lower limit must be an integer!", Low);
4095 ConstantInt *High =
4096 mdconst::dyn_extract<ConstantInt>(Range->getOperand(2 * i + 1));
4097 Check(High, "The upper limit must be an integer!", High);
4098 Check(High->getType() == Low->getType() &&
4099 High->getType() == Ty->getScalarType(),
4100 "Range types must match instruction type!", &I);
4101
4102 APInt HighV = High->getValue();
4103 APInt LowV = Low->getValue();
4104
4105 // ConstantRange asserts if the ranges are the same except for the min/max
4106 // value. Leave the cases it tolerates for the empty range error below.
4107 Check(LowV != HighV || LowV.isMaxValue() || LowV.isMinValue(),
4108 "The upper and lower limits cannot be the same value", &I);
4109
4110 ConstantRange CurRange(LowV, HighV);
4111 Check(!CurRange.isEmptySet() && (IsAbsoluteSymbol || !CurRange.isFullSet()),
4112 "Range must not be empty!", Range);
4113 if (i != 0) {
4114 Check(CurRange.intersectWith(LastRange).isEmptySet(),
4115 "Intervals are overlapping", Range);
4116 Check(LowV.sgt(LastRange.getLower()), "Intervals are not in order",
4117 Range);
4118 Check(!isContiguous(CurRange, LastRange), "Intervals are contiguous",
4119 Range);
4120 }
4121 LastRange = ConstantRange(LowV, HighV);
4122 }
4123 if (NumRanges > 2) {
4124 APInt FirstLow =
4125 mdconst::dyn_extract<ConstantInt>(Range->getOperand(0))->getValue();
4126 APInt FirstHigh =
4127 mdconst::dyn_extract<ConstantInt>(Range->getOperand(1))->getValue();
4128 ConstantRange FirstRange(FirstLow, FirstHigh);
4129 Check(FirstRange.intersectWith(LastRange).isEmptySet(),
4130 "Intervals are overlapping", Range);
4131 Check(!isContiguous(FirstRange, LastRange), "Intervals are contiguous",
4132 Range);
4133 }
4134}
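// Example (illustrative sketch; %p is hypothetical): !range metadata that
// satisfies the rules above -- the limits match the value type and the
// intervals are non-empty, ordered, non-overlapping, and not contiguous:
//
//   %v = load i8, ptr %p, !range !0
//   !0 = !{i8 0, i8 2, i8 4, i8 6}      ; the ranges [0,2) and [4,6)
//
// '!{i8 0, i8 2, i8 2, i8 4}' is rejected as contiguous, and '!{i8 3, i8 3}'
// because its limits are equal.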
4135
4136void Verifier::visitRangeMetadata(Instruction &I, MDNode *Range, Type *Ty) {
4137 assert(Range && Range == I.getMetadata(LLVMContext::MD_range) &&
4138 "precondition violation");
4139 verifyRangeMetadata(I, Range, Ty, false);
4140}
4141
4142void Verifier::checkAtomicMemAccessSize(Type *Ty, const Instruction *I) {
4143 unsigned Size = DL.getTypeSizeInBits(Ty);
4144 Check(Size >= 8, "atomic memory access' size must be byte-sized", Ty, I);
4145 Check(!(Size & (Size - 1)),
4146 "atomic memory access' operand must have a power-of-two size", Ty, I);
4147}
4148
4149void Verifier::visitLoadInst(LoadInst &LI) {
4150 PointerType *PTy = dyn_cast<PointerType>(LI.getOperand(0)->getType());
4151 Check(PTy, "Load operand must be a pointer.", &LI);
4152 Type *ElTy = LI.getType();
4153 if (MaybeAlign A = LI.getAlign()) {
4154 Check(A->value() <= Value::MaximumAlignment,
4155 "huge alignment values are unsupported", &LI);
4156 }
4157 Check(ElTy->isSized(), "loading unsized types is not allowed", &LI);
4158 if (LI.isAtomic()) {
4161 "Load cannot have Release ordering", &LI);
4162 Check(ElTy->isIntOrPtrTy() || ElTy->isFloatingPointTy(),
4163 "atomic load operand must have integer, pointer, or floating point "
4164 "type!",
4165 ElTy, &LI);
4166 checkAtomicMemAccessSize(ElTy, &LI);
4167 } else {
4169 "Non-atomic load cannot have SynchronizationScope specified", &LI);
4170 }
4171
4172 visitInstruction(LI);
4173}
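// Example (illustrative sketch; %p is hypothetical): load orderings relative
// to the checks above:
//
//   %a = load atomic i32, ptr %p acquire, align 4
//   %b = load atomic i32, ptr %p seq_cst, align 4
//
// 'load atomic i32, ptr %p release, align 4' is rejected because loads cannot
// have release ordering, and an atomic load of a type that is not a
// power-of-two number of bytes is rejected by checkAtomicMemAccessSize.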
4174
4175void Verifier::visitStoreInst(StoreInst &SI) {
4176 PointerType *PTy = dyn_cast<PointerType>(SI.getOperand(1)->getType());
4177 Check(PTy, "Store operand must be a pointer.", &SI);
4178 Type *ElTy = SI.getOperand(0)->getType();
4179 if (MaybeAlign A = SI.getAlign()) {
4180 Check(A->value() <= Value::MaximumAlignment,
4181 "huge alignment values are unsupported", &SI);
4182 }
4183 Check(ElTy->isSized(), "storing unsized types is not allowed", &SI);
4184 if (SI.isAtomic()) {
4185 Check(SI.getOrdering() != AtomicOrdering::Acquire &&
4186 SI.getOrdering() != AtomicOrdering::AcquireRelease,
4187 "Store cannot have Acquire ordering", &SI);
4188 Check(ElTy->isIntOrPtrTy() || ElTy->isFloatingPointTy(),
4189 "atomic store operand must have integer, pointer, or floating point "
4190 "type!",
4191 ElTy, &SI);
4192 checkAtomicMemAccessSize(ElTy, &SI);
4193 } else {
4194 Check(SI.getSyncScopeID() == SyncScope::System,
4195 "Non-atomic store cannot have SynchronizationScope specified", &SI);
4196 }
4197 visitInstruction(SI);
4198}
4199
4200/// Check that SwiftErrorVal is used as a swifterror argument in CS.
4201void Verifier::verifySwiftErrorCall(CallBase &Call,
4202 const Value *SwiftErrorVal) {
4203 for (const auto &I : llvm::enumerate(Call.args())) {
4204 if (I.value() == SwiftErrorVal) {
4205 Check(Call.paramHasAttr(I.index(), Attribute::SwiftError),
4206 "swifterror value when used in a callsite should be marked "
4207 "with swifterror attribute",
4208 SwiftErrorVal, Call);
4209 }
4210 }
4211}
4212
4213void Verifier::verifySwiftErrorValue(const Value *SwiftErrorVal) {
4214 // Check that swifterror value is only used by loads, stores, or as
4215 // a swifterror argument.
4216 for (const User *U : SwiftErrorVal->users()) {
4217 Check(isa<LoadInst>(U) || isa<StoreInst>(U) || isa<CallInst>(U) ||
4218 isa<InvokeInst>(U),
4219 "swifterror value can only be loaded and stored from, or "
4220 "as a swifterror argument!",
4221 SwiftErrorVal, U);
4222 // If it is used by a store, check it is the second operand.
4223 if (auto StoreI = dyn_cast<StoreInst>(U))
4224 Check(StoreI->getOperand(1) == SwiftErrorVal,
4225 "swifterror value should be the second operand when used "
4226 "by stores",
4227 SwiftErrorVal, U);
4228 if (auto *Call = dyn_cast<CallBase>(U))
4229 verifySwiftErrorCall(*const_cast<CallBase *>(Call), SwiftErrorVal);
4230 }
4231}
4232
4233void Verifier::visitAllocaInst(AllocaInst &AI) {
4234 SmallPtrSet<Type*, 4> Visited;
4235 Check(AI.getAllocatedType()->isSized(&Visited),
4236 "Cannot allocate unsized type", &AI);
4238 "Alloca array size must have integer type", &AI);
4239 if (MaybeAlign A = AI.getAlign()) {
4240 Check(A->value() <= Value::MaximumAlignment,
4241 "huge alignment values are unsupported", &AI);
4242 }
4243
4244 if (AI.isSwiftError()) {
4246 "swifterror alloca must have pointer type", &AI);
4248 "swifterror alloca must not be array allocation", &AI);
4249 verifySwiftErrorValue(&AI);
4250 }
4251
4252 visitInstruction(AI);
4253}
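// Example (illustrative sketch; the names are hypothetical): allocas accepted
// above, including the swifterror form:
//
//   %buf = alloca [16 x i8], align 4
//   %err = alloca swifterror ptr
//
// 'alloca swifterror i32' (non-pointer element type) or
// 'alloca swifterror ptr, i32 %n' (array allocation) is rejected.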
4254
4255void Verifier::visitAtomicCmpXchgInst(AtomicCmpXchgInst &CXI) {
4256 Type *ElTy = CXI.getOperand(1)->getType();
4257 Check(ElTy->isIntOrPtrTy(),
4258 "cmpxchg operand must have integer or pointer type", ElTy, &CXI);
4259 checkAtomicMemAccessSize(ElTy, &CXI);
4260 visitInstruction(CXI);
4261}
4262
4263void Verifier::visitAtomicRMWInst(AtomicRMWInst &RMWI) {
4265 "atomicrmw instructions cannot be unordered.", &RMWI);
4266 auto Op = RMWI.getOperation();
4267 Type *ElTy = RMWI.getOperand(1)->getType();
4268 if (Op == AtomicRMWInst::Xchg) {
4269 Check(ElTy->isIntegerTy() || ElTy->isFloatingPointTy() ||
4270 ElTy->isPointerTy(),
4271 "atomicrmw " + AtomicRMWInst::getOperationName(Op) +
4272 " operand must have integer or floating point type!",
4273 &RMWI, ElTy);
4274 } else if (AtomicRMWInst::isFPOperation(Op)) {
4275 Check(ElTy->isFPOrFPVectorTy() && !isa<ScalableVectorType>(ElTy),
4276 "atomicrmw " + AtomicRMWInst::getOperationName(Op) +
4277 " operand must have floating-point or fixed vector of floating-point "
4278 "type!",
4279 &RMWI, ElTy);
4280 } else {
4281 Check(ElTy->isIntegerTy(),
4282 "atomicrmw " + AtomicRMWInst::getOperationName(Op) +
4283 " operand must have integer type!",
4284 &RMWI, ElTy);
4285 }
4286 checkAtomicMemAccessSize(ElTy, &RMWI);
4288 "Invalid binary operation!", &RMWI);
4289 visitInstruction(RMWI);
4290}
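// Example (illustrative sketch; %p is hypothetical): atomicrmw operations and
// the operand-type rules above:
//
//   %old = atomicrmw add ptr %p, i32 1 seq_cst        ; integer operation
//   %f   = atomicrmw fadd ptr %p, float 1.0 seq_cst   ; floating-point operation
//   %x   = atomicrmw xchg ptr %p, i64 0 monotonic     ; xchg: int, FP, or pointer
//
// 'atomicrmw add ptr %p, float 1.0 seq_cst' is rejected because 'add' needs
// an integer operand, and no atomicrmw may be unordered.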
4291
4292void Verifier::visitFenceInst(FenceInst &FI) {
4293 const AtomicOrdering Ordering = FI.getOrdering();
4294 Check(Ordering == AtomicOrdering::Acquire ||
4295 Ordering == AtomicOrdering::Release ||
4296 Ordering == AtomicOrdering::AcquireRelease ||
4298 "fence instructions may only have acquire, release, acq_rel, or "
4299 "seq_cst ordering.",
4300 &FI);
4301 visitInstruction(FI);
4302}
4303
4304void Verifier::visitExtractValueInst(ExtractValueInst &EVI) {
4305 Check(ExtractValueInst::getIndexedType(EVI.getAggregateOperand()->getType(),
4306 EVI.getIndices()) == EVI.getType(),
4307 "Invalid ExtractValueInst operands!", &EVI);
4308
4309 visitInstruction(EVI);
4310}
4311
4312void Verifier::visitInsertValueInst(InsertValueInst &IVI) {
4313 Check(ExtractValueInst::getIndexedType(IVI.getOperand(0)->getType(),
4314 IVI.getIndices()) ==
4315 IVI.getOperand(1)->getType(),
4316 "Invalid InsertValueInst operands!", &IVI);
4317
4318 visitInstruction(IVI);
4319}
4320
4321static Value *getParentPad(Value *EHPad) {
4322 if (auto *FPI = dyn_cast<FuncletPadInst>(EHPad))
4323 return FPI->getParentPad();
4324
4325 return cast<CatchSwitchInst>(EHPad)->getParentPad();
4326}
4327
4328void Verifier::visitEHPadPredecessors(Instruction &I) {
4329 assert(I.isEHPad());
4330
4331 BasicBlock *BB = I.getParent();
4332 Function *F = BB->getParent();
4333
4334 Check(BB != &F->getEntryBlock(), "EH pad cannot be in entry block.", &I);
4335
4336 if (auto *LPI = dyn_cast<LandingPadInst>(&I)) {
4337 // The landingpad instruction defines its parent as a landing pad block. The
4338 // landing pad block may be branched to only by the unwind edge of an
4339 // invoke.
4340 for (BasicBlock *PredBB : predecessors(BB)) {
4341 const auto *II = dyn_cast<InvokeInst>(PredBB->getTerminator());
4342 Check(II && II->getUnwindDest() == BB && II->getNormalDest() != BB,
4343 "Block containing LandingPadInst must be jumped to "
4344 "only by the unwind edge of an invoke.",
4345 LPI);
4346 }
4347 return;
4348 }
4349 if (auto *CPI = dyn_cast<CatchPadInst>(&I)) {
4350 if (!pred_empty(BB))
4351 Check(BB->getUniquePredecessor() == CPI->getCatchSwitch()->getParent(),
4352 "Block containg CatchPadInst must be jumped to "
4353 "only by its catchswitch.",
4354 CPI);
4355 Check(BB != CPI->getCatchSwitch()->getUnwindDest(),
4356 "Catchswitch cannot unwind to one of its catchpads",
4357 CPI->getCatchSwitch(), CPI);
4358 return;
4359 }
4360
4361 // Verify that each pred has a legal terminator with a legal to/from EH
4362 // pad relationship.
4363 Instruction *ToPad = &I;
4364 Value *ToPadParent = getParentPad(ToPad);
4365 for (BasicBlock *PredBB : predecessors(BB)) {
4366 Instruction *TI = PredBB->getTerminator();
4367 Value *FromPad;
4368 if (auto *II = dyn_cast<InvokeInst>(TI)) {
4369 Check(II->getUnwindDest() == BB && II->getNormalDest() != BB,
4370 "EH pad must be jumped to via an unwind edge", ToPad, II);
4371 auto *CalledFn =
4372 dyn_cast<Function>(II->getCalledOperand()->stripPointerCasts());
4373 if (CalledFn && CalledFn->isIntrinsic() && II->doesNotThrow() &&
4374 !IntrinsicInst::mayLowerToFunctionCall(CalledFn->getIntrinsicID()))
4375 continue;
4376 if (auto Bundle = II->getOperandBundle(LLVMContext::OB_funclet))
4377 FromPad = Bundle->Inputs[0];
4378 else
4379 FromPad = ConstantTokenNone::get(II->getContext());
4380 } else if (auto *CRI = dyn_cast<CleanupReturnInst>(TI)) {
4381 FromPad = CRI->getOperand(0);
4382 Check(FromPad != ToPadParent, "A cleanupret must exit its cleanup", CRI);
4383 } else if (auto *CSI = dyn_cast<CatchSwitchInst>(TI)) {
4384 FromPad = CSI;
4385 } else {
4386 Check(false, "EH pad must be jumped to via an unwind edge", ToPad, TI);
4387 }
4388
4389 // The edge may exit from zero or more nested pads.
4390 SmallSet<Value *, 8> Seen;
4391 for (;; FromPad = getParentPad(FromPad)) {
4392 Check(FromPad != ToPad,
4393 "EH pad cannot handle exceptions raised within it", FromPad, TI);
4394 if (FromPad == ToPadParent) {
4395 // This is a legal unwind edge.
4396 break;
4397 }
4398 Check(!isa<ConstantTokenNone>(FromPad),
4399 "A single unwind edge may only enter one EH pad", TI);
4400 Check(Seen.insert(FromPad).second, "EH pad jumps through a cycle of pads",
4401 FromPad);
4402
4403 // This will be diagnosed on the corresponding instruction already. We
4404 // need the extra check here to make sure getParentPad() works.
4405 Check(isa<FuncletPadInst>(FromPad) || isa<CatchSwitchInst>(FromPad),
4406 "Parent pad must be catchpad/cleanuppad/catchswitch", TI);
4407 }
4408 }
4409}
4410
4411void Verifier::visitLandingPadInst(LandingPadInst &LPI) {
4412 // The landingpad instruction is ill-formed if it doesn't have any clauses and
4413 // isn't a cleanup.
4414 Check(LPI.getNumClauses() > 0 || LPI.isCleanup(),
4415 "LandingPadInst needs at least one clause or to be a cleanup.", &LPI);
4416
4417 visitEHPadPredecessors(LPI);
4418
4419 if (!LandingPadResultTy)
4420 LandingPadResultTy = LPI.getType();
4421 else
4422 Check(LandingPadResultTy == LPI.getType(),
4423 "The landingpad instruction should have a consistent result type "
4424 "inside a function.",
4425 &LPI);
4426
4427 Function *F = LPI.getParent()->getParent();
4428 Check(F->hasPersonalityFn(),
4429 "LandingPadInst needs to be in a function with a personality.", &LPI);
4430
4431 // The landingpad instruction must be the first non-PHI instruction in the
4432 // block.
4433 Check(LPI.getParent()->getLandingPadInst() == &LPI,
4434 "LandingPadInst not the first non-PHI instruction in the block.", &LPI);
4435
4436 for (unsigned i = 0, e = LPI.getNumClauses(); i < e; ++i) {
4437 Constant *Clause = LPI.getClause(i);
4438 if (LPI.isCatch(i)) {
4439 Check(isa<PointerType>(Clause->getType()),
4440 "Catch operand does not have pointer type!", &LPI);
4441 } else {
4442 Check(LPI.isFilter(i), "Clause is neither catch nor filter!", &LPI);
4443 Check(isa<ConstantArray>(Clause) || isa<ConstantAggregateZero>(Clause),
4444 "Filter operand is not an array of constants!", &LPI);
4445 }
4446 }
4447
4448 visitInstruction(LPI);
4449}
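// Example (illustrative sketch; @g and the usual C++ personality symbol are
// assumptions): a landing pad that satisfies the checks above -- the function
// has a personality, the landingpad is the first non-PHI instruction of its
// block and is reached only through an invoke's unwind edge, and it carries a
// clause or is a cleanup:
//
//   define void @f() personality ptr @__gxx_personality_v0 {
//   entry:
//     invoke void @g() to label %cont unwind label %lpad
//   cont:
//     ret void
//   lpad:
//     %lp = landingpad { ptr, i32 } cleanup
//     resume { ptr, i32 } %lp
//   }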
4450
4451void Verifier::visitResumeInst(ResumeInst &RI) {
4453 "ResumeInst needs to be in a function with a personality.", &RI);
4454
4455 if (!LandingPadResultTy)
4456 LandingPadResultTy = RI.getValue()->getType();
4457 else
4458 Check(LandingPadResultTy == RI.getValue()->getType(),
4459 "The resume instruction should have a consistent result type "
4460 "inside a function.",
4461 &RI);
4462
4463 visitTerminator(RI);
4464}
4465
4466void Verifier::visitCatchPadInst(CatchPadInst &CPI) {
4467 BasicBlock *BB = CPI.getParent();
4468
4469 Function *F = BB->getParent();
4470 Check(F->hasPersonalityFn(),
4471 "CatchPadInst needs to be in a function with a personality.", &CPI);
4472
4473 Check(isa<CatchSwitchInst>(CPI.getParentPad()),
4474 "CatchPadInst needs to be directly nested in a CatchSwitchInst.",
4475 CPI.getParentPad());
4476
4477 // The catchpad instruction must be the first non-PHI instruction in the
4478 // block.
4479 Check(BB->getFirstNonPHI() == &CPI,
4480 "CatchPadInst not the first non-PHI instruction in the block.", &CPI);
4481
4482 visitEHPadPredecessors(CPI);
4483 visitFuncletPadInst(CPI);
4484}
4485
4486void Verifier::visitCatchReturnInst(CatchReturnInst &CatchReturn) {
4487 Check(isa<CatchPadInst>(CatchReturn.getOperand(0)),
4488 "CatchReturnInst needs to be provided a CatchPad", &CatchReturn,
4489 CatchReturn.getOperand(0));
4490
4491 visitTerminator(CatchReturn);
4492}
4493
4494void Verifier::visitCleanupPadInst(CleanupPadInst &CPI) {
4495 BasicBlock *BB = CPI.getParent();
4496
4497 Function *F = BB->getParent();
4498 Check(F->hasPersonalityFn(),
4499 "CleanupPadInst needs to be in a function with a personality.", &CPI);
4500
4501 // The cleanuppad instruction must be the first non-PHI instruction in the
4502 // block.
4503 Check(BB->getFirstNonPHI() == &CPI,
4504 "CleanupPadInst not the first non-PHI instruction in the block.", &CPI);
4505
4506 auto *ParentPad = CPI.getParentPad();
4507 Check(isa<ConstantTokenNone>(ParentPad) || isa<FuncletPadInst>(ParentPad),
4508 "CleanupPadInst has an invalid parent.", &CPI);
4509
4510 visitEHPadPredecessors(CPI);
4511 visitFuncletPadInst(CPI);
4512}
4513
4514void Verifier::visitFuncletPadInst(FuncletPadInst &FPI) {
4515 User *FirstUser = nullptr;
4516 Value *FirstUnwindPad = nullptr;
4517 SmallVector<FuncletPadInst *, 8> Worklist({&FPI});
4518 SmallSet<FuncletPadInst *, 8> Seen;
4519
4520 while (!Worklist.empty()) {
4521 FuncletPadInst *CurrentPad = Worklist.pop_back_val();
4522 Check(Seen.insert(CurrentPad).second,
4523 "FuncletPadInst must not be nested within itself", CurrentPad);
4524 Value *UnresolvedAncestorPad = nullptr;
4525 for (User *U : CurrentPad->users()) {
4526 BasicBlock *UnwindDest;
4527 if (auto *CRI = dyn_cast<CleanupReturnInst>(U)) {
4528 UnwindDest = CRI->getUnwindDest();
4529 } else if (auto *CSI = dyn_cast<CatchSwitchInst>(U)) {
4530 // We allow catchswitch unwind to caller to nest
4531 // within an outer pad that unwinds somewhere else,
4532 // because catchswitch doesn't have a nounwind variant.
4533 // See e.g. SimplifyCFGOpt::SimplifyUnreachable.
4534 if (CSI->unwindsToCaller())
4535 continue;
4536 UnwindDest = CSI->getUnwindDest();
4537 } else if (auto *II = dyn_cast<InvokeInst>(U)) {
4538 UnwindDest = II->getUnwindDest();
4539 } else if (isa<CallInst>(U)) {
4540 // Calls which don't unwind may be found inside funclet
4541 // pads that unwind somewhere else. We don't *require*
4542 // such calls to be annotated nounwind.
4543 continue;
4544 } else if (auto *CPI = dyn_cast<CleanupPadInst>(U)) {
4545 // The unwind dest for a cleanup can only be found by
4546 // recursive search. Add it to the worklist, and we'll
4547 // search for its first use that determines where it unwinds.
4548 Worklist.push_back(CPI);
4549 continue;
4550 } else {
4551 Check(isa<CatchReturnInst>(U), "Bogus funclet pad use", U);
4552 continue;
4553 }
4554
4555 Value *UnwindPad;
4556 bool ExitsFPI;
4557 if (UnwindDest) {
4558 UnwindPad = UnwindDest->getFirstNonPHI();
4559 if (!cast<Instruction>(UnwindPad)->isEHPad())
4560 continue;
4561 Value *UnwindParent = getParentPad(UnwindPad);
4562 // Ignore unwind edges that don't exit CurrentPad.
4563 if (UnwindParent == CurrentPad)
4564 continue;
4565 // Determine whether the original funclet pad is exited,
4566 // and if we are scanning nested pads determine how many
4567 // of them are exited so we can stop searching their
4568 // children.
4569 Value *ExitedPad = CurrentPad;
4570 ExitsFPI = false;
4571 do {
4572 if (ExitedPad == &FPI) {
4573 ExitsFPI = true;
4574 // Now we can resolve any ancestors of CurrentPad up to
4575 // FPI, but not including FPI since we need to make sure
4576 // to check all direct users of FPI for consistency.
4577 UnresolvedAncestorPad = &FPI;
4578 break;
4579 }
4580 Value *ExitedParent = getParentPad(ExitedPad);
4581 if (ExitedParent == UnwindParent) {
4582 // ExitedPad is the ancestor-most pad which this unwind
4583 // edge exits, so we can resolve up to it, meaning that
4584 // ExitedParent is the first ancestor still unresolved.
4585 UnresolvedAncestorPad = ExitedParent;
4586 break;
4587 }
4588 ExitedPad = ExitedParent;
4589 } while (!isa<ConstantTokenNone>(ExitedPad));
4590 } else {
4591 // Unwinding to caller exits all pads.
4592 UnwindPad = ConstantTokenNone::get(FPI.getContext());
4593 ExitsFPI = true;
4594 UnresolvedAncestorPad = &FPI;
4595 }
4596
4597 if (ExitsFPI) {
4598 // This unwind edge exits FPI. Make sure it agrees with other
4599 // such edges.
4600 if (FirstUser) {
4601 Check(UnwindPad == FirstUnwindPad,
4602 "Unwind edges out of a funclet "
4603 "pad must have the same unwind "
4604 "dest",
4605 &FPI, U, FirstUser);
4606 } else {
4607 FirstUser = U;
4608 FirstUnwindPad = UnwindPad;
4609 // Record cleanup sibling unwinds for verifySiblingFuncletUnwinds
4610 if (isa<CleanupPadInst>(&FPI) && !isa<ConstantTokenNone>(UnwindPad) &&
4611 getParentPad(UnwindPad) == getParentPad(&FPI))
4612 SiblingFuncletInfo[&FPI] = cast<Instruction>(U);
4613 }
4614 }
4615 // Make sure we visit all uses of FPI, but for nested pads stop as
4616 // soon as we know where they unwind to.
4617 if (CurrentPad != &FPI)
4618 break;
4619 }
4620 if (UnresolvedAncestorPad) {
4621 if (CurrentPad == UnresolvedAncestorPad) {
4622 // When CurrentPad is FPI itself, we don't mark it as resolved even if
4623 // we've found an unwind edge that exits it, because we need to verify
4624 // all direct uses of FPI.
4625 assert(CurrentPad == &FPI);
4626 continue;
4627 }
4628 // Pop off the worklist any nested pads that we've found an unwind
4629 // destination for. The pads on the worklist are the uncles,
4630 // great-uncles, etc. of CurrentPad. We've found an unwind destination
4631 // for all ancestors of CurrentPad up to but not including
4632 // UnresolvedAncestorPad.
4633 Value *ResolvedPad = CurrentPad;
4634 while (!Worklist.empty()) {
4635 Value *UnclePad = Worklist.back();
4636 Value *AncestorPad = getParentPad(UnclePad);
4637 // Walk ResolvedPad up the ancestor list until we either find the
4638 // uncle's parent or the last resolved ancestor.
4639 while (ResolvedPad != AncestorPad) {
4640 Value *ResolvedParent = getParentPad(ResolvedPad);
4641 if (ResolvedParent == UnresolvedAncestorPad) {
4642 break;
4643 }
4644 ResolvedPad = ResolvedParent;
4645 }
4646 // If the resolved ancestor search didn't find the uncle's parent,
4647 // then the uncle is not yet resolved.
4648 if (ResolvedPad != AncestorPad)
4649 break;
4650 // This uncle is resolved, so pop it from the worklist.
4651 Worklist.pop_back();
4652 }
4653 }
4654 }
4655
4656 if (FirstUnwindPad) {
4657 if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(FPI.getParentPad())) {
4658 BasicBlock *SwitchUnwindDest = CatchSwitch->getUnwindDest();
4659 Value *SwitchUnwindPad;
4660 if (SwitchUnwindDest)
4661 SwitchUnwindPad = SwitchUnwindDest->getFirstNonPHI();
4662 else
4663 SwitchUnwindPad = ConstantTokenNone::get(FPI.getContext());
4664 Check(SwitchUnwindPad == FirstUnwindPad,
4665 "Unwind edges out of a catch must have the same unwind dest as "
4666 "the parent catchswitch",
4667 &FPI, FirstUser, CatchSwitch);
4668 }
4669 }
4670
4671 visitInstruction(FPI);
4672}
4673
4674void Verifier::visitCatchSwitchInst(CatchSwitchInst &CatchSwitch) {
4675 BasicBlock *BB = CatchSwitch.getParent();
4676
4677 Function *F = BB->getParent();
4678 Check(F->hasPersonalityFn(),
4679 "CatchSwitchInst needs to be in a function with a personality.",
4680 &CatchSwitch);
4681
4682 // The catchswitch instruction must be the first non-PHI instruction in the
4683 // block.
4684 Check(BB->getFirstNonPHI() == &CatchSwitch,
4685 "CatchSwitchInst not the first non-PHI instruction in the block.",
4686 &CatchSwitch);
4687
4688 auto *ParentPad = CatchSwitch.getParentPad();
4689 Check(isa<ConstantTokenNone>(ParentPad) || isa<FuncletPadInst>(ParentPad),
4690 "CatchSwitchInst has an invalid parent.", ParentPad);
4691
4692 if (BasicBlock *UnwindDest = CatchSwitch.getUnwindDest()) {
4693 Instruction *I = UnwindDest->getFirstNonPHI();
4694 Check(I->isEHPad() && !isa<LandingPadInst>(I),
4695 "CatchSwitchInst must unwind to an EH block which is not a "
4696 "landingpad.",
4697 &CatchSwitch);
4698
4699 // Record catchswitch sibling unwinds for verifySiblingFuncletUnwinds
4700 if (getParentPad(I) == ParentPad)
4701 SiblingFuncletInfo[&CatchSwitch] = &CatchSwitch;
4702 }
4703
4704 Check(CatchSwitch.getNumHandlers() != 0,
4705 "CatchSwitchInst cannot have empty handler list", &CatchSwitch);
4706
4707 for (BasicBlock *Handler : CatchSwitch.handlers()) {
4708 Check(isa<CatchPadInst>(Handler->getFirstNonPHI()),
4709 "CatchSwitchInst handlers must be catchpads", &CatchSwitch, Handler);
4710 }
4711
4712 visitEHPadPredecessors(CatchSwitch);
4713 visitTerminator(CatchSwitch);
4714}
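// For illustration, a sketch of a well-formed catchswitch under an MSVC-style
// personality (names and catchpad arguments are arbitrary): it is the first
// non-PHI of its block, has a non-empty handler list, and each handler begins
// with a catchpad.
//
//   dispatch:
//     %cs = catchswitch within none [label %handler] unwind to caller
//   handler:
//     %cp = catchpad within %cs [ptr null, i32 64, ptr null]
//     catchret from %cp to label %cont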
4715
4716void Verifier::visitCleanupReturnInst(CleanupReturnInst &CRI) {
4717 Check(isa<CleanupPadInst>(CRI.getOperand(0)),
4718 "CleanupReturnInst needs to be provided a CleanupPad", &CRI,
4719 CRI.getOperand(0));
4720
4721 if (BasicBlock *UnwindDest = CRI.getUnwindDest()) {
4722 Instruction *I = UnwindDest->getFirstNonPHI();
4723 Check(I->isEHPad() && !isa<LandingPadInst>(I),
4724 "CleanupReturnInst must unwind to an EH block which is not a "
4725 "landingpad.",
4726 &CRI);
4727 }
4728
4729 visitTerminator(CRI);
4730}
4731
4732void Verifier::verifyDominatesUse(Instruction &I, unsigned i) {
4733 Instruction *Op = cast<Instruction>(I.getOperand(i));
4734 // If we have an invalid invoke, don't try to compute the dominance.
4735 // We already reject it in the invoke specific checks and the dominance
4736 // computation doesn't handle multiple edges.
4737 if (InvokeInst *II = dyn_cast<InvokeInst>(Op)) {
4738 if (II->getNormalDest() == II->getUnwindDest())
4739 return;
4740 }
4741
4742 // Quick check whether the def has already been encountered in the same block.
4743 // PHI nodes are not checked to prevent accepting preceding PHIs, because PHI
4744 // uses are defined to happen on the incoming edge, not at the instruction.
4745 //
4746 // FIXME: If this operand is a MetadataAsValue (wrapping a LocalAsMetadata)
4747 // wrapping an SSA value, assert that we've already encountered it. See
4748 // related FIXME in Mapper::mapLocalAsMetadata in ValueMapper.cpp.
4749 if (!isa<PHINode>(I) && InstsInThisBlock.count(Op))
4750 return;
4751
4752 const Use &U = I.getOperandUse(i);
4753 Check(DT.dominates(Op, U), "Instruction does not dominate all uses!", Op, &I);
4754}
4755
4756void Verifier::visitDereferenceableMetadata(Instruction& I, MDNode* MD) {
4757 Check(I.getType()->isPointerTy(),
4758 "dereferenceable, dereferenceable_or_null "
4759 "apply only to pointer types",
4760 &I);
4761 Check((isa<LoadInst>(I) || isa<IntToPtrInst>(I)),
4762 "dereferenceable, dereferenceable_or_null apply only to load"
4763 " and inttoptr instructions, use attributes for calls or invokes",
4764 &I);
4765 Check(MD->getNumOperands() == 1,
4766 "dereferenceable, dereferenceable_or_null "
4767 "take one operand!",
4768 &I);
4769 ConstantInt *CI = mdconst::dyn_extract<ConstantInt>(MD->getOperand(0));
4770 Check(CI && CI->getType()->isIntegerTy(64),
4771 "dereferenceable, "
4772 "dereferenceable_or_null metadata value must be an i64!",
4773 &I);
4774}
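// For illustration, a sketch of metadata these checks accept (values
// arbitrary): a pointer-typed load carrying a single i64 operand.
//
//   %p = load ptr, ptr %q, !dereferenceable !0
//   !0 = !{i64 8}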
4775
4776void Verifier::visitProfMetadata(Instruction &I, MDNode *MD) {
4777 Check(MD->getNumOperands() >= 2,
4778 "!prof annotations should have no less than 2 operands", MD);
4779
4780 // Check first operand.
4781 Check(MD->getOperand(0) != nullptr, "first operand should not be null", MD);
4782 Check(isa<MDString>(MD->getOperand(0)),
4783 "expected string with name of the !prof annotation", MD);
4784 MDString *MDS = cast<MDString>(MD->getOperand(0));
4785 StringRef ProfName = MDS->getString();
4786
4787 // Check consistency of !prof branch_weights metadata.
4788 if (ProfName.equals("branch_weights")) {
4789 if (isa<InvokeInst>(&I)) {
4790 Check(MD->getNumOperands() == 2 || MD->getNumOperands() == 3,
4791 "Wrong number of InvokeInst branch_weights operands", MD);
4792 } else {
4793 unsigned ExpectedNumOperands = 0;
4794 if (BranchInst *BI = dyn_cast<BranchInst>(&I))
4795 ExpectedNumOperands = BI->getNumSuccessors();
4796 else if (SwitchInst *SI = dyn_cast<SwitchInst>(&I))
4797 ExpectedNumOperands = SI->getNumSuccessors();
4798 else if (isa<CallInst>(&I))
4799 ExpectedNumOperands = 1;
4800 else if (IndirectBrInst *IBI = dyn_cast<IndirectBrInst>(&I))
4801 ExpectedNumOperands = IBI->getNumDestinations();
4802 else if (isa<SelectInst>(&I))
4803 ExpectedNumOperands = 2;
4804 else if (CallBrInst *CI = dyn_cast<CallBrInst>(&I))
4805 ExpectedNumOperands = CI->getNumSuccessors();
4806 else
4807 CheckFailed("!prof branch_weights are not allowed for this instruction",
4808 MD);
4809
4810 Check(MD->getNumOperands() == 1 + ExpectedNumOperands,
4811 "Wrong number of operands", MD);
4812 }
4813 for (unsigned i = 1; i < MD->getNumOperands(); ++i) {
4814 auto &MDO = MD->getOperand(i);
4815 Check(MDO, "second operand should not be null", MD);
4816 Check(mdconst::dyn_extract<ConstantInt>(MDO),
4817 "!prof brunch_weights operand is not a const int");
4818 }
4819 }
4820}
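// For illustration, a sketch of well-formed branch_weights metadata (weights
// arbitrary): a two-successor branch takes the tag plus one weight per
// successor, i.e. three operands in total.
//
//   br i1 %cond, label %then, label %else, !prof !0
//   !0 = !{!"branch_weights", i32 95, i32 5}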
4821
4822void Verifier::visitDIAssignIDMetadata(Instruction &I, MDNode *MD) {
4823 assert(I.hasMetadata(LLVMContext::MD_DIAssignID));
4824 bool ExpectedInstTy =
4825 isa<AllocaInst>(I) || isa<StoreInst>(I) || isa<MemIntrinsic>(I);
4826 CheckDI(ExpectedInstTy, "!DIAssignID attached to unexpected instruction kind",
4827 I, MD);
4828 // Iterate over the MetadataAsValue uses of the DIAssignID - these should
4829 // only be found as DbgAssignIntrinsic operands.
4830 if (auto *AsValue = MetadataAsValue::getIfExists(Context, MD)) {
4831 for (auto *User : AsValue->users()) {
4832 CheckDI(isa<DbgAssignIntrinsic>(User),
4833 "!DIAssignID should only be used by llvm.dbg.assign intrinsics",
4834 MD, User);
4835 // All of the dbg.assign intrinsics should be in the same function as I.
4836 if (auto *DAI = dyn_cast<DbgAssignIntrinsic>(User))
4837 CheckDI(DAI->getFunction() == I.getFunction(),
4838 "dbg.assign not in same function as inst", DAI, &I);
4839 }
4840 }
4841 for (DbgVariableRecord *DVR :
4842 cast<DIAssignID>(MD)->getAllDbgVariableRecordUsers()) {
4843 CheckDI(DVR->isDbgAssign(),
4844 "!DIAssignID should only be used by Assign DVRs.", MD, DVR);
4845 CheckDI(DVR->getFunction() == I.getFunction(),
4846 "DVRAssign not in same function as inst", DVR, &I);
4847 }
4848}
4849
4850void Verifier::visitMMRAMetadata(Instruction &I, MDNode *MD) {
4852 "!mmra metadata attached to unexpected instruction kind", I, MD);
4853
4854 // MMRA Metadata should either be a tag, e.g. !{!"foo", !"bar"}, or a
4855 // list of tags such as !2 in the following example:
4856 // !0 = !{!"a", !"b"}
4857 // !1 = !{!"c", !"d"}
4858 // !2 = !{!0, !1}
4859 if (MMRAMetadata::isTagMD(MD))
4860 return;
4861
4862 Check(isa<MDTuple>(MD), "!mmra expected to be a metadata tuple", I, MD);
4863 for (const MDOperand &MDOp : MD->operands())
4864 Check(MMRAMetadata::isTagMD(MDOp.get()),
4865 "!mmra metadata tuple operand is not an MMRA tag", I, MDOp.get());
4866}
4867
4868void Verifier::visitCallStackMetadata(MDNode *MD) {
4869 // Call stack metadata should consist of a list of at least 1 constant int
4870 // (representing a hash of the location).
4871 Check(MD->getNumOperands() >= 1,
4872 "call stack metadata should have at least 1 operand", MD);
4873
4874 for (const auto &Op : MD->operands())
4875 Check(mdconst::dyn_extract_or_null<ConstantInt>(Op),
4876 "call stack metadata operand should be constant integer", Op);
4877}
4878
4879void Verifier::visitMemProfMetadata(Instruction &I, MDNode *MD) {
4880 Check(isa<CallBase>(I), "!memprof metadata should only exist on calls", &I);
4881 Check(MD->getNumOperands() >= 1,
4882 "!memprof annotations should have at least 1 metadata operand "
4883 "(MemInfoBlock)",
4884 MD);
4885
4886 // Check each MIB
4887 for (auto &MIBOp : MD->operands()) {
4888 MDNode *MIB = dyn_cast<MDNode>(MIBOp);
4889 // The first operand of an MIB should be the call stack metadata.
4890 // The rest of the operands should be MDString tags, and there should be
4891 // at least one.
4892 Check(MIB->getNumOperands() >= 2,
4893 "Each !memprof MemInfoBlock should have at least 2 operands", MIB);
4894
4895 // Check call stack metadata (first operand).
4896 Check(MIB->getOperand(0) != nullptr,
4897 "!memprof MemInfoBlock first operand should not be null", MIB);
4898 Check(isa<MDNode>(MIB->getOperand(0)),
4899 "!memprof MemInfoBlock first operand should be an MDNode", MIB);
4900 MDNode *StackMD = dyn_cast<MDNode>(MIB->getOperand(0));
4901 visitCallStackMetadata(StackMD);
4902
4903 // Check that remaining operands are MDString.
4904 Check(llvm::all_of(drop_begin(MIB->operands()),
4905 [](const MDOperand &Op) { return isa<MDString>(Op); }),
4906 "Not all !memprof MemInfoBlock operands 1 to N are MDString", MIB);
4907 }
4908}
4909
4910void Verifier::visitCallsiteMetadata(Instruction &I, MDNode *MD) {
4911 Check(isa<CallBase>(I), "!callsite metadata should only exist on calls", &I);
4912 // Verify the partial callstack annotated from memprof profiles. This callsite
4913 // is a part of a profiled allocation callstack.
4914 visitCallStackMetadata(MD);
4915}
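// For illustration, a sketch of !memprof and !callsite metadata that passes
// these checks (hash values arbitrary): each MemInfoBlock pairs a call stack
// node of i64 hashes with at least one MDString tag, and !callsite is itself
// a call stack node.
//
//   %p = call ptr @malloc(i64 16), !memprof !0, !callsite !4
//   !0 = !{!1}
//   !1 = !{!2, !"notcold"}
//   !2 = !{i64 123, i64 456}
//   !4 = !{i64 123}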
4916
4917void Verifier::visitAnnotationMetadata(MDNode *Annotation) {
4918 Check(isa<MDTuple>(Annotation), "annotation must be a tuple");
4919 Check(Annotation->getNumOperands() >= 1,
4920 "annotation must have at least one operand");
4921 for (const MDOperand &Op : Annotation->operands()) {
4922 bool TupleOfStrings =
4923 isa<MDTuple>(Op.get()) &&
4924 all_of(cast<MDTuple>(Op)->operands(), [](auto &Annotation) {
4925 return isa<MDString>(Annotation.get());
4926 });
4927 Check(isa<MDString>(Op.get()) || TupleOfStrings,
4928 "operands must be a string or a tuple of strings");
4929 }
4930}
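// For illustration, a sketch of accepted !annotation metadata (strings
// arbitrary): a tuple whose operands are strings or tuples of strings.
//
//   store i32 0, ptr %p, !annotation !0
//   !0 = !{!"auto-init", !{!"src", !"line"}}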
4931
4932void Verifier::visitAliasScopeMetadata(const MDNode *MD) {
4933 unsigned NumOps = MD->getNumOperands();
4934 Check(NumOps >= 2 && NumOps <= 3, "scope must have two or three operands",
4935 MD);
4936 Check(MD->getOperand(0).get() == MD || isa<MDString>(MD->getOperand(0)),
4937 "first scope operand must be self-referential or string", MD);
4938 if (NumOps == 3)
4939 Check(isa<MDString>(MD->getOperand(2)),
4940 "third scope operand must be string (if used)", MD);
4941
4942 MDNode *Domain = dyn_cast<MDNode>(MD->getOperand(1));
4943 Check(Domain != nullptr, "second scope operand must be MDNode", MD);
4944
4945 unsigned NumDomainOps = Domain->getNumOperands();
4946 Check(NumDomainOps >= 1 && NumDomainOps <= 2,
4947 "domain must have one or two operands", Domain);
4948 Check(Domain->getOperand(0).get() == Domain ||
4949 isa<MDString>(Domain->getOperand(0)),
4950 "first domain operand must be self-referential or string", Domain);
4951 if (NumDomainOps == 2)
4952 Check(isa<MDString>(Domain->getOperand(1)),
4953 "second domain operand must be string (if used)", Domain);
4954}
4955
4956void Verifier::visitAliasScopeListMetadata(const MDNode *MD) {
4957 for (const MDOperand &Op : MD->operands()) {
4958 const MDNode *OpMD = dyn_cast<MDNode>(Op);
4959 Check(OpMD != nullptr, "scope list must consist of MDNodes", MD);
4960 visitAliasScopeMetadata(OpMD);
4961 }
4962}
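// For illustration, a sketch of scope-list metadata these checks accept
// (names arbitrary): a distinct domain, a distinct scope naming that domain,
// and a list wrapping the scope.
//
//   !0 = distinct !{!0, !"my domain"}
//   !1 = distinct !{!1, !0, !"my scope"}
//   !2 = !{!1}
//   %v = load i32, ptr %p, !alias.scope !2, !noalias !2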
4963
4964void Verifier::visitAccessGroupMetadata(const MDNode *MD) {
4965 auto IsValidAccessScope = [](const MDNode *MD) {
4966 return MD->getNumOperands() == 0 && MD->isDistinct();
4967 };
4968
4969 // It must be either an access scope itself...
4970 if (IsValidAccessScope(MD))
4971 return;
4972
4973 // ...or a list of access scopes.
4974 for (const MDOperand &Op : MD->operands()) {
4975 const MDNode *OpMD = dyn_cast<MDNode>(Op);
4976 Check(OpMD != nullptr, "Access scope list must consist of MDNodes", MD);
4977 Check(IsValidAccessScope(OpMD),
4978 "Access scope list contains invalid access scope", MD);
4979 }
4980}
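// For illustration, a sketch of an accepted access group (names arbitrary):
// either a distinct, operand-less node, or a list of such nodes.
//
//   %x = load i32, ptr %p, !llvm.access.group !3
//   !3 = distinct !{}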
4981
4982/// verifyInstruction - Verify that an instruction is well formed.
4983///
4984void Verifier::visitInstruction(Instruction &I) {
4985 BasicBlock *BB = I.getParent();
4986 Check(BB, "Instruction not embedded in basic block!", &I);
4987
4988 if (!isa<PHINode>(I)) { // Check that non-phi nodes are not self referential
4989 for (User *U : I.users()) {
4990 Check(U != (User *)&I || !DT.isReachableFromEntry(BB),
4991 "Only PHI nodes may reference their own value!", &I);
4992 }
4993 }
4994
4995 // Check that void typed values don't have names
4996 Check(!I.getType()->isVoidTy() || !I.hasName(),
4997 "Instruction has a name, but provides a void value!", &I);
4998
4999 // Check that the return value of the instruction is either void or a legal
5000 // value type.
5001 Check(I.getType()->isVoidTy() || I.getType()->isFirstClassType(),
5002 "Instruction returns a non-scalar type!", &I);
5003
5004 // Check that the instruction doesn't produce metadata. Calls are already
5005 // checked against the callee type.
5006 Check(!I.getType()->isMetadataTy() || isa<CallInst>(I) || isa<InvokeInst>(I),
5007 "Invalid use of metadata!", &I);
5008
5009 // Check that all uses of the instruction, if they are instructions
5010 // themselves, actually have parent basic blocks. If the use is not an
5011 // instruction, it is an error!
5012 for (Use &U : I.uses()) {
5013 if (Instruction *Used = dyn_cast<Instruction>(U.getUser()))
5014 Check(Used->getParent() != nullptr,
5015 "Instruction referencing"
5016 " instruction not embedded in a basic block!",
5017 &I, Used);
5018 else {
5019 CheckFailed("Use of instruction is not an instruction!", U);
5020 return;
5021 }
5022 }
5023
5024 // Get a pointer to the call base of the instruction if it is some form of
5025 // call.
5026 const CallBase *CBI = dyn_cast<CallBase>(&I);
5027
5028 for (unsigned i = 0, e = I.getNumOperands(); i != e; ++i) {
5029 Check(I.getOperand(i) != nullptr, "Instruction has null operand!", &I);
5030
5031 // Check to make sure that only first-class-values are operands to
5032 // instructions.
5033 if (!I.getOperand(i)->getType()->isFirstClassType()) {
5034 Check(false, "Instruction operands must be first-class values!", &I);
5035 }
5036
5037 if (Function *F = dyn_cast<Function>(I.getOperand(i))) {
5038 // This code checks whether the function is used as the operand of a
5039 // clang_arc_attachedcall operand bundle.
5040 auto IsAttachedCallOperand = [](Function *F, const CallBase *CBI,
5041 int Idx) {
5042 return CBI && CBI->isOperandBundleOfType(
5043 LLVMContext::OB_clang_arc_attachedcall, Idx);
5044 };
5045
5046 // Check to make sure that the "address of" an intrinsic function is never
5047 // taken. Ignore cases where the address of the intrinsic function is used
5048 // as the argument of operand bundle "clang.arc.attachedcall" as those
5049 // cases are handled in verifyAttachedCallBundle.
5050 Check((!F->isIntrinsic() ||
5051 (CBI && &CBI->getCalledOperandUse() == &I.getOperandUse(i)) ||
5052 IsAttachedCallOperand(F, CBI, i)),
5053 "Cannot take the address of an intrinsic!", &I);
5054 Check(!F->isIntrinsic() || isa<CallInst>(I) ||
5055 F->getIntrinsicID() == Intrinsic::donothing ||
5056 F->getIntrinsicID() == Intrinsic::seh_try_begin ||
5057 F->getIntrinsicID() == Intrinsic::seh_try_end ||
5058 F->getIntrinsicID() == Intrinsic::seh_scope_begin ||
5059 F->getIntrinsicID() == Intrinsic::seh_scope_end ||
5060 F->getIntrinsicID() == Intrinsic::coro_resume ||
5061 F->getIntrinsicID() == Intrinsic::coro_destroy ||
5062 F->getIntrinsicID() == Intrinsic::coro_await_suspend_void ||
5063 F->getIntrinsicID() == Intrinsic::coro_await_suspend_bool ||
5064 F->getIntrinsicID() == Intrinsic::coro_await_suspend_handle ||
5065 F->getIntrinsicID() ==
5066 Intrinsic::experimental_patchpoint_void ||
5067 F->getIntrinsicID() == Intrinsic::experimental_patchpoint ||
5068 F->getIntrinsicID() == Intrinsic::experimental_gc_statepoint ||
5069 F->getIntrinsicID() == Intrinsic::wasm_rethrow ||
5070 IsAttachedCallOperand(F, CBI, i),
5071 "Cannot invoke an intrinsic other than donothing, patchpoint, "
5072 "statepoint, coro_resume, coro_destroy or clang.arc.attachedcall",
5073 &I);
5074 Check(F->getParent() == &M, "Referencing function in another module!", &I,
5075 &M, F, F->getParent());
5076 } else if (BasicBlock *OpBB = dyn_cast<BasicBlock>(I.getOperand(i))) {
5077 Check(OpBB->getParent() == BB->getParent(),
5078 "Referring to a basic block in another function!", &I);
5079 } else if (Argument *OpArg = dyn_cast<Argument>(I.getOperand(i))) {
5080 Check(OpArg->getParent() == BB->getParent(),
5081 "Referring to an argument in another function!", &I);
5082 } else if (GlobalValue *GV = dyn_cast<GlobalValue>(I.getOperand(i))) {
5083 Check(GV->getParent() == &M, "Referencing global in another module!", &I,
5084 &M, GV, GV->getParent());
5085 } else if (Instruction *OpInst = dyn_cast<Instruction>(I.getOperand(i))) {
5086 Check(OpInst->getFunction() == BB->getParent(),
5087 "Referring to an instruction in another function!", &I);
5088 verifyDominatesUse(I, i);
5089 } else if (isa<InlineAsm>(I.getOperand(i))) {
5090 Check(CBI && &CBI->getCalledOperandUse() == &I.getOperandUse(i),
5091 "Cannot take the address of an inline asm!", &I);
5092 } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(I.getOperand(i))) {
5093 if (CE->getType()->isPtrOrPtrVectorTy()) {
5094 // If we have a ConstantExpr pointer, we need to see if it came from an
5095 // illegal bitcast.
5096 visitConstantExprsRecursively(CE);
5097 }
5098 }
5099 }
5100
5101 if (MDNode *MD = I.getMetadata(LLVMContext::MD_fpmath)) {
5102 Check(I.getType()->isFPOrFPVectorTy(),
5103 "fpmath requires a floating point result!", &I);
5104 Check(MD->getNumOperands() == 1, "fpmath takes one operand!", &I);
5105 if (ConstantFP *CFP0 =
5106 mdconst::dyn_extract_or_null<ConstantFP>(MD->getOperand(0))) {
5107 const APFloat &Accuracy = CFP0->getValueAPF();
5108 Check(&Accuracy.getSemantics() == &APFloat::IEEEsingle(),
5109 "fpmath accuracy must have float type", &I);
5110 Check(Accuracy.isFiniteNonZero() && !Accuracy.isNegative(),
5111 "fpmath accuracy not a positive number!", &I);
5112 } else {
5113 Check(false, "invalid fpmath accuracy!", &I);
5114 }
5115 }
5116
5117 if (MDNode *Range = I.getMetadata(LLVMContext::MD_range)) {
5118 Check(isa<LoadInst>(I) || isa<CallInst>(I) || isa<InvokeInst>(I),
5119 "Ranges are only for loads, calls and invokes!", &I);
5120 visitRangeMetadata(I, Range, I.getType());
5121 }
5122
5123 if (I.hasMetadata(LLVMContext::MD_invariant_group)) {
5124 Check(isa<LoadInst>(I) || isa<StoreInst>(I),
5125 "invariant.group metadata is only for loads and stores", &I);
5126 }
5127
5128 if (MDNode *MD = I.getMetadata(LLVMContext::MD_nonnull)) {
5129 Check(I.getType()->isPointerTy(), "nonnull applies only to pointer types",
5130 &I);
5131 Check(isa<LoadInst>(I),
5132 "nonnull applies only to load instructions, use attributes"
5133 " for calls or invokes",
5134 &I);
5135 Check(MD->getNumOperands() == 0, "nonnull metadata must be empty", &I);
5136 }
5137
5138 if (MDNode *MD = I.getMetadata(LLVMContext::MD_dereferenceable))
5139 visitDereferenceableMetadata(I, MD);
5140
5141 if (MDNode *MD = I.getMetadata(LLVMContext::MD_dereferenceable_or_null))
5142 visitDereferenceableMetadata(I, MD);
5143
5144 if (MDNode *TBAA = I.getMetadata(LLVMContext::MD_tbaa))
5145 TBAAVerifyHelper.visitTBAAMetadata(I, TBAA);
5146
5147 if (MDNode *MD = I.getMetadata(LLVMContext::MD_noalias))
5148 visitAliasScopeListMetadata(MD);
5149 if (MDNode *MD = I.getMetadata(LLVMContext::MD_alias_scope))
5150 visitAliasScopeListMetadata(MD);
5151
5152 if (MDNode *MD = I.getMetadata(LLVMContext::MD_access_group))
5153 visitAccessGroupMetadata(MD);
5154
5155 if (MDNode *AlignMD = I.getMetadata(LLVMContext::MD_align)) {
5156 Check(I.getType()->isPointerTy(), "align applies only to pointer types",
5157 &I);
5158 Check(isa<LoadInst>(I),
5159 "align applies only to load instructions, "
5160 "use attributes for calls or invokes",
5161 &I);
5162 Check(AlignMD->getNumOperands() == 1, "align takes one operand!", &I);
5163 ConstantInt *CI = mdconst::dyn_extract<ConstantInt>(AlignMD->getOperand(0));
5164 Check(CI && CI->getType()->isIntegerTy(64),
5165 "align metadata value must be an i64!", &I);
5166 uint64_t Align = CI->getZExtValue();
5167 Check(isPowerOf2_64(Align), "align metadata value must be a power of 2!",
5168 &I);
5170 "alignment is larger that implementation defined limit", &I);
5171 }
5172
5173 if (MDNode *MD = I.getMetadata(LLVMContext::MD_prof))
5174 visitProfMetadata(I, MD);
5175
5176 if (MDNode *MD = I.getMetadata(LLVMContext::MD_memprof))
5177 visitMemProfMetadata(I, MD);
5178
5179 if (MDNode *MD = I.getMetadata(LLVMContext::MD_callsite))
5180 visitCallsiteMetadata(I, MD);
5181
5182 if (MDNode *MD = I.getMetadata(LLVMContext::MD_DIAssignID))
5183 visitDIAssignIDMetadata(I, MD);
5184
5185 if (MDNode *MMRA = I.getMetadata(LLVMContext::MD_mmra))
5186 visitMMRAMetadata(I, MMRA);
5187
5188 if (MDNode *Annotation = I.getMetadata(LLVMContext::MD_annotation))
5189 visitAnnotationMetadata(Annotation);
5190
5191 if (MDNode *N = I.getDebugLoc().getAsMDNode()) {
5192 CheckDI(isa<DILocation>(N), "invalid !dbg metadata attachment", &I, N);
5193 visitMDNode(*N, AreDebugLocsAllowed::Yes);
5194 }
5195
5196 if (auto *DII = dyn_cast<DbgVariableIntrinsic>(&I)) {
5197 verifyFragmentExpression(*DII);
5198 verifyNotEntryValue(*DII);
5199 }
5200
5201 SmallVector<std::pair<unsigned, MDNode *>, 4> MDs;
5202 I.getAllMetadata(MDs);
5203 for (auto Attachment : MDs) {
5204 unsigned Kind = Attachment.first;
5205 auto AllowLocs =
5206 (Kind == LLVMContext::MD_dbg || Kind == LLVMContext::MD_loop)
5207 ? AreDebugLocsAllowed::Yes
5208 : AreDebugLocsAllowed::No;
5209 visitMDNode(*Attachment.second, AllowLocs);
5210 }
5211
5212 InstsInThisBlock.insert(&I);
5213}
5214
5215/// Allow intrinsics to be verified in different ways.
5216void Verifier::visitIntrinsicCall(Intrinsic::ID ID, CallBase &Call) {
5217 Function *IF = Call.getCalledFunction();
5218 Check(IF->isDeclaration(), "Intrinsic functions should never be defined!",
5219 IF);
5220
5221 // Verify that the intrinsic prototype lines up with what the .td files
5222 // describe.
5223 FunctionType *IFTy = IF->getFunctionType();
5224 bool IsVarArg = IFTy->isVarArg();
5225
5226 SmallVector<Intrinsic::IITDescriptor, 8> Table;
5227 Intrinsic::getIntrinsicInfoTableEntries(ID, Table);
5228 ArrayRef<Intrinsic::IITDescriptor> TableRef = Table;
5229
5230 // Walk the descriptors to extract overloaded types.
5231 SmallVector<Type *, 4> ArgTys;
5232 Intrinsic::MatchIntrinsicTypesResult Res =
5233 Intrinsic::matchIntrinsicSignature(IFTy, TableRef, ArgTys);
5234 Check(Res != Intrinsic::MatchIntrinsicTypes_NoMatchRet,
5235 "Intrinsic has incorrect return type!", IF);
5236 Check(Res != Intrinsic::MatchIntrinsicTypes_NoMatchArg,
5237 "Intrinsic has incorrect argument type!", IF);
5238
5239 // Verify if the intrinsic call matches the vararg property.
5240 if (IsVarArg)
5241 Check(!Intrinsic::matchIntrinsicVarArg(IsVarArg, TableRef),
5242 "Intrinsic was not defined with variable arguments!", IF);
5243 else
5244 Check(!Intrinsic::matchIntrinsicVarArg(IsVarArg, TableRef),
5245 "Callsite was not defined with variable arguments!", IF);
5246
5247 // All descriptors should be absorbed by now.
5248 Check(TableRef.empty(), "Intrinsic has too few arguments!", IF);
5249
5250 // Now that we have the intrinsic ID and the actual argument types (and we
5251 // know they are legal for the intrinsic!) get the intrinsic name through the
5252 // usual means. This allows us to verify the mangling of argument types into
5253 // the name.
5254 const std::string ExpectedName =
5255 Intrinsic::getName(ID, ArgTys, IF->getParent(), IFTy);
5256 Check(ExpectedName == IF->getName(),
5257 "Intrinsic name not mangled correctly for type arguments! "
5258 "Should be: " +
5259 ExpectedName,
5260 IF);
5261
5262 // If the intrinsic takes MDNode arguments, verify that they are either global
5263 // or are local to *this* function.
5264 for (Value *V : Call.args()) {
5265 if (auto *MD = dyn_cast<MetadataAsValue>(V))
5266 visitMetadataAsValue(*MD, Call.getCaller());
5267 if (auto *Const = dyn_cast<Constant>(V))
5268 Check(!Const->getType()->isX86_AMXTy(),
5269 "const x86_amx is not allowed in argument!");
5270 }
5271
5272 switch (ID) {
5273 default:
5274 break;
5275 case Intrinsic::assume: {
5276 for (auto &Elem : Call.bundle_op_infos()) {
5277 unsigned ArgCount = Elem.End - Elem.Begin;
5278 // Separate storage assumptions are special insofar as they're the only
5279 // operand bundles allowed on assumes that aren't parameter attributes.
5280 if (Elem.Tag->getKey() == "separate_storage") {
5281 Check(ArgCount == 2,
5282 "separate_storage assumptions should have 2 arguments", Call);
5283 Check(Call.getOperand(Elem.Begin)->getType()->isPointerTy() &&
5284 Call.getOperand(Elem.Begin + 1)->getType()->isPointerTy(),
5285 "arguments to separate_storage assumptions should be pointers",
5286 Call);
5287 return;
5288 }
5289 Check(Elem.Tag->getKey() == "ignore" ||
5290 Attribute::isExistingAttribute(Elem.Tag->getKey()),
5291 "tags must be valid attribute names", Call);
5292 Attribute::AttrKind Kind =
5293 Attribute::getAttrKindFromName(Elem.Tag->getKey());
5294 if (Kind == Attribute::Alignment) {
5295 Check(ArgCount <= 3 && ArgCount >= 2,
5296 "alignment assumptions should have 2 or 3 arguments", Call);
5297 Check(Call.getOperand(Elem.Begin)->getType()->isPointerTy(),
5298 "first argument should be a pointer", Call);
5299 Check(Call.getOperand(Elem.Begin + 1)->getType()->isIntegerTy(),
5300 "second argument should be an integer", Call);
5301 if (ArgCount == 3)
5302 Check(Call.getOperand(Elem.Begin + 2)->getType()->isIntegerTy(),
5303 "third argument should be an integer if present", Call);
5304 return;
5305 }
5306 Check(ArgCount <= 2, "too many arguments", Call);
5307 if (Kind == Attribute::None)
5308 break;
5309 if (Attribute::isIntAttrKind(Kind)) {
5310 Check(ArgCount == 2, "this attribute should have 2 arguments", Call);
5311 Check(isa<ConstantInt>(Call.getOperand(Elem.Begin + 1)),
5312 "the second argument should be a constant integral value", Call);
5313 } else if (Attribute::canUseAsParamAttr(Kind)) {
5314 Check((ArgCount) == 1, "this attribute should have one argument", Call);
5315 } else if (Attribute::canUseAsFnAttr(Kind)) {
5316 Check((ArgCount) == 0, "this attribute has no argument", Call);
5317 }
5318 }
5319 break;
5320 }
5321 case Intrinsic::ucmp:
5322 case Intrinsic::scmp: {
5323 Type *SrcTy = Call.getOperand(0)->getType();
5324 Type *DestTy = Call.getType();
5325
5326 Check(DestTy->getScalarSizeInBits() >= 2,
5327 "result type must be at least 2 bits wide", Call);
5328
5329 bool IsDestTypeVector = DestTy->isVectorTy();
5330 Check(SrcTy->isVectorTy() == IsDestTypeVector,
5331 "ucmp/scmp argument and result types must both be either vector or "
5332 "scalar types",
5333 Call);
5334 if (IsDestTypeVector) {
5335 auto SrcVecLen = cast<VectorType>(SrcTy)->getElementCount();
5336 auto DestVecLen = cast<VectorType>(DestTy)->getElementCount();
5337 Check(SrcVecLen == DestVecLen,
5338 "return type and arguments must have the same number of "
5339 "elements",
5340 Call);
5341 }
5342 break;
5343 }
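  // For illustration, a sketch of an accepted call (types arbitrary as long
  // as the result is at least 2 bits wide and vectorness matches):
  //
  //   %c = call i8 @llvm.scmp.i8.i32(i32 %a, i32 %b)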
5344 case Intrinsic::coro_id: {
5345 auto *InfoArg = Call.getArgOperand(3)->stripPointerCasts();
5346 if (isa<ConstantPointerNull>(InfoArg))
5347 break;
5348 auto *GV = dyn_cast<GlobalVariable>(InfoArg);
5349 Check(GV && GV->isConstant() && GV->hasDefinitiveInitializer(),
5350 "info argument of llvm.coro.id must refer to an initialized "
5351 "constant");
5352 Constant *Init = GV->getInitializer();
5353 Check(isa<ConstantStruct>(Init) || isa<ConstantArray>(Init),
5354 "info argument of llvm.coro.id must refer to either a struct or "
5355 "an array");
5356 break;
5357 }
5358 case Intrinsic::is_fpclass: {
5359 const ConstantInt *TestMask = cast<ConstantInt>(Call.getOperand(1));
5360 Check((TestMask->getZExtValue() & ~static_cast<unsigned>(fcAllFlags)) == 0,
5361 "unsupported bits for llvm.is.fpclass test mask");
5362 break;
5363 }
5364 case Intrinsic::fptrunc_round: {
5365 // Check the rounding mode
5366 Metadata *MD = nullptr;
5367 auto *MAV = dyn_cast<MetadataAsValue>(Call.getOperand(1));
5368 if (MAV)
5369 MD = MAV->getMetadata();
5370
5371 Check(MD != nullptr, "missing rounding mode argument", Call);
5372
5373 Check(isa<MDString>(MD),
5374 ("invalid value for llvm.fptrunc.round metadata operand"
5375 " (the operand should be a string)"),
5376 MD);
5377
5378 std::optional<RoundingMode> RoundMode =
5379 convertStrToRoundingMode(cast<MDString>(MD)->getString());
5380 Check(RoundMode && *RoundMode != RoundingMode::Dynamic,
5381 "unsupported rounding mode argument", Call);
5382 break;
5383 }
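  // For illustration, a sketch of an accepted call (the rounding mode must be
  // a non-dynamic MDString):
  //
  //   %h = call half @llvm.fptrunc.round.f16.f32(float %x,
  //                                              metadata !"round.towardzero")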
5384#define BEGIN_REGISTER_VP_INTRINSIC(VPID, ...) case Intrinsic::VPID:
5385#include "llvm/IR/VPIntrinsics.def"
5386 visitVPIntrinsic(cast<VPIntrinsic>(Call));
5387 break;
5388#define INSTRUCTION(NAME, NARGS, ROUND_MODE, INTRINSIC) \
5389 case Intrinsic::INTRINSIC:
5390#include "llvm/IR/ConstrainedOps.def"
5391 visitConstrainedFPIntrinsic(cast<ConstrainedFPIntrinsic>(Call));
5392 break;
5393 case Intrinsic::dbg_declare: // llvm.dbg.declare
5394 Check(isa<MetadataAsValue>(Call.getArgOperand(0)),
5395 "invalid llvm.dbg.declare intrinsic call 1", Call);
5396 visitDbgIntrinsic("declare", cast<DbgVariableIntrinsic>(Call));
5397 break;
5398 case Intrinsic::dbg_value: // llvm.dbg.value
5399 visitDbgIntrinsic("value", cast<DbgVariableIntrinsic>(Call));
5400 break;
5401 case Intrinsic::dbg_assign: // llvm.dbg.assign
5402 visitDbgIntrinsic("assign", cast<DbgVariableIntrinsic>(Call));
5403 break;
5404 case Intrinsic::dbg_label: // llvm.dbg.label
5405 visitDbgLabelIntrinsic("label", cast<DbgLabelInst>(Call));
5406 break;
5407 case Intrinsic::memcpy:
5408 case Intrinsic::memcpy_inline:
5409 case Intrinsic::memmove:
5410 case Intrinsic::memset:
5411 case Intrinsic::memset_inline: {
5412 break;
5413 }
5414 case Intrinsic::memcpy_element_unordered_atomic:
5415 case Intrinsic::memmove_element_unordered_atomic:
5416 case Intrinsic::memset_element_unordered_atomic: {
5417 const auto *AMI = cast<AtomicMemIntrinsic>(&Call);
5418
5419 ConstantInt *ElementSizeCI =
5420 cast<ConstantInt>(AMI->getRawElementSizeInBytes());
5421 const APInt &ElementSizeVal = ElementSizeCI->getValue();
5422 Check(ElementSizeVal.isPowerOf2(),
5423 "element size of the element-wise atomic memory intrinsic "
5424 "must be a power of 2",
5425 Call);
5426
5427 auto IsValidAlignment = [&](MaybeAlign Alignment) {
5428 return Alignment && ElementSizeVal.ule(Alignment->value());
5429 };
5430 Check(IsValidAlignment(AMI->getDestAlign()),
5431 "incorrect alignment of the destination argument", Call);
5432 if (const auto *AMT = dyn_cast<AtomicMemTransferInst>(AMI)) {
5433 Check(IsValidAlignment(AMT->getSourceAlign()),
5434 "incorrect alignment of the source argument", Call);
5435 }
5436 break;
5437 }
5438 case Intrinsic::call_preallocated_setup: {
5439 auto *NumArgs = dyn_cast<ConstantInt>(Call.getArgOperand(0));
5440 Check(NumArgs != nullptr,
5441 "llvm.call.preallocated.setup argument must be a constant");
5442 bool FoundCall = false;
5443 for (User *U : Call.users()) {
5444 auto *UseCall = dyn_cast<CallBase>(U);
5445 Check(UseCall != nullptr,
5446 "Uses of llvm.call.preallocated.setup must be calls");
5447 const Function *Fn = UseCall->getCalledFunction();
5448 if (Fn && Fn->getIntrinsicID() == Intrinsic::call_preallocated_arg) {
5449 auto *AllocArgIndex = dyn_cast<ConstantInt>(UseCall->getArgOperand(1));
5450 Check(AllocArgIndex != nullptr,
5451 "llvm.call.preallocated.alloc arg index must be a constant");
5452 auto AllocArgIndexInt = AllocArgIndex->getValue();
5453 Check(AllocArgIndexInt.sge(0) &&
5454 AllocArgIndexInt.slt(NumArgs->getValue()),
5455 "llvm.call.preallocated.alloc arg index must be between 0 and "
5456 "corresponding "
5457 "llvm.call.preallocated.setup's argument count");
5458 } else if (Fn && Fn->getIntrinsicID() ==
5459 Intrinsic::call_preallocated_teardown) {
5460 // nothing to do
5461 } else {
5462 Check(!FoundCall, "Can have at most one call corresponding to a "
5463 "llvm.call.preallocated.setup");
5464 FoundCall = true;
5465 size_t NumPreallocatedArgs = 0;
5466 for (unsigned i = 0; i < UseCall->arg_size(); i++) {
5467 if (UseCall->paramHasAttr(i, Attribute::Preallocated)) {
5468 ++NumPreallocatedArgs;
5469 }
5470 }
5471 Check(NumPreallocatedArgs != 0,
5472 "cannot use preallocated intrinsics on a call without "
5473 "preallocated arguments");
5474 Check(NumArgs->equalsInt(NumPreallocatedArgs),
5475 "llvm.call.preallocated.setup arg size must be equal to number "
5476 "of preallocated arguments "
5477 "at call site",
5478 Call, *UseCall);
5479 // getOperandBundle() cannot be called if more than one of the operand
5480 // bundle exists. There is already a check elsewhere for this, so skip
5481 // here if we see more than one.
5482 if (UseCall->countOperandBundlesOfType(LLVMContext::OB_preallocated) >
5483 1) {
5484 return;
5485 }
5486 auto PreallocatedBundle =
5487 UseCall->getOperandBundle(LLVMContext::OB_preallocated);
5488 Check(PreallocatedBundle,
5489 "Use of llvm.call.preallocated.setup outside intrinsics "
5490 "must be in \"preallocated\" operand bundle");
5491 Check(PreallocatedBundle->Inputs.front().get() == &Call,
5492 "preallocated bundle must have token from corresponding "
5493 "llvm.call.preallocated.setup");
5494 }
5495 }
5496 break;
5497 }
5498 case Intrinsic::call_preallocated_arg: {
5499 auto *Token = dyn_cast<CallBase>(Call.getArgOperand(0));
5500 Check(Token && Token->getCalledFunction()->getIntrinsicID() ==
5501 Intrinsic::call_preallocated_setup,
5502 "llvm.call.preallocated.arg token argument must be a "
5503 "llvm.call.preallocated.setup");
5504 Check(Call.hasFnAttr(Attribute::Preallocated),
5505 "llvm.call.preallocated.arg must be called with a \"preallocated\" "
5506 "call site attribute");
5507 break;
5508 }
5509 case Intrinsic::call_preallocated_teardown: {
5510 auto *Token = dyn_cast<CallBase>(Call.getArgOperand(0));
5511 Check(Token && Token->getCalledFunction()->getIntrinsicID() ==
5512 Intrinsic::call_preallocated_setup,
5513 "llvm.call.preallocated.teardown token argument must be a "
5514 "llvm.call.preallocated.setup");
5515 break;
5516 }
5517 case Intrinsic::gcroot:
5518 case Intrinsic::gcwrite:
5519 case Intrinsic::gcread:
5520 if (ID == Intrinsic::gcroot) {
5521 AllocaInst *AI =
5522 dyn_cast<AllocaInst>(Call.getArgOperand(0)->stripPointerCasts());
5523 Check(AI, "llvm.gcroot parameter #1 must be an alloca.", Call);
5524 Check(isa<Constant>(Call.getArgOperand(1)),
5525 "llvm.gcroot parameter #2 must be a constant.", Call);
5526 if (!AI->getAllocatedType()->isPointerTy()) {
5527 Check(!isa<ConstantPointerNull>(Call.getArgOperand(1)),
5528 "llvm.gcroot parameter #1 must either be a pointer alloca, "
5529 "or argument #2 must be a non-null constant.",
5530 Call);
5531 }
5532 }
5533
5534 Check(Call.getParent()->getParent()->hasGC(),
5535 "Enclosing function does not use GC.", Call);
5536 break;
5537 case Intrinsic::init_trampoline:
5538 Check(isa<Function>(Call.getArgOperand(1)->stripPointerCasts()),
5539 "llvm.init_trampoline parameter #2 must resolve to a function.",
5540 Call);
5541 break;
5542 case Intrinsic::prefetch:
5543 Check(cast<ConstantInt>(Call.getArgOperand(1))->getZExtValue() < 2,
5544 "rw argument to llvm.prefetch must be 0-1", Call);
5545 Check(cast<ConstantInt>(Call.getArgOperand(2))->getZExtValue() < 4,
5546 "locality argument to llvm.prefetch must be 0-3", Call);
5547 Check(cast<ConstantInt>(Call.getArgOperand(3))->getZExtValue() < 2,
5548 "cache type argument to llvm.prefetch must be 0-1", Call);
5549 break;
5550 case Intrinsic::stackprotector:
5551 Check(isa<AllocaInst>(Call.getArgOperand(1)->stripPointerCasts()),
5552 "llvm.stackprotector parameter #2 must resolve to an alloca.", Call);
5553 break;
5554 case Intrinsic::localescape: {
5555 BasicBlock *BB = Call.getParent();
5556 Check(BB->isEntryBlock(), "llvm.localescape used outside of entry block",
5557 Call);
5558 Check(!SawFrameEscape, "multiple calls to llvm.localescape in one function",
5559 Call);
5560 for (Value *Arg : Call.args()) {
5561 if (isa<ConstantPointerNull>(Arg))
5562 continue; // Null values are allowed as placeholders.
5563 auto *AI = dyn_cast<AllocaInst>(Arg->stripPointerCasts());
5564 Check(AI && AI->isStaticAlloca(),
5565 "llvm.localescape only accepts static allocas", Call);
5566 }
5567 FrameEscapeInfo[BB->getParent()].first = Call.arg_size();
5568 SawFrameEscape = true;
5569 break;
5570 }
5571 case Intrinsic::localrecover: {
5572 Value *FnArg = Call.getArgOperand(0)->stripPointerCasts();
5573 Function *Fn = dyn_cast<Function>(FnArg);
5574 Check(Fn && !Fn->isDeclaration(),
5575 "llvm.localrecover first "
5576 "argument must be function defined in this module",
5577 Call);
5578 auto *IdxArg = cast<ConstantInt>(Call.getArgOperand(2));
5579 auto &Entry = FrameEscapeInfo[Fn];
5580 Entry.second = unsigned(
5581 std::max(uint64_t(Entry.second), IdxArg->getLimitedValue(~0U) + 1));
5582 break;
5583 }
5584
5585 case Intrinsic::experimental_gc_statepoint:
5586 if (auto *CI = dyn_cast<CallInst>(&Call))
5587 Check(!CI->isInlineAsm(),
5588 "gc.statepoint support for inline assembly unimplemented", CI);
5589 Check(Call.getParent()->getParent()->hasGC(),
5590 "Enclosing function does not use GC.", Call);
5591
5592 verifyStatepoint(Call);
5593 break;
5594 case Intrinsic::experimental_gc_result: {
5595 Check(Call.getParent()->getParent()->hasGC(),
5596 "Enclosing function does not use GC.", Call);
5597
5598 auto *Statepoint = Call.getArgOperand(0);
5599 if (isa<UndefValue>(Statepoint))
5600 break;
5601
5602 // Are we tied to a statepoint properly?
5603 const auto *StatepointCall = dyn_cast<CallBase>(Statepoint);
5604 const Function *StatepointFn =
5605 StatepointCall ? StatepointCall->getCalledFunction() : nullptr;
5606 Check(StatepointFn && StatepointFn->isDeclaration() &&
5607 StatepointFn->getIntrinsicID() ==
5608 Intrinsic::experimental_gc_statepoint,
5609 "gc.result operand #1 must be from a statepoint", Call,
5610 Call.getArgOperand(0));
5611
5612 // Check that result type matches wrapped callee.
5613 auto *TargetFuncType =
5614 cast<FunctionType>(StatepointCall->getParamElementType(2));
5615 Check(Call.getType() == TargetFuncType->getReturnType(),
5616 "gc.result result type does not match wrapped callee", Call);
5617 break;
5618 }
5619 case Intrinsic::experimental_gc_relocate: {
5620 Check(Call.arg_size() == 3, "wrong number of arguments", Call);
5621
5622 Check(isa<PointerType>(Call.getType()->getScalarType()),
5623 "gc.relocate must return a pointer or a vector of pointers", Call);
5624
5625 // Check that this relocate is correctly tied to the statepoint
5626
5627 // This is case for relocate on the unwinding path of an invoke statepoint
5628 if (LandingPadInst *LandingPad =
5629 dyn_cast<LandingPadInst>(Call.getArgOperand(0))) {
5630
5631 const BasicBlock *InvokeBB =
5632 LandingPad->getParent()->getUniquePredecessor();
5633
5634 // Landingpad relocates should have only one predecessor with invoke
5635 // statepoint terminator
5636 Check(InvokeBB, "safepoints should have unique landingpads",
5637 LandingPad->getParent());
5638 Check(InvokeBB->getTerminator(), "safepoint block should be well formed",
5639 InvokeBB);
5640 Check(isa<GCStatepointInst>(InvokeBB->getTerminator()),
5641 "gc relocate should be linked to a statepoint", InvokeBB);
5642 } else {
5643 // In all other cases relocate should be tied to the statepoint directly.
5644 // This covers relocates on a normal return path of invoke statepoint and
5645 // relocates of a call statepoint.
5646 auto *Token = Call.getArgOperand(0);
5647 Check(isa<GCStatepointInst>(Token) || isa<UndefValue>(Token),
5648 "gc relocate is incorrectly tied to the statepoint", Call, Token);
5649 }
5650
5651 // Verify rest of the relocate arguments.
5652 const Value &StatepointCall = *cast<GCRelocateInst>(Call).getStatepoint();
5653
5654 // Both the base and derived must be piped through the safepoint.
5655 Value *Base = Call.getArgOperand(1);
5656 Check(isa<ConstantInt>(Base),
5657 "gc.relocate operand #2 must be integer offset", Call);
5658
5659 Value *Derived = Call.getArgOperand(2);
5660 Check(isa<ConstantInt>(Derived),
5661 "gc.relocate operand #3 must be integer offset", Call);
5662
5663 const uint64_t BaseIndex = cast<ConstantInt>(Base)->getZExtValue();
5664 const uint64_t DerivedIndex = cast<ConstantInt>(Derived)->getZExtValue();
5665
5666 // Check the bounds
5667 if (isa<UndefValue>(StatepointCall))
5668 break;
5669 if (auto Opt = cast<GCStatepointInst>(StatepointCall)
5670 .getOperandBundle(LLVMContext::OB_gc_live)) {
5671 Check(BaseIndex < Opt->Inputs.size(),
5672 "gc.relocate: statepoint base index out of bounds", Call);
5673 Check(DerivedIndex < Opt->Inputs.size(),
5674 "gc.relocate: statepoint derived index out of bounds", Call);
5675 }
5676
5677 // Relocated value must be either a pointer type or vector-of-pointer type,
5678 // but gc_relocate does not need to return the same pointer type as the
5679 // relocated pointer. It can be casted to the correct type later if it's
5680 // desired. However, they must have the same address space and 'vectorness'
5681 GCRelocateInst &Relocate = cast<GCRelocateInst>(Call);
5682 auto *ResultType = Call.getType();
5683 auto *DerivedType = Relocate.getDerivedPtr()->getType();
5684 auto *BaseType = Relocate.getBasePtr()->getType();
5685
5686 Check(BaseType->isPtrOrPtrVectorTy(),
5687 "gc.relocate: relocated value must be a pointer", Call);
5688 Check(DerivedType->isPtrOrPtrVectorTy(),
5689 "gc.relocate: relocated value must be a pointer", Call);
5690
5691 Check(ResultType->isVectorTy() == DerivedType->isVectorTy(),
5692 "gc.relocate: vector relocates to vector and pointer to pointer",
5693 Call);
5694 Check(
5695 ResultType->getPointerAddressSpace() ==
5696 DerivedType->getPointerAddressSpace(),
5697 "gc.relocate: relocating a pointer shouldn't change its address space",
5698 Call);
5699
5700 auto GC = llvm::getGCStrategy(Relocate.getFunction()->getGC());
5701 Check(GC, "gc.relocate: calling function must have GCStrategy",
5702 Call.getFunction());
5703 if (GC) {
5704 auto isGCPtr = [&GC](Type *PTy) {
5705 return GC->isGCManagedPointer(PTy->getScalarType()).value_or(true);
5706 };
5707 Check(isGCPtr(ResultType), "gc.relocate: must return gc pointer", Call);
5708 Check(isGCPtr(BaseType),
5709 "gc.relocate: relocated value must be a gc pointer", Call);
5710 Check(isGCPtr(DerivedType),
5711 "gc.relocate: relocated value must be a gc pointer", Call);
5712 }
5713 break;
5714 }
5715 case Intrinsic::experimental_patchpoint: {
5716 if (Call.getCallingConv() == CallingConv::AnyReg) {
5717 Check(Call.getType()->isSingleValueType(),
5718 "patchpoint: invalid return type used with anyregcc", Call);
5719 }
5720 break;
5721 }
5722 case Intrinsic::eh_exceptioncode:
5723 case Intrinsic::eh_exceptionpointer: {
5724 Check(isa<CatchPadInst>(Call.getArgOperand(0)),
5725 "eh.exceptionpointer argument must be a catchpad", Call);
5726 break;
5727 }
5728 case Intrinsic::get_active_lane_mask: {
5729 Check(Call.getType()->isVectorTy(),
5730 "get_active_lane_mask: must return a "
5731 "vector",
5732 Call);
5733 auto *ElemTy = Call.getType()->getScalarType();
5734 Check(ElemTy->isIntegerTy(1),
5735 "get_active_lane_mask: element type is not "
5736 "i1",
5737 Call);
5738 break;
5739 }
5740 case Intrinsic::experimental_get_vector_length: {
5741 ConstantInt *VF = cast<ConstantInt>(Call.getArgOperand(1));
5742 Check(!VF->isNegative() && !VF->isZero(),
5743 "get_vector_length: VF must be positive", Call);
5744 break;
5745 }
5746 case Intrinsic::masked_load: {
5747 Check(Call.getType()->isVectorTy(), "masked_load: must return a vector",
5748 Call);
5749
5750 ConstantInt *Alignment = cast<ConstantInt>(Call.getArgOperand(1));
5751 Value *Mask = Call.getArgOperand(2);
5752 Value *PassThru = Call.getArgOperand(3);
5753 Check(Mask->getType()->isVectorTy(), "masked_load: mask must be vector",
5754 Call);
5755 Check(Alignment->getValue().isPowerOf2(),
5756 "masked_load: alignment must be a power of 2", Call);
5757 Check(PassThru->getType() == Call.getType(),
5758 "masked_load: pass through and return type must match", Call);
5759 Check(cast<VectorType>(Mask->getType())->getElementCount() ==
5760 cast<VectorType>(Call.getType())->getElementCount(),
5761 "masked_load: vector mask must be same length as return", Call);
5762 break;
5763 }
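  // For illustration, a sketch of an accepted masked load (alignment and
  // vector width arbitrary): power-of-two alignment, a mask whose length
  // matches the result, and a pass-through of the result type.
  //
  //   %v = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %p, i32 4,
  //                                                  <4 x i1> %m, <4 x i32> %pt)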
5764 case Intrinsic::masked_store: {
5765 Value *Val = Call.getArgOperand(0);
5766 ConstantInt *Alignment = cast<ConstantInt>(Call.getArgOperand(2));
5767 Value *Mask = Call.getArgOperand(3);
5768 Check(Mask->getType()->isVectorTy(), "masked_store: mask must be vector",
5769 Call);
5770 Check(Alignment->getValue().isPowerOf2(),
5771 "masked_store: alignment must be a power of 2", Call);
5772 Check(cast<VectorType>(Mask->getType())->getElementCount() ==
5773 cast<VectorType>(Val->getType())->getElementCount(),
5774 "masked_store: vector mask must be same length as value", Call);
5775 break;
5776 }
5777
5778 case Intrinsic::masked_gather: {
5779 const APInt &Alignment =
5780 cast<ConstantInt>(Call.getArgOperand(1))->getValue();
5781 Check(Alignment.isZero() || Alignment.isPowerOf2(),
5782 "masked_gather: alignment must be 0 or a power of 2", Call);
5783 break;
5784 }
5785 case Intrinsic::masked_scatter: {
5786 const APInt &Alignment =
5787 cast<ConstantInt>(Call.getArgOperand(2))->getValue();
5788 Check(Alignment.isZero() || Alignment.isPowerOf2(),
5789 "masked_scatter: alignment must be 0 or a power of 2", Call);
5790 break;
5791 }
5792
5793 case Intrinsic::experimental_guard: {
5794 Check(isa<CallInst>(Call), "experimental_guard cannot be invoked", Call);
5795 Check(Call.countOperandBundlesOfType(LLVMContext::OB_deopt) == 1,
5796 "experimental_guard must have exactly one "
5797 "\"deopt\" operand bundle");
5798 break;
5799 }
5800
5801 case Intrinsic::experimental_deoptimize: {
5802 Check(isa<CallInst>(Call), "experimental_deoptimize cannot be invoked",
5803 Call);
5804 Check(Call.countOperandBundlesOfType(LLVMContext::OB_deopt) == 1,
5805 "experimental_deoptimize must have exactly one "
5806 "\"deopt\" operand bundle");
5807 Check(Call.getType() == Call.getFunction()->getReturnType(),
5808 "experimental_deoptimize return type must match caller return type");
5809
5810 if (isa<CallInst>(Call)) {
5811 auto *RI = dyn_cast<ReturnInst>(Call.getNextNode());
5812 Check(RI,
5813 "calls to experimental_deoptimize must be followed by a return");
5814
5815 if (!Call.getType()->isVoidTy() && RI)
5816 Check(RI->getReturnValue() == &Call,
5817 "calls to experimental_deoptimize must be followed by a return "
5818 "of the value computed by experimental_deoptimize");
5819 }
5820
5821 break;
5822 }
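  // For illustration, a sketch of an accepted use (assuming the enclosing
  // function returns i32): exactly one "deopt" bundle and an immediately
  // following return of the call's value.
  //
  //   %r = call i32 @llvm.experimental.deoptimize.i32(i32 42) [ "deopt"() ]
  //   ret i32 %r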
5823 case Intrinsic::vastart: {
5824 Check(Call.getFunction()->isVarArg(),
5825 "va_start called in a non-varargs function");
5826 break;
5827 }
5828 case Intrinsic::vector_reduce_and:
5829 case Intrinsic::vector_reduce_or:
5830 case Intrinsic::vector_reduce_xor:
5831 case Intrinsic::vector_reduce_add:
5832 case Intrinsic::vector_reduce_mul:
5833 case Intrinsic::vector_reduce_smax:
5834 case Intrinsic::vector_reduce_smin:
5835 case Intrinsic::vector_reduce_umax:
5836 case Intrinsic::vector_reduce_umin: {
5837 Type *ArgTy = Call.getArgOperand(0)->getType();
5838 Check(ArgTy->isIntOrIntVectorTy() && ArgTy->isVectorTy(),
5839 "Intrinsic has incorrect argument type!");
5840 break;
5841 }
5842 case Intrinsic::vector_reduce_fmax:
5843 case Intrinsic::vector_reduce_fmin: {
5844 Type *ArgTy = Call.getArgOperand(0)->getType();
5845 Check(ArgTy->isFPOrFPVectorTy() && ArgTy->isVectorTy(),
5846 "Intrinsic has incorrect argument type!");
5847 break;
5848 }
5849 case Intrinsic::vector_reduce_fadd:
5850 case Intrinsic::vector_reduce_fmul: {
5851 // Unlike the other reductions, the first argument is a start value. The
5852 // second argument is the vector to be reduced.
5853 Type *ArgTy = Call.getArgOperand(1)->getType();
5854 Check(ArgTy->isFPOrFPVectorTy() && ArgTy->isVectorTy(),
5855 "Intrinsic has incorrect argument type!");
5856 break;
5857 }
5858 case Intrinsic::smul_fix:
5859 case Intrinsic::smul_fix_sat:
5860 case Intrinsic::umul_fix:
5861 case Intrinsic::umul_fix_sat:
5862 case Intrinsic::sdiv_fix:
5863 case Intrinsic::sdiv_fix_sat:
5864 case Intrinsic::udiv_fix:
5865 case Intrinsic::udiv_fix_sat: {
5866 Value *Op1 = Call.getArgOperand(0);
5867 Value *Op2 = Call.getArgOperand(1);
5868 Check(Op1->getType()->isIntOrIntVectorTy(),
5869 "first operand of [us][mul|div]_fix[_sat] must be an int type or "
5870 "vector of ints");
5871 Check(Op2->getType()->isIntOrIntVectorTy(),
5872 "second operand of [us][mul|div]_fix[_sat] must be an int type or "
5873 "vector of ints");
5874
5875 auto *Op3 = cast<ConstantInt>(Call.getArgOperand(2));
5876 Check(Op3->getType()->isIntegerTy(),
5877 "third operand of [us][mul|div]_fix[_sat] must be an int type");
5878 Check(Op3->getBitWidth() <= 32,
5879 "third operand of [us][mul|div]_fix[_sat] must fit within 32 bits");
5880
5881 if (ID == Intrinsic::smul_fix || ID == Intrinsic::smul_fix_sat ||
5882 ID == Intrinsic::sdiv_fix || ID == Intrinsic::sdiv_fix_sat) {
5883 Check(Op3->getZExtValue() < Op1->getType()->getScalarSizeInBits(),
5884 "the scale of s[mul|div]_fix[_sat] must be less than the width of "
5885 "the operands");
5886 } else {
5887 Check(Op3->getZExtValue() <= Op1->getType()->getScalarSizeInBits(),
5888 "the scale of u[mul|div]_fix[_sat] must be less than or equal "
5889 "to the width of the operands");
5890 }
5891 break;
5892 }
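  // For illustration, a sketch of an accepted fixed-point call (scale
  // arbitrary, but below the operand width for the signed variants):
  //
  //   %r = call i32 @llvm.smul.fix.i32(i32 %a, i32 %b, i32 16)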
5893 case Intrinsic::lrint:
5894 case Intrinsic::llrint: {
5895 Type *ValTy = Call.getArgOperand(0)->getType();
5896 Type *ResultTy = Call.getType();
5897 Check(
5898 ValTy->isFPOrFPVectorTy() && ResultTy->isIntOrIntVectorTy(),
5899 "llvm.lrint, llvm.llrint: argument must be floating-point or vector "
5900 "of floating-points, and result must be integer or vector of integers",
5901 &Call);
5902 Check(ValTy->isVectorTy() == ResultTy->isVectorTy(),
5903 "llvm.lrint, llvm.llrint: argument and result disagree on vector use",
5904 &Call);
5905 if (ValTy->isVectorTy()) {
5906 Check(cast<VectorType>(ValTy)->getElementCount() ==
5907 cast<VectorType>(ResultTy)->getElementCount(),
5908 "llvm.lrint, llvm.llrint: argument must be same length as result",
5909 &Call);
5910 }
5911 break;
5912 }
5913 case Intrinsic::lround:
5914 case Intrinsic::llround: {
5915 Type *ValTy = Call.getArgOperand(0)->getType();
5916 Type *ResultTy = Call.getType();
5917 Check(!ValTy->isVectorTy() && !ResultTy->isVectorTy(),
5918 "Intrinsic does not support vectors", &Call);
5919 break;
5920 }
5921 case Intrinsic::bswap: {
5922 Type *Ty = Call.getType();
5923 unsigned Size = Ty->getScalarSizeInBits();
5924 Check(Size % 16 == 0, "bswap must be an even number of bytes", &Call);
5925 break;
5926 }
5927 case Intrinsic::invariant_start: {
5928 ConstantInt *InvariantSize = dyn_cast<ConstantInt>(Call.getArgOperand(0));
5929 Check(InvariantSize &&
5930 (!InvariantSize->isNegative() || InvariantSize->isMinusOne()),
5931 "invariant_start parameter must be -1, 0 or a positive number",
5932 &Call);
5933 break;
5934 }
5935 case Intrinsic::matrix_multiply:
5936 case Intrinsic::matrix_transpose:
5937 case Intrinsic::matrix_column_major_load:
5938 case Intrinsic::matrix_column_major_store: {
5939 Function *IF = Call.getCalledFunction();
5940 ConstantInt *Stride = nullptr;
5941 ConstantInt *NumRows;
5942 ConstantInt *NumColumns;
5943 VectorType *ResultTy;
5944 Type *Op0ElemTy = nullptr;
5945 Type *Op1ElemTy = nullptr;
5946 switch (ID) {
5947 case Intrinsic::matrix_multiply: {
5948 NumRows = cast<ConstantInt>(Call.getArgOperand(2));
5949 ConstantInt *N = cast<ConstantInt>(Call.getArgOperand(3));
5950 NumColumns = cast<ConstantInt>(Call.getArgOperand(4));
5951 Check(cast<FixedVectorType>(Call.getArgOperand(0)->getType())
5952 ->getNumElements() ==
5953 NumRows->getZExtValue() * N->getZExtValue(),
5954 "First argument of a matrix operation does not match specified "
5955 "shape!");
5956 Check(cast<FixedVectorType>(Call.getArgOperand(1)->getType())
5957 ->getNumElements() ==
5958 N->getZExtValue() * NumColumns->getZExtValue(),
5959 "Second argument of a matrix operation does not match specified "
5960 "shape!");
5961
5962 ResultTy = cast<VectorType>(Call.getType());
5963 Op0ElemTy =
5964 cast<VectorType>(Call.getArgOperand(0)->getType())->getElementType();
5965 Op1ElemTy =
5966 cast<VectorType>(Call.getArgOperand(1)->getType())->getElementType();
5967 break;
5968 }
5969 case Intrinsic::matrix_transpose:
5970 NumRows = cast<ConstantInt>(Call.getArgOperand(1));
5971 NumColumns = cast<ConstantInt>(Call.getArgOperand(2));
5972 ResultTy = cast<VectorType>(Call.getType());
5973 Op0ElemTy =
5974 cast<VectorType>(Call.getArgOperand(0)->getType())->getElementType();
5975 break;
5976 case Intrinsic::matrix_column_major_load: {
5977 Stride = dyn_cast<ConstantInt>(Call.getArgOperand(1));
5978 NumRows = cast<ConstantInt>(Call.getArgOperand(3));
5979 NumColumns = cast<ConstantInt>(Call.getArgOperand(4));
5980 ResultTy = cast<VectorType>(Call.getType());
5981 break;
5982 }
5983 case Intrinsic::matrix_column_major_store: {
5984 Stride = dyn_cast<ConstantInt>(Call.getArgOperand(2));
5985 NumRows = cast<ConstantInt>(Call.getArgOperand(4));
5986 NumColumns = cast<ConstantInt>(Call.getArgOperand(5));
5987 ResultTy = cast<VectorType>(Call.getArgOperand(0)->getType());
5988 Op0ElemTy =
5989 cast<VectorType>(Call.getArgOperand(0)->getType())->getElementType();
5990 break;
5991 }
5992 default:
5993 llvm_unreachable("unexpected intrinsic");
5994 }
5995
5996 Check(ResultTy->getElementType()->isIntegerTy() ||
5997 ResultTy->getElementType()->isFloatingPointTy(),
5998 "Result type must be an integer or floating-point type!", IF);
5999
6000 if (Op0ElemTy)
6001 Check(ResultTy->getElementType() == Op0ElemTy,
6002 "Vector element type mismatch of the result and first operand "
6003 "vector!",
6004 IF);
6005
6006 if (Op1ElemTy)
6007 Check(ResultTy->getElementType() == Op1ElemTy,
6008 "Vector element type mismatch of the result and second operand "
6009 "vector!",
6010 IF);
6011
6012 Check(cast<FixedVectorType>(ResultTy)->getNumElements() ==
6013 NumRows->getZExtValue() * NumColumns->getZExtValue(),
6014 "Result of a matrix operation does not fit in the returned vector!");
6015
6016 if (Stride)
6017 Check(Stride->getZExtValue() >= NumRows->getZExtValue(),
6018          "Stride must be greater than or equal to the number of rows!", IF);
6019
6020 break;
6021 }
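  // Illustrative sketch (assumed IR, not from the original source): a call that
  // satisfies the shape checks above, multiplying two 2x2 float matrices:
  //   %c = call <4 x float> @llvm.matrix.multiply.v4f32.v4f32.v4f32(
  //            <4 x float> %a, <4 x float> %b, i32 2, i32 2, i32 2)
  // Both operands and the result hold 2 * 2 == 4 elements of the same type.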
6022 case Intrinsic::experimental_vector_splice: {
6023 VectorType *VecTy = cast<VectorType>(Call.getType());
6024 int64_t Idx = cast<ConstantInt>(Call.getArgOperand(2))->getSExtValue();
6025 int64_t KnownMinNumElements = VecTy->getElementCount().getKnownMinValue();
6026 if (Call.getParent() && Call.getParent()->getParent()) {
6027 AttributeList Attrs = Call.getParent()->getParent()->getAttributes();
6028 if (Attrs.hasFnAttr(Attribute::VScaleRange))
6029 KnownMinNumElements *= Attrs.getFnAttrs().getVScaleRangeMin();
6030 }
6031 Check((Idx < 0 && std::abs(Idx) <= KnownMinNumElements) ||
6032 (Idx >= 0 && Idx < KnownMinNumElements),
6033 "The splice index exceeds the range [-VL, VL-1] where VL is the "
6034 "known minimum number of elements in the vector. For scalable "
6035 "vectors the minimum number of elements is determined from "
6036 "vscale_range.",
6037 &Call);
6038 break;
6039 }
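  // Illustrative sketch (assumed IR): for a <4 x i32> splice the index operand
  // must lie in [-4, 3], e.g.
  //   %v = call <4 x i32> @llvm.experimental.vector.splice.v4i32(
  //            <4 x i32> %a, <4 x i32> %b, i32 -1)
  // For scalable vectors the bound is scaled by the vscale_range minimum.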
6040 case Intrinsic::experimental_stepvector: {
6041 VectorType *VecTy = dyn_cast<VectorType>(Call.getType());
6042 Check(VecTy && VecTy->getScalarType()->isIntegerTy() &&
6043 VecTy->getScalarSizeInBits() >= 8,
6044 "experimental_stepvector only supported for vectors of integers "
6045 "with a bitwidth of at least 8.",
6046 &Call);
6047 break;
6048 }
6049 case Intrinsic::vector_insert: {
6050 Value *Vec = Call.getArgOperand(0);
6051 Value *SubVec = Call.getArgOperand(1);
6052 Value *Idx = Call.getArgOperand(2);
6053 unsigned IdxN = cast<ConstantInt>(Idx)->getZExtValue();
6054
6055 VectorType *VecTy = cast<VectorType>(Vec->getType());
6056 VectorType *SubVecTy = cast<VectorType>(SubVec->getType());
6057
6058 ElementCount VecEC = VecTy->getElementCount();
6059 ElementCount SubVecEC = SubVecTy->getElementCount();
6060 Check(VecTy->getElementType() == SubVecTy->getElementType(),
6061 "vector_insert parameters must have the same element "
6062 "type.",
6063 &Call);
6064 Check(IdxN % SubVecEC.getKnownMinValue() == 0,
6065 "vector_insert index must be a constant multiple of "
6066 "the subvector's known minimum vector length.");
6067
6068 // If this insertion is not the 'mixed' case where a fixed vector is
6069 // inserted into a scalable vector, ensure that the insertion of the
6070 // subvector does not overrun the parent vector.
6071 if (VecEC.isScalable() == SubVecEC.isScalable()) {
6072 Check(IdxN < VecEC.getKnownMinValue() &&
6073 IdxN + SubVecEC.getKnownMinValue() <= VecEC.getKnownMinValue(),
6074 "subvector operand of vector_insert would overrun the "
6075 "vector being inserted into.");
6076 }
6077 break;
6078 }
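  // Illustrative sketch (assumed IR): inserting a fixed subvector into a
  // scalable vector at an index that is a multiple of the subvector length:
  //   %v = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v4i32(
  //            <vscale x 4 x i32> %vec, <4 x i32> %sub, i64 0)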
6079 case Intrinsic::vector_extract: {
6080 Value *Vec = Call.getArgOperand(0);
6081 Value *Idx = Call.getArgOperand(1);
6082 unsigned IdxN = cast<ConstantInt>(Idx)->getZExtValue();
6083
6084 VectorType *ResultTy = cast<VectorType>(Call.getType());
6085 VectorType *VecTy = cast<VectorType>(Vec->getType());
6086
6087 ElementCount VecEC = VecTy->getElementCount();
6088 ElementCount ResultEC = ResultTy->getElementCount();
6089
6090 Check(ResultTy->getElementType() == VecTy->getElementType(),
6091 "vector_extract result must have the same element "
6092 "type as the input vector.",
6093 &Call);
6094 Check(IdxN % ResultEC.getKnownMinValue() == 0,
6095 "vector_extract index must be a constant multiple of "
6096 "the result type's known minimum vector length.");
6097
6098 // If this extraction is not the 'mixed' case where a fixed vector is
6099 // extracted from a scalable vector, ensure that the extraction does not
6100 // overrun the parent vector.
6101 if (VecEC.isScalable() == ResultEC.isScalable()) {
6102 Check(IdxN < VecEC.getKnownMinValue() &&
6103 IdxN + ResultEC.getKnownMinValue() <= VecEC.getKnownMinValue(),
6104 "vector_extract would overrun.");
6105 }
6106 break;
6107 }
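  // Illustrative sketch (assumed IR): the mirror image of vector_insert above,
  // extracting the low fixed-width part of a scalable vector:
  //   %part = call <4 x i32> @llvm.vector.extract.v4i32.nxv4i32(
  //               <vscale x 4 x i32> %vec, i64 0)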
6108 case Intrinsic::experimental_noalias_scope_decl: {
6109 NoAliasScopeDecls.push_back(cast<IntrinsicInst>(&Call));
6110 break;
6111 }
6112 case Intrinsic::preserve_array_access_index:
6113 case Intrinsic::preserve_struct_access_index:
6114 case Intrinsic::aarch64_ldaxr:
6115 case Intrinsic::aarch64_ldxr:
6116 case Intrinsic::arm_ldaex:
6117 case Intrinsic::arm_ldrex: {
6118 Type *ElemTy = Call.getParamElementType(0);
6119 Check(ElemTy, "Intrinsic requires elementtype attribute on first argument.",
6120 &Call);
6121 break;
6122 }
6123 case Intrinsic::aarch64_stlxr:
6124 case Intrinsic::aarch64_stxr:
6125 case Intrinsic::arm_stlex:
6126 case Intrinsic::arm_strex: {
6127 Type *ElemTy = Call.getAttributes().getParamElementType(1);
6128 Check(ElemTy,
6129 "Intrinsic requires elementtype attribute on second argument.",
6130 &Call);
6131 break;
6132 }
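  // Illustrative sketch (assumed IR): the elementtype attribute these checks
  // require looks like
  //   %v = call i64 @llvm.aarch64.ldxr.p0(ptr elementtype(i64) %addr)
  // i.e. the pointer operand carries the type of the element being accessed.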
6133 case Intrinsic::aarch64_prefetch: {
6134 Check(cast<ConstantInt>(Call.getArgOperand(1))->getZExtValue() < 2,
6135 "write argument to llvm.aarch64.prefetch must be 0 or 1", Call);
6136 Check(cast<ConstantInt>(Call.getArgOperand(2))->getZExtValue() < 4,
6137 "target argument to llvm.aarch64.prefetch must be 0-3", Call);
6138 Check(cast<ConstantInt>(Call.getArgOperand(3))->getZExtValue() < 2,
6139 "stream argument to llvm.aarch64.prefetch must be 0 or 1", Call);
6140 Check(cast<ConstantInt>(Call.getArgOperand(4))->getZExtValue() < 2,
6141 "isdata argument to llvm.aarch64.prefetch must be 0 or 1", Call);
6142 break;
6143 }
6144 case Intrinsic::callbr_landingpad: {
6145 const auto *CBR = dyn_cast<CallBrInst>(Call.getOperand(0));
6146    Check(CBR, "intrinsic requires callbr operand", &Call);
6147 if (!CBR)
6148 break;
6149
6150 const BasicBlock *LandingPadBB = Call.getParent();
6151 const BasicBlock *PredBB = LandingPadBB->getUniquePredecessor();
6152 if (!PredBB) {
6153 CheckFailed("Intrinsic in block must have 1 unique predecessor", &Call);
6154 break;
6155 }
6156 if (!isa<CallBrInst>(PredBB->getTerminator())) {
6157 CheckFailed("Intrinsic must have corresponding callbr in predecessor",
6158 &Call);
6159 break;
6160 }
6161 Check(llvm::any_of(CBR->getIndirectDests(),
6162 [LandingPadBB](const BasicBlock *IndDest) {
6163 return IndDest == LandingPadBB;
6164 }),
6165 "Intrinsic's corresponding callbr must have intrinsic's parent basic "
6166 "block in indirect destination list",
6167 &Call);
6168 const Instruction &First = *LandingPadBB->begin();
6169    Check(&First == &Call, "No other instructions may precede intrinsic",
6170 &Call);
6171 break;
6172 }
6173 case Intrinsic::amdgcn_cs_chain: {
6174 auto CallerCC = Call.getCaller()->getCallingConv();
6175 switch (CallerCC) {
6176    case CallingConv::AMDGPU_CS:
6177    case CallingConv::AMDGPU_CS_Chain:
6178    case CallingConv::AMDGPU_CS_ChainPreserve:
6179      break;
6180 default:
6181 CheckFailed("Intrinsic can only be used from functions with the "
6182 "amdgpu_cs, amdgpu_cs_chain or amdgpu_cs_chain_preserve "
6183 "calling conventions",
6184 &Call);
6185 break;
6186 }
6187
6188 Check(Call.paramHasAttr(2, Attribute::InReg),
6189 "SGPR arguments must have the `inreg` attribute", &Call);
6190 Check(!Call.paramHasAttr(3, Attribute::InReg),
6191 "VGPR arguments must not have the `inreg` attribute", &Call);
6192 break;
6193 }
6194 case Intrinsic::amdgcn_set_inactive_chain_arg: {
6195 auto CallerCC = Call.getCaller()->getCallingConv();
6196 switch (CallerCC) {
6197    case CallingConv::AMDGPU_CS_Chain:
6198    case CallingConv::AMDGPU_CS_ChainPreserve:
6199      break;
6200 default:
6201 CheckFailed("Intrinsic can only be used from functions with the "
6202 "amdgpu_cs_chain or amdgpu_cs_chain_preserve "
6203 "calling conventions",
6204 &Call);
6205 break;
6206 }
6207
6208 unsigned InactiveIdx = 1;
6209 Check(!Call.paramHasAttr(InactiveIdx, Attribute::InReg),
6210 "Value for inactive lanes must not have the `inreg` attribute",
6211 &Call);
6212 Check(isa<Argument>(Call.getArgOperand(InactiveIdx)),
6213 "Value for inactive lanes must be a function argument", &Call);
6214 Check(!cast<Argument>(Call.getArgOperand(InactiveIdx))->hasInRegAttr(),
6215 "Value for inactive lanes must be a VGPR function argument", &Call);
6216 break;
6217 }
6218 case Intrinsic::nvvm_setmaxnreg_inc_sync_aligned_u32:
6219 case Intrinsic::nvvm_setmaxnreg_dec_sync_aligned_u32: {
6220 Value *V = Call.getArgOperand(0);
6221 unsigned RegCount = cast<ConstantInt>(V)->getZExtValue();
6222 Check(RegCount % 8 == 0,
6223          "reg_count argument to nvvm.setmaxnreg must be a multiple of 8");
6224 Check((RegCount >= 24 && RegCount <= 256),
6225 "reg_count argument to nvvm.setmaxnreg must be within [24, 256]");
6226 break;
6227 }
6228 case Intrinsic::experimental_convergence_entry:
6229 case Intrinsic::experimental_convergence_anchor:
6230 break;
6231 case Intrinsic::experimental_convergence_loop:
6232 break;
6233 case Intrinsic::ptrmask: {
6234 Type *Ty0 = Call.getArgOperand(0)->getType();
6235 Type *Ty1 = Call.getArgOperand(1)->getType();
6236    Check(Ty0->isPtrOrPtrVectorTy(),
6237          "llvm.ptrmask intrinsic first argument must be pointer or vector "
6238 "of pointers",
6239 &Call);
6240 Check(
6241 Ty0->isVectorTy() == Ty1->isVectorTy(),
6242 "llvm.ptrmask intrinsic arguments must be both scalars or both vectors",
6243 &Call);
6244 if (Ty0->isVectorTy())
6245 Check(cast<VectorType>(Ty0)->getElementCount() ==
6246 cast<VectorType>(Ty1)->getElementCount(),
6247 "llvm.ptrmask intrinsic arguments must have the same number of "
6248 "elements",
6249 &Call);
6250 Check(DL.getIndexTypeSizeInBits(Ty0) == Ty1->getScalarSizeInBits(),
6251 "llvm.ptrmask intrinsic second argument bitwidth must match "
6252 "pointer index type size of first argument",
6253 &Call);
6254 break;
6255 }
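  // Illustrative sketch (assumed IR, 64-bit pointer index type): the mask width
  // must match the pointer index width of the first operand, e.g.
  //   %p2 = call ptr @llvm.ptrmask.p0.i64(ptr %p, i64 -64)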
6256 case Intrinsic::threadlocal_address: {
6257 const Value &Arg0 = *Call.getArgOperand(0);
6258 Check(isa<GlobalValue>(Arg0),
6259 "llvm.threadlocal.address first argument must be a GlobalValue");
6260 Check(cast<GlobalValue>(Arg0).isThreadLocal(),
6261 "llvm.threadlocal.address operand isThreadLocal() must be true");
6262 break;
6263 }
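  // Illustrative sketch (assumed IR): the operand must be a thread_local
  // global, e.g.
  //   @tls = thread_local global i32 0
  //   %p = call ptr @llvm.threadlocal.address.p0(ptr @tls)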
6264 };
6265
6266 // Verify that there aren't any unmediated control transfers between funclets.
6267  if (IntrinsicInst::mayLowerToFunctionCall(ID)) {
6268    Function *F = Call.getParent()->getParent();
6269 if (F->hasPersonalityFn() &&
6270 isScopedEHPersonality(classifyEHPersonality(F->getPersonalityFn()))) {
6271 // Run EH funclet coloring on-demand and cache results for other intrinsic
6272 // calls in this function
6273 if (BlockEHFuncletColors.empty())
6274 BlockEHFuncletColors = colorEHFunclets(*F);
6275
6276 // Check for catch-/cleanup-pad in first funclet block
6277 bool InEHFunclet = false;
6278 BasicBlock *CallBB = Call.getParent();
6279 const ColorVector &CV = BlockEHFuncletColors.find(CallBB)->second;
6280 assert(CV.size() > 0 && "Uncolored block");
6281 for (BasicBlock *ColorFirstBB : CV)
6282 if (dyn_cast_or_null<FuncletPadInst>(ColorFirstBB->getFirstNonPHI()))
6283 InEHFunclet = true;
6284
6285 // Check for funclet operand bundle
6286 bool HasToken = false;
6287 for (unsigned I = 0, E = Call.getNumOperandBundles(); I != E; ++I)
6288 if (Call.getOperandBundleAt(I).getTagID() == LLVMContext::OB_funclet)
6289 HasToken = true;
6290
6291 // This would cause silent code truncation in WinEHPrepare
6292 if (InEHFunclet)
6293 Check(HasToken, "Missing funclet token on intrinsic call", &Call);
6294 }
6295 }
6296}
6297
6298/// Carefully grab the subprogram from a local scope.
6299///
6300/// This carefully grabs the subprogram from a local scope, avoiding the
6301/// built-in assertions that would typically fire.
6302static DISubprogram *getSubprogram(Metadata *LocalScope) {
6303  if (!LocalScope)
6304 return nullptr;
6305
6306 if (auto *SP = dyn_cast<DISubprogram>(LocalScope))
6307 return SP;
6308
6309 if (auto *LB = dyn_cast<DILexicalBlockBase>(LocalScope))
6310 return getSubprogram(LB->getRawScope());
6311
6312 // Just return null; broken scope chains are checked elsewhere.
6313 assert(!isa<DILocalScope>(LocalScope) && "Unknown type of local scope");
6314 return nullptr;
6315}
6316
6317void Verifier::visit(DbgLabelRecord &DLR) {
6318 CheckDI(isa<DILabel>(DLR.getRawLabel()),
6319 "invalid #dbg_label intrinsic variable", &DLR, DLR.getRawLabel());
6320
6321 // Ignore broken !dbg attachments; they're checked elsewhere.
6322 if (MDNode *N = DLR.getDebugLoc().getAsMDNode())
6323 if (!isa<DILocation>(N))
6324 return;
6325
6326 BasicBlock *BB = DLR.getParent();
6327 Function *F = BB ? BB->getParent() : nullptr;
6328
6329 // The scopes for variables and !dbg attachments must agree.
6330 DILabel *Label = DLR.getLabel();
6331 DILocation *Loc = DLR.getDebugLoc();
6332 CheckDI(Loc, "#dbg_label record requires a !dbg attachment", &DLR, BB, F);
6333
6334 DISubprogram *LabelSP = getSubprogram(Label->getRawScope());
6335 DISubprogram *LocSP = getSubprogram(Loc->getRawScope());
6336 if (!LabelSP || !LocSP)
6337 return;
6338
6339 CheckDI(LabelSP == LocSP,
6340 "mismatched subprogram between #dbg_label label and !dbg attachment",
6341 &DLR, BB, F, Label, Label->getScope()->getSubprogram(), Loc,
6342 Loc->getScope()->getSubprogram());
6343}
6344
6345void Verifier::visit(DbgVariableRecord &DVR) {
6346 BasicBlock *BB = DVR.getParent();
6347 Function *F = BB->getParent();
6348
6349  CheckDI(DVR.getType() == DbgVariableRecord::LocationType::Value ||
6350              DVR.getType() == DbgVariableRecord::LocationType::Declare ||
6351              DVR.getType() == DbgVariableRecord::LocationType::Assign,
6352          "invalid #dbg record type", &DVR, DVR.getType());
6353
6354 // The location for a DbgVariableRecord must be either a ValueAsMetadata,
6355 // DIArgList, or an empty MDNode (which is a legacy representation for an
6356 // "undef" location).
6357 auto *MD = DVR.getRawLocation();
6358 CheckDI(MD && (isa<ValueAsMetadata>(MD) || isa<DIArgList>(MD) ||
6359 (isa<MDNode>(MD) && !cast<MDNode>(MD)->getNumOperands())),
6360 "invalid #dbg record address/value", &DVR, MD);
6361 if (auto *VAM = dyn_cast<ValueAsMetadata>(MD))
6362 visitValueAsMetadata(*VAM, F);
6363 else if (auto *AL = dyn_cast<DIArgList>(MD))
6364 visitDIArgList(*AL, F);
6365
6366 CheckDI(isa_and_nonnull<DILocalVariable>(DVR.getRawVariable()),
6367 "invalid #dbg record variable", &DVR, DVR.getRawVariable());
6368 visitMDNode(*DVR.getRawVariable(), AreDebugLocsAllowed::No);
6369
6370 CheckDI(isa_and_nonnull<DIExpression>(DVR.getRawExpression()),
6371 "invalid #dbg record expression", &DVR, DVR.getRawExpression());
6372 visitMDNode(*DVR.getExpression(), AreDebugLocsAllowed::No);
6373
6374 if (DVR.isDbgAssign()) {
6375 CheckDI(isa_and_nonnull<DIAssignID>(DVR.getRawAssignID()),
6376 "invalid #dbg_assign DIAssignID", &DVR, DVR.getRawAssignID());
6377 visitMDNode(*cast<DIAssignID>(DVR.getRawAssignID()),
6378 AreDebugLocsAllowed::No);
6379
6380 const auto *RawAddr = DVR.getRawAddress();
6381 // Similarly to the location above, the address for an assign
6382 // DbgVariableRecord must be a ValueAsMetadata or an empty MDNode, which
6383 // represents an undef address.
6384 CheckDI(
6385 isa<ValueAsMetadata>(RawAddr) ||
6386 (isa<MDNode>(RawAddr) && !cast<MDNode>(RawAddr)->getNumOperands()),
6387 "invalid #dbg_assign address", &DVR, DVR.getRawAddress());
6388 if (auto *VAM = dyn_cast<ValueAsMetadata>(RawAddr))
6389 visitValueAsMetadata(*VAM, F);
6390
6391 CheckDI(isa_and_nonnull<DIExpression>(DVR.getRawAddressExpression()),
6392 "invalid #dbg_assign address expression", &DVR,
6393            DVR.getRawAddressExpression());
6394    visitMDNode(*DVR.getAddressExpression(), AreDebugLocsAllowed::No);
6395
6396 // All of the linked instructions should be in the same function as DVR.
6397 for (Instruction *I : at::getAssignmentInsts(&DVR))
6398 CheckDI(DVR.getFunction() == I->getFunction(),
6399 "inst not in same function as #dbg_assign", I, &DVR);
6400 }
6401
6402 // This check is redundant with one in visitLocalVariable().
6403 DILocalVariable *Var = DVR.getVariable();
6404 CheckDI(isType(Var->getRawType()), "invalid type ref", Var,
6405 Var->getRawType());
6406
6407 auto *DLNode = DVR.getDebugLoc().getAsMDNode();
6408 CheckDI(isa_and_nonnull<DILocation>(DLNode), "invalid #dbg record DILocation",
6409 &DVR, DLNode);
6410 DILocation *Loc = DVR.getDebugLoc();
6411
6412 // The scopes for variables and !dbg attachments must agree.
6413 DISubprogram *VarSP = getSubprogram(Var->getRawScope());
6414 DISubprogram *LocSP = getSubprogram(Loc->getRawScope());
6415 if (!VarSP || !LocSP)
6416 return; // Broken scope chains are checked elsewhere.
6417
6418 CheckDI(VarSP == LocSP,
6419 "mismatched subprogram between #dbg record variable and DILocation",
6420 &DVR, BB, F, Var, Var->getScope()->getSubprogram(), Loc,
6421 Loc->getScope()->getSubprogram());
6422
6423 verifyFnArgs(DVR);
6424}
6425
6426void Verifier::visitVPIntrinsic(VPIntrinsic &VPI) {
6427 if (auto *VPCast = dyn_cast<VPCastIntrinsic>(&VPI)) {
6428 auto *RetTy = cast<VectorType>(VPCast->getType());
6429 auto *ValTy = cast<VectorType>(VPCast->getOperand(0)->getType());
6430 Check(RetTy->getElementCount() == ValTy->getElementCount(),
6431 "VP cast intrinsic first argument and result vector lengths must be "
6432 "equal",
6433 *VPCast);
6434
6435 switch (VPCast->getIntrinsicID()) {
6436 default:
6437 llvm_unreachable("Unknown VP cast intrinsic");
6438 case Intrinsic::vp_trunc:
6439 Check(RetTy->isIntOrIntVectorTy() && ValTy->isIntOrIntVectorTy(),
6440 "llvm.vp.trunc intrinsic first argument and result element type "
6441 "must be integer",
6442 *VPCast);
6443 Check(RetTy->getScalarSizeInBits() < ValTy->getScalarSizeInBits(),
6444 "llvm.vp.trunc intrinsic the bit size of first argument must be "
6445 "larger than the bit size of the return type",
6446 *VPCast);
6447 break;
6448 case Intrinsic::vp_zext:
6449 case Intrinsic::vp_sext:
6450 Check(RetTy->isIntOrIntVectorTy() && ValTy->isIntOrIntVectorTy(),
6451 "llvm.vp.zext or llvm.vp.sext intrinsic first argument and result "
6452 "element type must be integer",
6453 *VPCast);
6454 Check(RetTy->getScalarSizeInBits() > ValTy->getScalarSizeInBits(),
6455 "llvm.vp.zext or llvm.vp.sext intrinsic the bit size of first "
6456 "argument must be smaller than the bit size of the return type",
6457 *VPCast);
6458 break;
6459 case Intrinsic::vp_fptoui:
6460 case Intrinsic::vp_fptosi:
6461 case Intrinsic::vp_lrint:
6462 case Intrinsic::vp_llrint:
6463 Check(
6464 RetTy->isIntOrIntVectorTy() && ValTy->isFPOrFPVectorTy(),
6465          "llvm.vp.fptoui, llvm.vp.fptosi, llvm.vp.lrint or llvm.vp.llrint "
          "intrinsic first argument element "
6466 "type must be floating-point and result element type must be integer",
6467 *VPCast);
6468 break;
6469 case Intrinsic::vp_uitofp:
6470 case Intrinsic::vp_sitofp:
6471 Check(
6472 RetTy->isFPOrFPVectorTy() && ValTy->isIntOrIntVectorTy(),
6473 "llvm.vp.uitofp or llvm.vp.sitofp intrinsic first argument element "
6474 "type must be integer and result element type must be floating-point",
6475 *VPCast);
6476 break;
6477 case Intrinsic::vp_fptrunc:
6478 Check(RetTy->isFPOrFPVectorTy() && ValTy->isFPOrFPVectorTy(),
6479 "llvm.vp.fptrunc intrinsic first argument and result element type "
6480 "must be floating-point",
6481 *VPCast);
6482 Check(RetTy->getScalarSizeInBits() < ValTy->getScalarSizeInBits(),
6483 "llvm.vp.fptrunc intrinsic the bit size of first argument must be "
6484 "larger than the bit size of the return type",
6485 *VPCast);
6486 break;
6487 case Intrinsic::vp_fpext:
6488 Check(RetTy->isFPOrFPVectorTy() && ValTy->isFPOrFPVectorTy(),
6489 "llvm.vp.fpext intrinsic first argument and result element type "
6490 "must be floating-point",
6491 *VPCast);
6492 Check(RetTy->getScalarSizeInBits() > ValTy->getScalarSizeInBits(),
6493 "llvm.vp.fpext intrinsic the bit size of first argument must be "
6494 "smaller than the bit size of the return type",
6495 *VPCast);
6496 break;
6497 case Intrinsic::vp_ptrtoint:
6498 Check(RetTy->isIntOrIntVectorTy() && ValTy->isPtrOrPtrVectorTy(),
6499 "llvm.vp.ptrtoint intrinsic first argument element type must be "
6500 "pointer and result element type must be integer",
6501 *VPCast);
6502 break;
6503 case Intrinsic::vp_inttoptr:
6504 Check(RetTy->isPtrOrPtrVectorTy() && ValTy->isIntOrIntVectorTy(),
6505 "llvm.vp.inttoptr intrinsic first argument element type must be "
6506 "integer and result element type must be pointer",
6507 *VPCast);
6508 break;
6509 }
6510 }
6511 if (VPI.getIntrinsicID() == Intrinsic::vp_fcmp) {
6512 auto Pred = cast<VPCmpIntrinsic>(&VPI)->getPredicate();
6513    Check(CmpInst::isFPPredicate(Pred),
6514          "invalid predicate for VP FP comparison intrinsic", &VPI);
6515 }
6516 if (VPI.getIntrinsicID() == Intrinsic::vp_icmp) {
6517 auto Pred = cast<VPCmpIntrinsic>(&VPI)->getPredicate();
6518    Check(CmpInst::isIntPredicate(Pred),
6519          "invalid predicate for VP integer comparison intrinsic", &VPI);
6520 }
6521 if (VPI.getIntrinsicID() == Intrinsic::vp_is_fpclass) {
6522 auto TestMask = cast<ConstantInt>(VPI.getOperand(1));
6523 Check((TestMask->getZExtValue() & ~static_cast<unsigned>(fcAllFlags)) == 0,
6524 "unsupported bits for llvm.vp.is.fpclass test mask");
6525 }
6526}
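// Illustrative sketch (assumed IR): a VP cast that passes the checks above
// keeps the element counts equal and only changes the element type, e.g.
//   %t = call <4 x i16> @llvm.vp.trunc.v4i16.v4i32(<4 x i32> %x,
//                                                  <4 x i1> %mask, i32 %evl)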
6527
6528void Verifier::visitConstrainedFPIntrinsic(ConstrainedFPIntrinsic &FPI) {
6529 unsigned NumOperands;
6530 bool HasRoundingMD;
6531 switch (FPI.getIntrinsicID()) {
6532#define INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC) \
6533 case Intrinsic::INTRINSIC: \
6534 NumOperands = NARG; \
6535 HasRoundingMD = ROUND_MODE; \
6536 break;
6537#include "llvm/IR/ConstrainedOps.def"
6538 default:
6539 llvm_unreachable("Invalid constrained FP intrinsic!");
6540 }
6541 NumOperands += (1 + HasRoundingMD);
6542 // Compare intrinsics carry an extra predicate metadata operand.
6543 if (isa<ConstrainedFPCmpIntrinsic>(FPI))
6544 NumOperands += 1;
6545 Check((FPI.arg_size() == NumOperands),
6546 "invalid arguments for constrained FP intrinsic", &FPI);
6547
6548 switch (FPI.getIntrinsicID()) {
6549 case Intrinsic::experimental_constrained_lrint:
6550 case Intrinsic::experimental_constrained_llrint: {
6551 Type *ValTy = FPI.getArgOperand(0)->getType();
6552 Type *ResultTy = FPI.getType();
6553 Check(!ValTy->isVectorTy() && !ResultTy->isVectorTy(),
6554 "Intrinsic does not support vectors", &FPI);
6555 }
6556 break;
6557
6558 case Intrinsic::experimental_constrained_lround:
6559 case Intrinsic::experimental_constrained_llround: {
6560 Type *ValTy = FPI.getArgOperand(0)->getType();
6561 Type *ResultTy = FPI.getType();
6562 Check(!ValTy->isVectorTy() && !ResultTy->isVectorTy(),
6563 "Intrinsic does not support vectors", &FPI);
6564 break;
6565 }
6566
6567 case Intrinsic::experimental_constrained_fcmp:
6568 case Intrinsic::experimental_constrained_fcmps: {
6569 auto Pred = cast<ConstrainedFPCmpIntrinsic>(&FPI)->getPredicate();
6570    Check(CmpInst::isFPPredicate(Pred),
6571          "invalid predicate for constrained FP comparison intrinsic", &FPI);
6572 break;
6573 }
6574
6575 case Intrinsic::experimental_constrained_fptosi:
6576 case Intrinsic::experimental_constrained_fptoui: {
6577 Value *Operand = FPI.getArgOperand(0);
6578 ElementCount SrcEC;
6579 Check(Operand->getType()->isFPOrFPVectorTy(),
6580 "Intrinsic first argument must be floating point", &FPI);
6581 if (auto *OperandT = dyn_cast<VectorType>(Operand->getType())) {
6582 SrcEC = cast<VectorType>(OperandT)->getElementCount();
6583 }
6584
6585 Operand = &FPI;
6586 Check(SrcEC.isNonZero() == Operand->getType()->isVectorTy(),
6587 "Intrinsic first argument and result disagree on vector use", &FPI);
6588 Check(Operand->getType()->isIntOrIntVectorTy(),
6589 "Intrinsic result must be an integer", &FPI);
6590 if (auto *OperandT = dyn_cast<VectorType>(Operand->getType())) {
6591 Check(SrcEC == cast<VectorType>(OperandT)->getElementCount(),
6592 "Intrinsic first argument and result vector lengths must be equal",
6593 &FPI);
6594 }
6595 }
6596 break;
6597
6598 case Intrinsic::experimental_constrained_sitofp:
6599 case Intrinsic::experimental_constrained_uitofp: {
6600 Value *Operand = FPI.getArgOperand(0);
6601 ElementCount SrcEC;
6602 Check(Operand->getType()->isIntOrIntVectorTy(),
6603 "Intrinsic first argument must be integer", &FPI);
6604 if (auto *OperandT = dyn_cast<VectorType>(Operand->getType())) {
6605 SrcEC = cast<VectorType>(OperandT)->getElementCount();
6606 }
6607
6608 Operand = &FPI;
6609 Check(SrcEC.isNonZero() == Operand->getType()->isVectorTy(),
6610 "Intrinsic first argument and result disagree on vector use", &FPI);
6611 Check(Operand->getType()->isFPOrFPVectorTy(),
6612 "Intrinsic result must be a floating point", &FPI);
6613 if (auto *OperandT = dyn_cast<VectorType>(Operand->getType())) {
6614 Check(SrcEC == cast<VectorType>(OperandT)->getElementCount(),
6615 "Intrinsic first argument and result vector lengths must be equal",
6616 &FPI);
6617 }
6618 } break;
6619
6620 case Intrinsic::experimental_constrained_fptrunc:
6621 case Intrinsic::experimental_constrained_fpext: {
6622 Value *Operand = FPI.getArgOperand(0);
6623 Type *OperandTy = Operand->getType();
6624 Value *Result = &FPI;
6625 Type *ResultTy = Result->getType();
6626 Check(OperandTy->isFPOrFPVectorTy(),
6627 "Intrinsic first argument must be FP or FP vector", &FPI);
6628 Check(ResultTy->isFPOrFPVectorTy(),
6629 "Intrinsic result must be FP or FP vector", &FPI);
6630 Check(OperandTy->isVectorTy() == ResultTy->isVectorTy(),
6631 "Intrinsic first argument and result disagree on vector use", &FPI);
6632 if (OperandTy->isVectorTy()) {
6633 Check(cast<VectorType>(OperandTy)->getElementCount() ==
6634 cast<VectorType>(ResultTy)->getElementCount(),
6635 "Intrinsic first argument and result vector lengths must be equal",
6636 &FPI);
6637 }
6638 if (FPI.getIntrinsicID() == Intrinsic::experimental_constrained_fptrunc) {
6639 Check(OperandTy->getScalarSizeInBits() > ResultTy->getScalarSizeInBits(),
6640 "Intrinsic first argument's type must be larger than result type",
6641 &FPI);
6642 } else {
6643 Check(OperandTy->getScalarSizeInBits() < ResultTy->getScalarSizeInBits(),
6644 "Intrinsic first argument's type must be smaller than result type",
6645 &FPI);
6646 }
6647 }
6648 break;
6649
6650 default:
6651 break;
6652 }
6653
6654 // If a non-metadata argument is passed in a metadata slot then the
6655 // error will be caught earlier when the incorrect argument doesn't
6656 // match the specification in the intrinsic call table. Thus, no
6657 // argument type check is needed here.
6658
6659 Check(FPI.getExceptionBehavior().has_value(),
6660 "invalid exception behavior argument", &FPI);
6661 if (HasRoundingMD) {
6662 Check(FPI.getRoundingMode().has_value(), "invalid rounding mode argument",
6663 &FPI);
6664 }
6665}
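// Illustrative sketch (assumed IR): a constrained fptrunc carries its rounding
// mode and exception behavior as trailing metadata operands, e.g.
//   %r = call float @llvm.experimental.constrained.fptrunc.f32.f64(
//            double %d, metadata !"round.dynamic", metadata !"fpexcept.strict")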
6666
6667void Verifier::visitDbgIntrinsic(StringRef Kind, DbgVariableIntrinsic &DII) {
6668 auto *MD = DII.getRawLocation();
6669 CheckDI(isa<ValueAsMetadata>(MD) || isa<DIArgList>(MD) ||
6670 (isa<MDNode>(MD) && !cast<MDNode>(MD)->getNumOperands()),
6671 "invalid llvm.dbg." + Kind + " intrinsic address/value", &DII, MD);
6672 CheckDI(isa<DILocalVariable>(DII.getRawVariable()),
6673 "invalid llvm.dbg." + Kind + " intrinsic variable", &DII,
6674 DII.getRawVariable());
6675 CheckDI(isa<DIExpression>(DII.getRawExpression()),
6676 "invalid llvm.dbg." + Kind + " intrinsic expression", &DII,
6677 DII.getRawExpression());
6678
6679 if (auto *DAI = dyn_cast<DbgAssignIntrinsic>(&DII)) {
6680 CheckDI(isa<DIAssignID>(DAI->getRawAssignID()),
6681 "invalid llvm.dbg.assign intrinsic DIAssignID", &DII,
6682 DAI->getRawAssignID());
6683 const auto *RawAddr = DAI->getRawAddress();
6684 CheckDI(
6685 isa<ValueAsMetadata>(RawAddr) ||
6686 (isa<MDNode>(RawAddr) && !cast<MDNode>(RawAddr)->getNumOperands()),
6687 "invalid llvm.dbg.assign intrinsic address", &DII,
6688 DAI->getRawAddress());
6689 CheckDI(isa<DIExpression>(DAI->getRawAddressExpression()),
6690 "invalid llvm.dbg.assign intrinsic address expression", &DII,
6691 DAI->getRawAddressExpression());
6692 // All of the linked instructions should be in the same function as DII.
6693    for (Instruction *I : at::getAssignmentInsts(DAI))
6694      CheckDI(DAI->getFunction() == I->getFunction(),
6695 "inst not in same function as dbg.assign", I, DAI);
6696 }
6697
6698 // Ignore broken !dbg attachments; they're checked elsewhere.
6699 if (MDNode *N = DII.getDebugLoc().getAsMDNode())
6700 if (!isa<DILocation>(N))
6701 return;
6702
6703 BasicBlock *BB = DII.getParent();
6704 Function *F = BB ? BB->getParent() : nullptr;
6705
6706 // The scopes for variables and !dbg attachments must agree.
6707 DILocalVariable *Var = DII.getVariable();
6708 DILocation *Loc = DII.getDebugLoc();
6709 CheckDI(Loc, "llvm.dbg." + Kind + " intrinsic requires a !dbg attachment",
6710 &DII, BB, F);
6711
6712 DISubprogram *VarSP = getSubprogram(Var->getRawScope());
6713 DISubprogram *LocSP = getSubprogram(Loc->getRawScope());
6714 if (!VarSP || !LocSP)
6715 return; // Broken scope chains are checked elsewhere.
6716
6717 CheckDI(VarSP == LocSP,
6718 "mismatched subprogram between llvm.dbg." + Kind +
6719 " variable and !dbg attachment",
6720 &DII, BB, F, Var, Var->getScope()->getSubprogram(), Loc,
6721 Loc->getScope()->getSubprogram());
6722
6723 // This check is redundant with one in visitLocalVariable().
6724 CheckDI(isType(Var->getRawType()), "invalid type ref", Var,
6725 Var->getRawType());
6726 verifyFnArgs(DII);
6727}
6728
6729void Verifier::visitDbgLabelIntrinsic(StringRef Kind, DbgLabelInst &DLI) {
6730 CheckDI(isa<DILabel>(DLI.getRawLabel()),
6731 "invalid llvm.dbg." + Kind + " intrinsic variable", &DLI,
6732 DLI.getRawLabel());
6733
6734 // Ignore broken !dbg attachments; they're checked elsewhere.
6735 if (MDNode *N = DLI.getDebugLoc().getAsMDNode())
6736 if (!isa<DILocation>(N))
6737 return;
6738
6739 BasicBlock *BB = DLI.getParent();
6740 Function *F = BB ? BB->getParent() : nullptr;
6741
6742 // The scopes for variables and !dbg attachments must agree.
6743 DILabel *Label = DLI.getLabel();
6744 DILocation *Loc = DLI.getDebugLoc();
6745 Check(Loc, "llvm.dbg." + Kind + " intrinsic requires a !dbg attachment", &DLI,
6746 BB, F);
6747
6748 DISubprogram *LabelSP = getSubprogram(Label->getRawScope());
6749 DISubprogram *LocSP = getSubprogram(Loc->getRawScope());
6750 if (!LabelSP || !LocSP)
6751 return;
6752
6753 CheckDI(LabelSP == LocSP,
6754 "mismatched subprogram between llvm.dbg." + Kind +
6755 " label and !dbg attachment",
6756 &DLI, BB, F, Label, Label->getScope()->getSubprogram(), Loc,
6757 Loc->getScope()->getSubprogram());
6758}
6759
6760void Verifier::verifyFragmentExpression(const DbgVariableIntrinsic &I) {
6761 DILocalVariable *V = dyn_cast_or_null<DILocalVariable>(I.getRawVariable());
6762 DIExpression *E = dyn_cast_or_null<DIExpression>(I.getRawExpression());
6763
6764 // We don't know whether this intrinsic verified correctly.
6765 if (!V || !E || !E->isValid())
6766 return;
6767
6768 // Nothing to do if this isn't a DW_OP_LLVM_fragment expression.
6769 auto Fragment = E->getFragmentInfo();
6770 if (!Fragment)
6771 return;
6772
6773 // The frontend helps out GDB by emitting the members of local anonymous
6774 // unions as artificial local variables with shared storage. When SROA splits
6775 // the storage for artificial local variables that are smaller than the entire
6776 // union, the overhang piece will be outside of the allotted space for the
6777 // variable and this check fails.
6778 // FIXME: Remove this check as soon as clang stops doing this; it hides bugs.
6779 if (V->isArtificial())
6780 return;
6781
6782 verifyFragmentExpression(*V, *Fragment, &I);
6783}
6784void Verifier::verifyFragmentExpression(const DbgVariableRecord &DVR) {
6785 DILocalVariable *V = dyn_cast_or_null<DILocalVariable>(DVR.getRawVariable());
6786 DIExpression *E = dyn_cast_or_null<DIExpression>(DVR.getRawExpression());
6787
6788 // We don't know whether this intrinsic verified correctly.
6789 if (!V || !E || !E->isValid())
6790 return;
6791
6792 // Nothing to do if this isn't a DW_OP_LLVM_fragment expression.
6793 auto Fragment = E->getFragmentInfo();
6794 if (!Fragment)
6795 return;
6796
6797 // The frontend helps out GDB by emitting the members of local anonymous
6798 // unions as artificial local variables with shared storage. When SROA splits
6799 // the storage for artificial local variables that are smaller than the entire
6800 // union, the overhang piece will be outside of the allotted space for the
6801 // variable and this check fails.
6802 // FIXME: Remove this check as soon as clang stops doing this; it hides bugs.
6803 if (V->isArtificial())
6804 return;
6805
6806 verifyFragmentExpression(*V, *Fragment, &DVR);
6807}
6808
6809template <typename ValueOrMetadata>
6810void Verifier::verifyFragmentExpression(const DIVariable &V,
6811                                         DIExpression::FragmentInfo Fragment,
6812                                         ValueOrMetadata *Desc) {
6813 // If there's no size, the type is broken, but that should be checked
6814 // elsewhere.
6815 auto VarSize = V.getSizeInBits();
6816 if (!VarSize)
6817 return;
6818
6819 unsigned FragSize = Fragment.SizeInBits;
6820 unsigned FragOffset = Fragment.OffsetInBits;
6821 CheckDI(FragSize + FragOffset <= *VarSize,
6822 "fragment is larger than or outside of variable", Desc, &V);
6823 CheckDI(FragSize != *VarSize, "fragment covers entire variable", Desc, &V);
6824}
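// Illustrative sketch (assumed metadata): for a 64-bit variable, a fragment
// expression such as
//   !DIExpression(DW_OP_LLVM_fragment, 0, 32)
// describes bits [0, 32) and is accepted; a fragment of 64 bits (covering the
// entire variable) or one extending past bit 64 fails the checks above.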
6825
6826void Verifier::verifyFnArgs(const DbgVariableIntrinsic &I) {
6827 // This function does not take the scope of noninlined function arguments into
6828  // account. Don't run it if the current function is nodebug, because it may
6829 // contain inlined debug intrinsics.
6830 if (!HasDebugInfo)
6831 return;
6832
6833 // For performance reasons only check non-inlined ones.
6834 if (I.getDebugLoc()->getInlinedAt())
6835 return;
6836
6837 DILocalVariable *Var = I.getVariable();
6838 CheckDI(Var, "dbg intrinsic without variable");
6839
6840 unsigned ArgNo = Var->getArg();
6841 if (!ArgNo)
6842 return;
6843
6844 // Verify there are no duplicate function argument debug info entries.
6845 // These will cause hard-to-debug assertions in the DWARF backend.
6846 if (DebugFnArgs.size() < ArgNo)
6847 DebugFnArgs.resize(ArgNo, nullptr);
6848
6849 auto *Prev = DebugFnArgs[ArgNo - 1];
6850 DebugFnArgs[ArgNo - 1] = Var;
6851 CheckDI(!Prev || (Prev == Var), "conflicting debug info for argument", &I,
6852 Prev, Var);
6853}
6854void Verifier::verifyFnArgs(const DbgVariableRecord &DVR) {
6855 // This function does not take the scope of noninlined function arguments into
6856  // account. Don't run it if the current function is nodebug, because it may
6857 // contain inlined debug intrinsics.
6858 if (!HasDebugInfo)
6859 return;
6860
6861 // For performance reasons only check non-inlined ones.
6862 if (DVR.getDebugLoc()->getInlinedAt())
6863 return;
6864
6865 DILocalVariable *Var = DVR.getVariable();
6866 CheckDI(Var, "#dbg record without variable");
6867
6868 unsigned ArgNo = Var->getArg();
6869 if (!ArgNo)
6870 return;
6871
6872 // Verify there are no duplicate function argument debug info entries.
6873 // These will cause hard-to-debug assertions in the DWARF backend.
6874 if (DebugFnArgs.size() < ArgNo)
6875 DebugFnArgs.resize(ArgNo, nullptr);
6876
6877 auto *Prev = DebugFnArgs[ArgNo - 1];
6878 DebugFnArgs[ArgNo - 1] = Var;
6879 CheckDI(!Prev || (Prev == Var), "conflicting debug info for argument", &DVR,
6880 Prev, Var);
6881}
6882
6883void Verifier::verifyNotEntryValue(const DbgVariableIntrinsic &I) {
6884 DIExpression *E = dyn_cast_or_null<DIExpression>(I.getRawExpression());
6885
6886 // We don't know whether this intrinsic verified correctly.
6887 if (!E || !E->isValid())
6888 return;
6889
6890 if (isa<ValueAsMetadata>(I.getRawLocation())) {
6891 Value *VarValue = I.getVariableLocationOp(0);
6892 if (isa<UndefValue>(VarValue) || isa<PoisonValue>(VarValue))
6893 return;
6894 // We allow EntryValues for swift async arguments, as they have an
6895 // ABI-guarantee to be turned into a specific register.
6896 if (auto *ArgLoc = dyn_cast_or_null<Argument>(VarValue);
6897 ArgLoc && ArgLoc->hasAttribute(Attribute::SwiftAsync))
6898 return;
6899 }
6900
6901 CheckDI(!E->isEntryValue(),
6902 "Entry values are only allowed in MIR unless they target a "
6903 "swiftasync Argument",
6904 &I);
6905}
6906void Verifier::verifyNotEntryValue(const DbgVariableRecord &DVR) {
6907 DIExpression *E = dyn_cast_or_null<DIExpression>(DVR.getRawExpression());
6908
6909 // We don't know whether this intrinsic verified correctly.
6910 if (!E || !E->isValid())
6911 return;
6912
6913 if (isa<ValueAsMetadata>(DVR.getRawLocation())) {
6914 Value *VarValue = DVR.getVariableLocationOp(0);
6915 if (isa<UndefValue>(VarValue) || isa<PoisonValue>(VarValue))
6916 return;
6917 // We allow EntryValues for swift async arguments, as they have an
6918 // ABI-guarantee to be turned into a specific register.
6919 if (auto *ArgLoc = dyn_cast_or_null<Argument>(VarValue);
6920 ArgLoc && ArgLoc->hasAttribute(Attribute::SwiftAsync))
6921 return;
6922 }
6923
6924 CheckDI(!E->isEntryValue(),
6925 "Entry values are only allowed in MIR unless they target a "
6926 "swiftasync Argument",
6927 &DVR);
6928}
6929
6930void Verifier::verifyCompileUnits() {
6931 // When more than one Module is imported into the same context, such as during
6932 // an LTO build before linking the modules, ODR type uniquing may cause types
6933 // to point to a different CU. This check does not make sense in this case.
6934 if (M.getContext().isODRUniquingDebugTypes())
6935 return;
6936 auto *CUs = M.getNamedMetadata("llvm.dbg.cu");
6937  SmallPtrSet<const Metadata *, 2> Listed;
6938  if (CUs)
6939 Listed.insert(CUs->op_begin(), CUs->op_end());
6940 for (const auto *CU : CUVisited)
6941 CheckDI(Listed.count(CU), "DICompileUnit not listed in llvm.dbg.cu", CU);
6942 CUVisited.clear();
6943}
6944
6945void Verifier::verifyDeoptimizeCallingConvs() {
6946 if (DeoptimizeDeclarations.empty())
6947 return;
6948
6949 const Function *First = DeoptimizeDeclarations[0];
6950 for (const auto *F : ArrayRef(DeoptimizeDeclarations).slice(1)) {
6951 Check(First->getCallingConv() == F->getCallingConv(),
6952 "All llvm.experimental.deoptimize declarations must have the same "
6953 "calling convention",
6954 First, F);
6955 }
6956}
6957
6958void Verifier::verifyAttachedCallBundle(const CallBase &Call,
6959 const OperandBundleUse &BU) {
6960 FunctionType *FTy = Call.getFunctionType();
6961
6962 Check((FTy->getReturnType()->isPointerTy() ||
6963 (Call.doesNotReturn() && FTy->getReturnType()->isVoidTy())),
6964 "a call with operand bundle \"clang.arc.attachedcall\" must call a "
6965 "function returning a pointer or a non-returning function that has a "
6966 "void return type",
6967 Call);
6968
6969 Check(BU.Inputs.size() == 1 && isa<Function>(BU.Inputs.front()),
6970 "operand bundle \"clang.arc.attachedcall\" requires one function as "
6971 "an argument",
6972 Call);
6973
6974 auto *Fn = cast<Function>(BU.Inputs.front());
6975 Intrinsic::ID IID = Fn->getIntrinsicID();
6976
6977 if (IID) {
6978 Check((IID == Intrinsic::objc_retainAutoreleasedReturnValue ||
6979 IID == Intrinsic::objc_unsafeClaimAutoreleasedReturnValue),
6980 "invalid function argument", Call);
6981 } else {
6982 StringRef FnName = Fn->getName();
6983 Check((FnName == "objc_retainAutoreleasedReturnValue" ||
6984 FnName == "objc_unsafeClaimAutoreleasedReturnValue"),
6985 "invalid function argument", Call);
6986 }
6987}
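// Illustrative sketch (assumed IR): a well-formed attachedcall bundle names one
// of the two ObjC runtime functions checked above, e.g.
//   %obj = call ptr @foo() [ "clang.arc.attachedcall"(
//              ptr @llvm.objc.retainAutoreleasedReturnValue) ]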
6988
6989void Verifier::verifyNoAliasScopeDecl() {
6990 if (NoAliasScopeDecls.empty())
6991 return;
6992
6993  // Only a single scope may be declared at a time.
6994 for (auto *II : NoAliasScopeDecls) {
6995 assert(II->getIntrinsicID() == Intrinsic::experimental_noalias_scope_decl &&
6996 "Not a llvm.experimental.noalias.scope.decl ?");
6997 const auto *ScopeListMV = dyn_cast<MetadataAsValue>(
6998 II->getOperand(Intrinsic::NoAliasScopeDeclScopeArg));
6999 Check(ScopeListMV != nullptr,
7000 "llvm.experimental.noalias.scope.decl must have a MetadataAsValue "
7001 "argument",
7002 II);
7003
7004 const auto *ScopeListMD = dyn_cast<MDNode>(ScopeListMV->getMetadata());
7005 Check(ScopeListMD != nullptr, "!id.scope.list must point to an MDNode", II);
7006 Check(ScopeListMD->getNumOperands() == 1,
7007 "!id.scope.list must point to a list with a single scope", II);
7008 visitAliasScopeListMetadata(ScopeListMD);
7009 }
7010
7011 // Only check the domination rule when requested. Once all passes have been
7012 // adapted this option can go away.
7013  if (!VerifyNoAliasScopeDomination)
7014    return;
7015
7016 // Now sort the intrinsics based on the scope MDNode so that declarations of
7017 // the same scopes are next to each other.
7018 auto GetScope = [](IntrinsicInst *II) {
7019 const auto *ScopeListMV = cast<MetadataAsValue>(
7020 II->getOperand(Intrinsic::NoAliasScopeDeclScopeArg));
7021 return &cast<MDNode>(ScopeListMV->getMetadata())->getOperand(0);
7022 };
7023
7024 // We are sorting on MDNode pointers here. For valid input IR this is ok.
7025 // TODO: Sort on Metadata ID to avoid non-deterministic error messages.
7026 auto Compare = [GetScope](IntrinsicInst *Lhs, IntrinsicInst *Rhs) {
7027 return GetScope(Lhs) < GetScope(Rhs);
7028 };
7029
7030 llvm::sort(NoAliasScopeDecls, Compare);
7031
7032 // Go over the intrinsics and check that for the same scope, they are not
7033 // dominating each other.
7034 auto ItCurrent = NoAliasScopeDecls.begin();
7035 while (ItCurrent != NoAliasScopeDecls.end()) {
7036 auto CurScope = GetScope(*ItCurrent);
7037 auto ItNext = ItCurrent;
7038 do {
7039 ++ItNext;
7040 } while (ItNext != NoAliasScopeDecls.end() &&
7041 GetScope(*ItNext) == CurScope);
7042
7043 // [ItCurrent, ItNext) represents the declarations for the same scope.
7044    // Ensure they are not dominating each other, but only if it is not too
7045 // expensive.
7046 if (ItNext - ItCurrent < 32)
7047 for (auto *I : llvm::make_range(ItCurrent, ItNext))
7048 for (auto *J : llvm::make_range(ItCurrent, ItNext))
7049 if (I != J)
7050 Check(!DT.dominates(I, J),
7051 "llvm.experimental.noalias.scope.decl dominates another one "
7052 "with the same scope",
7053 I);
7054 ItCurrent = ItNext;
7055 }
7056}
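// Illustrative sketch (assumed IR/metadata): each declaration names exactly one
// scope, e.g.
//   call void @llvm.experimental.noalias.scope.decl(metadata !2)
//   !0 = distinct !{!0, !"dom"}        ; alias domain
//   !1 = distinct !{!1, !0, !"scope"}  ; scope in that domain
//   !2 = !{!1}                         ; single-entry scope list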
7057
7058//===----------------------------------------------------------------------===//
7059// Implement the public interfaces to this file...
7060//===----------------------------------------------------------------------===//
7061
7062bool llvm::verifyFunction(const Function &f, raw_ostream *OS) {
7063  Function &F = const_cast<Function &>(f);
7064
7065 // Don't use a raw_null_ostream. Printing IR is expensive.
7066 Verifier V(OS, /*ShouldTreatBrokenDebugInfoAsError=*/true, *f.getParent());
7067
7068 // Note that this function's return value is inverted from what you would
7069 // expect of a function called "verify".
7070 return !V.verify(F);
7071}
7072
7073bool llvm::verifyModule(const Module &M, raw_ostream *OS,
7074                        bool *BrokenDebugInfo) {
7075 // Don't use a raw_null_ostream. Printing IR is expensive.
7076 Verifier V(OS, /*ShouldTreatBrokenDebugInfoAsError=*/!BrokenDebugInfo, M);
7077
7078 bool Broken = false;
7079 for (const Function &F : M)
7080 Broken |= !V.verify(F);
7081
7082 Broken |= !V.verify();
7083 if (BrokenDebugInfo)
7084 *BrokenDebugInfo = V.hasBrokenDebugInfo();
7085 // Note that this function's return value is inverted from what you would
7086 // expect of a function called "verify".
7087 return Broken;
7088}
7089
7090namespace {
7091
7092struct VerifierLegacyPass : public FunctionPass {
7093 static char ID;
7094
7095 std::unique_ptr<Verifier> V;
7096 bool FatalErrors = true;
7097
7098 VerifierLegacyPass() : FunctionPass(ID) {
7099    initializeVerifierLegacyPassPass(*PassRegistry::getPassRegistry());
7100  }
7101 explicit VerifierLegacyPass(bool FatalErrors)
7102 : FunctionPass(ID),
7103 FatalErrors(FatalErrors) {
7104    initializeVerifierLegacyPassPass(*PassRegistry::getPassRegistry());
7105  }
7106
7107 bool doInitialization(Module &M) override {
7108 V = std::make_unique<Verifier>(
7109 &dbgs(), /*ShouldTreatBrokenDebugInfoAsError=*/false, M);
7110 return false;
7111 }
7112
7113 bool runOnFunction(Function &F) override {
7114 if (!V->verify(F) && FatalErrors) {
7115 errs() << "in function " << F.getName() << '\n';
7116 report_fatal_error("Broken function found, compilation aborted!");
7117 }
7118 return false;
7119 }
7120
7121 bool doFinalization(Module &M) override {
7122 bool HasErrors = false;
7123 for (Function &F : M)
7124 if (F.isDeclaration())
7125 HasErrors |= !V->verify(F);
7126
7127 HasErrors |= !V->verify();
7128 if (FatalErrors && (HasErrors || V->hasBrokenDebugInfo()))
7129 report_fatal_error("Broken module found, compilation aborted!");
7130 return false;
7131 }
7132
7133 void getAnalysisUsage(AnalysisUsage &AU) const override {
7134 AU.setPreservesAll();
7135 }
7136};
7137
7138} // end anonymous namespace
7139
7140/// Helper to issue failure from the TBAA verification
7141template <typename... Tys> void TBAAVerifier::CheckFailed(Tys &&... Args) {
7142 if (Diagnostic)
7143 return Diagnostic->CheckFailed(Args...);
7144}
7145
7146#define CheckTBAA(C, ...) \
7147 do { \
7148 if (!(C)) { \
7149 CheckFailed(__VA_ARGS__); \
7150 return false; \
7151 } \
7152 } while (false)
7153
7154/// Verify that \p BaseNode can be used as the "base type" in the struct-path
7155/// TBAA scheme. This means \p BaseNode is either a scalar node, or a
7156/// struct-type node describing an aggregate data structure (like a struct).
7157TBAAVerifier::TBAABaseNodeSummary
7158TBAAVerifier::verifyTBAABaseNode(Instruction &I, const MDNode *BaseNode,
7159 bool IsNewFormat) {
7160 if (BaseNode->getNumOperands() < 2) {
7161 CheckFailed("Base nodes must have at least two operands", &I, BaseNode);
7162 return {true, ~0u};
7163 }
7164
7165 auto Itr = TBAABaseNodes.find(BaseNode);
7166 if (Itr != TBAABaseNodes.end())
7167 return Itr->second;
7168
7169 auto Result = verifyTBAABaseNodeImpl(I, BaseNode, IsNewFormat);
7170 auto InsertResult = TBAABaseNodes.insert({BaseNode, Result});
7171 (void)InsertResult;
7172 assert(InsertResult.second && "We just checked!");
7173 return Result;
7174}
7175
7176TBAAVerifier::TBAABaseNodeSummary
7177TBAAVerifier::verifyTBAABaseNodeImpl(Instruction &I, const MDNode *BaseNode,
7178 bool IsNewFormat) {
7179 const TBAAVerifier::TBAABaseNodeSummary InvalidNode = {true, ~0u};
7180
7181 if (BaseNode->getNumOperands() == 2) {
7182 // Scalar nodes can only be accessed at offset 0.
7183 return isValidScalarTBAANode(BaseNode)
7184 ? TBAAVerifier::TBAABaseNodeSummary({false, 0})
7185 : InvalidNode;
7186 }
7187
7188 if (IsNewFormat) {
7189 if (BaseNode->getNumOperands() % 3 != 0) {
7190      CheckFailed("Access tag nodes must have a number of operands that is a "
7191 "multiple of 3!", BaseNode);
7192 return InvalidNode;
7193 }
7194 } else {
7195 if (BaseNode->getNumOperands() % 2 != 1) {
7196 CheckFailed("Struct tag nodes must have an odd number of operands!",
7197 BaseNode);
7198 return InvalidNode;
7199 }
7200 }
7201
7202 // Check the type size field.
7203 if (IsNewFormat) {
7204 auto *TypeSizeNode = mdconst::dyn_extract_or_null<ConstantInt>(
7205 BaseNode->getOperand(1));
7206 if (!TypeSizeNode) {
7207 CheckFailed("Type size nodes must be constants!", &I, BaseNode);
7208 return InvalidNode;
7209 }
7210 }
7211
7212 // Check the type name field. In the new format it can be anything.
7213 if (!IsNewFormat && !isa<MDString>(BaseNode->getOperand(0))) {
7214 CheckFailed("Struct tag nodes have a string as their first operand",
7215 BaseNode);
7216 return InvalidNode;
7217 }
7218
7219 bool Failed = false;
7220
7221 std::optional<APInt> PrevOffset;
7222 unsigned BitWidth = ~0u;
7223
7224 // We've already checked that BaseNode is not a degenerate root node with one
7225 // operand in \c verifyTBAABaseNode, so this loop should run at least once.
7226 unsigned FirstFieldOpNo = IsNewFormat ? 3 : 1;
7227 unsigned NumOpsPerField = IsNewFormat ? 3 : 2;
7228 for (unsigned Idx = FirstFieldOpNo; Idx < BaseNode->getNumOperands();
7229 Idx += NumOpsPerField) {
7230 const MDOperand &FieldTy = BaseNode->getOperand(Idx);
7231 const MDOperand &FieldOffset = BaseNode->getOperand(Idx + 1);
7232 if (!isa<MDNode>(FieldTy)) {
7233 CheckFailed("Incorrect field entry in struct type node!", &I, BaseNode);
7234 Failed = true;
7235 continue;
7236 }
7237
7238 auto *OffsetEntryCI =
7239 mdconst::dyn_extract_or_null<ConstantInt>(FieldOffset);
7240 if (!OffsetEntryCI) {
7241 CheckFailed("Offset entries must be constants!", &I, BaseNode);
7242 Failed = true;
7243 continue;
7244 }
7245
7246 if (BitWidth == ~0u)
7247 BitWidth = OffsetEntryCI->getBitWidth();
7248
7249 if (OffsetEntryCI->getBitWidth() != BitWidth) {
7250 CheckFailed(
7251 "Bitwidth between the offsets and struct type entries must match", &I,
7252 BaseNode);
7253 Failed = true;
7254 continue;
7255 }
7256
7257 // NB! As far as I can tell, we generate a non-strictly increasing offset
7258 // sequence only from structs that have zero size bit fields. When
7259 // recursing into a contained struct in \c getFieldNodeFromTBAABaseNode we
7260 // pick the field lexically the latest in struct type metadata node. This
7261 // mirrors the actual behavior of the alias analysis implementation.
7262 bool IsAscending =
7263 !PrevOffset || PrevOffset->ule(OffsetEntryCI->getValue());
7264
7265 if (!IsAscending) {
7266 CheckFailed("Offsets must be increasing!", &I, BaseNode);
7267 Failed = true;
7268 }
7269
7270 PrevOffset = OffsetEntryCI->getValue();
7271
7272 if (IsNewFormat) {
7273 auto *MemberSizeNode = mdconst::dyn_extract_or_null<ConstantInt>(
7274 BaseNode->getOperand(Idx + 2));
7275 if (!MemberSizeNode) {
7276 CheckFailed("Member size entries must be constants!", &I, BaseNode);
7277 Failed = true;
7278 continue;
7279 }
7280 }
7281 }
7282
7283 return Failed ? InvalidNode
7284 : TBAAVerifier::TBAABaseNodeSummary(false, BitWidth);
7285}
7286
7287static bool IsRootTBAANode(const MDNode *MD) {
7288 return MD->getNumOperands() < 2;
7289}
7290
7291static bool IsScalarTBAANodeImpl(const MDNode *MD,
7292                                 SmallPtrSetImpl<const MDNode *> &Visited) {
7293  if (MD->getNumOperands() != 2 && MD->getNumOperands() != 3)
7294 return false;
7295
7296 if (!isa<MDString>(MD->getOperand(0)))
7297 return false;
7298
7299 if (MD->getNumOperands() == 3) {
7300 auto *Offset = mdconst::dyn_extract<ConstantInt>(MD->getOperand(2));
7301 if (!(Offset && Offset->isZero() && isa<MDString>(MD->getOperand(0))))
7302 return false;
7303 }
7304
7305 auto *Parent = dyn_cast_or_null<MDNode>(MD->getOperand(1));
7306 return Parent && Visited.insert(Parent).second &&
7307 (IsRootTBAANode(Parent) || IsScalarTBAANodeImpl(Parent, Visited));
7308}
7309
7310bool TBAAVerifier::isValidScalarTBAANode(const MDNode *MD) {
7311 auto ResultIt = TBAAScalarNodes.find(MD);
7312 if (ResultIt != TBAAScalarNodes.end())
7313 return ResultIt->second;
7314
7315  SmallPtrSet<const MDNode *, 4> Visited;
7316  bool Result = IsScalarTBAANodeImpl(MD, Visited);
7317 auto InsertResult = TBAAScalarNodes.insert({MD, Result});
7318 (void)InsertResult;
7319 assert(InsertResult.second && "Just checked!");
7320
7321 return Result;
7322}
7323
7324/// Returns the field node at the offset \p Offset in \p BaseNode. Update \p
7325/// Offset in place to be the offset within the field node returned.
7326///
7327/// We assume we've okayed \p BaseNode via \c verifyTBAABaseNode.
7328MDNode *TBAAVerifier::getFieldNodeFromTBAABaseNode(Instruction &I,
7329 const MDNode *BaseNode,
7330 APInt &Offset,
7331 bool IsNewFormat) {
7332 assert(BaseNode->getNumOperands() >= 2 && "Invalid base node!");
7333
7334 // Scalar nodes have only one possible "field" -- their parent in the access
7335 // hierarchy. Offset must be zero at this point, but our caller is supposed
7336 // to check that.
7337 if (BaseNode->getNumOperands() == 2)
7338 return cast<MDNode>(BaseNode->getOperand(1));
7339
7340 unsigned FirstFieldOpNo = IsNewFormat ? 3 : 1;
7341 unsigned NumOpsPerField = IsNewFormat ? 3 : 2;
7342 for (unsigned Idx = FirstFieldOpNo; Idx < BaseNode->getNumOperands();
7343 Idx += NumOpsPerField) {
7344 auto *OffsetEntryCI =
7345 mdconst::extract<ConstantInt>(BaseNode->getOperand(Idx + 1));
7346 if (OffsetEntryCI->getValue().ugt(Offset)) {
7347 if (Idx == FirstFieldOpNo) {
7348 CheckFailed("Could not find TBAA parent in struct type node", &I,
7349 BaseNode, &Offset);
7350 return nullptr;
7351 }
7352
7353 unsigned PrevIdx = Idx - NumOpsPerField;
7354 auto *PrevOffsetEntryCI =
7355 mdconst::extract<ConstantInt>(BaseNode->getOperand(PrevIdx + 1));
7356 Offset -= PrevOffsetEntryCI->getValue();
7357 return cast<MDNode>(BaseNode->getOperand(PrevIdx));
7358 }
7359 }
7360
7361 unsigned LastIdx = BaseNode->getNumOperands() - NumOpsPerField;
7362 auto *LastOffsetEntryCI = mdconst::extract<ConstantInt>(
7363 BaseNode->getOperand(LastIdx + 1));
7364 Offset -= LastOffsetEntryCI->getValue();
7365 return cast<MDNode>(BaseNode->getOperand(LastIdx));
7366}
7367
7368static bool isNewFormatTBAATypeNode(llvm::MDNode *Type) {
7369  if (!Type || Type->getNumOperands() < 3)
7370 return false;
7371
7372 // In the new format type nodes shall have a reference to the parent type as
7373 // its first operand.
7374 return isa_and_nonnull<MDNode>(Type->getOperand(0));
7375}
7376
7377bool TBAAVerifier::visitTBAAMetadata(Instruction &I, const MDNode *MD) {
7378  CheckTBAA(MD->getNumOperands() > 0, "TBAA metadata cannot have 0 operands",
7379 &I, MD);
7380
7381 CheckTBAA(isa<LoadInst>(I) || isa<StoreInst>(I) || isa<CallInst>(I) ||
7382 isa<VAArgInst>(I) || isa<AtomicRMWInst>(I) ||
7383 isa<AtomicCmpXchgInst>(I),
7384 "This instruction shall not have a TBAA access tag!", &I);
7385
7386 bool IsStructPathTBAA =
7387 isa<MDNode>(MD->getOperand(0)) && MD->getNumOperands() >= 3;
7388
7389 CheckTBAA(IsStructPathTBAA,
7390 "Old-style TBAA is no longer allowed, use struct-path TBAA instead",
7391 &I);
7392
7393 MDNode *BaseNode = dyn_cast_or_null<MDNode>(MD->getOperand(0));
7394 MDNode *AccessType = dyn_cast_or_null<MDNode>(MD->getOperand(1));
7395
7396 bool IsNewFormat = isNewFormatTBAATypeNode(AccessType);
7397
7398 if (IsNewFormat) {
7399 CheckTBAA(MD->getNumOperands() == 4 || MD->getNumOperands() == 5,
7400 "Access tag metadata must have either 4 or 5 operands", &I, MD);
7401 } else {
7402 CheckTBAA(MD->getNumOperands() < 5,
7403 "Struct tag metadata must have either 3 or 4 operands", &I, MD);
7404 }
7405
7406 // Check the access size field.
7407 if (IsNewFormat) {
7408 auto *AccessSizeNode = mdconst::dyn_extract_or_null<ConstantInt>(
7409 MD->getOperand(3));
7410 CheckTBAA(AccessSizeNode, "Access size field must be a constant", &I, MD);
7411 }
7412
7413 // Check the immutability flag.
7414 unsigned ImmutabilityFlagOpNo = IsNewFormat ? 4 : 3;
7415 if (MD->getNumOperands() == ImmutabilityFlagOpNo + 1) {
7416 auto *IsImmutableCI = mdconst::dyn_extract_or_null<ConstantInt>(
7417 MD->getOperand(ImmutabilityFlagOpNo));
7418 CheckTBAA(IsImmutableCI,
7419 "Immutability tag on struct tag metadata must be a constant", &I,
7420 MD);
7421 CheckTBAA(
7422 IsImmutableCI->isZero() || IsImmutableCI->isOne(),
7423 "Immutability part of the struct tag metadata must be either 0 or 1",
7424 &I, MD);
7425 }
7426
7427 CheckTBAA(BaseNode && AccessType,
7428 "Malformed struct tag metadata: base and access-type "
7429 "should be non-null and point to Metadata nodes",
7430 &I, MD, BaseNode, AccessType);
7431
7432 if (!IsNewFormat) {
7433 CheckTBAA(isValidScalarTBAANode(AccessType),
7434 "Access type node must be a valid scalar type", &I, MD,
7435 AccessType);
7436 }
7437
7438 auto *OffsetCI = mdconst::dyn_extract_or_null<ConstantInt>(MD->getOperand(2));
7439 CheckTBAA(OffsetCI, "Offset must be constant integer", &I, MD);
7440
7441 APInt Offset = OffsetCI->getValue();
7442 bool SeenAccessTypeInPath = false;
7443
7444 SmallPtrSet<MDNode *, 4> StructPath;
7445
7446 for (/* empty */; BaseNode && !IsRootTBAANode(BaseNode);
7447 BaseNode = getFieldNodeFromTBAABaseNode(I, BaseNode, Offset,
7448 IsNewFormat)) {
7449 if (!StructPath.insert(BaseNode).second) {
7450 CheckFailed("Cycle detected in struct path", &I, MD);
7451 return false;
7452 }
7453
7454 bool Invalid;
7455 unsigned BaseNodeBitWidth;
7456 std::tie(Invalid, BaseNodeBitWidth) = verifyTBAABaseNode(I, BaseNode,
7457 IsNewFormat);
7458
7459 // If the base node is invalid in itself, then we've already printed all the
7460 // errors we wanted to print.
7461 if (Invalid)
7462 return false;
7463
7464 SeenAccessTypeInPath |= BaseNode == AccessType;
7465
7466 if (isValidScalarTBAANode(BaseNode) || BaseNode == AccessType)
7467 CheckTBAA(Offset == 0, "Offset not zero at the point of scalar access",
7468 &I, MD, &Offset);
7469
7470 CheckTBAA(BaseNodeBitWidth == Offset.getBitWidth() ||
7471 (BaseNodeBitWidth == 0 && Offset == 0) ||
7472 (IsNewFormat && BaseNodeBitWidth == ~0u),
7473 "Access bit-width not the same as description bit-width", &I, MD,
7474 BaseNodeBitWidth, Offset.getBitWidth());
7475
7476 if (IsNewFormat && SeenAccessTypeInPath)
7477 break;
7478 }
7479
7480 CheckTBAA(SeenAccessTypeInPath, "Did not see access type in access path!", &I,
7481 MD);
7482 return true;
7483}
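// Illustrative sketch (assumed metadata): a minimal struct-path TBAA access tag
// of the kind accepted above:
//   !0 = !{!"Simple C/C++ TBAA"}            ; root
//   !1 = !{!"omnipotent char", !0, i64 0}   ; scalar type node
//   !2 = !{!"int", !1, i64 0}               ; scalar type node
//   !3 = !{!2, !2, i64 0}                   ; access tag: base, access type, offset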
7484
7485char VerifierLegacyPass::ID = 0;
7486INITIALIZE_PASS(VerifierLegacyPass, "verify", "Module Verifier", false, false)
7487
7488FunctionPass *llvm::createVerifierPass(bool FatalErrors) {
7489  return new VerifierLegacyPass(FatalErrors);
7490}
7491
7492AnalysisKey VerifierAnalysis::Key;
7493VerifierAnalysis::Result VerifierAnalysis::run(Module &M,
7494                                               ModuleAnalysisManager &) {
7495  Result Res;
7496  Res.IRBroken = llvm::verifyModule(M, &dbgs(), &Res.DebugInfoBroken);
7497  return Res;
7498}
7499
7500VerifierAnalysis::Result VerifierAnalysis::run(Function &F,
7501                                               FunctionAnalysisManager &) {
7502  return { llvm::verifyFunction(F, &dbgs()), false };
7503}
7504
7505PreservedAnalyses VerifierPass::run(Module &M, ModuleAnalysisManager &AM) {
7506  auto Res = AM.getResult<VerifierAnalysis>(M);
7507 if (FatalErrors && (Res.IRBroken || Res.DebugInfoBroken))
7508 report_fatal_error("Broken module found, compilation aborted!");
7509
7510 return PreservedAnalyses::all();
7511}
7512
7513PreservedAnalyses VerifierPass::run(Function &F, FunctionAnalysisManager &AM) {
7514  auto res = AM.getResult<VerifierAnalysis>(F);
7515 if (res.IRBroken && FatalErrors)
7516 report_fatal_error("Broken function found, compilation aborted!");
7517
7518 return PreservedAnalyses::all();
7519}
static DISubprogram * getSubprogram(Metadata *LocalScope)
Carefully grab the subprogram from a local scope.
Definition: Verifier.cpp:6302
static bool isTypeCongruent(Type *L, Type *R)
Two types are "congruent" if they are identical, or if they are both pointer types with different poi...
Definition: Verifier.cpp:3749
#define CheckTBAA(C,...)
Definition: Verifier.cpp:7146
static bool IsRootTBAANode(const MDNode *MD)
Definition: Verifier.cpp:7287
static bool isContiguous(const ConstantRange &A, const ConstantRange &B)
Definition: Verifier.cpp:4077
static Value * getParentPad(Value *EHPad)
Definition: Verifier.cpp:4321
static bool hasConflictingReferenceFlags(unsigned Flags)
Detect mutually exclusive flags.
Definition: Verifier.cpp:1283
static AttrBuilder getParameterABIAttributes(LLVMContext &C, unsigned I, AttributeList Attrs)
Definition: Verifier.cpp:3759
bool isFiniteNonZero() const
Definition: APFloat.h:1305
bool isNegative() const
Definition: APFloat.h:1295
const fltSemantics & getSemantics() const
Definition: APFloat.h:1303
Class for arbitrary precision integers.
Definition: APInt.h:76
bool sgt(const APInt &RHS) const
Signed greater than comparison.
Definition: APInt.h:1179
bool isZero() const
Determine if this value is zero, i.e. all bits are clear.
Definition: APInt.h:358
bool isMinValue() const
Determine if this is the smallest unsigned value.
Definition: APInt.h:395
bool ule(const APInt &RHS) const
Unsigned less or equal comparison.
Definition: APInt.h:1128
bool isPowerOf2() const
Check if this APInt's value is a power of two greater than zero.
Definition: APInt.h:418
bool isMaxValue() const
Determine if this is the largest unsigned value.
Definition: APInt.h:377
This class represents a conversion between pointers from one address space to another.
an instruction to allocate memory on the stack
Definition: Instructions.h:59
bool isSwiftError() const
Return true if this alloca is used as a swifterror argument to a call.
Definition: Instructions.h:157
bool isStaticAlloca() const
Return true if this alloca is in the entry block of the function and is a constant size.
Align getAlign() const
Return the alignment of the memory that is being allocated by the instruction.
Definition: Instructions.h:132
Type * getAllocatedType() const
Return the type that is being allocated by the instruction.
Definition: Instructions.h:125
bool isArrayAllocation() const
Return true if there is an allocation size parameter to the allocation instruction that is not 1.
const Value * getArraySize() const
Get the number of elements allocated.
Definition: Instructions.h:103
A container for analyses that lazily runs them and caches their results.
Definition: PassManager.h:321
PassT::Result & getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs)
Get the result of an analysis pass for a given IR unit.
Definition: PassManager.h:473
Represent the analysis usage information of a pass.
void setPreservesAll()
Set by analyses that do not transform their input at all.
This class represents an incoming formal argument to a Function.
Definition: Argument.h:31
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition: ArrayRef.h:41
bool empty() const
empty - Check if the array is empty.
Definition: ArrayRef.h:160
An instruction that atomically checks whether a specified value is in a memory location,...
Definition: Instructions.h:539
an instruction that atomically reads a memory location, combines it with another value,...
Definition: Instructions.h:748
static bool isFPOperation(BinOp Op)
Definition: Instructions.h:849
BinOp getOperation() const
Definition: Instructions.h:845
static StringRef getOperationName(BinOp Op)
AtomicOrdering getOrdering() const
Returns the ordering constraint of this rmw instruction.
Definition: Instructions.h:887
bool contains(Attribute::AttrKind A) const
Return true if the builder has the specified attribute.
Definition: AttributeMask.h:67
bool hasAttribute(Attribute::AttrKind Kind) const
Return true if the attribute exists in this set.
Definition: Attributes.cpp:841
std::string getAsString(bool InAttrGrp=false) const
Definition: Attributes.cpp:928
static Attribute::AttrKind getAttrKindFromName(StringRef AttrName)
Definition: Attributes.cpp:265
static bool canUseAsRetAttr(AttrKind Kind)
Definition: Attributes.cpp:689
static bool isExistingAttribute(StringRef Name)
Return true if the provided string matches the IR name of an attribute.
Definition: Attributes.cpp:288
static bool canUseAsFnAttr(AttrKind Kind)
Definition: Attributes.cpp:681
AttrKind
This enumeration lists the attributes that can be associated with parameters, function results,...
Definition: Attributes.h:85
@ None
No attributes have been set.
Definition: Attributes.h:87
static bool isIntAttrKind(AttrKind Kind)
Definition: Attributes.h:101
static bool canUseAsParamAttr(AttrKind Kind)
Definition: Attributes.cpp:685
bool isValid() const
Return true if the attribute is any kind of attribute.
Definition: Attributes.h:193
LLVM Basic Block Representation.
Definition: BasicBlock.h:60
iterator begin()
Instruction iterator methods.
Definition: BasicBlock.h:430
iterator_range< const_phi_iterator > phis() const
Returns a range that iterates over the phis in the basic block.
Definition: BasicBlock.h:499
const LandingPadInst * getLandingPadInst() const
Return the landingpad instruction associated with the landing pad.
Definition: BasicBlock.cpp:676
const Instruction * getFirstNonPHI() const
Returns a pointer to the first instruction in this block that is not a PHINode instruction.
Definition: BasicBlock.cpp:360
const Instruction & front() const
Definition: BasicBlock.h:453
bool isEntryBlock() const
Return true if this is the entry block of the containing function.
Definition: BasicBlock.cpp:564
const BasicBlock * getUniquePredecessor() const
Return the predecessor of this block if it has a unique predecessor block.
Definition: BasicBlock.cpp:460
const Function * getParent() const
Return the enclosing method, or null if none.
Definition: BasicBlock.h:206
InstListType::iterator iterator
Instruction iterators...
Definition: BasicBlock.h:165
bool isEHPad() const
Return true if this basic block is an exception handling block.
Definition: BasicBlock.h:657
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
Definition: BasicBlock.h:221
This class represents a no-op cast from one type to another.
static BlockAddress * lookup(const BasicBlock *BB)
Lookup an existing BlockAddress constant for the given BasicBlock.
Definition: Constants.cpp:1864
Conditional or Unconditional Branch instruction.
bool isConditional() const
Value * getCondition() const
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
Definition: InstrTypes.h:1494
bool isInlineAsm() const
Check if this call is an inline asm statement.
Definition: InstrTypes.h:1809
Function * getCalledFunction() const
Returns the function called, or null if this is an indirect function invocation or the function signa...
Definition: InstrTypes.h:1742
CallingConv::ID getCallingConv() const
Definition: InstrTypes.h:1800
Value * getCalledOperand() const
Definition: InstrTypes.h:1735
Value * getArgOperand(unsigned i) const
Definition: InstrTypes.h:1687
FunctionType * getFunctionType() const
Definition: InstrTypes.h:1600
unsigned arg_size() const
Definition: InstrTypes.h:1685
AttributeList getAttributes() const
Return the parameter attributes for this call.
Definition: InstrTypes.h:1819
CallBr instruction, tracking function calls that may not return control but instead transfer it to a ...
This class represents a function call, abstracting a target machine's calling convention.
bool isMustTailCall() const
static bool castIsValid(Instruction::CastOps op, Type *SrcTy, Type *DstTy)
This method can be used to determine if a cast from SrcTy to DstTy using Opcode op is valid or not.
unsigned getNumHandlers() const
return the number of 'handlers' in this catchswitch instruction, except the default handler
Value * getParentPad() const
BasicBlock * getUnwindDest() const
handler_range handlers()
iteration adapter for range-for loops.
BasicBlock * getUnwindDest() const
bool isFPPredicate() const
Definition: InstrTypes.h:1122
bool isIntPredicate() const
Definition: InstrTypes.h:1123
static bool isIntPredicate(Predicate P)
Definition: InstrTypes.h:1116
ConstantArray - Constant Array Declarations.
Definition: Constants.h:423
A constant value that is initialized with an expression using other constant values.
Definition: Constants.h:1017
ConstantFP - Floating Point Values [float, double].
Definition: Constants.h:268
This is the shared class of boolean and integer constants.
Definition: Constants.h:80
bool isMinusOne() const
This function will return true iff every bit in this constant is set to true.
Definition: Constants.h:217
bool isNegative() const
Definition: Constants.h:200
bool isZero() const
This is just a convenience method to make client code smaller for a common code.
Definition: Constants.h:205
uint64_t getZExtValue() const
Return the constant as a 64-bit unsigned integer value after it has been zero extended as appropriate...
Definition: Constants.h:154
const APInt & getValue() const
Return the constant as an APInt value reference.
Definition: Constants.h:145
This class represents a range of values.
Definition: ConstantRange.h:47
static ConstantTokenNone * get(LLVMContext &Context)
Return the ConstantTokenNone.
Definition: Constants.cpp:1499
This is an important base class in LLVM.
Definition: Constant.h:41
bool isNullValue() const
Return true if this is the value that would be returned by getNullValue.
Definition: Constants.cpp:90
This is the common base class for constrained floating point intrinsics.
std::optional< fp::ExceptionBehavior > getExceptionBehavior() const
std::optional< RoundingMode > getRoundingMode() const
List of ValueAsMetadata, to be used as an argument to a dbg.value intrinsic.
Assignment ID.
Basic type, like 'int' or 'float'.
Debug common block.
Enumeration value.
DWARF expression.
bool isEntryValue() const
Check if the expression consists of exactly one entry value operand.
static std::optional< FragmentInfo > getFragmentInfo(expr_op_iterator Start, expr_op_iterator End)
Retrieve the details of this fragment expression.
A pair of DIGlobalVariable and DIExpression.
DIGlobalVariable * getVariable() const
DIExpression * getExpression() const
An imported module (C++ using directive or similar).
Debug lexical block.
A scope for locals.
DISubprogram * getSubprogram() const
Get the subprogram for this scope.
DILocalScope * getScope() const
Get the local scope for this variable.
Debug location.
Metadata * getRawScope() const
Represents a module in the programming language, for example, a Clang module, or a Fortran module.
Debug lexical block.
Base class for scope-like contexts.
String type, Fortran CHARACTER(n)
Subprogram description.
Array subrange.
Type array for a subprogram.
Base class for template parameters.
Base class for variables.
Metadata * getRawType() const
Metadata * getRawScope() const
This class represents an Operation in the Expression.
A parsed version of the target data layout string in and methods for querying it.
Definition: DataLayout.h:110
This represents the llvm.dbg.label instruction.
Metadata * getRawLabel() const
DILabel * getLabel() const
Records a position in IR for a source label (DILabel).
Base class for non-instruction debug metadata records that have positions within IR.
void print(raw_ostream &O, bool IsForDebug=false) const
DebugLoc getDebugLoc() const
const BasicBlock * getParent() const
This is the common base class for debug info intrinsics for variables.
Metadata * getRawLocation() const
DILocalVariable * getVariable() const
Metadata * getRawVariable() const
Metadata * getRawExpression() const
Record of a variable value-assignment, aka a non instruction representation of the dbg....
MDNode * getRawAddressExpression() const
DIExpression * getExpression() const
Value * getVariableLocationOp(unsigned OpIdx) const
DILocalVariable * getVariable() const
Metadata * getRawLocation() const
Returns the metadata operand for the first location description.
@ End
Marks the end of the concrete types.
@ Any
To indicate all LocationTypes in searches.
DIExpression * getAddressExpression() const
MDNode * getAsMDNode() const
Return this as a bar MDNode.
Definition: DebugLoc.h:106
ValueT lookup(const_arg_type_t< KeyT > Val) const
lookup - Return the entry for the specified key, or a default constructed value if no such entry exis...
Definition: DenseMap.h:202
iterator find(const_arg_type_t< KeyT > Val)
Definition: DenseMap.h:155
bool empty() const
Definition: DenseMap.h:98
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Definition: DenseMap.h:220
void recalculate(ParentType &Func)
recalculate - compute a dominator tree for the given function
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
Definition: Dominators.h:162
bool isReachableFromEntry(const Use &U) const
Provide an overload for a Use.
Definition: Dominators.cpp:321
bool dominates(const BasicBlock *BB, const Use &U) const
Return true if the (end of the) basic block BB dominates the use U.
Definition: Dominators.cpp:122
This instruction extracts a single (scalar) element from a VectorType value.
static bool isValidOperands(const Value *Vec, const Value *Idx)
Return true if an extractelement instruction can be formed with the specified operands.
This instruction extracts a struct member or array element value from an aggregate value.
ArrayRef< unsigned > getIndices() const
static Type * getIndexedType(Type *Agg, ArrayRef< unsigned > Idxs)
Returns the type of the element that would be extracted with an extractvalue instruction with the spe...
This instruction compares its operands according to the predicate given to the constructor.
This class represents an extension of floating point types.
This class represents a cast from floating point to signed integer.
This class represents a cast from floating point to unsigned integer.
This class represents a truncation of floating point types.
An instruction for ordering other memory operations.
Definition: Instructions.h:460
AtomicOrdering getOrdering() const
Returns the ordering constraint of this fence instruction.
Definition: Instructions.h:487
Value * getParentPad() const
Convenience accessors.
Definition: InstrTypes.h:2711
FunctionPass class - This class is used to implement most global optimizations.
Definition: Pass.h:311
Intrinsic::ID getIntrinsicID() const LLVM_READONLY
getIntrinsicID - This method returns the ID number of the specified function, or Intrinsic::not_intri...
Definition: Function.h:232
bool hasPersonalityFn() const
Check whether this function has a personality function.
Definition: Function.h:855
bool isIntrinsic() const
isIntrinsic - Returns true if the function's name starts with "llvm.".
Definition: Function.h:237
const std::string & getGC() const
Definition: Function.cpp:770
Represents calls to the gc.relocate intrinsic.
Value * getBasePtr() const
Value * getDerivedPtr() const
void initialize(raw_ostream *OS, function_ref< void(const Twine &Message)> FailureCB, const FunctionT &F)
Generic tagged DWARF-like metadata node.
an instruction for type-safe pointer arithmetic to access elements of arrays and structs
Definition: Instructions.h:973
static Type * getIndexedType(Type *Ty, ArrayRef< Value * > IdxList)
Returns the result type of a getelementptr with the given source element type and indexes.
static bool isValidLinkage(LinkageTypes L)
Definition: GlobalAlias.h:95
const Constant * getAliasee() const
Definition: GlobalAlias.h:84
const Function * getResolverFunction() const
Definition: Globals.cpp:592
static FunctionType * getResolverFunctionType(Type *IFuncValTy)
Definition: GlobalIFunc.h:83
static bool isValidLinkage(LinkageTypes L)
Definition: GlobalIFunc.h:87
const Constant * getResolver() const
Definition: GlobalIFunc.h:70
bool hasComdat() const
Definition: GlobalObject.h:128
MDNode * getMetadata(unsigned KindID) const
Get the current metadata attachments for the given kind, if any.
Definition: Value.h:565
bool hasExternalLinkage() const
Definition: GlobalValue.h:511
bool isDSOLocal() const
Definition: GlobalValue.h:305
bool isImplicitDSOLocal() const
Definition: GlobalValue.h:298
bool isDeclaration() const
Return true if the primary definition of this global value is outside of the current translation unit...
Definition: Globals.cpp:281
bool hasValidDeclarationLinkage() const
Definition: GlobalValue.h:533
LinkageTypes getLinkage() const
Definition: GlobalValue.h:546
bool hasDefaultVisibility() const
Definition: GlobalValue.h:249
bool hasPrivateLinkage() const
Definition: GlobalValue.h:527
bool hasHiddenVisibility() const
Definition: GlobalValue.h:250
bool hasExternalWeakLinkage() const
Definition: GlobalValue.h:529
bool hasDLLImportStorageClass() const
Definition: GlobalValue.h:278
bool hasDLLExportStorageClass() const
Definition: GlobalValue.h:281
bool isDeclarationForLinker() const
Definition: GlobalValue.h:618
unsigned getAddressSpace() const
Definition: GlobalValue.h:205
Module * getParent()
Get the module that this global value is contained inside of...
Definition: GlobalValue.h:656
PointerType * getType() const
Global values are always pointers.
Definition: GlobalValue.h:294
bool hasComdat() const
Definition: GlobalValue.h:241
bool hasCommonLinkage() const
Definition: GlobalValue.h:532
bool hasGlobalUnnamedAddr() const
Definition: GlobalValue.h:215
bool hasAppendingLinkage() const
Definition: GlobalValue.h:525
bool hasAvailableExternallyLinkage() const
Definition: GlobalValue.h:512
Type * getValueType() const
Definition: GlobalValue.h:296
const Constant * getInitializer() const
getInitializer - Return the initializer for this global variable.
bool hasInitializer() const
Definitions have initializers, declarations don't.
bool isConstant() const
If the value is a global constant, its value is immutable throughout the runtime execution of the pro...
bool hasDefinitiveInitializer() const
hasDefinitiveInitializer - Whether the global variable has an initializer, and any other instances of...
This instruction compares its operands according to the predicate given to the constructor.
Indirect Branch Instruction.
BasicBlock * getDestination(unsigned i)
Return the specified destination.
unsigned getNumDestinations() const
return the number of possible destinations in this indirectbr instruction.
unsigned getNumSuccessors() const
This instruction inserts a single (scalar) element into a VectorType value.
static bool isValidOperands(const Value *Vec, const Value *NewElt, const Value *Idx)
Return true if an insertelement instruction can be formed with the specified operands.
This instruction inserts a struct field of array element value into an aggregate value.
Value * getAggregateOperand()
ArrayRef< unsigned > getIndices() const
Base class for instruction visitors.
Definition: InstVisitor.h:78
RetTy visitTerminator(Instruction &I)
Definition: InstVisitor.h:253
RetTy visitCallBase(CallBase &I)
Definition: InstVisitor.h:267
void visitFunction(Function &F)
Definition: InstVisitor.h:142
void visitBasicBlock(BasicBlock &BB)
Definition: InstVisitor.h:143
void visit(Iterator Start, Iterator End)
Definition: InstVisitor.h:87
RetTy visitFuncletPadInst(FuncletPadInst &I)
Definition: InstVisitor.h:197
void visitInstruction(Instruction &I)
Definition: InstVisitor.h:280
unsigned getNumSuccessors() const LLVM_READONLY
Return the number of successors that this instruction has.
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
Definition: Instruction.h:454
bool isAtomic() const LLVM_READONLY
Return true if this instruction has an AtomicOrdering of unordered or higher.
const BasicBlock * getParent() const
Definition: Instruction.h:152
const Function * getFunction() const
Return the function this instruction belongs to.
Definition: Instruction.cpp:87
This class represents a cast from an integer to a pointer.
A wrapper class for inspecting calls to intrinsic functions.
Definition: IntrinsicInst.h:47
static bool mayLowerToFunctionCall(Intrinsic::ID IID)
Check if the intrinsic might lower into a regular function call in the course of IR transformations.
Intrinsic::ID getIntrinsicID() const
Return the intrinsic ID of this intrinsic.
Definition: IntrinsicInst.h:54
Invoke instruction.
BasicBlock * getUnwindDest() const
This is an important class for using LLVM in a threaded context.
Definition: LLVMContext.h:67
The landingpad instruction holds all of the information necessary to generate correct exception handl...
bool isCleanup() const
Return 'true' if this landingpad instruction is a cleanup.
unsigned getNumClauses() const
Get the number of clauses for this landing pad.
bool isCatch(unsigned Idx) const
Return 'true' if the clause and index Idx is a catch clause.
bool isFilter(unsigned Idx) const
Return 'true' if the clause and index Idx is a filter clause.
Constant * getClause(unsigned Idx) const
Get the value of the clause at index Idx.
An instruction for reading from memory.
Definition: Instructions.h:184
AtomicOrdering getOrdering() const
Returns the ordering constraint of this load instruction.
Definition: Instructions.h:245
SyncScope::ID getSyncScopeID() const
Returns the synchronization scope ID of this load instruction.
Definition: Instructions.h:255
Align getAlign() const
Return the alignment of the access that is being performed.
Definition: Instructions.h:236
Metadata node.
Definition: Metadata.h:1067
const MDOperand & getOperand(unsigned I) const
Definition: Metadata.h:1428
bool isTemporary() const
Definition: Metadata.h:1251
ArrayRef< MDOperand > operands() const
Definition: Metadata.h:1426
unsigned getNumOperands() const
Return number of MDNode operands.
Definition: Metadata.h:1434
bool isDistinct() const
Definition: Metadata.h:1250
bool isResolved() const
Check if node is fully resolved.
Definition: Metadata.h:1247
LLVMContext & getContext() const
Definition: Metadata.h:1231
Tracking metadata reference owned by Metadata.
Definition: Metadata.h:889
Metadata * get() const
Definition: Metadata.h:918
A single uniqued string.
Definition: Metadata.h:720
StringRef getString() const
Definition: Metadata.cpp:610
Typed, array-like tuple of metadata.
Definition: Metadata.h:1627
Tuple of metadata.
Definition: Metadata.h:1470
static bool isTagMD(const Metadata *MD)
This class implements a map that also provides access to all stored values in a deterministic order.
Definition: MapVector.h:36
void clear()
Definition: MapVector.h:88
Metadata wrapper in the Value hierarchy.
Definition: Metadata.h:176
static MetadataAsValue * getIfExists(LLVMContext &Context, Metadata *MD)
Definition: Metadata.cpp:111
Metadata * getMetadata() const
Definition: Metadata.h:193
Root of the metadata hierarchy.
Definition: Metadata.h:62
void print(raw_ostream &OS, const Module *M=nullptr, bool IsForDebug=false) const
Print.
Definition: AsmWriter.cpp:5195
unsigned getMetadataID() const
Definition: Metadata.h:102
Manage lifetime of a slot tracker for printing IR.
A Module instance is used to store all the information related to an LLVM module.
Definition: Module.h:65
ModFlagBehavior
This enumeration defines the supported behaviors of module flags.
Definition: Module.h:115
@ AppendUnique
Appends the two values, which are required to be metadata nodes.
Definition: Module.h:144
@ Override
Uses the specified value, regardless of the behavior or value of the other module.
Definition: Module.h:136
@ Warning
Emits a warning if two values disagree.
Definition: Module.h:122
@ Error
Emits an error if two values disagree, otherwise the resulting value is that of the operands.
Definition: Module.h:118
@ Min
Takes the min of the two values, which are required to be integers.
Definition: Module.h:150
@ Append
Appends the two values, which are required to be metadata nodes.
Definition: Module.h:139
@ Max
Takes the max of the two values, which are required to be integers.
Definition: Module.h:147
@ Require
Adds a requirement that another module flag be present and have a specified value after linking is pe...
Definition: Module.h:131
const std::string & getModuleIdentifier() const
Get the module identifier which is, essentially, the name of the module.
Definition: Module.h:267
static bool isValidModFlagBehavior(Metadata *MD, ModFlagBehavior &MFB)
Checks if Metadata represents a valid ModFlagBehavior, and stores the converted result in MFB.
Definition: Module.cpp:286
A tuple of MDNodes.
Definition: Metadata.h:1729
StringRef getName() const
Definition: Metadata.cpp:1399
void print(raw_ostream &ROS, bool IsForDebug=false) const
Definition: AsmWriter.cpp:4856
iterator_range< op_iterator > operands()
Definition: Metadata.h:1825
op_range incoming_values()
static PassRegistry * getPassRegistry()
getPassRegistry - Access the global registry object, which is automatically initialized at applicatio...
static PointerType * get(Type *ElementType, unsigned AddressSpace)
This constructs a pointer to an object of the specified type in a numbered address space.
A set of analyses that are preserved following a run of a transformation pass.
Definition: Analysis.h:109
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
Definition: Analysis.h:115
Simple wrapper around std::function<void(raw_ostream&)>.
Definition: Printable.h:38
This class represents a cast from a pointer to an integer.
Interface for looking up the initializer for a variable name, used by Init::resolveReferences.
Definition: Record.h:2213
Resume the propagation of an exception.
Value * getValue() const
Convenience accessor.
Return a value (possibly void), from a function.
This class represents a sign extension of integer types.
This class represents a cast from signed integer to floating point.
This class represents the LLVM 'select' instruction.
static const char * areInvalidOperands(Value *Cond, Value *True, Value *False)
Return a string if the specified operands are invalid for a select operation, otherwise return null.
This instruction constructs a fixed permutation of two input vectors.
static bool isValidOperands(const Value *V1, const Value *V2, const Value *Mask)
Return true if a shufflevector instruction can be formed with the specified operands.
static void getShuffleMask(const Constant *Mask, SmallVectorImpl< int > &Result)
Convert the input shuffle mask operand to a vector of integers.
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
Definition: SmallPtrSet.h:321
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
Definition: SmallPtrSet.h:360
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
Definition: SmallPtrSet.h:342
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
Definition: SmallPtrSet.h:427
SmallSet - This maintains a set of unique values, optimizing for the case when the set is small (less...
Definition: SmallSet.h:135
std::pair< const_iterator, bool > insert(const T &V)
insert - Insert an element into the set if it isn't already there.
Definition: SmallSet.h:179
SmallString - A SmallString is just a SmallVector with methods and accessors that make it work better...
Definition: SmallString.h:26
bool empty() const
Definition: SmallVector.h:94
size_t size() const
Definition: SmallVector.h:91
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: SmallVector.h:586
void reserve(size_type N)
Definition: SmallVector.h:676
iterator insert(iterator I, T &&Elt)
Definition: SmallVector.h:818
void resize(size_type N)
Definition: SmallVector.h:651
void push_back(const T &Elt)
Definition: SmallVector.h:426
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1209
An instruction for storing to memory.
Definition: Instructions.h:317
StringMapEntry - This is used to represent one value that is inserted into a StringMap.
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:50
bool getAsInteger(unsigned Radix, T &Result) const
Parse the current string as an integer of the specified radix.
Definition: StringRef.h:456
bool starts_with(StringRef Prefix) const
Check if this string starts with the given Prefix.
Definition: StringRef.h:257
bool contains(StringRef Other) const
Return true if the given string is a substring of *this, and false otherwise.
Definition: StringRef.h:410
bool equals(StringRef RHS) const
equals - Check for string equality, this is more efficient than compare() when the relative ordering ...
Definition: StringRef.h:164
static constexpr size_t npos
Definition: StringRef.h:52
Class to represent struct types.
Definition: DerivedTypes.h:216
unsigned getNumElements() const
Random access to the elements.
Definition: DerivedTypes.h:341
bool containsScalableVectorType(SmallPtrSetImpl< Type * > *Visited=nullptr) const
Returns true if this struct contains a scalable vector.
Definition: Type.cpp:400
Type * getTypeAtIndex(const Value *V) const
Given an index value into the type, return the type of the element.
Definition: Type.cpp:612
Multiway switch.
Verify that the TBAA Metadatas are valid.
Definition: Verifier.h:39
bool visitTBAAMetadata(Instruction &I, const MDNode *MD)
Visit an instruction and return true if it is valid, return false if an invalid TBAA is attached.
Definition: Verifier.cpp:7377
@ CanBeGlobal
This type may be used as the value type of a global variable.
Definition: DerivedTypes.h:771
TinyPtrVector - This class is specialized for cases where there are normally 0 or 1 element in a vect...
Definition: TinyPtrVector.h:29
unsigned size() const
Triple - Helper class for working with autoconf configuration names.
Definition: Triple.h:44
This class represents a truncation of integer types.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition: Twine.h:81
The instances of the Type class are immutable: once they are created, they are never changed.
Definition: Type.h:45
bool isVectorTy() const
True if this is an instance of VectorType.
Definition: Type.h:265
PointerType * getPointerTo(unsigned AddrSpace=0) const
Return a pointer to the current type.
bool isArrayTy() const
True if this is an instance of ArrayType.
Definition: Type.h:252
bool isLabelTy() const
Return true if this is 'label'.
Definition: Type.h:219
bool isIntOrIntVectorTy() const
Return true if this is an integer type or a vector of integer types.
Definition: Type.h:234
bool isPointerTy() const
True if this is an instance of PointerType.
Definition: Type.h:255
unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
bool isSized(SmallPtrSetImpl< Type * > *Visited=nullptr) const
Return true if it makes sense to take the size of this type.
Definition: Type.h:302
bool isFloatingPointTy() const
Return true if this is one of the floating-point types.
Definition: Type.h:185
bool isPtrOrPtrVectorTy() const
Return true if this is a pointer type or a vector of pointer types.
Definition: Type.h:262
bool isScalableTy() const
Return true if this is a type whose size is a known multiple of vscale.
bool canLosslesslyBitCastTo(Type *Ty) const
Return true if this type could be converted with a lossless BitCast to type 'Ty'.
bool isIntOrPtrTy() const
Return true if this is an integer type or a pointer type.
Definition: Type.h:243
bool isIntegerTy() const
True if this is an instance of IntegerType.
Definition: Type.h:228
bool isTokenTy() const
Return true if this is 'token'.
Definition: Type.h:225
bool isFPOrFPVectorTy() const
Return true if this is a FP type or a vector of FP.
Definition: Type.h:216
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
Definition: Type.h:348
bool isMetadataTy() const
Return true if this is 'metadata'.
Definition: Type.h:222
This class represents a cast unsigned integer to floating point.
A Use represents the edge between a Value definition and its users.
Definition: Use.h:43
op_range operands()
Definition: User.h:242
Value * getOperand(unsigned i) const
Definition: User.h:169
unsigned getNumOperands() const
Definition: User.h:191
This class represents the va_arg llvm instruction, which returns an argument of the specified type gi...
This is the common base class for vector predication intrinsics.
Value wrapper in the Metadata hierarchy.
Definition: Metadata.h:450
Value * getValue() const
Definition: Metadata.h:490
LLVM Value Representation.
Definition: Value.h:74
iterator_range< user_iterator > materialized_users()
Definition: Value.h:415
Type * getType() const
All values are typed, get the type of this value.
Definition: Value.h:255
static constexpr uint64_t MaximumAlignment
Definition: Value.h:807
const Value * stripPointerCastsAndAliases() const
Strip off pointer casts, all-zero GEPs, address space casts, and aliases.
Definition: Value.cpp:697
const Value * stripInBoundsOffsets(function_ref< void(const Value *)> Func=[](const Value *) {}) const
Strip off pointer casts and inbounds GEPs.
Definition: Value.cpp:785
iterator_range< user_iterator > users()
Definition: Value.h:421
bool materialized_use_empty() const
Definition: Value.h:349
LLVMContext & getContext() const
All values hold a context through their type.
Definition: Value.cpp:1074
bool hasName() const
Definition: Value.h:261
StringRef getName() const
Return a constant reference to the value's name.
Definition: Value.cpp:309
Check a module for errors, and report separate error states for IR and debug info errors.
Definition: Verifier.h:107
Result run(Module &M, ModuleAnalysisManager &)
Definition: Verifier.cpp:7493
PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM)
Definition: Verifier.cpp:7505
This class represents zero extension of integer types.
constexpr bool isNonZero() const
Definition: TypeSize.h:158
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
Definition: TypeSize.h:171
constexpr ScalarTy getKnownMinValue() const
Returns the minimum value this quantity can represent.
Definition: TypeSize.h:168
An efficient, type-erasing, non-owning reference to a callable.
NodeTy * getNextNode()
Get the next node, or nullptr for the list tail.
Definition: ilist_node.h:316
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition: raw_ostream.h:52
This file contains the declaration of the Comdat class, which represents a single COMDAT in LLVM.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr char Args[]
Key for Kernel::Metadata::mArgs.
constexpr char Attrs[]
Key for Kernel::Metadata::mAttrs.
AttributeMask typeIncompatible(Type *Ty, AttributeSafetyKind ASK=ASK_ALL)
Which attributes cannot be applied to a type.
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
Definition: BitmaskEnum.h:121
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
Definition: CallingConv.h:24
@ AMDGPU_CS
Used for Mesa/AMDPAL compute shaders.
Definition: CallingConv.h:197
@ AMDGPU_VS
Used for Mesa vertex shaders, or AMDPAL last shader stage before rasterization (vertex shader if tess...
Definition: CallingConv.h:188
@ AMDGPU_KERNEL
Used for AMDGPU code object kernels.
Definition: CallingConv.h:200
@ AnyReg
OBSOLETED - Used for stack based JavaScript calls.
Definition: CallingConv.h:60
@ AMDGPU_CS_ChainPreserve
Used on AMDGPUs to give the middle-end more control over argument placement.
Definition: CallingConv.h:249
@ AMDGPU_HS
Used for Mesa/AMDPAL hull shaders (= tessellation control shaders).
Definition: CallingConv.h:206
@ AMDGPU_GS
Used for Mesa/AMDPAL geometry shaders.
Definition: CallingConv.h:191
@ X86_INTR
x86 hardware interrupt context.
Definition: CallingConv.h:173
@ AMDGPU_CS_Chain
Used on AMDGPUs to give the middle-end more control over argument placement.
Definition: CallingConv.h:245
@ AMDGPU_PS
Used for Mesa/AMDPAL pixel shaders.
Definition: CallingConv.h:194
@ Cold
Attempts to make code in the caller as efficient as possible under the assumption that the call is no...
Definition: CallingConv.h:47
@ PTX_Device
Call to a PTX device function.
Definition: CallingConv.h:129
@ SPIR_KERNEL
Used for SPIR kernel functions.
Definition: CallingConv.h:144
@ Fast
Attempts to make calls as fast as possible (e.g.
Definition: CallingConv.h:41
@ Intel_OCL_BI
Used for Intel OpenCL built-ins.
Definition: CallingConv.h:147
@ Tail
Attemps to make calls as fast as possible while guaranteeing that tail call optimization can always b...
Definition: CallingConv.h:76
@ PTX_Kernel
Call to a PTX kernel. Passes all arguments in parameter space.
Definition: CallingConv.h:125
@ SwiftTail
This follows the Swift calling convention in how arguments are passed but guarantees tail calls will ...
Definition: CallingConv.h:87
@ C
The default llvm calling convention, compatible with C.
Definition: CallingConv.h:34
MatchIntrinsicTypesResult matchIntrinsicSignature(FunctionType *FTy, ArrayRef< IITDescriptor > &Infos, SmallVectorImpl< Type * > &ArgTys)
Match the specified function type with the type constraints specified by the .td file.
Definition: Function.cpp:1708
void getIntrinsicInfoTableEntries(ID id, SmallVectorImpl< IITDescriptor > &T)
Return the IIT table descriptor for the specified intrinsic into an array of IITDescriptors.
Definition: Function.cpp:1313
@ MatchIntrinsicTypes_NoMatchRet
Definition: Intrinsics.h:214
@ MatchIntrinsicTypes_NoMatchArg
Definition: Intrinsics.h:215
StringRef getName(ID id)
Return the LLVM name for an intrinsic, such as "llvm.ppc.altivec.lvx".
Definition: Function.cpp:1027
static const int NoAliasScopeDeclScopeArg
Definition: Intrinsics.h:37
bool matchIntrinsicVarArg(bool isVarArg, ArrayRef< IITDescriptor > &Infos)
Verify if the intrinsic has variable arguments.
Definition: Function.cpp:1734
Flag
These should be considered private to the implementation of the MCInstrDesc class.
Definition: MCInstrDesc.h:148
@ System
Synchronized with respect to all concurrently executing threads.
Definition: LLVMContext.h:57
std::optional< VFInfo > tryDemangleForVFABI(StringRef MangledName, const FunctionType *FTy)
Function to construct a VFInfo out of a mangled names in the following format:
@ CE
Windows NT (Windows on ARM)
AssignmentInstRange getAssignmentInsts(DIAssignID *ID)
Return a range of instructions (typically just one) that have ID as an attachment.
Definition: DebugInfo.cpp:1886
initializer< Ty > init(const Ty &Val)
Definition: CommandLine.h:450
bool isFortran(SourceLanguage S)
Definition: Dwarf.h:279
SourceLanguage
Definition: Dwarf.h:204
@ DW_LANG_lo_user
Definition: Dwarf.h:208
@ DW_MACINFO_undef
Definition: Dwarf.h:473
@ DW_MACINFO_start_file
Definition: Dwarf.h:474
@ DW_MACINFO_define
Definition: Dwarf.h:472
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
Definition: STLExtras.h:329
@ Low
Lower the current thread's priority such that it does not affect foreground tasks significantly.
@ Offset
Definition: DWP.cpp:456
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1722
bool canInstructionHaveMMRAs(const Instruction &I)
auto enumerate(FirstRange &&First, RestRanges &&...Rest)
Given two or more input ranges, returns a new range whose values are are tuples (A,...
Definition: STLExtras.h:2406
bool verifyFunction(const Function &F, raw_ostream *OS=nullptr)
Check a function for errors, useful for use when debugging a pass.
Definition: Verifier.cpp:7062
AllocFnKind
Definition: Attributes.h:48
testing::Matcher< const detail::ErrorHolder & > Failed()
Definition: Error.h:198
void initializeVerifierLegacyPassPass(PassRegistry &)
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
Definition: STLExtras.h:2073
DenseMap< BasicBlock *, ColorVector > colorEHFunclets(Function &F)
If an EH funclet personality is in use (see isFuncletEHPersonality), this will recompute which blocks...
constexpr bool isPowerOf2_64(uint64_t Value)
Return true if the argument is a power of two > 0 (64 bit edition.)
Definition: MathExtras.h:280
bool isScopedEHPersonality(EHPersonality Pers)
Returns true if this personality uses scope-style EH IR instructions: catchswitch,...
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1729
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
Definition: MathExtras.h:275
bool isModSet(const ModRefInfo MRI)
Definition: ModRef.h:48
void sort(IteratorTy Start, IteratorTy End)
Definition: STLExtras.h:1647
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:163
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
Definition: Error.cpp:156
EHPersonality classifyEHPersonality(const Value *Pers)
See if the given exception handling personality function is one that we understand.
raw_fd_ostream & errs()
This returns a reference to a raw_ostream for standard error.
AtomicOrdering
Atomic ordering for LLVM's memory model.
@ ArgMem
Access to memory via argument pointers.
@ First
Helpers to iterate all locations in the MemoryEffectsBase class.
FunctionPass * createVerifierPass(bool FatalErrors=true)
Definition: Verifier.cpp:7488
@ Invalid
Denotes invalid value.
@ Dynamic
Denotes mode unknown at compile time.
@ MaskAll
A bitmask that includes all valid flags.
constexpr unsigned BitWidth
Definition: BitmaskEnum.h:191
std::optional< RoundingMode > convertStrToRoundingMode(StringRef)
Returns a valid RoundingMode enumerator when given a string that is valid as input in constrained int...
Definition: FPEnv.cpp:24
std::unique_ptr< GCStrategy > getGCStrategy(const StringRef Name)
Lookup the GCStrategy object associated with the given gc name.
Definition: GCStrategy.cpp:24
auto predecessors(const MachineBasicBlock *BB)
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Definition: STLExtras.h:1879
bool pred_empty(const BasicBlock *BB)
Definition: CFG.h:118
bool verifyModule(const Module &M, raw_ostream *OS=nullptr, bool *BrokenDebugInfo=nullptr)
Check a module for errors.
Definition: Verifier.cpp:7073
#define N
static const fltSemantics & IEEEsingle() LLVM_READNONE
Definition: APFloat.cpp:249
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition: Alignment.h:39
A special type used by analysis passes to provide an address that identifies that particular analysis...
Definition: Analysis.h:26
Holds the characteristics of one fragment of a larger variable.
Description of the encoding of one expression Op.
This struct is a compact representation of a valid (power of two) or undefined (0) alignment.
Definition: Alignment.h:117
A lightweight accessor for an operand bundle meant to be passed around by value.
Definition: InstrTypes.h:1389
uint32_t getTagID() const
Return the tag of this operand bundle as an integer.
Definition: InstrTypes.h:1417
ArrayRef< Use > Inputs
Definition: InstrTypes.h:1390
void DebugInfoCheckFailed(const Twine &Message)
A debug info check failed.
Definition: Verifier.cpp:302
VerifierSupport(raw_ostream *OS, const Module &M)
Definition: Verifier.cpp:154
bool Broken
Track the brokenness of the module while recursively visiting.
Definition: Verifier.cpp:148
raw_ostream * OS
Definition: Verifier.cpp:140
void CheckFailed(const Twine &Message, const T1 &V1, const Ts &... Vs)
A check failed (with values to print).
Definition: Verifier.cpp:295
bool BrokenDebugInfo
Broken debug info can be "recovered" from by stripping the debug info.
Definition: Verifier.cpp:150
LLVMContext & Context
Definition: Verifier.cpp:145
bool TreatBrokenDebugInfoAsError
Whether to treat broken debug info as an error.
Definition: Verifier.cpp:152
void CheckFailed(const Twine &Message)
A check failed, so printout out the condition and the message.
Definition: Verifier.cpp:284
const Module & M
Definition: Verifier.cpp:141
const DataLayout & DL
Definition: Verifier.cpp:144
void DebugInfoCheckFailed(const Twine &Message, const T1 &V1, const Ts &... Vs)
A debug info check failed (with values to print).
Definition: Verifier.cpp:311
ModuleSlotTracker MST
Definition: Verifier.cpp:142