LLVM 22.0.0git
Verifier.cpp
Go to the documentation of this file.
1//===-- Verifier.cpp - Implement the Module Verifier -----------------------==//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file defines the function verifier interface, that can be used for some
10// basic correctness checking of input to the system.
11//
12// Note that this does not provide full `Java style' security and verifications,
13// instead it just tries to ensure that code is well-formed.
14//
15// * Both of a binary operator's parameters are of the same type
16// * Verify that the indices of mem access instructions match other operands
17// * Verify that arithmetic and other things are only performed on first-class
18// types. Verify that shifts & logicals only happen on integrals f.e.
19// * All of the constants in a switch statement are of the correct type
20// * The code is in valid SSA form
21// * It should be illegal to put a label into any other type (like a structure)
22// or to return one. [except constant arrays!]
23// * Only phi nodes can be self referential: 'add i32 %0, %0 ; <int>:0' is bad
24// * PHI nodes must have an entry for each predecessor, with no extras.
25// * PHI nodes must be the first thing in a basic block, all grouped together
26// * All basic blocks should only end with terminator insts, not contain them
27// * The entry node to a function must not have predecessors
28// * All Instructions must be embedded into a basic block
29// * Functions cannot take a void-typed parameter
30// * Verify that a function's argument list agrees with its declared type.
31// * It is illegal to specify a name for a void value.
32// * It is illegal to have an internal global value with no initializer
33// * It is illegal to have a ret instruction that returns a value that does not
34// agree with the function return value type.
35// * Function call argument types match the function prototype
36// * A landing pad is defined by a landingpad instruction, and can be jumped to
37// only by the unwind edge of an invoke instruction.
38// * A landingpad instruction must be the first non-PHI instruction in the
39// block.
40// * Landingpad instructions must be in a function with a personality function.
41// * Convergence control intrinsics are introduced in ConvergentOperations.rst.
42// The applied restrictions are too numerous to list here.
43// * The convergence entry intrinsic and the loop heart must be the first
44// non-PHI instruction in their respective block. This does not conflict with
45// the landing pads, since these two kinds cannot occur in the same block.
46// * All other things that are tested by asserts spread about the code...
47//
48//===----------------------------------------------------------------------===//
49
50#include "llvm/IR/Verifier.h"
51#include "llvm/ADT/APFloat.h"
52#include "llvm/ADT/APInt.h"
53#include "llvm/ADT/ArrayRef.h"
54#include "llvm/ADT/DenseMap.h"
55#include "llvm/ADT/MapVector.h"
56#include "llvm/ADT/STLExtras.h"
60#include "llvm/ADT/StringRef.h"
61#include "llvm/ADT/Twine.h"
63#include "llvm/IR/Argument.h"
65#include "llvm/IR/Attributes.h"
66#include "llvm/IR/BasicBlock.h"
67#include "llvm/IR/CFG.h"
68#include "llvm/IR/CallingConv.h"
69#include "llvm/IR/Comdat.h"
70#include "llvm/IR/Constant.h"
73#include "llvm/IR/Constants.h"
75#include "llvm/IR/DataLayout.h"
76#include "llvm/IR/DebugInfo.h"
78#include "llvm/IR/DebugLoc.h"
80#include "llvm/IR/Dominators.h"
82#include "llvm/IR/Function.h"
83#include "llvm/IR/GCStrategy.h"
84#include "llvm/IR/GlobalAlias.h"
85#include "llvm/IR/GlobalValue.h"
87#include "llvm/IR/InlineAsm.h"
88#include "llvm/IR/InstVisitor.h"
89#include "llvm/IR/InstrTypes.h"
90#include "llvm/IR/Instruction.h"
93#include "llvm/IR/Intrinsics.h"
94#include "llvm/IR/IntrinsicsAArch64.h"
95#include "llvm/IR/IntrinsicsAMDGPU.h"
96#include "llvm/IR/IntrinsicsARM.h"
97#include "llvm/IR/IntrinsicsNVPTX.h"
98#include "llvm/IR/IntrinsicsWebAssembly.h"
99#include "llvm/IR/LLVMContext.h"
101#include "llvm/IR/Metadata.h"
102#include "llvm/IR/Module.h"
104#include "llvm/IR/PassManager.h"
106#include "llvm/IR/Statepoint.h"
107#include "llvm/IR/Type.h"
108#include "llvm/IR/Use.h"
109#include "llvm/IR/User.h"
111#include "llvm/IR/Value.h"
113#include "llvm/Pass.h"
117#include "llvm/Support/Casting.h"
121#include "llvm/Support/ModRef.h"
124#include <algorithm>
125#include <cassert>
126#include <cstdint>
127#include <memory>
128#include <optional>
129#include <string>
130#include <utility>
131
132using namespace llvm;
133
// NOTE(review): scrape artifact — the opening of this declaration (the
// `static cl::opt<bool> ...(` line, original line 134) is missing from this
// capture. The visible arguments define a hidden command-line flag,
// default-off, controlling dominance checking of noalias scope declarations.
135 "verify-noalias-scope-decl-dom", cl::Hidden, cl::init(false),
136 cl::desc("Ensure that llvm.experimental.noalias.scope.decl for identical "
137 "scopes are not dominating"));
138
// NOTE(review): numbering gaps (142, 144-145, 152, 154) show this capture
// dropped several member declarations (presumably the output stream, slot
// tracker, data layout, context, and the constructor signature) — compare
// against upstream Verifier.cpp before editing this region.
141 const Module &M;
143 const Triple &TT;
146
147 /// Track the brokenness of the module while recursively visiting.
148 bool Broken = false;
149 /// Broken debug info can be "recovered" from by stripping the debug info.
150 bool BrokenDebugInfo = false;
151 /// Whether to treat broken debug info as an error.
153
// Constructor member-initializer list (the signature line was dropped by the
// capture); wires all cached references to the module being verified.
155 : OS(OS), M(M), MST(&M), TT(M.getTargetTriple()), DL(M.getDataLayout()),
156 Context(M.getContext()) {}
157
158private:
// Emit the textual-IR module header line for diagnostics.
159 void Write(const Module *M) {
160 *OS << "; ModuleID = '" << M->getModuleIdentifier() << "'\n";
161 }
162
// Null-tolerant wrapper: forwards to the by-reference overload when non-null.
163 void Write(const Value *V) {
164 if (V)
165 Write(*V);
166 }
167
168 void Write(const Value &V) {
169 if (isa<Instruction>(V)) {
170 V.print(*OS, MST);
171 *OS << '\n';
172 } else {
173 V.printAsOperand(*OS, true, MST);
174 *OS << '\n';
175 }
176 }
177
// Print a debug record (non-verbose form) followed by a newline; null is
// silently ignored.
178 void Write(const DbgRecord *DR) {
179 if (DR) {
180 DR->print(*OS, MST, false);
181 *OS << '\n';
182 }
183 }
184
// NOTE(review): scrape artifact — the function signature (original line 185)
// and every `case` label of this switch were dropped by the capture. From the
// emitted strings this looks like a printer for a debug-record location/type
// enum ("value", "declare", "assign", ...) — confirm against upstream.
186 switch (Type) {
188 *OS << "value";
189 break;
191 *OS << "declare";
192 break;
194 *OS << "declare_value";
195 break;
197 *OS << "assign";
198 break;
200 *OS << "end";
201 break;
203 *OS << "any";
204 break;
205 };
206 }
207
208 void Write(const Metadata *MD) {
209 if (!MD)
210 return;
211 MD->print(*OS, MST, &M);
212 *OS << '\n';
213 }
214
// The remaining Write overloads below pretty-print the various IR entities a
// failed check may want to attach to its diagnostic; they are dispatched by
// WriteTs via overload resolution.
215 template <class T> void Write(const MDTupleTypedArrayWrapper<T> &MD) {
216 Write(MD.get());
217 }
218
219 void Write(const NamedMDNode *NMD) {
220 if (!NMD)
221 return;
222 NMD->print(*OS, MST);
223 *OS << '\n';
224 }
225
226 void Write(Type *T) {
227 if (!T)
228 return;
229 *OS << ' ' << *T;
230 }
231
232 void Write(const Comdat *C) {
233 if (!C)
234 return;
235 *OS << *C;
236 }
237
238 void Write(const APInt *AI) {
239 if (!AI)
240 return;
241 *OS << *AI << '\n';
242 }
243
244 void Write(const unsigned i) { *OS << i << '\n'; }
245
246 // NOLINTNEXTLINE(readability-identifier-naming)
247 void Write(const Attribute *A) {
248 if (!A)
249 return;
250 *OS << A->getAsString() << '\n';
251 }
252
253 // NOLINTNEXTLINE(readability-identifier-naming)
254 void Write(const AttributeSet *AS) {
255 if (!AS)
256 return;
257 *OS << AS->getAsString() << '\n';
258 }
259
260 // NOLINTNEXTLINE(readability-identifier-naming)
261 void Write(const AttributeList *AL) {
262 if (!AL)
263 return;
264 AL->print(*OS);
265 }
266
267 void Write(Printable P) { *OS << P << '\n'; }
268
// Print every element of an array by re-dispatching into the scalar overloads.
269 template <typename T> void Write(ArrayRef<T> Vs) {
270 for (const T &V : Vs)
271 Write(V);
272 }
273
// Variadic fan-out: print each value in turn via the matching Write overload.
274 template <typename T1, typename... Ts>
275 void WriteTs(const T1 &V1, const Ts &... Vs) {
276 Write(V1);
277 WriteTs(Vs...);
278 }
279
// Recursion terminator for the empty pack.
280 template <typename... Ts> void WriteTs() {}
281
282public:
283 /// A check failed, so print out the condition and the message.
284 ///
285 /// This provides a nice place to put a breakpoint if you want to see why
286 /// something is not correct.
287 void CheckFailed(const Twine &Message) {
288 if (OS)
289 *OS << Message << '\n';
290 Broken = true;
291 }
292
293 /// A check failed (with values to print).
294 ///
295 /// This calls the Message-only version so that the above is easier to set a
296 /// breakpoint on.
297 template <typename T1, typename... Ts>
298 void CheckFailed(const Twine &Message, const T1 &V1, const Ts &... Vs) {
299 CheckFailed(Message);
300 if (OS)
301 WriteTs(V1, Vs...);
302 }
303
304 /// A debug info check failed.
305 void DebugInfoCheckFailed(const Twine &Message) {
306 if (OS)
307 *OS << Message << '\n';
309 BrokenDebugInfo = true;
310 }
311
312 /// A debug info check failed (with values to print).
313 template <typename T1, typename... Ts>
314 void DebugInfoCheckFailed(const Twine &Message, const T1 &V1,
315 const Ts &... Vs) {
316 DebugInfoCheckFailed(Message);
317 if (OS)
318 WriteTs(V1, Vs...);
319 }
320};
321
322namespace {
323
// NOTE(review): several member declarations in this class were dropped by the
// capture (numbering gaps at 336, 339, 342, 356, 360, 382) — the comments
// above each gap describe the missing member; compare against upstream.
324class Verifier : public InstVisitor<Verifier>, VerifierSupport {
325 friend class InstVisitor<Verifier>;
326 DominatorTree DT;
327
328 /// When verifying a basic block, keep track of all of the
329 /// instructions we have seen so far.
330 ///
331 /// This allows us to do efficient dominance checks for the case when an
332 /// instruction has an operand that is an instruction in the same block.
333 SmallPtrSet<Instruction *, 16> InstsInThisBlock;
334
335 /// Keep track of the metadata nodes that have been checked already.
337
338 /// Keep track which DISubprogram is attached to which function.
340
341 /// Track all DICompileUnits visited.
343
344 /// The result type for a landingpad.
345 Type *LandingPadResultTy;
346
347 /// Whether we've seen a call to @llvm.localescape in this function
348 /// already.
349 bool SawFrameEscape;
350
351 /// Whether the current function has a DISubprogram attached to it.
352 bool HasDebugInfo = false;
353
354 /// Stores the count of how many objects were passed to llvm.localescape for a
355 /// given function and the largest index passed to llvm.localrecover.
357
358 // Maps catchswitches and cleanuppads that unwind to siblings to the
359 // terminators that indicate the unwind, used to detect cycles therein.
361
362 /// Cache which blocks are in which funclet, if an EH funclet personality is
363 /// in use. Otherwise empty.
364 DenseMap<BasicBlock *, ColorVector> BlockEHFuncletColors;
365
366 /// Cache of constants visited in search of ConstantExprs.
367 SmallPtrSet<const Constant *, 32> ConstantExprVisited;
368
369 /// Cache of declarations of the llvm.experimental.deoptimize.<ty> intrinsic.
370 SmallVector<const Function *, 4> DeoptimizeDeclarations;
371
372 /// Cache of attribute lists verified.
373 SmallPtrSet<const void *, 32> AttributeListsVisited;
374
375 // Verify that this GlobalValue is only used in this module.
376 // This map is used to avoid visiting uses twice. We can arrive at a user
377 // twice, if they have multiple operands. In particular for very large
378 // constant expressions, we can arrive at a particular user many times.
379 SmallPtrSet<const Value *, 32> GlobalValueVisited;
380
381 // Keeps track of duplicate function argument debug info.
383
384 TBAAVerifier TBAAVerifyHelper;
385 ConvergenceVerifier ConvergenceVerifyHelper;
386
387 SmallVector<IntrinsicInst *, 4> NoAliasScopeDecls;
388
389 void checkAtomicMemAccessSize(Type *Ty, const Instruction *I);
390
391public:
// Builds a verifier bound to a single module; diagnostics go to OS (may be
// null for a silent run).
392 explicit Verifier(raw_ostream *OS, bool ShouldTreatBrokenDebugInfoAsError,
393 const Module &M)
394 : VerifierSupport(OS, M), LandingPadResultTy(nullptr),
395 SawFrameEscape(false), TBAAVerifyHelper(this) {
396 TreatBrokenDebugInfoAsError = ShouldTreatBrokenDebugInfoAsError;
397 }
398
399 bool hasBrokenDebugInfo() const { return BrokenDebugInfo; }
400
// Verify a single function belonging to the module this Verifier was bound
// to. Returns true when no (non-debug-info) problems were found. Per-function
// state is reset at the end so the instance can be reused across functions.
401 bool verify(const Function &F) {
402 llvm::TimeTraceScope timeScope("Verifier");
403 assert(F.getParent() == &M &&
404 "An instance of this class only works with a specific module!");
405
406 // First ensure the function is well-enough formed to compute dominance
407 // information, and directly compute a dominance tree. We don't rely on the
408 // pass manager to provide this as it isolates us from a potentially
409 // out-of-date dominator tree and makes it significantly more complex to run
410 // this code outside of a pass manager.
411 // FIXME: It's really gross that we have to cast away constness here.
412 if (!F.empty())
413 DT.recalculate(const_cast<Function &>(F));
414
// Bail out early (before running the visitor) if any block lacks a
// terminator — the rest of the verifier assumes well-formed blocks.
415 for (const BasicBlock &BB : F) {
416 if (!BB.empty() && BB.back().isTerminator())
417 continue;
418
419 if (OS) {
420 *OS << "Basic Block in function '" << F.getName()
421 << "' does not have terminator!\n";
422 BB.printAsOperand(*OS, true, MST);
423 *OS << "\n";
424 }
425 return false;
426 }
427
428 auto FailureCB = [this](const Twine &Message) {
429 this->CheckFailed(Message);
430 };
431 ConvergenceVerifyHelper.initialize(OS, FailureCB, F);
432
433 Broken = false;
434 // FIXME: We strip const here because the inst visitor strips const.
435 visit(const_cast<Function &>(F));
436 verifySiblingFuncletUnwinds();
437
438 if (ConvergenceVerifyHelper.sawTokens())
439 ConvergenceVerifyHelper.verify(DT);
440
// Reset all per-function state for the next call; note the noalias scope
// declarations are checked just before their cache is cleared.
441 InstsInThisBlock.clear();
442 DebugFnArgs.clear();
443 LandingPadResultTy = nullptr;
444 SawFrameEscape = false;
445 SiblingFuncletInfo.clear();
446 verifyNoAliasScopeDecl();
447 NoAliasScopeDecls.clear();
448
449 return !Broken;
450 }
451
452 /// Verify the module that this instance of \c Verifier was initialized with.
453 bool verify() {
454 Broken = false;
455
456 // Collect all declarations of the llvm.experimental.deoptimize intrinsic.
457 for (const Function &F : M)
458 if (F.getIntrinsicID() == Intrinsic::experimental_deoptimize)
459 DeoptimizeDeclarations.push_back(&F);
460
461 // Now that we've visited every function, verify that we never asked to
462 // recover a frame index that wasn't escaped.
463 verifyFrameRecoverIndices();
// Walk every module-level construct: globals, aliases, ifuncs, named
// metadata, and comdats.
464 for (const GlobalVariable &GV : M.globals())
465 visitGlobalVariable(GV);
466
467 for (const GlobalAlias &GA : M.aliases())
468 visitGlobalAlias(GA);
469
470 for (const GlobalIFunc &GI : M.ifuncs())
471 visitGlobalIFunc(GI);
472
473 for (const NamedMDNode &NMD : M.named_metadata())
474 visitNamedMDNode(NMD);
475
476 for (const StringMapEntry<Comdat> &SMEC : M.getComdatSymbolTable())
477 visitComdat(SMEC.getValue());
478
479 visitModuleFlags();
480 visitModuleIdents();
481 visitModuleCommandLines();
482 visitModuleErrnoTBAA();
483
484 verifyCompileUnits();
485
486 verifyDeoptimizeCallingConvs();
487 DISubprogramAttachments.clear();
488 return !Broken;
489 }
490
491private:
492 /// Whether a metadata node is allowed to be, or contain, a DILocation.
493 enum class AreDebugLocsAllowed { No, Yes };
494
495 /// Metadata that should be treated as a range, with slightly different
496 /// requirements.
497 enum class RangeLikeMetadataKind {
498 Range, // MD_range
499 AbsoluteSymbol, // MD_absolute_symbol
500 NoaliasAddrspace // MD_noalias_addrspace
501 };
502
503 // Verification methods...
// Per-construct verification helpers; each `visit*` checks the invariants of
// one IR entity and reports failures via Check/CheckDI.
504 void visitGlobalValue(const GlobalValue &GV);
505 void visitGlobalVariable(const GlobalVariable &GV);
506 void visitGlobalAlias(const GlobalAlias &GA);
507 void visitGlobalIFunc(const GlobalIFunc &GI);
508 void visitAliaseeSubExpr(const GlobalAlias &A, const Constant &C);
509 void visitAliaseeSubExpr(SmallPtrSetImpl<const GlobalAlias *> &Visited,
510 const GlobalAlias &A, const Constant &C);
511 void visitNamedMDNode(const NamedMDNode &NMD);
512 void visitMDNode(const MDNode &MD, AreDebugLocsAllowed AllowLocs);
513 void visitMetadataAsValue(const MetadataAsValue &MD, Function *F);
514 void visitValueAsMetadata(const ValueAsMetadata &MD, Function *F);
515 void visitDIArgList(const DIArgList &AL, Function *F);
516 void visitComdat(const Comdat &C);
517 void visitModuleIdents();
518 void visitModuleCommandLines();
519 void visitModuleErrnoTBAA();
520 void visitModuleFlags();
521 void visitModuleFlag(const MDNode *Op,
522 DenseMap<const MDString *, const MDNode *> &SeenIDs,
523 SmallVectorImpl<const MDNode *> &Requirements);
524 void visitModuleFlagCGProfileEntry(const MDOperand &MDO);
525 void visitFunction(const Function &F);
526 void visitBasicBlock(BasicBlock &BB);
527 void verifyRangeLikeMetadata(const Value &V, const MDNode *Range, Type *Ty,
528 RangeLikeMetadataKind Kind);
529 void visitRangeMetadata(Instruction &I, MDNode *Range, Type *Ty);
530 void visitNoaliasAddrspaceMetadata(Instruction &I, MDNode *Range, Type *Ty);
531 void visitDereferenceableMetadata(Instruction &I, MDNode *MD);
532 void visitNofreeMetadata(Instruction &I, MDNode *MD);
533 void visitProfMetadata(Instruction &I, MDNode *MD);
534 void visitCallStackMetadata(MDNode *MD);
535 void visitMemProfMetadata(Instruction &I, MDNode *MD);
536 void visitCallsiteMetadata(Instruction &I, MDNode *MD);
537 void visitCalleeTypeMetadata(Instruction &I, MDNode *MD);
538 void visitDIAssignIDMetadata(Instruction &I, MDNode *MD);
539 void visitMMRAMetadata(Instruction &I, MDNode *MD);
540 void visitAnnotationMetadata(MDNode *Annotation);
541 void visitAliasScopeMetadata(const MDNode *MD);
542 void visitAliasScopeListMetadata(const MDNode *MD);
543 void visitAccessGroupMetadata(const MDNode *MD);
544 void visitCapturesMetadata(Instruction &I, const MDNode *Captures);
545 void visitAllocTokenMetadata(Instruction &I, MDNode *MD);
546
547 template <class Ty> bool isValidMetadataArray(const MDTuple &N);
// Generate one visit declaration per specialized debug-metadata node class.
548#define HANDLE_SPECIALIZED_MDNODE_LEAF(CLASS) void visit##CLASS(const CLASS &N);
549#include "llvm/IR/Metadata.def"
550 void visitDIScope(const DIScope &N);
551 void visitDIVariable(const DIVariable &N);
552 void visitDILexicalBlockBase(const DILexicalBlockBase &N);
553 void visitDITemplateParameter(const DITemplateParameter &N);
554
555 void visitTemplateParams(const MDNode &N, const Metadata &RawParams);
556
557 void visit(DbgLabelRecord &DLR);
558 void visit(DbgVariableRecord &DVR);
559 // InstVisitor overrides...
560 using InstVisitor<Verifier>::visit;
561 void visitDbgRecords(Instruction &I);
562 void visit(Instruction &I);
563
564 void visitTruncInst(TruncInst &I);
565 void visitZExtInst(ZExtInst &I);
566 void visitSExtInst(SExtInst &I);
567 void visitFPTruncInst(FPTruncInst &I);
568 void visitFPExtInst(FPExtInst &I);
569 void visitFPToUIInst(FPToUIInst &I);
570 void visitFPToSIInst(FPToSIInst &I);
571 void visitUIToFPInst(UIToFPInst &I);
572 void visitSIToFPInst(SIToFPInst &I);
573 void visitIntToPtrInst(IntToPtrInst &I);
574 void checkPtrToAddr(Type *SrcTy, Type *DestTy, const Value &V);
575 void visitPtrToAddrInst(PtrToAddrInst &I);
576 void visitPtrToIntInst(PtrToIntInst &I);
577 void visitBitCastInst(BitCastInst &I);
578 void visitAddrSpaceCastInst(AddrSpaceCastInst &I);
579 void visitPHINode(PHINode &PN);
580 void visitCallBase(CallBase &Call);
581 void visitUnaryOperator(UnaryOperator &U);
582 void visitBinaryOperator(BinaryOperator &B);
583 void visitICmpInst(ICmpInst &IC);
584 void visitFCmpInst(FCmpInst &FC);
585 void visitExtractElementInst(ExtractElementInst &EI);
586 void visitInsertElementInst(InsertElementInst &EI);
587 void visitShuffleVectorInst(ShuffleVectorInst &EI);
588 void visitVAArgInst(VAArgInst &VAA) { visitInstruction(VAA); }
589 void visitCallInst(CallInst &CI);
590 void visitInvokeInst(InvokeInst &II);
591 void visitGetElementPtrInst(GetElementPtrInst &GEP);
592 void visitLoadInst(LoadInst &LI);
593 void visitStoreInst(StoreInst &SI);
594 void verifyDominatesUse(Instruction &I, unsigned i);
595 void visitInstruction(Instruction &I);
596 void visitTerminator(Instruction &I);
597 void visitBranchInst(BranchInst &BI);
598 void visitReturnInst(ReturnInst &RI);
599 void visitSwitchInst(SwitchInst &SI);
600 void visitIndirectBrInst(IndirectBrInst &BI);
601 void visitCallBrInst(CallBrInst &CBI);
602 void visitSelectInst(SelectInst &SI);
603 void visitUserOp1(Instruction &I);
604 void visitUserOp2(Instruction &I) { visitUserOp1(I); }
605 void visitIntrinsicCall(Intrinsic::ID ID, CallBase &Call);
606 void visitConstrainedFPIntrinsic(ConstrainedFPIntrinsic &FPI);
607 void visitVPIntrinsic(VPIntrinsic &VPI);
608 void visitDbgLabelIntrinsic(StringRef Kind, DbgLabelInst &DLI);
609 void visitAtomicCmpXchgInst(AtomicCmpXchgInst &CXI);
610 void visitAtomicRMWInst(AtomicRMWInst &RMWI);
611 void visitFenceInst(FenceInst &FI);
612 void visitAllocaInst(AllocaInst &AI);
613 void visitExtractValueInst(ExtractValueInst &EVI);
614 void visitInsertValueInst(InsertValueInst &IVI);
615 void visitEHPadPredecessors(Instruction &I);
616 void visitLandingPadInst(LandingPadInst &LPI);
617 void visitResumeInst(ResumeInst &RI);
618 void visitCatchPadInst(CatchPadInst &CPI);
619 void visitCatchReturnInst(CatchReturnInst &CatchReturn);
620 void visitCleanupPadInst(CleanupPadInst &CPI);
621 void visitFuncletPadInst(FuncletPadInst &FPI);
622 void visitCatchSwitchInst(CatchSwitchInst &CatchSwitch);
623 void visitCleanupReturnInst(CleanupReturnInst &CRI);
624
625 void verifySwiftErrorCall(CallBase &Call, const Value *SwiftErrorVal);
626 void verifySwiftErrorValue(const Value *SwiftErrorVal);
627 void verifyTailCCMustTailAttrs(const AttrBuilder &Attrs, StringRef Context);
628 void verifyMustTailCall(CallInst &CI);
629 bool verifyAttributeCount(AttributeList Attrs, unsigned Params);
630 void verifyAttributeTypes(AttributeSet Attrs, const Value *V);
631 void verifyParameterAttrs(AttributeSet Attrs, Type *Ty, const Value *V);
632 void checkUnsignedBaseTenFuncAttr(AttributeList Attrs, StringRef Attr,
633 const Value *V);
634 void verifyFunctionAttrs(FunctionType *FT, AttributeList Attrs,
635 const Value *V, bool IsIntrinsic, bool IsInlineAsm);
636 void verifyFunctionMetadata(ArrayRef<std::pair<unsigned, MDNode *>> MDs);
637 void verifyUnknownProfileMetadata(MDNode *MD);
638 void visitConstantExprsRecursively(const Constant *EntryC);
639 void visitConstantExpr(const ConstantExpr *CE);
640 void visitConstantPtrAuth(const ConstantPtrAuth *CPA);
641 void verifyInlineAsmCall(const CallBase &Call);
642 void verifyStatepoint(const CallBase &Call);
643 void verifyFrameRecoverIndices();
644 void verifySiblingFuncletUnwinds();
645
646 void verifyFragmentExpression(const DbgVariableRecord &I);
647 template <typename ValueOrMetadata>
// NOTE(review): numbering gap — a parameter line (original 649) was dropped
// here by the capture.
648 void verifyFragmentExpression(const DIVariable &V,
650 ValueOrMetadata *Desc);
651 void verifyFnArgs(const DbgVariableRecord &DVR);
652 void verifyNotEntryValue(const DbgVariableRecord &I);
653
654 /// Module-level debug info verification...
655 void verifyCompileUnits();
656
657 /// Module-level verification that all @llvm.experimental.deoptimize
658 /// declarations share the same calling convention.
659 void verifyDeoptimizeCallingConvs();
660
661 void verifyAttachedCallBundle(const CallBase &Call,
662 const OperandBundleUse &BU);
663
664 /// Verify the llvm.experimental.noalias.scope.decl declarations
665 void verifyNoAliasScopeDecl();
666};
667
668} // end anonymous namespace
669
670/// We know that cond should be true, if not print an error message.
/// Note: on failure this records the error via CheckFailed and then
/// *returns from the enclosing function*, so nothing after a failed Check
/// in the same verifier method is executed.
671#define Check(C, ...) \
672 do { \
673 if (!(C)) { \
674 CheckFailed(__VA_ARGS__); \
675 return; \
676 } \
677 } while (false)
678
679/// We know that a debug info condition should be true, if not print
680/// an error message.
/// Same early-return behaviour as Check, but routes through
/// DebugInfoCheckFailed so broken debug info can be stripped rather than
/// rejecting the whole module.
681#define CheckDI(C, ...) \
682 do { \
683 if (!(C)) { \
684 DebugInfoCheckFailed(__VA_ARGS__); \
685 return; \
686 } \
687 } while (false)
688
// Verify the debug records attached to an instruction: marker back-pointers
// must be consistent, PHIs must carry no records, and each record's location
// and payload are checked.
689void Verifier::visitDbgRecords(Instruction &I) {
690 if (!I.DebugMarker)
691 return;
692 CheckDI(I.DebugMarker->MarkedInstr == &I,
693 "Instruction has invalid DebugMarker", &I);
694 CheckDI(!isa<PHINode>(&I) || !I.hasDbgRecords(),
695 "PHI Node must not have any attached DbgRecords", &I);
696 for (DbgRecord &DR : I.getDbgRecordRange()) {
697 CheckDI(DR.getMarker() == I.DebugMarker,
698 "DbgRecord had invalid DebugMarker", &I, &DR);
// NOTE(review): numbering gap — the continuation of this `if` condition
// (original line 700, presumably extracting a DILocation from the record)
// was dropped by the capture.
699 if (auto *Loc =
701 visitMDNode(*Loc, AreDebugLocsAllowed::Yes);
702 if (auto *DVR = dyn_cast<DbgVariableRecord>(&DR)) {
703 visit(*DVR);
704 // These have to appear after `visit` for consistency with existing
705 // intrinsic behaviour.
706 verifyFragmentExpression(*DVR);
707 verifyNotEntryValue(*DVR);
708 } else if (auto *DLR = dyn_cast<DbgLabelRecord>(&DR)) {
709 visit(*DLR);
710 }
711 }
712}
713
// Entry point for each instruction: first check its debug records and that no
// operand slot is null.
714void Verifier::visit(Instruction &I) {
715 visitDbgRecords(I);
716 for (unsigned i = 0, e = I.getNumOperands(); i != e; ++i)
717 Check(I.getOperand(i) != nullptr, "Operand is null", &I);
// NOTE(review): numbering gap — original line 718 was dropped by the capture
// (upstream presumably delegates to the InstVisitor base here; confirm).
719}
720
721// Helper to iterate over indirect users. By returning false, the callback
// can ask to stop traversing further.
// NOTE(review): numbering gaps — the visited-set parameter (original line
// 723) and the worklist declaration/initialization (original line 728) were
// dropped by this capture.
722static void forEachUser(const Value *User,
724 llvm::function_ref<bool(const Value *)> Callback) {
725 if (!Visited.insert(User).second)
726 return;
727
// Iterative DFS over the user graph; Visited guards against re-processing a
// user reachable through multiple operands.
729 while (!WorkList.empty()) {
730 const Value *Cur = WorkList.pop_back_val();
731 if (!Visited.insert(Cur).second)
732 continue;
733 if (Callback(Cur))
734 append_range(WorkList, Cur->materialized_users());
735 }
736}
737
// Common checks for every GlobalValue: linkage/visibility/storage-class
// invariants, !associated and !absolute_symbol metadata, and that all users
// live in this module.
// NOTE(review): several `Check(` opening lines were dropped by this capture
// (numbering gaps at 739, 774, 787, 792, 798-799) — only the message/argument
// continuation lines remain; compare against upstream before editing.
738void Verifier::visitGlobalValue(const GlobalValue &GV) {
740 "Global is external, but doesn't have external or weak linkage!", &GV);
741
742 if (const GlobalObject *GO = dyn_cast<GlobalObject>(&GV)) {
743 if (const MDNode *Associated =
744 GO->getMetadata(LLVMContext::MD_associated)) {
745 Check(Associated->getNumOperands() == 1,
746 "associated metadata must have one operand", &GV, Associated);
747 const Metadata *Op = Associated->getOperand(0).get();
748 Check(Op, "associated metadata must have a global value", GO, Associated);
749
750 const auto *VM = dyn_cast_or_null<ValueAsMetadata>(Op);
751 Check(VM, "associated metadata must be ValueAsMetadata", GO, Associated);
752 if (VM) {
753 Check(isa<PointerType>(VM->getValue()->getType()),
754 "associated value must be pointer typed", GV, Associated);
755
756 const Value *Stripped = VM->getValue()->stripPointerCastsAndAliases();
757 Check(isa<GlobalObject>(Stripped) || isa<Constant>(Stripped),
758 "associated metadata must point to a GlobalObject", GO, Stripped);
759 Check(Stripped != GO,
760 "global values should not associate to themselves", GO,
761 Associated);
762 }
763 }
764
765 // FIXME: Why is getMetadata on GlobalValue protected?
766 if (const MDNode *AbsoluteSymbol =
767 GO->getMetadata(LLVMContext::MD_absolute_symbol)) {
768 verifyRangeLikeMetadata(*GO, AbsoluteSymbol,
769 DL.getIntPtrType(GO->getType()),
770 RangeLikeMetadataKind::AbsoluteSymbol);
771 }
772 }
773
775 "Only global variables can have appending linkage!", &GV);
776
777 if (GV.hasAppendingLinkage()) {
778 const GlobalVariable *GVar = dyn_cast<GlobalVariable>(&GV);
779 Check(GVar && GVar->getValueType()->isArrayTy(),
780 "Only global arrays can have appending linkage!", GVar);
781 }
782
783 if (GV.isDeclarationForLinker())
784 Check(!GV.hasComdat(), "Declaration may not be in a Comdat!", &GV);
785
786 if (GV.hasDLLExportStorageClass()) {
788 "dllexport GlobalValue must have default or protected visibility",
789 &GV);
790 }
791 if (GV.hasDLLImportStorageClass()) {
793 "dllimport GlobalValue must have default visibility", &GV);
794 Check(!GV.isDSOLocal(), "GlobalValue with DLLImport Storage is dso_local!",
795 &GV);
796
797 Check((GV.isDeclaration() &&
800 "Global is marked as dllimport, but not external", &GV);
801 }
802
803 if (GV.isImplicitDSOLocal())
804 Check(GV.isDSOLocal(),
805 "GlobalValue with local linkage or non-default "
806 "visibility must be dso_local!",
807 &GV);
808
// Walk every (indirect) user; instructions and functions must belong to this
// module, and are not traversed further (callback returns false for them).
809 forEachUser(&GV, GlobalValueVisited, [&](const Value *V) -> bool {
810 if (const Instruction *I = dyn_cast<Instruction>(V)) {
811 if (!I->getParent() || !I->getParent()->getParent())
812 CheckFailed("Global is referenced by parentless instruction!", &GV, &M,
813 I);
814 else if (I->getParent()->getParent()->getParent() != &M)
815 CheckFailed("Global is referenced in a different module!", &GV, &M, I,
816 I->getParent()->getParent(),
817 I->getParent()->getParent()->getParent());
818 return false;
819 } else if (const Function *F = dyn_cast<Function>(V)) {
820 if (F->getParent() != &M)
821 CheckFailed("Global is used by function in a different module", &GV, &M,
822 F, F->getParent());
823 return false;
824 }
825 return true;
826 });
827}
828
// Checks specific to global variables: alignment bounds, initializer/type
// agreement, the special llvm.global_ctors/dtors and llvm.(compiler.)used
// arrays, !dbg attachments, and disallowed (scalable / target-extension)
// value types. Ends by delegating to visitGlobalValue.
// NOTE(review): several `Check(` opening lines and two declarations were
// dropped by this capture (numbering gaps at 842, 848, 858, 860, 884, 886,
// 899-900, 910, 925-926) — compare against upstream before editing.
829void Verifier::visitGlobalVariable(const GlobalVariable &GV) {
830 Type *GVType = GV.getValueType();
831
832 if (MaybeAlign A = GV.getAlign()) {
833 Check(A->value() <= Value::MaximumAlignment,
834 "huge alignment values are unsupported", &GV);
835 }
836
837 if (GV.hasInitializer()) {
838 Check(GV.getInitializer()->getType() == GVType,
839 "Global variable initializer type does not match global "
840 "variable type!",
841 &GV);
843 "Global variable initializer must be sized", &GV);
844 visitConstantExprsRecursively(GV.getInitializer());
845 // If the global has common linkage, it must have a zero initializer and
846 // cannot be constant.
847 if (GV.hasCommonLinkage()) {
849 "'common' global must have a zero initializer!", &GV);
850 Check(!GV.isConstant(), "'common' global may not be marked constant!",
851 &GV);
852 Check(!GV.hasComdat(), "'common' global may not be in a Comdat!", &GV);
853 }
854 }
855
// llvm.global_ctors/llvm.global_dtors must be arrays of
// { i32 priority, ptr function, ptr data } triples.
856 if (GV.hasName() && (GV.getName() == "llvm.global_ctors" ||
857 GV.getName() == "llvm.global_dtors")) {
859 "invalid linkage for intrinsic global variable", &GV);
861 "invalid uses of intrinsic global variable", &GV);
862
863 // Don't worry about emitting an error for it not being an array,
864 // visitGlobalValue will complain on appending non-array.
865 if (ArrayType *ATy = dyn_cast<ArrayType>(GVType)) {
866 StructType *STy = dyn_cast<StructType>(ATy->getElementType());
867 PointerType *FuncPtrTy =
868 PointerType::get(Context, DL.getProgramAddressSpace());
869 Check(STy && (STy->getNumElements() == 2 || STy->getNumElements() == 3) &&
870 STy->getTypeAtIndex(0u)->isIntegerTy(32) &&
871 STy->getTypeAtIndex(1) == FuncPtrTy,
872 "wrong type for intrinsic global variable", &GV);
873 Check(STy->getNumElements() == 3,
874 "the third field of the element type is mandatory, "
875 "specify ptr null to migrate from the obsoleted 2-field form");
876 Type *ETy = STy->getTypeAtIndex(2);
877 Check(ETy->isPointerTy(), "wrong type for intrinsic global variable",
878 &GV);
879 }
880 }
881
// llvm.used/llvm.compiler.used must be arrays of named pointers.
882 if (GV.hasName() && (GV.getName() == "llvm.used" ||
883 GV.getName() == "llvm.compiler.used")) {
885 "invalid linkage for intrinsic global variable", &GV);
887 "invalid uses of intrinsic global variable", &GV);
888
889 if (ArrayType *ATy = dyn_cast<ArrayType>(GVType)) {
890 PointerType *PTy = dyn_cast<PointerType>(ATy->getElementType());
891 Check(PTy, "wrong type for intrinsic global variable", &GV);
892 if (GV.hasInitializer()) {
893 const Constant *Init = GV.getInitializer();
894 const ConstantArray *InitArray = dyn_cast<ConstantArray>(Init);
895 Check(InitArray, "wrong initializer for intrinsic global variable",
896 Init);
897 for (Value *Op : InitArray->operands()) {
898 Value *V = Op->stripPointerCasts();
901 Twine("invalid ") + GV.getName() + " member", V);
902 Check(V->hasName(),
903 Twine("members of ") + GV.getName() + " must be named", V);
904 }
905 }
906 }
907 }
908
909 // Visit any debug info attachments.
911 GV.getMetadata(LLVMContext::MD_dbg, MDs);
912 for (auto *MD : MDs) {
913 if (auto *GVE = dyn_cast<DIGlobalVariableExpression>(MD))
914 visitDIGlobalVariableExpression(*GVE);
915 else
916 CheckDI(false, "!dbg attachment of global variable must be a "
917 "DIGlobalVariableExpression");
918 }
919
920 // Scalable vectors cannot be global variables, since we don't know
921 // the runtime size.
922 Check(!GVType->isScalableTy(), "Globals cannot contain scalable types", &GV);
923
924 // Check if it is or contains a target extension type that disallows being
925 // used as a global.
927 "Global @" + GV.getName() + " has illegal target extension type",
928 GVType);
929
930 if (!GV.hasInitializer()) {
931 visitGlobalValue(GV);
932 return;
933 }
934
935 // Walk any aggregate initializers looking for bitcasts between address spaces
936 visitConstantExprsRecursively(GV.getInitializer());
937
938 visitGlobalValue(GV);
939}
940
941void Verifier::visitAliaseeSubExpr(const GlobalAlias &GA, const Constant &C) {
942 SmallPtrSet<const GlobalAlias*, 4> Visited;
943 Visited.insert(&GA);
944 visitAliaseeSubExpr(Visited, GA, C);
945}
946
// Recursively verify the aliasee expression of \p GA. \p Visited carries the
// aliases already seen on this walk and is used to reject cyclic alias
// chains. Only GlobalAlias chains are followed; other constants have their
// operands scanned, but global initializers are not entered.
// NOTE(review): some lines of this function are not visible in this
// rendering; the checks below are kept verbatim.
void Verifier::visitAliaseeSubExpr(SmallPtrSetImpl<const GlobalAlias*> &Visited,
                                   const GlobalAlias &GA, const Constant &C) {
              cast<GlobalValue>(C).hasAvailableExternallyLinkage(),
          "available_externally alias must point to available_externally "
          "global value",
          &GA);
  }
  if (const auto *GV = dyn_cast<GlobalValue>(&C)) {
      // An alias to a mere declaration would have nothing to bind to.
      Check(!GV->isDeclarationForLinker(), "Alias must point to a definition",
            &GA);
    }

    if (const auto *GA2 = dyn_cast<GlobalAlias>(GV)) {
      // insert() returns false exactly when GA2 was already seen on this
      // walk, i.e. the alias chain loops back on itself.
      Check(Visited.insert(GA2).second, "Aliases cannot form a cycle", &GA);

      Check(!GA2->isInterposable(),
            "Alias cannot point to an interposable alias", &GA);
    } else {
      // Only continue verifying subexpressions of GlobalAliases.
      // Do not recurse into global initializers.
      return;
    }
  }

  if (const auto *CE = dyn_cast<ConstantExpr>(&C))
    visitConstantExprsRecursively(CE);

  // Recurse: follow aliasees of nested aliases, and scan other constant
  // operands in place.
  for (const Use &U : C.operands()) {
    Value *V = &*U;
    if (const auto *GA2 = dyn_cast<GlobalAlias>(V))
      visitAliaseeSubExpr(Visited, GA, *GA2->getAliasee());
    else if (const auto *C2 = dyn_cast<Constant>(V))
      visitAliaseeSubExpr(Visited, GA, *C2);
  }
}
985
// Verify a global alias: its linkage, a non-null aliasee whose type matches
// the alias, the aliasee's form, and finally the aliasee expression itself.
// NOTE(review): the first check's condition line is not visible in this
// rendering; the message text is kept verbatim.
void Verifier::visitGlobalAlias(const GlobalAlias &GA) {
        "Alias should have private, internal, linkonce, weak, linkonce_odr, "
        "weak_odr, external, or available_externally linkage!",
        &GA);
  const Constant *Aliasee = GA.getAliasee();
  Check(Aliasee, "Aliasee cannot be NULL!", &GA);
  Check(GA.getType() == Aliasee->getType(),
        "Alias and aliasee types should match!", &GA);

  Check(isa<GlobalValue>(Aliasee) || isa<ConstantExpr>(Aliasee),
        "Aliasee should be either GlobalValue or ConstantExpr", &GA);

  // Walk the aliasee expression (cycle detection, interposable aliases, ...).
  visitAliaseeSubExpr(GA, *Aliasee);

  // Checks common to all global values.
  visitGlobalValue(GA);
}
1003
// Verify a global ifunc: metadata restrictions, linkage, and that its
// resolver is a defined Function whose value has the expected pointer type.
// NOTE(review): a few lines (the MDs declaration and two check headers) are
// not visible in this rendering; visible code is kept verbatim.
void Verifier::visitGlobalIFunc(const GlobalIFunc &GI) {
  visitGlobalValue(GI);

  GI.getAllMetadata(MDs);
  for (const auto &I : MDs) {
    // Debug-info and profile metadata are rejected on ifuncs.
    CheckDI(I.first != LLVMContext::MD_dbg,
            "an ifunc may not have a !dbg attachment", &GI);
    Check(I.first != LLVMContext::MD_prof,
          "an ifunc may not have a !prof attachment", &GI);
    visitMDNode(*I.second, AreDebugLocsAllowed::No);
  }

      "IFunc should have private, internal, linkonce, weak, linkonce_odr, "
      "weak_odr, or external linkage!",
      &GI);
  // Pierce through ConstantExprs and GlobalAliases and check that the resolver
  // is a Function definition.
  const Function *Resolver = GI.getResolverFunction();
  Check(Resolver, "IFunc must have a Function resolver", &GI);
  Check(!Resolver->isDeclarationForLinker(),
        "IFunc resolver must be a definition", &GI);

  // Check that the immediate resolver operand (prior to any bitcasts) has the
  // correct type.
  const Type *ResolverTy = GI.getResolver()->getType();

      "IFunc resolver must return a pointer", &GI);

  // The resolver value must be a pointer in the ifunc's own address space.
  Check(ResolverTy == PointerType::get(Context, GI.getAddressSpace()),
        "IFunc resolver has incorrect type", &GI);
}
1038
1039void Verifier::visitNamedMDNode(const NamedMDNode &NMD) {
1040 // There used to be various other llvm.dbg.* nodes, but we don't support
1041 // upgrading them and we want to reserve the namespace for future uses.
1042 if (NMD.getName().starts_with("llvm.dbg."))
1043 CheckDI(NMD.getName() == "llvm.dbg.cu",
1044 "unrecognized named metadata node in the llvm.dbg namespace", &NMD);
1045 for (const MDNode *MD : NMD.operands()) {
1046 if (NMD.getName() == "llvm.dbg.cu")
1047 CheckDI(MD && isa<DICompileUnit>(MD), "invalid compile unit", &NMD, MD);
1048
1049 if (!MD)
1050 continue;
1051
1052 visitMDNode(*MD, AreDebugLocsAllowed::Yes);
1053 }
1054}
1055
// Verify a generic metadata node: context, subclass-specific invariants (via
// the generated dispatch), every operand, and finally resolution state.
// NOTE(review): two lines of the llvm.loop.estimated_trip_count check (the
// name comparison and the Count definition) are not visible in this
// rendering; visible code is kept verbatim.
void Verifier::visitMDNode(const MDNode &MD, AreDebugLocsAllowed AllowLocs) {
  // Only visit each node once. Metadata can be mutually recursive, so this
  // avoids infinite recursion here, as well as being an optimization.
  if (!MDNodes.insert(&MD).second)
    return;

  Check(&MD.getContext() == &Context,
        "MDNode context does not match Module context!", &MD);

  // Dispatch to the specialized visitor for each concrete MDNode subclass.
  switch (MD.getMetadataID()) {
  default:
    llvm_unreachable("Invalid MDNode subclass");
  case Metadata::MDTupleKind:
    break;
#define HANDLE_SPECIALIZED_MDNODE_LEAF(CLASS)                                  \
  case Metadata::CLASS##Kind:                                                  \
    visit##CLASS(cast<CLASS>(MD));                                             \
    break;
#include "llvm/IR/Metadata.def"
  }

  for (const Metadata *Op : MD.operands()) {
    if (!Op)
      continue;
    // Function-local metadata can never appear in global metadata.
    Check(!isa<LocalAsMetadata>(Op), "Invalid operand for global metadata!",
          &MD, Op);
    CheckDI(!isa<DILocation>(Op) || AllowLocs == AreDebugLocsAllowed::Yes,
            "DILocation not allowed within this metadata node", &MD, Op);
    if (auto *N = dyn_cast<MDNode>(Op)) {
      visitMDNode(*N, AllowLocs);
      continue;
    }
    if (auto *V = dyn_cast<ValueAsMetadata>(Op)) {
      visitValueAsMetadata(*V, nullptr);
      continue;
    }
  }

  // Check llvm.loop.estimated_trip_count.
  if (MD.getNumOperands() > 0 &&
    Check(MD.getNumOperands() == 2, "Expected two operands", &MD);
    Check(Count && Count->getType()->isIntegerTy() &&
              cast<IntegerType>(Count->getType())->getBitWidth() <= 32,
          "Expected second operand to be an integer constant of type i32 or "
          "smaller",
          &MD);
  }

  // Check these last, so we diagnose problems in operands first.
  Check(!MD.isTemporary(), "Expected no forward declarations!", &MD);
  Check(MD.isResolved(), "All nodes should be resolved!", &MD);
}
1110
1111void Verifier::visitValueAsMetadata(const ValueAsMetadata &MD, Function *F) {
1112 Check(MD.getValue(), "Expected valid value", &MD);
1113 Check(!MD.getValue()->getType()->isMetadataTy(),
1114 "Unexpected metadata round-trip through values", &MD, MD.getValue());
1115
1116 auto *L = dyn_cast<LocalAsMetadata>(&MD);
1117 if (!L)
1118 return;
1119
1120 Check(F, "function-local metadata used outside a function", L);
1121
1122 // If this was an instruction, bb, or argument, verify that it is in the
1123 // function that we expect.
1124 Function *ActualF = nullptr;
1125 if (Instruction *I = dyn_cast<Instruction>(L->getValue())) {
1126 Check(I->getParent(), "function-local metadata not in basic block", L, I);
1127 ActualF = I->getParent()->getParent();
1128 } else if (BasicBlock *BB = dyn_cast<BasicBlock>(L->getValue()))
1129 ActualF = BB->getParent();
1130 else if (Argument *A = dyn_cast<Argument>(L->getValue()))
1131 ActualF = A->getParent();
1132 assert(ActualF && "Unimplemented function local metadata case!");
1133
1134 Check(ActualF == F, "function-local metadata used in wrong function", L);
1135}
1136
1137void Verifier::visitDIArgList(const DIArgList &AL, Function *F) {
1138 for (const ValueAsMetadata *VAM : AL.getArgs())
1139 visitValueAsMetadata(*VAM, F);
1140}
1141
1142void Verifier::visitMetadataAsValue(const MetadataAsValue &MDV, Function *F) {
1143 Metadata *MD = MDV.getMetadata();
1144 if (auto *N = dyn_cast<MDNode>(MD)) {
1145 visitMDNode(*N, AreDebugLocsAllowed::No);
1146 return;
1147 }
1148
1149 // Only visit each node once. Metadata can be mutually recursive, so this
1150 // avoids infinite recursion here, as well as being an optimization.
1151 if (!MDNodes.insert(MD).second)
1152 return;
1153
1154 if (auto *V = dyn_cast<ValueAsMetadata>(MD))
1155 visitValueAsMetadata(*V, F);
1156
1157 if (auto *AL = dyn_cast<DIArgList>(MD))
1158 visitDIArgList(*AL, F);
1159}
1160
1161static bool isType(const Metadata *MD) { return !MD || isa<DIType>(MD); }
1162static bool isScope(const Metadata *MD) { return !MD || isa<DIScope>(MD); }
1163static bool isDINode(const Metadata *MD) { return !MD || isa<DINode>(MD); }
1164static bool isMDTuple(const Metadata *MD) { return !MD || isa<MDTuple>(MD); }
1165
1166void Verifier::visitDILocation(const DILocation &N) {
1167 CheckDI(N.getRawScope() && isa<DILocalScope>(N.getRawScope()),
1168 "location requires a valid scope", &N, N.getRawScope());
1169 if (auto *IA = N.getRawInlinedAt())
1170 CheckDI(isa<DILocation>(IA), "inlined-at should be a location", &N, IA);
1171 if (auto *SP = dyn_cast<DISubprogram>(N.getRawScope()))
1172 CheckDI(SP->isDefinition(), "scope points into the type hierarchy", &N);
1173}
1174
1175void Verifier::visitGenericDINode(const GenericDINode &N) {
1176 CheckDI(N.getTag(), "invalid tag", &N);
1177}
1178
1179void Verifier::visitDIScope(const DIScope &N) {
1180 if (auto *F = N.getRawFile())
1181 CheckDI(isa<DIFile>(F), "invalid file", &N, F);
1182}
1183
// Verify a DISubrangeType: tag, base type, and the allowed metadata forms of
// its lower/upper bounds, stride, and bias operands.
// NOTE(review): the SizeInBits check's condition line is not visible in this
// rendering; visible code is kept verbatim.
void Verifier::visitDISubrangeType(const DISubrangeType &N) {
  CheckDI(N.getTag() == dwarf::DW_TAG_subrange_type, "invalid tag", &N);
  auto *BaseType = N.getRawBaseType();
  CheckDI(!BaseType || isType(BaseType), "BaseType must be a type");
  auto *LBound = N.getRawLowerBound();
  CheckDI(!LBound || isa<ConstantAsMetadata>(LBound) ||
              isa<DIVariable>(LBound) || isa<DIExpression>(LBound) ||
              isa<DIDerivedType>(LBound),
          "LowerBound must be signed constant or DIVariable or DIExpression or "
          "DIDerivedType",
          &N);
  auto *UBound = N.getRawUpperBound();
  CheckDI(!UBound || isa<ConstantAsMetadata>(UBound) ||
              isa<DIVariable>(UBound) || isa<DIExpression>(UBound) ||
              isa<DIDerivedType>(UBound),
          "UpperBound must be signed constant or DIVariable or DIExpression or "
          "DIDerivedType",
          &N);
  auto *Stride = N.getRawStride();
  CheckDI(!Stride || isa<ConstantAsMetadata>(Stride) ||
              isa<DIVariable>(Stride) || isa<DIExpression>(Stride),
          "Stride must be signed constant or DIVariable or DIExpression", &N);
  auto *Bias = N.getRawBias();
  CheckDI(!Bias || isa<ConstantAsMetadata>(Bias) || isa<DIVariable>(Bias) ||
              isa<DIExpression>(Bias),
          "Bias must be signed constant or DIVariable or DIExpression", &N);
  // Subrange types currently only support constant size.
  auto *Size = N.getRawSizeInBits();
          "SizeInBits must be a constant");
}
1215
// Verify a DISubrange: tag, mutual exclusion of count/upperBound, and the
// allowed metadata forms of count, lower bound, upper bound, and stride.
// NOTE(review): the first line of the constant-count check is not visible in
// this rendering; visible code is kept verbatim.
void Verifier::visitDISubrange(const DISubrange &N) {
  CheckDI(N.getTag() == dwarf::DW_TAG_subrange_type, "invalid tag", &N);
  // count and upperBound are alternative ways to bound the range; at most one
  // may be present.
  CheckDI(!N.getRawCountNode() || !N.getRawUpperBound(),
          "Subrange can have any one of count or upperBound", &N);
  auto *CBound = N.getRawCountNode();
  CheckDI(!CBound || isa<ConstantAsMetadata>(CBound) ||
              isa<DIVariable>(CBound) || isa<DIExpression>(CBound),
          "Count must be signed constant or DIVariable or DIExpression", &N);
  auto Count = N.getCount();
  // A constant count must be >= -1 (-1 encodes an empty/unknown extent).
              cast<ConstantInt *>(Count)->getSExtValue() >= -1,
          "invalid subrange count", &N);
  auto *LBound = N.getRawLowerBound();
  CheckDI(!LBound || isa<ConstantAsMetadata>(LBound) ||
              isa<DIVariable>(LBound) || isa<DIExpression>(LBound),
          "LowerBound must be signed constant or DIVariable or DIExpression",
          &N);
  auto *UBound = N.getRawUpperBound();
  CheckDI(!UBound || isa<ConstantAsMetadata>(UBound) ||
              isa<DIVariable>(UBound) || isa<DIExpression>(UBound),
          "UpperBound must be signed constant or DIVariable or DIExpression",
          &N);
  auto *Stride = N.getRawStride();
  CheckDI(!Stride || isa<ConstantAsMetadata>(Stride) ||
              isa<DIVariable>(Stride) || isa<DIExpression>(Stride),
          "Stride must be signed constant or DIVariable or DIExpression", &N);
}
1243
1244void Verifier::visitDIGenericSubrange(const DIGenericSubrange &N) {
1245 CheckDI(N.getTag() == dwarf::DW_TAG_generic_subrange, "invalid tag", &N);
1246 CheckDI(!N.getRawCountNode() || !N.getRawUpperBound(),
1247 "GenericSubrange can have any one of count or upperBound", &N);
1248 auto *CBound = N.getRawCountNode();
1249 CheckDI(!CBound || isa<DIVariable>(CBound) || isa<DIExpression>(CBound),
1250 "Count must be signed constant or DIVariable or DIExpression", &N);
1251 auto *LBound = N.getRawLowerBound();
1252 CheckDI(LBound, "GenericSubrange must contain lowerBound", &N);
1253 CheckDI(isa<DIVariable>(LBound) || isa<DIExpression>(LBound),
1254 "LowerBound must be signed constant or DIVariable or DIExpression",
1255 &N);
1256 auto *UBound = N.getRawUpperBound();
1257 CheckDI(!UBound || isa<DIVariable>(UBound) || isa<DIExpression>(UBound),
1258 "UpperBound must be signed constant or DIVariable or DIExpression",
1259 &N);
1260 auto *Stride = N.getRawStride();
1261 CheckDI(Stride, "GenericSubrange must contain stride", &N);
1262 CheckDI(isa<DIVariable>(Stride) || isa<DIExpression>(Stride),
1263 "Stride must be signed constant or DIVariable or DIExpression", &N);
1264}
1265
1266void Verifier::visitDIEnumerator(const DIEnumerator &N) {
1267 CheckDI(N.getTag() == dwarf::DW_TAG_enumerator, "invalid tag", &N);
1268}
1269
// Verify a DIBasicType: one of the basic-type tags, plus a constant size.
// NOTE(review): the SizeInBits check's condition line is not visible in this
// rendering; visible code is kept verbatim.
void Verifier::visitDIBasicType(const DIBasicType &N) {
  CheckDI(N.getTag() == dwarf::DW_TAG_base_type ||
              N.getTag() == dwarf::DW_TAG_unspecified_type ||
              N.getTag() == dwarf::DW_TAG_string_type,
          "invalid tag", &N);
  // Basic types currently only support constant size.
  auto *Size = N.getRawSizeInBits();
          "SizeInBits must be a constant");
}
1280
// Verify a DIFixedPointType: a base type with a fixed-point encoding, and
// kind-dependent constraints on factor/numerator/denominator.
// NOTE(review): several condition lines are not visible in this rendering;
// visible code is kept verbatim.
void Verifier::visitDIFixedPointType(const DIFixedPointType &N) {
  // Fixed-point types are a restricted form of basic type.
  visitDIBasicType(N);

  CheckDI(N.getTag() == dwarf::DW_TAG_base_type, "invalid tag", &N);
  CheckDI(N.getEncoding() == dwarf::DW_ATE_signed_fixed ||
              N.getEncoding() == dwarf::DW_ATE_unsigned_fixed,
          "invalid encoding", &N);
          "invalid kind", &N);
          N.getFactorRaw() == 0,
          "factor should be 0 for rationals", &N);
          (N.getNumeratorRaw() == 0 && N.getDenominatorRaw() == 0),
          "numerator and denominator should be 0 for non-rationals", &N);
}
1299
1300void Verifier::visitDIStringType(const DIStringType &N) {
1301 CheckDI(N.getTag() == dwarf::DW_TAG_string_type, "invalid tag", &N);
1302 CheckDI(!(N.isBigEndian() && N.isLittleEndian()), "has conflicting flags",
1303 &N);
1304}
1305
// Verify a DIDerivedType: an allowed tag, tag-specific extraData forms,
// set-type base-type restrictions, scope/base-type references, DWARF
// address-space applicability, and the size operand.
// NOTE(review): the dyn_casts feeding the set-type check and the SizeInBits
// check's condition lines are not visible in this rendering; visible code is
// kept verbatim.
void Verifier::visitDIDerivedType(const DIDerivedType &N) {
  // Common scope checks.
  visitDIScope(N);

  CheckDI(N.getTag() == dwarf::DW_TAG_typedef ||
              N.getTag() == dwarf::DW_TAG_pointer_type ||
              N.getTag() == dwarf::DW_TAG_ptr_to_member_type ||
              N.getTag() == dwarf::DW_TAG_reference_type ||
              N.getTag() == dwarf::DW_TAG_rvalue_reference_type ||
              N.getTag() == dwarf::DW_TAG_const_type ||
              N.getTag() == dwarf::DW_TAG_immutable_type ||
              N.getTag() == dwarf::DW_TAG_volatile_type ||
              N.getTag() == dwarf::DW_TAG_restrict_type ||
              N.getTag() == dwarf::DW_TAG_atomic_type ||
              N.getTag() == dwarf::DW_TAG_LLVM_ptrauth_type ||
              N.getTag() == dwarf::DW_TAG_member ||
              (N.getTag() == dwarf::DW_TAG_variable && N.isStaticMember()) ||
              N.getTag() == dwarf::DW_TAG_inheritance ||
              N.getTag() == dwarf::DW_TAG_friend ||
              N.getTag() == dwarf::DW_TAG_set_type ||
              N.getTag() == dwarf::DW_TAG_template_alias,
          "invalid tag", &N);
  // The meaning of extraData depends on the tag.
  if (N.getTag() == dwarf::DW_TAG_ptr_to_member_type) {
    CheckDI(isType(N.getRawExtraData()), "invalid pointer to member type", &N,
            N.getRawExtraData());
  } else if (N.getTag() == dwarf::DW_TAG_template_alias) {
    CheckDI(isMDTuple(N.getRawExtraData()), "invalid template parameters", &N,
            N.getRawExtraData());
  } else if (N.getTag() == dwarf::DW_TAG_inheritance ||
             N.getTag() == dwarf::DW_TAG_member ||
             N.getTag() == dwarf::DW_TAG_variable) {
    auto *ExtraData = N.getRawExtraData();
    auto IsValidExtraData = [&]() {
      if (ExtraData == nullptr)
        return true;
      if (isa<ConstantAsMetadata>(ExtraData) || isa<MDString>(ExtraData) ||
          isa<DIObjCProperty>(ExtraData))
        return true;
      // A tuple is accepted only as a single-element wrapper around a
      // constant.
      if (auto *Tuple = dyn_cast<MDTuple>(ExtraData)) {
        if (Tuple->getNumOperands() != 1)
          return false;
        return isa_and_nonnull<ConstantAsMetadata>(Tuple->getOperand(0).get());
      }
      return false;
    };
    CheckDI(IsValidExtraData(),
            "extraData must be ConstantAsMetadata, MDString, DIObjCProperty, "
            "or MDTuple with single ConstantAsMetadata operand",
            &N, ExtraData);
  }

  if (N.getTag() == dwarf::DW_TAG_set_type) {
    if (auto *T = N.getRawBaseType()) {
      CheckDI(
          (Enum && Enum->getTag() == dwarf::DW_TAG_enumeration_type) ||
              (Subrange && Subrange->getTag() == dwarf::DW_TAG_subrange_type) ||
              (Basic && (Basic->getEncoding() == dwarf::DW_ATE_unsigned ||
                         Basic->getEncoding() == dwarf::DW_ATE_signed ||
                         Basic->getEncoding() == dwarf::DW_ATE_unsigned_char ||
                         Basic->getEncoding() == dwarf::DW_ATE_signed_char ||
                         Basic->getEncoding() == dwarf::DW_ATE_boolean)),
          "invalid set base type", &N, T);
    }
  }

  CheckDI(isScope(N.getRawScope()), "invalid scope", &N, N.getRawScope());
  CheckDI(isType(N.getRawBaseType()), "invalid base type", &N,
          N.getRawBaseType());

  // DW_AT_address_class only makes sense on pointer-like tags.
  if (N.getDWARFAddressSpace()) {
    CheckDI(N.getTag() == dwarf::DW_TAG_pointer_type ||
                N.getTag() == dwarf::DW_TAG_reference_type ||
                N.getTag() == dwarf::DW_TAG_rvalue_reference_type,
            "DWARF address space only applies to pointer or reference types",
            &N);
  }

  auto *Size = N.getRawSizeInBits();
          "SizeInBits must be a constant or DIVariable or DIExpression");
}
1391
1392/// Detect mutually exclusive flags.
1393static bool hasConflictingReferenceFlags(unsigned Flags) {
1394 return ((Flags & DINode::FlagLValueReference) &&
1395 (Flags & DINode::FlagRValueReference)) ||
1396 ((Flags & DINode::FlagTypePassByValue) &&
1397 (Flags & DINode::FlagTypePassByReference));
1398}
1399
1400void Verifier::visitTemplateParams(const MDNode &N, const Metadata &RawParams) {
1401 auto *Params = dyn_cast<MDTuple>(&RawParams);
1402 CheckDI(Params, "invalid template params", &N, &RawParams);
1403 for (Metadata *Op : Params->operands()) {
1404 CheckDI(Op && isa<DITemplateParameter>(Op), "invalid template parameter",
1405 &N, Params, Op);
1406 }
1407}
1408
// Verify a DICompositeType: allowed tags, element/holder references,
// flag sanity, vector shape, and array-only fields (dataLocation,
// associated, allocated, rank).
// NOTE(review): the reference-flags check's condition line and the
// SizeInBits check's condition lines are not visible in this rendering;
// visible code is kept verbatim.
void Verifier::visitDICompositeType(const DICompositeType &N) {
  // Common scope checks.
  visitDIScope(N);

  CheckDI(N.getTag() == dwarf::DW_TAG_array_type ||
              N.getTag() == dwarf::DW_TAG_structure_type ||
              N.getTag() == dwarf::DW_TAG_union_type ||
              N.getTag() == dwarf::DW_TAG_enumeration_type ||
              N.getTag() == dwarf::DW_TAG_class_type ||
              N.getTag() == dwarf::DW_TAG_variant_part ||
              N.getTag() == dwarf::DW_TAG_variant ||
              N.getTag() == dwarf::DW_TAG_namelist,
          "invalid tag", &N);

  CheckDI(isScope(N.getRawScope()), "invalid scope", &N, N.getRawScope());
  CheckDI(isType(N.getRawBaseType()), "invalid base type", &N,
          N.getRawBaseType());

  CheckDI(!N.getRawElements() || isa<MDTuple>(N.getRawElements()),
          "invalid composite elements", &N, N.getRawElements());
  CheckDI(isType(N.getRawVTableHolder()), "invalid vtable holder", &N,
          N.getRawVTableHolder());
          "invalid reference flags", &N);
  // Legacy flag bit; rejected outright.
  unsigned DIBlockByRefStruct = 1 << 4;
  CheckDI((N.getFlags() & DIBlockByRefStruct) == 0,
          "DIBlockByRefStruct on DICompositeType is no longer supported", &N);
  CheckDI(llvm::all_of(N.getElements(), [](const DINode *N) { return N; }),
          "DISubprogram contains null entry in `elements` field", &N);

  if (N.isVector()) {
    // A vector type is encoded as exactly one subrange element.
    const DINodeArray Elements = N.getElements();
    CheckDI(Elements.size() == 1 &&
                Elements[0]->getTag() == dwarf::DW_TAG_subrange_type,
            "invalid vector, expected one element of type subrange", &N);
  }

  if (auto *Params = N.getRawTemplateParams())
    visitTemplateParams(N, *Params);

  if (auto *D = N.getRawDiscriminator()) {
    CheckDI(isa<DIDerivedType>(D) && N.getTag() == dwarf::DW_TAG_variant_part,
            "discriminator can only appear on variant part");
  }

  // The Fortran array descriptor fields are restricted to array types.
  if (N.getRawDataLocation()) {
    CheckDI(N.getTag() == dwarf::DW_TAG_array_type,
            "dataLocation can only appear in array type");
  }

  if (N.getRawAssociated()) {
    CheckDI(N.getTag() == dwarf::DW_TAG_array_type,
            "associated can only appear in array type");
  }

  if (N.getRawAllocated()) {
    CheckDI(N.getTag() == dwarf::DW_TAG_array_type,
            "allocated can only appear in array type");
  }

  if (N.getRawRank()) {
    CheckDI(N.getTag() == dwarf::DW_TAG_array_type,
            "rank can only appear in array type");
  }

  if (N.getTag() == dwarf::DW_TAG_array_type) {
    CheckDI(N.getRawBaseType(), "array types must have a base type", &N);
  }

  auto *Size = N.getRawSizeInBits();
          "SizeInBits must be a constant or DIVariable or DIExpression");
}
1483
// Verify a DISubroutineType: tag, a tuple of type references (the return
// type followed by parameter types), and flag sanity.
// NOTE(review): the reference-flags check's condition line is not visible in
// this rendering; visible code is kept verbatim.
void Verifier::visitDISubroutineType(const DISubroutineType &N) {
  CheckDI(N.getTag() == dwarf::DW_TAG_subroutine_type, "invalid tag", &N);
  if (auto *Types = N.getRawTypeArray()) {
    CheckDI(isa<MDTuple>(Types), "invalid composite elements", &N, Types);
    for (Metadata *Ty : N.getTypeArray()->operands()) {
      CheckDI(isType(Ty), "invalid subroutine type ref", &N, Types, Ty);
    }
  }
          "invalid reference flags", &N);
}
1495
1496void Verifier::visitDIFile(const DIFile &N) {
1497 CheckDI(N.getTag() == dwarf::DW_TAG_file_type, "invalid tag", &N);
1498 std::optional<DIFile::ChecksumInfo<StringRef>> Checksum = N.getChecksum();
1499 if (Checksum) {
1500 CheckDI(Checksum->Kind <= DIFile::ChecksumKind::CSK_Last,
1501 "invalid checksum kind", &N);
1502 size_t Size;
1503 switch (Checksum->Kind) {
1504 case DIFile::CSK_MD5:
1505 Size = 32;
1506 break;
1507 case DIFile::CSK_SHA1:
1508 Size = 40;
1509 break;
1510 case DIFile::CSK_SHA256:
1511 Size = 64;
1512 break;
1513 }
1514 CheckDI(Checksum->Value.size() == Size, "invalid checksum length", &N);
1515 CheckDI(Checksum->Value.find_if_not(llvm::isHexDigit) == StringRef::npos,
1516 "invalid checksum", &N);
1517 }
1518}
1519
// Verify a DICompileUnit: distinctness, file, emission kind, and each of the
// operand lists (enums, retained types, globals, imports, macros).
// NOTE(review): the enum dyn_cast line and the global-variable check's
// condition line are not visible in this rendering; visible code is kept
// verbatim.
void Verifier::visitDICompileUnit(const DICompileUnit &N) {
  CheckDI(N.isDistinct(), "compile units must be distinct", &N);
  CheckDI(N.getTag() == dwarf::DW_TAG_compile_unit, "invalid tag", &N);

  // Don't bother verifying the compilation directory or producer string
  // as those could be empty.
  CheckDI(N.getRawFile() && isa<DIFile>(N.getRawFile()), "invalid file", &N,
          N.getRawFile());
  CheckDI(!N.getFile()->getFilename().empty(), "invalid filename", &N,
          N.getFile());

  CheckDI((N.getEmissionKind() <= DICompileUnit::LastEmissionKind),
          "invalid emission kind", &N);

  if (auto *Array = N.getRawEnumTypes()) {
    CheckDI(isa<MDTuple>(Array), "invalid enum list", &N, Array);
    for (Metadata *Op : N.getEnumTypes()->operands()) {
      CheckDI(Enum && Enum->getTag() == dwarf::DW_TAG_enumeration_type,
              "invalid enum type", &N, N.getEnumTypes(), Op);
    }
  }
  if (auto *Array = N.getRawRetainedTypes()) {
    CheckDI(isa<MDTuple>(Array), "invalid retained type list", &N, Array);
    for (Metadata *Op : N.getRetainedTypes()->operands()) {
      // Retained types may be types or subprogram declarations (but not
      // subprogram definitions).
      CheckDI(
          Op && (isa<DIType>(Op) || (isa<DISubprogram>(Op) &&
                                     !cast<DISubprogram>(Op)->isDefinition())),
          "invalid retained type", &N, Op);
    }
  }
  if (auto *Array = N.getRawGlobalVariables()) {
    CheckDI(isa<MDTuple>(Array), "invalid global variable list", &N, Array);
    for (Metadata *Op : N.getGlobalVariables()->operands()) {
              "invalid global variable ref", &N, Op);
    }
  }
  if (auto *Array = N.getRawImportedEntities()) {
    CheckDI(isa<MDTuple>(Array), "invalid imported entity list", &N, Array);
    for (Metadata *Op : N.getImportedEntities()->operands()) {
      CheckDI(Op && isa<DIImportedEntity>(Op), "invalid imported entity ref",
              &N, Op);
    }
  }
  if (auto *Array = N.getRawMacros()) {
    CheckDI(isa<MDTuple>(Array), "invalid macro list", &N, Array);
    for (Metadata *Op : N.getMacros()->operands()) {
      CheckDI(Op && isa<DIMacroNode>(Op), "invalid macro ref", &N, Op);
    }
  }
  // Record this compile unit for later cross-checking.
  CUVisited.insert(&N);
}
1573
// Verify a DISubprogram: tag, scope/file/type references, retained nodes,
// and the definition-vs-declaration invariants (unit, distinctness, ODR
// nesting, thrown types, DIFlagAllCallsDescribed).
// NOTE(review): the retained-node scope argument line and the
// reference-flags check's condition line are not visible in this rendering;
// visible code is kept verbatim.
void Verifier::visitDISubprogram(const DISubprogram &N) {
  CheckDI(N.getTag() == dwarf::DW_TAG_subprogram, "invalid tag", &N);
  CheckDI(isScope(N.getRawScope()), "invalid scope", &N, N.getRawScope());
  if (auto *F = N.getRawFile())
    CheckDI(isa<DIFile>(F), "invalid file", &N, F);
  else
    CheckDI(N.getLine() == 0, "line specified with no file", &N, N.getLine());
  if (auto *T = N.getRawType())
    CheckDI(isa<DISubroutineType>(T), "invalid subroutine type", &N, T);
  CheckDI(isType(N.getRawContainingType()), "invalid containing type", &N,
          N.getRawContainingType());
  if (auto *Params = N.getRawTemplateParams())
    visitTemplateParams(N, *Params);
  // A declaration reference must itself be a non-definition subprogram.
  if (auto *S = N.getRawDeclaration())
    CheckDI(isa<DISubprogram>(S) && !cast<DISubprogram>(S)->isDefinition(),
            "invalid subprogram declaration", &N, S);
  if (auto *RawNode = N.getRawRetainedNodes()) {
    auto *Node = dyn_cast<MDTuple>(RawNode);
    CheckDI(Node, "invalid retained nodes list", &N, RawNode);
    for (Metadata *Op : Node->operands()) {
      CheckDI(Op, "nullptr in retained nodes", &N, Node);

      // visitRetainedNode dispatches per retained-node kind; the final
      // callback is the fallback for unsupported kinds.
      auto True = [](const Metadata *) { return true; };
      auto False = [](const Metadata *) { return false; };
      bool IsTypeCorrect =
          DISubprogram::visitRetainedNode<bool>(Op, True, True, True, False);
      CheckDI(IsTypeCorrect,
              "invalid retained nodes, expected DILocalVariable, DILabel or "
              "DIImportedEntity",
              &N, Node, Op);

      // Every retained node must live in a local scope belonging to this
      // subprogram.
      auto *RetainedNode = cast<DINode>(Op);
      auto *RetainedNodeScope = dyn_cast_or_null<DILocalScope>(
      CheckDI(RetainedNodeScope,
              "invalid retained nodes, retained node is not local", &N, Node,
              RetainedNode);
      CheckDI(
          RetainedNodeScope->getSubprogram() == &N,
          "invalid retained nodes, retained node does not belong to subprogram",
          &N, Node, RetainedNode, RetainedNodeScope);
    }
  }
          "invalid reference flags", &N);

  auto *Unit = N.getRawUnit();
  if (N.isDefinition()) {
    // Subprogram definitions (not part of the type hierarchy).
    CheckDI(N.isDistinct(), "subprogram definitions must be distinct", &N);
    CheckDI(Unit, "subprogram definitions must have a compile unit", &N);
    CheckDI(isa<DICompileUnit>(Unit), "invalid unit type", &N, Unit);
    // There's no good way to cross the CU boundary to insert a nested
    // DISubprogram definition in one CU into a type defined in another CU.
    auto *CT = dyn_cast_or_null<DICompositeType>(N.getRawScope());
    if (CT && CT->getRawIdentifier() &&
        M.getContext().isODRUniquingDebugTypes())
      CheckDI(N.getDeclaration(),
              "definition subprograms cannot be nested within DICompositeType "
              "when enabling ODR",
              &N);
  } else {
    // Subprogram declarations (part of the type hierarchy).
    CheckDI(!Unit, "subprogram declarations must not have a compile unit", &N);
    CheckDI(!N.getRawDeclaration(),
            "subprogram declaration must not have a declaration field");
  }

  if (auto *RawThrownTypes = N.getRawThrownTypes()) {
    auto *ThrownTypes = dyn_cast<MDTuple>(RawThrownTypes);
    CheckDI(ThrownTypes, "invalid thrown types list", &N, RawThrownTypes);
    for (Metadata *Op : ThrownTypes->operands())
      CheckDI(Op && isa<DIType>(Op), "invalid thrown type", &N, ThrownTypes,
              Op);
  }

  if (N.areAllCallsDescribed())
    CheckDI(N.isDefinition(),
            "DIFlagAllCallsDescribed must be attached to a definition");
}
1654
1655void Verifier::visitDILexicalBlockBase(const DILexicalBlockBase &N) {
1656 CheckDI(N.getTag() == dwarf::DW_TAG_lexical_block, "invalid tag", &N);
1657 CheckDI(N.getRawScope() && isa<DILocalScope>(N.getRawScope()),
1658 "invalid local scope", &N, N.getRawScope());
1659 if (auto *SP = dyn_cast<DISubprogram>(N.getRawScope()))
1660 CheckDI(SP->isDefinition(), "scope points into the type hierarchy", &N);
1661}
1662
1663void Verifier::visitDILexicalBlock(const DILexicalBlock &N) {
1664 visitDILexicalBlockBase(N);
1665
1666 CheckDI(N.getLine() || !N.getColumn(),
1667 "cannot have column info without line info", &N);
1668}
1669
1670void Verifier::visitDILexicalBlockFile(const DILexicalBlockFile &N) {
1671 visitDILexicalBlockBase(N);
1672}
1673
1674void Verifier::visitDICommonBlock(const DICommonBlock &N) {
1675 CheckDI(N.getTag() == dwarf::DW_TAG_common_block, "invalid tag", &N);
1676 if (auto *S = N.getRawScope())
1677 CheckDI(isa<DIScope>(S), "invalid scope ref", &N, S);
1678 if (auto *S = N.getRawDecl())
1679 CheckDI(isa<DIGlobalVariable>(S), "invalid declaration", &N, S);
1680}
1681
1682void Verifier::visitDINamespace(const DINamespace &N) {
1683 CheckDI(N.getTag() == dwarf::DW_TAG_namespace, "invalid tag", &N);
1684 if (auto *S = N.getRawScope())
1685 CheckDI(isa<DIScope>(S), "invalid scope ref", &N, S);
1686}
1687
1688void Verifier::visitDIMacro(const DIMacro &N) {
1689 CheckDI(N.getMacinfoType() == dwarf::DW_MACINFO_define ||
1690 N.getMacinfoType() == dwarf::DW_MACINFO_undef,
1691 "invalid macinfo type", &N);
1692 CheckDI(!N.getName().empty(), "anonymous macro", &N);
1693 if (!N.getValue().empty()) {
1694 assert(N.getValue().data()[0] != ' ' && "Macro value has a space prefix");
1695 }
1696}
1697
1698void Verifier::visitDIMacroFile(const DIMacroFile &N) {
1699 CheckDI(N.getMacinfoType() == dwarf::DW_MACINFO_start_file,
1700 "invalid macinfo type", &N);
1701 if (auto *F = N.getRawFile())
1702 CheckDI(isa<DIFile>(F), "invalid file", &N, F);
1703
1704 if (auto *Array = N.getRawElements()) {
1705 CheckDI(isa<MDTuple>(Array), "invalid macro list", &N, Array);
1706 for (Metadata *Op : N.getElements()->operands()) {
1707 CheckDI(Op && isa<DIMacroNode>(Op), "invalid macro ref", &N, Op);
1708 }
1709 }
1710}
1711
1712void Verifier::visitDIModule(const DIModule &N) {
1713 CheckDI(N.getTag() == dwarf::DW_TAG_module, "invalid tag", &N);
1714 CheckDI(!N.getName().empty(), "anonymous module", &N);
1715}
1716
1717void Verifier::visitDITemplateParameter(const DITemplateParameter &N) {
1718 CheckDI(isType(N.getRawType()), "invalid type ref", &N, N.getRawType());
1719}
1720
1721void Verifier::visitDITemplateTypeParameter(const DITemplateTypeParameter &N) {
1722 visitDITemplateParameter(N);
1723
1724 CheckDI(N.getTag() == dwarf::DW_TAG_template_type_parameter, "invalid tag",
1725 &N);
1726}
1727
1728void Verifier::visitDITemplateValueParameter(
1729 const DITemplateValueParameter &N) {
1730 visitDITemplateParameter(N);
1731
1732 CheckDI(N.getTag() == dwarf::DW_TAG_template_value_parameter ||
1733 N.getTag() == dwarf::DW_TAG_GNU_template_template_param ||
1734 N.getTag() == dwarf::DW_TAG_GNU_template_parameter_pack,
1735 "invalid tag", &N);
1736}
1737
1738void Verifier::visitDIVariable(const DIVariable &N) {
1739 if (auto *S = N.getRawScope())
1740 CheckDI(isa<DIScope>(S), "invalid scope", &N, S);
1741 if (auto *F = N.getRawFile())
1742 CheckDI(isa<DIFile>(F), "invalid file", &N, F);
1743}
1744
// Verify a DIGlobalVariable: common variable checks, tag, type reference,
// and (for definitions) a required type.
// NOTE(review): the static-data-member check's condition line is not visible
// in this rendering; visible code is kept verbatim.
void Verifier::visitDIGlobalVariable(const DIGlobalVariable &N) {
  // Checks common to all variables.
  visitDIVariable(N);

  CheckDI(N.getTag() == dwarf::DW_TAG_variable, "invalid tag", &N);
  CheckDI(isType(N.getRawType()), "invalid type ref", &N, N.getRawType());
  // Check only if the global variable is not an extern
  if (N.isDefinition())
    CheckDI(N.getType(), "missing global variable type", &N);
  if (auto *Member = N.getRawStaticDataMemberDeclaration()) {
            "invalid static data member declaration", &N, Member);
  }
}
1759
// Verify a DILocalVariable: unlike globals, a local variable *must* have a
// scope, and that scope must be a DILocalScope. Its type may not be a
// subroutine type (functions are not first-class local variables).
void Verifier::visitDILocalVariable(const DILocalVariable &N) {
  // Checks common to all variables.
  visitDIVariable(N);

  CheckDI(isType(N.getRawType()), "invalid type ref", &N, N.getRawType());
  CheckDI(N.getTag() == dwarf::DW_TAG_variable, "invalid tag", &N);
  CheckDI(N.getRawScope() && isa<DILocalScope>(N.getRawScope()),
          "local variable requires a valid scope", &N, N.getRawScope());
  if (auto Ty = N.getType())
    CheckDI(!isa<DISubroutineType>(Ty), "invalid type", &N, N.getType());
}
1771
// Verify a DIAssignID node: it is a bare marker linking stores to
// dbg.assign records, so it must be operand-free and distinct (uniquing
// would incorrectly merge unrelated assignments).
void Verifier::visitDIAssignID(const DIAssignID &N) {
  CheckDI(!N.getNumOperands(), "DIAssignID has no arguments", &N);
  CheckDI(N.isDistinct(), "DIAssignID must be distinct", &N);
}
1776
// Verify a DILabel: optional file must be a DIFile, the tag must be
// DW_TAG_label, and — like local variables — a label requires a valid
// DILocalScope.
void Verifier::visitDILabel(const DILabel &N) {
  if (auto *S = N.getRawScope())
    CheckDI(isa<DIScope>(S), "invalid scope", &N, S);
  if (auto *F = N.getRawFile())
    CheckDI(isa<DIFile>(F), "invalid file", &N, F);

  CheckDI(N.getTag() == dwarf::DW_TAG_label, "invalid tag", &N);
  CheckDI(N.getRawScope() && isa<DILocalScope>(N.getRawScope()),
          "label requires a valid scope", &N, N.getRawScope());
}
1787
// Verify a DIExpression; DIExpression::isValid() performs the full opcode
// and operand-count validation internally.
void Verifier::visitDIExpression(const DIExpression &N) {
  CheckDI(N.isValid(), "invalid expression", &N);
}
1791
// Verify a DIGlobalVariableExpression pair: the variable is mandatory; the
// expression is optional, but if it carries a fragment the fragment must fit
// the variable it describes.
void Verifier::visitDIGlobalVariableExpression(
    const DIGlobalVariableExpression &GVE) {
  CheckDI(GVE.getVariable(), "missing variable");
  if (auto *Var = GVE.getVariable())
    visitDIGlobalVariable(*Var);
  if (auto *Expr = GVE.getExpression()) {
    visitDIExpression(*Expr);
    if (auto Fragment = Expr->getFragmentInfo())
      verifyFragmentExpression(*GVE.getVariable(), *Fragment, &GVE);
  }
}
1803
// Verify a DIObjCProperty: Apple-specific property tag, plus optional type
// and file operands that must be of the right metadata kinds when present.
void Verifier::visitDIObjCProperty(const DIObjCProperty &N) {
  CheckDI(N.getTag() == dwarf::DW_TAG_APPLE_property, "invalid tag", &N);
  if (auto *T = N.getRawType())
    CheckDI(isType(T), "invalid type ref", &N, T);
  if (auto *F = N.getRawFile())
    CheckDI(isa<DIFile>(F), "invalid file", &N, F);
}
1811
// Verify a DIImportedEntity (e.g. C++ using-directive/-declaration): must be
// one of the two imported-entity tags, have a DIScope if a scope is present,
// and reference a valid DINode as the imported entity.
void Verifier::visitDIImportedEntity(const DIImportedEntity &N) {
  CheckDI(N.getTag() == dwarf::DW_TAG_imported_module ||
              N.getTag() == dwarf::DW_TAG_imported_declaration,
          "invalid tag", &N);
  if (auto *S = N.getRawScope())
    CheckDI(isa<DIScope>(S), "invalid scope for imported entity", &N, S);
  CheckDI(isDINode(N.getRawEntity()), "invalid imported entity", &N,
          N.getRawEntity());
}
1821
// Verify a Comdat. On COFF targets a global value in a comdat must be named
// in the symbol table, so private linkage (which has no symbol-table entry)
// is rejected; other object formats have no such restriction.
void Verifier::visitComdat(const Comdat &C) {
  // In COFF the Module is invalid if the GlobalValue has private linkage.
  // Entities with private linkage don't have entries in the symbol table.
  if (TT.isOSBinFormatCOFF())
    if (const GlobalValue *GV = M.getNamedValue(C.getName()))
      Check(!GV->hasPrivateLinkage(), "comdat global value has private linkage",
            GV);
}
1830
// Verify the optional !llvm.ident named metadata: every operand must be a
// node wrapping exactly one MDString (the producer identification string).
void Verifier::visitModuleIdents() {
  const NamedMDNode *Idents = M.getNamedMetadata("llvm.ident");
  if (!Idents)
    return;

  // llvm.ident takes a list of metadata entry. Each entry has only one string.
  // Scan each llvm.ident entry and make sure that this requirement is met.
  for (const MDNode *N : Idents->operands()) {
    Check(N->getNumOperands() == 1,
          "incorrect number of operands in llvm.ident metadata", N);
    Check(dyn_cast_or_null<MDString>(N->getOperand(0)),
          ("invalid value for llvm.ident metadata entry operand"
           "(the operand should be a string)"),
          N->getOperand(0));
  }
}
1847
// Verify the optional !llvm.commandline named metadata: mirrors the
// llvm.ident check — each operand is a node wrapping exactly one MDString.
void Verifier::visitModuleCommandLines() {
  const NamedMDNode *CommandLines = M.getNamedMetadata("llvm.commandline");
  if (!CommandLines)
    return;

  // llvm.commandline takes a list of metadata entry. Each entry has only one
  // string. Scan each llvm.commandline entry and make sure that this
  // requirement is met.
  for (const MDNode *N : CommandLines->operands()) {
    Check(N->getNumOperands() == 1,
          "incorrect number of operands in llvm.commandline metadata", N);
    Check(dyn_cast_or_null<MDString>(N->getOperand(0)),
          ("invalid value for llvm.commandline metadata entry operand"
           "(the operand should be a string)"),
          N->getOperand(0));
  }
}
1865
// Verify the optional !llvm.errno.tbaa named metadata: it must be non-empty,
// and every operand must itself be well-formed TBAA metadata.
void Verifier::visitModuleErrnoTBAA() {
  const NamedMDNode *ErrnoTBAA = M.getNamedMetadata("llvm.errno.tbaa");
  if (!ErrnoTBAA)
    return;

  Check(ErrnoTBAA->getNumOperands() >= 1,
        "llvm.errno.tbaa must have at least one operand", ErrnoTBAA);

  // Delegate TBAA node validation; there is no memory-access instruction
  // context here, hence the null first argument.
  for (const MDNode *N : ErrnoTBAA->operands())
    TBAAVerifyHelper.visitTBAAMetadata(nullptr, N);
}
1877
1878void Verifier::visitModuleFlags() {
1879 const NamedMDNode *Flags = M.getModuleFlagsMetadata();
1880 if (!Flags) return;
1881
1882 // Scan each flag, and track the flags and requirements.
1883 DenseMap<const MDString*, const MDNode*> SeenIDs;
1884 SmallVector<const MDNode*, 16> Requirements;
1885 uint64_t PAuthABIPlatform = -1;
1886 uint64_t PAuthABIVersion = -1;
1887 for (const MDNode *MDN : Flags->operands()) {
1888 visitModuleFlag(MDN, SeenIDs, Requirements);
1889 if (MDN->getNumOperands() != 3)
1890 continue;
1891 if (const auto *FlagName = dyn_cast_or_null<MDString>(MDN->getOperand(1))) {
1892 if (FlagName->getString() == "aarch64-elf-pauthabi-platform") {
1893 if (const auto *PAP =
1895 PAuthABIPlatform = PAP->getZExtValue();
1896 } else if (FlagName->getString() == "aarch64-elf-pauthabi-version") {
1897 if (const auto *PAV =
1899 PAuthABIVersion = PAV->getZExtValue();
1900 }
1901 }
1902 }
1903
1904 if ((PAuthABIPlatform == uint64_t(-1)) != (PAuthABIVersion == uint64_t(-1)))
1905 CheckFailed("either both or no 'aarch64-elf-pauthabi-platform' and "
1906 "'aarch64-elf-pauthabi-version' module flags must be present");
1907
1908 // Validate that the requirements in the module are valid.
1909 for (const MDNode *Requirement : Requirements) {
1910 const MDString *Flag = cast<MDString>(Requirement->getOperand(0));
1911 const Metadata *ReqValue = Requirement->getOperand(1);
1912
1913 const MDNode *Op = SeenIDs.lookup(Flag);
1914 if (!Op) {
1915 CheckFailed("invalid requirement on flag, flag is not present in module",
1916 Flag);
1917 continue;
1918 }
1919
1920 if (Op->getOperand(2) != ReqValue) {
1921 CheckFailed(("invalid requirement on flag, "
1922 "flag does not have the required value"),
1923 Flag);
1924 continue;
1925 }
1926 }
1927}
1928
1929void
1930Verifier::visitModuleFlag(const MDNode *Op,
1931 DenseMap<const MDString *, const MDNode *> &SeenIDs,
1932 SmallVectorImpl<const MDNode *> &Requirements) {
1933 // Each module flag should have three arguments, the merge behavior (a
1934 // constant int), the flag ID (an MDString), and the value.
1935 Check(Op->getNumOperands() == 3,
1936 "incorrect number of operands in module flag", Op);
1937 Module::ModFlagBehavior MFB;
1938 if (!Module::isValidModFlagBehavior(Op->getOperand(0), MFB)) {
1940 "invalid behavior operand in module flag (expected constant integer)",
1941 Op->getOperand(0));
1942 Check(false,
1943 "invalid behavior operand in module flag (unexpected constant)",
1944 Op->getOperand(0));
1945 }
1946 MDString *ID = dyn_cast_or_null<MDString>(Op->getOperand(1));
1947 Check(ID, "invalid ID operand in module flag (expected metadata string)",
1948 Op->getOperand(1));
1949
1950 // Check the values for behaviors with additional requirements.
1951 switch (MFB) {
1952 case Module::Error:
1953 case Module::Warning:
1954 case Module::Override:
1955 // These behavior types accept any value.
1956 break;
1957
1958 case Module::Min: {
1959 auto *V = mdconst::dyn_extract_or_null<ConstantInt>(Op->getOperand(2));
1960 Check(V && V->getValue().isNonNegative(),
1961 "invalid value for 'min' module flag (expected constant non-negative "
1962 "integer)",
1963 Op->getOperand(2));
1964 break;
1965 }
1966
1967 case Module::Max: {
1969 "invalid value for 'max' module flag (expected constant integer)",
1970 Op->getOperand(2));
1971 break;
1972 }
1973
1974 case Module::Require: {
1975 // The value should itself be an MDNode with two operands, a flag ID (an
1976 // MDString), and a value.
1977 MDNode *Value = dyn_cast<MDNode>(Op->getOperand(2));
1978 Check(Value && Value->getNumOperands() == 2,
1979 "invalid value for 'require' module flag (expected metadata pair)",
1980 Op->getOperand(2));
1981 Check(isa<MDString>(Value->getOperand(0)),
1982 ("invalid value for 'require' module flag "
1983 "(first value operand should be a string)"),
1984 Value->getOperand(0));
1985
1986 // Append it to the list of requirements, to check once all module flags are
1987 // scanned.
1988 Requirements.push_back(Value);
1989 break;
1990 }
1991
1992 case Module::Append:
1993 case Module::AppendUnique: {
1994 // These behavior types require the operand be an MDNode.
1995 Check(isa<MDNode>(Op->getOperand(2)),
1996 "invalid value for 'append'-type module flag "
1997 "(expected a metadata node)",
1998 Op->getOperand(2));
1999 break;
2000 }
2001 }
2002
2003 // Unless this is a "requires" flag, check the ID is unique.
2004 if (MFB != Module::Require) {
2005 bool Inserted = SeenIDs.insert(std::make_pair(ID, Op)).second;
2006 Check(Inserted,
2007 "module flag identifiers must be unique (or of 'require' type)", ID);
2008 }
2009
2010 if (ID->getString() == "wchar_size") {
2011 ConstantInt *Value
2013 Check(Value, "wchar_size metadata requires constant integer argument");
2014 }
2015
2016 if (ID->getString() == "Linker Options") {
2017 // If the llvm.linker.options named metadata exists, we assume that the
2018 // bitcode reader has upgraded the module flag. Otherwise the flag might
2019 // have been created by a client directly.
2020 Check(M.getNamedMetadata("llvm.linker.options"),
2021 "'Linker Options' named metadata no longer supported");
2022 }
2023
2024 if (ID->getString() == "SemanticInterposition") {
2025 ConstantInt *Value =
2027 Check(Value,
2028 "SemanticInterposition metadata requires constant integer argument");
2029 }
2030
2031 if (ID->getString() == "CG Profile") {
2032 for (const MDOperand &MDO : cast<MDNode>(Op->getOperand(2))->operands())
2033 visitModuleFlagCGProfileEntry(MDO);
2034 }
2035}
2036
// Verify one entry of the "CG Profile" module flag: a triple of
// (caller, callee, count) where each function slot is either null or a
// Function (possibly behind pointer casts) and count is an integer constant.
void Verifier::visitModuleFlagCGProfileEntry(const MDOperand &MDO) {
  // Null function operands are allowed (e.g. for dropped functions).
  auto CheckFunction = [&](const MDOperand &FuncMDO) {
    if (!FuncMDO)
      return;
    auto F = dyn_cast<ValueAsMetadata>(FuncMDO);
    Check(F && isa<Function>(F->getValue()->stripPointerCasts()),
          "expected a Function or null", FuncMDO);
  };
  auto Node = dyn_cast_or_null<MDNode>(MDO);
  Check(Node && Node->getNumOperands() == 3, "expected a MDNode triple", MDO);
  CheckFunction(Node->getOperand(0));
  CheckFunction(Node->getOperand(1));
  auto Count = dyn_cast_or_null<ConstantAsMetadata>(Node->getOperand(2));
  Check(Count && Count->getType()->isIntegerTy(),
        "expected an integer constant", Node->getOperand(2));
}
2053
// Verify the representation of each attribute in a set: known string
// attributes declared as STRBOOL in Attributes.inc must have an empty,
// "true", or "false" value; enum attributes must carry an integer argument
// exactly when their kind is an integer-attribute kind.
void Verifier::verifyAttributeTypes(AttributeSet Attrs, const Value *V) {
  for (Attribute A : Attrs) {

    if (A.isStringAttribute()) {
// Expand a value check for every boolean-valued string attribute known to
// the attribute table generator; other string attributes pass through.
#define GET_ATTR_NAMES
#define ATTRIBUTE_ENUM(ENUM_NAME, DISPLAY_NAME)
#define ATTRIBUTE_STRBOOL(ENUM_NAME, DISPLAY_NAME)                             \
  if (A.getKindAsString() == #DISPLAY_NAME) {                                  \
    auto V = A.getValueAsString();                                             \
    if (!(V.empty() || V == "true" || V == "false"))                           \
      CheckFailed("invalid value for '" #DISPLAY_NAME "' attribute: " + V +    \
                  "");                                                         \
  }

#include "llvm/IR/Attributes.inc"
      continue;
    }

    if (A.isIntAttribute() != Attribute::isIntAttrKind(A.getKindAsEnum())) {
      CheckFailed("Attribute '" + A.getAsString() + "' should have an Argument",
                  V);
      return;
    }
  }
}
2079
2080// VerifyParameterAttrs - Check the given attributes for an argument or return
2081// value of the specified type. The value V is printed in error messages.
2082void Verifier::verifyParameterAttrs(AttributeSet Attrs, Type *Ty,
2083 const Value *V) {
2084 if (!Attrs.hasAttributes())
2085 return;
2086
2087 verifyAttributeTypes(Attrs, V);
2088
2089 for (Attribute Attr : Attrs)
2090 Check(Attr.isStringAttribute() ||
2091 Attribute::canUseAsParamAttr(Attr.getKindAsEnum()),
2092 "Attribute '" + Attr.getAsString() + "' does not apply to parameters",
2093 V);
2094
2095 if (Attrs.hasAttribute(Attribute::ImmArg)) {
2096 unsigned AttrCount =
2097 Attrs.getNumAttributes() - Attrs.hasAttribute(Attribute::Range);
2098 Check(AttrCount == 1,
2099 "Attribute 'immarg' is incompatible with other attributes except the "
2100 "'range' attribute",
2101 V);
2102 }
2103
2104 // Check for mutually incompatible attributes. Only inreg is compatible with
2105 // sret.
2106 unsigned AttrCount = 0;
2107 AttrCount += Attrs.hasAttribute(Attribute::ByVal);
2108 AttrCount += Attrs.hasAttribute(Attribute::InAlloca);
2109 AttrCount += Attrs.hasAttribute(Attribute::Preallocated);
2110 AttrCount += Attrs.hasAttribute(Attribute::StructRet) ||
2111 Attrs.hasAttribute(Attribute::InReg);
2112 AttrCount += Attrs.hasAttribute(Attribute::Nest);
2113 AttrCount += Attrs.hasAttribute(Attribute::ByRef);
2114 Check(AttrCount <= 1,
2115 "Attributes 'byval', 'inalloca', 'preallocated', 'inreg', 'nest', "
2116 "'byref', and 'sret' are incompatible!",
2117 V);
2118
2119 Check(!(Attrs.hasAttribute(Attribute::InAlloca) &&
2120 Attrs.hasAttribute(Attribute::ReadOnly)),
2121 "Attributes "
2122 "'inalloca and readonly' are incompatible!",
2123 V);
2124
2125 Check(!(Attrs.hasAttribute(Attribute::StructRet) &&
2126 Attrs.hasAttribute(Attribute::Returned)),
2127 "Attributes "
2128 "'sret and returned' are incompatible!",
2129 V);
2130
2131 Check(!(Attrs.hasAttribute(Attribute::ZExt) &&
2132 Attrs.hasAttribute(Attribute::SExt)),
2133 "Attributes "
2134 "'zeroext and signext' are incompatible!",
2135 V);
2136
2137 Check(!(Attrs.hasAttribute(Attribute::ReadNone) &&
2138 Attrs.hasAttribute(Attribute::ReadOnly)),
2139 "Attributes "
2140 "'readnone and readonly' are incompatible!",
2141 V);
2142
2143 Check(!(Attrs.hasAttribute(Attribute::ReadNone) &&
2144 Attrs.hasAttribute(Attribute::WriteOnly)),
2145 "Attributes "
2146 "'readnone and writeonly' are incompatible!",
2147 V);
2148
2149 Check(!(Attrs.hasAttribute(Attribute::ReadOnly) &&
2150 Attrs.hasAttribute(Attribute::WriteOnly)),
2151 "Attributes "
2152 "'readonly and writeonly' are incompatible!",
2153 V);
2154
2155 Check(!(Attrs.hasAttribute(Attribute::NoInline) &&
2156 Attrs.hasAttribute(Attribute::AlwaysInline)),
2157 "Attributes "
2158 "'noinline and alwaysinline' are incompatible!",
2159 V);
2160
2161 Check(!(Attrs.hasAttribute(Attribute::Writable) &&
2162 Attrs.hasAttribute(Attribute::ReadNone)),
2163 "Attributes writable and readnone are incompatible!", V);
2164
2165 Check(!(Attrs.hasAttribute(Attribute::Writable) &&
2166 Attrs.hasAttribute(Attribute::ReadOnly)),
2167 "Attributes writable and readonly are incompatible!", V);
2168
2169 AttributeMask IncompatibleAttrs = AttributeFuncs::typeIncompatible(Ty, Attrs);
2170 for (Attribute Attr : Attrs) {
2171 if (!Attr.isStringAttribute() &&
2172 IncompatibleAttrs.contains(Attr.getKindAsEnum())) {
2173 CheckFailed("Attribute '" + Attr.getAsString() +
2174 "' applied to incompatible type!", V);
2175 return;
2176 }
2177 }
2178
2179 if (isa<PointerType>(Ty)) {
2180 if (Attrs.hasAttribute(Attribute::Alignment)) {
2181 Align AttrAlign = Attrs.getAlignment().valueOrOne();
2182 Check(AttrAlign.value() <= Value::MaximumAlignment,
2183 "huge alignment values are unsupported", V);
2184 }
2185 if (Attrs.hasAttribute(Attribute::ByVal)) {
2186 Type *ByValTy = Attrs.getByValType();
2187 SmallPtrSet<Type *, 4> Visited;
2188 Check(ByValTy->isSized(&Visited),
2189 "Attribute 'byval' does not support unsized types!", V);
2190 // Check if it is or contains a target extension type that disallows being
2191 // used on the stack.
2193 "'byval' argument has illegal target extension type", V);
2194 Check(DL.getTypeAllocSize(ByValTy).getKnownMinValue() < (1ULL << 32),
2195 "huge 'byval' arguments are unsupported", V);
2196 }
2197 if (Attrs.hasAttribute(Attribute::ByRef)) {
2198 SmallPtrSet<Type *, 4> Visited;
2199 Check(Attrs.getByRefType()->isSized(&Visited),
2200 "Attribute 'byref' does not support unsized types!", V);
2201 Check(DL.getTypeAllocSize(Attrs.getByRefType()).getKnownMinValue() <
2202 (1ULL << 32),
2203 "huge 'byref' arguments are unsupported", V);
2204 }
2205 if (Attrs.hasAttribute(Attribute::InAlloca)) {
2206 SmallPtrSet<Type *, 4> Visited;
2207 Check(Attrs.getInAllocaType()->isSized(&Visited),
2208 "Attribute 'inalloca' does not support unsized types!", V);
2209 Check(DL.getTypeAllocSize(Attrs.getInAllocaType()).getKnownMinValue() <
2210 (1ULL << 32),
2211 "huge 'inalloca' arguments are unsupported", V);
2212 }
2213 if (Attrs.hasAttribute(Attribute::Preallocated)) {
2214 SmallPtrSet<Type *, 4> Visited;
2215 Check(Attrs.getPreallocatedType()->isSized(&Visited),
2216 "Attribute 'preallocated' does not support unsized types!", V);
2217 Check(
2218 DL.getTypeAllocSize(Attrs.getPreallocatedType()).getKnownMinValue() <
2219 (1ULL << 32),
2220 "huge 'preallocated' arguments are unsupported", V);
2221 }
2222 }
2223
2224 if (Attrs.hasAttribute(Attribute::Initializes)) {
2225 auto Inits = Attrs.getAttribute(Attribute::Initializes).getInitializes();
2226 Check(!Inits.empty(), "Attribute 'initializes' does not support empty list",
2227 V);
2229 "Attribute 'initializes' does not support unordered ranges", V);
2230 }
2231
2232 if (Attrs.hasAttribute(Attribute::NoFPClass)) {
2233 uint64_t Val = Attrs.getAttribute(Attribute::NoFPClass).getValueAsInt();
2234 Check(Val != 0, "Attribute 'nofpclass' must have at least one test bit set",
2235 V);
2236 Check((Val & ~static_cast<unsigned>(fcAllFlags)) == 0,
2237 "Invalid value for 'nofpclass' test mask", V);
2238 }
2239 if (Attrs.hasAttribute(Attribute::Range)) {
2240 const ConstantRange &CR =
2241 Attrs.getAttribute(Attribute::Range).getValueAsConstantRange();
2243 "Range bit width must match type bit width!", V);
2244 }
2245}
2246
// Verify that, if the named string function attribute is present, its value
// parses as a base-10 unsigned integer (StringRef::getAsInteger returns true
// on failure).
void Verifier::checkUnsignedBaseTenFuncAttr(AttributeList Attrs, StringRef Attr,
                                            const Value *V) {
  if (Attrs.hasFnAttr(Attr)) {
    StringRef S = Attrs.getFnAttr(Attr).getValueAsString();
    unsigned N;
    if (S.getAsInteger(10, N))
      CheckFailed("\"" + Attr + "\" takes an unsigned integer: " + S, V);
  }
}
2256
2257// Check parameter attributes against a function type.
2258// The value V is printed in error messages.
2259void Verifier::verifyFunctionAttrs(FunctionType *FT, AttributeList Attrs,
2260 const Value *V, bool IsIntrinsic,
2261 bool IsInlineAsm) {
2262 if (Attrs.isEmpty())
2263 return;
2264
2265 if (AttributeListsVisited.insert(Attrs.getRawPointer()).second) {
2266 Check(Attrs.hasParentContext(Context),
2267 "Attribute list does not match Module context!", &Attrs, V);
2268 for (const auto &AttrSet : Attrs) {
2269 Check(!AttrSet.hasAttributes() || AttrSet.hasParentContext(Context),
2270 "Attribute set does not match Module context!", &AttrSet, V);
2271 for (const auto &A : AttrSet) {
2272 Check(A.hasParentContext(Context),
2273 "Attribute does not match Module context!", &A, V);
2274 }
2275 }
2276 }
2277
2278 bool SawNest = false;
2279 bool SawReturned = false;
2280 bool SawSRet = false;
2281 bool SawSwiftSelf = false;
2282 bool SawSwiftAsync = false;
2283 bool SawSwiftError = false;
2284
2285 // Verify return value attributes.
2286 AttributeSet RetAttrs = Attrs.getRetAttrs();
2287 for (Attribute RetAttr : RetAttrs)
2288 Check(RetAttr.isStringAttribute() ||
2289 Attribute::canUseAsRetAttr(RetAttr.getKindAsEnum()),
2290 "Attribute '" + RetAttr.getAsString() +
2291 "' does not apply to function return values",
2292 V);
2293
2294 unsigned MaxParameterWidth = 0;
2295 auto GetMaxParameterWidth = [&MaxParameterWidth](Type *Ty) {
2296 if (Ty->isVectorTy()) {
2297 if (auto *VT = dyn_cast<FixedVectorType>(Ty)) {
2298 unsigned Size = VT->getPrimitiveSizeInBits().getFixedValue();
2299 if (Size > MaxParameterWidth)
2300 MaxParameterWidth = Size;
2301 }
2302 }
2303 };
2304 GetMaxParameterWidth(FT->getReturnType());
2305 verifyParameterAttrs(RetAttrs, FT->getReturnType(), V);
2306
2307 // Verify parameter attributes.
2308 for (unsigned i = 0, e = FT->getNumParams(); i != e; ++i) {
2309 Type *Ty = FT->getParamType(i);
2310 AttributeSet ArgAttrs = Attrs.getParamAttrs(i);
2311
2312 if (!IsIntrinsic) {
2313 Check(!ArgAttrs.hasAttribute(Attribute::ImmArg),
2314 "immarg attribute only applies to intrinsics", V);
2315 if (!IsInlineAsm)
2316 Check(!ArgAttrs.hasAttribute(Attribute::ElementType),
2317 "Attribute 'elementtype' can only be applied to intrinsics"
2318 " and inline asm.",
2319 V);
2320 }
2321
2322 verifyParameterAttrs(ArgAttrs, Ty, V);
2323 GetMaxParameterWidth(Ty);
2324
2325 if (ArgAttrs.hasAttribute(Attribute::Nest)) {
2326 Check(!SawNest, "More than one parameter has attribute nest!", V);
2327 SawNest = true;
2328 }
2329
2330 if (ArgAttrs.hasAttribute(Attribute::Returned)) {
2331 Check(!SawReturned, "More than one parameter has attribute returned!", V);
2332 Check(Ty->canLosslesslyBitCastTo(FT->getReturnType()),
2333 "Incompatible argument and return types for 'returned' attribute",
2334 V);
2335 SawReturned = true;
2336 }
2337
2338 if (ArgAttrs.hasAttribute(Attribute::StructRet)) {
2339 Check(!SawSRet, "Cannot have multiple 'sret' parameters!", V);
2340 Check(i == 0 || i == 1,
2341 "Attribute 'sret' is not on first or second parameter!", V);
2342 SawSRet = true;
2343 }
2344
2345 if (ArgAttrs.hasAttribute(Attribute::SwiftSelf)) {
2346 Check(!SawSwiftSelf, "Cannot have multiple 'swiftself' parameters!", V);
2347 SawSwiftSelf = true;
2348 }
2349
2350 if (ArgAttrs.hasAttribute(Attribute::SwiftAsync)) {
2351 Check(!SawSwiftAsync, "Cannot have multiple 'swiftasync' parameters!", V);
2352 SawSwiftAsync = true;
2353 }
2354
2355 if (ArgAttrs.hasAttribute(Attribute::SwiftError)) {
2356 Check(!SawSwiftError, "Cannot have multiple 'swifterror' parameters!", V);
2357 SawSwiftError = true;
2358 }
2359
2360 if (ArgAttrs.hasAttribute(Attribute::InAlloca)) {
2361 Check(i == FT->getNumParams() - 1,
2362 "inalloca isn't on the last parameter!", V);
2363 }
2364 }
2365
2366 if (!Attrs.hasFnAttrs())
2367 return;
2368
2369 verifyAttributeTypes(Attrs.getFnAttrs(), V);
2370 for (Attribute FnAttr : Attrs.getFnAttrs())
2371 Check(FnAttr.isStringAttribute() ||
2372 Attribute::canUseAsFnAttr(FnAttr.getKindAsEnum()),
2373 "Attribute '" + FnAttr.getAsString() +
2374 "' does not apply to functions!",
2375 V);
2376
2377 Check(!(Attrs.hasFnAttr(Attribute::NoInline) &&
2378 Attrs.hasFnAttr(Attribute::AlwaysInline)),
2379 "Attributes 'noinline and alwaysinline' are incompatible!", V);
2380
2381 if (Attrs.hasFnAttr(Attribute::OptimizeNone)) {
2382 Check(Attrs.hasFnAttr(Attribute::NoInline),
2383 "Attribute 'optnone' requires 'noinline'!", V);
2384
2385 Check(!Attrs.hasFnAttr(Attribute::OptimizeForSize),
2386 "Attributes 'optsize and optnone' are incompatible!", V);
2387
2388 Check(!Attrs.hasFnAttr(Attribute::MinSize),
2389 "Attributes 'minsize and optnone' are incompatible!", V);
2390
2391 Check(!Attrs.hasFnAttr(Attribute::OptimizeForDebugging),
2392 "Attributes 'optdebug and optnone' are incompatible!", V);
2393 }
2394
2395 Check(!(Attrs.hasFnAttr(Attribute::SanitizeRealtime) &&
2396 Attrs.hasFnAttr(Attribute::SanitizeRealtimeBlocking)),
2397 "Attributes "
2398 "'sanitize_realtime and sanitize_realtime_blocking' are incompatible!",
2399 V);
2400
2401 if (Attrs.hasFnAttr(Attribute::OptimizeForDebugging)) {
2402 Check(!Attrs.hasFnAttr(Attribute::OptimizeForSize),
2403 "Attributes 'optsize and optdebug' are incompatible!", V);
2404
2405 Check(!Attrs.hasFnAttr(Attribute::MinSize),
2406 "Attributes 'minsize and optdebug' are incompatible!", V);
2407 }
2408
2409 Check(!Attrs.hasAttrSomewhere(Attribute::Writable) ||
2410 isModSet(Attrs.getMemoryEffects().getModRef(IRMemLocation::ArgMem)),
2411 "Attribute writable and memory without argmem: write are incompatible!",
2412 V);
2413
2414 if (Attrs.hasFnAttr("aarch64_pstate_sm_enabled")) {
2415 Check(!Attrs.hasFnAttr("aarch64_pstate_sm_compatible"),
2416 "Attributes 'aarch64_pstate_sm_enabled and "
2417 "aarch64_pstate_sm_compatible' are incompatible!",
2418 V);
2419 }
2420
2421 Check((Attrs.hasFnAttr("aarch64_new_za") + Attrs.hasFnAttr("aarch64_in_za") +
2422 Attrs.hasFnAttr("aarch64_inout_za") +
2423 Attrs.hasFnAttr("aarch64_out_za") +
2424 Attrs.hasFnAttr("aarch64_preserves_za") +
2425 Attrs.hasFnAttr("aarch64_za_state_agnostic")) <= 1,
2426 "Attributes 'aarch64_new_za', 'aarch64_in_za', 'aarch64_out_za', "
2427 "'aarch64_inout_za', 'aarch64_preserves_za' and "
2428 "'aarch64_za_state_agnostic' are mutually exclusive",
2429 V);
2430
2431 Check((Attrs.hasFnAttr("aarch64_new_zt0") +
2432 Attrs.hasFnAttr("aarch64_in_zt0") +
2433 Attrs.hasFnAttr("aarch64_inout_zt0") +
2434 Attrs.hasFnAttr("aarch64_out_zt0") +
2435 Attrs.hasFnAttr("aarch64_preserves_zt0") +
2436 Attrs.hasFnAttr("aarch64_za_state_agnostic")) <= 1,
2437 "Attributes 'aarch64_new_zt0', 'aarch64_in_zt0', 'aarch64_out_zt0', "
2438 "'aarch64_inout_zt0', 'aarch64_preserves_zt0' and "
2439 "'aarch64_za_state_agnostic' are mutually exclusive",
2440 V);
2441
2442 if (Attrs.hasFnAttr(Attribute::JumpTable)) {
2443 const GlobalValue *GV = cast<GlobalValue>(V);
2445 "Attribute 'jumptable' requires 'unnamed_addr'", V);
2446 }
2447
2448 if (auto Args = Attrs.getFnAttrs().getAllocSizeArgs()) {
2449 auto CheckParam = [&](StringRef Name, unsigned ParamNo) {
2450 if (ParamNo >= FT->getNumParams()) {
2451 CheckFailed("'allocsize' " + Name + " argument is out of bounds", V);
2452 return false;
2453 }
2454
2455 if (!FT->getParamType(ParamNo)->isIntegerTy()) {
2456 CheckFailed("'allocsize' " + Name +
2457 " argument must refer to an integer parameter",
2458 V);
2459 return false;
2460 }
2461
2462 return true;
2463 };
2464
2465 if (!CheckParam("element size", Args->first))
2466 return;
2467
2468 if (Args->second && !CheckParam("number of elements", *Args->second))
2469 return;
2470 }
2471
2472 if (Attrs.hasFnAttr(Attribute::AllocKind)) {
2473 AllocFnKind K = Attrs.getAllocKind();
2475 K & (AllocFnKind::Alloc | AllocFnKind::Realloc | AllocFnKind::Free);
2476 if (!is_contained(
2477 {AllocFnKind::Alloc, AllocFnKind::Realloc, AllocFnKind::Free},
2478 Type))
2479 CheckFailed(
2480 "'allockind()' requires exactly one of alloc, realloc, and free");
2481 if ((Type == AllocFnKind::Free) &&
2482 ((K & (AllocFnKind::Uninitialized | AllocFnKind::Zeroed |
2483 AllocFnKind::Aligned)) != AllocFnKind::Unknown))
2484 CheckFailed("'allockind(\"free\")' doesn't allow uninitialized, zeroed, "
2485 "or aligned modifiers.");
2486 AllocFnKind ZeroedUninit = AllocFnKind::Uninitialized | AllocFnKind::Zeroed;
2487 if ((K & ZeroedUninit) == ZeroedUninit)
2488 CheckFailed("'allockind()' can't be both zeroed and uninitialized");
2489 }
2490
2491 if (Attribute A = Attrs.getFnAttr("alloc-variant-zeroed"); A.isValid()) {
2492 StringRef S = A.getValueAsString();
2493 Check(!S.empty(), "'alloc-variant-zeroed' must not be empty");
2494 Function *Variant = M.getFunction(S);
2495 if (Variant) {
2496 Attribute Family = Attrs.getFnAttr("alloc-family");
2497 Attribute VariantFamily = Variant->getFnAttribute("alloc-family");
2498 if (Family.isValid())
2499 Check(VariantFamily.isValid() &&
2500 VariantFamily.getValueAsString() == Family.getValueAsString(),
2501 "'alloc-variant-zeroed' must name a function belonging to the "
2502 "same 'alloc-family'");
2503
2504 Check(Variant->hasFnAttribute(Attribute::AllocKind) &&
2505 (Variant->getFnAttribute(Attribute::AllocKind).getAllocKind() &
2506 AllocFnKind::Zeroed) != AllocFnKind::Unknown,
2507 "'alloc-variant-zeroed' must name a function with "
2508 "'allockind(\"zeroed\")'");
2509
2510 Check(FT == Variant->getFunctionType(),
2511 "'alloc-variant-zeroed' must name a function with the same "
2512 "signature");
2513 }
2514 }
2515
2516 if (Attrs.hasFnAttr(Attribute::VScaleRange)) {
2517 unsigned VScaleMin = Attrs.getFnAttrs().getVScaleRangeMin();
2518 if (VScaleMin == 0)
2519 CheckFailed("'vscale_range' minimum must be greater than 0", V);
2520 else if (!isPowerOf2_32(VScaleMin))
2521 CheckFailed("'vscale_range' minimum must be power-of-two value", V);
2522 std::optional<unsigned> VScaleMax = Attrs.getFnAttrs().getVScaleRangeMax();
2523 if (VScaleMax && VScaleMin > VScaleMax)
2524 CheckFailed("'vscale_range' minimum cannot be greater than maximum", V);
2525 else if (VScaleMax && !isPowerOf2_32(*VScaleMax))
2526 CheckFailed("'vscale_range' maximum must be power-of-two value", V);
2527 }
2528
2529 if (Attribute FPAttr = Attrs.getFnAttr("frame-pointer"); FPAttr.isValid()) {
2530 StringRef FP = FPAttr.getValueAsString();
2531 if (FP != "all" && FP != "non-leaf" && FP != "none" && FP != "reserved" &&
2532 FP != "non-leaf-no-reserve")
2533 CheckFailed("invalid value for 'frame-pointer' attribute: " + FP, V);
2534 }
2535
2536 checkUnsignedBaseTenFuncAttr(Attrs, "patchable-function-prefix", V);
2537 checkUnsignedBaseTenFuncAttr(Attrs, "patchable-function-entry", V);
2538 if (Attrs.hasFnAttr("patchable-function-entry-section"))
2539 Check(!Attrs.getFnAttr("patchable-function-entry-section")
2540 .getValueAsString()
2541 .empty(),
2542 "\"patchable-function-entry-section\" must not be empty");
2543 checkUnsignedBaseTenFuncAttr(Attrs, "warn-stack-size", V);
2544
2545 if (auto A = Attrs.getFnAttr("sign-return-address"); A.isValid()) {
2546 StringRef S = A.getValueAsString();
2547 if (S != "none" && S != "all" && S != "non-leaf")
2548 CheckFailed("invalid value for 'sign-return-address' attribute: " + S, V);
2549 }
2550
2551 if (auto A = Attrs.getFnAttr("sign-return-address-key"); A.isValid()) {
2552 StringRef S = A.getValueAsString();
2553 if (S != "a_key" && S != "b_key")
2554 CheckFailed("invalid value for 'sign-return-address-key' attribute: " + S,
2555 V);
2556 if (auto AA = Attrs.getFnAttr("sign-return-address"); !AA.isValid()) {
2557 CheckFailed(
2558 "'sign-return-address-key' present without `sign-return-address`");
2559 }
2560 }
2561
2562 if (auto A = Attrs.getFnAttr("branch-target-enforcement"); A.isValid()) {
2563 StringRef S = A.getValueAsString();
2564 if (S != "" && S != "true" && S != "false")
2565 CheckFailed(
2566 "invalid value for 'branch-target-enforcement' attribute: " + S, V);
2567 }
2568
2569 if (auto A = Attrs.getFnAttr("branch-protection-pauth-lr"); A.isValid()) {
2570 StringRef S = A.getValueAsString();
2571 if (S != "" && S != "true" && S != "false")
2572 CheckFailed(
2573 "invalid value for 'branch-protection-pauth-lr' attribute: " + S, V);
2574 }
2575
2576 if (auto A = Attrs.getFnAttr("guarded-control-stack"); A.isValid()) {
2577 StringRef S = A.getValueAsString();
2578 if (S != "" && S != "true" && S != "false")
2579 CheckFailed("invalid value for 'guarded-control-stack' attribute: " + S,
2580 V);
2581 }
2582
2583 if (auto A = Attrs.getFnAttr("vector-function-abi-variant"); A.isValid()) {
2584 StringRef S = A.getValueAsString();
2585 const std::optional<VFInfo> Info = VFABI::tryDemangleForVFABI(S, FT);
2586 if (!Info)
2587 CheckFailed("invalid name for a VFABI variant: " + S, V);
2588 }
2589
2590 if (auto A = Attrs.getFnAttr("denormal-fp-math"); A.isValid()) {
2591 StringRef S = A.getValueAsString();
2593 CheckFailed("invalid value for 'denormal-fp-math' attribute: " + S, V);
2594 }
2595
2596 if (auto A = Attrs.getFnAttr("denormal-fp-math-f32"); A.isValid()) {
2597 StringRef S = A.getValueAsString();
2599 CheckFailed("invalid value for 'denormal-fp-math-f32' attribute: " + S,
2600 V);
2601 }
2602
2603 if (auto A = Attrs.getFnAttr("modular-format"); A.isValid()) {
2604 StringRef S = A.getValueAsString();
2606 S.split(Args, ',');
2607 Check(Args.size() >= 5,
2608 "modular-format attribute requires at least 5 arguments", V);
2609 unsigned FirstArgIdx;
2610 Check(!Args[2].getAsInteger(10, FirstArgIdx),
2611 "modular-format attribute first arg index is not an integer", V);
2612 unsigned UpperBound = FT->getNumParams() + (FT->isVarArg() ? 1 : 0);
2613 Check(FirstArgIdx > 0 && FirstArgIdx <= UpperBound,
2614 "modular-format attribute first arg index is out of bounds", V);
2615 }
2616}
// Verify an "unknown" !prof annotation: besides the marker it carries exactly
// one extra operand, a non-empty string naming the pass that dropped the
// profile. Check() returns from this function on failure, so the later
// dereference of PassName is safe.
void Verifier::verifyUnknownProfileMetadata(MDNode *MD) {
  Check(MD->getNumOperands() == 2,
        "'unknown' !prof should have a single additional operand", MD);
  auto *PassName = dyn_cast<MDString>(MD->getOperand(1));
  Check(PassName != nullptr,
        "'unknown' !prof should have an additional operand of type "
        "string");
  Check(!PassName->getString().empty(),
        "the 'unknown' !prof operand should not be an empty string");
}
2627
/// Verify function-level metadata attachments (!prof and !kcfi_type).
/// NOTE(review): several Check(...) opening lines appear truncated in this
/// copy (orphaned string arguments below); restore them from upstream.
void Verifier::verifyFunctionMetadata(
    ArrayRef<std::pair<unsigned, MDNode *>> MDs) {
  for (const auto &Pair : MDs) {
    if (Pair.first == LLVMContext::MD_prof) {
      MDNode *MD = Pair.second;
      Check(MD->getNumOperands() >= 2,
            "!prof annotations should have no less than 2 operands", MD);
      // We may have functions that are synthesized by the compiler, e.g. in
      // WPD, that we can't currently determine the entry count.
      // NOTE(review): the comparison operand of equalsStr is missing here
      // (truncated line) — presumably the 'unknown' profile marker string.
      if (MD->getOperand(0).equalsStr(
        verifyUnknownProfileMetadata(MD);
        continue;
      }

      // Check first operand.
      Check(MD->getOperand(0) != nullptr, "first operand should not be null",
            MD);
            "expected string with name of the !prof annotation", MD);
      MDString *MDS = cast<MDString>(MD->getOperand(0));
      StringRef ProfName = MDS->getString();
            "first operand should be 'function_entry_count'"
            " or 'synthetic_function_entry_count'",
            MD);

      // Check second operand.
      Check(MD->getOperand(1) != nullptr, "second operand should not be null",
            MD);
            "expected integer argument to function_entry_count", MD);
    } else if (Pair.first == LLVMContext::MD_kcfi_type) {
      // A !kcfi_type attachment carries exactly one 32-bit integer constant.
      MDNode *MD = Pair.second;
      Check(MD->getNumOperands() == 1,
            "!kcfi_type must have exactly one operand", MD);
      Check(MD->getOperand(0) != nullptr, "!kcfi_type operand must not be null",
            MD);
            "expected a constant operand for !kcfi_type", MD);
      Constant *C = cast<ConstantAsMetadata>(MD->getOperand(0))->getValue();
      Check(isa<ConstantInt>(C) && isa<IntegerType>(C->getType()),
            "expected a constant integer operand for !kcfi_type", MD);
            "expected a 32-bit integer constant operand for !kcfi_type", MD);
    }
  }
}
2677
2678void Verifier::visitConstantExprsRecursively(const Constant *EntryC) {
2679 if (!ConstantExprVisited.insert(EntryC).second)
2680 return;
2681
2683 Stack.push_back(EntryC);
2684
2685 while (!Stack.empty()) {
2686 const Constant *C = Stack.pop_back_val();
2687
2688 // Check this constant expression.
2689 if (const auto *CE = dyn_cast<ConstantExpr>(C))
2690 visitConstantExpr(CE);
2691
2692 if (const auto *CPA = dyn_cast<ConstantPtrAuth>(C))
2693 visitConstantPtrAuth(CPA);
2694
2695 if (const auto *GV = dyn_cast<GlobalValue>(C)) {
2696 // Global Values get visited separately, but we do need to make sure
2697 // that the global value is in the correct module
2698 Check(GV->getParent() == &M, "Referencing global in another module!",
2699 EntryC, &M, GV, GV->getParent());
2700 continue;
2701 }
2702
2703 // Visit all sub-expressions.
2704 for (const Use &U : C->operands()) {
2705 const auto *OpC = dyn_cast<Constant>(U);
2706 if (!OpC)
2707 continue;
2708 if (!ConstantExprVisited.insert(OpC).second)
2709 continue;
2710 Stack.push_back(OpC);
2711 }
2712 }
2713}
2714
2715void Verifier::visitConstantExpr(const ConstantExpr *CE) {
2716 if (CE->getOpcode() == Instruction::BitCast)
2717 Check(CastInst::castIsValid(Instruction::BitCast, CE->getOperand(0),
2718 CE->getType()),
2719 "Invalid bitcast", CE);
2720 else if (CE->getOpcode() == Instruction::PtrToAddr)
2721 checkPtrToAddr(CE->getOperand(0)->getType(), CE->getType(), *CE);
2722}
2723
/// Verify structural invariants of a ptrauth signed-pointer constant.
/// NOTE(review): three Check(...) opening lines appear truncated in this copy
/// (orphaned message strings below); restore them from upstream.
void Verifier::visitConstantPtrAuth(const ConstantPtrAuth *CPA) {
  Check(CPA->getPointer()->getType()->isPointerTy(),
        "signed ptrauth constant base pointer must have pointer type");

  Check(CPA->getType() == CPA->getPointer()->getType(),
        "signed ptrauth constant must have same type as its base pointer");

  // The signing key is a small integer selector; the target defines its range.
  Check(CPA->getKey()->getBitWidth() == 32,
        "signed ptrauth constant key must be i32 constant integer");

        "signed ptrauth constant address discriminator must be a pointer");

  Check(CPA->getDiscriminator()->getBitWidth() == 64,
        "signed ptrauth constant discriminator must be i64 constant integer");

        "signed ptrauth constant deactivation symbol must be a pointer");

        "signed ptrauth constant deactivation symbol must be a global value "
        "or null");
}
2748
2749bool Verifier::verifyAttributeCount(AttributeList Attrs, unsigned Params) {
2750 // There shouldn't be more attribute sets than there are parameters plus the
2751 // function and return value.
2752 return Attrs.getNumAttrSets() <= Params + 2;
2753}
2754
/// Verify an inline-asm call site against its parsed constraint string:
/// indirect operands must be typed pointers (with elementtype), and label
/// constraints may only appear on callbr with a matching indirect-dest count.
/// NOTE(review): one Check(...) opening line appears truncated in this copy
/// (orphaned message string in the indirect branch); restore from upstream.
void Verifier::verifyInlineAsmCall(const CallBase &Call) {
  const InlineAsm *IA = cast<InlineAsm>(Call.getCalledOperand());
  unsigned ArgNo = 0;
  unsigned LabelNo = 0;
  for (const InlineAsm::ConstraintInfo &CI : IA->ParseConstraints()) {
    if (CI.Type == InlineAsm::isLabel) {
      ++LabelNo;
      continue;
    }

    // Only deal with constraints that correspond to call arguments.
    if (!CI.hasArg())
      continue;

    if (CI.isIndirect) {
      const Value *Arg = Call.getArgOperand(ArgNo);
      Check(Arg->getType()->isPointerTy(),
            "Operand for indirect constraint must have pointer type", &Call);

            "Operand for indirect constraint must have elementtype attribute",
            &Call);
    } else {
      Check(!Call.paramHasAttr(ArgNo, Attribute::ElementType),
            "Elementtype attribute can only be applied for indirect "
            "constraints",
            &Call);
    }

    // Only constraints with arguments consume an argument slot.
    ArgNo++;
  }

  if (auto *CallBr = dyn_cast<CallBrInst>(&Call)) {
    Check(LabelNo == CallBr->getNumIndirectDests(),
          "Number of label constraints does not match number of callbr dests",
          &Call);
  } else {
    Check(LabelNo == 0, "Label constraints can only be used with callbr",
          &Call);
  }
}
2796
/// Verify that statepoint intrinsic is well formed.
/// Operand layout checked here: [id, numPatchBytes, target, numCallArgs,
/// flags, call args..., numTransitionArgs, numDeoptArgs] — hence the index
/// arithmetic on argument positions below.
void Verifier::verifyStatepoint(const CallBase &Call) {
  assert(Call.getIntrinsicID() == Intrinsic::experimental_gc_statepoint);

  // NOTE(review): the opening Check(...) line for the memory-effects test is
  // truncated in this copy (only its message string remains); restore from
  // upstream.
        "gc.statepoint must read and write all memory to preserve "
        "reordering restrictions required by safepoint semantics",
        Call);

  const int64_t NumPatchBytes =
      cast<ConstantInt>(Call.getArgOperand(1))->getSExtValue();
  assert(isInt<32>(NumPatchBytes) && "NumPatchBytesV is an i32!");
  Check(NumPatchBytes >= 0,
        "gc.statepoint number of patchable bytes must be "
        "positive",
        Call);

  // The wrapped callee's signature is carried via elementtype on operand 2.
  Type *TargetElemType = Call.getParamElementType(2);
  Check(TargetElemType,
        "gc.statepoint callee argument must have elementtype attribute", Call);
  FunctionType *TargetFuncType = dyn_cast<FunctionType>(TargetElemType);
  Check(TargetFuncType,
        "gc.statepoint callee elementtype must be function type", Call);

  const int NumCallArgs = cast<ConstantInt>(Call.getArgOperand(3))->getZExtValue();
  Check(NumCallArgs >= 0,
        "gc.statepoint number of arguments to underlying call "
        "must be positive",
        Call);
  const int NumParams = (int)TargetFuncType->getNumParams();
  if (TargetFuncType->isVarArg()) {
    Check(NumCallArgs >= NumParams,
          "gc.statepoint mismatch in number of vararg call args", Call);

    // TODO: Remove this limitation
    Check(TargetFuncType->getReturnType()->isVoidTy(),
          "gc.statepoint doesn't support wrapping non-void "
          "vararg functions yet",
          Call);
  } else
    Check(NumCallArgs == NumParams,
          "gc.statepoint mismatch in number of call args", Call);

  const uint64_t Flags
    = cast<ConstantInt>(Call.getArgOperand(4))->getZExtValue();
  Check((Flags & ~(uint64_t)StatepointFlags::MaskAll) == 0,
        "unknown flag used in gc.statepoint flags argument", Call);

  // Verify that the types of the call parameter arguments match
  // the type of the wrapped callee.
  AttributeList Attrs = Call.getAttributes();
  for (int i = 0; i < NumParams; i++) {
    Type *ParamType = TargetFuncType->getParamType(i);
    Type *ArgType = Call.getArgOperand(5 + i)->getType();
    Check(ArgType == ParamType,
          "gc.statepoint call argument does not match wrapped "
          "function type",
          Call);

    if (TargetFuncType->isVarArg()) {
      AttributeSet ArgAttrs = Attrs.getParamAttrs(5 + i);
      Check(!ArgAttrs.hasAttribute(Attribute::StructRet),
            "Attribute 'sret' cannot be used for vararg call arguments!", Call);
    }
  }

  // Call args occupy operands [5, 4 + NumCallArgs].
  const int EndCallArgsInx = 4 + NumCallArgs;

  const Value *NumTransitionArgsV = Call.getArgOperand(EndCallArgsInx + 1);
  Check(isa<ConstantInt>(NumTransitionArgsV),
        "gc.statepoint number of transition arguments "
        "must be constant integer",
        Call);
  const int NumTransitionArgs =
      cast<ConstantInt>(NumTransitionArgsV)->getZExtValue();
  Check(NumTransitionArgs == 0,
        "gc.statepoint w/inline transition bundle is deprecated", Call);
  const int EndTransitionArgsInx = EndCallArgsInx + 1 + NumTransitionArgs;

  const Value *NumDeoptArgsV = Call.getArgOperand(EndTransitionArgsInx + 1);
  Check(isa<ConstantInt>(NumDeoptArgsV),
        "gc.statepoint number of deoptimization arguments "
        "must be constant integer",
        Call);
  const int NumDeoptArgs = cast<ConstantInt>(NumDeoptArgsV)->getZExtValue();
  Check(NumDeoptArgs == 0,
        "gc.statepoint w/inline deopt operands is deprecated", Call);

  // 5 fixed header operands + call args + the two (zero-valued) counts.
  const int ExpectedNumArgs = 7 + NumCallArgs;
  Check(ExpectedNumArgs == (int)Call.arg_size(),
        "gc.statepoint too many arguments", Call);

  // Check that the only uses of this gc.statepoint are gc.result or
  // gc.relocate calls which are tied to this statepoint and thus part
  // of the same statepoint sequence
  for (const User *U : Call.users()) {
    const CallInst *UserCall = dyn_cast<const CallInst>(U);
    Check(UserCall, "illegal use of statepoint token", Call, U);
    if (!UserCall)
      continue;
    Check(isa<GCRelocateInst>(UserCall) || isa<GCResultInst>(UserCall),
          "gc.result or gc.relocate are the only value uses "
          "of a gc.statepoint",
          Call, U);
    if (isa<GCResultInst>(UserCall)) {
      Check(UserCall->getArgOperand(0) == &Call,
            "gc.result connected to wrong gc.statepoint", Call, UserCall);
    // NOTE(review): this tests `Call` (the statepoint itself) rather than
    // `UserCall`; a statepoint is never a GCRelocateInst, so this branch
    // looks dead — confirm intent against upstream.
    } else if (isa<GCRelocateInst>(Call)) {
      Check(UserCall->getArgOperand(0) == &Call,
            "gc.relocate connected to wrong gc.statepoint", Call, UserCall);
    }
  }

  // Note: It is legal for a single derived pointer to be listed multiple
  // times.  It's non-optimal, but it is legal.  It can also happen after
  // insertion if we strip a bitcast away.
  // Note: It is really tempting to check that each base is relocated and
  // that a derived pointer is never reused as a base pointer.  This turns
  // out to be problematic since optimizations run after safepoint insertion
  // can recognize equality properties that the insertion logic doesn't know
  // about.  See example statepoint.ll in the verifier subdirectory
}
2920
2921void Verifier::verifyFrameRecoverIndices() {
2922 for (auto &Counts : FrameEscapeInfo) {
2923 Function *F = Counts.first;
2924 unsigned EscapedObjectCount = Counts.second.first;
2925 unsigned MaxRecoveredIndex = Counts.second.second;
2926 Check(MaxRecoveredIndex <= EscapedObjectCount,
2927 "all indices passed to llvm.localrecover must be less than the "
2928 "number of arguments passed to llvm.localescape in the parent "
2929 "function",
2930 F);
2931 }
2932}
2933
2934static Instruction *getSuccPad(Instruction *Terminator) {
2935 BasicBlock *UnwindDest;
2936 if (auto *II = dyn_cast<InvokeInst>(Terminator))
2937 UnwindDest = II->getUnwindDest();
2938 else if (auto *CSI = dyn_cast<CatchSwitchInst>(Terminator))
2939 UnwindDest = CSI->getUnwindDest();
2940 else
2941 UnwindDest = cast<CleanupReturnInst>(Terminator)->getUnwindDest();
2942 return &*UnwindDest->getFirstNonPHIIt();
2943}
2944
/// Detect cycles among sibling EH funclets: since each pad in the
/// SiblingFuncletInfo map has exactly one unwind successor, following
/// successors from any pad either terminates or closes a cycle, and a cycle
/// means sibling pads (illegally) handle each other's exceptions.
void Verifier::verifySiblingFuncletUnwinds() {
  llvm::TimeTraceScope timeScope("Verifier verify sibling funclet unwinds");
  // Visited: pads whose successor chain has already been fully checked.
  // Active: pads on the chain currently being walked (cycle membership test).
  SmallPtrSet<Instruction *, 8> Visited;
  SmallPtrSet<Instruction *, 8> Active;
  for (const auto &Pair : SiblingFuncletInfo) {
    Instruction *PredPad = Pair.first;
    if (Visited.count(PredPad))
      continue;
    Active.insert(PredPad);
    Instruction *Terminator = Pair.second;
    do {
      Instruction *SuccPad = getSuccPad(Terminator);
      if (Active.count(SuccPad)) {
        // Found a cycle; report error
        Instruction *CyclePad = SuccPad;
        SmallVector<Instruction *, 8> CycleNodes;
        // Re-walk the cycle once to collect its members for the diagnostic;
        // a pad's terminator is listed too when it differs from the pad.
        do {
          CycleNodes.push_back(CyclePad);
          Instruction *CycleTerminator = SiblingFuncletInfo[CyclePad];
          if (CycleTerminator != CyclePad)
            CycleNodes.push_back(CycleTerminator);
          CyclePad = getSuccPad(CycleTerminator);
        } while (CyclePad != SuccPad);
        Check(false, "EH pads can't handle each other's exceptions",
              ArrayRef<Instruction *>(CycleNodes));
      }
      // Don't re-walk a node we've already checked
      if (!Visited.insert(SuccPad).second)
        break;
      // Walk to this successor if it has a map entry.
      PredPad = SuccPad;
      auto TermI = SiblingFuncletInfo.find(PredPad);
      if (TermI == SiblingFuncletInfo.end())
        break;
      Terminator = TermI->second;
      Active.insert(PredPad);
    } while (true);
    // Each node only has one successor, so we've walked all the active
    // nodes' successors.
    Active.clear();
  }
}
2987
// visitFunction - Verify that a function is ok.
//
// Checks, in order: signature/linkage/alignment invariants, attributes,
// calling-convention restrictions, per-argument types, function-level
// metadata attachments, personality, intrinsic usage, and finally that all
// !dbg attachments lead back to this function's DISubprogram.
void Verifier::visitFunction(const Function &F) {
  visitGlobalValue(F);

  // Check function arguments.
  FunctionType *FT = F.getFunctionType();
  unsigned NumArgs = F.arg_size();

  Check(&Context == &F.getContext(),
        "Function context does not match Module context!", &F);

  Check(!F.hasCommonLinkage(), "Functions may not have common linkage", &F);
  Check(FT->getNumParams() == NumArgs,
        "# formal arguments must match # of arguments for function type!", &F,
        FT);
  Check(F.getReturnType()->isFirstClassType() ||
            F.getReturnType()->isVoidTy() || F.getReturnType()->isStructTy(),
        "Functions cannot return aggregate values!", &F);

  Check(!F.hasStructRetAttr() || F.getReturnType()->isVoidTy(),
        "Invalid struct return type!", &F);

  if (MaybeAlign A = F.getAlign()) {
    Check(A->value() <= Value::MaximumAlignment,
          "huge alignment values are unsupported", &F);
  }

  AttributeList Attrs = F.getAttributes();

  Check(verifyAttributeCount(Attrs, FT->getNumParams()),
        "Attribute after last parameter!", &F);

  bool IsIntrinsic = F.isIntrinsic();

  // Check function attributes.
  verifyFunctionAttrs(FT, Attrs, &F, IsIntrinsic, /* IsInlineAsm */ false);

  // On function declarations/definitions, we do not support the builtin
  // attribute. We do not check this in VerifyFunctionAttrs since that is
  // checking for Attributes that can/can not ever be on functions.
  Check(!Attrs.hasFnAttr(Attribute::Builtin),
        "Attribute 'builtin' can only be applied to a callsite.", &F);

  Check(!Attrs.hasAttrSomewhere(Attribute::ElementType),
        "Attribute 'elementtype' can only be applied to a callsite.", &F);

  Check(!Attrs.hasFnAttr("aarch64_zt0_undef"),
        "Attribute 'aarch64_zt0_undef' can only be applied to a callsite.");

  // Naked functions have no usable frame, so arguments may not be accessed.
  if (Attrs.hasFnAttr(Attribute::Naked))
    for (const Argument &Arg : F.args())
      Check(Arg.use_empty(), "cannot use argument of naked function", &Arg);

  // Check that this function meets the restrictions on this calling convention.
  // Sometimes varargs is used for perfectly forwarding thunks, so some of these
  // restrictions can be lifted.
  switch (F.getCallingConv()) {
  default:
  case CallingConv::C:
    break;
  case CallingConv::X86_INTR: {
    Check(F.arg_empty() || Attrs.hasParamAttr(0, Attribute::ByVal),
          "Calling convention parameter requires byval", &F);
    break;
  }
  case CallingConv::AMDGPU_KERNEL:
  case CallingConv::SPIR_KERNEL:
  case CallingConv::AMDGPU_CS_Chain:
  case CallingConv::AMDGPU_CS_ChainPreserve:
    Check(F.getReturnType()->isVoidTy(),
          "Calling convention requires void return type", &F);
    [[fallthrough]];
  case CallingConv::AMDGPU_VS:
  case CallingConv::AMDGPU_HS:
  case CallingConv::AMDGPU_GS:
  case CallingConv::AMDGPU_PS:
  case CallingConv::AMDGPU_CS:
    Check(!F.hasStructRetAttr(), "Calling convention does not allow sret", &F);
    if (F.getCallingConv() != CallingConv::SPIR_KERNEL) {
      const unsigned StackAS = DL.getAllocaAddrSpace();
      unsigned i = 0;
      for (const Argument &Arg : F.args()) {
        Check(!Attrs.hasParamAttr(i, Attribute::ByVal),
              "Calling convention disallows byval", &F);
        Check(!Attrs.hasParamAttr(i, Attribute::Preallocated),
              "Calling convention disallows preallocated", &F);
        Check(!Attrs.hasParamAttr(i, Attribute::InAlloca),
              "Calling convention disallows inalloca", &F);

        if (Attrs.hasParamAttr(i, Attribute::ByRef)) {
          // FIXME: Should also disallow LDS and GDS, but we don't have the enum
          // value here.
          Check(Arg.getType()->getPointerAddressSpace() != StackAS,
                "Calling convention disallows stack byref", &F);
        }

        ++i;
      }
    }

    [[fallthrough]];
  case CallingConv::Fast:
  case CallingConv::Cold:
  case CallingConv::Intel_OCL_BI:
  case CallingConv::PTX_Kernel:
  case CallingConv::PTX_Device:
    Check(!F.isVarArg(),
          "Calling convention does not support varargs or "
          "perfect forwarding!",
          &F);
    break;
  case CallingConv::AMDGPU_Gfx_WholeWave:
    Check(!F.arg_empty() && F.arg_begin()->getType()->isIntegerTy(1),
          "Calling convention requires first argument to be i1", &F);
    Check(!F.arg_begin()->hasInRegAttr(),
          "Calling convention requires first argument to not be inreg", &F);
    Check(!F.isVarArg(),
          "Calling convention does not support varargs or "
          "perfect forwarding!",
          &F);
    break;
  }

  // Check that the argument values match the function type for this function...
  unsigned i = 0;
  for (const Argument &Arg : F.args()) {
    Check(Arg.getType() == FT->getParamType(i),
          "Argument value does not match function argument type!", &Arg,
          FT->getParamType(i));
    Check(Arg.getType()->isFirstClassType(),
          "Function arguments must have first-class types!", &Arg);
    if (!IsIntrinsic) {
      Check(!Arg.getType()->isMetadataTy(),
            "Function takes metadata but isn't an intrinsic", &Arg, &F);
      Check(!Arg.getType()->isTokenLikeTy(),
            "Function takes token but isn't an intrinsic", &Arg, &F);
      Check(!Arg.getType()->isX86_AMXTy(),
            "Function takes x86_amx but isn't an intrinsic", &Arg, &F);
    }

    // Check that swifterror argument is only used by loads and stores.
    if (Attrs.hasParamAttr(i, Attribute::SwiftError)) {
      verifySwiftErrorValue(&Arg);
    }
    ++i;
  }

  if (!IsIntrinsic) {
    Check(!F.getReturnType()->isTokenLikeTy(),
          "Function returns a token but isn't an intrinsic", &F);
    Check(!F.getReturnType()->isX86_AMXTy(),
          "Function returns a x86_amx but isn't an intrinsic", &F);
  }

  // Get the function metadata attachments.
  // NOTE(review): the declaration of `MDs` (a SmallVector of
  // (kind, MDNode*) pairs) appears to be a dropped line in this copy —
  // restore it from upstream.
  F.getAllMetadata(MDs);
  assert(F.hasMetadata() != MDs.empty() && "Bit out-of-sync");
  verifyFunctionMetadata(MDs);

  // Check validity of the personality function
  if (F.hasPersonalityFn()) {
    auto *Per = dyn_cast<Function>(F.getPersonalityFn()->stripPointerCasts());
    if (Per)
      Check(Per->getParent() == F.getParent(),
            "Referencing personality function in another module!", &F,
            F.getParent(), Per, Per->getParent());
  }

  // EH funclet coloring can be expensive, recompute on-demand
  BlockEHFuncletColors.clear();

  if (F.isMaterializable()) {
    // Function has a body somewhere we can't see.
    Check(MDs.empty(), "unmaterialized function cannot have metadata", &F,
          MDs.empty() ? nullptr : MDs.front().second);
  } else if (F.isDeclaration()) {
    for (const auto &I : MDs) {
      // This is used for call site debug information.
      CheckDI(I.first != LLVMContext::MD_dbg ||
                  !cast<DISubprogram>(I.second)->isDistinct(),
              "function declaration may only have a unique !dbg attachment",
              &F);
      Check(I.first != LLVMContext::MD_prof,
            "function declaration may not have a !prof attachment", &F);

      // Verify the metadata itself.
      visitMDNode(*I.second, AreDebugLocsAllowed::Yes);
    }
    Check(!F.hasPersonalityFn(),
          "Function declaration shouldn't have a personality routine", &F);
  } else {
    // Verify that this function (which has a body) is not named "llvm.*".  It
    // is not legal to define intrinsics.
    Check(!IsIntrinsic, "llvm intrinsics cannot be defined!", &F);

    // Check the entry node
    const BasicBlock *Entry = &F.getEntryBlock();
    Check(pred_empty(Entry),
          "Entry block to function must not have predecessors!", Entry);

    // The address of the entry block cannot be taken, unless it is dead.
    if (Entry->hasAddressTaken()) {
      Check(!BlockAddress::lookup(Entry)->isConstantUsed(),
            "blockaddress may not be used with the entry block!", Entry);
    }

    unsigned NumDebugAttachments = 0, NumProfAttachments = 0,
             NumKCFIAttachments = 0;
    // Visit metadata attachments.
    for (const auto &I : MDs) {
      // Verify that the attachment is legal.
      auto AllowLocs = AreDebugLocsAllowed::No;
      switch (I.first) {
      default:
        break;
      case LLVMContext::MD_dbg: {
        ++NumDebugAttachments;
        CheckDI(NumDebugAttachments == 1,
                "function must have a single !dbg attachment", &F, I.second);
        CheckDI(isa<DISubprogram>(I.second),
                "function !dbg attachment must be a subprogram", &F, I.second);
        CheckDI(cast<DISubprogram>(I.second)->isDistinct(),
                "function definition may only have a distinct !dbg attachment",
                &F);

        auto *SP = cast<DISubprogram>(I.second);
        const Function *&AttachedTo = DISubprogramAttachments[SP];
        CheckDI(!AttachedTo || AttachedTo == &F,
                "DISubprogram attached to more than one function", SP, &F);
        AttachedTo = &F;
        AllowLocs = AreDebugLocsAllowed::Yes;
        break;
      }
      case LLVMContext::MD_prof:
        ++NumProfAttachments;
        Check(NumProfAttachments == 1,
              "function must have a single !prof attachment", &F, I.second);
        break;
      case LLVMContext::MD_kcfi_type:
        ++NumKCFIAttachments;
        Check(NumKCFIAttachments == 1,
              "function must have a single !kcfi_type attachment", &F,
              I.second);
        break;
      }

      // Verify the metadata itself.
      visitMDNode(*I.second, AllowLocs);
    }
  }

  // If this function is actually an intrinsic, verify that it is only used in
  // direct call/invokes, never having its "address taken".
  // Only do this if the module is materialized, otherwise we don't have all the
  // uses.
  if (F.isIntrinsic() && F.getParent()->isMaterialized()) {
    const User *U;
    if (F.hasAddressTaken(&U, false, true, false,
                          /*IgnoreARCAttachedCall=*/true))
      Check(false, "Invalid user of intrinsic instruction!", U);
  }

  // Check intrinsics' signatures.
  switch (F.getIntrinsicID()) {
  case Intrinsic::experimental_gc_get_pointer_base: {
    FunctionType *FT = F.getFunctionType();
    Check(FT->getNumParams() == 1, "wrong number of parameters", F);
    Check(isa<PointerType>(F.getReturnType()),
          "gc.get.pointer.base must return a pointer", F);
    Check(FT->getParamType(0) == F.getReturnType(),
          "gc.get.pointer.base operand and result must be of the same type", F);
    break;
  }
  case Intrinsic::experimental_gc_get_pointer_offset: {
    FunctionType *FT = F.getFunctionType();
    Check(FT->getNumParams() == 1, "wrong number of parameters", F);
    Check(isa<PointerType>(FT->getParamType(0)),
          "gc.get.pointer.offset operand must be a pointer", F);
    Check(F.getReturnType()->isIntegerTy(),
          "gc.get.pointer.offset must return integer", F);
    break;
  }
  }

  auto *N = F.getSubprogram();
  HasDebugInfo = (N != nullptr);
  if (!HasDebugInfo)
    return;

  // Check that all !dbg attachments lead to back to N.
  //
  // FIXME: Check this incrementally while visiting !dbg attachments.
  // FIXME: Only check when N is the canonical subprogram for F.
  SmallPtrSet<const MDNode *, 32> Seen;
  auto VisitDebugLoc = [&](const Instruction &I, const MDNode *Node) {
    // Be careful about using DILocation here since we might be dealing with
    // broken code (this is the Verifier after all).
    const DILocation *DL = dyn_cast_or_null<DILocation>(Node);
    if (!DL)
      return;
    if (!Seen.insert(DL).second)
      return;

    Metadata *Parent = DL->getRawScope();
    CheckDI(Parent && isa<DILocalScope>(Parent),
            "DILocation's scope must be a DILocalScope", N, &F, &I, DL, Parent);

    DILocalScope *Scope = DL->getInlinedAtScope();
    Check(Scope, "Failed to find DILocalScope", DL);

    if (!Seen.insert(Scope).second)
      return;

    DISubprogram *SP = Scope->getSubprogram();

    // Scope and SP could be the same MDNode and we don't want to skip
    // validation in that case
    if ((Scope != SP) && !Seen.insert(SP).second)
      return;

    CheckDI(SP->describes(&F),
            "!dbg attachment points at wrong subprogram for function", N, &F,
            &I, DL, Scope, SP);
  };
  for (auto &BB : F)
    for (auto &I : BB) {
      VisitDebugLoc(I, I.getDebugLoc().getAsMDNode());
      // The llvm.loop annotations also contain two DILocations.
      if (auto MD = I.getMetadata(LLVMContext::MD_loop))
        for (unsigned i = 1; i < MD->getNumOperands(); ++i)
          VisitDebugLoc(I, dyn_cast_or_null<MDNode>(MD->getOperand(i)));
      if (BrokenDebugInfo)
        return;
    }
}
3325
3326// verifyBasicBlock - Verify that a basic block is well formed...
3327//
3328void Verifier::visitBasicBlock(BasicBlock &BB) {
3329 InstsInThisBlock.clear();
3330 ConvergenceVerifyHelper.visit(BB);
3331
3332 // Ensure that basic blocks have terminators!
3333 Check(BB.getTerminator(), "Basic Block does not have terminator!", &BB);
3334
3335 // Check constraints that this basic block imposes on all of the PHI nodes in
3336 // it.
3337 if (isa<PHINode>(BB.front())) {
3338 SmallVector<BasicBlock *, 8> Preds(predecessors(&BB));
3340 llvm::sort(Preds);
3341 for (const PHINode &PN : BB.phis()) {
3342 Check(PN.getNumIncomingValues() == Preds.size(),
3343 "PHINode should have one entry for each predecessor of its "
3344 "parent basic block!",
3345 &PN);
3346
3347 // Get and sort all incoming values in the PHI node...
3348 Values.clear();
3349 Values.reserve(PN.getNumIncomingValues());
3350 for (unsigned i = 0, e = PN.getNumIncomingValues(); i != e; ++i)
3351 Values.push_back(
3352 std::make_pair(PN.getIncomingBlock(i), PN.getIncomingValue(i)));
3353 llvm::sort(Values);
3354
3355 for (unsigned i = 0, e = Values.size(); i != e; ++i) {
3356 // Check to make sure that if there is more than one entry for a
3357 // particular basic block in this PHI node, that the incoming values are
3358 // all identical.
3359 //
3360 Check(i == 0 || Values[i].first != Values[i - 1].first ||
3361 Values[i].second == Values[i - 1].second,
3362 "PHI node has multiple entries for the same basic block with "
3363 "different incoming values!",
3364 &PN, Values[i].first, Values[i].second, Values[i - 1].second);
3365
3366 // Check to make sure that the predecessors and PHI node entries are
3367 // matched up.
3368 Check(Values[i].first == Preds[i],
3369 "PHI node entries do not match predecessors!", &PN,
3370 Values[i].first, Preds[i]);
3371 }
3372 }
3373 }
3374
3375 // Check that all instructions have their parent pointers set up correctly.
3376 for (auto &I : BB)
3377 {
3378 Check(I.getParent() == &BB, "Instruction has bogus parent pointer!");
3379 }
3380
3381 // Confirm that no issues arise from the debug program.
3382 CheckDI(!BB.getTrailingDbgRecords(), "Basic Block has trailing DbgRecords!",
3383 &BB);
3384}
3385
3386void Verifier::visitTerminator(Instruction &I) {
3387 // Ensure that terminators only exist at the end of the basic block.
3388 Check(&I == I.getParent()->getTerminator(),
3389 "Terminator found in the middle of a basic block!", I.getParent());
3390 visitInstruction(I);
3391}
3392
3393void Verifier::visitBranchInst(BranchInst &BI) {
3394 if (BI.isConditional()) {
3396 "Branch condition is not 'i1' type!", &BI, BI.getCondition());
3397 }
3398 visitTerminator(BI);
3399}
3400
3401void Verifier::visitReturnInst(ReturnInst &RI) {
3402 Function *F = RI.getParent()->getParent();
3403 unsigned N = RI.getNumOperands();
3404 if (F->getReturnType()->isVoidTy())
3405 Check(N == 0,
3406 "Found return instr that returns non-void in Function of void "
3407 "return type!",
3408 &RI, F->getReturnType());
3409 else
3410 Check(N == 1 && F->getReturnType() == RI.getOperand(0)->getType(),
3411 "Function return type does not match operand "
3412 "type of return inst!",
3413 &RI, F->getReturnType());
3414
3415 // Check to make sure that the return value has necessary properties for
3416 // terminators...
3417 visitTerminator(RI);
3418}
3419
3420void Verifier::visitSwitchInst(SwitchInst &SI) {
3421 Check(SI.getType()->isVoidTy(), "Switch must have void result type!", &SI);
3422 // Check to make sure that all of the constants in the switch instruction
3423 // have the same type as the switched-on value.
3424 Type *SwitchTy = SI.getCondition()->getType();
3425 SmallPtrSet<ConstantInt*, 32> Constants;
3426 for (auto &Case : SI.cases()) {
3427 Check(isa<ConstantInt>(SI.getOperand(Case.getCaseIndex() * 2 + 2)),
3428 "Case value is not a constant integer.", &SI);
3429 Check(Case.getCaseValue()->getType() == SwitchTy,
3430 "Switch constants must all be same type as switch value!", &SI);
3431 Check(Constants.insert(Case.getCaseValue()).second,
3432 "Duplicate integer as switch case", &SI, Case.getCaseValue());
3433 }
3434
3435 visitTerminator(SI);
3436}
3437
3438void Verifier::visitIndirectBrInst(IndirectBrInst &BI) {
3440 "Indirectbr operand must have pointer type!", &BI);
3441 for (unsigned i = 0, e = BI.getNumDestinations(); i != e; ++i)
3443 "Indirectbr destinations must all have pointer type!", &BI);
3444
3445 visitTerminator(BI);
3446}
3447
/// Verify a callbr: either inline asm (asm-goto, may not unwind) or one of a
/// small set of allowed intrinsics with restrictions on its indirect dests.
/// NOTE(review): the opening Check(...) line of the non-asm branch is
/// truncated in this copy (orphaned message string); restore from upstream.
void Verifier::visitCallBrInst(CallBrInst &CBI) {
  if (!CBI.isInlineAsm()) {
        "Callbr: indirect function / invalid signature");
    Check(!CBI.hasOperandBundles(),
          "Callbr for intrinsics currently doesn't support operand bundles");

    switch (CBI.getIntrinsicID()) {
    case Intrinsic::amdgcn_kill: {
      Check(CBI.getNumIndirectDests() == 1,
            "Callbr amdgcn_kill only supports one indirect dest");
      // The single indirect dest must begin with unreachable (or the AMDGPU
      // pseudo-intrinsic equivalent).
      bool Unreachable = isa<UnreachableInst>(CBI.getIndirectDest(0)->begin());
      CallInst *Call = dyn_cast<CallInst>(CBI.getIndirectDest(0)->begin());
      Check(Unreachable || (Call && Call->getIntrinsicID() ==
                                        Intrinsic::amdgcn_unreachable),
            "Callbr amdgcn_kill indirect dest needs to be unreachable");
      break;
    }
    default:
      CheckFailed(
          "Callbr currently only supports asm-goto and selected intrinsics");
    }
    visitIntrinsicCall(CBI.getIntrinsicID(), CBI);
  } else {
    const InlineAsm *IA = cast<InlineAsm>(CBI.getCalledOperand());
    Check(!IA->canThrow(), "Unwinding from Callbr is not allowed");

    verifyInlineAsmCall(CBI);
  }
  visitTerminator(CBI);
}
3479
3480void Verifier::visitSelectInst(SelectInst &SI) {
3481 Check(!SelectInst::areInvalidOperands(SI.getOperand(0), SI.getOperand(1),
3482 SI.getOperand(2)),
3483 "Invalid operands for select instruction!", &SI);
3484
3485 Check(SI.getTrueValue()->getType() == SI.getType(),
3486 "Select values must have same type as select instruction!", &SI);
3487 visitInstruction(SI);
3488}
3489
3490/// visitUserOp1 - User defined operators shouldn't live beyond the lifetime of
3491/// a pass, if any exist, it's an error.
3492///
3493void Verifier::visitUserOp1(Instruction &I) {
3494 Check(false, "User-defined operators should not live outside of a pass!", &I);
3495}
3496
3497void Verifier::visitTruncInst(TruncInst &I) {
3498 // Get the source and destination types
3499 Type *SrcTy = I.getOperand(0)->getType();
3500 Type *DestTy = I.getType();
3501
3502 // Get the size of the types in bits, we'll need this later
3503 unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
3504 unsigned DestBitSize = DestTy->getScalarSizeInBits();
3505
3506 Check(SrcTy->isIntOrIntVectorTy(), "Trunc only operates on integer", &I);
3507 Check(DestTy->isIntOrIntVectorTy(), "Trunc only produces integer", &I);
3508 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(),
3509 "trunc source and destination must both be a vector or neither", &I);
3510 Check(SrcBitSize > DestBitSize, "DestTy too big for Trunc", &I);
3511
3512 visitInstruction(I);
3513}
3514
3515void Verifier::visitZExtInst(ZExtInst &I) {
3516 // Get the source and destination types
3517 Type *SrcTy = I.getOperand(0)->getType();
3518 Type *DestTy = I.getType();
3519
3520 // Get the size of the types in bits, we'll need this later
3521 Check(SrcTy->isIntOrIntVectorTy(), "ZExt only operates on integer", &I);
3522 Check(DestTy->isIntOrIntVectorTy(), "ZExt only produces an integer", &I);
3523 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(),
3524 "zext source and destination must both be a vector or neither", &I);
3525 unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
3526 unsigned DestBitSize = DestTy->getScalarSizeInBits();
3527
3528 Check(SrcBitSize < DestBitSize, "Type too small for ZExt", &I);
3529
3530 visitInstruction(I);
3531}
3532
3533void Verifier::visitSExtInst(SExtInst &I) {
3534 // Get the source and destination types
3535 Type *SrcTy = I.getOperand(0)->getType();
3536 Type *DestTy = I.getType();
3537
3538 // Get the size of the types in bits, we'll need this later
3539 unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
3540 unsigned DestBitSize = DestTy->getScalarSizeInBits();
3541
3542 Check(SrcTy->isIntOrIntVectorTy(), "SExt only operates on integer", &I);
3543 Check(DestTy->isIntOrIntVectorTy(), "SExt only produces an integer", &I);
3544 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(),
3545 "sext source and destination must both be a vector or neither", &I);
3546 Check(SrcBitSize < DestBitSize, "Type too small for SExt", &I);
3547
3548 visitInstruction(I);
3549}
3550
3551void Verifier::visitFPTruncInst(FPTruncInst &I) {
3552 // Get the source and destination types
3553 Type *SrcTy = I.getOperand(0)->getType();
3554 Type *DestTy = I.getType();
3555 // Get the size of the types in bits, we'll need this later
3556 unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
3557 unsigned DestBitSize = DestTy->getScalarSizeInBits();
3558
3559 Check(SrcTy->isFPOrFPVectorTy(), "FPTrunc only operates on FP", &I);
3560 Check(DestTy->isFPOrFPVectorTy(), "FPTrunc only produces an FP", &I);
3561 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(),
3562 "fptrunc source and destination must both be a vector or neither", &I);
3563 Check(SrcBitSize > DestBitSize, "DestTy too big for FPTrunc", &I);
3564
3565 visitInstruction(I);
3566}
3567
3568void Verifier::visitFPExtInst(FPExtInst &I) {
3569 // Get the source and destination types
3570 Type *SrcTy = I.getOperand(0)->getType();
3571 Type *DestTy = I.getType();
3572
3573 // Get the size of the types in bits, we'll need this later
3574 unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
3575 unsigned DestBitSize = DestTy->getScalarSizeInBits();
3576
3577 Check(SrcTy->isFPOrFPVectorTy(), "FPExt only operates on FP", &I);
3578 Check(DestTy->isFPOrFPVectorTy(), "FPExt only produces an FP", &I);
3579 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(),
3580 "fpext source and destination must both be a vector or neither", &I);
3581 Check(SrcBitSize < DestBitSize, "DestTy too small for FPExt", &I);
3582
3583 visitInstruction(I);
3584}
3585
3586void Verifier::visitUIToFPInst(UIToFPInst &I) {
3587 // Get the source and destination types
3588 Type *SrcTy = I.getOperand(0)->getType();
3589 Type *DestTy = I.getType();
3590
3591 bool SrcVec = SrcTy->isVectorTy();
3592 bool DstVec = DestTy->isVectorTy();
3593
3594 Check(SrcVec == DstVec,
3595 "UIToFP source and dest must both be vector or scalar", &I);
3596 Check(SrcTy->isIntOrIntVectorTy(),
3597 "UIToFP source must be integer or integer vector", &I);
3598 Check(DestTy->isFPOrFPVectorTy(), "UIToFP result must be FP or FP vector",
3599 &I);
3600
3601 if (SrcVec && DstVec)
3602 Check(cast<VectorType>(SrcTy)->getElementCount() ==
3603 cast<VectorType>(DestTy)->getElementCount(),
3604 "UIToFP source and dest vector length mismatch", &I);
3605
3606 visitInstruction(I);
3607}
3608
3609void Verifier::visitSIToFPInst(SIToFPInst &I) {
3610 // Get the source and destination types
3611 Type *SrcTy = I.getOperand(0)->getType();
3612 Type *DestTy = I.getType();
3613
3614 bool SrcVec = SrcTy->isVectorTy();
3615 bool DstVec = DestTy->isVectorTy();
3616
3617 Check(SrcVec == DstVec,
3618 "SIToFP source and dest must both be vector or scalar", &I);
3619 Check(SrcTy->isIntOrIntVectorTy(),
3620 "SIToFP source must be integer or integer vector", &I);
3621 Check(DestTy->isFPOrFPVectorTy(), "SIToFP result must be FP or FP vector",
3622 &I);
3623
3624 if (SrcVec && DstVec)
3625 Check(cast<VectorType>(SrcTy)->getElementCount() ==
3626 cast<VectorType>(DestTy)->getElementCount(),
3627 "SIToFP source and dest vector length mismatch", &I);
3628
3629 visitInstruction(I);
3630}
3631
3632void Verifier::visitFPToUIInst(FPToUIInst &I) {
3633 // Get the source and destination types
3634 Type *SrcTy = I.getOperand(0)->getType();
3635 Type *DestTy = I.getType();
3636
3637 bool SrcVec = SrcTy->isVectorTy();
3638 bool DstVec = DestTy->isVectorTy();
3639
3640 Check(SrcVec == DstVec,
3641 "FPToUI source and dest must both be vector or scalar", &I);
3642 Check(SrcTy->isFPOrFPVectorTy(), "FPToUI source must be FP or FP vector", &I);
3643 Check(DestTy->isIntOrIntVectorTy(),
3644 "FPToUI result must be integer or integer vector", &I);
3645
3646 if (SrcVec && DstVec)
3647 Check(cast<VectorType>(SrcTy)->getElementCount() ==
3648 cast<VectorType>(DestTy)->getElementCount(),
3649 "FPToUI source and dest vector length mismatch", &I);
3650
3651 visitInstruction(I);
3652}
3653
3654void Verifier::visitFPToSIInst(FPToSIInst &I) {
3655 // Get the source and destination types
3656 Type *SrcTy = I.getOperand(0)->getType();
3657 Type *DestTy = I.getType();
3658
3659 bool SrcVec = SrcTy->isVectorTy();
3660 bool DstVec = DestTy->isVectorTy();
3661
3662 Check(SrcVec == DstVec,
3663 "FPToSI source and dest must both be vector or scalar", &I);
3664 Check(SrcTy->isFPOrFPVectorTy(), "FPToSI source must be FP or FP vector", &I);
3665 Check(DestTy->isIntOrIntVectorTy(),
3666 "FPToSI result must be integer or integer vector", &I);
3667
3668 if (SrcVec && DstVec)
3669 Check(cast<VectorType>(SrcTy)->getElementCount() ==
3670 cast<VectorType>(DestTy)->getElementCount(),
3671 "FPToSI source and dest vector length mismatch", &I);
3672
3673 visitInstruction(I);
3674}
3675
3676void Verifier::checkPtrToAddr(Type *SrcTy, Type *DestTy, const Value &V) {
3677 Check(SrcTy->isPtrOrPtrVectorTy(), "PtrToAddr source must be pointer", V);
3678 Check(DestTy->isIntOrIntVectorTy(), "PtrToAddr result must be integral", V);
3679 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(), "PtrToAddr type mismatch",
3680 V);
3681
3682 if (SrcTy->isVectorTy()) {
3683 auto *VSrc = cast<VectorType>(SrcTy);
3684 auto *VDest = cast<VectorType>(DestTy);
3685 Check(VSrc->getElementCount() == VDest->getElementCount(),
3686 "PtrToAddr vector length mismatch", V);
3687 }
3688
3689 Type *AddrTy = DL.getAddressType(SrcTy);
3690 Check(AddrTy == DestTy, "PtrToAddr result must be address width", V);
3691}
3692
3693void Verifier::visitPtrToAddrInst(PtrToAddrInst &I) {
3694 checkPtrToAddr(I.getOperand(0)->getType(), I.getType(), I);
3695 visitInstruction(I);
3696}
3697
3698void Verifier::visitPtrToIntInst(PtrToIntInst &I) {
3699 // Get the source and destination types
3700 Type *SrcTy = I.getOperand(0)->getType();
3701 Type *DestTy = I.getType();
3702
3703 Check(SrcTy->isPtrOrPtrVectorTy(), "PtrToInt source must be pointer", &I);
3704
3705 Check(DestTy->isIntOrIntVectorTy(), "PtrToInt result must be integral", &I);
3706 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(), "PtrToInt type mismatch",
3707 &I);
3708
3709 if (SrcTy->isVectorTy()) {
3710 auto *VSrc = cast<VectorType>(SrcTy);
3711 auto *VDest = cast<VectorType>(DestTy);
3712 Check(VSrc->getElementCount() == VDest->getElementCount(),
3713 "PtrToInt Vector length mismatch", &I);
3714 }
3715
3716 visitInstruction(I);
3717}
3718
3719void Verifier::visitIntToPtrInst(IntToPtrInst &I) {
3720 // Get the source and destination types
3721 Type *SrcTy = I.getOperand(0)->getType();
3722 Type *DestTy = I.getType();
3723
3724 Check(SrcTy->isIntOrIntVectorTy(), "IntToPtr source must be an integral", &I);
3725 Check(DestTy->isPtrOrPtrVectorTy(), "IntToPtr result must be a pointer", &I);
3726
3727 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(), "IntToPtr type mismatch",
3728 &I);
3729 if (SrcTy->isVectorTy()) {
3730 auto *VSrc = cast<VectorType>(SrcTy);
3731 auto *VDest = cast<VectorType>(DestTy);
3732 Check(VSrc->getElementCount() == VDest->getElementCount(),
3733 "IntToPtr Vector length mismatch", &I);
3734 }
3735 visitInstruction(I);
3736}
3737
3738void Verifier::visitBitCastInst(BitCastInst &I) {
3739 Check(
3740 CastInst::castIsValid(Instruction::BitCast, I.getOperand(0), I.getType()),
3741 "Invalid bitcast", &I);
3742 visitInstruction(I);
3743}
3744
// Verify an addrspacecast: pointer (or pointer-vector) source and result with
// matching element counts for the vector form.
// NOTE(review): the head of the address-space-difference Check is elided in
// this view; only its message line is visible.
void Verifier::visitAddrSpaceCastInst(AddrSpaceCastInst &I) {
  Type *SrcTy = I.getOperand(0)->getType();
  Type *DestTy = I.getType();

  Check(SrcTy->isPtrOrPtrVectorTy(), "AddrSpaceCast source must be a pointer",
        &I);
  Check(DestTy->isPtrOrPtrVectorTy(), "AddrSpaceCast result must be a pointer",
        &I);
        "AddrSpaceCast must be between different address spaces", &I);
  // Vector casts must preserve the number of elements.
  if (auto *SrcVTy = dyn_cast<VectorType>(SrcTy))
    Check(SrcVTy->getElementCount() ==
              cast<VectorType>(DestTy)->getElementCount(),
          "AddrSpaceCast vector pointer number of elements mismatch", &I);
  visitInstruction(I);
}
3761
/// visitPHINode - Ensure that a PHI node is well formed.
///
/// NOTE(review): the second half of the grouping condition below (presumably
/// an isa<PHINode> test on the preceding instruction) is elided in this view.
void Verifier::visitPHINode(PHINode &PN) {
  // Ensure that the PHI nodes are all grouped together at the top of the block.
  // This can be tested by checking whether the instruction before this is
  // either nonexistent (because this is begin()) or is a PHI node. If not,
  // then there is some other instruction before a PHI.
  Check(&PN == &PN.getParent()->front() ||
        "PHI nodes not grouped at top of basic block!", &PN, PN.getParent());

  // Check that a PHI doesn't yield a Token.
  Check(!PN.getType()->isTokenLikeTy(), "PHI nodes cannot have token type!");

  // Check that all of the values of the PHI node have the same type as the
  // result.
  for (Value *IncValue : PN.incoming_values()) {
    Check(PN.getType() == IncValue->getType(),
          "PHI node operands are not the same type as the result!", &PN);
  }

  // All other PHI node constraints are checked in the visitBasicBlock method.

  visitInstruction(PN);
}
3787
/// Shared verification for every call-like instruction (call, invoke, callbr):
/// callee type and arity, argument/parameter type agreement, attribute
/// placement, ABI-sensitive attributes (inalloca, swifterror, immarg,
/// preallocated), vararg attribute rules, operand bundles, and debug-location
/// requirements for inlinable callsites.
/// NOTE(review): several statement heads in this function are elided in this
/// view (the called-operand pointer Check, the Callee initializer, the
/// calling-convention Check, the OB_preallocated count, the intrinsic-ID
/// dispatch, the clang.arc.attachedcall tag test, and the !dbg-location Check
/// head); only their continuation lines are visible.
void Verifier::visitCallBase(CallBase &Call) {
        "Called function must be a pointer!", Call);
  FunctionType *FTy = Call.getFunctionType();

  // Verify that the correct number of arguments are being passed
  if (FTy->isVarArg())
    Check(Call.arg_size() >= FTy->getNumParams(),
          "Called function requires more parameters than were provided!", Call);
  else
    Check(Call.arg_size() == FTy->getNumParams(),
          "Incorrect number of arguments passed to called function!", Call);

  // Verify that all arguments to the call match the function type.
  for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i)
    Check(Call.getArgOperand(i)->getType() == FTy->getParamType(i),
          "Call parameter type does not match function signature!",
          Call.getArgOperand(i), FTy->getParamType(i), Call);

  AttributeList Attrs = Call.getAttributes();

  Check(verifyAttributeCount(Attrs, Call.arg_size()),
        "Attribute after last parameter!", Call);

  // Resolve the callee to a Function where possible (initializer tail elided
  // in this view).
  Function *Callee =
  bool IsIntrinsic = Callee && Callee->isIntrinsic();
  if (IsIntrinsic)
    Check(Callee->getValueType() == FTy,
          "Intrinsic called with incompatible signature", Call);

  // Verify if the calling convention of the callee is callable.
        "calling convention does not permit calls", Call);

  // Disallow passing/returning values with alignment higher than we can
  // represent.
  // FIXME: Consider making DataLayout cap the alignment, so this isn't
  // necessary.
  auto VerifyTypeAlign = [&](Type *Ty, const Twine &Message) {
    if (!Ty->isSized())
      return;
    Align ABIAlign = DL.getABITypeAlign(Ty);
    Check(ABIAlign.value() <= Value::MaximumAlignment,
          "Incorrect alignment of " + Message + " to called function!", Call);
  };

  if (!IsIntrinsic) {
    VerifyTypeAlign(FTy->getReturnType(), "return type");
    for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i) {
      Type *Ty = FTy->getParamType(i);
      VerifyTypeAlign(Ty, "argument passed");
    }
  }

  if (Attrs.hasFnAttr(Attribute::Speculatable)) {
    // Don't allow speculatable on call sites, unless the underlying function
    // declaration is also speculatable.
    Check(Callee && Callee->isSpeculatable(),
          "speculatable attribute may not apply to call sites", Call);
  }

  if (Attrs.hasFnAttr(Attribute::Preallocated)) {
    Check(Call.getIntrinsicID() == Intrinsic::call_preallocated_arg,
          "preallocated as a call site attribute can only be on "
          "llvm.call.preallocated.arg");
  }

  // Verify call attributes.
  verifyFunctionAttrs(FTy, Attrs, &Call, IsIntrinsic, Call.isInlineAsm());

  // Conservatively check the inalloca argument.
  // We have a bug if we can find that there is an underlying alloca without
  // inalloca.
  if (Call.hasInAllocaArgument()) {
    Value *InAllocaArg = Call.getArgOperand(FTy->getNumParams() - 1);
    if (auto AI = dyn_cast<AllocaInst>(InAllocaArg->stripInBoundsOffsets()))
      Check(AI->isUsedWithInAlloca(),
            "inalloca argument for call has mismatched alloca", AI, Call);
  }

  // For each argument of the callsite, if it has the swifterror argument,
  // make sure the underlying alloca/parameter it comes from has a swifterror as
  // well.
  for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i) {
    if (Call.paramHasAttr(i, Attribute::SwiftError)) {
      Value *SwiftErrorArg = Call.getArgOperand(i);
      if (auto AI = dyn_cast<AllocaInst>(SwiftErrorArg->stripInBoundsOffsets())) {
        Check(AI->isSwiftError(),
              "swifterror argument for call has mismatched alloca", AI, Call);
        continue;
      }
      auto ArgI = dyn_cast<Argument>(SwiftErrorArg);
      Check(ArgI, "swifterror argument should come from an alloca or parameter",
            SwiftErrorArg, Call);
      Check(ArgI->hasSwiftErrorAttr(),
            "swifterror argument for call has mismatched parameter", ArgI,
            Call);
    }

    if (Attrs.hasParamAttr(i, Attribute::ImmArg)) {
      // Don't allow immarg on call sites, unless the underlying declaration
      // also has the matching immarg.
      Check(Callee && Callee->hasParamAttribute(i, Attribute::ImmArg),
            "immarg may not apply only to call sites", Call.getArgOperand(i),
            Call);
    }

    if (Call.paramHasAttr(i, Attribute::ImmArg)) {
      Value *ArgVal = Call.getArgOperand(i);
      Check(isa<ConstantInt>(ArgVal) || isa<ConstantFP>(ArgVal),
            "immarg operand has non-immediate parameter", ArgVal, Call);

      // If the imm-arg is an integer and also has a range attached,
      // check if the given value is within the range.
      if (Call.paramHasAttr(i, Attribute::Range)) {
        if (auto *CI = dyn_cast<ConstantInt>(ArgVal)) {
          const ConstantRange &CR =
              Call.getParamAttr(i, Attribute::Range).getValueAsConstantRange();
          Check(CR.contains(CI->getValue()),
                "immarg value " + Twine(CI->getValue().getSExtValue()) +
                    " out of range [" + Twine(CR.getLower().getSExtValue()) +
                    ", " + Twine(CR.getUpper().getSExtValue()) + ")",
                Call);
        }
      }
    }

    if (Call.paramHasAttr(i, Attribute::Preallocated)) {
      Value *ArgVal = Call.getArgOperand(i);
      // hasOB's initializer (a count of OB_preallocated bundles, presumably)
      // is elided in this view.
      bool hasOB =
      bool isMustTail = Call.isMustTailCall();
      Check(hasOB != isMustTail,
            "preallocated operand either requires a preallocated bundle or "
            "the call to be musttail (but not both)",
            ArgVal, Call);
    }
  }

  if (FTy->isVarArg()) {
    // FIXME? is 'nest' even legal here?
    bool SawNest = false;
    bool SawReturned = false;

    for (unsigned Idx = 0; Idx < FTy->getNumParams(); ++Idx) {
      if (Attrs.hasParamAttr(Idx, Attribute::Nest))
        SawNest = true;
      if (Attrs.hasParamAttr(Idx, Attribute::Returned))
        SawReturned = true;
    }

    // Check attributes on the varargs part.
    for (unsigned Idx = FTy->getNumParams(); Idx < Call.arg_size(); ++Idx) {
      Type *Ty = Call.getArgOperand(Idx)->getType();
      AttributeSet ArgAttrs = Attrs.getParamAttrs(Idx);
      verifyParameterAttrs(ArgAttrs, Ty, &Call);

      if (ArgAttrs.hasAttribute(Attribute::Nest)) {
        Check(!SawNest, "More than one parameter has attribute nest!", Call);
        SawNest = true;
      }

      if (ArgAttrs.hasAttribute(Attribute::Returned)) {
        Check(!SawReturned, "More than one parameter has attribute returned!",
              Call);
        Check(Ty->canLosslesslyBitCastTo(FTy->getReturnType()),
              "Incompatible argument and return types for 'returned' "
              "attribute",
              Call);
        SawReturned = true;
      }

      // Statepoint intrinsic is vararg but the wrapped function may be not.
      // Allow sret here and check the wrapped function in verifyStatepoint.
      if (Call.getIntrinsicID() != Intrinsic::experimental_gc_statepoint)
        Check(!ArgAttrs.hasAttribute(Attribute::StructRet),
              "Attribute 'sret' cannot be used for vararg call arguments!",
              Call);

      if (ArgAttrs.hasAttribute(Attribute::InAlloca))
        Check(Idx == Call.arg_size() - 1,
              "inalloca isn't on the last argument!", Call);
    }
  }

  // Verify that there's no metadata unless it's a direct call to an intrinsic.
  if (!IsIntrinsic) {
    for (Type *ParamTy : FTy->params()) {
      Check(!ParamTy->isMetadataTy(),
            "Function has metadata parameter but isn't an intrinsic", Call);
      Check(!ParamTy->isTokenLikeTy(),
            "Function has token parameter but isn't an intrinsic", Call);
    }
  }

  // Verify that indirect calls don't return tokens.
  if (!Call.getCalledFunction()) {
    Check(!FTy->getReturnType()->isTokenLikeTy(),
          "Return type cannot be token for indirect call!");
    Check(!FTy->getReturnType()->isX86_AMXTy(),
          "Return type cannot be x86_amx for indirect call!");
  }

  // Dispatch to per-intrinsic verification (the ID computation/guard on the
  // preceding line is elided in this view).
    visitIntrinsicCall(ID, Call);

  // Verify that a callsite has at most one "deopt", at most one "funclet", at
  // most one "gc-transition", at most one "cfguardtarget", at most one
  // "preallocated" operand bundle, and at most one "ptrauth" operand bundle.
  bool FoundDeoptBundle = false, FoundFuncletBundle = false,
       FoundGCTransitionBundle = false, FoundCFGuardTargetBundle = false,
       FoundPreallocatedBundle = false, FoundGCLiveBundle = false,
       FoundPtrauthBundle = false, FoundKCFIBundle = false,
       FoundAttachedCallBundle = false;
  for (unsigned i = 0, e = Call.getNumOperandBundles(); i < e; ++i) {
    OperandBundleUse BU = Call.getOperandBundleAt(i);
    uint32_t Tag = BU.getTagID();
    if (Tag == LLVMContext::OB_deopt) {
      Check(!FoundDeoptBundle, "Multiple deopt operand bundles", Call);
      FoundDeoptBundle = true;
    } else if (Tag == LLVMContext::OB_gc_transition) {
      Check(!FoundGCTransitionBundle, "Multiple gc-transition operand bundles",
            Call);
      FoundGCTransitionBundle = true;
    } else if (Tag == LLVMContext::OB_funclet) {
      Check(!FoundFuncletBundle, "Multiple funclet operand bundles", Call);
      FoundFuncletBundle = true;
      Check(BU.Inputs.size() == 1,
            "Expected exactly one funclet bundle operand", Call);
      Check(isa<FuncletPadInst>(BU.Inputs.front()),
            "Funclet bundle operands should correspond to a FuncletPadInst",
            Call);
    } else if (Tag == LLVMContext::OB_cfguardtarget) {
      Check(!FoundCFGuardTargetBundle, "Multiple CFGuardTarget operand bundles",
            Call);
      FoundCFGuardTargetBundle = true;
      Check(BU.Inputs.size() == 1,
            "Expected exactly one cfguardtarget bundle operand", Call);
    } else if (Tag == LLVMContext::OB_ptrauth) {
      Check(!FoundPtrauthBundle, "Multiple ptrauth operand bundles", Call);
      FoundPtrauthBundle = true;
      Check(BU.Inputs.size() == 2,
            "Expected exactly two ptrauth bundle operands", Call);
      Check(isa<ConstantInt>(BU.Inputs[0]) &&
                BU.Inputs[0]->getType()->isIntegerTy(32),
            "Ptrauth bundle key operand must be an i32 constant", Call);
      Check(BU.Inputs[1]->getType()->isIntegerTy(64),
            "Ptrauth bundle discriminator operand must be an i64", Call);
    } else if (Tag == LLVMContext::OB_kcfi) {
      Check(!FoundKCFIBundle, "Multiple kcfi operand bundles", Call);
      FoundKCFIBundle = true;
      Check(BU.Inputs.size() == 1, "Expected exactly one kcfi bundle operand",
            Call);
      Check(isa<ConstantInt>(BU.Inputs[0]) &&
                BU.Inputs[0]->getType()->isIntegerTy(32),
            "Kcfi bundle operand must be an i32 constant", Call);
    } else if (Tag == LLVMContext::OB_preallocated) {
      Check(!FoundPreallocatedBundle, "Multiple preallocated operand bundles",
            Call);
      FoundPreallocatedBundle = true;
      Check(BU.Inputs.size() == 1,
            "Expected exactly one preallocated bundle operand", Call);
      auto Input = dyn_cast<IntrinsicInst>(BU.Inputs.front());
      Check(Input &&
                Input->getIntrinsicID() == Intrinsic::call_preallocated_setup,
            "\"preallocated\" argument must be a token from "
            "llvm.call.preallocated.setup",
            Call);
    } else if (Tag == LLVMContext::OB_gc_live) {
      Check(!FoundGCLiveBundle, "Multiple gc-live operand bundles", Call);
      FoundGCLiveBundle = true;
      Check(!FoundAttachedCallBundle,
            "Multiple \"clang.arc.attachedcall\" operand bundles", Call);
      FoundAttachedCallBundle = true;
      verifyAttachedCallBundle(Call, BU);
    }
  }

  // Verify that callee and callsite agree on whether to use pointer auth.
  Check(!(Call.getCalledFunction() && FoundPtrauthBundle),
        "Direct call cannot have a ptrauth bundle", Call);

  // Verify that each inlinable callsite of a debug-info-bearing function in a
  // debug-info-bearing function has a debug location attached to it. Failure to
  // do so causes assertion failures when the inliner sets up inline scope info
  // (Interposable functions are not inlinable, neither are functions without
  // definitions.)
        "inlinable function call in a function with "
        "debug info must have a !dbg location",
        Call);

  if (Call.isInlineAsm())
    verifyInlineAsmCall(Call);

  ConvergenceVerifyHelper.visit(Call);

  visitInstruction(Call);
}
4093
4094void Verifier::verifyTailCCMustTailAttrs(const AttrBuilder &Attrs,
4095 StringRef Context) {
4096 Check(!Attrs.contains(Attribute::InAlloca),
4097 Twine("inalloca attribute not allowed in ") + Context);
4098 Check(!Attrs.contains(Attribute::InReg),
4099 Twine("inreg attribute not allowed in ") + Context);
4100 Check(!Attrs.contains(Attribute::SwiftError),
4101 Twine("swifterror attribute not allowed in ") + Context);
4102 Check(!Attrs.contains(Attribute::Preallocated),
4103 Twine("preallocated attribute not allowed in ") + Context);
4104 Check(!Attrs.contains(Attribute::ByRef),
4105 Twine("byref attribute not allowed in ") + Context);
4106}
4107
/// Two types are "congruent" if they are identical, or if they are both pointer
/// types with different pointee types and the same address space.
static bool isTypeCongruent(Type *L, Type *R) {
  if (L == R)
    return true;
  // NOTE(review): the definitions of PL/PR (presumably dyn_cast<PointerType>
  // of L and R) are elided in this view; non-pointer, non-identical types end
  // up non-congruent here.
  if (!PL || !PR)
    return false;
  // Pointer types are congruent iff they share an address space.
  return PL->getAddressSpace() == PR->getAddressSpace();
}
4119
4120static AttrBuilder getParameterABIAttributes(LLVMContext& C, unsigned I, AttributeList Attrs) {
4121 static const Attribute::AttrKind ABIAttrs[] = {
4122 Attribute::StructRet, Attribute::ByVal, Attribute::InAlloca,
4123 Attribute::InReg, Attribute::StackAlignment, Attribute::SwiftSelf,
4124 Attribute::SwiftAsync, Attribute::SwiftError, Attribute::Preallocated,
4125 Attribute::ByRef};
4126 AttrBuilder Copy(C);
4127 for (auto AK : ABIAttrs) {
4128 Attribute Attr = Attrs.getParamAttrs(I).getAttribute(AK);
4129 if (Attr.isValid())
4130 Copy.addAttribute(Attr);
4131 }
4132
4133 // `align` is ABI-affecting only in combination with `byval` or `byref`.
4134 if (Attrs.hasParamAttr(I, Attribute::Alignment) &&
4135 (Attrs.hasParamAttr(I, Attribute::ByVal) ||
4136 Attrs.hasParamAttr(I, Attribute::ByRef)))
4137 Copy.addAlignmentAttr(Attrs.getParamAlignment(I));
4138 return Copy;
4139}
4140
/// Enforce the structural rules of a `musttail` call: no inline asm; caller
/// and callee agree on varargs-ness, return-type congruence, and calling
/// convention; the call is immediately followed by (an optional bitcast and)
/// a ret of the call's result; and ABI-impacting parameter attributes match.
/// NOTE(review): two statements are elided in this view — the initializer of
/// `Next` (presumably the instruction following CI) and part of the
/// return-value Check's condition.
void Verifier::verifyMustTailCall(CallInst &CI) {
  Check(!CI.isInlineAsm(), "cannot use musttail call with inline asm", &CI);

  Function *F = CI.getParent()->getParent();
  FunctionType *CallerTy = F->getFunctionType();
  FunctionType *CalleeTy = CI.getFunctionType();
  Check(CallerTy->isVarArg() == CalleeTy->isVarArg(),
        "cannot guarantee tail call due to mismatched varargs", &CI);
  Check(isTypeCongruent(CallerTy->getReturnType(), CalleeTy->getReturnType()),
        "cannot guarantee tail call due to mismatched return types", &CI);

  // - The calling conventions of the caller and callee must match.
  Check(F->getCallingConv() == CI.getCallingConv(),
        "cannot guarantee tail call due to mismatched calling conv", &CI);

  // - The call must immediately precede a :ref:`ret <i_ret>` instruction,
  //   or a pointer bitcast followed by a ret instruction.
  // - The ret instruction must return the (possibly bitcasted) value
  //   produced by the call or void.
  Value *RetVal = &CI;

  // Handle the optional bitcast.
  if (BitCastInst *BI = dyn_cast_or_null<BitCastInst>(Next)) {
    Check(BI->getOperand(0) == RetVal,
          "bitcast following musttail call must use the call", BI);
    RetVal = BI;
    Next = BI->getNextNode();
  }

  // Check the return.
  ReturnInst *Ret = dyn_cast_or_null<ReturnInst>(Next);
  Check(Ret, "musttail call must precede a ret with an optional bitcast", &CI);
  Check(!Ret->getReturnValue() || Ret->getReturnValue() == RetVal ||
        "musttail call result must be returned", Ret);

  AttributeList CallerAttrs = F->getAttributes();
  AttributeList CalleeAttrs = CI.getAttributes();
  if (CI.getCallingConv() == CallingConv::SwiftTail ||
      CI.getCallingConv() == CallingConv::Tail) {
    StringRef CCName =
        CI.getCallingConv() == CallingConv::Tail ? "tailcc" : "swifttailcc";

    // - Only sret, byval, swiftself, and swiftasync ABI-impacting attributes
    //   are allowed in swifttailcc call
    for (unsigned I = 0, E = CallerTy->getNumParams(); I != E; ++I) {
      AttrBuilder ABIAttrs = getParameterABIAttributes(F->getContext(), I, CallerAttrs);
      SmallString<32> Context{CCName, StringRef(" musttail caller")};
      verifyTailCCMustTailAttrs(ABIAttrs, Context);
    }
    for (unsigned I = 0, E = CalleeTy->getNumParams(); I != E; ++I) {
      AttrBuilder ABIAttrs = getParameterABIAttributes(F->getContext(), I, CalleeAttrs);
      SmallString<32> Context{CCName, StringRef(" musttail callee")};
      verifyTailCCMustTailAttrs(ABIAttrs, Context);
    }
    // - Varargs functions are not allowed
    Check(!CallerTy->isVarArg(), Twine("cannot guarantee ") + CCName +
                                     " tail call for varargs function");
    return;
  }

  // - The caller and callee prototypes must match. Pointer types of
  //   parameters or return types may differ in pointee type, but not
  //   address space.
  if (!CI.getIntrinsicID()) {
    Check(CallerTy->getNumParams() == CalleeTy->getNumParams(),
          "cannot guarantee tail call due to mismatched parameter counts", &CI);
    for (unsigned I = 0, E = CallerTy->getNumParams(); I != E; ++I) {
      Check(
          isTypeCongruent(CallerTy->getParamType(I), CalleeTy->getParamType(I)),
          "cannot guarantee tail call due to mismatched parameter types", &CI);
    }
  }

  // - All ABI-impacting function attributes, such as sret, byval, inreg,
  //   returned, preallocated, and inalloca, must match.
  for (unsigned I = 0, E = CallerTy->getNumParams(); I != E; ++I) {
    AttrBuilder CallerABIAttrs = getParameterABIAttributes(F->getContext(), I, CallerAttrs);
    AttrBuilder CalleeABIAttrs = getParameterABIAttributes(F->getContext(), I, CalleeAttrs);
    Check(CallerABIAttrs == CalleeABIAttrs,
          "cannot guarantee tail call due to mismatched ABI impacting "
          "function attributes",
          &CI, CI.getOperand(I));
  }
}
4227
4228void Verifier::visitCallInst(CallInst &CI) {
4229 visitCallBase(CI);
4230
4231 if (CI.isMustTailCall())
4232 verifyMustTailCall(CI);
4233}
4234
4235void Verifier::visitInvokeInst(InvokeInst &II) {
4236 visitCallBase(II);
4237
4238 // Verify that the first non-PHI instruction of the unwind destination is an
4239 // exception handling instruction.
4240 Check(
4241 II.getUnwindDest()->isEHPad(),
4242 "The unwind destination does not have an exception handling instruction!",
4243 &II);
4244
4245 visitTerminator(II);
4246}
4247
4248/// visitUnaryOperator - Check the argument to the unary operator.
4249///
4250void Verifier::visitUnaryOperator(UnaryOperator &U) {
4251 Check(U.getType() == U.getOperand(0)->getType(),
4252 "Unary operators must have same type for"
4253 "operands and result!",
4254 &U);
4255
4256 switch (U.getOpcode()) {
4257 // Check that floating-point arithmetic operators are only used with
4258 // floating-point operands.
4259 case Instruction::FNeg:
4260 Check(U.getType()->isFPOrFPVectorTy(),
4261 "FNeg operator only works with float types!", &U);
4262 break;
4263 default:
4264 llvm_unreachable("Unknown UnaryOperator opcode!");
4265 }
4266
4267 visitInstruction(U);
4268}
4269
4270/// visitBinaryOperator - Check that both arguments to the binary operator are
4271/// of the same type!
4272///
4273void Verifier::visitBinaryOperator(BinaryOperator &B) {
4274 Check(B.getOperand(0)->getType() == B.getOperand(1)->getType(),
4275 "Both operands to a binary operator are not of the same type!", &B);
4276
4277 switch (B.getOpcode()) {
4278 // Check that integer arithmetic operators are only used with
4279 // integral operands.
4280 case Instruction::Add:
4281 case Instruction::Sub:
4282 case Instruction::Mul:
4283 case Instruction::SDiv:
4284 case Instruction::UDiv:
4285 case Instruction::SRem:
4286 case Instruction::URem:
4287 Check(B.getType()->isIntOrIntVectorTy(),
4288 "Integer arithmetic operators only work with integral types!", &B);
4289 Check(B.getType() == B.getOperand(0)->getType(),
4290 "Integer arithmetic operators must have same type "
4291 "for operands and result!",
4292 &B);
4293 break;
4294 // Check that floating-point arithmetic operators are only used with
4295 // floating-point operands.
4296 case Instruction::FAdd:
4297 case Instruction::FSub:
4298 case Instruction::FMul:
4299 case Instruction::FDiv:
4300 case Instruction::FRem:
4301 Check(B.getType()->isFPOrFPVectorTy(),
4302 "Floating-point arithmetic operators only work with "
4303 "floating-point types!",
4304 &B);
4305 Check(B.getType() == B.getOperand(0)->getType(),
4306 "Floating-point arithmetic operators must have same type "
4307 "for operands and result!",
4308 &B);
4309 break;
4310 // Check that logical operators are only used with integral operands.
4311 case Instruction::And:
4312 case Instruction::Or:
4313 case Instruction::Xor:
4314 Check(B.getType()->isIntOrIntVectorTy(),
4315 "Logical operators only work with integral types!", &B);
4316 Check(B.getType() == B.getOperand(0)->getType(),
4317 "Logical operators must have same type for operands and result!", &B);
4318 break;
4319 case Instruction::Shl:
4320 case Instruction::LShr:
4321 case Instruction::AShr:
4322 Check(B.getType()->isIntOrIntVectorTy(),
4323 "Shifts only work with integral types!", &B);
4324 Check(B.getType() == B.getOperand(0)->getType(),
4325 "Shift return type must be same as operands!", &B);
4326 break;
4327 default:
4328 llvm_unreachable("Unknown BinaryOperator opcode!");
4329 }
4330
4331 visitInstruction(B);
4332}
4333
4334void Verifier::visitICmpInst(ICmpInst &IC) {
4335 // Check that the operands are the same type
4336 Type *Op0Ty = IC.getOperand(0)->getType();
4337 Type *Op1Ty = IC.getOperand(1)->getType();
4338 Check(Op0Ty == Op1Ty,
4339 "Both operands to ICmp instruction are not of the same type!", &IC);
4340 // Check that the operands are the right type
4341 Check(Op0Ty->isIntOrIntVectorTy() || Op0Ty->isPtrOrPtrVectorTy(),
4342 "Invalid operand types for ICmp instruction", &IC);
4343 // Check that the predicate is valid.
4344 Check(IC.isIntPredicate(), "Invalid predicate in ICmp instruction!", &IC);
4345
4346 visitInstruction(IC);
4347}
4348
4349void Verifier::visitFCmpInst(FCmpInst &FC) {
4350 // Check that the operands are the same type
4351 Type *Op0Ty = FC.getOperand(0)->getType();
4352 Type *Op1Ty = FC.getOperand(1)->getType();
4353 Check(Op0Ty == Op1Ty,
4354 "Both operands to FCmp instruction are not of the same type!", &FC);
4355 // Check that the operands are the right type
4356 Check(Op0Ty->isFPOrFPVectorTy(), "Invalid operand types for FCmp instruction",
4357 &FC);
4358 // Check that the predicate is valid.
4359 Check(FC.isFPPredicate(), "Invalid predicate in FCmp instruction!", &FC);
4360
4361 visitInstruction(FC);
4362}
4363
4364void Verifier::visitExtractElementInst(ExtractElementInst &EI) {
4366 "Invalid extractelement operands!", &EI);
4367 visitInstruction(EI);
4368}
4369
4370void Verifier::visitInsertElementInst(InsertElementInst &IE) {
4371 Check(InsertElementInst::isValidOperands(IE.getOperand(0), IE.getOperand(1),
4372 IE.getOperand(2)),
4373 "Invalid insertelement operands!", &IE);
4374 visitInstruction(IE);
4375}
4376
4377void Verifier::visitShuffleVectorInst(ShuffleVectorInst &SV) {
4379 SV.getShuffleMask()),
4380 "Invalid shufflevector operands!", &SV);
4381 visitInstruction(SV);
4382}
4383
4384void Verifier::visitGetElementPtrInst(GetElementPtrInst &GEP) {
4385 Type *TargetTy = GEP.getPointerOperandType()->getScalarType();
4386
4387 Check(isa<PointerType>(TargetTy),
4388 "GEP base pointer is not a vector or a vector of pointers", &GEP);
4389 Check(GEP.getSourceElementType()->isSized(), "GEP into unsized type!", &GEP);
4390
4391 if (auto *STy = dyn_cast<StructType>(GEP.getSourceElementType())) {
4392 Check(!STy->isScalableTy(),
4393 "getelementptr cannot target structure that contains scalable vector"
4394 "type",
4395 &GEP);
4396 }
4397
4398 SmallVector<Value *, 16> Idxs(GEP.indices());
4399 Check(
4400 all_of(Idxs, [](Value *V) { return V->getType()->isIntOrIntVectorTy(); }),
4401 "GEP indexes must be integers", &GEP);
4402 Type *ElTy =
4403 GetElementPtrInst::getIndexedType(GEP.getSourceElementType(), Idxs);
4404 Check(ElTy, "Invalid indices for GEP pointer type!", &GEP);
4405
4406 PointerType *PtrTy = dyn_cast<PointerType>(GEP.getType()->getScalarType());
4407
4408 Check(PtrTy && GEP.getResultElementType() == ElTy,
4409 "GEP is not of right type for indices!", &GEP, ElTy);
4410
4411 if (auto *GEPVTy = dyn_cast<VectorType>(GEP.getType())) {
4412 // Additional checks for vector GEPs.
4413 ElementCount GEPWidth = GEPVTy->getElementCount();
4414 if (GEP.getPointerOperandType()->isVectorTy())
4415 Check(
4416 GEPWidth ==
4417 cast<VectorType>(GEP.getPointerOperandType())->getElementCount(),
4418 "Vector GEP result width doesn't match operand's", &GEP);
4419 for (Value *Idx : Idxs) {
4420 Type *IndexTy = Idx->getType();
4421 if (auto *IndexVTy = dyn_cast<VectorType>(IndexTy)) {
4422 ElementCount IndexWidth = IndexVTy->getElementCount();
4423 Check(IndexWidth == GEPWidth, "Invalid GEP index vector width", &GEP);
4424 }
4425 Check(IndexTy->isIntOrIntVectorTy(),
4426 "All GEP indices should be of integer type");
4427 }
4428 }
4429
4430 Check(GEP.getAddressSpace() == PtrTy->getAddressSpace(),
4431 "GEP address space doesn't match type", &GEP);
4432
4433 visitInstruction(GEP);
4434}
4435
4436static bool isContiguous(const ConstantRange &A, const ConstantRange &B) {
4437 return A.getUpper() == B.getLower() || A.getLower() == B.getUpper();
4438}
4439
/// Verify !range and !absolute_symbol metadata. These have the same
/// restrictions, except !absolute_symbol allows the full set.
void Verifier::verifyRangeLikeMetadata(const Value &I, const MDNode *Range,
                                       Type *Ty, RangeLikeMetadataKind Kind) {
  // The metadata is a flat list of [low, high) pairs; there must be at least
  // one complete pair.
  unsigned NumOperands = Range->getNumOperands();
  Check(NumOperands % 2 == 0, "Unfinished range!", Range);
  unsigned NumRanges = NumOperands / 2;
  Check(NumRanges >= 1, "It should have at least one range!", Range);

  ConstantRange LastRange(1, true); // Dummy initial value
  for (unsigned i = 0; i < NumRanges; ++i) {
    // Each pair must be two ConstantInts of matching type.
    ConstantInt *Low =
        mdconst::dyn_extract<ConstantInt>(Range->getOperand(2 * i));
    Check(Low, "The lower limit must be an integer!", Low);
    ConstantInt *High =
        mdconst::dyn_extract<ConstantInt>(Range->getOperand(2 * i + 1));
    Check(High, "The upper limit must be an integer!", High);

    Check(High->getType() == Low->getType(), "Range pair types must match!",
          &I);

    // !noalias.addrspace ranges are always i32; the other kinds must match
    // the (scalar) type of the value they annotate.
    if (Kind == RangeLikeMetadataKind::NoaliasAddrspace) {
      Check(High->getType()->isIntegerTy(32),
            "noalias.addrspace type must be i32!", &I);
    } else {
      Check(High->getType() == Ty->getScalarType(),
            "Range types must match instruction type!", &I);
    }

    APInt HighV = High->getValue();
    APInt LowV = Low->getValue();

    // ConstantRange asserts if the ranges are the same except for the min/max
    // value. Leave the cases it tolerates for the empty range error below.
    Check(LowV != HighV || LowV.isMaxValue() || LowV.isMinValue(),
          "The upper and lower limits cannot be the same value", &I);

    // Empty ranges are never valid; a full range is only valid for
    // !absolute_symbol.
    ConstantRange CurRange(LowV, HighV);
    Check(!CurRange.isEmptySet() &&
              (Kind == RangeLikeMetadataKind::AbsoluteSymbol ||
               !CurRange.isFullSet()),
          "Range must not be empty!", Range);
    if (i != 0) {
      // Consecutive pairs must be disjoint, strictly increasing, and not
      // mergeable into a single range.
      Check(CurRange.intersectWith(LastRange).isEmptySet(),
            "Intervals are overlapping", Range);
      Check(LowV.sgt(LastRange.getLower()), "Intervals are not in order",
            Range);
      Check(!isContiguous(CurRange, LastRange), "Intervals are contiguous",
            Range);
    }
    LastRange = ConstantRange(LowV, HighV);
  }
  // With three or more pairs, a wrapping last range could collide with the
  // first one, so compare them explicitly as well.
  if (NumRanges > 2) {
    APInt FirstLow =
        mdconst::dyn_extract<ConstantInt>(Range->getOperand(0))->getValue();
    APInt FirstHigh =
        mdconst::dyn_extract<ConstantInt>(Range->getOperand(1))->getValue();
    ConstantRange FirstRange(FirstLow, FirstHigh);
    Check(FirstRange.intersectWith(LastRange).isEmptySet(),
          "Intervals are overlapping", Range);
    Check(!isContiguous(FirstRange, LastRange), "Intervals are contiguous",
          Range);
  }
}
4504
/// Verify a !range annotation attached to instruction \p I of type \p Ty.
void Verifier::visitRangeMetadata(Instruction &I, MDNode *Range, Type *Ty) {
  assert(Range && Range == I.getMetadata(LLVMContext::MD_range) &&
         "precondition violation");
  // !range shares its encoding rules with !absolute_symbol; dispatch to the
  // common checker with the Range kind.
  verifyRangeLikeMetadata(I, Range, Ty, RangeLikeMetadataKind::Range);
}
4510
/// Verify a !noalias.addrspace annotation attached to instruction \p I.
void Verifier::visitNoaliasAddrspaceMetadata(Instruction &I, MDNode *Range,
                                             Type *Ty) {
  assert(Range && Range == I.getMetadata(LLVMContext::MD_noalias_addrspace) &&
         "precondition violation");
  // Same pair-of-integers encoding as !range, but entries are always i32.
  verifyRangeLikeMetadata(I, Range, Ty,
                          RangeLikeMetadataKind::NoaliasAddrspace);
}
4518
4519void Verifier::checkAtomicMemAccessSize(Type *Ty, const Instruction *I) {
4520 unsigned Size = DL.getTypeSizeInBits(Ty).getFixedValue();
4521 Check(Size >= 8, "atomic memory access' size must be byte-sized", Ty, I);
4522 Check(!(Size & (Size - 1)),
4523 "atomic memory access' operand must have a power-of-two size", Ty, I);
4524}
4525
4526void Verifier::visitLoadInst(LoadInst &LI) {
4528 Check(PTy, "Load operand must be a pointer.", &LI);
4529 Type *ElTy = LI.getType();
4530 if (MaybeAlign A = LI.getAlign()) {
4531 Check(A->value() <= Value::MaximumAlignment,
4532 "huge alignment values are unsupported", &LI);
4533 }
4534 Check(ElTy->isSized(), "loading unsized types is not allowed", &LI);
4535 if (LI.isAtomic()) {
4536 Check(LI.getOrdering() != AtomicOrdering::Release &&
4537 LI.getOrdering() != AtomicOrdering::AcquireRelease,
4538 "Load cannot have Release ordering", &LI);
4539 Check(ElTy->getScalarType()->isIntOrPtrTy() ||
4541 "atomic load operand must have integer, pointer, floating point, "
4542 "or vector type!",
4543 ElTy, &LI);
4544
4545 checkAtomicMemAccessSize(ElTy, &LI);
4546 } else {
4548 "Non-atomic load cannot have SynchronizationScope specified", &LI);
4549 }
4550
4551 visitInstruction(LI);
4552}
4553
4554void Verifier::visitStoreInst(StoreInst &SI) {
4555 PointerType *PTy = dyn_cast<PointerType>(SI.getOperand(1)->getType());
4556 Check(PTy, "Store operand must be a pointer.", &SI);
4557 Type *ElTy = SI.getOperand(0)->getType();
4558 if (MaybeAlign A = SI.getAlign()) {
4559 Check(A->value() <= Value::MaximumAlignment,
4560 "huge alignment values are unsupported", &SI);
4561 }
4562 Check(ElTy->isSized(), "storing unsized types is not allowed", &SI);
4563 if (SI.isAtomic()) {
4564 Check(SI.getOrdering() != AtomicOrdering::Acquire &&
4565 SI.getOrdering() != AtomicOrdering::AcquireRelease,
4566 "Store cannot have Acquire ordering", &SI);
4567 Check(ElTy->getScalarType()->isIntOrPtrTy() ||
4569 "atomic store operand must have integer, pointer, floating point, "
4570 "or vector type!",
4571 ElTy, &SI);
4572 checkAtomicMemAccessSize(ElTy, &SI);
4573 } else {
4574 Check(SI.getSyncScopeID() == SyncScope::System,
4575 "Non-atomic store cannot have SynchronizationScope specified", &SI);
4576 }
4577 visitInstruction(SI);
4578}
4579
4580/// Check that SwiftErrorVal is used as a swifterror argument in CS.
4581void Verifier::verifySwiftErrorCall(CallBase &Call,
4582 const Value *SwiftErrorVal) {
4583 for (const auto &I : llvm::enumerate(Call.args())) {
4584 if (I.value() == SwiftErrorVal) {
4585 Check(Call.paramHasAttr(I.index(), Attribute::SwiftError),
4586 "swifterror value when used in a callsite should be marked "
4587 "with swifterror attribute",
4588 SwiftErrorVal, Call);
4589 }
4590 }
4591}
4592
4593void Verifier::verifySwiftErrorValue(const Value *SwiftErrorVal) {
4594 // Check that swifterror value is only used by loads, stores, or as
4595 // a swifterror argument.
4596 for (const User *U : SwiftErrorVal->users()) {
4598 isa<InvokeInst>(U),
4599 "swifterror value can only be loaded and stored from, or "
4600 "as a swifterror argument!",
4601 SwiftErrorVal, U);
4602 // If it is used by a store, check it is the second operand.
4603 if (auto StoreI = dyn_cast<StoreInst>(U))
4604 Check(StoreI->getOperand(1) == SwiftErrorVal,
4605 "swifterror value should be the second operand when used "
4606 "by stores",
4607 SwiftErrorVal, U);
4608 if (auto *Call = dyn_cast<CallBase>(U))
4609 verifySwiftErrorCall(*const_cast<CallBase *>(Call), SwiftErrorVal);
4610 }
4611}
4612
4613void Verifier::visitAllocaInst(AllocaInst &AI) {
4614 Type *Ty = AI.getAllocatedType();
4615 SmallPtrSet<Type*, 4> Visited;
4616 Check(Ty->isSized(&Visited), "Cannot allocate unsized type", &AI);
4617 // Check if it's a target extension type that disallows being used on the
4618 // stack.
4620 "Alloca has illegal target extension type", &AI);
4622 "Alloca array size must have integer type", &AI);
4623 if (MaybeAlign A = AI.getAlign()) {
4624 Check(A->value() <= Value::MaximumAlignment,
4625 "huge alignment values are unsupported", &AI);
4626 }
4627
4628 if (AI.isSwiftError()) {
4629 Check(Ty->isPointerTy(), "swifterror alloca must have pointer type", &AI);
4631 "swifterror alloca must not be array allocation", &AI);
4632 verifySwiftErrorValue(&AI);
4633 }
4634
4635 if (TT.isAMDGPU()) {
4637 "alloca on amdgpu must be in addrspace(5)", &AI);
4638 }
4639
4640 visitInstruction(AI);
4641}
4642
4643void Verifier::visitAtomicCmpXchgInst(AtomicCmpXchgInst &CXI) {
4644 Type *ElTy = CXI.getOperand(1)->getType();
4645 Check(ElTy->isIntOrPtrTy(),
4646 "cmpxchg operand must have integer or pointer type", ElTy, &CXI);
4647 checkAtomicMemAccessSize(ElTy, &CXI);
4648 visitInstruction(CXI);
4649}
4650
4651void Verifier::visitAtomicRMWInst(AtomicRMWInst &RMWI) {
4652 Check(RMWI.getOrdering() != AtomicOrdering::Unordered,
4653 "atomicrmw instructions cannot be unordered.", &RMWI);
4654 auto Op = RMWI.getOperation();
4655 Type *ElTy = RMWI.getOperand(1)->getType();
4656 if (Op == AtomicRMWInst::Xchg) {
4657 Check(ElTy->isIntegerTy() || ElTy->isFloatingPointTy() ||
4658 ElTy->isPointerTy(),
4659 "atomicrmw " + AtomicRMWInst::getOperationName(Op) +
4660 " operand must have integer or floating point type!",
4661 &RMWI, ElTy);
4662 } else if (AtomicRMWInst::isFPOperation(Op)) {
4664 "atomicrmw " + AtomicRMWInst::getOperationName(Op) +
4665 " operand must have floating-point or fixed vector of floating-point "
4666 "type!",
4667 &RMWI, ElTy);
4668 } else {
4669 Check(ElTy->isIntegerTy(),
4670 "atomicrmw " + AtomicRMWInst::getOperationName(Op) +
4671 " operand must have integer type!",
4672 &RMWI, ElTy);
4673 }
4674 checkAtomicMemAccessSize(ElTy, &RMWI);
4676 "Invalid binary operation!", &RMWI);
4677 visitInstruction(RMWI);
4678}
4679
4680void Verifier::visitFenceInst(FenceInst &FI) {
4681 const AtomicOrdering Ordering = FI.getOrdering();
4682 Check(Ordering == AtomicOrdering::Acquire ||
4683 Ordering == AtomicOrdering::Release ||
4684 Ordering == AtomicOrdering::AcquireRelease ||
4685 Ordering == AtomicOrdering::SequentiallyConsistent,
4686 "fence instructions may only have acquire, release, acq_rel, or "
4687 "seq_cst ordering.",
4688 &FI);
4689 visitInstruction(FI);
4690}
4691
4692void Verifier::visitExtractValueInst(ExtractValueInst &EVI) {
4694 EVI.getIndices()) == EVI.getType(),
4695 "Invalid ExtractValueInst operands!", &EVI);
4696
4697 visitInstruction(EVI);
4698}
4699
4700void Verifier::visitInsertValueInst(InsertValueInst &IVI) {
4702 IVI.getIndices()) ==
4703 IVI.getOperand(1)->getType(),
4704 "Invalid InsertValueInst operands!", &IVI);
4705
4706 visitInstruction(IVI);
4707}
4708
4709static Value *getParentPad(Value *EHPad) {
4710 if (auto *FPI = dyn_cast<FuncletPadInst>(EHPad))
4711 return FPI->getParentPad();
4712
4713 return cast<CatchSwitchInst>(EHPad)->getParentPad();
4714}
4715
4716void Verifier::visitEHPadPredecessors(Instruction &I) {
4717 assert(I.isEHPad());
4718
4719 BasicBlock *BB = I.getParent();
4720 Function *F = BB->getParent();
4721
4722 Check(BB != &F->getEntryBlock(), "EH pad cannot be in entry block.", &I);
4723
4724 if (auto *LPI = dyn_cast<LandingPadInst>(&I)) {
4725 // The landingpad instruction defines its parent as a landing pad block. The
4726 // landing pad block may be branched to only by the unwind edge of an
4727 // invoke.
4728 for (BasicBlock *PredBB : predecessors(BB)) {
4729 const auto *II = dyn_cast<InvokeInst>(PredBB->getTerminator());
4730 Check(II && II->getUnwindDest() == BB && II->getNormalDest() != BB,
4731 "Block containing LandingPadInst must be jumped to "
4732 "only by the unwind edge of an invoke.",
4733 LPI);
4734 }
4735 return;
4736 }
4737 if (auto *CPI = dyn_cast<CatchPadInst>(&I)) {
4738 if (!pred_empty(BB))
4739 Check(BB->getUniquePredecessor() == CPI->getCatchSwitch()->getParent(),
4740 "Block containg CatchPadInst must be jumped to "
4741 "only by its catchswitch.",
4742 CPI);
4743 Check(BB != CPI->getCatchSwitch()->getUnwindDest(),
4744 "Catchswitch cannot unwind to one of its catchpads",
4745 CPI->getCatchSwitch(), CPI);
4746 return;
4747 }
4748
4749 // Verify that each pred has a legal terminator with a legal to/from EH
4750 // pad relationship.
4751 Instruction *ToPad = &I;
4752 Value *ToPadParent = getParentPad(ToPad);
4753 for (BasicBlock *PredBB : predecessors(BB)) {
4754 Instruction *TI = PredBB->getTerminator();
4755 Value *FromPad;
4756 if (auto *II = dyn_cast<InvokeInst>(TI)) {
4757 Check(II->getUnwindDest() == BB && II->getNormalDest() != BB,
4758 "EH pad must be jumped to via an unwind edge", ToPad, II);
4759 auto *CalledFn =
4760 dyn_cast<Function>(II->getCalledOperand()->stripPointerCasts());
4761 if (CalledFn && CalledFn->isIntrinsic() && II->doesNotThrow() &&
4762 !IntrinsicInst::mayLowerToFunctionCall(CalledFn->getIntrinsicID()))
4763 continue;
4764 if (auto Bundle = II->getOperandBundle(LLVMContext::OB_funclet))
4765 FromPad = Bundle->Inputs[0];
4766 else
4767 FromPad = ConstantTokenNone::get(II->getContext());
4768 } else if (auto *CRI = dyn_cast<CleanupReturnInst>(TI)) {
4769 FromPad = CRI->getOperand(0);
4770 Check(FromPad != ToPadParent, "A cleanupret must exit its cleanup", CRI);
4771 } else if (auto *CSI = dyn_cast<CatchSwitchInst>(TI)) {
4772 FromPad = CSI;
4773 } else {
4774 Check(false, "EH pad must be jumped to via an unwind edge", ToPad, TI);
4775 }
4776
4777 // The edge may exit from zero or more nested pads.
4778 SmallPtrSet<Value *, 8> Seen;
4779 for (;; FromPad = getParentPad(FromPad)) {
4780 Check(FromPad != ToPad,
4781 "EH pad cannot handle exceptions raised within it", FromPad, TI);
4782 if (FromPad == ToPadParent) {
4783 // This is a legal unwind edge.
4784 break;
4785 }
4786 Check(!isa<ConstantTokenNone>(FromPad),
4787 "A single unwind edge may only enter one EH pad", TI);
4788 Check(Seen.insert(FromPad).second, "EH pad jumps through a cycle of pads",
4789 FromPad);
4790
4791 // This will be diagnosed on the corresponding instruction already. We
4792 // need the extra check here to make sure getParentPad() works.
4793 Check(isa<FuncletPadInst>(FromPad) || isa<CatchSwitchInst>(FromPad),
4794 "Parent pad must be catchpad/cleanuppad/catchswitch", TI);
4795 }
4796 }
4797}
4798
4799void Verifier::visitLandingPadInst(LandingPadInst &LPI) {
4800 // The landingpad instruction is ill-formed if it doesn't have any clauses and
4801 // isn't a cleanup.
4802 Check(LPI.getNumClauses() > 0 || LPI.isCleanup(),
4803 "LandingPadInst needs at least one clause or to be a cleanup.", &LPI);
4804
4805 visitEHPadPredecessors(LPI);
4806
4807 if (!LandingPadResultTy)
4808 LandingPadResultTy = LPI.getType();
4809 else
4810 Check(LandingPadResultTy == LPI.getType(),
4811 "The landingpad instruction should have a consistent result type "
4812 "inside a function.",
4813 &LPI);
4814
4815 Function *F = LPI.getParent()->getParent();
4816 Check(F->hasPersonalityFn(),
4817 "LandingPadInst needs to be in a function with a personality.", &LPI);
4818
4819 // The landingpad instruction must be the first non-PHI instruction in the
4820 // block.
4821 Check(LPI.getParent()->getLandingPadInst() == &LPI,
4822 "LandingPadInst not the first non-PHI instruction in the block.", &LPI);
4823
4824 for (unsigned i = 0, e = LPI.getNumClauses(); i < e; ++i) {
4825 Constant *Clause = LPI.getClause(i);
4826 if (LPI.isCatch(i)) {
4827 Check(isa<PointerType>(Clause->getType()),
4828 "Catch operand does not have pointer type!", &LPI);
4829 } else {
4830 Check(LPI.isFilter(i), "Clause is neither catch nor filter!", &LPI);
4832 "Filter operand is not an array of constants!", &LPI);
4833 }
4834 }
4835
4836 visitInstruction(LPI);
4837}
4838
4839void Verifier::visitResumeInst(ResumeInst &RI) {
4841 "ResumeInst needs to be in a function with a personality.", &RI);
4842
4843 if (!LandingPadResultTy)
4844 LandingPadResultTy = RI.getValue()->getType();
4845 else
4846 Check(LandingPadResultTy == RI.getValue()->getType(),
4847 "The resume instruction should have a consistent result type "
4848 "inside a function.",
4849 &RI);
4850
4851 visitTerminator(RI);
4852}
4853
4854void Verifier::visitCatchPadInst(CatchPadInst &CPI) {
4855 BasicBlock *BB = CPI.getParent();
4856
4857 Function *F = BB->getParent();
4858 Check(F->hasPersonalityFn(),
4859 "CatchPadInst needs to be in a function with a personality.", &CPI);
4860
4862 "CatchPadInst needs to be directly nested in a CatchSwitchInst.",
4863 CPI.getParentPad());
4864
4865 // The catchpad instruction must be the first non-PHI instruction in the
4866 // block.
4867 Check(&*BB->getFirstNonPHIIt() == &CPI,
4868 "CatchPadInst not the first non-PHI instruction in the block.", &CPI);
4869
4870 visitEHPadPredecessors(CPI);
4871 visitFuncletPadInst(CPI);
4872}
4873
4874void Verifier::visitCatchReturnInst(CatchReturnInst &CatchReturn) {
4875 Check(isa<CatchPadInst>(CatchReturn.getOperand(0)),
4876 "CatchReturnInst needs to be provided a CatchPad", &CatchReturn,
4877 CatchReturn.getOperand(0));
4878
4879 visitTerminator(CatchReturn);
4880}
4881
4882void Verifier::visitCleanupPadInst(CleanupPadInst &CPI) {
4883 BasicBlock *BB = CPI.getParent();
4884
4885 Function *F = BB->getParent();
4886 Check(F->hasPersonalityFn(),
4887 "CleanupPadInst needs to be in a function with a personality.", &CPI);
4888
4889 // The cleanuppad instruction must be the first non-PHI instruction in the
4890 // block.
4891 Check(&*BB->getFirstNonPHIIt() == &CPI,
4892 "CleanupPadInst not the first non-PHI instruction in the block.", &CPI);
4893
4894 auto *ParentPad = CPI.getParentPad();
4895 Check(isa<ConstantTokenNone>(ParentPad) || isa<FuncletPadInst>(ParentPad),
4896 "CleanupPadInst has an invalid parent.", &CPI);
4897
4898 visitEHPadPredecessors(CPI);
4899 visitFuncletPadInst(CPI);
4900}
4901
/// Verify unwind-edge consistency for a funclet pad (catchpad/cleanuppad):
/// every unwind edge that exits \p FPI must agree on a single unwind
/// destination, and for a catchpad that destination must also match the
/// parent catchswitch's unwind destination.
void Verifier::visitFuncletPadInst(FuncletPadInst &FPI) {
  // First exiting unwind edge seen, and the pad (or token none) it targets.
  User *FirstUser = nullptr;
  Value *FirstUnwindPad = nullptr;
  // Worklist of pads whose unwind destination is still being resolved; a
  // cleanuppad's destination can only be found by searching its users.
  SmallVector<FuncletPadInst *, 8> Worklist({&FPI});
  SmallPtrSet<FuncletPadInst *, 8> Seen;

  while (!Worklist.empty()) {
    FuncletPadInst *CurrentPad = Worklist.pop_back_val();
    Check(Seen.insert(CurrentPad).second,
          "FuncletPadInst must not be nested within itself", CurrentPad);
    Value *UnresolvedAncestorPad = nullptr;
    for (User *U : CurrentPad->users()) {
      BasicBlock *UnwindDest;
      if (auto *CRI = dyn_cast<CleanupReturnInst>(U)) {
        UnwindDest = CRI->getUnwindDest();
      } else if (auto *CSI = dyn_cast<CatchSwitchInst>(U)) {
        // We allow catchswitch unwind to caller to nest
        // within an outer pad that unwinds somewhere else,
        // because catchswitch doesn't have a nounwind variant.
        // See e.g. SimplifyCFGOpt::SimplifyUnreachable.
        if (CSI->unwindsToCaller())
          continue;
        UnwindDest = CSI->getUnwindDest();
      } else if (auto *II = dyn_cast<InvokeInst>(U)) {
        UnwindDest = II->getUnwindDest();
      } else if (isa<CallInst>(U)) {
        // Calls which don't unwind may be found inside funclet
        // pads that unwind somewhere else. We don't *require*
        // such calls to be annotated nounwind.
        continue;
      } else if (auto *CPI = dyn_cast<CleanupPadInst>(U)) {
        // The unwind dest for a cleanup can only be found by
        // recursive search. Add it to the worklist, and we'll
        // search for its first use that determines where it unwinds.
        Worklist.push_back(CPI);
        continue;
      } else {
        Check(isa<CatchReturnInst>(U), "Bogus funclet pad use", U);
        continue;
      }

      Value *UnwindPad;
      bool ExitsFPI;
      if (UnwindDest) {
        UnwindPad = &*UnwindDest->getFirstNonPHIIt();
        if (!cast<Instruction>(UnwindPad)->isEHPad())
          continue;
        Value *UnwindParent = getParentPad(UnwindPad);
        // Ignore unwind edges that don't exit CurrentPad.
        if (UnwindParent == CurrentPad)
          continue;
        // Determine whether the original funclet pad is exited,
        // and if we are scanning nested pads determine how many
        // of them are exited so we can stop searching their
        // children.
        Value *ExitedPad = CurrentPad;
        ExitsFPI = false;
        do {
          if (ExitedPad == &FPI) {
            ExitsFPI = true;
            // Now we can resolve any ancestors of CurrentPad up to
            // FPI, but not including FPI since we need to make sure
            // to check all direct users of FPI for consistency.
            UnresolvedAncestorPad = &FPI;
            break;
          }
          Value *ExitedParent = getParentPad(ExitedPad);
          if (ExitedParent == UnwindParent) {
            // ExitedPad is the ancestor-most pad which this unwind
            // edge exits, so we can resolve up to it, meaning that
            // ExitedParent is the first ancestor still unresolved.
            UnresolvedAncestorPad = ExitedParent;
            break;
          }
          ExitedPad = ExitedParent;
        } while (!isa<ConstantTokenNone>(ExitedPad));
      } else {
        // Unwinding to caller exits all pads.
        UnwindPad = ConstantTokenNone::get(FPI.getContext());
        ExitsFPI = true;
        UnresolvedAncestorPad = &FPI;
      }

      if (ExitsFPI) {
        // This unwind edge exits FPI. Make sure it agrees with other
        // such edges.
        if (FirstUser) {
          Check(UnwindPad == FirstUnwindPad,
                "Unwind edges out of a funclet "
                "pad must have the same unwind "
                "dest",
                &FPI, U, FirstUser);
        } else {
          FirstUser = U;
          FirstUnwindPad = UnwindPad;
          // Record cleanup sibling unwinds for verifySiblingFuncletUnwinds
          if (isa<CleanupPadInst>(&FPI) && !isa<ConstantTokenNone>(UnwindPad) &&
              getParentPad(UnwindPad) == getParentPad(&FPI))
            SiblingFuncletInfo[&FPI] = cast<Instruction>(U);
        }
      }
      // Make sure we visit all uses of FPI, but for nested pads stop as
      // soon as we know where they unwind to.
      if (CurrentPad != &FPI)
        break;
    }
    if (UnresolvedAncestorPad) {
      if (CurrentPad == UnresolvedAncestorPad) {
        // When CurrentPad is FPI itself, we don't mark it as resolved even if
        // we've found an unwind edge that exits it, because we need to verify
        // all direct uses of FPI.
        assert(CurrentPad == &FPI);
        continue;
      }
      // Pop off the worklist any nested pads that we've found an unwind
      // destination for. The pads on the worklist are the uncles,
      // great-uncles, etc. of CurrentPad. We've found an unwind destination
      // for all ancestors of CurrentPad up to but not including
      // UnresolvedAncestorPad.
      Value *ResolvedPad = CurrentPad;
      while (!Worklist.empty()) {
        Value *UnclePad = Worklist.back();
        Value *AncestorPad = getParentPad(UnclePad);
        // Walk ResolvedPad up the ancestor list until we either find the
        // uncle's parent or the last resolved ancestor.
        while (ResolvedPad != AncestorPad) {
          Value *ResolvedParent = getParentPad(ResolvedPad);
          if (ResolvedParent == UnresolvedAncestorPad) {
            break;
          }
          ResolvedPad = ResolvedParent;
        }
        // If the resolved ancestor search didn't find the uncle's parent,
        // then the uncle is not yet resolved.
        if (ResolvedPad != AncestorPad)
          break;
        // This uncle is resolved, so pop it from the worklist.
        Worklist.pop_back();
      }
    }
  }

  // A catchpad's unwind destination must additionally agree with its parent
  // catchswitch's unwind destination (or both unwind to the caller).
  if (FirstUnwindPad) {
    if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(FPI.getParentPad())) {
      BasicBlock *SwitchUnwindDest = CatchSwitch->getUnwindDest();
      Value *SwitchUnwindPad;
      if (SwitchUnwindDest)
        SwitchUnwindPad = &*SwitchUnwindDest->getFirstNonPHIIt();
      else
        SwitchUnwindPad = ConstantTokenNone::get(FPI.getContext());
      Check(SwitchUnwindPad == FirstUnwindPad,
            "Unwind edges out of a catch must have the same unwind dest as "
            "the parent catchswitch",
            &FPI, FirstUser, CatchSwitch);
    }
  }

  visitInstruction(FPI);
}
5061
5062void Verifier::visitCatchSwitchInst(CatchSwitchInst &CatchSwitch) {
5063 BasicBlock *BB = CatchSwitch.getParent();
5064
5065 Function *F = BB->getParent();
5066 Check(F->hasPersonalityFn(),
5067 "CatchSwitchInst needs to be in a function with a personality.",
5068 &CatchSwitch);
5069
5070 // The catchswitch instruction must be the first non-PHI instruction in the
5071 // block.
5072 Check(&*BB->getFirstNonPHIIt() == &CatchSwitch,
5073 "CatchSwitchInst not the first non-PHI instruction in the block.",
5074 &CatchSwitch);
5075
5076 auto *ParentPad = CatchSwitch.getParentPad();
5077 Check(isa<ConstantTokenNone>(ParentPad) || isa<FuncletPadInst>(ParentPad),
5078 "CatchSwitchInst has an invalid parent.", ParentPad);
5079
5080 if (BasicBlock *UnwindDest = CatchSwitch.getUnwindDest()) {
5081 BasicBlock::iterator I = UnwindDest->getFirstNonPHIIt();
5082 Check(I->isEHPad() && !isa<LandingPadInst>(I),
5083 "CatchSwitchInst must unwind to an EH block which is not a "
5084 "landingpad.",
5085 &CatchSwitch);
5086
5087 // Record catchswitch sibling unwinds for verifySiblingFuncletUnwinds
5088 if (getParentPad(&*I) == ParentPad)
5089 SiblingFuncletInfo[&CatchSwitch] = &CatchSwitch;
5090 }
5091
5092 Check(CatchSwitch.getNumHandlers() != 0,
5093 "CatchSwitchInst cannot have empty handler list", &CatchSwitch);
5094
5095 for (BasicBlock *Handler : CatchSwitch.handlers()) {
5096 Check(isa<CatchPadInst>(Handler->getFirstNonPHIIt()),
5097 "CatchSwitchInst handlers must be catchpads", &CatchSwitch, Handler);
5098 }
5099
5100 visitEHPadPredecessors(CatchSwitch);
5101 visitTerminator(CatchSwitch);
5102}
5103
5104void Verifier::visitCleanupReturnInst(CleanupReturnInst &CRI) {
5106 "CleanupReturnInst needs to be provided a CleanupPad", &CRI,
5107 CRI.getOperand(0));
5108
5109 if (BasicBlock *UnwindDest = CRI.getUnwindDest()) {
5110 BasicBlock::iterator I = UnwindDest->getFirstNonPHIIt();
5111 Check(I->isEHPad() && !isa<LandingPadInst>(I),
5112 "CleanupReturnInst must unwind to an EH block which is not a "
5113 "landingpad.",
5114 &CRI);
5115 }
5116
5117 visitTerminator(CRI);
5118}
5119
5120void Verifier::verifyDominatesUse(Instruction &I, unsigned i) {
5121 Instruction *Op = cast<Instruction>(I.getOperand(i));
5122 // If the we have an invalid invoke, don't try to compute the dominance.
5123 // We already reject it in the invoke specific checks and the dominance
5124 // computation doesn't handle multiple edges.
5125 if (InvokeInst *II = dyn_cast<InvokeInst>(Op)) {
5126 if (II->getNormalDest() == II->getUnwindDest())
5127 return;
5128 }
5129
5130 // Quick check whether the def has already been encountered in the same block.
5131 // PHI nodes are not checked to prevent accepting preceding PHIs, because PHI
5132 // uses are defined to happen on the incoming edge, not at the instruction.
5133 //
5134 // FIXME: If this operand is a MetadataAsValue (wrapping a LocalAsMetadata)
5135 // wrapping an SSA value, assert that we've already encountered it. See
5136 // related FIXME in Mapper::mapLocalAsMetadata in ValueMapper.cpp.
5137 if (!isa<PHINode>(I) && InstsInThisBlock.count(Op))
5138 return;
5139
5140 const Use &U = I.getOperandUse(i);
5141 Check(DT.dominates(Op, U), "Instruction does not dominate all uses!", Op, &I);
5142}
5143
// Verify !dereferenceable / !dereferenceable_or_null metadata: only legal on
// pointer-typed results, and the attachment must carry a single i64 operand.
void Verifier::visitDereferenceableMetadata(Instruction& I, MDNode* MD) {
  Check(I.getType()->isPointerTy(),
        "dereferenceable, dereferenceable_or_null "
        "apply only to pointer types",
        &I);
  // NOTE(review): the opening of this Check call (which, per the message
  // below, restricts the attachment to load/inttoptr instructions) is
  // missing from this excerpt.
        "dereferenceable, dereferenceable_or_null apply only to load"
        " and inttoptr instructions, use attributes for calls or invokes",
        &I);
  Check(MD->getNumOperands() == 1,
        "dereferenceable, dereferenceable_or_null "
        "take one operand!",
        &I);
  // The single operand must be a 64-bit integer constant (the byte count).
  ConstantInt *CI = mdconst::dyn_extract<ConstantInt>(MD->getOperand(0));
  Check(CI && CI->getType()->isIntegerTy(64),
        "dereferenceable, "
        "dereferenceable_or_null metadata value must be an i64!",
        &I);
}
5163
5164void Verifier::visitNofreeMetadata(Instruction &I, MDNode *MD) {
5165 Check(I.getType()->isPointerTy(), "nofree applies only to pointer types", &I);
5166 Check((isa<IntToPtrInst>(I)), "nofree applies only to inttoptr instruction",
5167 &I);
5168 Check(MD->getNumOperands() == 0, "nofree metadata must be empty", &I);
5169}
5170
// Verify a !prof attachment. Recognized forms: "branch_weights" on branching
// instructions/calls/selects, "VP" (value profiling) records, and an
// "unknown" marker.
void Verifier::visitProfMetadata(Instruction &I, MDNode *MD) {
  // Returns the number of branch-weight operands this instruction must carry,
  // or 0 if the instruction cannot carry branch weights at all.
  auto GetBranchingTerminatorNumOperands = [&]() {
    unsigned ExpectedNumOperands = 0;
    if (BranchInst *BI = dyn_cast<BranchInst>(&I))
      ExpectedNumOperands = BI->getNumSuccessors();
    else if (SwitchInst *SI = dyn_cast<SwitchInst>(&I))
      ExpectedNumOperands = SI->getNumSuccessors();
    else if (isa<CallInst>(&I))
      ExpectedNumOperands = 1;
    else if (IndirectBrInst *IBI = dyn_cast<IndirectBrInst>(&I))
      ExpectedNumOperands = IBI->getNumDestinations();
    else if (isa<SelectInst>(&I))
      ExpectedNumOperands = 2;
    else if (CallBrInst *CI = dyn_cast<CallBrInst>(&I))
      ExpectedNumOperands = CI->getNumSuccessors();
    return ExpectedNumOperands;
  };
  Check(MD->getNumOperands() >= 1,
        "!prof annotations should have at least 1 operand", MD);
  // Check first operand.
  Check(MD->getOperand(0) != nullptr, "first operand should not be null", MD);
  // NOTE(review): the opening of this Check call (that operand 0 is an
  // MDString) is missing from this excerpt.
        "expected string with name of the !prof annotation", MD);
  MDString *MDS = cast<MDString>(MD->getOperand(0));
  StringRef ProfName = MDS->getString();

  // NOTE(review): the enclosing 'if' header for this branch (presumably
  // matching the "unknown" profile marker) is missing from this excerpt.
    Check(GetBranchingTerminatorNumOperands() != 0 || isa<InvokeInst>(I),
          "'unknown' !prof should only appear on instructions on which "
          "'branch_weights' would",
          MD);
    verifyUnknownProfileMetadata(MD);
    return;
  }

  Check(MD->getNumOperands() >= 2,
        "!prof annotations should have no less than 2 operands", MD);

  // Check consistency of !prof branch_weights metadata.
  if (ProfName == MDProfLabels::BranchWeights) {
    unsigned NumBranchWeights = getNumBranchWeights(*MD);
    if (isa<InvokeInst>(&I)) {
      // Invokes may carry either one or two weights.
      Check(NumBranchWeights == 1 || NumBranchWeights == 2,
            "Wrong number of InvokeInst branch_weights operands", MD);
    } else {
      const unsigned ExpectedNumOperands = GetBranchingTerminatorNumOperands();
      if (ExpectedNumOperands == 0)
        CheckFailed("!prof branch_weights are not allowed for this instruction",
                    MD);

      Check(NumBranchWeights == ExpectedNumOperands, "Wrong number of operands",
            MD);
    }
    // Each weight operand must be non-null and a constant integer.
    for (unsigned i = getBranchWeightOffset(MD); i < MD->getNumOperands();
         ++i) {
      auto &MDO = MD->getOperand(i);
      Check(MDO, "second operand should not be null", MD);
      // FIXME: "brunch_weights" in the diagnostic below is a typo for
      // "branch_weights". NOTE(review): the opening of this Check call is
      // missing from this excerpt.
            "!prof brunch_weights operand is not a const int");
    }
  } else if (ProfName == MDProfLabels::ValueProfile) {
    Check(isValueProfileMD(MD), "invalid value profiling metadata", MD);
    ConstantInt *KindInt = mdconst::dyn_extract<ConstantInt>(MD->getOperand(1));
    Check(KindInt, "VP !prof missing kind argument", MD);

    // The kind must fall inside the valid InstrProfValueKind range.
    auto Kind = KindInt->getZExtValue();
    Check(Kind >= InstrProfValueKind::IPVK_First &&
              Kind <= InstrProfValueKind::IPVK_Last,
          "Invalid VP !prof kind", MD);
    Check(MD->getNumOperands() % 2 == 1,
          "VP !prof should have an even number "
          "of arguments after 'VP'",
          MD);
    // NOTE(review): the Check opening for this conditional (restricting these
    // kinds to CallBase instructions, per the message below) is missing from
    // this excerpt.
    if (Kind == InstrProfValueKind::IPVK_IndirectCallTarget ||
        Kind == InstrProfValueKind::IPVK_MemOPSize)
          "VP !prof indirect call or memop size expected to be applied to "
          "CallBase instructions only",
          MD);
  } else {
    CheckFailed("expected either branch_weights or VP profile name", MD);
  }
}
5254
// Verify a !DIAssignID attachment and all users of the ID node: the
// attachment must sit on an alloca/store-like instruction, and every user of
// the ID must be a dbg.assign (intrinsic or DbgVariableRecord) in the same
// function as I.
void Verifier::visitDIAssignIDMetadata(Instruction &I, MDNode *MD) {
  assert(I.hasMetadata(LLVMContext::MD_DIAssignID));
  // DIAssignID metadata must be attached to either an alloca or some form of
  // store/memory-writing instruction.
  // FIXME: We allow all intrinsic insts here to avoid trying to enumerate all
  // possible store intrinsics.
  // NOTE(review): the initializer expression for ExpectedInstTy is missing
  // from this excerpt.
  bool ExpectedInstTy =
  CheckDI(ExpectedInstTy, "!DIAssignID attached to unexpected instruction kind",
          I, MD);
  // Iterate over the MetadataAsValue uses of the DIAssignID - these should
  // only be found as DbgAssignIntrinsic operands.
  if (auto *AsValue = MetadataAsValue::getIfExists(Context, MD)) {
    for (auto *User : AsValue->users()) {
      // NOTE(review): the opening of this CheckDI call (which, per the
      // message below, requires User to be a dbg.assign intrinsic) is missing
      // from this excerpt.
              "!DIAssignID should only be used by llvm.dbg.assign intrinsics",
              MD, User);
      // All of the dbg.assign intrinsics should be in the same function as I.
      if (auto *DAI = dyn_cast<DbgAssignIntrinsic>(User))
        CheckDI(DAI->getFunction() == I.getFunction(),
                "dbg.assign not in same function as inst", DAI, &I);
    }
  }
  // The same constraints apply to the non-instruction debug records attached
  // to the ID node.
  for (DbgVariableRecord *DVR :
       cast<DIAssignID>(MD)->getAllDbgVariableRecordUsers()) {
    CheckDI(DVR->isDbgAssign(),
            "!DIAssignID should only be used by Assign DVRs.", MD, DVR);
    CheckDI(DVR->getFunction() == I.getFunction(),
            "DVRAssign not in same function as inst", DVR, &I);
  }
}
5286
// Verify an !mmra attachment: either a single MMRA tag or a tuple whose
// operands are all MMRA tags.
void Verifier::visitMMRAMetadata(Instruction &I, MDNode *MD) {
  // NOTE(review): the opening of this Check call (restricting which
  // instruction kinds may carry !mmra) is missing from this excerpt.
        "!mmra metadata attached to unexpected instruction kind", I, MD);

  // MMRA Metadata should either be a tag, e.g. !{!"foo", !"bar"}, or a
  // list of tags such as !2 in the following example:
  //  !0 = !{!"a", !"b"}
  //  !1 = !{!"c", !"d"}
  //  !2 = !{!0, !1}
  if (MMRAMetadata::isTagMD(MD))
    return;

  // Not a single tag, so it must be a tuple where each operand is a tag.
  Check(isa<MDTuple>(MD), "!mmra expected to be a metadata tuple", I, MD);
  for (const MDOperand &MDOp : MD->operands())
    Check(MMRAMetadata::isTagMD(MDOp.get()),
          "!mmra metadata tuple operand is not an MMRA tag", I, MDOp.get());
}
5304
void Verifier::visitCallStackMetadata(MDNode *MD) {
  // Call stack metadata should consist of a list of at least 1 constant int
  // (representing a hash of the location).
  Check(MD->getNumOperands() >= 1,
        "call stack metadata should have at least 1 operand", MD);

  // NOTE(review): the opening of this Check call (which, per the message
  // below, requires each operand to be a constant integer) is missing from
  // this excerpt.
  for (const auto &Op : MD->operands())
          "call stack metadata operand should be constant integer", Op);
}
5315
5316void Verifier::visitMemProfMetadata(Instruction &I, MDNode *MD) {
5317 Check(isa<CallBase>(I), "!memprof metadata should only exist on calls", &I);
5318 Check(MD->getNumOperands() >= 1,
5319 "!memprof annotations should have at least 1 metadata operand "
5320 "(MemInfoBlock)",
5321 MD);
5322
5323 // Check each MIB
5324 for (auto &MIBOp : MD->operands()) {
5325 MDNode *MIB = dyn_cast<MDNode>(MIBOp);
5326 // The first operand of an MIB should be the call stack metadata.
5327 // There rest of the operands should be MDString tags, and there should be
5328 // at least one.
5329 Check(MIB->getNumOperands() >= 2,
5330 "Each !memprof MemInfoBlock should have at least 2 operands", MIB);
5331
5332 // Check call stack metadata (first operand).
5333 Check(MIB->getOperand(0) != nullptr,
5334 "!memprof MemInfoBlock first operand should not be null", MIB);
5335 Check(isa<MDNode>(MIB->getOperand(0)),
5336 "!memprof MemInfoBlock first operand should be an MDNode", MIB);
5337 MDNode *StackMD = dyn_cast<MDNode>(MIB->getOperand(0));
5338 visitCallStackMetadata(StackMD);
5339
5340 // The next set of 1 or more operands should be MDString.
5341 unsigned I = 1;
5342 for (; I < MIB->getNumOperands(); ++I) {
5343 if (!isa<MDString>(MIB->getOperand(I))) {
5344 Check(I > 1,
5345 "!memprof MemInfoBlock second operand should be an MDString",
5346 MIB);
5347 break;
5348 }
5349 }
5350
5351 // Any remaining should be MDNode that are pairs of integers
5352 for (; I < MIB->getNumOperands(); ++I) {
5353 MDNode *OpNode = dyn_cast<MDNode>(MIB->getOperand(I));
5354 Check(OpNode, "Not all !memprof MemInfoBlock operands 2 to N are MDNode",
5355 MIB);
5356 Check(OpNode->getNumOperands() == 2,
5357 "Not all !memprof MemInfoBlock operands 2 to N are MDNode with 2 "
5358 "operands",
5359 MIB);
5360 // Check that all of Op's operands are ConstantInt.
5361 Check(llvm::all_of(OpNode->operands(),
5362 [](const MDOperand &Op) {
5363 return mdconst::hasa<ConstantInt>(Op);
5364 }),
5365 "Not all !memprof MemInfoBlock operands 2 to N are MDNode with "
5366 "ConstantInt operands",
5367 MIB);
5368 }
5369 }
5370}
5371
5372void Verifier::visitCallsiteMetadata(Instruction &I, MDNode *MD) {
5373 Check(isa<CallBase>(I), "!callsite metadata should only exist on calls", &I);
5374 // Verify the partial callstack annotated from memprof profiles. This callsite
5375 // is a part of a profiled allocation callstack.
5376 visitCallStackMetadata(MD);
5377}
5378
5379static inline bool isConstantIntMetadataOperand(const Metadata *MD) {
5380 if (auto *VAL = dyn_cast<ValueAsMetadata>(MD))
5381 return isa<ConstantInt>(VAL->getValue());
5382 return false;
5383}
5384
// Verify !callee_type metadata: a list of generalized type metadata nodes,
// each with exactly two operands — a constant zero followed by a generalized
// type MDString.
void Verifier::visitCalleeTypeMetadata(Instruction &I, MDNode *MD) {
  Check(isa<CallBase>(I), "!callee_type metadata should only exist on calls",
        &I);
  for (Metadata *Op : MD->operands()) {
    // NOTE(review): the opening of this Check call (which, per the message
    // below, requires Op to be an MDNode) is missing from this excerpt.
        "The callee_type metadata must be a list of type metadata nodes", Op);
    auto *TypeMD = cast<MDNode>(Op);
    Check(TypeMD->getNumOperands() == 2,
          "Well-formed generalized type metadata must contain exactly two "
          "operands",
          Op);
    // Operand 0 must be the constant integer zero.
    Check(isConstantIntMetadataOperand(TypeMD->getOperand(0)) &&
              mdconst::extract<ConstantInt>(TypeMD->getOperand(0))->isZero(),
          "The first operand of type metadata for functions must be zero", Op);
    // Only generalized ("...generalized") type strings are admissible here.
    Check(TypeMD->hasGeneralizedMDString(),
          "Only generalized type metadata can be part of the callee_type "
          "metadata list",
          Op);
  }
}
5405
5406void Verifier::visitAnnotationMetadata(MDNode *Annotation) {
5407 Check(isa<MDTuple>(Annotation), "annotation must be a tuple");
5408 Check(Annotation->getNumOperands() >= 1,
5409 "annotation must have at least one operand");
5410 for (const MDOperand &Op : Annotation->operands()) {
5411 bool TupleOfStrings =
5412 isa<MDTuple>(Op.get()) &&
5413 all_of(cast<MDTuple>(Op)->operands(), [](auto &Annotation) {
5414 return isa<MDString>(Annotation.get());
5415 });
5416 Check(isa<MDString>(Op.get()) || TupleOfStrings,
5417 "operands must be a string or a tuple of strings");
5418 }
5419}
5420
// Verify a single alias scope node: two or three operands, where operand 0 is
// a self-reference or string, operand 1 is the domain MDNode (itself one or
// two operands of the same shape), and the optional operand 2 is a string.
void Verifier::visitAliasScopeMetadata(const MDNode *MD) {
  unsigned NumOps = MD->getNumOperands();
  Check(NumOps >= 2 && NumOps <= 3, "scope must have two or three operands",
        MD);
  Check(MD->getOperand(0).get() == MD || isa<MDString>(MD->getOperand(0)),
        "first scope operand must be self-referential or string", MD);
  // NOTE(review): the opening of this Check call (that operand 2 is an
  // MDString) is missing from this excerpt.
  if (NumOps == 3)
          "third scope operand must be string (if used)", MD);

  MDNode *Domain = dyn_cast<MDNode>(MD->getOperand(1));
  Check(Domain != nullptr, "second scope operand must be MDNode", MD);

  // Validate the domain node's own structure.
  unsigned NumDomainOps = Domain->getNumOperands();
  Check(NumDomainOps >= 1 && NumDomainOps <= 2,
        "domain must have one or two operands", Domain);
  Check(Domain->getOperand(0).get() == Domain ||
            isa<MDString>(Domain->getOperand(0)),
        "first domain operand must be self-referential or string", Domain);
  if (NumDomainOps == 2)
    Check(isa<MDString>(Domain->getOperand(1)),
          "second domain operand must be string (if used)", Domain);
}
5444
5445void Verifier::visitAliasScopeListMetadata(const MDNode *MD) {
5446 for (const MDOperand &Op : MD->operands()) {
5447 const MDNode *OpMD = dyn_cast<MDNode>(Op);
5448 Check(OpMD != nullptr, "scope list must consist of MDNodes", MD);
5449 visitAliasScopeMetadata(OpMD);
5450 }
5451}
5452
5453void Verifier::visitAccessGroupMetadata(const MDNode *MD) {
5454 auto IsValidAccessScope = [](const MDNode *MD) {
5455 return MD->getNumOperands() == 0 && MD->isDistinct();
5456 };
5457
5458 // It must be either an access scope itself...
5459 if (IsValidAccessScope(MD))
5460 return;
5461
5462 // ...or a list of access scopes.
5463 for (const MDOperand &Op : MD->operands()) {
5464 const MDNode *OpMD = dyn_cast<MDNode>(Op);
5465 Check(OpMD != nullptr, "Access scope list must consist of MDNodes", MD);
5466 Check(IsValidAccessScope(OpMD),
5467 "Access scope list contains invalid access scope", MD);
5468 }
5469}
5470
5471void Verifier::visitCapturesMetadata(Instruction &I, const MDNode *Captures) {
5472 static const char *ValidArgs[] = {"address_is_null", "address",
5473 "read_provenance", "provenance"};
5474
5475 auto *SI = dyn_cast<StoreInst>(&I);
5476 Check(SI, "!captures metadata can only be applied to store instructions", &I);
5477 Check(SI->getValueOperand()->getType()->isPointerTy(),
5478 "!captures metadata can only be applied to store with value operand of "
5479 "pointer type",
5480 &I);
5481 Check(Captures->getNumOperands() != 0, "!captures metadata cannot be empty",
5482 &I);
5483
5484 for (Metadata *Op : Captures->operands()) {
5485 auto *Str = dyn_cast<MDString>(Op);
5486 Check(Str, "!captures metadata must be a list of strings", &I);
5487 Check(is_contained(ValidArgs, Str->getString()),
5488 "invalid entry in !captures metadata", &I, Str);
5489 }
5490}
5491
// Verify an !alloc_token attachment: calls only, exactly two operands, the
// first of which must be an MDString.
void Verifier::visitAllocTokenMetadata(Instruction &I, MDNode *MD) {
  Check(isa<CallBase>(I), "!alloc_token should only exist on calls", &I);
  Check(MD->getNumOperands() == 2, "!alloc_token must have 2 operands", MD);
  Check(isa<MDString>(MD->getOperand(0)), "expected string", MD);
  // NOTE(review): the opening of this Check call (which, per the message
  // below, expects operand 1 to be an integer constant) is missing from this
  // excerpt.
        "expected integer constant", MD);
}
5499
/// verifyInstruction - Verify that an instruction is well formed.
///
void Verifier::visitInstruction(Instruction &I) {
  BasicBlock *BB = I.getParent();
  Check(BB, "Instruction not embedded in basic block!", &I);

  if (!isa<PHINode>(I)) { // Check that non-phi nodes are not self referential
    for (User *U : I.users()) {
      // Self-reference is tolerated only in code unreachable from the entry,
      // where SSA dominance is not meaningful.
      Check(U != (User *)&I || !DT.isReachableFromEntry(BB),
            "Only PHI nodes may reference their own value!", &I);
    }
  }

  // Check that void typed values don't have names
  Check(!I.getType()->isVoidTy() || !I.hasName(),
        "Instruction has a name, but provides a void value!", &I);

  // Check that the return value of the instruction is either void or a legal
  // value type.
  Check(I.getType()->isVoidTy() || I.getType()->isFirstClassType(),
        "Instruction returns a non-scalar type!", &I);

  // Check that the instruction doesn't produce metadata. Calls are already
  // checked against the callee type.
  Check(!I.getType()->isMetadataTy() || isa<CallInst>(I) || isa<InvokeInst>(I),
        "Invalid use of metadata!", &I);

  // Check that all uses of the instruction, if they are instructions
  // themselves, actually have parent basic blocks. If the use is not an
  // instruction, it is an error!
  for (Use &U : I.uses()) {
    if (Instruction *Used = dyn_cast<Instruction>(U.getUser()))
      Check(Used->getParent() != nullptr,
            "Instruction referencing"
            " instruction not embedded in a basic block!",
            &I, Used);
    else {
      CheckFailed("Use of instruction is not an instruction!", U);
      return;
    }
  }

  // Get a pointer to the call base of the instruction if it is some form of
  // call.
  const CallBase *CBI = dyn_cast<CallBase>(&I);

  // Per-operand checks: operands must be first-class values, and references
  // to functions, blocks, arguments, globals, and other instructions must
  // stay within the expected function/module.
  for (unsigned i = 0, e = I.getNumOperands(); i != e; ++i) {
    Check(I.getOperand(i) != nullptr, "Instruction has null operand!", &I);

    // Check to make sure that only first-class-values are operands to
    // instructions.
    if (!I.getOperand(i)->getType()->isFirstClassType()) {
      Check(false, "Instruction operands must be first-class values!", &I);
    }

    if (Function *F = dyn_cast<Function>(I.getOperand(i))) {
      // This code checks whether the function is used as the operand of a
      // clang_arc_attachedcall operand bundle.
      auto IsAttachedCallOperand = [](Function *F, const CallBase *CBI,
                                      int Idx) {
        // NOTE(review): the bundle-tag/index arguments to
        // isOperandBundleOfType are missing from this excerpt.
        return CBI && CBI->isOperandBundleOfType(
      };

      // Check to make sure that the "address of" an intrinsic function is never
      // taken. Ignore cases where the address of the intrinsic function is used
      // as the argument of operand bundle "clang.arc.attachedcall" as those
      // cases are handled in verifyAttachedCallBundle.
      Check((!F->isIntrinsic() ||
             (CBI && &CBI->getCalledOperandUse() == &I.getOperandUse(i)) ||
             IsAttachedCallOperand(F, CBI, i)),
            "Cannot take the address of an intrinsic!", &I);
      // Only the allow-listed intrinsics below may be the callee of an
      // instruction other than call/callbr (e.g. invoke).
      Check(!F->isIntrinsic() || isa<CallInst>(I) || isa<CallBrInst>(I) ||
                F->getIntrinsicID() == Intrinsic::donothing ||
                F->getIntrinsicID() == Intrinsic::seh_try_begin ||
                F->getIntrinsicID() == Intrinsic::seh_try_end ||
                F->getIntrinsicID() == Intrinsic::seh_scope_begin ||
                F->getIntrinsicID() == Intrinsic::seh_scope_end ||
                F->getIntrinsicID() == Intrinsic::coro_resume ||
                F->getIntrinsicID() == Intrinsic::coro_destroy ||
                F->getIntrinsicID() == Intrinsic::coro_await_suspend_void ||
                F->getIntrinsicID() == Intrinsic::coro_await_suspend_bool ||
                F->getIntrinsicID() == Intrinsic::coro_await_suspend_handle ||
                F->getIntrinsicID() ==
                    Intrinsic::experimental_patchpoint_void ||
                F->getIntrinsicID() == Intrinsic::experimental_patchpoint ||
                F->getIntrinsicID() == Intrinsic::fake_use ||
                F->getIntrinsicID() == Intrinsic::experimental_gc_statepoint ||
                F->getIntrinsicID() == Intrinsic::wasm_throw ||
                F->getIntrinsicID() == Intrinsic::wasm_rethrow ||
                IsAttachedCallOperand(F, CBI, i),
            "Cannot invoke an intrinsic other than donothing, patchpoint, "
            "statepoint, coro_resume, coro_destroy, clang.arc.attachedcall or "
            "wasm.(re)throw",
            &I);
      Check(F->getParent() == &M, "Referencing function in another module!", &I,
            &M, F, F->getParent());
    } else if (BasicBlock *OpBB = dyn_cast<BasicBlock>(I.getOperand(i))) {
      Check(OpBB->getParent() == BB->getParent(),
            "Referring to a basic block in another function!", &I);
    } else if (Argument *OpArg = dyn_cast<Argument>(I.getOperand(i))) {
      Check(OpArg->getParent() == BB->getParent(),
            "Referring to an argument in another function!", &I);
    } else if (GlobalValue *GV = dyn_cast<GlobalValue>(I.getOperand(i))) {
      Check(GV->getParent() == &M, "Referencing global in another module!", &I,
            &M, GV, GV->getParent());
    } else if (Instruction *OpInst = dyn_cast<Instruction>(I.getOperand(i))) {
      Check(OpInst->getFunction() == BB->getParent(),
            "Referring to an instruction in another function!", &I);
      verifyDominatesUse(I, i);
    } else if (isa<InlineAsm>(I.getOperand(i))) {
      // Inline asm may only appear as the direct callee, never as a value.
      Check(CBI && &CBI->getCalledOperandUse() == &I.getOperandUse(i),
            "Cannot take the address of an inline asm!", &I);
    } else if (auto *CPA = dyn_cast<ConstantPtrAuth>(I.getOperand(i))) {
      visitConstantExprsRecursively(CPA);
    } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(I.getOperand(i))) {
      if (CE->getType()->isPtrOrPtrVectorTy()) {
        // If we have a ConstantExpr pointer, we need to see if it came from an
        // illegal bitcast.
        visitConstantExprsRecursively(CE);
      }
    }
  }

  // Per-kind metadata attachment checks, mostly dispatched to dedicated
  // visitors below.
  if (MDNode *MD = I.getMetadata(LLVMContext::MD_fpmath)) {
    Check(I.getType()->isFPOrFPVectorTy(),
          "fpmath requires a floating point result!", &I);
    Check(MD->getNumOperands() == 1, "fpmath takes one operand!", &I);
    // NOTE(review): the initializer extracting CFP0 from the metadata operand
    // is missing from this excerpt.
    if (ConstantFP *CFP0 =
      const APFloat &Accuracy = CFP0->getValueAPF();
      // The accuracy operand must be a finite, positive float.
      Check(&Accuracy.getSemantics() == &APFloat::IEEEsingle(),
            "fpmath accuracy must have float type", &I);
      Check(Accuracy.isFiniteNonZero() && !Accuracy.isNegative(),
            "fpmath accuracy not a positive number!", &I);
    } else {
      Check(false, "invalid fpmath accuracy!", &I);
    }
  }

  if (MDNode *Range = I.getMetadata(LLVMContext::MD_range)) {
    // NOTE(review): the opening of this Check call is missing from this
    // excerpt.
          "Ranges are only for loads, calls and invokes!", &I);
    visitRangeMetadata(I, Range, I.getType());
  }

  if (MDNode *Range = I.getMetadata(LLVMContext::MD_noalias_addrspace)) {
    // NOTE(review): the opening of this Check call is missing from this
    // excerpt.
          "noalias.addrspace are only for memory operations!", &I);
    visitNoaliasAddrspaceMetadata(I, Range, I.getType());
  }

  if (I.hasMetadata(LLVMContext::MD_invariant_group)) {
    // NOTE(review): the opening of this Check call is missing from this
    // excerpt.
          "invariant.group metadata is only for loads and stores", &I);
  }

  if (MDNode *MD = I.getMetadata(LLVMContext::MD_nonnull)) {
    Check(I.getType()->isPointerTy(), "nonnull applies only to pointer types",
          &I);
    // NOTE(review): the opening of this Check call is missing from this
    // excerpt.
          "nonnull applies only to load instructions, use attributes"
          " for calls or invokes",
          &I);
    Check(MD->getNumOperands() == 0, "nonnull metadata must be empty", &I);
  }

  if (MDNode *MD = I.getMetadata(LLVMContext::MD_dereferenceable))
    visitDereferenceableMetadata(I, MD);

  if (MDNode *MD = I.getMetadata(LLVMContext::MD_dereferenceable_or_null))
    visitDereferenceableMetadata(I, MD);

  if (MDNode *MD = I.getMetadata(LLVMContext::MD_nofree))
    visitNofreeMetadata(I, MD);

  if (MDNode *TBAA = I.getMetadata(LLVMContext::MD_tbaa))
    TBAAVerifyHelper.visitTBAAMetadata(&I, TBAA);

  if (MDNode *MD = I.getMetadata(LLVMContext::MD_noalias))
    visitAliasScopeListMetadata(MD);
  if (MDNode *MD = I.getMetadata(LLVMContext::MD_alias_scope))
    visitAliasScopeListMetadata(MD);

  if (MDNode *MD = I.getMetadata(LLVMContext::MD_access_group))
    visitAccessGroupMetadata(MD);

  if (MDNode *AlignMD = I.getMetadata(LLVMContext::MD_align)) {
    Check(I.getType()->isPointerTy(), "align applies only to pointer types",
          &I);
    // NOTE(review): the opening of this Check call is missing from this
    // excerpt.
          "align applies only to load instructions, "
          "use attributes for calls or invokes",
          &I);
    Check(AlignMD->getNumOperands() == 1, "align takes one operand!", &I);
    // The single operand must be an i64 that is a power of two and within
    // the implementation's maximum alignment.
    ConstantInt *CI = mdconst::dyn_extract<ConstantInt>(AlignMD->getOperand(0));
    Check(CI && CI->getType()->isIntegerTy(64),
          "align metadata value must be an i64!", &I);
    uint64_t Align = CI->getZExtValue();
    Check(isPowerOf2_64(Align), "align metadata value must be a power of 2!",
          &I);
    Check(Align <= Value::MaximumAlignment,
          "alignment is larger that implementation defined limit", &I);
  }

  if (MDNode *MD = I.getMetadata(LLVMContext::MD_prof))
    visitProfMetadata(I, MD);

  if (MDNode *MD = I.getMetadata(LLVMContext::MD_memprof))
    visitMemProfMetadata(I, MD);

  if (MDNode *MD = I.getMetadata(LLVMContext::MD_callsite))
    visitCallsiteMetadata(I, MD);

  if (MDNode *MD = I.getMetadata(LLVMContext::MD_callee_type))
    visitCalleeTypeMetadata(I, MD);

  if (MDNode *MD = I.getMetadata(LLVMContext::MD_DIAssignID))
    visitDIAssignIDMetadata(I, MD);

  if (MDNode *MMRA = I.getMetadata(LLVMContext::MD_mmra))
    visitMMRAMetadata(I, MMRA);

  if (MDNode *Annotation = I.getMetadata(LLVMContext::MD_annotation))
    visitAnnotationMetadata(Annotation);

  if (MDNode *Captures = I.getMetadata(LLVMContext::MD_captures))
    visitCapturesMetadata(I, Captures);

  if (MDNode *MD = I.getMetadata(LLVMContext::MD_alloc_token))
    visitAllocTokenMetadata(I, MD);

  // Validate the debug location, if any, attached to this instruction.
  if (MDNode *N = I.getDebugLoc().getAsMDNode()) {
    CheckDI(isa<DILocation>(N), "invalid !dbg metadata attachment", &I, N);
    visitMDNode(*N, AreDebugLocsAllowed::Yes);

    if (auto *DL = dyn_cast<DILocation>(N)) {
      if (DL->getAtomGroup()) {
        // Atom groups are only meaningful when the enclosing subprogram has
        // Key Instructions enabled.
        CheckDI(DL->getScope()->getSubprogram()->getKeyInstructionsEnabled(),
                "DbgLoc uses atomGroup but DISubprogram doesn't have Key "
                "Instructions enabled",
                DL, DL->getScope()->getSubprogram());
      }
    }
  }

  // Visit every metadata attachment; debug locations are only allowed inside
  // the !dbg and !loop attachments.
  // NOTE(review): the declaration of the MDs container is missing from this
  // excerpt.
  I.getAllMetadata(MDs);
  for (auto Attachment : MDs) {
    unsigned Kind = Attachment.first;
    auto AllowLocs =
        (Kind == LLVMContext::MD_dbg || Kind == LLVMContext::MD_loop)
            ? AreDebugLocsAllowed::Yes
            : AreDebugLocsAllowed::No;
    visitMDNode(*Attachment.second, AllowLocs);
  }

  // Record this instruction so later same-block dominance queries can take
  // the fast path in verifyDominatesUse.
  InstsInThisBlock.insert(&I);
}
5760
5761/// Allow intrinsics to be verified in different ways.
5762void Verifier::visitIntrinsicCall(Intrinsic::ID ID, CallBase &Call) {
5764 Check(IF->isDeclaration(), "Intrinsic functions should never be defined!",
5765 IF);
5766
5767 // Verify that the intrinsic prototype lines up with what the .td files
5768 // describe.
5769 FunctionType *IFTy = IF->getFunctionType();
5770 bool IsVarArg = IFTy->isVarArg();
5771
5775
5776 // Walk the descriptors to extract overloaded types.
5781 "Intrinsic has incorrect return type!", IF);
5783 "Intrinsic has incorrect argument type!", IF);
5784
5785 // Verify if the intrinsic call matches the vararg property.
5786 if (IsVarArg)
5788 "Intrinsic was not defined with variable arguments!", IF);
5789 else
5791 "Callsite was not defined with variable arguments!", IF);
5792
5793 // All descriptors should be absorbed by now.
5794 Check(TableRef.empty(), "Intrinsic has too few arguments!", IF);
5795
5796 // Now that we have the intrinsic ID and the actual argument types (and we
5797 // know they are legal for the intrinsic!) get the intrinsic name through the
5798 // usual means. This allows us to verify the mangling of argument types into
5799 // the name.
5800 const std::string ExpectedName =
5801 Intrinsic::getName(ID, ArgTys, IF->getParent(), IFTy);
5802 Check(ExpectedName == IF->getName(),
5803 "Intrinsic name not mangled correctly for type arguments! "
5804 "Should be: " +
5805 ExpectedName,
5806 IF);
5807
5808 // If the intrinsic takes MDNode arguments, verify that they are either global
5809 // or are local to *this* function.
5810 for (Value *V : Call.args()) {
5811 if (auto *MD = dyn_cast<MetadataAsValue>(V))
5812 visitMetadataAsValue(*MD, Call.getCaller());
5813 if (auto *Const = dyn_cast<Constant>(V))
5814 Check(!Const->getType()->isX86_AMXTy(),
5815 "const x86_amx is not allowed in argument!");
5816 }
5817
5818 switch (ID) {
5819 default:
5820 break;
5821 case Intrinsic::assume: {
5822 if (Call.hasOperandBundles()) {
5824 Check(Cond && Cond->isOne(),
5825 "assume with operand bundles must have i1 true condition", Call);
5826 }
5827 for (auto &Elem : Call.bundle_op_infos()) {
5828 unsigned ArgCount = Elem.End - Elem.Begin;
5829 // Separate storage assumptions are special insofar as they're the only
5830 // operand bundles allowed on assumes that aren't parameter attributes.
5831 if (Elem.Tag->getKey() == "separate_storage") {
5832 Check(ArgCount == 2,
5833 "separate_storage assumptions should have 2 arguments", Call);
5834 Check(Call.getOperand(Elem.Begin)->getType()->isPointerTy() &&
5835 Call.getOperand(Elem.Begin + 1)->getType()->isPointerTy(),
5836 "arguments to separate_storage assumptions should be pointers",
5837 Call);
5838 continue;
5839 }
5840 Check(Elem.Tag->getKey() == "ignore" ||
5841 Attribute::isExistingAttribute(Elem.Tag->getKey()),
5842 "tags must be valid attribute names", Call);
5843 Attribute::AttrKind Kind =
5844 Attribute::getAttrKindFromName(Elem.Tag->getKey());
5845 if (Kind == Attribute::Alignment) {
5846 Check(ArgCount <= 3 && ArgCount >= 2,
5847 "alignment assumptions should have 2 or 3 arguments", Call);
5848 Check(Call.getOperand(Elem.Begin)->getType()->isPointerTy(),
5849 "first argument should be a pointer", Call);
5850 Check(Call.getOperand(Elem.Begin + 1)->getType()->isIntegerTy(),
5851 "second argument should be an integer", Call);
5852 if (ArgCount == 3)
5853 Check(Call.getOperand(Elem.Begin + 2)->getType()->isIntegerTy(),
5854 "third argument should be an integer if present", Call);
5855 continue;
5856 }
5857 if (Kind == Attribute::Dereferenceable) {
5858 Check(ArgCount == 2,
5859 "dereferenceable assumptions should have 2 arguments", Call);
5860 Check(Call.getOperand(Elem.Begin)->getType()->isPointerTy(),
5861 "first argument should be a pointer", Call);
5862 Check(Call.getOperand(Elem.Begin + 1)->getType()->isIntegerTy(),
5863 "second argument should be an integer", Call);
5864 continue;
5865 }
5866 Check(ArgCount <= 2, "too many arguments", Call);
5867 if (Kind == Attribute::None)
5868 break;
5869 if (Attribute::isIntAttrKind(Kind)) {
5870 Check(ArgCount == 2, "this attribute should have 2 arguments", Call);
5871 Check(isa<ConstantInt>(Call.getOperand(Elem.Begin + 1)),
5872 "the second argument should be a constant integral value", Call);
5873 } else if (Attribute::canUseAsParamAttr(Kind)) {
5874 Check((ArgCount) == 1, "this attribute should have one argument", Call);
5875 } else if (Attribute::canUseAsFnAttr(Kind)) {
5876 Check((ArgCount) == 0, "this attribute has no argument", Call);
5877 }
5878 }
5879 break;
5880 }
5881 case Intrinsic::ucmp:
5882 case Intrinsic::scmp: {
5883 Type *SrcTy = Call.getOperand(0)->getType();
5884 Type *DestTy = Call.getType();
5885
5886 Check(DestTy->getScalarSizeInBits() >= 2,
5887 "result type must be at least 2 bits wide", Call);
5888
5889 bool IsDestTypeVector = DestTy->isVectorTy();
5890 Check(SrcTy->isVectorTy() == IsDestTypeVector,
5891 "ucmp/scmp argument and result types must both be either vector or "
5892 "scalar types",
5893 Call);
5894 if (IsDestTypeVector) {
5895 auto SrcVecLen = cast<VectorType>(SrcTy)->getElementCount();
5896 auto DestVecLen = cast<VectorType>(DestTy)->getElementCount();
5897 Check(SrcVecLen == DestVecLen,
5898 "return type and arguments must have the same number of "
5899 "elements",
5900 Call);
5901 }
5902 break;
5903 }
5904 case Intrinsic::coro_id: {
5905 auto *InfoArg = Call.getArgOperand(3)->stripPointerCasts();
5906 if (isa<ConstantPointerNull>(InfoArg))
5907 break;
5908 auto *GV = dyn_cast<GlobalVariable>(InfoArg);
5909 Check(GV && GV->isConstant() && GV->hasDefinitiveInitializer(),
5910 "info argument of llvm.coro.id must refer to an initialized "
5911 "constant");
5912 Constant *Init = GV->getInitializer();
5914 "info argument of llvm.coro.id must refer to either a struct or "
5915 "an array");
5916 break;
5917 }
5918 case Intrinsic::is_fpclass: {
5919 const ConstantInt *TestMask = cast<ConstantInt>(Call.getOperand(1));
5920 Check((TestMask->getZExtValue() & ~static_cast<unsigned>(fcAllFlags)) == 0,
5921 "unsupported bits for llvm.is.fpclass test mask");
5922 break;
5923 }
5924 case Intrinsic::fptrunc_round: {
5925 // Check the rounding mode
5926 Metadata *MD = nullptr;
5928 if (MAV)
5929 MD = MAV->getMetadata();
5930
5931 Check(MD != nullptr, "missing rounding mode argument", Call);
5932
5933 Check(isa<MDString>(MD),
5934 ("invalid value for llvm.fptrunc.round metadata operand"
5935 " (the operand should be a string)"),
5936 MD);
5937
5938 std::optional<RoundingMode> RoundMode =
5939 convertStrToRoundingMode(cast<MDString>(MD)->getString());
5940 Check(RoundMode && *RoundMode != RoundingMode::Dynamic,
5941 "unsupported rounding mode argument", Call);
5942 break;
5943 }
5944#define BEGIN_REGISTER_VP_INTRINSIC(VPID, ...) case Intrinsic::VPID:
5945#include "llvm/IR/VPIntrinsics.def"
5946#undef BEGIN_REGISTER_VP_INTRINSIC
5947 visitVPIntrinsic(cast<VPIntrinsic>(Call));
5948 break;
5949#define INSTRUCTION(NAME, NARGS, ROUND_MODE, INTRINSIC) \
5950 case Intrinsic::INTRINSIC:
5951#include "llvm/IR/ConstrainedOps.def"
5952#undef INSTRUCTION
5953 visitConstrainedFPIntrinsic(cast<ConstrainedFPIntrinsic>(Call));
5954 break;
5955 case Intrinsic::dbg_declare: // llvm.dbg.declare
5956 case Intrinsic::dbg_value: // llvm.dbg.value
5957 case Intrinsic::dbg_assign: // llvm.dbg.assign
5958 case Intrinsic::dbg_label: // llvm.dbg.label
5959 // We no longer interpret debug intrinsics (the old variable-location
5960 // design). They're meaningless as far as LLVM is concerned we could make
5961 // it an error for them to appear, but it's possible we'll have users
5962 // converting back to intrinsics for the forseeable future (such as DXIL),
5963 // so tolerate their existance.
5964 break;
5965 case Intrinsic::memcpy:
5966 case Intrinsic::memcpy_inline:
5967 case Intrinsic::memmove:
5968 case Intrinsic::memset:
5969 case Intrinsic::memset_inline:
5970 break;
5971 case Intrinsic::experimental_memset_pattern: {
5972 const auto Memset = cast<MemSetPatternInst>(&Call);
5973 Check(Memset->getValue()->getType()->isSized(),
5974 "unsized types cannot be used as memset patterns", Call);
5975 break;
5976 }
5977 case Intrinsic::memcpy_element_unordered_atomic:
5978 case Intrinsic::memmove_element_unordered_atomic:
5979 case Intrinsic::memset_element_unordered_atomic: {
5980 const auto *AMI = cast<AnyMemIntrinsic>(&Call);
5981
5982 ConstantInt *ElementSizeCI =
5983 cast<ConstantInt>(AMI->getRawElementSizeInBytes());
5984 const APInt &ElementSizeVal = ElementSizeCI->getValue();
5985 Check(ElementSizeVal.isPowerOf2(),
5986 "element size of the element-wise atomic memory intrinsic "
5987 "must be a power of 2",
5988 Call);
5989
5990 auto IsValidAlignment = [&](MaybeAlign Alignment) {
5991 return Alignment && ElementSizeVal.ule(Alignment->value());
5992 };
5993 Check(IsValidAlignment(AMI->getDestAlign()),
5994 "incorrect alignment of the destination argument", Call);
5995 if (const auto *AMT = dyn_cast<AnyMemTransferInst>(AMI)) {
5996 Check(IsValidAlignment(AMT->getSourceAlign()),
5997 "incorrect alignment of the source argument", Call);
5998 }
5999 break;
6000 }
6001 case Intrinsic::call_preallocated_setup: {
6002 auto *NumArgs = cast<ConstantInt>(Call.getArgOperand(0));
6003 bool FoundCall = false;
6004 for (User *U : Call.users()) {
6005 auto *UseCall = dyn_cast<CallBase>(U);
6006 Check(UseCall != nullptr,
6007 "Uses of llvm.call.preallocated.setup must be calls");
6008 Intrinsic::ID IID = UseCall->getIntrinsicID();
6009 if (IID == Intrinsic::call_preallocated_arg) {
6010 auto *AllocArgIndex = dyn_cast<ConstantInt>(UseCall->getArgOperand(1));
6011 Check(AllocArgIndex != nullptr,
6012 "llvm.call.preallocated.alloc arg index must be a constant");
6013 auto AllocArgIndexInt = AllocArgIndex->getValue();
6014 Check(AllocArgIndexInt.sge(0) &&
6015 AllocArgIndexInt.slt(NumArgs->getValue()),
6016 "llvm.call.preallocated.alloc arg index must be between 0 and "
6017 "corresponding "
6018 "llvm.call.preallocated.setup's argument count");
6019 } else if (IID == Intrinsic::call_preallocated_teardown) {
6020 // nothing to do
6021 } else {
6022 Check(!FoundCall, "Can have at most one call corresponding to a "
6023 "llvm.call.preallocated.setup");
6024 FoundCall = true;
6025 size_t NumPreallocatedArgs = 0;
6026 for (unsigned i = 0; i < UseCall->arg_size(); i++) {
6027 if (UseCall->paramHasAttr(i, Attribute::Preallocated)) {
6028 ++NumPreallocatedArgs;
6029 }
6030 }
6031 Check(NumPreallocatedArgs != 0,
6032 "cannot use preallocated intrinsics on a call without "
6033 "preallocated arguments");
6034 Check(NumArgs->equalsInt(NumPreallocatedArgs),
6035 "llvm.call.preallocated.setup arg size must be equal to number "
6036 "of preallocated arguments "
6037 "at call site",
6038 Call, *UseCall);
6039 // getOperandBundle() cannot be called if more than one of the operand
6040 // bundle exists. There is already a check elsewhere for this, so skip
6041 // here if we see more than one.
6042 if (UseCall->countOperandBundlesOfType(LLVMContext::OB_preallocated) >
6043 1) {
6044 return;
6045 }
6046 auto PreallocatedBundle =
6047 UseCall->getOperandBundle(LLVMContext::OB_preallocated);
6048 Check(PreallocatedBundle,
6049 "Use of llvm.call.preallocated.setup outside intrinsics "
6050 "must be in \"preallocated\" operand bundle");
6051 Check(PreallocatedBundle->Inputs.front().get() == &Call,
6052 "preallocated bundle must have token from corresponding "
6053 "llvm.call.preallocated.setup");
6054 }
6055 }
6056 break;
6057 }
6058 case Intrinsic::call_preallocated_arg: {
6059 auto *Token = dyn_cast<CallBase>(Call.getArgOperand(0));
6060 Check(Token &&
6061 Token->getIntrinsicID() == Intrinsic::call_preallocated_setup,
6062 "llvm.call.preallocated.arg token argument must be a "
6063 "llvm.call.preallocated.setup");
6064 Check(Call.hasFnAttr(Attribute::Preallocated),
6065 "llvm.call.preallocated.arg must be called with a \"preallocated\" "
6066 "call site attribute");
6067 break;
6068 }
6069 case Intrinsic::call_preallocated_teardown: {
6070 auto *Token = dyn_cast<CallBase>(Call.getArgOperand(0));
6071 Check(Token &&
6072 Token->getIntrinsicID() == Intrinsic::call_preallocated_setup,
6073 "llvm.call.preallocated.teardown token argument must be a "
6074 "llvm.call.preallocated.setup");
6075 break;
6076 }
6077 case Intrinsic::gcroot:
6078 case Intrinsic::gcwrite:
6079 case Intrinsic::gcread:
6080 if (ID == Intrinsic::gcroot) {
6081 AllocaInst *AI =
6083 Check(AI, "llvm.gcroot parameter #1 must be an alloca.", Call);
6085 "llvm.gcroot parameter #2 must be a constant.", Call);
6086 if (!AI->getAllocatedType()->isPointerTy()) {
6088 "llvm.gcroot parameter #1 must either be a pointer alloca, "
6089 "or argument #2 must be a non-null constant.",
6090 Call);
6091 }
6092 }
6093
6094 Check(Call.getParent()->getParent()->hasGC(),
6095 "Enclosing function does not use GC.", Call);
6096 break;
6097 case Intrinsic::init_trampoline:
6099 "llvm.init_trampoline parameter #2 must resolve to a function.",
6100 Call);
6101 break;
6102 case Intrinsic::prefetch:
6103 Check(cast<ConstantInt>(Call.getArgOperand(1))->getZExtValue() < 2,
6104 "rw argument to llvm.prefetch must be 0-1", Call);
6105 Check(cast<ConstantInt>(Call.getArgOperand(2))->getZExtValue() < 4,
6106 "locality argument to llvm.prefetch must be 0-3", Call);
6107 Check(cast<ConstantInt>(Call.getArgOperand(3))->getZExtValue() < 2,
6108 "cache type argument to llvm.prefetch must be 0-1", Call);
6109 break;
6110 case Intrinsic::reloc_none: {
6112 cast<MetadataAsValue>(Call.getArgOperand(0))->getMetadata()),
6113 "llvm.reloc.none argument must be a metadata string", &Call);
6114 break;
6115 }
6116 case Intrinsic::stackprotector:
6118 "llvm.stackprotector parameter #2 must resolve to an alloca.", Call);
6119 break;
6120 case Intrinsic::localescape: {
6121 BasicBlock *BB = Call.getParent();
6122 Check(BB->isEntryBlock(), "llvm.localescape used outside of entry block",
6123 Call);
6124 Check(!SawFrameEscape, "multiple calls to llvm.localescape in one function",
6125 Call);
6126 for (Value *Arg : Call.args()) {
6127 if (isa<ConstantPointerNull>(Arg))
6128 continue; // Null values are allowed as placeholders.
6129 auto *AI = dyn_cast<AllocaInst>(Arg->stripPointerCasts());
6130 Check(AI && AI->isStaticAlloca(),
6131 "llvm.localescape only accepts static allocas", Call);
6132 }
6133 FrameEscapeInfo[BB->getParent()].first = Call.arg_size();
6134 SawFrameEscape = true;
6135 break;
6136 }
6137 case Intrinsic::localrecover: {
6139 Function *Fn = dyn_cast<Function>(FnArg);
6140 Check(Fn && !Fn->isDeclaration(),
6141 "llvm.localrecover first "
6142 "argument must be function defined in this module",
6143 Call);
6144 auto *IdxArg = cast<ConstantInt>(Call.getArgOperand(2));
6145 auto &Entry = FrameEscapeInfo[Fn];
6146 Entry.second = unsigned(
6147 std::max(uint64_t(Entry.second), IdxArg->getLimitedValue(~0U) + 1));
6148 break;
6149 }
6150
6151 case Intrinsic::experimental_gc_statepoint:
6152 if (auto *CI = dyn_cast<CallInst>(&Call))
6153 Check(!CI->isInlineAsm(),
6154 "gc.statepoint support for inline assembly unimplemented", CI);
6155 Check(Call.getParent()->getParent()->hasGC(),
6156 "Enclosing function does not use GC.", Call);
6157
6158 verifyStatepoint(Call);
6159 break;
6160 case Intrinsic::experimental_gc_result: {
6161 Check(Call.getParent()->getParent()->hasGC(),
6162 "Enclosing function does not use GC.", Call);
6163
6164 auto *Statepoint = Call.getArgOperand(0);
6165 if (isa<UndefValue>(Statepoint))
6166 break;
6167
6168 // Are we tied to a statepoint properly?
6169 const auto *StatepointCall = dyn_cast<CallBase>(Statepoint);
6170 Check(StatepointCall && StatepointCall->getIntrinsicID() ==
6171 Intrinsic::experimental_gc_statepoint,
6172 "gc.result operand #1 must be from a statepoint", Call,
6173 Call.getArgOperand(0));
6174
6175 // Check that result type matches wrapped callee.
6176 auto *TargetFuncType =
6177 cast<FunctionType>(StatepointCall->getParamElementType(2));
6178 Check(Call.getType() == TargetFuncType->getReturnType(),
6179 "gc.result result type does not match wrapped callee", Call);
6180 break;
6181 }
6182 case Intrinsic::experimental_gc_relocate: {
6183 Check(Call.arg_size() == 3, "wrong number of arguments", Call);
6184
6186 "gc.relocate must return a pointer or a vector of pointers", Call);
6187
6188 // Check that this relocate is correctly tied to the statepoint
6189
6190 // This is case for relocate on the unwinding path of an invoke statepoint
6191 if (LandingPadInst *LandingPad =
6193
6194 const BasicBlock *InvokeBB =
6195 LandingPad->getParent()->getUniquePredecessor();
6196
6197 // Landingpad relocates should have only one predecessor with invoke
6198 // statepoint terminator
6199 Check(InvokeBB, "safepoints should have unique landingpads",
6200 LandingPad->getParent());
6201 Check(InvokeBB->getTerminator(), "safepoint block should be well formed",
6202 InvokeBB);
6204 "gc relocate should be linked to a statepoint", InvokeBB);
6205 } else {
6206 // In all other cases relocate should be tied to the statepoint directly.
6207 // This covers relocates on a normal return path of invoke statepoint and
6208 // relocates of a call statepoint.
6209 auto *Token = Call.getArgOperand(0);
6211 "gc relocate is incorrectly tied to the statepoint", Call, Token);
6212 }
6213
6214 // Verify rest of the relocate arguments.
6215 const Value &StatepointCall = *cast<GCRelocateInst>(Call).getStatepoint();
6216
6217 // Both the base and derived must be piped through the safepoint.
6220 "gc.relocate operand #2 must be integer offset", Call);
6221
6222 Value *Derived = Call.getArgOperand(2);
6223 Check(isa<ConstantInt>(Derived),
6224 "gc.relocate operand #3 must be integer offset", Call);
6225
6226 const uint64_t BaseIndex = cast<ConstantInt>(Base)->getZExtValue();
6227 const uint64_t DerivedIndex = cast<ConstantInt>(Derived)->getZExtValue();
6228
6229 // Check the bounds
6230 if (isa<UndefValue>(StatepointCall))
6231 break;
6232 if (auto Opt = cast<GCStatepointInst>(StatepointCall)
6233 .getOperandBundle(LLVMContext::OB_gc_live)) {
6234 Check(BaseIndex < Opt->Inputs.size(),
6235 "gc.relocate: statepoint base index out of bounds", Call);
6236 Check(DerivedIndex < Opt->Inputs.size(),
6237 "gc.relocate: statepoint derived index out of bounds", Call);
6238 }
6239
6240 // Relocated value must be either a pointer type or vector-of-pointer type,
6241 // but gc_relocate does not need to return the same pointer type as the
6242 // relocated pointer. It can be casted to the correct type later if it's
6243 // desired. However, they must have the same address space and 'vectorness'
6244 GCRelocateInst &Relocate = cast<GCRelocateInst>(Call);
6245 auto *ResultType = Call.getType();
6246 auto *DerivedType = Relocate.getDerivedPtr()->getType();
6247 auto *BaseType = Relocate.getBasePtr()->getType();
6248
6249 Check(BaseType->isPtrOrPtrVectorTy(),
6250 "gc.relocate: relocated value must be a pointer", Call);
6251 Check(DerivedType->isPtrOrPtrVectorTy(),
6252 "gc.relocate: relocated value must be a pointer", Call);
6253
6254 Check(ResultType->isVectorTy() == DerivedType->isVectorTy(),
6255 "gc.relocate: vector relocates to vector and pointer to pointer",
6256 Call);
6257 Check(
6258 ResultType->getPointerAddressSpace() ==
6259 DerivedType->getPointerAddressSpace(),
6260 "gc.relocate: relocating a pointer shouldn't change its address space",
6261 Call);
6262
6263 auto GC = llvm::getGCStrategy(Relocate.getFunction()->getGC());
6264 Check(GC, "gc.relocate: calling function must have GCStrategy",
6265 Call.getFunction());
6266 if (GC) {
6267 auto isGCPtr = [&GC](Type *PTy) {
6268 return GC->isGCManagedPointer(PTy->getScalarType()).value_or(true);
6269 };
6270 Check(isGCPtr(ResultType), "gc.relocate: must return gc pointer", Call);
6271 Check(isGCPtr(BaseType),
6272 "gc.relocate: relocated value must be a gc pointer", Call);
6273 Check(isGCPtr(DerivedType),
6274 "gc.relocate: relocated value must be a gc pointer", Call);
6275 }
6276 break;
6277 }
6278 case Intrinsic::experimental_patchpoint: {
6279 if (Call.getCallingConv() == CallingConv::AnyReg) {
6281 "patchpoint: invalid return type used with anyregcc", Call);
6282 }
6283 break;
6284 }
6285 case Intrinsic::eh_exceptioncode:
6286 case Intrinsic::eh_exceptionpointer: {
6288 "eh.exceptionpointer argument must be a catchpad", Call);
6289 break;
6290 }
6291 case Intrinsic::get_active_lane_mask: {
6293 "get_active_lane_mask: must return a "
6294 "vector",
6295 Call);
6296 auto *ElemTy = Call.getType()->getScalarType();
6297 Check(ElemTy->isIntegerTy(1),
6298 "get_active_lane_mask: element type is not "
6299 "i1",
6300 Call);
6301 break;
6302 }
6303 case Intrinsic::experimental_get_vector_length: {
6304 ConstantInt *VF = cast<ConstantInt>(Call.getArgOperand(1));
6305 Check(!VF->isNegative() && !VF->isZero(),
6306 "get_vector_length: VF must be positive", Call);
6307 break;
6308 }
6309 case Intrinsic::masked_load: {
6310 Check(Call.getType()->isVectorTy(), "masked_load: must return a vector",
6311 Call);
6312
6314 Value *PassThru = Call.getArgOperand(2);
6315 Check(Mask->getType()->isVectorTy(), "masked_load: mask must be vector",
6316 Call);
6317 Check(PassThru->getType() == Call.getType(),
6318 "masked_load: pass through and return type must match", Call);
6319 Check(cast<VectorType>(Mask->getType())->getElementCount() ==
6320 cast<VectorType>(Call.getType())->getElementCount(),
6321 "masked_load: vector mask must be same length as return", Call);
6322 break;
6323 }
6324 case Intrinsic::masked_store: {
6325 Value *Val = Call.getArgOperand(0);
6327 Check(Mask->getType()->isVectorTy(), "masked_store: mask must be vector",
6328 Call);
6329 Check(cast<VectorType>(Mask->getType())->getElementCount() ==
6330 cast<VectorType>(Val->getType())->getElementCount(),
6331 "masked_store: vector mask must be same length as value", Call);
6332 break;
6333 }
6334
6335 case Intrinsic::experimental_guard: {
6336 Check(isa<CallInst>(Call), "experimental_guard cannot be invoked", Call);
6338 "experimental_guard must have exactly one "
6339 "\"deopt\" operand bundle");
6340 break;
6341 }
6342
6343 case Intrinsic::experimental_deoptimize: {
6344 Check(isa<CallInst>(Call), "experimental_deoptimize cannot be invoked",
6345 Call);
6347 "experimental_deoptimize must have exactly one "
6348 "\"deopt\" operand bundle");
6350 "experimental_deoptimize return type must match caller return type");
6351
6352 if (isa<CallInst>(Call)) {
6354 Check(RI,
6355 "calls to experimental_deoptimize must be followed by a return");
6356
6357 if (!Call.getType()->isVoidTy() && RI)
6358 Check(RI->getReturnValue() == &Call,
6359 "calls to experimental_deoptimize must be followed by a return "
6360 "of the value computed by experimental_deoptimize");
6361 }
6362
6363 break;
6364 }
6365 case Intrinsic::vastart: {
6367 "va_start called in a non-varargs function");
6368 break;
6369 }
6370 case Intrinsic::get_dynamic_area_offset: {
6371 auto *IntTy = dyn_cast<IntegerType>(Call.getType());
6372 Check(IntTy && DL.getPointerSizeInBits(DL.getAllocaAddrSpace()) ==
6373 IntTy->getBitWidth(),
6374 "get_dynamic_area_offset result type must be scalar integer matching "
6375 "alloca address space width",
6376 Call);
6377 break;
6378 }
6379 case Intrinsic::vector_reduce_and:
6380 case Intrinsic::vector_reduce_or:
6381 case Intrinsic::vector_reduce_xor:
6382 case Intrinsic::vector_reduce_add:
6383 case Intrinsic::vector_reduce_mul:
6384 case Intrinsic::vector_reduce_smax:
6385 case Intrinsic::vector_reduce_smin:
6386 case Intrinsic::vector_reduce_umax:
6387 case Intrinsic::vector_reduce_umin: {
6388 Type *ArgTy = Call.getArgOperand(0)->getType();
6389 Check(ArgTy->isIntOrIntVectorTy() && ArgTy->isVectorTy(),
6390 "Intrinsic has incorrect argument type!");
6391 break;
6392 }
6393 case Intrinsic::vector_reduce_fmax:
6394 case Intrinsic::vector_reduce_fmin: {
6395 Type *ArgTy = Call.getArgOperand(0)->getType();
6396 Check(ArgTy->isFPOrFPVectorTy() && ArgTy->isVectorTy(),
6397 "Intrinsic has incorrect argument type!");
6398 break;
6399 }
6400 case Intrinsic::vector_reduce_fadd:
6401 case Intrinsic::vector_reduce_fmul: {
6402 // Unlike the other reductions, the first argument is a start value. The
6403 // second argument is the vector to be reduced.
6404 Type *ArgTy = Call.getArgOperand(1)->getType();
6405 Check(ArgTy->isFPOrFPVectorTy() && ArgTy->isVectorTy(),
6406 "Intrinsic has incorrect argument type!");
6407 break;
6408 }
6409 case Intrinsic::smul_fix:
6410 case Intrinsic::smul_fix_sat:
6411 case Intrinsic::umul_fix:
6412 case Intrinsic::umul_fix_sat:
6413 case Intrinsic::sdiv_fix:
6414 case Intrinsic::sdiv_fix_sat:
6415 case Intrinsic::udiv_fix:
6416 case Intrinsic::udiv_fix_sat: {
6417 Value *Op1 = Call.getArgOperand(0);
6418 Value *Op2 = Call.getArgOperand(1);
6420 "first operand of [us][mul|div]_fix[_sat] must be an int type or "
6421 "vector of ints");
6423 "second operand of [us][mul|div]_fix[_sat] must be an int type or "
6424 "vector of ints");
6425
6426 auto *Op3 = cast<ConstantInt>(Call.getArgOperand(2));
6427 Check(Op3->getType()->isIntegerTy(),
6428 "third operand of [us][mul|div]_fix[_sat] must be an int type");
6429 Check(Op3->getBitWidth() <= 32,
6430 "third operand of [us][mul|div]_fix[_sat] must fit within 32 bits");
6431
6432 if (ID == Intrinsic::smul_fix || ID == Intrinsic::smul_fix_sat ||
6433 ID == Intrinsic::sdiv_fix || ID == Intrinsic::sdiv_fix_sat) {
6434 Check(Op3->getZExtValue() < Op1->getType()->getScalarSizeInBits(),
6435 "the scale of s[mul|div]_fix[_sat] must be less than the width of "
6436 "the operands");
6437 } else {
6438 Check(Op3->getZExtValue() <= Op1->getType()->getScalarSizeInBits(),
6439 "the scale of u[mul|div]_fix[_sat] must be less than or equal "
6440 "to the width of the operands");
6441 }
6442 break;
6443 }
6444 case Intrinsic::lrint:
6445 case Intrinsic::llrint:
6446 case Intrinsic::lround:
6447 case Intrinsic::llround: {
6448 Type *ValTy = Call.getArgOperand(0)->getType();
6449 Type *ResultTy = Call.getType();
6450 auto *VTy = dyn_cast<VectorType>(ValTy);
6451 auto *RTy = dyn_cast<VectorType>(ResultTy);
6452 Check(ValTy->isFPOrFPVectorTy() && ResultTy->isIntOrIntVectorTy(),
6453 ExpectedName + ": argument must be floating-point or vector "
6454 "of floating-points, and result must be integer or "
6455 "vector of integers",
6456 &Call);
6457 Check(ValTy->isVectorTy() == ResultTy->isVectorTy(),
6458 ExpectedName + ": argument and result disagree on vector use", &Call);
6459 if (VTy) {
6460 Check(VTy->getElementCount() == RTy->getElementCount(),
6461 ExpectedName + ": argument must be same length as result", &Call);
6462 }
6463 break;
6464 }
6465 case Intrinsic::bswap: {
6466 Type *Ty = Call.getType();
6467 unsigned Size = Ty->getScalarSizeInBits();
6468 Check(Size % 16 == 0, "bswap must be an even number of bytes", &Call);
6469 break;
6470 }
6471 case Intrinsic::invariant_start: {
6472 ConstantInt *InvariantSize = dyn_cast<ConstantInt>(Call.getArgOperand(0));
6473 Check(InvariantSize &&
6474 (!InvariantSize->isNegative() || InvariantSize->isMinusOne()),
6475 "invariant_start parameter must be -1, 0 or a positive number",
6476 &Call);
6477 break;
6478 }
6479 case Intrinsic::matrix_multiply:
6480 case Intrinsic::matrix_transpose:
6481 case Intrinsic::matrix_column_major_load:
6482 case Intrinsic::matrix_column_major_store: {
6484 ConstantInt *Stride = nullptr;
6485 ConstantInt *NumRows;
6486 ConstantInt *NumColumns;
6487 VectorType *ResultTy;
6488 Type *Op0ElemTy = nullptr;
6489 Type *Op1ElemTy = nullptr;
6490 switch (ID) {
6491 case Intrinsic::matrix_multiply: {
6492 NumRows = cast<ConstantInt>(Call.getArgOperand(2));
6493 ConstantInt *N = cast<ConstantInt>(Call.getArgOperand(3));
6494 NumColumns = cast<ConstantInt>(Call.getArgOperand(4));
6496 ->getNumElements() ==
6497 NumRows->getZExtValue() * N->getZExtValue(),
6498 "First argument of a matrix operation does not match specified "
6499 "shape!");
6501 ->getNumElements() ==
6502 N->getZExtValue() * NumColumns->getZExtValue(),
6503 "Second argument of a matrix operation does not match specified "
6504 "shape!");
6505
6506 ResultTy = cast<VectorType>(Call.getType());
6507 Op0ElemTy =
6508 cast<VectorType>(Call.getArgOperand(0)->getType())->getElementType();
6509 Op1ElemTy =
6510 cast<VectorType>(Call.getArgOperand(1)->getType())->getElementType();
6511 break;
6512 }
6513 case Intrinsic::matrix_transpose:
6514 NumRows = cast<ConstantInt>(Call.getArgOperand(1));
6515 NumColumns = cast<ConstantInt>(Call.getArgOperand(2));
6516 ResultTy = cast<VectorType>(Call.getType());
6517 Op0ElemTy =
6518 cast<VectorType>(Call.getArgOperand(0)->getType())->getElementType();
6519 break;
6520 case Intrinsic::matrix_column_major_load: {
6522 NumRows = cast<ConstantInt>(Call.getArgOperand(3));
6523 NumColumns = cast<ConstantInt>(Call.getArgOperand(4));
6524 ResultTy = cast<VectorType>(Call.getType());
6525 break;
6526 }
6527 case Intrinsic::matrix_column_major_store: {
6529 NumRows = cast<ConstantInt>(Call.getArgOperand(4));
6530 NumColumns = cast<ConstantInt>(Call.getArgOperand(5));
6531 ResultTy = cast<VectorType>(Call.getArgOperand(0)->getType());
6532 Op0ElemTy =
6533 cast<VectorType>(Call.getArgOperand(0)->getType())->getElementType();
6534 break;
6535 }
6536 default:
6537 llvm_unreachable("unexpected intrinsic");
6538 }
6539
6540 Check(ResultTy->getElementType()->isIntegerTy() ||
6541 ResultTy->getElementType()->isFloatingPointTy(),
6542 "Result type must be an integer or floating-point type!", IF);
6543
6544 if (Op0ElemTy)
6545 Check(ResultTy->getElementType() == Op0ElemTy,
6546 "Vector element type mismatch of the result and first operand "
6547 "vector!",
6548 IF);
6549
6550 if (Op1ElemTy)
6551 Check(ResultTy->getElementType() == Op1ElemTy,
6552 "Vector element type mismatch of the result and second operand "
6553 "vector!",
6554 IF);
6555
6557 NumRows->getZExtValue() * NumColumns->getZExtValue(),
6558 "Result of a matrix operation does not fit in the returned vector!");
6559
6560 if (Stride) {
6561 Check(Stride->getBitWidth() <= 64, "Stride bitwidth cannot exceed 64!",
6562 IF);
6563 Check(Stride->getZExtValue() >= NumRows->getZExtValue(),
6564 "Stride must be greater or equal than the number of rows!", IF);
6565 }
6566
6567 break;
6568 }
6569 case Intrinsic::vector_splice: {
6571 int64_t Idx = cast<ConstantInt>(Call.getArgOperand(2))->getSExtValue();
6572 int64_t KnownMinNumElements = VecTy->getElementCount().getKnownMinValue();
6573 if (VecTy->isScalableTy() && Call.getParent() &&
6574 Call.getParent()->getParent()) {
6575 AttributeList Attrs = Call.getParent()->getParent()->getAttributes();
6576 if (Attrs.hasFnAttr(Attribute::VScaleRange))
6577 KnownMinNumElements *= Attrs.getFnAttrs().getVScaleRangeMin();
6578 }
6579 Check((Idx < 0 && std::abs(Idx) <= KnownMinNumElements) ||
6580 (Idx >= 0 && Idx < KnownMinNumElements),
6581 "The splice index exceeds the range [-VL, VL-1] where VL is the "
6582 "known minimum number of elements in the vector. For scalable "
6583 "vectors the minimum number of elements is determined from "
6584 "vscale_range.",
6585 &Call);
6586 break;
6587 }
6588 case Intrinsic::stepvector: {
6590 Check(VecTy && VecTy->getScalarType()->isIntegerTy() &&
6591 VecTy->getScalarSizeInBits() >= 8,
6592 "stepvector only supported for vectors of integers "
6593 "with a bitwidth of at least 8.",
6594 &Call);
6595 break;
6596 }
6597 case Intrinsic::experimental_vector_match: {
6598 Value *Op1 = Call.getArgOperand(0);
6599 Value *Op2 = Call.getArgOperand(1);
6601
6602 VectorType *Op1Ty = dyn_cast<VectorType>(Op1->getType());
6603 VectorType *Op2Ty = dyn_cast<VectorType>(Op2->getType());
6604 VectorType *MaskTy = dyn_cast<VectorType>(Mask->getType());
6605
6606 Check(Op1Ty && Op2Ty && MaskTy, "Operands must be vectors.", &Call);
6608 "Second operand must be a fixed length vector.", &Call);
6609 Check(Op1Ty->getElementType()->isIntegerTy(),
6610 "First operand must be a vector of integers.", &Call);
6611 Check(Op1Ty->getElementType() == Op2Ty->getElementType(),
6612 "First two operands must have the same element type.", &Call);
6613 Check(Op1Ty->getElementCount() == MaskTy->getElementCount(),
6614 "First operand and mask must have the same number of elements.",
6615 &Call);
6616 Check(MaskTy->getElementType()->isIntegerTy(1),
6617 "Mask must be a vector of i1's.", &Call);
6618 Check(Call.getType() == MaskTy, "Return type must match the mask type.",
6619 &Call);
6620 break;
6621 }
6622 case Intrinsic::vector_insert: {
6623 Value *Vec = Call.getArgOperand(0);
6624 Value *SubVec = Call.getArgOperand(1);
6625 Value *Idx = Call.getArgOperand(2);
6626 unsigned IdxN = cast<ConstantInt>(Idx)->getZExtValue();
6627
6628 VectorType *VecTy = cast<VectorType>(Vec->getType());
6629 VectorType *SubVecTy = cast<VectorType>(SubVec->getType());
6630
6631 ElementCount VecEC = VecTy->getElementCount();
6632 ElementCount SubVecEC = SubVecTy->getElementCount();
6633 Check(VecTy->getElementType() == SubVecTy->getElementType(),
6634 "vector_insert parameters must have the same element "
6635 "type.",
6636 &Call);
6637 Check(IdxN % SubVecEC.getKnownMinValue() == 0,
6638 "vector_insert index must be a constant multiple of "
6639 "the subvector's known minimum vector length.");
6640
6641 // If this insertion is not the 'mixed' case where a fixed vector is
6642 // inserted into a scalable vector, ensure that the insertion of the
6643 // subvector does not overrun the parent vector.
6644 if (VecEC.isScalable() == SubVecEC.isScalable()) {
6645 Check(IdxN < VecEC.getKnownMinValue() &&
6646 IdxN + SubVecEC.getKnownMinValue() <= VecEC.getKnownMinValue(),
6647 "subvector operand of vector_insert would overrun the "
6648 "vector being inserted into.");
6649 }
6650 break;
6651 }
6652 case Intrinsic::vector_extract: {
6653 Value *Vec = Call.getArgOperand(0);
6654 Value *Idx = Call.getArgOperand(1);
6655 unsigned IdxN = cast<ConstantInt>(Idx)->getZExtValue();
6656
6657 VectorType *ResultTy = cast<VectorType>(Call.getType());
6658 VectorType *VecTy = cast<VectorType>(Vec->getType());
6659
6660 ElementCount VecEC = VecTy->getElementCount();
6661 ElementCount ResultEC = ResultTy->getElementCount();
6662
6663 Check(ResultTy->getElementType() == VecTy->getElementType(),
6664 "vector_extract result must have the same element "
6665 "type as the input vector.",
6666 &Call);
6667 Check(IdxN % ResultEC.getKnownMinValue() == 0,
6668 "vector_extract index must be a constant multiple of "
6669 "the result type's known minimum vector length.");
6670
6671 // If this extraction is not the 'mixed' case where a fixed vector is
6672 // extracted from a scalable vector, ensure that the extraction does not
6673 // overrun the parent vector.
6674 if (VecEC.isScalable() == ResultEC.isScalable()) {
6675 Check(IdxN < VecEC.getKnownMinValue() &&
6676 IdxN + ResultEC.getKnownMinValue() <= VecEC.getKnownMinValue(),
6677 "vector_extract would overrun.");
6678 }
6679 break;
6680 }
6681 case Intrinsic::vector_partial_reduce_fadd:
6682 case Intrinsic::vector_partial_reduce_add: {
6685
6686 unsigned VecWidth = VecTy->getElementCount().getKnownMinValue();
6687 unsigned AccWidth = AccTy->getElementCount().getKnownMinValue();
6688
6689 Check((VecWidth % AccWidth) == 0,
6690 "Invalid vector widths for partial "
6691 "reduction. The width of the input vector "
6692 "must be a positive integer multiple of "
6693 "the width of the accumulator vector.");
6694 break;
6695 }
6696 case Intrinsic::experimental_noalias_scope_decl: {
6697 NoAliasScopeDecls.push_back(cast<IntrinsicInst>(&Call));
6698 break;
6699 }
6700 case Intrinsic::preserve_array_access_index:
6701 case Intrinsic::preserve_struct_access_index:
6702 case Intrinsic::aarch64_ldaxr:
6703 case Intrinsic::aarch64_ldxr:
6704 case Intrinsic::arm_ldaex:
6705 case Intrinsic::arm_ldrex: {
6706 Type *ElemTy = Call.getParamElementType(0);
6707 Check(ElemTy, "Intrinsic requires elementtype attribute on first argument.",
6708 &Call);
6709 break;
6710 }
6711 case Intrinsic::aarch64_stlxr:
6712 case Intrinsic::aarch64_stxr:
6713 case Intrinsic::arm_stlex:
6714 case Intrinsic::arm_strex: {
6715 Type *ElemTy = Call.getAttributes().getParamElementType(1);
6716 Check(ElemTy,
6717 "Intrinsic requires elementtype attribute on second argument.",
6718 &Call);
6719 break;
6720 }
6721 case Intrinsic::aarch64_prefetch: {
6722 Check(cast<ConstantInt>(Call.getArgOperand(1))->getZExtValue() < 2,
6723 "write argument to llvm.aarch64.prefetch must be 0 or 1", Call);
6724 Check(cast<ConstantInt>(Call.getArgOperand(2))->getZExtValue() < 4,
6725 "target argument to llvm.aarch64.prefetch must be 0-3", Call);
6726 Check(cast<ConstantInt>(Call.getArgOperand(3))->getZExtValue() < 2,
6727 "stream argument to llvm.aarch64.prefetch must be 0 or 1", Call);
6728 Check(cast<ConstantInt>(Call.getArgOperand(4))->getZExtValue() < 2,
6729 "isdata argument to llvm.aarch64.prefetch must be 0 or 1", Call);
6730 break;
6731 }
6732 case Intrinsic::callbr_landingpad: {
6733 const auto *CBR = dyn_cast<CallBrInst>(Call.getOperand(0));
6734 Check(CBR, "intrinstic requires callbr operand", &Call);
6735 if (!CBR)
6736 break;
6737
6738 const BasicBlock *LandingPadBB = Call.getParent();
6739 const BasicBlock *PredBB = LandingPadBB->getUniquePredecessor();
6740 if (!PredBB) {
6741 CheckFailed("Intrinsic in block must have 1 unique predecessor", &Call);
6742 break;
6743 }
6744 if (!isa<CallBrInst>(PredBB->getTerminator())) {
6745 CheckFailed("Intrinsic must have corresponding callbr in predecessor",
6746 &Call);
6747 break;
6748 }
6749 Check(llvm::is_contained(CBR->getIndirectDests(), LandingPadBB),
6750 "Intrinsic's corresponding callbr must have intrinsic's parent basic "
6751 "block in indirect destination list",
6752 &Call);
6753 const Instruction &First = *LandingPadBB->begin();
6754 Check(&First == &Call, "No other instructions may proceed intrinsic",
6755 &Call);
6756 break;
6757 }
6758 case Intrinsic::amdgcn_cs_chain: {
6759 auto CallerCC = Call.getCaller()->getCallingConv();
6760 switch (CallerCC) {
6761 case CallingConv::AMDGPU_CS:
6762 case CallingConv::AMDGPU_CS_Chain:
6763 case CallingConv::AMDGPU_CS_ChainPreserve:
6764 break;
6765 default:
6766 CheckFailed("Intrinsic can only be used from functions with the "
6767 "amdgpu_cs, amdgpu_cs_chain or amdgpu_cs_chain_preserve "
6768 "calling conventions",
6769 &Call);
6770 break;
6771 }
6772
6773 Check(Call.paramHasAttr(2, Attribute::InReg),
6774 "SGPR arguments must have the `inreg` attribute", &Call);
6775 Check(!Call.paramHasAttr(3, Attribute::InReg),
6776 "VGPR arguments must not have the `inreg` attribute", &Call);
6777
6778 auto *Next = Call.getNextNode();
6779 bool IsAMDUnreachable = Next && isa<IntrinsicInst>(Next) &&
6780 cast<IntrinsicInst>(Next)->getIntrinsicID() ==
6781 Intrinsic::amdgcn_unreachable;
6782 Check(Next && (isa<UnreachableInst>(Next) || IsAMDUnreachable),
6783 "llvm.amdgcn.cs.chain must be followed by unreachable", &Call);
6784 break;
6785 }
6786 case Intrinsic::amdgcn_init_exec_from_input: {
6787 const Argument *Arg = dyn_cast<Argument>(Call.getOperand(0));
6788 Check(Arg && Arg->hasInRegAttr(),
6789 "only inreg arguments to the parent function are valid as inputs to "
6790 "this intrinsic",
6791 &Call);
6792 break;
6793 }
6794 case Intrinsic::amdgcn_set_inactive_chain_arg: {
6795 auto CallerCC = Call.getCaller()->getCallingConv();
6796 switch (CallerCC) {
6797 case CallingConv::AMDGPU_CS_Chain:
6798 case CallingConv::AMDGPU_CS_ChainPreserve:
6799 break;
6800 default:
6801 CheckFailed("Intrinsic can only be used from functions with the "
6802 "amdgpu_cs_chain or amdgpu_cs_chain_preserve "
6803 "calling conventions",
6804 &Call);
6805 break;
6806 }
6807
6808 unsigned InactiveIdx = 1;
6809 Check(!Call.paramHasAttr(InactiveIdx, Attribute::InReg),
6810 "Value for inactive lanes must not have the `inreg` attribute",
6811 &Call);
6812 Check(isa<Argument>(Call.getArgOperand(InactiveIdx)),
6813 "Value for inactive lanes must be a function argument", &Call);
6814 Check(!cast<Argument>(Call.getArgOperand(InactiveIdx))->hasInRegAttr(),
6815 "Value for inactive lanes must be a VGPR function argument", &Call);
6816 break;
6817 }
6818 case Intrinsic::amdgcn_call_whole_wave: {
6820 Check(F, "Indirect whole wave calls are not allowed", &Call);
6821
6822 CallingConv::ID CC = F->getCallingConv();
6823 Check(CC == CallingConv::AMDGPU_Gfx_WholeWave,
6824 "Callee must have the amdgpu_gfx_whole_wave calling convention",
6825 &Call);
6826
6827 Check(!F->isVarArg(), "Variadic whole wave calls are not allowed", &Call);
6828
6829 Check(Call.arg_size() == F->arg_size(),
6830 "Call argument count must match callee argument count", &Call);
6831
6832 // The first argument of the call is the callee, and the first argument of
6833 // the callee is the active mask. The rest of the arguments must match.
6834 Check(F->arg_begin()->getType()->isIntegerTy(1),
6835 "Callee must have i1 as its first argument", &Call);
6836 for (auto [CallArg, FuncArg] :
6837 drop_begin(zip_equal(Call.args(), F->args()))) {
6838 Check(CallArg->getType() == FuncArg.getType(),
6839 "Argument types must match", &Call);
6840
6841 // Check that inreg attributes match between call site and function
6842 Check(Call.paramHasAttr(FuncArg.getArgNo(), Attribute::InReg) ==
6843 FuncArg.hasInRegAttr(),
6844 "Argument inreg attributes must match", &Call);
6845 }
6846 break;
6847 }
6848 case Intrinsic::amdgcn_s_prefetch_data: {
6849 Check(
6852 "llvm.amdgcn.s.prefetch.data only supports global or constant memory");
6853 break;
6854 }
6855 case Intrinsic::amdgcn_mfma_scale_f32_16x16x128_f8f6f4:
6856 case Intrinsic::amdgcn_mfma_scale_f32_32x32x64_f8f6f4: {
6857 Value *Src0 = Call.getArgOperand(0);
6858 Value *Src1 = Call.getArgOperand(1);
6859
6860 uint64_t CBSZ = cast<ConstantInt>(Call.getArgOperand(3))->getZExtValue();
6861 uint64_t BLGP = cast<ConstantInt>(Call.getArgOperand(4))->getZExtValue();
6862 Check(CBSZ <= 4, "invalid value for cbsz format", Call,
6863 Call.getArgOperand(3));
6864 Check(BLGP <= 4, "invalid value for blgp format", Call,
6865 Call.getArgOperand(4));
6866
6867 // AMDGPU::MFMAScaleFormats values
6868 auto getFormatNumRegs = [](unsigned FormatVal) {
6869 switch (FormatVal) {
6870 case 0:
6871 case 1:
6872 return 8u;
6873 case 2:
6874 case 3:
6875 return 6u;
6876 case 4:
6877 return 4u;
6878 default:
6879 llvm_unreachable("invalid format value");
6880 }
6881 };
6882
6883 auto isValidSrcASrcBVector = [](FixedVectorType *Ty) {
6884 if (!Ty || !Ty->getElementType()->isIntegerTy(32))
6885 return false;
6886 unsigned NumElts = Ty->getNumElements();
6887 return NumElts == 4 || NumElts == 6 || NumElts == 8;
6888 };
6889
6890 auto *Src0Ty = dyn_cast<FixedVectorType>(Src0->getType());
6891 auto *Src1Ty = dyn_cast<FixedVectorType>(Src1->getType());
6892 Check(isValidSrcASrcBVector(Src0Ty),
6893 "operand 0 must be 4, 6 or 8 element i32 vector", &Call, Src0);
6894 Check(isValidSrcASrcBVector(Src1Ty),
6895 "operand 1 must be 4, 6 or 8 element i32 vector", &Call, Src1);
6896
6897 // Permit excess registers for the format.
6898 Check(Src0Ty->getNumElements() >= getFormatNumRegs(CBSZ),
6899 "invalid vector type for format", &Call, Src0, Call.getArgOperand(3));
6900 Check(Src1Ty->getNumElements() >= getFormatNumRegs(BLGP),
6901 "invalid vector type for format", &Call, Src1, Call.getArgOperand(5));
6902 break;
6903 }
6904 case Intrinsic::amdgcn_wmma_f32_16x16x128_f8f6f4:
6905 case Intrinsic::amdgcn_wmma_scale_f32_16x16x128_f8f6f4:
6906 case Intrinsic::amdgcn_wmma_scale16_f32_16x16x128_f8f6f4: {
6907 Value *Src0 = Call.getArgOperand(1);
6908 Value *Src1 = Call.getArgOperand(3);
6909
6910 unsigned FmtA = cast<ConstantInt>(Call.getArgOperand(0))->getZExtValue();
6911 unsigned FmtB = cast<ConstantInt>(Call.getArgOperand(2))->getZExtValue();
6912 Check(FmtA <= 4, "invalid value for matrix format", Call,
6913 Call.getArgOperand(0));
6914 Check(FmtB <= 4, "invalid value for matrix format", Call,
6915 Call.getArgOperand(2));
6916
6917 // AMDGPU::MatrixFMT values
6918 auto getFormatNumRegs = [](unsigned FormatVal) {
6919 switch (FormatVal) {
6920 case 0:
6921 case 1:
6922 return 16u;
6923 case 2:
6924 case 3:
6925 return 12u;
6926 case 4:
6927 return 8u;
6928 default:
6929 llvm_unreachable("invalid format value");
6930 }
6931 };
6932
6933 auto isValidSrcASrcBVector = [](FixedVectorType *Ty) {
6934 if (!Ty || !Ty->getElementType()->isIntegerTy(32))
6935 return false;
6936 unsigned NumElts = Ty->getNumElements();
6937 return NumElts == 16 || NumElts == 12 || NumElts == 8;
6938 };
6939
6940 auto *Src0Ty = dyn_cast<FixedVectorType>(Src0->getType());
6941 auto *Src1Ty = dyn_cast<FixedVectorType>(Src1->getType());
6942 Check(isValidSrcASrcBVector(Src0Ty),
6943 "operand 1 must be 8, 12 or 16 element i32 vector", &Call, Src0);
6944 Check(isValidSrcASrcBVector(Src1Ty),
6945 "operand 3 must be 8, 12 or 16 element i32 vector", &Call, Src1);
6946
6947 // Permit excess registers for the format.
6948 Check(Src0Ty->getNumElements() >= getFormatNumRegs(FmtA),
6949 "invalid vector type for format", &Call, Src0, Call.getArgOperand(0));
6950 Check(Src1Ty->getNumElements() >= getFormatNumRegs(FmtB),
6951 "invalid vector type for format", &Call, Src1, Call.getArgOperand(2));
6952 break;
6953 }
6954 case Intrinsic::amdgcn_cooperative_atomic_load_32x4B:
6955 case Intrinsic::amdgcn_cooperative_atomic_load_16x8B:
6956 case Intrinsic::amdgcn_cooperative_atomic_load_8x16B:
6957 case Intrinsic::amdgcn_cooperative_atomic_store_32x4B:
6958 case Intrinsic::amdgcn_cooperative_atomic_store_16x8B:
6959 case Intrinsic::amdgcn_cooperative_atomic_store_8x16B: {
6960 // Check we only use this intrinsic on the FLAT or GLOBAL address spaces.
6961 Value *PtrArg = Call.getArgOperand(0);
6962 const unsigned AS = PtrArg->getType()->getPointerAddressSpace();
6964 "cooperative atomic intrinsics require a generic or global pointer",
6965 &Call, PtrArg);
6966
6967 // Last argument must be a MD string
6969 MDNode *MD = cast<MDNode>(Op->getMetadata());
6970 Check((MD->getNumOperands() == 1) && isa<MDString>(MD->getOperand(0)),
6971 "cooperative atomic intrinsics require that the last argument is a "
6972 "metadata string",
6973 &Call, Op);
6974 break;
6975 }
6976 case Intrinsic::nvvm_setmaxnreg_inc_sync_aligned_u32:
6977 case Intrinsic::nvvm_setmaxnreg_dec_sync_aligned_u32: {
6978 Value *V = Call.getArgOperand(0);
6979 unsigned RegCount = cast<ConstantInt>(V)->getZExtValue();
6980 Check(RegCount % 8 == 0,
6981 "reg_count argument to nvvm.setmaxnreg must be in multiples of 8");
6982 break;
6983 }
6984 case Intrinsic::experimental_convergence_entry:
6985 case Intrinsic::experimental_convergence_anchor:
6986 break;
6987 case Intrinsic::experimental_convergence_loop:
6988 break;
6989 case Intrinsic::ptrmask: {
6990 Type *Ty0 = Call.getArgOperand(0)->getType();
6991 Type *Ty1 = Call.getArgOperand(1)->getType();
6993 "llvm.ptrmask intrinsic first argument must be pointer or vector "
6994 "of pointers",
6995 &Call);
6996 Check(
6997 Ty0->isVectorTy() == Ty1->isVectorTy(),
6998 "llvm.ptrmask intrinsic arguments must be both scalars or both vectors",
6999 &Call);
7000 if (Ty0->isVectorTy())
7001 Check(cast<VectorType>(Ty0)->getElementCount() ==
7002 cast<VectorType>(Ty1)->getElementCount(),
7003 "llvm.ptrmask intrinsic arguments must have the same number of "
7004 "elements",
7005 &Call);
7006 Check(DL.getIndexTypeSizeInBits(Ty0) == Ty1->getScalarSizeInBits(),
7007 "llvm.ptrmask intrinsic second argument bitwidth must match "
7008 "pointer index type size of first argument",
7009 &Call);
7010 break;
7011 }
7012 case Intrinsic::thread_pointer: {
7014 DL.getDefaultGlobalsAddressSpace(),
7015 "llvm.thread.pointer intrinsic return type must be for the globals "
7016 "address space",
7017 &Call);
7018 break;
7019 }
7020 case Intrinsic::threadlocal_address: {
7021 const Value &Arg0 = *Call.getArgOperand(0);
7022 Check(isa<GlobalValue>(Arg0),
7023 "llvm.threadlocal.address first argument must be a GlobalValue");
7024 Check(cast<GlobalValue>(Arg0).isThreadLocal(),
7025 "llvm.threadlocal.address operand isThreadLocal() must be true");
7026 break;
7027 }
7028 case Intrinsic::lifetime_start:
7029 case Intrinsic::lifetime_end: {
7030 Value *Ptr = Call.getArgOperand(0);
7032 "llvm.lifetime.start/end can only be used on alloca or poison",
7033 &Call);
7034 break;
7035 }
7036 };
7037
7038 // Verify that there aren't any unmediated control transfers between funclets.
7040 Function *F = Call.getParent()->getParent();
7041 if (F->hasPersonalityFn() &&
7042 isScopedEHPersonality(classifyEHPersonality(F->getPersonalityFn()))) {
7043 // Run EH funclet coloring on-demand and cache results for other intrinsic
7044 // calls in this function
7045 if (BlockEHFuncletColors.empty())
7046 BlockEHFuncletColors = colorEHFunclets(*F);
7047
7048 // Check for catch-/cleanup-pad in first funclet block
7049 bool InEHFunclet = false;
7050 BasicBlock *CallBB = Call.getParent();
7051 const ColorVector &CV = BlockEHFuncletColors.find(CallBB)->second;
7052 assert(CV.size() > 0 && "Uncolored block");
7053 for (BasicBlock *ColorFirstBB : CV)
7054 if (auto It = ColorFirstBB->getFirstNonPHIIt();
7055 It != ColorFirstBB->end())
7057 InEHFunclet = true;
7058
7059 // Check for funclet operand bundle
7060 bool HasToken = false;
7061 for (unsigned I = 0, E = Call.getNumOperandBundles(); I != E; ++I)
7063 HasToken = true;
7064
7065 // This would cause silent code truncation in WinEHPrepare
7066 if (InEHFunclet)
7067 Check(HasToken, "Missing funclet token on intrinsic call", &Call);
7068 }
7069 }
7070}
7071
7072/// Carefully grab the subprogram from a local scope.
7073///
7074/// This carefully grabs the subprogram from a local scope, avoiding the
7075/// built-in assertions that would typically fire.
7077 if (!LocalScope)
7078 return nullptr;
7079
7080 if (auto *SP = dyn_cast<DISubprogram>(LocalScope))
7081 return SP;
7082
7083 if (auto *LB = dyn_cast<DILexicalBlockBase>(LocalScope))
7084 return getSubprogram(LB->getRawScope());
7085
7086 // Just return null; broken scope chains are checked elsewhere.
7087 assert(!isa<DILocalScope>(LocalScope) && "Unknown type of local scope");
7088 return nullptr;
7089}
7090
7091void Verifier::visit(DbgLabelRecord &DLR) {
7093 "invalid #dbg_label intrinsic variable", &DLR, DLR.getRawLabel());
7094
7095 // Ignore broken !dbg attachments; they're checked elsewhere.
7096 if (MDNode *N = DLR.getDebugLoc().getAsMDNode())
7097 if (!isa<DILocation>(N))
7098 return;
7099
7100 BasicBlock *BB = DLR.getParent();
7101 Function *F = BB ? BB->getParent() : nullptr;
7102
7103 // The scopes for variables and !dbg attachments must agree.
7104 DILabel *Label = DLR.getLabel();
7105 DILocation *Loc = DLR.getDebugLoc();
7106 CheckDI(Loc, "#dbg_label record requires a !dbg attachment", &DLR, BB, F);
7107
7108 DISubprogram *LabelSP = getSubprogram(Label->getRawScope());
7109 DISubprogram *LocSP = getSubprogram(Loc->getRawScope());
7110 if (!LabelSP || !LocSP)
7111 return;
7112
7113 CheckDI(LabelSP == LocSP,
7114 "mismatched subprogram between #dbg_label label and !dbg attachment",
7115 &DLR, BB, F, Label, Label->getScope()->getSubprogram(), Loc,
7116 Loc->getScope()->getSubprogram());
7117}
7118
7119void Verifier::visit(DbgVariableRecord &DVR) {
7120 BasicBlock *BB = DVR.getParent();
7121 Function *F = BB->getParent();
7122
7123 CheckDI(DVR.getType() == DbgVariableRecord::LocationType::Value ||
7124 DVR.getType() == DbgVariableRecord::LocationType::Declare ||
7125 DVR.getType() == DbgVariableRecord::LocationType::DeclareValue ||
7126 DVR.getType() == DbgVariableRecord::LocationType::Assign,
7127 "invalid #dbg record type", &DVR, DVR.getType(), BB, F);
7128
7129 // The location for a DbgVariableRecord must be either a ValueAsMetadata,
7130 // DIArgList, or an empty MDNode (which is a legacy representation for an
7131 // "undef" location).
7132 auto *MD = DVR.getRawLocation();
7133 CheckDI(MD && (isa<ValueAsMetadata>(MD) || isa<DIArgList>(MD) ||
7134 (isa<MDNode>(MD) && !cast<MDNode>(MD)->getNumOperands())),
7135 "invalid #dbg record address/value", &DVR, MD, BB, F);
7136 if (auto *VAM = dyn_cast<ValueAsMetadata>(MD)) {
7137 visitValueAsMetadata(*VAM, F);
7138 if (DVR.isDbgDeclare()) {
7139 // Allow integers here to support inttoptr salvage.
7140 Type *Ty = VAM->getValue()->getType();
7141 CheckDI(Ty->isPointerTy() || Ty->isIntegerTy(),
7142 "location of #dbg_declare must be a pointer or int", &DVR, MD, BB,
7143 F);
7144 }
7145 } else if (auto *AL = dyn_cast<DIArgList>(MD)) {
7146 visitDIArgList(*AL, F);
7147 }
7148
7150 "invalid #dbg record variable", &DVR, DVR.getRawVariable(), BB, F);
7151 visitMDNode(*DVR.getRawVariable(), AreDebugLocsAllowed::No);
7152
7154 "invalid #dbg record expression", &DVR, DVR.getRawExpression(), BB,
7155 F);
7156 visitMDNode(*DVR.getExpression(), AreDebugLocsAllowed::No);
7157
7158 if (DVR.isDbgAssign()) {
7160 "invalid #dbg_assign DIAssignID", &DVR, DVR.getRawAssignID(), BB,
7161 F);
7162 visitMDNode(*cast<DIAssignID>(DVR.getRawAssignID()),
7163 AreDebugLocsAllowed::No);
7164
7165 const auto *RawAddr = DVR.getRawAddress();
7166 // Similarly to the location above, the address for an assign
7167 // DbgVariableRecord must be a ValueAsMetadata or an empty MDNode, which
7168 // represents an undef address.
7169 CheckDI(
7170 isa<ValueAsMetadata>(RawAddr) ||
7171 (isa<MDNode>(RawAddr) && !cast<MDNode>(RawAddr)->getNumOperands()),
7172 "invalid #dbg_assign address", &DVR, DVR.getRawAddress(), BB, F);
7173 if (auto *VAM = dyn_cast<ValueAsMetadata>(RawAddr))
7174 visitValueAsMetadata(*VAM, F);
7175
7177 "invalid #dbg_assign address expression", &DVR,
7178 DVR.getRawAddressExpression(), BB, F);
7179 visitMDNode(*DVR.getAddressExpression(), AreDebugLocsAllowed::No);
7180
7181 // All of the linked instructions should be in the same function as DVR.
7182 for (Instruction *I : at::getAssignmentInsts(&DVR))
7183 CheckDI(DVR.getFunction() == I->getFunction(),
7184 "inst not in same function as #dbg_assign", I, &DVR, BB, F);
7185 }
7186
7187 // This check is redundant with one in visitLocalVariable().
7188 DILocalVariable *Var = DVR.getVariable();
7189 CheckDI(isType(Var->getRawType()), "invalid type ref", Var, Var->getRawType(),
7190 BB, F);
7191
7192 auto *DLNode = DVR.getDebugLoc().getAsMDNode();
7193 CheckDI(isa_and_nonnull<DILocation>(DLNode), "invalid #dbg record DILocation",
7194 &DVR, DLNode, BB, F);
7195 DILocation *Loc = DVR.getDebugLoc();
7196
7197 // The scopes for variables and !dbg attachments must agree.
7198 DISubprogram *VarSP = getSubprogram(Var->getRawScope());
7199 DISubprogram *LocSP = getSubprogram(Loc->getRawScope());
7200 if (!VarSP || !LocSP)
7201 return; // Broken scope chains are checked elsewhere.
7202
7203 CheckDI(VarSP == LocSP,
7204 "mismatched subprogram between #dbg record variable and DILocation",
7205 &DVR, BB, F, Var, Var->getScope()->getSubprogram(), Loc,
7206 Loc->getScope()->getSubprogram(), BB, F);
7207
7208 verifyFnArgs(DVR);
7209}
7210
7211void Verifier::visitVPIntrinsic(VPIntrinsic &VPI) {
7212 if (auto *VPCast = dyn_cast<VPCastIntrinsic>(&VPI)) {
7213 auto *RetTy = cast<VectorType>(VPCast->getType());
7214 auto *ValTy = cast<VectorType>(VPCast->getOperand(0)->getType());
7215 Check(RetTy->getElementCount() == ValTy->getElementCount(),
7216 "VP cast intrinsic first argument and result vector lengths must be "
7217 "equal",
7218 *VPCast);
7219
7220 switch (VPCast->getIntrinsicID()) {
7221 default:
7222 llvm_unreachable("Unknown VP cast intrinsic");
7223 case Intrinsic::vp_trunc:
7224 Check(RetTy->isIntOrIntVectorTy() && ValTy->isIntOrIntVectorTy(),
7225 "llvm.vp.trunc intrinsic first argument and result element type "
7226 "must be integer",
7227 *VPCast);
7228 Check(RetTy->getScalarSizeInBits() < ValTy->getScalarSizeInBits(),
7229 "llvm.vp.trunc intrinsic the bit size of first argument must be "
7230 "larger than the bit size of the return type",
7231 *VPCast);
7232 break;
7233 case Intrinsic::vp_zext:
7234 case Intrinsic::vp_sext:
7235 Check(RetTy->isIntOrIntVectorTy() && ValTy->isIntOrIntVectorTy(),
7236 "llvm.vp.zext or llvm.vp.sext intrinsic first argument and result "
7237 "element type must be integer",
7238 *VPCast);
7239 Check(RetTy->getScalarSizeInBits() > ValTy->getScalarSizeInBits(),
7240 "llvm.vp.zext or llvm.vp.sext intrinsic the bit size of first "
7241 "argument must be smaller than the bit size of the return type",
7242 *VPCast);
7243 break;
7244 case Intrinsic::vp_fptoui:
7245 case Intrinsic::vp_fptosi:
7246 case Intrinsic::vp_lrint:
7247 case Intrinsic::vp_llrint:
7248 Check(
7249 RetTy->isIntOrIntVectorTy() && ValTy->isFPOrFPVectorTy(),
7250 "llvm.vp.fptoui, llvm.vp.fptosi, llvm.vp.lrint or llvm.vp.llrint" "intrinsic first argument element "
7251 "type must be floating-point and result element type must be integer",
7252 *VPCast);
7253 break;
7254 case Intrinsic::vp_uitofp:
7255 case Intrinsic::vp_sitofp:
7256 Check(
7257 RetTy->isFPOrFPVectorTy() && ValTy->isIntOrIntVectorTy(),
7258 "llvm.vp.uitofp or llvm.vp.sitofp intrinsic first argument element "
7259 "type must be integer and result element type must be floating-point",
7260 *VPCast);
7261 break;
7262 case Intrinsic::vp_fptrunc:
7263 Check(RetTy->isFPOrFPVectorTy() && ValTy->isFPOrFPVectorTy(),
7264 "llvm.vp.fptrunc intrinsic first argument and result element type "
7265 "must be floating-point",
7266 *VPCast);
7267 Check(RetTy->getScalarSizeInBits() < ValTy->getScalarSizeInBits(),
7268 "llvm.vp.fptrunc intrinsic the bit size of first argument must be "
7269 "larger than the bit size of the return type",
7270 *VPCast);
7271 break;
7272 case Intrinsic::vp_fpext:
7273 Check(RetTy->isFPOrFPVectorTy() && ValTy->isFPOrFPVectorTy(),
7274 "llvm.vp.fpext intrinsic first argument and result element type "
7275 "must be floating-point",
7276 *VPCast);
7277 Check(RetTy->getScalarSizeInBits() > ValTy->getScalarSizeInBits(),
7278 "llvm.vp.fpext intrinsic the bit size of first argument must be "
7279 "smaller than the bit size of the return type",
7280 *VPCast);
7281 break;
7282 case Intrinsic::vp_ptrtoint:
7283 Check(RetTy->isIntOrIntVectorTy() && ValTy->isPtrOrPtrVectorTy(),
7284 "llvm.vp.ptrtoint intrinsic first argument element type must be "
7285 "pointer and result element type must be integer",
7286 *VPCast);
7287 break;
7288 case Intrinsic::vp_inttoptr:
7289 Check(RetTy->isPtrOrPtrVectorTy() && ValTy->isIntOrIntVectorTy(),
7290 "llvm.vp.inttoptr intrinsic first argument element type must be "
7291 "integer and result element type must be pointer",
7292 *VPCast);
7293 break;
7294 }
7295 }
7296
7297 switch (VPI.getIntrinsicID()) {
7298 case Intrinsic::vp_fcmp: {
7299 auto Pred = cast<VPCmpIntrinsic>(&VPI)->getPredicate();
7301 "invalid predicate for VP FP comparison intrinsic", &VPI);
7302 break;
7303 }
7304 case Intrinsic::vp_icmp: {
7305 auto Pred = cast<VPCmpIntrinsic>(&VPI)->getPredicate();
7307 "invalid predicate for VP integer comparison intrinsic", &VPI);
7308 break;
7309 }
7310 case Intrinsic::vp_is_fpclass: {
7311 auto TestMask = cast<ConstantInt>(VPI.getOperand(1));
7312 Check((TestMask->getZExtValue() & ~static_cast<unsigned>(fcAllFlags)) == 0,
7313 "unsupported bits for llvm.vp.is.fpclass test mask");
7314 break;
7315 }
7316 case Intrinsic::experimental_vp_splice: {
7317 VectorType *VecTy = cast<VectorType>(VPI.getType());
7318 int64_t Idx = cast<ConstantInt>(VPI.getArgOperand(2))->getSExtValue();
7319 int64_t KnownMinNumElements = VecTy->getElementCount().getKnownMinValue();
7320 if (VPI.getParent() && VPI.getParent()->getParent()) {
7321 AttributeList Attrs = VPI.getParent()->getParent()->getAttributes();
7322 if (Attrs.hasFnAttr(Attribute::VScaleRange))
7323 KnownMinNumElements *= Attrs.getFnAttrs().getVScaleRangeMin();
7324 }
7325 Check((Idx < 0 && std::abs(Idx) <= KnownMinNumElements) ||
7326 (Idx >= 0 && Idx < KnownMinNumElements),
7327 "The splice index exceeds the range [-VL, VL-1] where VL is the "
7328 "known minimum number of elements in the vector. For scalable "
7329 "vectors the minimum number of elements is determined from "
7330 "vscale_range.",
7331 &VPI);
7332 break;
7333 }
7334 }
7335}
7336
7337void Verifier::visitConstrainedFPIntrinsic(ConstrainedFPIntrinsic &FPI) {
7338 unsigned NumOperands = FPI.getNonMetadataArgCount();
7339 bool HasRoundingMD =
7341
7342 // Add the expected number of metadata operands.
7343 NumOperands += (1 + HasRoundingMD);
7344
7345 // Compare intrinsics carry an extra predicate metadata operand.
7347 NumOperands += 1;
7348 Check((FPI.arg_size() == NumOperands),
7349 "invalid arguments for constrained FP intrinsic", &FPI);
7350
7351 switch (FPI.getIntrinsicID()) {
7352 case Intrinsic::experimental_constrained_lrint:
7353 case Intrinsic::experimental_constrained_llrint: {
7354 Type *ValTy = FPI.getArgOperand(0)->getType();
7355 Type *ResultTy = FPI.getType();
7356 Check(!ValTy->isVectorTy() && !ResultTy->isVectorTy(),
7357 "Intrinsic does not support vectors", &FPI);
7358 break;
7359 }
7360
7361 case Intrinsic::experimental_constrained_lround:
7362 case Intrinsic::experimental_constrained_llround: {
7363 Type *ValTy = FPI.getArgOperand(0)->getType();
7364 Type *ResultTy = FPI.getType();
7365 Check(!ValTy->isVectorTy() && !ResultTy->isVectorTy(),
7366 "Intrinsic does not support vectors", &FPI);
7367 break;
7368 }
7369
7370 case Intrinsic::experimental_constrained_fcmp:
7371 case Intrinsic::experimental_constrained_fcmps: {
7372 auto Pred = cast<ConstrainedFPCmpIntrinsic>(&FPI)->getPredicate();
7374 "invalid predicate for constrained FP comparison intrinsic", &FPI);
7375 break;
7376 }
7377
7378 case Intrinsic::experimental_constrained_fptosi:
7379 case Intrinsic::experimental_constrained_fptoui: {
7380 Value *Operand = FPI.getArgOperand(0);
7381 ElementCount SrcEC;
7382 Check(Operand->getType()->isFPOrFPVectorTy(),
7383 "Intrinsic first argument must be floating point", &FPI);
7384 if (auto *OperandT = dyn_cast<VectorType>(Operand->getType())) {
7385 SrcEC = cast<VectorType>(OperandT)->getElementCount();
7386 }
7387
7388 Operand = &FPI;
7389 Check(SrcEC.isNonZero() == Operand->getType()->isVectorTy(),
7390 "Intrinsic first argument and result disagree on vector use", &FPI);
7391 Check(Operand->getType()->isIntOrIntVectorTy(),
7392 "Intrinsic result must be an integer", &FPI);
7393 if (auto *OperandT = dyn_cast<VectorType>(Operand->getType())) {
7394 Check(SrcEC == cast<VectorType>(OperandT)->getElementCount(),
7395 "Intrinsic first argument and result vector lengths must be equal",
7396 &FPI);
7397 }
7398 break;
7399 }
7400
7401 case Intrinsic::experimental_constrained_sitofp:
7402 case Intrinsic::experimental_constrained_uitofp: {
7403 Value *Operand = FPI.getArgOperand(0);
7404 ElementCount SrcEC;
7405 Check(Operand->getType()->isIntOrIntVectorTy(),
7406 "Intrinsic first argument must be integer", &FPI);
7407 if (auto *OperandT = dyn_cast<VectorType>(Operand->getType())) {
7408 SrcEC = cast<VectorType>(OperandT)->getElementCount();
7409 }
7410
7411 Operand = &FPI;
7412 Check(SrcEC.isNonZero() == Operand->getType()->isVectorTy(),
7413 "Intrinsic first argument and result disagree on vector use", &FPI);
7414 Check(Operand->getType()->isFPOrFPVectorTy(),
7415 "Intrinsic result must be a floating point", &FPI);
7416 if (auto *OperandT = dyn_cast<VectorType>(Operand->getType())) {
7417 Check(SrcEC == cast<VectorType>(OperandT)->getElementCount(),
7418 "Intrinsic first argument and result vector lengths must be equal",
7419 &FPI);
7420 }
7421 break;
7422 }
7423
7424 case Intrinsic::experimental_constrained_fptrunc:
7425 case Intrinsic::experimental_constrained_fpext: {
7426 Value *Operand = FPI.getArgOperand(0);
7427 Type *OperandTy = Operand->getType();
7428 Value *Result = &FPI;
7429 Type *ResultTy = Result->getType();
7430 Check(OperandTy->isFPOrFPVectorTy(),
7431 "Intrinsic first argument must be FP or FP vector", &FPI);
7432 Check(ResultTy->isFPOrFPVectorTy(),
7433 "Intrinsic result must be FP or FP vector", &FPI);
7434 Check(OperandTy->isVectorTy() == ResultTy->isVectorTy(),
7435 "Intrinsic first argument and result disagree on vector use", &FPI);
7436 if (OperandTy->isVectorTy()) {
7437 Check(cast<VectorType>(OperandTy)->getElementCount() ==
7438 cast<VectorType>(ResultTy)->getElementCount(),
7439 "Intrinsic first argument and result vector lengths must be equal",
7440 &FPI);
7441 }
7442 if (FPI.getIntrinsicID() == Intrinsic::experimental_constrained_fptrunc) {
7443 Check(OperandTy->getScalarSizeInBits() > ResultTy->getScalarSizeInBits(),
7444 "Intrinsic first argument's type must be larger than result type",
7445 &FPI);
7446 } else {
7447 Check(OperandTy->getScalarSizeInBits() < ResultTy->getScalarSizeInBits(),
7448 "Intrinsic first argument's type must be smaller than result type",
7449 &FPI);
7450 }
7451 break;
7452 }
7453
7454 default:
7455 break;
7456 }
7457
7458 // If a non-metadata argument is passed in a metadata slot then the
7459 // error will be caught earlier when the incorrect argument doesn't
7460 // match the specification in the intrinsic call table. Thus, no
7461 // argument type check is needed here.
7462
7463 Check(FPI.getExceptionBehavior().has_value(),
7464 "invalid exception behavior argument", &FPI);
7465 if (HasRoundingMD) {
7466 Check(FPI.getRoundingMode().has_value(), "invalid rounding mode argument",
7467 &FPI);
7468 }
7469}
7470
7471void Verifier::verifyFragmentExpression(const DbgVariableRecord &DVR) {
7472 DILocalVariable *V = dyn_cast_or_null<DILocalVariable>(DVR.getRawVariable());
7473 DIExpression *E = dyn_cast_or_null<DIExpression>(DVR.getRawExpression());
7474
7475 // We don't know whether this intrinsic verified correctly.
7476 if (!V || !E || !E->isValid())
7477 return;
7478
7479 // Nothing to do if this isn't a DW_OP_LLVM_fragment expression.
7480 auto Fragment = E->getFragmentInfo();
7481 if (!Fragment)
7482 return;
7483
7484 // The frontend helps out GDB by emitting the members of local anonymous
7485 // unions as artificial local variables with shared storage. When SROA splits
7486 // the storage for artificial local variables that are smaller than the entire
7487 // union, the overhang piece will be outside of the allotted space for the
7488 // variable and this check fails.
7489 // FIXME: Remove this check as soon as clang stops doing this; it hides bugs.
7490 if (V->isArtificial())
7491 return;
7492
7493 verifyFragmentExpression(*V, *Fragment, &DVR);
7494}
7495
7496template <typename ValueOrMetadata>
7497void Verifier::verifyFragmentExpression(const DIVariable &V,
7499 ValueOrMetadata *Desc) {
7500 // If there's no size, the type is broken, but that should be checked
7501 // elsewhere.
7502 auto VarSize = V.getSizeInBits();
7503 if (!VarSize)
7504 return;
7505
7506 unsigned FragSize = Fragment.SizeInBits;
7507 unsigned FragOffset = Fragment.OffsetInBits;
7508 CheckDI(FragSize + FragOffset <= *VarSize,
7509 "fragment is larger than or outside of variable", Desc, &V);
7510 CheckDI(FragSize != *VarSize, "fragment covers entire variable", Desc, &V);
7511}
7512
7513void Verifier::verifyFnArgs(const DbgVariableRecord &DVR) {
7514 // This function does not take the scope of noninlined function arguments into
7515 // account. Don't run it if current function is nodebug, because it may
7516 // contain inlined debug intrinsics.
7517 if (!HasDebugInfo)
7518 return;
7519
7520 // For performance reasons only check non-inlined ones.
7521 if (DVR.getDebugLoc()->getInlinedAt())
7522 return;
7523
7524 DILocalVariable *Var = DVR.getVariable();
7525 CheckDI(Var, "#dbg record without variable");
7526
7527 unsigned ArgNo = Var->getArg();
7528 if (!ArgNo)
7529 return;
7530
7531 // Verify there are no duplicate function argument debug info entries.
7532 // These will cause hard-to-debug assertions in the DWARF backend.
7533 if (DebugFnArgs.size() < ArgNo)
7534 DebugFnArgs.resize(ArgNo, nullptr);
7535
7536 auto *Prev = DebugFnArgs[ArgNo - 1];
7537 DebugFnArgs[ArgNo - 1] = Var;
7538 CheckDI(!Prev || (Prev == Var), "conflicting debug info for argument", &DVR,
7539 Prev, Var);
7540}
7541
7542void Verifier::verifyNotEntryValue(const DbgVariableRecord &DVR) {
7543 DIExpression *E = dyn_cast_or_null<DIExpression>(DVR.getRawExpression());
7544
7545 // We don't know whether this intrinsic verified correctly.
7546 if (!E || !E->isValid())
7547 return;
7548
7550 Value *VarValue = DVR.getVariableLocationOp(0);
7551 if (isa<UndefValue>(VarValue) || isa<PoisonValue>(VarValue))
7552 return;
7553 // We allow EntryValues for swift async arguments, as they have an
7554 // ABI-guarantee to be turned into a specific register.
7555 if (auto *ArgLoc = dyn_cast_or_null<Argument>(VarValue);
7556 ArgLoc && ArgLoc->hasAttribute(Attribute::SwiftAsync))
7557 return;
7558 }
7559
7560 CheckDI(!E->isEntryValue(),
7561 "Entry values are only allowed in MIR unless they target a "
7562 "swiftasync Argument",
7563 &DVR);
7564}
7565
7566void Verifier::verifyCompileUnits() {
7567 // When more than one Module is imported into the same context, such as during
7568 // an LTO build before linking the modules, ODR type uniquing may cause types
7569 // to point to a different CU. This check does not make sense in this case.
7570 if (M.getContext().isODRUniquingDebugTypes())
7571 return;
7572 auto *CUs = M.getNamedMetadata("llvm.dbg.cu");
7573 SmallPtrSet<const Metadata *, 2> Listed;
7574 if (CUs)
7575 Listed.insert_range(CUs->operands());
7576 for (const auto *CU : CUVisited)
7577 CheckDI(Listed.count(CU), "DICompileUnit not listed in llvm.dbg.cu", CU);
7578 CUVisited.clear();
7579}
7580
7581void Verifier::verifyDeoptimizeCallingConvs() {
7582 if (DeoptimizeDeclarations.empty())
7583 return;
7584
7585 const Function *First = DeoptimizeDeclarations[0];
7586 for (const auto *F : ArrayRef(DeoptimizeDeclarations).slice(1)) {
7587 Check(First->getCallingConv() == F->getCallingConv(),
7588 "All llvm.experimental.deoptimize declarations must have the same "
7589 "calling convention",
7590 First, F);
7591 }
7592}
7593
7594void Verifier::verifyAttachedCallBundle(const CallBase &Call,
7595 const OperandBundleUse &BU) {
7596 FunctionType *FTy = Call.getFunctionType();
7597
7598 Check((FTy->getReturnType()->isPointerTy() ||
7599 (Call.doesNotReturn() && FTy->getReturnType()->isVoidTy())),
7600 "a call with operand bundle \"clang.arc.attachedcall\" must call a "
7601 "function returning a pointer or a non-returning function that has a "
7602 "void return type",
7603 Call);
7604
7605 Check(BU.Inputs.size() == 1 && isa<Function>(BU.Inputs.front()),
7606 "operand bundle \"clang.arc.attachedcall\" requires one function as "
7607 "an argument",
7608 Call);
7609
7610 auto *Fn = cast<Function>(BU.Inputs.front());
7611 Intrinsic::ID IID = Fn->getIntrinsicID();
7612
7613 if (IID) {
7614 Check((IID == Intrinsic::objc_retainAutoreleasedReturnValue ||
7615 IID == Intrinsic::objc_claimAutoreleasedReturnValue ||
7616 IID == Intrinsic::objc_unsafeClaimAutoreleasedReturnValue),
7617 "invalid function argument", Call);
7618 } else {
7619 StringRef FnName = Fn->getName();
7620 Check((FnName == "objc_retainAutoreleasedReturnValue" ||
7621 FnName == "objc_claimAutoreleasedReturnValue" ||
7622 FnName == "objc_unsafeClaimAutoreleasedReturnValue"),
7623 "invalid function argument", Call);
7624 }
7625}
7626
// Check all collected llvm.experimental.noalias.scope.decl intrinsics: each
// declaration must carry a metadata scope list with exactly one scope, and —
// when enabled via a command-line flag — declarations of the same scope must
// not dominate one another.
7627void Verifier::verifyNoAliasScopeDecl() {
7628  if (NoAliasScopeDecls.empty())
7629    return;
7630
7631  // only a single scope must be declared at a time.
7632  for (auto *II : NoAliasScopeDecls) {
7633    assert(II->getIntrinsicID() == Intrinsic::experimental_noalias_scope_decl &&
7634           "Not a llvm.experimental.noalias.scope.decl ?");
    // NOTE(review): the dyn_cast's operand expression (source line 7636) is
    // missing from this excerpt — presumably the intrinsic's scope-list
    // argument; confirm against the full file.
7635    const auto *ScopeListMV = dyn_cast<MetadataAsValue>(
7637    Check(ScopeListMV != nullptr,
7638          "llvm.experimental.noalias.scope.decl must have a MetadataAsValue "
7639          "argument",
7640          II);
7641
    // The wrapped metadata must be an MDNode holding exactly one scope.
7642    const auto *ScopeListMD = dyn_cast<MDNode>(ScopeListMV->getMetadata());
7643    Check(ScopeListMD != nullptr, "!id.scope.list must point to an MDNode", II);
7644    Check(ScopeListMD->getNumOperands() == 1,
7645          "!id.scope.list must point to a list with a single scope", II);
7646    visitAliasScopeListMetadata(ScopeListMD);
7647  }
7648
7649  // Only check the domination rule when requested. Once all passes have been
7650  // adapted this option can go away.
    // NOTE(review): the guarding condition (source line 7651 — presumably
    // testing the VerifyNoAliasScopeDomination cl::opt declared in this file)
    // is missing from this excerpt.
7652    return;
7653
7654  // Now sort the intrinsics based on the scope MDNode so that declarations of
7655  // the same scopes are next to each other.
7656  auto GetScope = [](IntrinsicInst *II) {
7657    const auto *ScopeListMV = cast<MetadataAsValue>(
    // NOTE(review): the cast's operand expression (source line 7658) is
    // missing from this excerpt.
7659    return &cast<MDNode>(ScopeListMV->getMetadata())->getOperand(0);
7660  };
7661
7662  // We are sorting on MDNode pointers here. For valid input IR this is ok.
7663  // TODO: Sort on Metadata ID to avoid non-deterministic error messages.
7664  auto Compare = [GetScope](IntrinsicInst *Lhs, IntrinsicInst *Rhs) {
7665    return GetScope(Lhs) < GetScope(Rhs);
7666  };
7667
7668  llvm::sort(NoAliasScopeDecls, Compare);
7669
7670  // Go over the intrinsics and check that for the same scope, they are not
7671  // dominating each other.
7672  auto ItCurrent = NoAliasScopeDecls.begin();
7673  while (ItCurrent != NoAliasScopeDecls.end()) {
    // Advance ItNext past the contiguous run of declarations with CurScope.
7674    auto CurScope = GetScope(*ItCurrent);
7675    auto ItNext = ItCurrent;
7676    do {
7677      ++ItNext;
7678    } while (ItNext != NoAliasScopeDecls.end() &&
7679             GetScope(*ItNext) == CurScope);
7680
7681    // [ItCurrent, ItNext) represents the declarations for the same scope.
7682    // Ensure they are not dominating each other.. but only if it is not too
7683    // expensive.
7684    if (ItNext - ItCurrent < 32)
7685      for (auto *I : llvm::make_range(ItCurrent, ItNext))
7686        for (auto *J : llvm::make_range(ItCurrent, ItNext))
7687          if (I != J)
7688            Check(!DT.dominates(I, J),
7689                  "llvm.experimental.noalias.scope.decl dominates another one "
7690                  "with the same scope",
7691                  I);
7692    ItCurrent = ItNext;
7693  }
7694}
7695
7696//===----------------------------------------------------------------------===//
7697// Implement the public interfaces to this file...
7698//===----------------------------------------------------------------------===//
7699
// NOTE(review): the signature line (source line 7700) is missing from this
// excerpt; by the surrounding interfaces this is the body of
// llvm::verifyFunction(const Function &f, raw_ostream *OS) — confirm against
// the full file. Returns true when the function is BROKEN.
7701  Function &F = const_cast<Function &>(f);
7702
7703  // Don't use a raw_null_ostream. Printing IR is expensive.
7704  Verifier V(OS, /*ShouldTreatBrokenDebugInfoAsError=*/true, *f.getParent());
7705
7706  // Note that this function's return value is inverted from what you would
7707  // expect of a function called "verify".
7708  return !V.verify(F);
7709}
7710
// NOTE(review): the first signature line (source line 7711 — presumably
// llvm::verifyModule(const Module &M, raw_ostream *OS, ...) is missing from
// this excerpt; confirm against the full file. Returns true when the module
// is BROKEN.
7712                        bool *BrokenDebugInfo) {
7713  // Don't use a raw_null_ostream. Printing IR is expensive.
7714  Verifier V(OS, /*ShouldTreatBrokenDebugInfoAsError=*/!BrokenDebugInfo, M);
7715
  // Verify each function first, then module-level properties; any failure
  // marks the module broken.
7716  bool Broken = false;
7717  for (const Function &F : M)
7718    Broken |= !V.verify(F);
7719
7720  Broken |= !V.verify();
  // Report (rather than fail on) broken debug info when the caller asked for
  // it via the out-parameter.
7721  if (BrokenDebugInfo)
7722    *BrokenDebugInfo = V.hasBrokenDebugInfo();
7723  // Note that this function's return value is inverted from what you would
7724  // expect of a function called "verify".
7725  return Broken;
7726}
7727
7728namespace {
7729
// Legacy pass-manager wrapper around the Verifier: verifies each function as
// the pass runs, and finishes with declaration/module verification in
// doFinalization.
7730struct VerifierLegacyPass : public FunctionPass {
7731  static char ID;
7732
7733  std::unique_ptr<Verifier> V;
  // When true, any broken function/module aborts compilation via
  // report_fatal_error.
7734  bool FatalErrors = true;
7735
7736  VerifierLegacyPass() : FunctionPass(ID) {
    // NOTE(review): the constructor body line (source line 7737 — presumably
    // the pass-registry initialization call) is missing from this excerpt.
7738  }
7739  explicit VerifierLegacyPass(bool FatalErrors)
7740      : FunctionPass(ID),
7741        FatalErrors(FatalErrors) {
    // NOTE(review): the constructor body line (source line 7742) is missing
    // from this excerpt.
7743  }
7744
7745  bool doInitialization(Module &M) override {
    // Broken debug info is not treated as a hard error by this pass.
7746    V = std::make_unique<Verifier>(
7747        &dbgs(), /*ShouldTreatBrokenDebugInfoAsError=*/false, M);
7748    return false;
7749  }
7750
7751  bool runOnFunction(Function &F) override {
7752    if (!V->verify(F) && FatalErrors) {
7753      errs() << "in function " << F.getName() << '\n';
7754      report_fatal_error("Broken function found, compilation aborted!");
7755    }
    // Verification never mutates the IR.
7756    return false;
7757  }
7758
7759  bool doFinalization(Module &M) override {
    // Declarations are not visited by runOnFunction, so verify them here.
7760    bool HasErrors = false;
7761    for (Function &F : M)
7762      if (F.isDeclaration())
7763        HasErrors |= !V->verify(F);
7764
7765    HasErrors |= !V->verify();
7766    if (FatalErrors && (HasErrors || V->hasBrokenDebugInfo()))
7767      report_fatal_error("Broken module found, compilation aborted!");
7768    return false;
7769  }
7770
7771  void getAnalysisUsage(AnalysisUsage &AU) const override {
7772    AU.setPreservesAll();
7773  }
7774};
7775
7776} // end anonymous namespace
7777
7778/// Helper to issue failure from the TBAA verification
7779template <typename... Tys> void TBAAVerifier::CheckFailed(Tys &&... Args) {
7780 if (Diagnostic)
7781 return Diagnostic->CheckFailed(Args...);
7782}
7783
// TBAA analogue of Check(): when the condition fails, report the failure
// through TBAAVerifier::CheckFailed and make the enclosing function return
// false (i.e. "invalid TBAA").
7784#define CheckTBAA(C, ...)                                                    \
7785  do {                                                                       \
7786    if (!(C)) {                                                              \
7787      CheckFailed(__VA_ARGS__);                                              \
7788      return false;                                                          \
7789    }                                                                        \
7790  } while (false)
7791
7792/// Verify that \p BaseNode can be used as the "base type" in the struct-path
7793/// TBAA scheme. This means \p BaseNode is either a scalar node, or a
7794/// struct-type node describing an aggregate data structure (like a struct).
7795TBAAVerifier::TBAABaseNodeSummary
7796TBAAVerifier::verifyTBAABaseNode(const Instruction *I, const MDNode *BaseNode,
7797 bool IsNewFormat) {
7798 if (BaseNode->getNumOperands() < 2) {
7799 CheckFailed("Base nodes must have at least two operands", I, BaseNode);
7800 return {true, ~0u};
7801 }
7802
7803 auto Itr = TBAABaseNodes.find(BaseNode);
7804 if (Itr != TBAABaseNodes.end())
7805 return Itr->second;
7806
7807 auto Result = verifyTBAABaseNodeImpl(I, BaseNode, IsNewFormat);
7808 auto InsertResult = TBAABaseNodes.insert({BaseNode, Result});
7809 (void)InsertResult;
7810 assert(InsertResult.second && "We just checked!");
7811 return Result;
7812}
7813
// Core validation for a struct-path TBAA base node, old or new format:
// checks operand-count parity, the type-name/size fields, and that every
// (field, offset[, size]) group is well-formed with non-decreasing offsets
// of one consistent bit width. Returns {Invalid, BitWidth}.
7814TBAAVerifier::TBAABaseNodeSummary
7815TBAAVerifier::verifyTBAABaseNodeImpl(const Instruction *I,
7816                                     const MDNode *BaseNode, bool IsNewFormat) {
7817  const TBAAVerifier::TBAABaseNodeSummary InvalidNode = {true, ~0u};
7818
7819  if (BaseNode->getNumOperands() == 2) {
7820    // Scalar nodes can only be accessed at offset 0.
7821    return isValidScalarTBAANode(BaseNode)
7822               ? TBAAVerifier::TBAABaseNodeSummary({false, 0})
7823               : InvalidNode;
7824  }
7825
  // New format: operands come in (type, offset, size) triples. Old format: a
  // name operand followed by (type, offset) pairs, hence an odd total.
7826  if (IsNewFormat) {
7827    if (BaseNode->getNumOperands() % 3 != 0) {
7828      CheckFailed("Access tag nodes must have the number of operands that is a "
7829                  "multiple of 3!", BaseNode);
7830      return InvalidNode;
7831    }
7832  } else {
7833    if (BaseNode->getNumOperands() % 2 != 1) {
7834      CheckFailed("Struct tag nodes must have an odd number of operands!",
7835                  BaseNode);
7836      return InvalidNode;
7837    }
7838  }
7839
7840  // Check the type size field.
7841  if (IsNewFormat) {
7842    auto *TypeSizeNode = mdconst::dyn_extract_or_null<ConstantInt>(
7843        BaseNode->getOperand(1));
7844    if (!TypeSizeNode) {
7845      CheckFailed("Type size nodes must be constants!", I, BaseNode);
7846      return InvalidNode;
7847    }
7848  }
7849
7850  // Check the type name field. In the new format it can be anything.
7851  if (!IsNewFormat && !isa<MDString>(BaseNode->getOperand(0))) {
7852    CheckFailed("Struct tag nodes have a string as their first operand",
7853                BaseNode);
7854    return InvalidNode;
7855  }
7856
  // Field-level failures are accumulated so all diagnostics get printed.
7857  bool Failed = false;
7858
7859  std::optional<APInt> PrevOffset;
7860  unsigned BitWidth = ~0u;
7861
7862  // We've already checked that BaseNode is not a degenerate root node with one
7863  // operand in \c verifyTBAABaseNode, so this loop should run at least once.
7864  unsigned FirstFieldOpNo = IsNewFormat ? 3 : 1;
7865  unsigned NumOpsPerField = IsNewFormat ? 3 : 2;
7866  for (unsigned Idx = FirstFieldOpNo; Idx < BaseNode->getNumOperands();
7867       Idx += NumOpsPerField) {
7868    const MDOperand &FieldTy = BaseNode->getOperand(Idx);
7869    const MDOperand &FieldOffset = BaseNode->getOperand(Idx + 1);
7870    if (!isa<MDNode>(FieldTy)) {
7871      CheckFailed("Incorrect field entry in struct type node!", I, BaseNode);
7872      Failed = true;
7873      continue;
7874    }
7875
    // NOTE(review): the initializer (source line 7877 — presumably
    // mdconst::dyn_extract_or_null<ConstantInt>(FieldOffset)) is missing from
    // this excerpt.
7876    auto *OffsetEntryCI =
7878    if (!OffsetEntryCI) {
7879      CheckFailed("Offset entries must be constants!", I, BaseNode);
7880      Failed = true;
7881      continue;
7882    }
7883
    // The first offset fixes the bit width; every later offset must match.
7884    if (BitWidth == ~0u)
7885      BitWidth = OffsetEntryCI->getBitWidth();
7886
7887    if (OffsetEntryCI->getBitWidth() != BitWidth) {
7888      CheckFailed(
7889          "Bitwidth between the offsets and struct type entries must match", I,
7890          BaseNode);
7891      Failed = true;
7892      continue;
7893    }
7894
7895    // NB! As far as I can tell, we generate a non-strictly increasing offset
7896    // sequence only from structs that have zero size bit fields. When
7897    // recursing into a contained struct in \c getFieldNodeFromTBAABaseNode we
7898    // pick the field lexically the latest in struct type metadata node. This
7899    // mirrors the actual behavior of the alias analysis implementation.
7900    bool IsAscending =
7901        !PrevOffset || PrevOffset->ule(OffsetEntryCI->getValue());
7902
7903    if (!IsAscending) {
7904      CheckFailed("Offsets must be increasing!", I, BaseNode);
7905      Failed = true;
7906    }
7907
7908    PrevOffset = OffsetEntryCI->getValue();
7909
    // New-format fields carry a third operand: the member size constant.
7910    if (IsNewFormat) {
7911      auto *MemberSizeNode = mdconst::dyn_extract_or_null<ConstantInt>(
7912          BaseNode->getOperand(Idx + 2));
7913      if (!MemberSizeNode) {
7914        CheckFailed("Member size entries must be constants!", I, BaseNode);
7915        Failed = true;
7916        continue;
7917      }
7918    }
7919  }
7920
7921  return Failed ? InvalidNode
7922                : TBAAVerifier::TBAABaseNodeSummary(false, BitWidth);
7923}
7924
7925static bool IsRootTBAANode(const MDNode *MD) {
7926 return MD->getNumOperands() < 2;
7927}
7928
// Implementation helper for isValidScalarTBAANode: checks the node's shape and
// recurses up the parent chain, using Visited to guard against cycles.
// NOTE(review): the second parameter line (source line 7930 — the
// SmallPtrSetImpl<const MDNode *> &Visited declaration, per this function's
// declaration elsewhere) is missing from this excerpt.
7929static bool IsScalarTBAANodeImpl(const MDNode *MD,
7931  if (MD->getNumOperands() != 2 && MD->getNumOperands() != 3)
7932    return false;
7933
  // Operand 0 must be the scalar type's name string.
7934  if (!isa<MDString>(MD->getOperand(0)))
7935    return false;
7936
7937  if (MD->getNumOperands() == 3) {
    // NOTE(review): source line 7938 (extraction of the third operand into a
    // ConstantInt named Offset) is missing from this excerpt.
7939    if (!(Offset && Offset->isZero() && isa<MDString>(MD->getOperand(0))))
7940      return false;
7941  }
7942
  // The parent (operand 1) must itself be a root or scalar node, and must not
  // have been visited before (cycle check).
7943  auto *Parent = dyn_cast_or_null<MDNode>(MD->getOperand(1));
7944  return Parent && Visited.insert(Parent).second &&
7945         (IsRootTBAANode(Parent) || IsScalarTBAANodeImpl(Parent, Visited));
7946}
7947
7948bool TBAAVerifier::isValidScalarTBAANode(const MDNode *MD) {
7949 auto ResultIt = TBAAScalarNodes.find(MD);
7950 if (ResultIt != TBAAScalarNodes.end())
7951 return ResultIt->second;
7952
7953 SmallPtrSet<const MDNode *, 4> Visited;
7954 bool Result = IsScalarTBAANodeImpl(MD, Visited);
7955 auto InsertResult = TBAAScalarNodes.insert({MD, Result});
7956 (void)InsertResult;
7957 assert(InsertResult.second && "Just checked!");
7958
7959 return Result;
7960}
7961
7962/// Returns the field node at the offset \p Offset in \p BaseNode. Update \p
7963/// Offset in place to be the offset within the field node returned.
7964///
7965/// We assume we've okayed \p BaseNode via \c verifyTBAABaseNode.
7966MDNode *TBAAVerifier::getFieldNodeFromTBAABaseNode(const Instruction *I,
7967 const MDNode *BaseNode,
7968 APInt &Offset,
7969 bool IsNewFormat) {
7970 assert(BaseNode->getNumOperands() >= 2 && "Invalid base node!");
7971
7972 // Scalar nodes have only one possible "field" -- their parent in the access
7973 // hierarchy. Offset must be zero at this point, but our caller is supposed
7974 // to check that.
7975 if (BaseNode->getNumOperands() == 2)
7976 return cast<MDNode>(BaseNode->getOperand(1));
7977
7978 unsigned FirstFieldOpNo = IsNewFormat ? 3 : 1;
7979 unsigned NumOpsPerField = IsNewFormat ? 3 : 2;
7980 for (unsigned Idx = FirstFieldOpNo; Idx < BaseNode->getNumOperands();
7981 Idx += NumOpsPerField) {
7982 auto *OffsetEntryCI =
7983 mdconst::extract<ConstantInt>(BaseNode->getOperand(Idx + 1));
7984 if (OffsetEntryCI->getValue().ugt(Offset)) {
7985 if (Idx == FirstFieldOpNo) {
7986 CheckFailed("Could not find TBAA parent in struct type node", I,
7987 BaseNode, &Offset);
7988 return nullptr;
7989 }
7990
7991 unsigned PrevIdx = Idx - NumOpsPerField;
7992 auto *PrevOffsetEntryCI =
7993 mdconst::extract<ConstantInt>(BaseNode->getOperand(PrevIdx + 1));
7994 Offset -= PrevOffsetEntryCI->getValue();
7995 return cast<MDNode>(BaseNode->getOperand(PrevIdx));
7996 }
7997 }
7998
7999 unsigned LastIdx = BaseNode->getNumOperands() - NumOpsPerField;
8000 auto *LastOffsetEntryCI = mdconst::extract<ConstantInt>(
8001 BaseNode->getOperand(LastIdx + 1));
8002 Offset -= LastOffsetEntryCI->getValue();
8003 return cast<MDNode>(BaseNode->getOperand(LastIdx));
8004}
8005
// Distinguish new-format TBAA type nodes from old-format ones.
// NOTE(review): the signature line (source line 8006 — static bool
// isNewFormatTBAATypeNode(llvm::MDNode *Type), per this function's declaration
// listed elsewhere in the file) is missing from this excerpt.
8007  if (!Type || Type->getNumOperands() < 3)
8008    return false;
8009
8010  // In the new format type nodes shall have a reference to the parent type as
8011  // its first operand.
8012  return isa_and_nonnull<MDNode>(Type->getOperand(0));
8013}
8014
// Validate a TBAA access tag and walk its struct path from the base node down
// to the access type, rebasing the offset at each step. Returns true when the
// tag is valid.
// NOTE(review): the function signature line (source line 8015 — a
// TBAAVerifier::visitTBAAMetadata overload taking an Instruction pointer and
// the tag MDNode) is missing from this excerpt.
8016  CheckTBAA(MD->getNumOperands() > 0, "TBAA metadata cannot have 0 operands", I,
8017            MD);
8018
8019  if (I)
    // NOTE(review): source lines 8020-8022 (the CheckTBAA condition limiting
    // which instruction kinds may carry a TBAA access tag) are missing from
    // this excerpt.
8023              "This instruction shall not have a TBAA access tag!", I);
8024
  // Struct-path tags start with an MDNode base and have >= 3 operands.
8025  bool IsStructPathTBAA =
8026      isa<MDNode>(MD->getOperand(0)) && MD->getNumOperands() >= 3;
8027
8028  CheckTBAA(IsStructPathTBAA,
8029            "Old-style TBAA is no longer allowed, use struct-path TBAA instead",
8030            I);
8031
8032  MDNode *BaseNode = dyn_cast_or_null<MDNode>(MD->getOperand(0));
8033  MDNode *AccessType = dyn_cast_or_null<MDNode>(MD->getOperand(1));
8034
8035  bool IsNewFormat = isNewFormatTBAATypeNode(AccessType);
8036
  // Operand-count constraints differ between the two struct-path formats.
8037  if (IsNewFormat) {
8038    CheckTBAA(MD->getNumOperands() == 4 || MD->getNumOperands() == 5,
8039              "Access tag metadata must have either 4 or 5 operands", I, MD);
8040  } else {
8041    CheckTBAA(MD->getNumOperands() < 5,
8042              "Struct tag metadata must have either 3 or 4 operands", I, MD);
8043  }
8044
8045  // Check the access size field.
8046  if (IsNewFormat) {
8047    auto *AccessSizeNode = mdconst::dyn_extract_or_null<ConstantInt>(
8048        MD->getOperand(3));
8049    CheckTBAA(AccessSizeNode, "Access size field must be a constant", I, MD);
8050  }
8051
8052  // Check the immutability flag.
8053  unsigned ImmutabilityFlagOpNo = IsNewFormat ? 4 : 3;
8054  if (MD->getNumOperands() == ImmutabilityFlagOpNo + 1) {
8055    auto *IsImmutableCI = mdconst::dyn_extract_or_null<ConstantInt>(
8056        MD->getOperand(ImmutabilityFlagOpNo));
8057    CheckTBAA(IsImmutableCI,
8058              "Immutability tag on struct tag metadata must be a constant", I,
8059              MD);
8060    CheckTBAA(
8061        IsImmutableCI->isZero() || IsImmutableCI->isOne(),
8062        "Immutability part of the struct tag metadata must be either 0 or 1", I,
8063        MD);
8064  }
8065
8066  CheckTBAA(BaseNode && AccessType,
8067            "Malformed struct tag metadata: base and access-type "
8068            "should be non-null and point to Metadata nodes",
8069            I, MD, BaseNode, AccessType);
8070
8071  if (!IsNewFormat) {
8072    CheckTBAA(isValidScalarTBAANode(AccessType),
8073              "Access type node must be a valid scalar type", I, MD,
8074              AccessType);
8075  }
8076
  // NOTE(review): source line 8077 (extraction of the offset operand into
  // OffsetCI, presumably via mdconst::dyn_extract_or_null<ConstantInt>) is
  // missing from this excerpt.
8078  CheckTBAA(OffsetCI, "Offset must be constant integer", I, MD);
8079
8080  APInt Offset = OffsetCI->getValue();
8081  bool SeenAccessTypeInPath = false;
8082
  // Detect cycles while descending the struct path.
8083  SmallPtrSet<MDNode *, 4> StructPath;
8084
8085  for (/* empty */; BaseNode && !IsRootTBAANode(BaseNode);
8086       BaseNode =
8087           getFieldNodeFromTBAABaseNode(I, BaseNode, Offset, IsNewFormat)) {
8088    if (!StructPath.insert(BaseNode).second) {
8089      CheckFailed("Cycle detected in struct path", I, MD);
8090      return false;
8091    }
8092
8093    bool Invalid;
8094    unsigned BaseNodeBitWidth;
8095    std::tie(Invalid, BaseNodeBitWidth) =
8096        verifyTBAABaseNode(I, BaseNode, IsNewFormat);
8097
8098    // If the base node is invalid in itself, then we've already printed all the
8099    // errors we wanted to print.
8100    if (Invalid)
8101      return false;
8102
8103    SeenAccessTypeInPath |= BaseNode == AccessType;
8104
    // Reaching a scalar (or the access type) means the remaining offset must
    // already have been consumed.
8105    if (isValidScalarTBAANode(BaseNode) || BaseNode == AccessType)
8106      CheckTBAA(Offset == 0, "Offset not zero at the point of scalar access", I,
8107                MD, &Offset);
8108
8109    CheckTBAA(BaseNodeBitWidth == Offset.getBitWidth() ||
8110                  (BaseNodeBitWidth == 0 && Offset == 0) ||
8111                  (IsNewFormat && BaseNodeBitWidth == ~0u),
8112              "Access bit-width not the same as description bit-width", I, MD,
8113              BaseNodeBitWidth, Offset.getBitWidth());
8114
8115    if (IsNewFormat && SeenAccessTypeInPath)
8116      break;
8117  }
8118
8119  CheckTBAA(SeenAccessTypeInPath, "Did not see access type in access path!", I,
8120            MD);
8121  return true;
8122}
8123
// Legacy pass registration, factory, and new pass-manager entry points.
8124char VerifierLegacyPass::ID = 0;
8125INITIALIZE_PASS(VerifierLegacyPass, "verify", "Module Verifier", false, false)
8126
// NOTE(review): the signature line (source line 8127 — presumably
// FunctionPass *llvm::createVerifierPass(bool FatalErrors)) is missing from
// this excerpt.
8128  return new VerifierLegacyPass(FatalErrors);
8129}
8130
8131AnalysisKey VerifierAnalysis::Key;
// NOTE(review): source lines 8132-8137 and 8139-8142 (presumably the two
// VerifierAnalysis::run overloads) are missing from this excerpt; only their
// trailing blank-line numbers remain below.
8138
8143
// NOTE(review): the signature line (source line 8144 — presumably
// VerifierPass::run(Module &M, ModuleAnalysisManager &AM)) is missing from
// this excerpt.
8145  auto Res = AM.getResult<VerifierAnalysis>(M);
8146  if (FatalErrors && (Res.IRBroken || Res.DebugInfoBroken))
8147    report_fatal_error("Broken module found, compilation aborted!");
8148
8149  return PreservedAnalyses::all();
8150}
8151
// NOTE(review): the signature line (source line 8152 — presumably
// VerifierPass::run(Function &F, FunctionAnalysisManager &AM)) is missing
// from this excerpt.
8153  auto res = AM.getResult<VerifierAnalysis>(F);
8154  if (res.IRBroken && FatalErrors)
8155    report_fatal_error("Broken function found, compilation aborted!");
8156
8157  return PreservedAnalyses::all();
8158}
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
AMDGPU address space definition.
ArrayRef< TableEntry > TableRef
This file declares a class to represent arbitrary precision floating point values and provide a varie...
This file implements a class to represent arbitrary precision integral constant values and operations...
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Atomic ordering constants.
@ RetAttr
@ FnAttr
This file contains the simple types necessary to represent the attributes associated with functions a...
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
Analysis containing CSE Info
Definition CSEInfo.cpp:27
This file contains the declarations for the subclasses of Constant, which represent the different fla...
This file declares the LLVM IR specialization of the GenericConvergenceVerifier template.
static DISubprogram * getSubprogram(bool IsDistinct, Ts &&...Args)
dxil translate DXIL Translate Metadata
This file defines the DenseMap class.
This file contains constants used for implementing Dwarf debug support.
static bool runOnFunction(Function &F, bool PostInlining)
#define Check(C,...)
Hexagon Common GEP
This file provides various utilities for inspecting and working with the control flow graph in LLVM I...
Module.h This file contains the declarations for the Module class.
This header defines various interfaces for pass management in LLVM.
This defines the Use class.
const size_t AbstractManglingParser< Derived, Alloc >::NumOps
#define F(x, y, z)
Definition MD5.cpp:54
#define I(x, y, z)
Definition MD5.cpp:57
Machine Check Debug Module
This file implements a map that provides insertion order iteration.
This file provides utility for Memory Model Relaxation Annotations (MMRAs).
static bool isContiguous(const ConstantRange &A, const ConstantRange &B)
This file contains the declarations for metadata subclasses.
#define T
#define T1
ConstantRange Range(APInt(BitWidth, Low), APInt(BitWidth, High))
uint64_t High
uint64_t IntrinsicInst * II
#define P(N)
ppc ctr loops verify
#define INITIALIZE_PASS(passName, arg, name, cfg, analysis)
Definition PassSupport.h:56
This file contains the declarations for profiling metadata utility functions.
const SmallVectorImpl< MachineOperand > & Cond
static bool isValid(const char C)
Returns true if C is a valid mangled character: <0-9a-zA-Z_>.
static unsigned getNumElements(Type *Ty)
void visit(MachineFunction &MF, MachineBasicBlock &Start, std::function< void(MachineBasicBlock *)> op)
This file contains some templates that are useful if you are working with the STL at all.
verify safepoint Safepoint IR Verifier
BaseType
A given derived pointer can have multiple base pointers through phi/selects.
This file defines the SmallPtrSet class.
This file defines the SmallVector class.
This file contains some functions that are useful when dealing with strings.
static unsigned getBitWidth(Type *Ty, const DataLayout &DL)
Returns the bitwidth of the given scalar or pointer type.
static bool IsScalarTBAANodeImpl(const MDNode *MD, SmallPtrSetImpl< const MDNode * > &Visited)
static bool isType(const Metadata *MD)
static Instruction * getSuccPad(Instruction *Terminator)
static bool isMDTuple(const Metadata *MD)
static bool isNewFormatTBAATypeNode(llvm::MDNode *Type)
#define CheckDI(C,...)
We know that a debug info condition should be true, if not print an error message.
Definition Verifier.cpp:681
static void forEachUser(const Value *User, SmallPtrSet< const Value *, 32 > &Visited, llvm::function_ref< bool(const Value *)> Callback)
Definition Verifier.cpp:722
static bool isDINode(const Metadata *MD)
static bool isScope(const Metadata *MD)
static cl::opt< bool > VerifyNoAliasScopeDomination("verify-noalias-scope-decl-dom", cl::Hidden, cl::init(false), cl::desc("Ensure that llvm.experimental.noalias.scope.decl for identical " "scopes are not dominating"))
static bool isTypeCongruent(Type *L, Type *R)
Two types are "congruent" if they are identical, or if they are both pointer types with different poi...
#define CheckTBAA(C,...)
static bool isConstantIntMetadataOperand(const Metadata *MD)
static bool IsRootTBAANode(const MDNode *MD)
static Value * getParentPad(Value *EHPad)
static bool hasConflictingReferenceFlags(unsigned Flags)
Detect mutually exclusive flags.
static AttrBuilder getParameterABIAttributes(LLVMContext &C, unsigned I, AttributeList Attrs)
static const char PassName[]
bool isFiniteNonZero() const
Definition APFloat.h:1441
bool isNegative() const
Definition APFloat.h:1431
const fltSemantics & getSemantics() const
Definition APFloat.h:1439
Class for arbitrary precision integers.
Definition APInt.h:78
bool sgt(const APInt &RHS) const
Signed greater than comparison.
Definition APInt.h:1202
bool isMinValue() const
Determine if this is the smallest unsigned value.
Definition APInt.h:418
bool ule(const APInt &RHS) const
Unsigned less or equal comparison.
Definition APInt.h:1151
bool isPowerOf2() const
Check if this APInt's value is a power of two greater than zero.
Definition APInt.h:441
int64_t getSExtValue() const
Get sign extended value.
Definition APInt.h:1563
bool isMaxValue() const
Determine if this is the largest unsigned value.
Definition APInt.h:400
This class represents a conversion between pointers from one address space to another.
bool isSwiftError() const
Return true if this alloca is used as a swifterror argument to a call.
LLVM_ABI bool isStaticAlloca() const
Return true if this alloca is in the entry block of the function and is a constant size.
Align getAlign() const
Return the alignment of the memory that is being allocated by the instruction.
Type * getAllocatedType() const
Return the type that is being allocated by the instruction.
unsigned getAddressSpace() const
Return the address space for the allocation.
LLVM_ABI bool isArrayAllocation() const
Return true if there is an allocation size parameter to the allocation instruction that is not 1.
const Value * getArraySize() const
Get the number of elements allocated.
PassT::Result & getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs)
Get the result of an analysis pass for a given IR unit.
void setPreservesAll()
Set by analyses that do not transform their input at all.
LLVM_ABI bool hasInRegAttr() const
Return true if this argument has the inreg attribute.
Definition Function.cpp:293
bool empty() const
empty - Check if the array is empty.
Definition ArrayRef.h:137
static bool isFPOperation(BinOp Op)
BinOp getOperation() const
static LLVM_ABI StringRef getOperationName(BinOp Op)
AtomicOrdering getOrdering() const
Returns the ordering constraint of this rmw instruction.
bool contains(Attribute::AttrKind A) const
Return true if the builder has the specified attribute.
LLVM_ABI bool hasAttribute(Attribute::AttrKind Kind) const
Return true if the attribute exists in this set.
LLVM_ABI std::string getAsString(bool InAttrGrp=false) const
Functions, function parameters, and return types can have attributes to indicate how they should be t...
Definition Attributes.h:69
LLVM_ABI const ConstantRange & getValueAsConstantRange() const
Return the attribute's value as a ConstantRange.
LLVM_ABI StringRef getValueAsString() const
Return the attribute's value as a string.
AttrKind
This enumeration lists the attributes that can be associated with parameters, function results,...
Definition Attributes.h:88
bool isValid() const
Return true if the attribute is any kind of attribute.
Definition Attributes.h:223
LLVM Basic Block Representation.
Definition BasicBlock.h:62
iterator begin()
Instruction iterator methods.
Definition BasicBlock.h:459
iterator_range< const_phi_iterator > phis() const
Returns a range that iterates over the phis in the basic block.
Definition BasicBlock.h:528
const Function * getParent() const
Return the enclosing method, or null if none.
Definition BasicBlock.h:213
LLVM_ABI InstListType::const_iterator getFirstNonPHIIt() const
Returns an iterator to the first instruction in this block that is not a PHINode instruction.
LLVM_ABI bool isEntryBlock() const
Return true if this is the entry block of the containing function.
const Instruction & front() const
Definition BasicBlock.h:482
LLVM_ABI const BasicBlock * getUniquePredecessor() const
Return the predecessor of this block if it has a unique predecessor block.
InstListType::iterator iterator
Instruction iterators...
Definition BasicBlock.h:170
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
Definition BasicBlock.h:233
This class represents a no-op cast from one type to another.
static LLVM_ABI BlockAddress * lookup(const BasicBlock *BB)
Lookup an existing BlockAddress constant for the given BasicBlock.
bool isConditional() const
Value * getCondition() const
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
bool isInlineAsm() const
Check if this call is an inline asm statement.
bool hasInAllocaArgument() const
Determine if there are is an inalloca argument.
OperandBundleUse getOperandBundleAt(unsigned Index) const
Return the operand bundle at a specific index.
Function * getCalledFunction() const
Returns the function called, or null if this is an indirect function invocation or the function signa...
bool doesNotAccessMemory(unsigned OpNo) const
bool hasFnAttr(Attribute::AttrKind Kind) const
Determine whether this call has the given attribute.
unsigned getNumOperandBundles() const
Return the number of operand bundles associated with this User.
CallingConv::ID getCallingConv() const
LLVM_ABI bool paramHasAttr(unsigned ArgNo, Attribute::AttrKind Kind) const
Determine whether the argument or parameter has the given attribute.
Attribute getParamAttr(unsigned ArgNo, Attribute::AttrKind Kind) const
Get the attribute of a given kind from a given arg.
iterator_range< bundle_op_iterator > bundle_op_infos()
Return the range [bundle_op_info_begin, bundle_op_info_end).
unsigned countOperandBundlesOfType(StringRef Name) const
Return the number of operand bundles with the tag Name attached to this instruction.
bool onlyReadsMemory(unsigned OpNo) const
Value * getCalledOperand() const
Type * getParamElementType(unsigned ArgNo) const
Extract the elementtype type for a parameter.
Value * getArgOperand(unsigned i) const
FunctionType * getFunctionType() const
LLVM_ABI Intrinsic::ID getIntrinsicID() const
Returns the intrinsic ID of the intrinsic called or Intrinsic::not_intrinsic if the called function i...
iterator_range< User::op_iterator > args()
Iteration adapter for range-for loops.
bool doesNotReturn() const
Determine if the call cannot return.
LLVM_ABI bool onlyAccessesArgMemory() const
Determine if the call can access memmory only using pointers based on its arguments.
unsigned arg_size() const
AttributeList getAttributes() const
Return the attributes for this call.
bool hasOperandBundles() const
Return true if this User has any operand bundles.
LLVM_ABI Function * getCaller()
Helper to get the caller (the parent function).
BasicBlock * getIndirectDest(unsigned i) const
unsigned getNumIndirectDests() const
Return the number of callbr indirect dest labels.
bool isMustTailCall() const
static LLVM_ABI bool castIsValid(Instruction::CastOps op, Type *SrcTy, Type *DstTy)
This method can be used to determine if a cast from SrcTy to DstTy using Opcode op is valid or not.
unsigned getNumHandlers() const
return the number of 'handlers' in this catchswitch instruction, except the default handler
Value * getParentPad() const
BasicBlock * getUnwindDest() const
handler_range handlers()
iteration adapter for range-for loops.
BasicBlock * getUnwindDest() const
bool isFPPredicate() const
Definition InstrTypes.h:782
bool isIntPredicate() const
Definition InstrTypes.h:783
static bool isIntPredicate(Predicate P)
Definition InstrTypes.h:776
bool isMinusOne() const
This function will return true iff every bit in this constant is set to true.
Definition Constants.h:231
bool isNegative() const
Definition Constants.h:214
bool isZero() const
This is just a convenience method to make client code smaller for a common code.
Definition Constants.h:219
unsigned getBitWidth() const
getBitWidth - Return the scalar bitwidth of this constant.
Definition Constants.h:162
uint64_t getZExtValue() const
Return the constant as a 64-bit unsigned integer value after it has been zero extended as appropriate...
Definition Constants.h:168
const APInt & getValue() const
Return the constant as an APInt value reference.
Definition Constants.h:159
Constant * getAddrDiscriminator() const
The address discriminator if any, or the null constant.
Definition Constants.h:1078
Constant * getPointer() const
The pointer that is signed in this ptrauth signed pointer.
Definition Constants.h:1065
ConstantInt * getKey() const
The Key ID, an i32 constant.
Definition Constants.h:1068
Constant * getDeactivationSymbol() const
Definition Constants.h:1087
ConstantInt * getDiscriminator() const
The integer discriminator, an i64 constant, or 0.
Definition Constants.h:1071
static LLVM_ABI bool isOrderedRanges(ArrayRef< ConstantRange > RangesRef)
This class represents a range of values.
const APInt & getLower() const
Return the lower value for this range.
const APInt & getUpper() const
Return the upper value for this range.
LLVM_ABI bool contains(const APInt &Val) const
Return true if the specified value is in the set.
uint32_t getBitWidth() const
Get the bit width of this ConstantRange.
static LLVM_ABI ConstantTokenNone * get(LLVMContext &Context)
Return the ConstantTokenNone.
LLVM_ABI bool isNullValue() const
Return true if this is the value that would be returned by getNullValue.
Definition Constants.cpp:90
LLVM_ABI std::optional< fp::ExceptionBehavior > getExceptionBehavior() const
LLVM_ABI std::optional< RoundingMode > getRoundingMode() const
LLVM_ABI unsigned getNonMetadataArgCount() const
DbgVariableFragmentInfo FragmentInfo
@ FixedPointBinary
Scale factor 2^Factor.
@ FixedPointDecimal
Scale factor 10^Factor.
@ FixedPointRational
Arbitrary rational scale factor.
DIGlobalVariable * getVariable() const
LLVM_ABI DISubprogram * getSubprogram() const
Get the subprogram for this scope.
DILocalScope * getScope() const
Get the local scope for this variable.
Metadata * getRawScope() const
Base class for scope-like contexts.
Subprogram description. Uses SubclassData1.
static const DIScope * getRawRetainedNodeScope(const MDNode *N)
Base class for template parameters.
Base class for variables.
Metadata * getRawType() const
Metadata * getRawScope() const
A parsed version of the target data layout string in and methods for querying it.
Definition DataLayout.h:63
Records a position in IR for a source label (DILabel).
Base class for non-instruction debug metadata records that have positions within IR.
LLVM_ABI void print(raw_ostream &O, bool IsForDebug=false) const
DebugLoc getDebugLoc() const
LLVM_ABI const BasicBlock * getParent() const
LLVM_ABI Function * getFunction()
Record of a variable value-assignment, aka a non instruction representation of the dbg....
LLVM_ABI Value * getVariableLocationOp(unsigned OpIdx) const
DIExpression * getExpression() const
DILocalVariable * getVariable() const
Metadata * getRawLocation() const
Returns the metadata operand for the first location description.
@ End
Marks the end of the concrete types.
@ Any
To indicate all LocationTypes in searches.
DIExpression * getAddressExpression() const
MDNode * getAsMDNode() const
Return this as a bar MDNode.
Definition DebugLoc.h:290
ValueT lookup(const_arg_type_t< KeyT > Val) const
lookup - Return the entry for the specified key, or a default constructed value if no such entry exis...
Definition DenseMap.h:205
iterator find(const_arg_type_t< KeyT > Val)
Definition DenseMap.h:178
bool empty() const
Definition DenseMap.h:109
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Definition DenseMap.h:241
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
Definition Dominators.h:164
This instruction extracts a single (scalar) element from a VectorType value.
static LLVM_ABI bool isValidOperands(const Value *Vec, const Value *Idx)
Return true if an extractelement instruction can be formed with the specified operands.
ArrayRef< unsigned > getIndices() const
static LLVM_ABI Type * getIndexedType(Type *Agg, ArrayRef< unsigned > Idxs)
Returns the type of the element that would be extracted with an extractvalue instruction with the spe...
This instruction compares its operands according to the predicate given to the constructor.
This class represents an extension of floating point types.
This class represents a cast from floating point to signed integer.
This class represents a cast from floating point to unsigned integer.
This class represents a truncation of floating point types.
AtomicOrdering getOrdering() const
Returns the ordering constraint of this fence instruction.
Value * getParentPad() const
Convenience accessors.
FunctionPass class - This class is used to implement most global optimizations.
Definition Pass.h:314
Type * getReturnType() const
FunctionType * getFunctionType() const
Returns the FunctionType for me.
Definition Function.h:209
Intrinsic::ID getIntrinsicID() const LLVM_READONLY
getIntrinsicID - This method returns the ID number of the specified function, or Intrinsic::not_intri...
Definition Function.h:244
DISubprogram * getSubprogram() const
Get the attached subprogram.
CallingConv::ID getCallingConv() const
getCallingConv()/setCallingConv(CC) - These method get and set the calling convention of this functio...
Definition Function.h:270
bool hasPersonalityFn() const
Check whether this function has a personality function.
Definition Function.h:903
const Function & getFunction() const
Definition Function.h:164
const std::string & getGC() const
Definition Function.cpp:834
Type * getReturnType() const
Returns the type of the ret val.
Definition Function.h:214
bool isVarArg() const
isVarArg - Return true if this function takes a variable number of arguments.
Definition Function.h:227
LLVM_ABI Value * getBasePtr() const
LLVM_ABI Value * getDerivedPtr() const
static LLVM_ABI Type * getIndexedType(Type *Ty, ArrayRef< Value * > IdxList)
Returns the result type of a getelementptr with the given source element type and indexes.
static bool isValidLinkage(LinkageTypes L)
Definition GlobalAlias.h:98
const Constant * getAliasee() const
Definition GlobalAlias.h:87
LLVM_ABI const Function * getResolverFunction() const
Definition Globals.cpp:665
static bool isValidLinkage(LinkageTypes L)
Definition GlobalIFunc.h:86
const Constant * getResolver() const
Definition GlobalIFunc.h:73
LLVM_ABI void getAllMetadata(SmallVectorImpl< std::pair< unsigned, MDNode * > > &MDs) const
Appends all metadata attached to this value to MDs, sorting by KindID.
bool hasComdat() const
MDNode * getMetadata(unsigned KindID) const
Get the current metadata attachments for the given kind, if any.
Definition Value.h:576
bool hasExternalLinkage() const
bool isDSOLocal() const
bool isImplicitDSOLocal() const
LLVM_ABI bool isDeclaration() const
Return true if the primary definition of this global value is outside of the current translation unit...
Definition Globals.cpp:328
bool hasValidDeclarationLinkage() const
LinkageTypes getLinkage() const
bool hasDefaultVisibility() const
bool hasPrivateLinkage() const
bool hasHiddenVisibility() const
bool hasExternalWeakLinkage() const
bool hasDLLImportStorageClass() const
bool hasDLLExportStorageClass() const
bool isDeclarationForLinker() const
unsigned getAddressSpace() const
Module * getParent()
Get the module that this global value is contained inside of...
PointerType * getType() const
Global values are always pointers.
LLVM_ABI bool isInterposable() const
Return true if this global's definition can be substituted with an arbitrary definition at link time ...
Definition Globals.cpp:107
bool hasComdat() const
bool hasCommonLinkage() const
bool hasGlobalUnnamedAddr() const
bool hasAppendingLinkage() const
bool hasAvailableExternallyLinkage() const
Type * getValueType() const
const Constant * getInitializer() const
getInitializer - Return the initializer for this global variable.
bool hasInitializer() const
Definitions have initializers, declarations don't.
MaybeAlign getAlign() const
Returns the alignment of the given variable.
bool isConstant() const
If the value is a global constant, its value is immutable throughout the runtime execution of the pro...
bool hasDefinitiveInitializer() const
hasDefinitiveInitializer - Whether the global variable has an initializer, and any other instances of...
This instruction compares its operands according to the predicate given to the constructor.
BasicBlock * getDestination(unsigned i)
Return the specified destination.
unsigned getNumDestinations() const
return the number of possible destinations in this indirectbr instruction.
unsigned getNumSuccessors() const
This instruction inserts a single (scalar) element into a VectorType value.
static LLVM_ABI bool isValidOperands(const Value *Vec, const Value *NewElt, const Value *Idx)
Return true if an insertelement instruction can be formed with the specified operands.
ArrayRef< unsigned > getIndices() const
Base class for instruction visitors.
Definition InstVisitor.h:78
void visit(Iterator Start, Iterator End)
Definition InstVisitor.h:87
LLVM_ABI unsigned getNumSuccessors() const LLVM_READONLY
Return the number of successors that this instruction has.
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
LLVM_ABI bool isAtomic() const LLVM_READONLY
Return true if this instruction has an AtomicOrdering of unordered or higher.
LLVM_ABI const Function * getFunction() const
Return the function this instruction belongs to.
This class represents a cast from an integer to a pointer.
static LLVM_ABI bool mayLowerToFunctionCall(Intrinsic::ID IID)
Check if the intrinsic might lower into a regular function call in the course of IR transformations.
Intrinsic::ID getIntrinsicID() const
Return the intrinsic ID of this intrinsic.
This is an important class for using LLVM in a threaded context.
Definition LLVMContext.h:68
bool isCleanup() const
Return 'true' if this landingpad instruction is a cleanup.
unsigned getNumClauses() const
Get the number of clauses for this landing pad.
bool isCatch(unsigned Idx) const
Return 'true' if the clause and index Idx is a catch clause.
bool isFilter(unsigned Idx) const
Return 'true' if the clause and index Idx is a filter clause.
Constant * getClause(unsigned Idx) const
Get the value of the clause at index Idx.
AtomicOrdering getOrdering() const
Returns the ordering constraint of this load instruction.
SyncScope::ID getSyncScopeID() const
Returns the synchronization scope ID of this load instruction.
Align getAlign() const
Return the alignment of the access that is being performed.
Metadata node.
Definition Metadata.h:1078
const MDOperand & getOperand(unsigned I) const
Definition Metadata.h:1442
bool isTemporary() const
Definition Metadata.h:1262
ArrayRef< MDOperand > operands() const
Definition Metadata.h:1440
unsigned getNumOperands() const
Return number of MDNode operands.
Definition Metadata.h:1448
bool isDistinct() const
Definition Metadata.h:1261
bool isResolved() const
Check if node is fully resolved.
Definition Metadata.h:1258
LLVMContext & getContext() const
Definition Metadata.h:1242
bool equalsStr(StringRef Str) const
Definition Metadata.h:922
Metadata * get() const
Definition Metadata.h:929
LLVM_ABI StringRef getString() const
Definition Metadata.cpp:618
static LLVM_ABI bool isTagMD(const Metadata *MD)
This class implements a map that also provides access to all stored values in a deterministic order.
Definition MapVector.h:36
static LLVM_ABI MetadataAsValue * getIfExists(LLVMContext &Context, Metadata *MD)
Definition Metadata.cpp:112
Metadata * getMetadata() const
Definition Metadata.h:201
Root of the metadata hierarchy.
Definition Metadata.h:64
LLVM_ABI void print(raw_ostream &OS, const Module *M=nullptr, bool IsForDebug=false) const
Print.
unsigned getMetadataID() const
Definition Metadata.h:104
Manage lifetime of a slot tracker for printing IR.
A Module instance is used to store all the information related to an LLVM module.
Definition Module.h:67
LLVM_ABI StringRef getName() const
LLVM_ABI void print(raw_ostream &ROS, bool IsForDebug=false) const
LLVM_ABI unsigned getNumOperands() const
iterator_range< op_iterator > operands()
Definition Metadata.h:1853
op_range incoming_values()
static LLVM_ABI PassRegistry * getPassRegistry()
getPassRegistry - Access the global registry object, which is automatically initialized at applicatio...
A set of analyses that are preserved following a run of a transformation pass.
Definition Analysis.h:112
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
Definition Analysis.h:118
This class represents a cast from a pointer to an address (non-capturing ptrtoint).
This class represents a cast from a pointer to an integer.
Value * getValue() const
Convenience accessor.
Value * getReturnValue() const
Convenience accessor. Returns null if there is no return value.
This class represents a sign extension of integer types.
This class represents a cast from signed integer to floating point.
static LLVM_ABI const char * areInvalidOperands(Value *Cond, Value *True, Value *False)
Return a string if the specified operands are invalid for a select operation, otherwise return null.
This instruction constructs a fixed permutation of two input vectors.
static LLVM_ABI bool isValidOperands(const Value *V1, const Value *V2, const Value *Mask)
Return true if a shufflevector instruction can be formed with the specified operands.
static LLVM_ABI void getShuffleMask(const Constant *Mask, SmallVectorImpl< int > &Result)
Convert the input shuffle mask operand to a vector of integers.
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
void insert_range(Range &&R)
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
void reserve(size_type N)
iterator insert(iterator I, T &&Elt)
void resize(size_type N)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
std::pair< StringRef, StringRef > split(char Separator) const
Split into two substrings around the first occurrence of a separator character.
Definition StringRef.h:702
static constexpr size_t npos
Definition StringRef.h:57
bool getAsInteger(unsigned Radix, T &Result) const
Parse the current string as an integer of the specified radix.
Definition StringRef.h:472
bool starts_with(StringRef Prefix) const
Check if this string starts with the given Prefix.
Definition StringRef.h:261
constexpr bool empty() const
empty - Check if the string is empty.
Definition StringRef.h:143
unsigned getNumElements() const
Random access to the elements.
LLVM_ABI Type * getTypeAtIndex(const Value *V) const
Given an index value into the type, return the type of the element.
Definition Type.cpp:718
LLVM_ABI bool isScalableTy(SmallPtrSetImpl< const Type * > &Visited) const
Returns true if this struct contains a scalable vector.
Definition Type.cpp:440
Verify that the TBAA Metadatas are valid.
Definition Verifier.h:40
LLVM_ABI bool visitTBAAMetadata(const Instruction *I, const MDNode *MD)
Visit an instruction, or a TBAA node itself as part of a metadata, and return true if it is valid,...
unsigned size() const
Triple - Helper class for working with autoconf configuration names.
Definition Triple.h:47
This class represents a truncation of integer types.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition Twine.h:82
The instances of the Type class are immutable: once they are created, they are never changed.
Definition Type.h:45
bool isVectorTy() const
True if this is an instance of VectorType.
Definition Type.h:273
LLVM_ABI bool containsNonGlobalTargetExtType(SmallPtrSetImpl< const Type * > &Visited) const
Return true if this type is or contains a target extension type that disallows being used as a global...
Definition Type.cpp:74
bool isArrayTy() const
True if this is an instance of ArrayType.
Definition Type.h:264
LLVM_ABI bool containsNonLocalTargetExtType(SmallPtrSetImpl< const Type * > &Visited) const
Return true if this type is or contains a target extension type that disallows being used as a local.
Definition Type.cpp:90
LLVM_ABI bool isScalableTy(SmallPtrSetImpl< const Type * > &Visited) const
Return true if this is a type whose size is a known multiple of vscale.
Definition Type.cpp:61
bool isLabelTy() const
Return true if this is 'label'.
Definition Type.h:228
bool isIntOrIntVectorTy() const
Return true if this is an integer type or a vector of integer types.
Definition Type.h:246
bool isPointerTy() const
True if this is an instance of PointerType.
Definition Type.h:267
bool isTokenLikeTy() const
Returns true if this is 'token' or a token-like target type.s.
Definition Type.cpp:1065
LLVM_ABI unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
bool isSingleValueType() const
Return true if the type is a valid type for a register in codegen.
Definition Type.h:296
LLVM_ABI bool canLosslesslyBitCastTo(Type *Ty) const
Return true if this type could be converted with a lossless BitCast to type 'Ty'.
Definition Type.cpp:153
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
Definition Type.h:352
bool isSized(SmallPtrSetImpl< Type * > *Visited=nullptr) const
Return true if it makes sense to take the size of this type.
Definition Type.h:311
LLVM_ABI unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
Definition Type.cpp:230
bool isFloatingPointTy() const
Return true if this is one of the floating-point types.
Definition Type.h:184
bool isPtrOrPtrVectorTy() const
Return true if this is a pointer type or a vector of pointer types.
Definition Type.h:270
bool isIntOrPtrTy() const
Return true if this is an integer type or a pointer type.
Definition Type.h:255
bool isIntegerTy() const
True if this is an instance of IntegerType.
Definition Type.h:240
bool isFPOrFPVectorTy() const
Return true if this is a FP type or a vector of FP.
Definition Type.h:225
bool isVoidTy() const
Return true if this is 'void'.
Definition Type.h:139
bool isMetadataTy() const
Return true if this is 'metadata'.
Definition Type.h:231
This class represents a cast unsigned integer to floating point.
op_range operands()
Definition User.h:292
Value * getOperand(unsigned i) const
Definition User.h:232
unsigned getNumOperands() const
Definition User.h:254
This class represents the va_arg llvm instruction, which returns an argument of the specified type gi...
Value * getValue() const
Definition Metadata.h:498
LLVM Value Representation.
Definition Value.h:75
iterator_range< user_iterator > materialized_users()
Definition Value.h:420
Type * getType() const
All values are typed, get the type of this value.
Definition Value.h:256
LLVM_ABI const Value * stripInBoundsOffsets(function_ref< void(const Value *)> Func=[](const Value *) {}) const
Strip off pointer casts and inbounds GEPs.
Definition Value.cpp:812
iterator_range< user_iterator > users()
Definition Value.h:426
bool materialized_use_empty() const
Definition Value.h:351
LLVM_ABI const Value * stripPointerCasts() const
Strip off pointer casts, all-zero GEPs and address space casts.
Definition Value.cpp:701
LLVM_ABI LLVMContext & getContext() const
All values hold a context through their type.
Definition Value.cpp:1099
bool hasName() const
Definition Value.h:262
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
Definition Value.cpp:322
Check a module for errors, and report separate error states for IR and debug info errors.
Definition Verifier.h:109
LLVM_ABI Result run(Module &M, ModuleAnalysisManager &)
LLVM_ABI PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM)
This class represents zero extension of integer types.
constexpr bool isNonZero() const
Definition TypeSize.h:155
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
Definition TypeSize.h:168
constexpr ScalarTy getKnownMinValue() const
Returns the minimum value this quantity can represent.
Definition TypeSize.h:165
An efficient, type-erasing, non-owning reference to a callable.
const ParentTy * getParent() const
Definition ilist_node.h:34
NodeTy * getNextNode()
Get the next node, or nullptr for the list tail.
Definition ilist_node.h:348
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition raw_ostream.h:53
CallInst * Call
This file contains the declaration of the Comdat class, which represents a single COMDAT in LLVM.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
@ FLAT_ADDRESS
Address space for flat memory.
@ GLOBAL_ADDRESS
Address space for global memory (RAT0, VTX0).
@ PRIVATE_ADDRESS
Address space for private memory.
constexpr char Align[]
Key for Kernel::Arg::Metadata::mAlign.
constexpr char Args[]
Key for Kernel::Metadata::mArgs.
constexpr char Attrs[]
Key for Kernel::Metadata::mAttrs.
bool isFlatGlobalAddrSpace(unsigned AS)
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
@ Entry
Definition COFF.h:862
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
Definition CallingConv.h:24
@ C
The default llvm calling convention, compatible with C.
Definition CallingConv.h:34
@ BasicBlock
Various leaf nodes.
Definition ISDOpcodes.h:81
LLVM_ABI MatchIntrinsicTypesResult matchIntrinsicSignature(FunctionType *FTy, ArrayRef< IITDescriptor > &Infos, SmallVectorImpl< Type * > &ArgTys)
Match the specified function type with the type constraints specified by the .td file.
LLVM_ABI void getIntrinsicInfoTableEntries(ID id, SmallVectorImpl< IITDescriptor > &T)
Return the IIT table descriptor for the specified intrinsic into an array of IITDescriptors.
@ MatchIntrinsicTypes_NoMatchRet
Definition Intrinsics.h:260
@ MatchIntrinsicTypes_NoMatchArg
Definition Intrinsics.h:261
LLVM_ABI bool hasConstrainedFPRoundingModeOperand(ID QID)
Returns true if the intrinsic ID is for one of the "ConstrainedFloating-Point Intrinsics" that take r...
LLVM_ABI StringRef getName(ID id)
Return the LLVM name for an intrinsic, such as "llvm.ppc.altivec.lvx".
static const int NoAliasScopeDeclScopeArg
Definition Intrinsics.h:41
LLVM_ABI bool matchIntrinsicVarArg(bool isVarArg, ArrayRef< IITDescriptor > &Infos)
Verify if the intrinsic has variable arguments.
std::variant< std::monostate, Loc::Single, Loc::Multi, Loc::MMI, Loc::EntryValue > Variant
Alias for the std::variant specialization base class of DbgVariable.
Definition DwarfDebug.h:189
Flag
These should be considered private to the implementation of the MCInstrDesc class.
@ System
Synchronized with respect to all concurrently executing threads.
Definition LLVMContext.h:58
LLVM_ABI std::optional< VFInfo > tryDemangleForVFABI(StringRef MangledName, const FunctionType *FTy)
Function to construct a VFInfo out of a mangled names in the following format:
@ CE
Windows NT (Windows on ARM)
Definition MCAsmInfo.h:48
LLVM_ABI AssignmentInstRange getAssignmentInsts(DIAssignID *ID)
Return a range of instructions (typically just one) that have ID as an attachment.
initializer< Ty > init(const Ty &Val)
@ DW_MACINFO_undef
Definition Dwarf.h:818
@ DW_MACINFO_start_file
Definition Dwarf.h:819
@ DW_MACINFO_define
Definition Dwarf.h:817
std::enable_if_t< detail::IsValidPointer< X, Y >::value, X * > dyn_extract_or_null(Y &&MD)
Extract a Value from Metadata, if any, allowing null.
Definition Metadata.h:708
std::enable_if_t< detail::IsValidPointer< X, Y >::value, X * > dyn_extract(Y &&MD)
Extract a Value from Metadata, if any.
Definition Metadata.h:695
std::enable_if_t< detail::IsValidPointer< X, Y >::value, X * > extract(Y &&MD)
Extract a Value from Metadata.
Definition Metadata.h:667
@ User
could "use" a pointer
NodeAddr< UseNode * > Use
Definition RDFGraph.h:385
NodeAddr< NodeBase * > Node
Definition RDFGraph.h:381
friend class Instruction
Iterator for Instructions in a `BasicBlock.
Definition BasicBlock.h:73
This is an optimization pass for GlobalISel generic memory operations.
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
Definition STLExtras.h:316
@ Low
Lower the current thread's priority such that it does not affect foreground tasks significantly.
Definition Threading.h:280
@ Offset
Definition DWP.cpp:532
FunctionAddr VTableAddr Value
Definition InstrProf.h:137
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1737
LLVM_ABI bool canInstructionHaveMMRAs(const Instruction &I)
detail::zippy< detail::zip_first, T, U, Args... > zip_equal(T &&t, U &&u, Args &&...args)
zip iterator that assumes that all iteratees have the same length.
Definition STLExtras.h:839
LLVM_ABI unsigned getBranchWeightOffset(const MDNode *ProfileData)
Return the offset to the first branch weight data.
constexpr bool isInt(int64_t x)
Checks if an integer fits into the given bit width.
Definition MathExtras.h:165
auto enumerate(FirstRange &&First, RestRanges &&...Rest)
Given two or more input ranges, returns a new range whose values are tuples (A, B,...
Definition STLExtras.h:2484
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:643
LLVM_ABI bool verifyFunction(const Function &F, raw_ostream *OS=nullptr)
Check a function for errors, useful for use when debugging a pass.
AllocFnKind
Definition Attributes.h:51
testing::Matcher< const detail::ErrorHolder & > Failed()
Definition Error.h:198
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
Definition STLExtras.h:2148
LLVM_ABI DenseMap< BasicBlock *, ColorVector > colorEHFunclets(Function &F)
If an EH funclet personality is in use (see isFuncletEHPersonality), this will recompute which blocks...
constexpr bool isPowerOf2_64(uint64_t Value)
Return true if the argument is a power of two > 0 (64 bit edition.)
Definition MathExtras.h:284
bool isa_and_nonnull(const Y &Val)
Definition Casting.h:676
Op::Description Desc
bool isScopedEHPersonality(EHPersonality Pers)
Returns true if this personality uses scope-style EH IR instructions: catchswitch,...
auto dyn_cast_or_null(const Y &Val)
Definition Casting.h:753
GenericConvergenceVerifier< SSAContext > ConvergenceVerifier
LLVM_ABI void initializeVerifierLegacyPassPass(PassRegistry &)
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
Definition MathExtras.h:279
bool isModSet(const ModRefInfo MRI)
Definition ModRef.h:49
void sort(IteratorTy Start, IteratorTy End)
Definition STLExtras.h:1634
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition Debug.cpp:207
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
Definition Error.cpp:167
FunctionAddr VTableAddr Count
Definition InstrProf.h:139
LLVM_ABI EHPersonality classifyEHPersonality(const Value *Pers)
See if the given exception handling personality function is one that we understand.
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
Definition Casting.h:547
LLVM_ABI bool isValueProfileMD(const MDNode *ProfileData)
Checks if an MDNode contains value profiling Metadata.
LLVM_ABI raw_fd_ostream & errs()
This returns a reference to a raw_ostream for standard error.
LLVM_ABI unsigned getNumBranchWeights(const MDNode &ProfileData)
AtomicOrdering
Atomic ordering for LLVM's memory model.
@ First
Helpers to iterate all locations in the MemoryEffectsBase class.
Definition ModRef.h:74
LLVM_ABI FunctionPass * createVerifierPass(bool FatalErrors=true)
FunctionAddr VTableAddr Next
Definition InstrProf.h:141
DWARFExpression::Operation Op
ArrayRef(const T &OneElt) -> ArrayRef< T >
constexpr unsigned BitWidth
TinyPtrVector< BasicBlock * > ColorVector
LLVM_ABI const char * LLVMLoopEstimatedTripCount
Profile-based loop metadata that should be accessed only by using llvm::getLoopEstimatedTripCount and...
DenormalMode parseDenormalFPAttribute(StringRef Str)
Returns the denormal mode to use for inputs and outputs.
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:559
LLVM_ABI std::optional< RoundingMode > convertStrToRoundingMode(StringRef)
Returns a valid RoundingMode enumerator when given a string that is valid as input in constrained int...
Definition FPEnv.cpp:25
LLVM_ABI std::unique_ptr< GCStrategy > getGCStrategy(const StringRef Name)
Lookup the GCStrategy object associated with the given gc name.
auto predecessors(const MachineBasicBlock *BB)
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Definition STLExtras.h:1909
bool pred_empty(const BasicBlock *BB)
Definition CFG.h:119
bool isHexDigit(char C)
Checks if character C is a hexadecimal numeric character.
AnalysisManager< Function > FunctionAnalysisManager
Convenience typedef for the Function analysis manager.
constexpr bool isCallableCC(CallingConv::ID CC)
LLVM_ABI bool verifyModule(const Module &M, raw_ostream *OS=nullptr, bool *BrokenDebugInfo=nullptr)
Check a module for errors.
AnalysisManager< Module > ModuleAnalysisManager
Convenience typedef for the Module analysis manager.
Definition MIRParser.h:39
#define N
constexpr uint64_t value() const
This is a hole in the type system and should not be abused.
Definition Alignment.h:77
A special type used by analysis passes to provide an address that identifies that particular analysis...
Definition Analysis.h:29
static LLVM_ABI const char * SyntheticFunctionEntryCount
static LLVM_ABI const char * UnknownBranchWeightsMarker
static LLVM_ABI const char * ValueProfile
static LLVM_ABI const char * FunctionEntryCount
static LLVM_ABI const char * BranchWeights
uint32_t getTagID() const
Return the tag of this operand bundle as an integer.
ArrayRef< Use > Inputs
void DebugInfoCheckFailed(const Twine &Message)
A debug info check failed.
Definition Verifier.cpp:305
VerifierSupport(raw_ostream *OS, const Module &M)
Definition Verifier.cpp:154
bool Broken
Track the brokenness of the module while recursively visiting.
Definition Verifier.cpp:148
void CheckFailed(const Twine &Message, const T1 &V1, const Ts &... Vs)
A check failed (with values to print).
Definition Verifier.cpp:298
bool BrokenDebugInfo
Broken debug info can be "recovered" from by stripping the debug info.
Definition Verifier.cpp:150
LLVMContext & Context
Definition Verifier.cpp:145
bool TreatBrokenDebugInfoAsError
Whether to treat broken debug info as an error.
Definition Verifier.cpp:152
void CheckFailed(const Twine &Message)
A check failed, so printout out the condition and the message.
Definition Verifier.cpp:287
const Module & M
Definition Verifier.cpp:141
const DataLayout & DL
Definition Verifier.cpp:144
void DebugInfoCheckFailed(const Twine &Message, const T1 &V1, const Ts &... Vs)
A debug info check failed (with values to print).
Definition Verifier.cpp:314
const Triple & TT
Definition Verifier.cpp:143
ModuleSlotTracker MST
Definition Verifier.cpp:142