LLVM 22.0.0git
Verifier.cpp
Go to the documentation of this file.
1//===-- Verifier.cpp - Implement the Module Verifier -----------------------==//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file defines the function verifier interface, that can be used for some
10// basic correctness checking of input to the system.
11//
12// Note that this does not provide full `Java style' security and verifications,
13// instead it just tries to ensure that code is well-formed.
14//
15// * Both of a binary operator's parameters are of the same type
16// * Verify that the indices of mem access instructions match other operands
17// * Verify that arithmetic and other things are only performed on first-class
18// types. Verify that shifts & logicals only happen on integrals f.e.
19// * All of the constants in a switch statement are of the correct type
20// * The code is in valid SSA form
21// * It should be illegal to put a label into any other type (like a structure)
22// or to return one. [except constant arrays!]
23// * Only phi nodes can be self referential: 'add i32 %0, %0 ; <int>:0' is bad
24// * PHI nodes must have an entry for each predecessor, with no extras.
25// * PHI nodes must be the first thing in a basic block, all grouped together
26// * All basic blocks should only end with terminator insts, not contain them
27// * The entry node to a function must not have predecessors
28// * All Instructions must be embedded into a basic block
29// * Functions cannot take a void-typed parameter
30// * Verify that a function's argument list agrees with its declared type.
31// * It is illegal to specify a name for a void value.
32// * It is illegal to have an internal global value with no initializer
33// * It is illegal to have a ret instruction that returns a value that does not
34// agree with the function return value type.
35// * Function call argument types match the function prototype
36// * A landing pad is defined by a landingpad instruction, and can be jumped to
37// only by the unwind edge of an invoke instruction.
38// * A landingpad instruction must be the first non-PHI instruction in the
39// block.
40// * Landingpad instructions must be in a function with a personality function.
41// * Convergence control intrinsics are introduced in ConvergentOperations.rst.
42// The applied restrictions are too numerous to list here.
43// * The convergence entry intrinsic and the loop heart must be the first
44// non-PHI instruction in their respective block. This does not conflict with
45// the landing pads, since these two kinds cannot occur in the same block.
46// * All other things that are tested by asserts spread about the code...
47//
48//===----------------------------------------------------------------------===//
49
50#include "llvm/IR/Verifier.h"
51#include "llvm/ADT/APFloat.h"
52#include "llvm/ADT/APInt.h"
53#include "llvm/ADT/ArrayRef.h"
54#include "llvm/ADT/DenseMap.h"
55#include "llvm/ADT/MapVector.h"
56#include "llvm/ADT/STLExtras.h"
60#include "llvm/ADT/StringRef.h"
61#include "llvm/ADT/Twine.h"
63#include "llvm/IR/Argument.h"
65#include "llvm/IR/Attributes.h"
66#include "llvm/IR/BasicBlock.h"
67#include "llvm/IR/CFG.h"
68#include "llvm/IR/CallingConv.h"
69#include "llvm/IR/Comdat.h"
70#include "llvm/IR/Constant.h"
73#include "llvm/IR/Constants.h"
75#include "llvm/IR/DataLayout.h"
76#include "llvm/IR/DebugInfo.h"
78#include "llvm/IR/DebugLoc.h"
80#include "llvm/IR/Dominators.h"
82#include "llvm/IR/Function.h"
83#include "llvm/IR/GCStrategy.h"
84#include "llvm/IR/GlobalAlias.h"
85#include "llvm/IR/GlobalValue.h"
87#include "llvm/IR/InlineAsm.h"
88#include "llvm/IR/InstVisitor.h"
89#include "llvm/IR/InstrTypes.h"
90#include "llvm/IR/Instruction.h"
93#include "llvm/IR/Intrinsics.h"
94#include "llvm/IR/IntrinsicsAArch64.h"
95#include "llvm/IR/IntrinsicsAMDGPU.h"
96#include "llvm/IR/IntrinsicsARM.h"
97#include "llvm/IR/IntrinsicsNVPTX.h"
98#include "llvm/IR/IntrinsicsWebAssembly.h"
99#include "llvm/IR/LLVMContext.h"
101#include "llvm/IR/Metadata.h"
102#include "llvm/IR/Module.h"
104#include "llvm/IR/PassManager.h"
106#include "llvm/IR/Statepoint.h"
107#include "llvm/IR/Type.h"
108#include "llvm/IR/Use.h"
109#include "llvm/IR/User.h"
111#include "llvm/IR/Value.h"
113#include "llvm/Pass.h"
117#include "llvm/Support/Casting.h"
121#include "llvm/Support/ModRef.h"
124#include <algorithm>
125#include <cassert>
126#include <cstdint>
127#include <memory>
128#include <optional>
129#include <string>
130#include <utility>
131
132using namespace llvm;
133
135 "verify-noalias-scope-decl-dom", cl::Hidden, cl::init(false),
136 cl::desc("Ensure that llvm.experimental.noalias.scope.decl for identical "
137 "scopes are not dominating"));
138
141 const Module &M;
143 const Triple &TT;
146
147 /// Track the brokenness of the module while recursively visiting.
148 bool Broken = false;
149 /// Broken debug info can be "recovered" from by stripping the debug info.
150 bool BrokenDebugInfo = false;
151 /// Whether to treat broken debug info as an error.
153
155 : OS(OS), M(M), MST(&M), TT(M.getTargetTriple()), DL(M.getDataLayout()),
156 Context(M.getContext()) {}
157
158private:
159 void Write(const Module *M) {
160 *OS << "; ModuleID = '" << M->getModuleIdentifier() << "'\n";
161 }
162
163 void Write(const Value *V) {
164 if (V)
165 Write(*V);
166 }
167
168 void Write(const Value &V) {
169 if (isa<Instruction>(V)) {
170 V.print(*OS, MST);
171 *OS << '\n';
172 } else {
173 V.printAsOperand(*OS, true, MST);
174 *OS << '\n';
175 }
176 }
177
178 void Write(const DbgRecord *DR) {
179 if (DR) {
180 DR->print(*OS, MST, false);
181 *OS << '\n';
182 }
183 }
184
186 switch (Type) {
188 *OS << "value";
189 break;
191 *OS << "declare";
192 break;
194 *OS << "declare_value";
195 break;
197 *OS << "assign";
198 break;
200 *OS << "end";
201 break;
203 *OS << "any";
204 break;
205 };
206 }
207
208 void Write(const Metadata *MD) {
209 if (!MD)
210 return;
211 MD->print(*OS, MST, &M);
212 *OS << '\n';
213 }
214
215 template <class T> void Write(const MDTupleTypedArrayWrapper<T> &MD) {
216 Write(MD.get());
217 }
218
219 void Write(const NamedMDNode *NMD) {
220 if (!NMD)
221 return;
222 NMD->print(*OS, MST);
223 *OS << '\n';
224 }
225
226 void Write(Type *T) {
227 if (!T)
228 return;
229 *OS << ' ' << *T;
230 }
231
232 void Write(const Comdat *C) {
233 if (!C)
234 return;
235 *OS << *C;
236 }
237
238 void Write(const APInt *AI) {
239 if (!AI)
240 return;
241 *OS << *AI << '\n';
242 }
243
244 void Write(const unsigned i) { *OS << i << '\n'; }
245
246 // NOLINTNEXTLINE(readability-identifier-naming)
247 void Write(const Attribute *A) {
248 if (!A)
249 return;
250 *OS << A->getAsString() << '\n';
251 }
252
253 // NOLINTNEXTLINE(readability-identifier-naming)
254 void Write(const AttributeSet *AS) {
255 if (!AS)
256 return;
257 *OS << AS->getAsString() << '\n';
258 }
259
260 // NOLINTNEXTLINE(readability-identifier-naming)
261 void Write(const AttributeList *AL) {
262 if (!AL)
263 return;
264 AL->print(*OS);
265 }
266
267 void Write(Printable P) { *OS << P << '\n'; }
268
269 template <typename T> void Write(ArrayRef<T> Vs) {
270 for (const T &V : Vs)
271 Write(V);
272 }
273
274 template <typename T1, typename... Ts>
275 void WriteTs(const T1 &V1, const Ts &... Vs) {
276 Write(V1);
277 WriteTs(Vs...);
278 }
279
280 template <typename... Ts> void WriteTs() {}
281
282public:
283 /// A check failed, so printout out the condition and the message.
284 ///
285 /// This provides a nice place to put a breakpoint if you want to see why
286 /// something is not correct.
287 void CheckFailed(const Twine &Message) {
288 if (OS)
289 *OS << Message << '\n';
290 Broken = true;
291 }
292
293 /// A check failed (with values to print).
294 ///
295 /// This calls the Message-only version so that the above is easier to set a
296 /// breakpoint on.
297 template <typename T1, typename... Ts>
298 void CheckFailed(const Twine &Message, const T1 &V1, const Ts &... Vs) {
299 CheckFailed(Message);
300 if (OS)
301 WriteTs(V1, Vs...);
302 }
303
304 /// A debug info check failed.
305 void DebugInfoCheckFailed(const Twine &Message) {
306 if (OS)
307 *OS << Message << '\n';
309 BrokenDebugInfo = true;
310 }
311
312 /// A debug info check failed (with values to print).
313 template <typename T1, typename... Ts>
314 void DebugInfoCheckFailed(const Twine &Message, const T1 &V1,
315 const Ts &... Vs) {
316 DebugInfoCheckFailed(Message);
317 if (OS)
318 WriteTs(V1, Vs...);
319 }
320};
321
322namespace {
323
324class Verifier : public InstVisitor<Verifier>, VerifierSupport {
325 friend class InstVisitor<Verifier>;
326 DominatorTree DT;
327
328 /// When verifying a basic block, keep track of all of the
329 /// instructions we have seen so far.
330 ///
331 /// This allows us to do efficient dominance checks for the case when an
332 /// instruction has an operand that is an instruction in the same block.
333 SmallPtrSet<Instruction *, 16> InstsInThisBlock;
334
335 /// Keep track of the metadata nodes that have been checked already.
337
338 /// Keep track which DISubprogram is attached to which function.
340
341 /// Track all DICompileUnits visited.
343
344 /// The result type for a landingpad.
345 Type *LandingPadResultTy;
346
347 /// Whether we've seen a call to @llvm.localescape in this function
348 /// already.
349 bool SawFrameEscape;
350
351 /// Whether the current function has a DISubprogram attached to it.
352 bool HasDebugInfo = false;
353
354 /// Stores the count of how many objects were passed to llvm.localescape for a
355 /// given function and the largest index passed to llvm.localrecover.
357
358 // Maps catchswitches and cleanuppads that unwind to siblings to the
359 // terminators that indicate the unwind, used to detect cycles therein.
361
362 /// Cache which blocks are in which funclet, if an EH funclet personality is
363 /// in use. Otherwise empty.
364 DenseMap<BasicBlock *, ColorVector> BlockEHFuncletColors;
365
366 /// Cache of constants visited in search of ConstantExprs.
367 SmallPtrSet<const Constant *, 32> ConstantExprVisited;
368
369 /// Cache of declarations of the llvm.experimental.deoptimize.<ty> intrinsic.
370 SmallVector<const Function *, 4> DeoptimizeDeclarations;
371
372 /// Cache of attribute lists verified.
373 SmallPtrSet<const void *, 32> AttributeListsVisited;
374
375 // Verify that this GlobalValue is only used in this module.
376 // This map is used to avoid visiting uses twice. We can arrive at a user
377 // twice, if they have multiple operands. In particular for very large
378 // constant expressions, we can arrive at a particular user many times.
379 SmallPtrSet<const Value *, 32> GlobalValueVisited;
380
381 // Keeps track of duplicate function argument debug info.
383
384 TBAAVerifier TBAAVerifyHelper;
385 ConvergenceVerifier ConvergenceVerifyHelper;
386
387 SmallVector<IntrinsicInst *, 4> NoAliasScopeDecls;
388
389 void checkAtomicMemAccessSize(Type *Ty, const Instruction *I);
390
391public:
392 explicit Verifier(raw_ostream *OS, bool ShouldTreatBrokenDebugInfoAsError,
393 const Module &M)
394 : VerifierSupport(OS, M), LandingPadResultTy(nullptr),
395 SawFrameEscape(false), TBAAVerifyHelper(this) {
396 TreatBrokenDebugInfoAsError = ShouldTreatBrokenDebugInfoAsError;
397 }
398
399 bool hasBrokenDebugInfo() const { return BrokenDebugInfo; }
400
401 bool verify(const Function &F) {
402 llvm::TimeTraceScope timeScope("Verifier");
403 assert(F.getParent() == &M &&
404 "An instance of this class only works with a specific module!");
405
406 // First ensure the function is well-enough formed to compute dominance
407 // information, and directly compute a dominance tree. We don't rely on the
408 // pass manager to provide this as it isolates us from a potentially
409 // out-of-date dominator tree and makes it significantly more complex to run
410 // this code outside of a pass manager.
411 // FIXME: It's really gross that we have to cast away constness here.
412 if (!F.empty())
413 DT.recalculate(const_cast<Function &>(F));
414
415 for (const BasicBlock &BB : F) {
416 if (!BB.empty() && BB.back().isTerminator())
417 continue;
418
419 if (OS) {
420 *OS << "Basic Block in function '" << F.getName()
421 << "' does not have terminator!\n";
422 BB.printAsOperand(*OS, true, MST);
423 *OS << "\n";
424 }
425 return false;
426 }
427
428 auto FailureCB = [this](const Twine &Message) {
429 this->CheckFailed(Message);
430 };
431 ConvergenceVerifyHelper.initialize(OS, FailureCB, F);
432
433 Broken = false;
434 // FIXME: We strip const here because the inst visitor strips const.
435 visit(const_cast<Function &>(F));
436 verifySiblingFuncletUnwinds();
437
438 if (ConvergenceVerifyHelper.sawTokens())
439 ConvergenceVerifyHelper.verify(DT);
440
441 InstsInThisBlock.clear();
442 DebugFnArgs.clear();
443 LandingPadResultTy = nullptr;
444 SawFrameEscape = false;
445 SiblingFuncletInfo.clear();
446 verifyNoAliasScopeDecl();
447 NoAliasScopeDecls.clear();
448
449 return !Broken;
450 }
451
452 /// Verify the module that this instance of \c Verifier was initialized with.
453 bool verify() {
454 Broken = false;
455
456 // Collect all declarations of the llvm.experimental.deoptimize intrinsic.
457 for (const Function &F : M)
458 if (F.getIntrinsicID() == Intrinsic::experimental_deoptimize)
459 DeoptimizeDeclarations.push_back(&F);
460
461 // Now that we've visited every function, verify that we never asked to
462 // recover a frame index that wasn't escaped.
463 verifyFrameRecoverIndices();
464 for (const GlobalVariable &GV : M.globals())
465 visitGlobalVariable(GV);
466
467 for (const GlobalAlias &GA : M.aliases())
468 visitGlobalAlias(GA);
469
470 for (const GlobalIFunc &GI : M.ifuncs())
471 visitGlobalIFunc(GI);
472
473 for (const NamedMDNode &NMD : M.named_metadata())
474 visitNamedMDNode(NMD);
475
476 for (const StringMapEntry<Comdat> &SMEC : M.getComdatSymbolTable())
477 visitComdat(SMEC.getValue());
478
479 visitModuleFlags();
480 visitModuleIdents();
481 visitModuleCommandLines();
482 visitModuleErrnoTBAA();
483
484 verifyCompileUnits();
485
486 verifyDeoptimizeCallingConvs();
487 DISubprogramAttachments.clear();
488 return !Broken;
489 }
490
491private:
492 /// Whether a metadata node is allowed to be, or contain, a DILocation.
493 enum class AreDebugLocsAllowed { No, Yes };
494
495 /// Metadata that should be treated as a range, with slightly different
496 /// requirements.
497 enum class RangeLikeMetadataKind {
498 Range, // MD_range
499 AbsoluteSymbol, // MD_absolute_symbol
500 NoaliasAddrspace // MD_noalias_addrspace
501 };
502
503 // Verification methods...
504 void visitGlobalValue(const GlobalValue &GV);
505 void visitGlobalVariable(const GlobalVariable &GV);
506 void visitGlobalAlias(const GlobalAlias &GA);
507 void visitGlobalIFunc(const GlobalIFunc &GI);
508 void visitAliaseeSubExpr(const GlobalAlias &A, const Constant &C);
509 void visitAliaseeSubExpr(SmallPtrSetImpl<const GlobalAlias *> &Visited,
510 const GlobalAlias &A, const Constant &C);
511 void visitNamedMDNode(const NamedMDNode &NMD);
512 void visitMDNode(const MDNode &MD, AreDebugLocsAllowed AllowLocs);
513 void visitMetadataAsValue(const MetadataAsValue &MD, Function *F);
514 void visitValueAsMetadata(const ValueAsMetadata &MD, Function *F);
515 void visitDIArgList(const DIArgList &AL, Function *F);
516 void visitComdat(const Comdat &C);
517 void visitModuleIdents();
518 void visitModuleCommandLines();
519 void visitModuleErrnoTBAA();
520 void visitModuleFlags();
521 void visitModuleFlag(const MDNode *Op,
522 DenseMap<const MDString *, const MDNode *> &SeenIDs,
523 SmallVectorImpl<const MDNode *> &Requirements);
524 void visitModuleFlagCGProfileEntry(const MDOperand &MDO);
525 void visitFunction(const Function &F);
526 void visitBasicBlock(BasicBlock &BB);
527 void verifyRangeLikeMetadata(const Value &V, const MDNode *Range, Type *Ty,
528 RangeLikeMetadataKind Kind);
529 void visitRangeMetadata(Instruction &I, MDNode *Range, Type *Ty);
530 void visitNoaliasAddrspaceMetadata(Instruction &I, MDNode *Range, Type *Ty);
531 void visitDereferenceableMetadata(Instruction &I, MDNode *MD);
532 void visitNofreeMetadata(Instruction &I, MDNode *MD);
533 void visitProfMetadata(Instruction &I, MDNode *MD);
534 void visitCallStackMetadata(MDNode *MD);
535 void visitMemProfMetadata(Instruction &I, MDNode *MD);
536 void visitCallsiteMetadata(Instruction &I, MDNode *MD);
537 void visitCalleeTypeMetadata(Instruction &I, MDNode *MD);
538 void visitDIAssignIDMetadata(Instruction &I, MDNode *MD);
539 void visitMMRAMetadata(Instruction &I, MDNode *MD);
540 void visitAnnotationMetadata(MDNode *Annotation);
541 void visitAliasScopeMetadata(const MDNode *MD);
542 void visitAliasScopeListMetadata(const MDNode *MD);
543 void visitAccessGroupMetadata(const MDNode *MD);
544 void visitCapturesMetadata(Instruction &I, const MDNode *Captures);
545 void visitAllocTokenMetadata(Instruction &I, MDNode *MD);
546
547 template <class Ty> bool isValidMetadataArray(const MDTuple &N);
548#define HANDLE_SPECIALIZED_MDNODE_LEAF(CLASS) void visit##CLASS(const CLASS &N);
549#include "llvm/IR/Metadata.def"
550 void visitDIScope(const DIScope &N);
551 void visitDIVariable(const DIVariable &N);
552 void visitDILexicalBlockBase(const DILexicalBlockBase &N);
553 void visitDITemplateParameter(const DITemplateParameter &N);
554
555 void visitTemplateParams(const MDNode &N, const Metadata &RawParams);
556
557 void visit(DbgLabelRecord &DLR);
558 void visit(DbgVariableRecord &DVR);
559 // InstVisitor overrides...
560 using InstVisitor<Verifier>::visit;
561 void visitDbgRecords(Instruction &I);
562 void visit(Instruction &I);
563
564 void visitTruncInst(TruncInst &I);
565 void visitZExtInst(ZExtInst &I);
566 void visitSExtInst(SExtInst &I);
567 void visitFPTruncInst(FPTruncInst &I);
568 void visitFPExtInst(FPExtInst &I);
569 void visitFPToUIInst(FPToUIInst &I);
570 void visitFPToSIInst(FPToSIInst &I);
571 void visitUIToFPInst(UIToFPInst &I);
572 void visitSIToFPInst(SIToFPInst &I);
573 void visitIntToPtrInst(IntToPtrInst &I);
574 void checkPtrToAddr(Type *SrcTy, Type *DestTy, const Value &V);
575 void visitPtrToAddrInst(PtrToAddrInst &I);
576 void visitPtrToIntInst(PtrToIntInst &I);
577 void visitBitCastInst(BitCastInst &I);
578 void visitAddrSpaceCastInst(AddrSpaceCastInst &I);
579 void visitPHINode(PHINode &PN);
580 void visitCallBase(CallBase &Call);
581 void visitUnaryOperator(UnaryOperator &U);
582 void visitBinaryOperator(BinaryOperator &B);
583 void visitICmpInst(ICmpInst &IC);
584 void visitFCmpInst(FCmpInst &FC);
585 void visitExtractElementInst(ExtractElementInst &EI);
586 void visitInsertElementInst(InsertElementInst &EI);
587 void visitShuffleVectorInst(ShuffleVectorInst &EI);
588 void visitVAArgInst(VAArgInst &VAA) { visitInstruction(VAA); }
589 void visitCallInst(CallInst &CI);
590 void visitInvokeInst(InvokeInst &II);
591 void visitGetElementPtrInst(GetElementPtrInst &GEP);
592 void visitLoadInst(LoadInst &LI);
593 void visitStoreInst(StoreInst &SI);
594 void verifyDominatesUse(Instruction &I, unsigned i);
595 void visitInstruction(Instruction &I);
596 void visitTerminator(Instruction &I);
597 void visitBranchInst(BranchInst &BI);
598 void visitReturnInst(ReturnInst &RI);
599 void visitSwitchInst(SwitchInst &SI);
600 void visitIndirectBrInst(IndirectBrInst &BI);
601 void visitCallBrInst(CallBrInst &CBI);
602 void visitSelectInst(SelectInst &SI);
603 void visitUserOp1(Instruction &I);
604 void visitUserOp2(Instruction &I) { visitUserOp1(I); }
605 void visitIntrinsicCall(Intrinsic::ID ID, CallBase &Call);
606 void visitConstrainedFPIntrinsic(ConstrainedFPIntrinsic &FPI);
607 void visitVPIntrinsic(VPIntrinsic &VPI);
608 void visitDbgLabelIntrinsic(StringRef Kind, DbgLabelInst &DLI);
609 void visitAtomicCmpXchgInst(AtomicCmpXchgInst &CXI);
610 void visitAtomicRMWInst(AtomicRMWInst &RMWI);
611 void visitFenceInst(FenceInst &FI);
612 void visitAllocaInst(AllocaInst &AI);
613 void visitExtractValueInst(ExtractValueInst &EVI);
614 void visitInsertValueInst(InsertValueInst &IVI);
615 void visitEHPadPredecessors(Instruction &I);
616 void visitLandingPadInst(LandingPadInst &LPI);
617 void visitResumeInst(ResumeInst &RI);
618 void visitCatchPadInst(CatchPadInst &CPI);
619 void visitCatchReturnInst(CatchReturnInst &CatchReturn);
620 void visitCleanupPadInst(CleanupPadInst &CPI);
621 void visitFuncletPadInst(FuncletPadInst &FPI);
622 void visitCatchSwitchInst(CatchSwitchInst &CatchSwitch);
623 void visitCleanupReturnInst(CleanupReturnInst &CRI);
624
625 void verifySwiftErrorCall(CallBase &Call, const Value *SwiftErrorVal);
626 void verifySwiftErrorValue(const Value *SwiftErrorVal);
627 void verifyTailCCMustTailAttrs(const AttrBuilder &Attrs, StringRef Context);
628 void verifyMustTailCall(CallInst &CI);
629 bool verifyAttributeCount(AttributeList Attrs, unsigned Params);
630 void verifyAttributeTypes(AttributeSet Attrs, const Value *V);
631 void verifyParameterAttrs(AttributeSet Attrs, Type *Ty, const Value *V);
632 void checkUnsignedBaseTenFuncAttr(AttributeList Attrs, StringRef Attr,
633 const Value *V);
634 void verifyFunctionAttrs(FunctionType *FT, AttributeList Attrs,
635 const Value *V, bool IsIntrinsic, bool IsInlineAsm);
636 void verifyFunctionMetadata(ArrayRef<std::pair<unsigned, MDNode *>> MDs);
637 void verifyUnknownProfileMetadata(MDNode *MD);
638 void visitConstantExprsRecursively(const Constant *EntryC);
639 void visitConstantExpr(const ConstantExpr *CE);
640 void visitConstantPtrAuth(const ConstantPtrAuth *CPA);
641 void verifyInlineAsmCall(const CallBase &Call);
642 void verifyStatepoint(const CallBase &Call);
643 void verifyFrameRecoverIndices();
644 void verifySiblingFuncletUnwinds();
645
646 void verifyFragmentExpression(const DbgVariableRecord &I);
647 template <typename ValueOrMetadata>
648 void verifyFragmentExpression(const DIVariable &V,
650 ValueOrMetadata *Desc);
651 void verifyFnArgs(const DbgVariableRecord &DVR);
652 void verifyNotEntryValue(const DbgVariableRecord &I);
653
654 /// Module-level debug info verification...
655 void verifyCompileUnits();
656
657 /// Module-level verification that all @llvm.experimental.deoptimize
658 /// declarations share the same calling convention.
659 void verifyDeoptimizeCallingConvs();
660
661 void verifyAttachedCallBundle(const CallBase &Call,
662 const OperandBundleUse &BU);
663
664 /// Verify the llvm.experimental.noalias.scope.decl declarations
665 void verifyNoAliasScopeDecl();
666};
667
668} // end anonymous namespace
669
670/// We know that cond should be true, if not print an error message.
671#define Check(C, ...) \
672 do { \
673 if (!(C)) { \
674 CheckFailed(__VA_ARGS__); \
675 return; \
676 } \
677 } while (false)
678
679/// We know that a debug info condition should be true, if not print
680/// an error message.
681#define CheckDI(C, ...) \
682 do { \
683 if (!(C)) { \
684 DebugInfoCheckFailed(__VA_ARGS__); \
685 return; \
686 } \
687 } while (false)
688
689void Verifier::visitDbgRecords(Instruction &I) {
690 if (!I.DebugMarker)
691 return;
692 CheckDI(I.DebugMarker->MarkedInstr == &I,
693 "Instruction has invalid DebugMarker", &I);
694 CheckDI(!isa<PHINode>(&I) || !I.hasDbgRecords(),
695 "PHI Node must not have any attached DbgRecords", &I);
696 for (DbgRecord &DR : I.getDbgRecordRange()) {
697 CheckDI(DR.getMarker() == I.DebugMarker,
698 "DbgRecord had invalid DebugMarker", &I, &DR);
699 if (auto *Loc =
701 visitMDNode(*Loc, AreDebugLocsAllowed::Yes);
702 if (auto *DVR = dyn_cast<DbgVariableRecord>(&DR)) {
703 visit(*DVR);
704 // These have to appear after `visit` for consistency with existing
705 // intrinsic behaviour.
706 verifyFragmentExpression(*DVR);
707 verifyNotEntryValue(*DVR);
708 } else if (auto *DLR = dyn_cast<DbgLabelRecord>(&DR)) {
709 visit(*DLR);
710 }
711 }
712}
713
714void Verifier::visit(Instruction &I) {
715 visitDbgRecords(I);
716 for (unsigned i = 0, e = I.getNumOperands(); i != e; ++i)
717 Check(I.getOperand(i) != nullptr, "Operand is null", &I);
719}
720
721// Helper to iterate over indirect users. By returning false, the callback can ask to stop traversing further.
722static void forEachUser(const Value *User,
724 llvm::function_ref<bool(const Value *)> Callback) {
725 if (!Visited.insert(User).second)
726 return;
727
729 while (!WorkList.empty()) {
730 const Value *Cur = WorkList.pop_back_val();
731 if (!Visited.insert(Cur).second)
732 continue;
733 if (Callback(Cur))
734 append_range(WorkList, Cur->materialized_users());
735 }
736}
737
738void Verifier::visitGlobalValue(const GlobalValue &GV) {
740 "Global is external, but doesn't have external or weak linkage!", &GV);
741
742 if (const GlobalObject *GO = dyn_cast<GlobalObject>(&GV)) {
743 if (const MDNode *Associated =
744 GO->getMetadata(LLVMContext::MD_associated)) {
745 Check(Associated->getNumOperands() == 1,
746 "associated metadata must have one operand", &GV, Associated);
747 const Metadata *Op = Associated->getOperand(0).get();
748 Check(Op, "associated metadata must have a global value", GO, Associated);
749
750 const auto *VM = dyn_cast_or_null<ValueAsMetadata>(Op);
751 Check(VM, "associated metadata must be ValueAsMetadata", GO, Associated);
752 if (VM) {
753 Check(isa<PointerType>(VM->getValue()->getType()),
754 "associated value must be pointer typed", GV, Associated);
755
756 const Value *Stripped = VM->getValue()->stripPointerCastsAndAliases();
757 Check(isa<GlobalObject>(Stripped) || isa<Constant>(Stripped),
758 "associated metadata must point to a GlobalObject", GO, Stripped);
759 Check(Stripped != GO,
760 "global values should not associate to themselves", GO,
761 Associated);
762 }
763 }
764
765 // FIXME: Why is getMetadata on GlobalValue protected?
766 if (const MDNode *AbsoluteSymbol =
767 GO->getMetadata(LLVMContext::MD_absolute_symbol)) {
768 verifyRangeLikeMetadata(*GO, AbsoluteSymbol,
769 DL.getIntPtrType(GO->getType()),
770 RangeLikeMetadataKind::AbsoluteSymbol);
771 }
772 }
773
775 "Only global variables can have appending linkage!", &GV);
776
777 if (GV.hasAppendingLinkage()) {
778 const GlobalVariable *GVar = dyn_cast<GlobalVariable>(&GV);
779 Check(GVar && GVar->getValueType()->isArrayTy(),
780 "Only global arrays can have appending linkage!", GVar);
781 }
782
783 if (GV.isDeclarationForLinker())
784 Check(!GV.hasComdat(), "Declaration may not be in a Comdat!", &GV);
785
786 if (GV.hasDLLExportStorageClass()) {
788 "dllexport GlobalValue must have default or protected visibility",
789 &GV);
790 }
791 if (GV.hasDLLImportStorageClass()) {
793 "dllimport GlobalValue must have default visibility", &GV);
794 Check(!GV.isDSOLocal(), "GlobalValue with DLLImport Storage is dso_local!",
795 &GV);
796
797 Check((GV.isDeclaration() &&
800 "Global is marked as dllimport, but not external", &GV);
801 }
802
803 if (GV.isImplicitDSOLocal())
804 Check(GV.isDSOLocal(),
805 "GlobalValue with local linkage or non-default "
806 "visibility must be dso_local!",
807 &GV);
808
809 forEachUser(&GV, GlobalValueVisited, [&](const Value *V) -> bool {
810 if (const Instruction *I = dyn_cast<Instruction>(V)) {
811 if (!I->getParent() || !I->getParent()->getParent())
812 CheckFailed("Global is referenced by parentless instruction!", &GV, &M,
813 I);
814 else if (I->getParent()->getParent()->getParent() != &M)
815 CheckFailed("Global is referenced in a different module!", &GV, &M, I,
816 I->getParent()->getParent(),
817 I->getParent()->getParent()->getParent());
818 return false;
819 } else if (const Function *F = dyn_cast<Function>(V)) {
820 if (F->getParent() != &M)
821 CheckFailed("Global is used by function in a different module", &GV, &M,
822 F, F->getParent());
823 return false;
824 }
825 return true;
826 });
827}
828
829void Verifier::visitGlobalVariable(const GlobalVariable &GV) {
830 Type *GVType = GV.getValueType();
831
832 if (MaybeAlign A = GV.getAlign()) {
833 Check(A->value() <= Value::MaximumAlignment,
834 "huge alignment values are unsupported", &GV);
835 }
836
837 if (GV.hasInitializer()) {
838 Check(GV.getInitializer()->getType() == GVType,
839 "Global variable initializer type does not match global "
840 "variable type!",
841 &GV);
843 "Global variable initializer must be sized", &GV);
844 visitConstantExprsRecursively(GV.getInitializer());
845 // If the global has common linkage, it must have a zero initializer and
846 // cannot be constant.
847 if (GV.hasCommonLinkage()) {
849 "'common' global must have a zero initializer!", &GV);
850 Check(!GV.isConstant(), "'common' global may not be marked constant!",
851 &GV);
852 Check(!GV.hasComdat(), "'common' global may not be in a Comdat!", &GV);
853 }
854 }
855
856 if (GV.hasName() && (GV.getName() == "llvm.global_ctors" ||
857 GV.getName() == "llvm.global_dtors")) {
859 "invalid linkage for intrinsic global variable", &GV);
861 "invalid uses of intrinsic global variable", &GV);
862
863 // Don't worry about emitting an error for it not being an array,
864 // visitGlobalValue will complain on appending non-array.
865 if (ArrayType *ATy = dyn_cast<ArrayType>(GVType)) {
866 StructType *STy = dyn_cast<StructType>(ATy->getElementType());
867 PointerType *FuncPtrTy =
868 PointerType::get(Context, DL.getProgramAddressSpace());
869 Check(STy && (STy->getNumElements() == 2 || STy->getNumElements() == 3) &&
870 STy->getTypeAtIndex(0u)->isIntegerTy(32) &&
871 STy->getTypeAtIndex(1) == FuncPtrTy,
872 "wrong type for intrinsic global variable", &GV);
873 Check(STy->getNumElements() == 3,
874 "the third field of the element type is mandatory, "
875 "specify ptr null to migrate from the obsoleted 2-field form");
876 Type *ETy = STy->getTypeAtIndex(2);
877 Check(ETy->isPointerTy(), "wrong type for intrinsic global variable",
878 &GV);
879 }
880 }
881
882 if (GV.hasName() && (GV.getName() == "llvm.used" ||
883 GV.getName() == "llvm.compiler.used")) {
885 "invalid linkage for intrinsic global variable", &GV);
887 "invalid uses of intrinsic global variable", &GV);
888
889 if (ArrayType *ATy = dyn_cast<ArrayType>(GVType)) {
890 PointerType *PTy = dyn_cast<PointerType>(ATy->getElementType());
891 Check(PTy, "wrong type for intrinsic global variable", &GV);
892 if (GV.hasInitializer()) {
893 const Constant *Init = GV.getInitializer();
894 const ConstantArray *InitArray = dyn_cast<ConstantArray>(Init);
895 Check(InitArray, "wrong initializer for intrinsic global variable",
896 Init);
897 for (Value *Op : InitArray->operands()) {
898 Value *V = Op->stripPointerCasts();
901 Twine("invalid ") + GV.getName() + " member", V);
902 Check(V->hasName(),
903 Twine("members of ") + GV.getName() + " must be named", V);
904 }
905 }
906 }
907 }
908
909 // Visit any debug info attachments.
911 GV.getMetadata(LLVMContext::MD_dbg, MDs);
912 for (auto *MD : MDs) {
913 if (auto *GVE = dyn_cast<DIGlobalVariableExpression>(MD))
914 visitDIGlobalVariableExpression(*GVE);
915 else
916 CheckDI(false, "!dbg attachment of global variable must be a "
917 "DIGlobalVariableExpression");
918 }
919
920 // Scalable vectors cannot be global variables, since we don't know
921 // the runtime size.
922 Check(!GVType->isScalableTy(), "Globals cannot contain scalable types", &GV);
923
924 // Check if it is or contains a target extension type that disallows being
925 // used as a global.
927 "Global @" + GV.getName() + " has illegal target extension type",
928 GVType);
929
930 if (!GV.hasInitializer()) {
931 visitGlobalValue(GV);
932 return;
933 }
934
935 // Walk any aggregate initializers looking for bitcasts between address spaces
936 visitConstantExprsRecursively(GV.getInitializer());
937
938 visitGlobalValue(GV);
939}
940
941void Verifier::visitAliaseeSubExpr(const GlobalAlias &GA, const Constant &C) {
942 SmallPtrSet<const GlobalAlias*, 4> Visited;
943 Visited.insert(&GA);
944 visitAliaseeSubExpr(Visited, GA, C);
945}
946
// Recursively verify an alias's aliasee expression: available_externally
// aliases must target available_externally definitions, aliasee chains must
// not form cycles, and aliases may not point at interposable aliases.
// NOTE(review): source lines 949-950 and 957 are elided in this excerpt, so
// the guards for the first two Checks are not visible here.
947 void Verifier::visitAliaseeSubExpr(SmallPtrSetImpl<const GlobalAlias*> &Visited,
948                                    const GlobalAlias &GA, const Constant &C) {
951         cast<GlobalValue>(C).hasAvailableExternallyLinkage(),
952         "available_externally alias must point to available_externally "
953         "global value",
954         &GA);
955   }
956   if (const auto *GV = dyn_cast<GlobalValue>(&C)) {
958       Check(!GV->isDeclarationForLinker(), "Alias must point to a definition",
959             &GA);
960     }
961 
// Visited-set insertion failing means we have seen this alias before on the
// current chain, i.e. a cycle.
962     if (const auto *GA2 = dyn_cast<GlobalAlias>(GV)) {
963       Check(Visited.insert(GA2).second, "Aliases cannot form a cycle", &GA);
964 
965       Check(!GA2->isInterposable(),
966             "Alias cannot point to an interposable alias", &GA);
967     } else {
968       // Only continue verifying subexpressions of GlobalAliases.
969       // Do not recurse into global initializers.
970       return;
971     }
972   }
973 
974   if (const auto *CE = dyn_cast<ConstantExpr>(&C))
975     visitConstantExprsRecursively(CE);
976 
// Walk constant operands: recurse through further aliases via their aliasee,
// and through other constants directly.
977   for (const Use &U : C.operands()) {
978     Value *V = &*U;
979     if (const auto *GA2 = dyn_cast<GlobalAlias>(V))
980       visitAliaseeSubExpr(Visited, GA, *GA2->getAliasee());
981     else if (const auto *C2 = dyn_cast<Constant>(V))
982       visitAliaseeSubExpr(Visited, GA, *C2);
983   }
984 }
985
// Verify a GlobalAlias: allowed linkage, a non-null aliasee of matching type
// that is a GlobalValue or ConstantExpr, then recurse into the aliasee.
// NOTE(review): the linkage-predicate line (source line 987) is elided in
// this excerpt; the message below documents the allowed linkages.
986 void Verifier::visitGlobalAlias(const GlobalAlias &GA) {
988         "Alias should have private, internal, linkonce, weak, linkonce_odr, "
989         "weak_odr, external, or available_externally linkage!",
990         &GA);
991   const Constant *Aliasee = GA.getAliasee();
992   Check(Aliasee, "Aliasee cannot be NULL!", &GA);
993   Check(GA.getType() == Aliasee->getType(),
994         "Alias and aliasee types should match!", &GA);
995 
996   Check(isa<GlobalValue>(Aliasee) || isa<ConstantExpr>(Aliasee),
997         "Aliasee should be either GlobalValue or ConstantExpr", &GA);
998 
// Recursively validate the aliasee expression (cycle / interposable checks).
999   visitAliaseeSubExpr(GA, *Aliasee);
1000 
// Checks common to all global values.
1001   visitGlobalValue(GA);
1002 }
1003
// Verify a GlobalIFunc: no !dbg/!prof attachments, allowed linkage, and a
// resolver that is a Function definition returning a pointer in the ifunc's
// address space.
// NOTE(review): source lines 1007 (MDs declaration), 1017 (linkage
// predicate), and 1032 (pointer-return predicate) are elided in this excerpt.
1004 void Verifier::visitGlobalIFunc(const GlobalIFunc &GI) {
1005   visitGlobalValue(GI);
1006 
1008   GI.getAllMetadata(MDs);
1009   for (const auto &I : MDs) {
1010     CheckDI(I.first != LLVMContext::MD_dbg,
1011             "an ifunc may not have a !dbg attachment", &GI);
1012     Check(I.first != LLVMContext::MD_prof,
1013           "an ifunc may not have a !prof attachment", &GI);
1014     visitMDNode(*I.second, AreDebugLocsAllowed::No);
1015   }
1016 
1018       "IFunc should have private, internal, linkonce, weak, linkonce_odr, "
1019       "weak_odr, or external linkage!",
1020       &GI);
1021   // Pierce through ConstantExprs and GlobalAliases and check that the resolver
1022   // is a Function definition.
1023   const Function *Resolver = GI.getResolverFunction();
1024   Check(Resolver, "IFunc must have a Function resolver", &GI);
1025   Check(!Resolver->isDeclarationForLinker(),
1026         "IFunc resolver must be a definition", &GI);
1027 
1028   // Check that the immediate resolver operand (prior to any bitcasts) has the
1029   // correct type.
1030   const Type *ResolverTy = GI.getResolver()->getType();
1031 
1033         "IFunc resolver must return a pointer", &GI);
1034 
1035   Check(ResolverTy == PointerType::get(Context, GI.getAddressSpace()),
1036         "IFunc resolver has incorrect type", &GI);
1037 }
1038
1039void Verifier::visitNamedMDNode(const NamedMDNode &NMD) {
1040 // There used to be various other llvm.dbg.* nodes, but we don't support
1041 // upgrading them and we want to reserve the namespace for future uses.
1042 if (NMD.getName().starts_with("llvm.dbg."))
1043 CheckDI(NMD.getName() == "llvm.dbg.cu",
1044 "unrecognized named metadata node in the llvm.dbg namespace", &NMD);
1045 for (const MDNode *MD : NMD.operands()) {
1046 if (NMD.getName() == "llvm.dbg.cu")
1047 CheckDI(MD && isa<DICompileUnit>(MD), "invalid compile unit", &NMD, MD);
1048
1049 if (!MD)
1050 continue;
1051
1052 visitMDNode(*MD, AreDebugLocsAllowed::Yes);
1053 }
1054}
1055
// Verify a (possibly specialized) MDNode: context ownership, per-class
// specialized checks, operand validity, and resolution status.
1056 void Verifier::visitMDNode(const MDNode &MD, AreDebugLocsAllowed AllowLocs) {
1057   // Only visit each node once. Metadata can be mutually recursive, so this
1058   // avoids infinite recursion here, as well as being an optimization.
1059   if (!MDNodes.insert(&MD).second)
1060     return;
1061 
1062   Check(&MD.getContext() == &Context,
1063         "MDNode context does not match Module context!", &MD);
1064 
// Dispatch to the specialized visitor for this node's concrete subclass; the
// case list is generated from Metadata.def.
1065   switch (MD.getMetadataID()) {
1066   default:
1067     llvm_unreachable("Invalid MDNode subclass");
1068   case Metadata::MDTupleKind:
1069     break;
1070#define HANDLE_SPECIALIZED_MDNODE_LEAF(CLASS) \
1071 case Metadata::CLASS##Kind: \
1072 visit##CLASS(cast<CLASS>(MD)); \
1073 break;
1074#include "llvm/IR/Metadata.def"
1075   }
1076 
// Validate each operand: function-local metadata and (unless allowed)
// DILocations may not appear in global metadata; recurse into nested nodes.
1077   for (const Metadata *Op : MD.operands()) {
1078     if (!Op)
1079       continue;
1080     Check(!isa<LocalAsMetadata>(Op), "Invalid operand for global metadata!",
1081           &MD, Op);
1082     CheckDI(!isa<DILocation>(Op) || AllowLocs == AreDebugLocsAllowed::Yes,
1083             "DILocation not allowed within this metadata node", &MD, Op);
1084     if (auto *N = dyn_cast<MDNode>(Op)) {
1085       visitMDNode(*N, AllowLocs);
1086       continue;
1087     }
1088     if (auto *V = dyn_cast<ValueAsMetadata>(Op)) {
1089       visitValueAsMetadata(*V, nullptr);
1090       continue;
1091     }
1092   }
1093 
// NOTE(review): source lines 1096 (rest of the condition identifying the
// llvm.loop.estimated_trip_count node) and 1098 (extraction of Count) are
// elided in this excerpt.
1094   // Check llvm.loop.estimated_trip_count.
1095   if (MD.getNumOperands() > 0 &&
1097     Check(MD.getNumOperands() == 2, "Expected two operands", &MD);
1099     Check(Count && Count->getType()->isIntegerTy() &&
1100               cast<IntegerType>(Count->getType())->getBitWidth() <= 32,
1101           "Expected second operand to be an integer constant of type i32 or "
1102           "smaller",
1103           &MD);
1104   }
1105 
1106   // Check these last, so we diagnose problems in operands first.
1107   Check(!MD.isTemporary(), "Expected no forward declarations!", &MD);
1108   Check(MD.isResolved(), "All nodes should be resolved!", &MD);
1109 }
1110
1111void Verifier::visitValueAsMetadata(const ValueAsMetadata &MD, Function *F) {
1112 Check(MD.getValue(), "Expected valid value", &MD);
1113 Check(!MD.getValue()->getType()->isMetadataTy(),
1114 "Unexpected metadata round-trip through values", &MD, MD.getValue());
1115
1116 auto *L = dyn_cast<LocalAsMetadata>(&MD);
1117 if (!L)
1118 return;
1119
1120 Check(F, "function-local metadata used outside a function", L);
1121
1122 // If this was an instruction, bb, or argument, verify that it is in the
1123 // function that we expect.
1124 Function *ActualF = nullptr;
1125 if (Instruction *I = dyn_cast<Instruction>(L->getValue())) {
1126 Check(I->getParent(), "function-local metadata not in basic block", L, I);
1127 ActualF = I->getParent()->getParent();
1128 } else if (BasicBlock *BB = dyn_cast<BasicBlock>(L->getValue()))
1129 ActualF = BB->getParent();
1130 else if (Argument *A = dyn_cast<Argument>(L->getValue()))
1131 ActualF = A->getParent();
1132 assert(ActualF && "Unimplemented function local metadata case!");
1133
1134 Check(ActualF == F, "function-local metadata used in wrong function", L);
1135}
1136
1137void Verifier::visitDIArgList(const DIArgList &AL, Function *F) {
1138 for (const ValueAsMetadata *VAM : AL.getArgs())
1139 visitValueAsMetadata(*VAM, F);
1140}
1141
1142void Verifier::visitMetadataAsValue(const MetadataAsValue &MDV, Function *F) {
1143 Metadata *MD = MDV.getMetadata();
1144 if (auto *N = dyn_cast<MDNode>(MD)) {
1145 visitMDNode(*N, AreDebugLocsAllowed::No);
1146 return;
1147 }
1148
1149 // Only visit each node once. Metadata can be mutually recursive, so this
1150 // avoids infinite recursion here, as well as being an optimization.
1151 if (!MDNodes.insert(MD).second)
1152 return;
1153
1154 if (auto *V = dyn_cast<ValueAsMetadata>(MD))
1155 visitValueAsMetadata(*V, F);
1156
1157 if (auto *AL = dyn_cast<DIArgList>(MD))
1158 visitDIArgList(*AL, F);
1159}
1160
1161static bool isType(const Metadata *MD) { return !MD || isa<DIType>(MD); }
1162static bool isScope(const Metadata *MD) { return !MD || isa<DIScope>(MD); }
1163static bool isDINode(const Metadata *MD) { return !MD || isa<DINode>(MD); }
1164static bool isMDTuple(const Metadata *MD) { return !MD || isa<MDTuple>(MD); }
1165
1166void Verifier::visitDILocation(const DILocation &N) {
1167 CheckDI(N.getRawScope() && isa<DILocalScope>(N.getRawScope()),
1168 "location requires a valid scope", &N, N.getRawScope());
1169 if (auto *IA = N.getRawInlinedAt())
1170 CheckDI(isa<DILocation>(IA), "inlined-at should be a location", &N, IA);
1171 if (auto *SP = dyn_cast<DISubprogram>(N.getRawScope()))
1172 CheckDI(SP->isDefinition(), "scope points into the type hierarchy", &N);
1173}
1174
1175void Verifier::visitGenericDINode(const GenericDINode &N) {
1176 CheckDI(N.getTag(), "invalid tag", &N);
1177}
1178
1179void Verifier::visitDIScope(const DIScope &N) {
1180 if (auto *F = N.getRawFile())
1181 CheckDI(isa<DIFile>(F), "invalid file", &N, F);
1182}
1183
// Verify a DISubrangeType: correct tag, a type-valued base type, and bound /
// stride / bias operands of the allowed metadata classes.
1184 void Verifier::visitDISubrangeType(const DISubrangeType &N) {
1185   CheckDI(N.getTag() == dwarf::DW_TAG_subrange_type, "invalid tag", &N);
1186   auto *BaseType = N.getRawBaseType();
1187   CheckDI(!BaseType || isType(BaseType), "BaseType must be a type");
1188   auto *LBound = N.getRawLowerBound();
1189   CheckDI(!LBound || isa<ConstantAsMetadata>(LBound) ||
1190               isa<DIVariable>(LBound) || isa<DIExpression>(LBound) ||
1191               isa<DIDerivedType>(LBound),
1192           "LowerBound must be signed constant or DIVariable or DIExpression or "
1193           "DIDerivedType",
1194           &N);
1195   auto *UBound = N.getRawUpperBound();
1196   CheckDI(!UBound || isa<ConstantAsMetadata>(UBound) ||
1197               isa<DIVariable>(UBound) || isa<DIExpression>(UBound) ||
1198               isa<DIDerivedType>(UBound),
1199           "UpperBound must be signed constant or DIVariable or DIExpression or "
1200           "DIDerivedType",
1201           &N);
1202   auto *Stride = N.getRawStride();
1203   CheckDI(!Stride || isa<ConstantAsMetadata>(Stride) ||
1204               isa<DIVariable>(Stride) || isa<DIExpression>(Stride),
1205           "Stride must be signed constant or DIVariable or DIExpression", &N);
1206   auto *Bias = N.getRawBias();
1207   CheckDI(!Bias || isa<ConstantAsMetadata>(Bias) || isa<DIVariable>(Bias) ||
1208               isa<DIExpression>(Bias),
1209           "Bias must be signed constant or DIVariable or DIExpression", &N);
// NOTE(review): the CheckDI predicate line for Size (source line 1212) is
// elided in this excerpt.
1210   // Subrange types currently only support constant size.
1211   auto *Size = N.getRawSizeInBits();
1213           "SizeInBits must be a constant");
1214 }
1215
// Verify a DISubrange: count and upperBound are mutually exclusive, and all
// bound/stride operands must be constants, DIVariables, or DIExpressions.
1216 void Verifier::visitDISubrange(const DISubrange &N) {
1217   CheckDI(N.getTag() == dwarf::DW_TAG_subrange_type, "invalid tag", &N);
1218   CheckDI(!N.getRawCountNode() || !N.getRawUpperBound(),
1219           "Subrange can have any one of count or upperBound", &N);
1220   auto *CBound = N.getRawCountNode();
1221   CheckDI(!CBound || isa<ConstantAsMetadata>(CBound) ||
1222               isa<DIVariable>(CBound) || isa<DIExpression>(CBound),
1223           "Count must be signed constant or DIVariable or DIExpression", &N);
// NOTE(review): the first line of this Check (source line 1225) is elided in
// this excerpt; the visible part requires a constant count to be >= -1.
1224   auto Count = N.getCount();
1226               cast<ConstantInt *>(Count)->getSExtValue() >= -1,
1227           "invalid subrange count", &N);
1228   auto *LBound = N.getRawLowerBound();
1229   CheckDI(!LBound || isa<ConstantAsMetadata>(LBound) ||
1230               isa<DIVariable>(LBound) || isa<DIExpression>(LBound),
1231           "LowerBound must be signed constant or DIVariable or DIExpression",
1232           &N);
1233   auto *UBound = N.getRawUpperBound();
1234   CheckDI(!UBound || isa<ConstantAsMetadata>(UBound) ||
1235               isa<DIVariable>(UBound) || isa<DIExpression>(UBound),
1236           "UpperBound must be signed constant or DIVariable or DIExpression",
1237           &N);
1238   auto *Stride = N.getRawStride();
1239   CheckDI(!Stride || isa<ConstantAsMetadata>(Stride) ||
1240               isa<DIVariable>(Stride) || isa<DIExpression>(Stride),
1241           "Stride must be signed constant or DIVariable or DIExpression", &N);
1242 }
1243
1244void Verifier::visitDIGenericSubrange(const DIGenericSubrange &N) {
1245 CheckDI(N.getTag() == dwarf::DW_TAG_generic_subrange, "invalid tag", &N);
1246 CheckDI(!N.getRawCountNode() || !N.getRawUpperBound(),
1247 "GenericSubrange can have any one of count or upperBound", &N);
1248 auto *CBound = N.getRawCountNode();
1249 CheckDI(!CBound || isa<DIVariable>(CBound) || isa<DIExpression>(CBound),
1250 "Count must be signed constant or DIVariable or DIExpression", &N);
1251 auto *LBound = N.getRawLowerBound();
1252 CheckDI(LBound, "GenericSubrange must contain lowerBound", &N);
1253 CheckDI(isa<DIVariable>(LBound) || isa<DIExpression>(LBound),
1254 "LowerBound must be signed constant or DIVariable or DIExpression",
1255 &N);
1256 auto *UBound = N.getRawUpperBound();
1257 CheckDI(!UBound || isa<DIVariable>(UBound) || isa<DIExpression>(UBound),
1258 "UpperBound must be signed constant or DIVariable or DIExpression",
1259 &N);
1260 auto *Stride = N.getRawStride();
1261 CheckDI(Stride, "GenericSubrange must contain stride", &N);
1262 CheckDI(isa<DIVariable>(Stride) || isa<DIExpression>(Stride),
1263 "Stride must be signed constant or DIVariable or DIExpression", &N);
1264}
1265
1266void Verifier::visitDIEnumerator(const DIEnumerator &N) {
1267 CheckDI(N.getTag() == dwarf::DW_TAG_enumerator, "invalid tag", &N);
1268}
1269
// Verify a DIBasicType: one of the allowed basic-type tags and a constant
// size.
1270 void Verifier::visitDIBasicType(const DIBasicType &N) {
1271   CheckDI(N.getTag() == dwarf::DW_TAG_base_type ||
1272               N.getTag() == dwarf::DW_TAG_unspecified_type ||
1273               N.getTag() == dwarf::DW_TAG_string_type,
1274           "invalid tag", &N);
// NOTE(review): the CheckDI predicate line for Size (source line 1277) is
// elided in this excerpt.
1275   // Basic types currently only support constant size.
1276   auto *Size = N.getRawSizeInBits();
1278           "SizeInBits must be a constant");
1279 }
1280
// Verify a DIFixedPointType: basic-type checks plus a signed/unsigned fixed
// encoding, and kind-dependent constraints on factor/numerator/denominator.
1281 void Verifier::visitDIFixedPointType(const DIFixedPointType &N) {
1282   visitDIBasicType(N);
1283 
1284   CheckDI(N.getTag() == dwarf::DW_TAG_base_type, "invalid tag", &N);
1285   CheckDI(N.getEncoding() == dwarf::DW_ATE_signed_fixed ||
1286               N.getEncoding() == dwarf::DW_ATE_unsigned_fixed,
1287           "invalid encoding", &N);
// NOTE(review): source lines 1288-1290 (kind predicate), 1292, and 1295
// (the conditions of the following two CheckDIs) are elided in this excerpt.
1291           "invalid kind", &N);
1293               N.getFactorRaw() == 0,
1294           "factor should be 0 for rationals", &N);
1296               (N.getNumeratorRaw() == 0 && N.getDenominatorRaw() == 0),
1297           "numerator and denominator should be 0 for non-rationals", &N);
1298 }
1299
1300void Verifier::visitDIStringType(const DIStringType &N) {
1301 CheckDI(N.getTag() == dwarf::DW_TAG_string_type, "invalid tag", &N);
1302 CheckDI(!(N.isBigEndian() && N.isLittleEndian()), "has conflicting flags",
1303 &N);
1304}
1305
// Verify a DIDerivedType: one of the allowed derived-type tags, tag-specific
// extraData constraints, set-type base-type constraints, valid scope/base
// type, and address-space / size restrictions.
1306 void Verifier::visitDIDerivedType(const DIDerivedType &N) {
1307   // Common scope checks.
1308   visitDIScope(N);
1309 
1310   CheckDI(N.getTag() == dwarf::DW_TAG_typedef ||
1311               N.getTag() == dwarf::DW_TAG_pointer_type ||
1312               N.getTag() == dwarf::DW_TAG_ptr_to_member_type ||
1313               N.getTag() == dwarf::DW_TAG_reference_type ||
1314               N.getTag() == dwarf::DW_TAG_rvalue_reference_type ||
1315               N.getTag() == dwarf::DW_TAG_const_type ||
1316               N.getTag() == dwarf::DW_TAG_immutable_type ||
1317               N.getTag() == dwarf::DW_TAG_volatile_type ||
1318               N.getTag() == dwarf::DW_TAG_restrict_type ||
1319               N.getTag() == dwarf::DW_TAG_atomic_type ||
1320               N.getTag() == dwarf::DW_TAG_LLVM_ptrauth_type ||
1321               N.getTag() == dwarf::DW_TAG_member ||
1322               (N.getTag() == dwarf::DW_TAG_variable && N.isStaticMember()) ||
1323               N.getTag() == dwarf::DW_TAG_inheritance ||
1324               N.getTag() == dwarf::DW_TAG_friend ||
1325               N.getTag() == dwarf::DW_TAG_set_type ||
1326               N.getTag() == dwarf::DW_TAG_template_alias,
1327           "invalid tag", &N);
// extraData carries different payloads depending on the tag; validate each
// form separately.
1328   if (N.getTag() == dwarf::DW_TAG_ptr_to_member_type) {
1329     CheckDI(isType(N.getRawExtraData()), "invalid pointer to member type", &N,
1330             N.getRawExtraData());
1331   } else if (N.getTag() == dwarf::DW_TAG_template_alias) {
1332     CheckDI(isMDTuple(N.getRawExtraData()), "invalid template parameters", &N,
1333             N.getRawExtraData());
1334   } else if (N.getTag() == dwarf::DW_TAG_inheritance ||
1335              N.getTag() == dwarf::DW_TAG_member ||
1336              N.getTag() == dwarf::DW_TAG_variable) {
1337     auto *ExtraData = N.getRawExtraData();
1338     auto IsValidExtraData = [&]() {
1339       if (ExtraData == nullptr)
1340         return true;
1341       if (isa<ConstantAsMetadata>(ExtraData) || isa<MDString>(ExtraData) ||
1342           isa<DIObjCProperty>(ExtraData))
1343         return true;
1344       if (auto *Tuple = dyn_cast<MDTuple>(ExtraData)) {
1345         if (Tuple->getNumOperands() != 1)
1346           return false;
1347         return isa_and_nonnull<ConstantAsMetadata>(Tuple->getOperand(0).get());
1348       }
1349       return false;
1350     };
1351     CheckDI(IsValidExtraData(),
1352             "extraData must be ConstantAsMetadata, MDString, DIObjCProperty, "
1353             "or MDTuple with single ConstantAsMetadata operand",
1354             &N, ExtraData);
1355   }
1356 
// NOTE(review): source lines 1359-1361 (the dyn_casts producing Enum,
// Subrange, and Basic from T) are elided in this excerpt.
1357   if (N.getTag() == dwarf::DW_TAG_set_type) {
1358     if (auto *T = N.getRawBaseType()) {
1362       CheckDI(
1363           (Enum && Enum->getTag() == dwarf::DW_TAG_enumeration_type) ||
1364               (Subrange && Subrange->getTag() == dwarf::DW_TAG_subrange_type) ||
1365               (Basic && (Basic->getEncoding() == dwarf::DW_ATE_unsigned ||
1366                          Basic->getEncoding() == dwarf::DW_ATE_signed ||
1367                          Basic->getEncoding() == dwarf::DW_ATE_unsigned_char ||
1368                          Basic->getEncoding() == dwarf::DW_ATE_signed_char ||
1369                          Basic->getEncoding() == dwarf::DW_ATE_boolean)),
1370           "invalid set base type", &N, T);
1371     }
1372   }
1373 
1374   CheckDI(isScope(N.getRawScope()), "invalid scope", &N, N.getRawScope());
1375   CheckDI(isType(N.getRawBaseType()), "invalid base type", &N,
1376           N.getRawBaseType());
1377 
1378   if (N.getDWARFAddressSpace()) {
1379     CheckDI(N.getTag() == dwarf::DW_TAG_pointer_type ||
1380                 N.getTag() == dwarf::DW_TAG_reference_type ||
1381                 N.getTag() == dwarf::DW_TAG_rvalue_reference_type,
1382             "DWARF address space only applies to pointer or reference types",
1383             &N);
1384   }
1385 
// NOTE(review): the CheckDI predicate lines for Size (source lines
// 1387-1388) are elided in this excerpt.
1386   auto *Size = N.getRawSizeInBits();
1389           "SizeInBits must be a constant or DIVariable or DIExpression");
1390 }
1391
1392/// Detect mutually exclusive flags.
1393static bool hasConflictingReferenceFlags(unsigned Flags) {
1394 return ((Flags & DINode::FlagLValueReference) &&
1395 (Flags & DINode::FlagRValueReference)) ||
1396 ((Flags & DINode::FlagTypePassByValue) &&
1397 (Flags & DINode::FlagTypePassByReference));
1398}
1399
1400void Verifier::visitTemplateParams(const MDNode &N, const Metadata &RawParams) {
1401 auto *Params = dyn_cast<MDTuple>(&RawParams);
1402 CheckDI(Params, "invalid template params", &N, &RawParams);
1403 for (Metadata *Op : Params->operands()) {
1404 CheckDI(Op && isa<DITemplateParameter>(Op), "invalid template parameter",
1405 &N, Params, Op);
1406 }
1407}
1408
// Verify a DICompositeType: allowed tag, valid scope/base/elements/vtable
// holder, flag constraints, vector shape, and array-only operands
// (dataLocation, associated, allocated, rank).
1409 void Verifier::visitDICompositeType(const DICompositeType &N) {
1410   // Common scope checks.
1411   visitDIScope(N);
1412 
1413   CheckDI(N.getTag() == dwarf::DW_TAG_array_type ||
1414               N.getTag() == dwarf::DW_TAG_structure_type ||
1415               N.getTag() == dwarf::DW_TAG_union_type ||
1416               N.getTag() == dwarf::DW_TAG_enumeration_type ||
1417               N.getTag() == dwarf::DW_TAG_class_type ||
1418               N.getTag() == dwarf::DW_TAG_variant_part ||
1419               N.getTag() == dwarf::DW_TAG_variant ||
1420               N.getTag() == dwarf::DW_TAG_namelist,
1421           "invalid tag", &N);
1422 
1423   CheckDI(isScope(N.getRawScope()), "invalid scope", &N, N.getRawScope());
1424   CheckDI(isType(N.getRawBaseType()), "invalid base type", &N,
1425           N.getRawBaseType());
1426 
1427   CheckDI(!N.getRawElements() || isa<MDTuple>(N.getRawElements()),
1428           "invalid composite elements", &N, N.getRawElements());
1429   CheckDI(isType(N.getRawVTableHolder()), "invalid vtable holder", &N,
1430           N.getRawVTableHolder());
// NOTE(review): the predicate line of this CheckDI (source line 1431) is
// elided in this excerpt.
1432           "invalid reference flags", &N);
1433   unsigned DIBlockByRefStruct = 1 << 4;
1434   CheckDI((N.getFlags() & DIBlockByRefStruct) == 0,
1435           "DIBlockByRefStruct on DICompositeType is no longer supported", &N);
1436   CheckDI(llvm::all_of(N.getElements(), [](const DINode *N) { return N; }),
1437           "DISubprogram contains null entry in `elements` field", &N);
1438 
// Vector types must be described by exactly one subrange element.
1439   if (N.isVector()) {
1440     const DINodeArray Elements = N.getElements();
1441     CheckDI(Elements.size() == 1 &&
1442                 Elements[0]->getTag() == dwarf::DW_TAG_subrange_type,
1443             "invalid vector, expected one element of type subrange", &N);
1444   }
1445 
1446   if (auto *Params = N.getRawTemplateParams())
1447     visitTemplateParams(N, *Params);
1448 
1449   if (auto *D = N.getRawDiscriminator()) {
1450     CheckDI(isa<DIDerivedType>(D) && N.getTag() == dwarf::DW_TAG_variant_part,
1451             "discriminator can only appear on variant part");
1452   }
1453 
1454   if (N.getRawDataLocation()) {
1455     CheckDI(N.getTag() == dwarf::DW_TAG_array_type,
1456             "dataLocation can only appear in array type");
1457   }
1458 
1459   if (N.getRawAssociated()) {
1460     CheckDI(N.getTag() == dwarf::DW_TAG_array_type,
1461             "associated can only appear in array type");
1462   }
1463 
1464   if (N.getRawAllocated()) {
1465     CheckDI(N.getTag() == dwarf::DW_TAG_array_type,
1466             "allocated can only appear in array type");
1467   }
1468 
1469   if (N.getRawRank()) {
1470     CheckDI(N.getTag() == dwarf::DW_TAG_array_type,
1471             "rank can only appear in array type");
1472   }
1473 
1474   if (N.getTag() == dwarf::DW_TAG_array_type) {
1475     CheckDI(N.getRawBaseType(), "array types must have a base type", &N);
1476   }
1477 
// NOTE(review): the CheckDI predicate lines for Size (source lines
// 1479-1480) are elided in this excerpt.
1478   auto *Size = N.getRawSizeInBits();
1481           "SizeInBits must be a constant or DIVariable or DIExpression");
1482 }
1483
// Verify a DISubroutineType: correct tag, a tuple type array whose entries
// are all type references, and non-conflicting reference flags.
1484 void Verifier::visitDISubroutineType(const DISubroutineType &N) {
1485   CheckDI(N.getTag() == dwarf::DW_TAG_subroutine_type, "invalid tag", &N);
1486   if (auto *Types = N.getRawTypeArray()) {
1487     CheckDI(isa<MDTuple>(Types), "invalid composite elements", &N, Types);
1488     for (Metadata *Ty : N.getTypeArray()->operands()) {
1489       CheckDI(isType(Ty), "invalid subroutine type ref", &N, Types, Ty);
1490     }
1491   }
// NOTE(review): the predicate line of this CheckDI (source line 1492) is
// elided in this excerpt.
1493           "invalid reference flags", &N);
1494 }
1495
1496void Verifier::visitDIFile(const DIFile &N) {
1497 CheckDI(N.getTag() == dwarf::DW_TAG_file_type, "invalid tag", &N);
1498 std::optional<DIFile::ChecksumInfo<StringRef>> Checksum = N.getChecksum();
1499 if (Checksum) {
1500 CheckDI(Checksum->Kind <= DIFile::ChecksumKind::CSK_Last,
1501 "invalid checksum kind", &N);
1502 size_t Size;
1503 switch (Checksum->Kind) {
1504 case DIFile::CSK_MD5:
1505 Size = 32;
1506 break;
1507 case DIFile::CSK_SHA1:
1508 Size = 40;
1509 break;
1510 case DIFile::CSK_SHA256:
1511 Size = 64;
1512 break;
1513 }
1514 CheckDI(Checksum->Value.size() == Size, "invalid checksum length", &N);
1515 CheckDI(Checksum->Value.find_if_not(llvm::isHexDigit) == StringRef::npos,
1516 "invalid checksum", &N);
1517 }
1518}
1519
// Verify a DICompileUnit: distinct, correct tag, a named file, a valid
// emission kind, and well-formed enum / retained-type / global-variable /
// imported-entity / macro lists; record the CU for later cross-checks.
1521 void Verifier::visitDICompileUnit(const DICompileUnit &N) {
1522   CheckDI(N.isDistinct(), "compile units must be distinct", &N);
1523   CheckDI(N.getTag() == dwarf::DW_TAG_compile_unit, "invalid tag", &N);
1524 
1525   // Don't bother verifying the compilation directory or producer string
1526   // as those could be empty.
1527   CheckDI(N.getRawFile() && isa<DIFile>(N.getRawFile()), "invalid file", &N,
1528           N.getRawFile());
1529   CheckDI(!N.getFile()->getFilename().empty(), "invalid filename", &N,
1530           N.getFile());
1531 
1532   CheckDI((N.getEmissionKind() <= DICompileUnit::LastEmissionKind),
1533           "invalid emission kind", &N);
1534 
// NOTE(review): source line 1537 (the dyn_cast producing Enum from Op) is
// elided in this excerpt.
1535   if (auto *Array = N.getRawEnumTypes()) {
1536     CheckDI(isa<MDTuple>(Array), "invalid enum list", &N, Array);
1538       CheckDI(Enum && Enum->getTag() == dwarf::DW_TAG_enumeration_type,
1539               "invalid enum type", &N, N.getEnumTypes(), Op);
1540     }
1541   }
1542   if (auto *Array = N.getRawRetainedTypes()) {
1543     CheckDI(isa<MDTuple>(Array), "invalid retained type list", &N, Array);
1544     for (Metadata *Op : N.getRetainedTypes()->operands()) {
1545       CheckDI(
1546           Op && (isa<DIType>(Op) || (isa<DISubprogram>(Op) &&
1547                                      !cast<DISubprogram>(Op)->isDefinition())),
1548           "invalid retained type", &N, Op);
1549     }
1550   }
// NOTE(review): the predicate line of the CheckDI inside this loop (source
// line 1554) is elided in this excerpt.
1551   if (auto *Array = N.getRawGlobalVariables()) {
1552     CheckDI(isa<MDTuple>(Array), "invalid global variable list", &N, Array);
1553     for (Metadata *Op : N.getGlobalVariables()->operands()) {
1555               "invalid global variable ref", &N, Op);
1556     }
1557   }
1558   if (auto *Array = N.getRawImportedEntities()) {
1559     CheckDI(isa<MDTuple>(Array), "invalid imported entity list", &N, Array);
1560     for (Metadata *Op : N.getImportedEntities()->operands()) {
1561       CheckDI(Op && isa<DIImportedEntity>(Op), "invalid imported entity ref",
1562               &N, Op);
1563     }
1564   }
1565   if (auto *Array = N.getRawMacros()) {
1566     CheckDI(isa<MDTuple>(Array), "invalid macro list", &N, Array);
1567     for (Metadata *Op : N.getMacros()->operands()) {
1568       CheckDI(Op && isa<DIMacroNode>(Op), "invalid macro ref", &N, Op);
1569     }
1570   }
// Remember this CU so the verifier can later match it against llvm.dbg.cu.
1571   CUVisited.insert(&N);
1572 }
1573
// Verify a DISubprogram: scope/file/type/containing-type validity, template
// params, declaration links, retained nodes, flags, and the
// definition-vs-declaration rules around compile units.
1574 void Verifier::visitDISubprogram(const DISubprogram &N) {
1575   CheckDI(N.getTag() == dwarf::DW_TAG_subprogram, "invalid tag", &N);
1576   CheckDI(isScope(N.getRawScope()), "invalid scope", &N, N.getRawScope());
1577   if (auto *F = N.getRawFile())
1578     CheckDI(isa<DIFile>(F), "invalid file", &N, F);
1579   else
1580     CheckDI(N.getLine() == 0, "line specified with no file", &N, N.getLine());
1581   if (auto *T = N.getRawType())
1582     CheckDI(isa<DISubroutineType>(T), "invalid subroutine type", &N, T);
1583   CheckDI(isType(N.getRawContainingType()), "invalid containing type", &N,
1584           N.getRawContainingType());
1585   if (auto *Params = N.getRawTemplateParams())
1586     visitTemplateParams(N, *Params);
1587   if (auto *S = N.getRawDeclaration())
1588     CheckDI(isa<DISubprogram>(S) && !cast<DISubprogram>(S)->isDefinition(),
1589             "invalid subprogram declaration", &N, S);
// Retained nodes must be local entities (DILocalVariable, DILabel,
// DIImportedEntity) belonging to this subprogram.
1590   if (auto *RawNode = N.getRawRetainedNodes()) {
1591     auto *Node = dyn_cast<MDTuple>(RawNode);
1592     CheckDI(Node, "invalid retained nodes list", &N, RawNode);
1593     for (Metadata *Op : Node->operands()) {
1594       CheckDI(Op, "nullptr in retained nodes", &N, Node);
1595 
1596       auto True = [](const Metadata *) { return true; };
1597       auto False = [](const Metadata *) { return false; };
1598       bool IsTypeCorrect =
1599           DISubprogram::visitRetainedNode<bool>(Op, True, True, True, False);
1600       CheckDI(IsTypeCorrect,
1601               "invalid retained nodes, expected DILocalVariable, DILabel or "
1602               "DIImportedEntity",
1603               &N, Node, Op);
1604 
// NOTE(review): source line 1607 (the argument of the dyn_cast_or_null
// producing RetainedNodeScope) is elided in this excerpt.
1605       auto *RetainedNode = cast<DINode>(Op);
1606       auto *RetainedNodeScope = dyn_cast_or_null<DILocalScope>(
1608       CheckDI(RetainedNodeScope,
1609               "invalid retained nodes, retained node is not local", &N, Node,
1610               RetainedNode);
1611       CheckDI(
1612           RetainedNodeScope->getSubprogram() == &N,
1613           "invalid retained nodes, retained node does not belong to subprogram",
1614           &N, Node, RetainedNode, RetainedNodeScope);
1615     }
1616   }
// NOTE(review): the predicate line of this CheckDI (source line 1617) is
// elided in this excerpt.
1618           "invalid reference flags", &N);
1619 
1620   auto *Unit = N.getRawUnit();
1621   if (N.isDefinition()) {
1622     // Subprogram definitions (not part of the type hierarchy).
1623     CheckDI(N.isDistinct(), "subprogram definitions must be distinct", &N);
1624     CheckDI(Unit, "subprogram definitions must have a compile unit", &N);
1625     CheckDI(isa<DICompileUnit>(Unit), "invalid unit type", &N, Unit);
1626     // There's no good way to cross the CU boundary to insert a nested
1627     // DISubprogram definition in one CU into a type defined in another CU.
1628     auto *CT = dyn_cast_or_null<DICompositeType>(N.getRawScope());
1629     if (CT && CT->getRawIdentifier() &&
1630         M.getContext().isODRUniquingDebugTypes())
1631       CheckDI(N.getDeclaration(),
1632               "definition subprograms cannot be nested within DICompositeType "
1633               "when enabling ODR",
1634               &N);
1635   } else {
1636     // Subprogram declarations (part of the type hierarchy).
1637     CheckDI(!Unit, "subprogram declarations must not have a compile unit", &N);
1638     CheckDI(!N.getRawDeclaration(),
1639             "subprogram declaration must not have a declaration field");
1640   }
1641 
1642   if (auto *RawThrownTypes = N.getRawThrownTypes()) {
1643     auto *ThrownTypes = dyn_cast<MDTuple>(RawThrownTypes);
1644     CheckDI(ThrownTypes, "invalid thrown types list", &N, RawThrownTypes);
1645     for (Metadata *Op : ThrownTypes->operands())
1646       CheckDI(Op && isa<DIType>(Op), "invalid thrown type", &N, ThrownTypes,
1647               Op);
1648   }
1649 
1650   if (N.areAllCallsDescribed())
1651     CheckDI(N.isDefinition(),
1652             "DIFlagAllCallsDescribed must be attached to a definition");
1653 }
1654
1655void Verifier::visitDILexicalBlockBase(const DILexicalBlockBase &N) {
1656 CheckDI(N.getTag() == dwarf::DW_TAG_lexical_block, "invalid tag", &N);
1657 CheckDI(N.getRawScope() && isa<DILocalScope>(N.getRawScope()),
1658 "invalid local scope", &N, N.getRawScope());
1659 if (auto *SP = dyn_cast<DISubprogram>(N.getRawScope()))
1660 CheckDI(SP->isDefinition(), "scope points into the type hierarchy", &N);
1661}
1662
1663void Verifier::visitDILexicalBlock(const DILexicalBlock &N) {
1664 visitDILexicalBlockBase(N);
1665
1666 CheckDI(N.getLine() || !N.getColumn(),
1667 "cannot have column info without line info", &N);
1668}
1669
// A DILexicalBlockFile needs only the checks shared by all lexical blocks.
1670 void Verifier::visitDILexicalBlockFile(const DILexicalBlockFile &N) {
1671   visitDILexicalBlockBase(N);
1672 }
1673
1674void Verifier::visitDICommonBlock(const DICommonBlock &N) {
1675 CheckDI(N.getTag() == dwarf::DW_TAG_common_block, "invalid tag", &N);
1676 if (auto *S = N.getRawScope())
1677 CheckDI(isa<DIScope>(S), "invalid scope ref", &N, S);
1678 if (auto *S = N.getRawDecl())
1679 CheckDI(isa<DIGlobalVariable>(S), "invalid declaration", &N, S);
1680}
1681
1682void Verifier::visitDINamespace(const DINamespace &N) {
1683 CheckDI(N.getTag() == dwarf::DW_TAG_namespace, "invalid tag", &N);
1684 if (auto *S = N.getRawScope())
1685 CheckDI(isa<DIScope>(S), "invalid scope ref", &N, S);
1686}
1687
1688void Verifier::visitDIMacro(const DIMacro &N) {
1689 CheckDI(N.getMacinfoType() == dwarf::DW_MACINFO_define ||
1690 N.getMacinfoType() == dwarf::DW_MACINFO_undef,
1691 "invalid macinfo type", &N);
1692 CheckDI(!N.getName().empty(), "anonymous macro", &N);
1693 if (!N.getValue().empty()) {
1694 assert(N.getValue().data()[0] != ' ' && "Macro value has a space prefix");
1695 }
1696}
1697
1698void Verifier::visitDIMacroFile(const DIMacroFile &N) {
1699 CheckDI(N.getMacinfoType() == dwarf::DW_MACINFO_start_file,
1700 "invalid macinfo type", &N);
1701 if (auto *F = N.getRawFile())
1702 CheckDI(isa<DIFile>(F), "invalid file", &N, F);
1703
1704 if (auto *Array = N.getRawElements()) {
1705 CheckDI(isa<MDTuple>(Array), "invalid macro list", &N, Array);
1706 for (Metadata *Op : N.getElements()->operands()) {
1707 CheckDI(Op && isa<DIMacroNode>(Op), "invalid macro ref", &N, Op);
1708 }
1709 }
1710}
1711
1712void Verifier::visitDIModule(const DIModule &N) {
1713 CheckDI(N.getTag() == dwarf::DW_TAG_module, "invalid tag", &N);
1714 CheckDI(!N.getName().empty(), "anonymous module", &N);
1715}
1716
1717void Verifier::visitDITemplateParameter(const DITemplateParameter &N) {
1718 CheckDI(isType(N.getRawType()), "invalid type ref", &N, N.getRawType());
1719}
1720
1721void Verifier::visitDITemplateTypeParameter(const DITemplateTypeParameter &N) {
1722 visitDITemplateParameter(N);
1723
1724 CheckDI(N.getTag() == dwarf::DW_TAG_template_type_parameter, "invalid tag",
1725 &N);
1726}
1727
1728void Verifier::visitDITemplateValueParameter(
1729 const DITemplateValueParameter &N) {
1730 visitDITemplateParameter(N);
1731
1732 CheckDI(N.getTag() == dwarf::DW_TAG_template_value_parameter ||
1733 N.getTag() == dwarf::DW_TAG_GNU_template_template_param ||
1734 N.getTag() == dwarf::DW_TAG_GNU_template_parameter_pack,
1735 "invalid tag", &N);
1736}
1737
1738void Verifier::visitDIVariable(const DIVariable &N) {
1739 if (auto *S = N.getRawScope())
1740 CheckDI(isa<DIScope>(S), "invalid scope", &N, S);
1741 if (auto *F = N.getRawFile())
1742 CheckDI(isa<DIFile>(F), "invalid file", &N, F);
1743}
1744
// Verify a DIGlobalVariable: common variable checks, correct tag, a valid
// type (mandatory for definitions), and a valid static-member declaration.
1745 void Verifier::visitDIGlobalVariable(const DIGlobalVariable &N) {
1746   // Checks common to all variables.
1747   visitDIVariable(N);
1748 
1749   CheckDI(N.getTag() == dwarf::DW_TAG_variable, "invalid tag", &N);
1750   CheckDI(isType(N.getRawType()), "invalid type ref", &N, N.getRawType());
1751   // Check only if the global variable is not an extern
1752   if (N.isDefinition())
1753     CheckDI(N.getType(), "missing global variable type", &N);
// NOTE(review): the CheckDI predicate line for Member (source line 1755) is
// elided in this excerpt.
1754   if (auto *Member = N.getRawStaticDataMemberDeclaration()) {
1756             "invalid static data member declaration", &N, Member);
1757   }
1758 }
1759
1760void Verifier::visitDILocalVariable(const DILocalVariable &N) {
1761 // Checks common to all variables.
1762 visitDIVariable(N);
1763
1764 CheckDI(isType(N.getRawType()), "invalid type ref", &N, N.getRawType());
1765 CheckDI(N.getTag() == dwarf::DW_TAG_variable, "invalid tag", &N);
1766 CheckDI(N.getRawScope() && isa<DILocalScope>(N.getRawScope()),
1767 "local variable requires a valid scope", &N, N.getRawScope());
1768 if (auto Ty = N.getType())
1769 CheckDI(!isa<DISubroutineType>(Ty), "invalid type", &N, N.getType());
1770}
1771
1772void Verifier::visitDIAssignID(const DIAssignID &N) {
1773 CheckDI(!N.getNumOperands(), "DIAssignID has no arguments", &N);
1774 CheckDI(N.isDistinct(), "DIAssignID must be distinct", &N);
1775}
1776
1777void Verifier::visitDILabel(const DILabel &N) {
1778 if (auto *S = N.getRawScope())
1779 CheckDI(isa<DIScope>(S), "invalid scope", &N, S);
1780 if (auto *F = N.getRawFile())
1781 CheckDI(isa<DIFile>(F), "invalid file", &N, F);
1782
1783 CheckDI(N.getTag() == dwarf::DW_TAG_label, "invalid tag", &N);
1784 CheckDI(N.getRawScope() && isa<DILocalScope>(N.getRawScope()),
1785 "label requires a valid scope", &N, N.getRawScope());
1786}
1787
void Verifier::visitDIExpression(const DIExpression &N) {
  // DIExpression knows how to validate its own opcode stream; delegate.
  CheckDI(N.isValid(), "invalid expression", &N);
}
1791
1792void Verifier::visitDIGlobalVariableExpression(
1793 const DIGlobalVariableExpression &GVE) {
1794 CheckDI(GVE.getVariable(), "missing variable");
1795 if (auto *Var = GVE.getVariable())
1796 visitDIGlobalVariable(*Var);
1797 if (auto *Expr = GVE.getExpression()) {
1798 visitDIExpression(*Expr);
1799 if (auto Fragment = Expr->getFragmentInfo())
1800 verifyFragmentExpression(*GVE.getVariable(), *Fragment, &GVE);
1801 }
1802}
1803
1804void Verifier::visitDIObjCProperty(const DIObjCProperty &N) {
1805 CheckDI(N.getTag() == dwarf::DW_TAG_APPLE_property, "invalid tag", &N);
1806 if (auto *T = N.getRawType())
1807 CheckDI(isType(T), "invalid type ref", &N, T);
1808 if (auto *F = N.getRawFile())
1809 CheckDI(isa<DIFile>(F), "invalid file", &N, F);
1810}
1811
1812void Verifier::visitDIImportedEntity(const DIImportedEntity &N) {
1813 CheckDI(N.getTag() == dwarf::DW_TAG_imported_module ||
1814 N.getTag() == dwarf::DW_TAG_imported_declaration,
1815 "invalid tag", &N);
1816 if (auto *S = N.getRawScope())
1817 CheckDI(isa<DIScope>(S), "invalid scope for imported entity", &N, S);
1818 CheckDI(isDINode(N.getRawEntity()), "invalid imported entity", &N,
1819 N.getRawEntity());
1820}
1821
1822void Verifier::visitComdat(const Comdat &C) {
1823 // In COFF the Module is invalid if the GlobalValue has private linkage.
1824 // Entities with private linkage don't have entries in the symbol table.
1825 if (TT.isOSBinFormatCOFF())
1826 if (const GlobalValue *GV = M.getNamedValue(C.getName()))
1827 Check(!GV->hasPrivateLinkage(), "comdat global value has private linkage",
1828 GV);
1829}
1830
1831void Verifier::visitModuleIdents() {
1832 const NamedMDNode *Idents = M.getNamedMetadata("llvm.ident");
1833 if (!Idents)
1834 return;
1835
1836 // llvm.ident takes a list of metadata entry. Each entry has only one string.
1837 // Scan each llvm.ident entry and make sure that this requirement is met.
1838 for (const MDNode *N : Idents->operands()) {
1839 Check(N->getNumOperands() == 1,
1840 "incorrect number of operands in llvm.ident metadata", N);
1841 Check(dyn_cast_or_null<MDString>(N->getOperand(0)),
1842 ("invalid value for llvm.ident metadata entry operand"
1843 "(the operand should be a string)"),
1844 N->getOperand(0));
1845 }
1846}
1847
1848void Verifier::visitModuleCommandLines() {
1849 const NamedMDNode *CommandLines = M.getNamedMetadata("llvm.commandline");
1850 if (!CommandLines)
1851 return;
1852
1853 // llvm.commandline takes a list of metadata entry. Each entry has only one
1854 // string. Scan each llvm.commandline entry and make sure that this
1855 // requirement is met.
1856 for (const MDNode *N : CommandLines->operands()) {
1857 Check(N->getNumOperands() == 1,
1858 "incorrect number of operands in llvm.commandline metadata", N);
1859 Check(dyn_cast_or_null<MDString>(N->getOperand(0)),
1860 ("invalid value for llvm.commandline metadata entry operand"
1861 "(the operand should be a string)"),
1862 N->getOperand(0));
1863 }
1864}
1865
1866void Verifier::visitModuleErrnoTBAA() {
1867 const NamedMDNode *ErrnoTBAA = M.getNamedMetadata("llvm.errno.tbaa");
1868 if (!ErrnoTBAA)
1869 return;
1870
1871 Check(ErrnoTBAA->getNumOperands() >= 1,
1872 "llvm.errno.tbaa must have at least one operand", ErrnoTBAA);
1873
1874 for (const MDNode *N : ErrnoTBAA->operands())
1875 TBAAVerifyHelper.visitTBAAMetadata(nullptr, N);
1876}
1877
1878void Verifier::visitModuleFlags() {
1879 const NamedMDNode *Flags = M.getModuleFlagsMetadata();
1880 if (!Flags) return;
1881
1882 // Scan each flag, and track the flags and requirements.
1883 DenseMap<const MDString*, const MDNode*> SeenIDs;
1884 SmallVector<const MDNode*, 16> Requirements;
1885 uint64_t PAuthABIPlatform = -1;
1886 uint64_t PAuthABIVersion = -1;
1887 for (const MDNode *MDN : Flags->operands()) {
1888 visitModuleFlag(MDN, SeenIDs, Requirements);
1889 if (MDN->getNumOperands() != 3)
1890 continue;
1891 if (const auto *FlagName = dyn_cast_or_null<MDString>(MDN->getOperand(1))) {
1892 if (FlagName->getString() == "aarch64-elf-pauthabi-platform") {
1893 if (const auto *PAP =
1895 PAuthABIPlatform = PAP->getZExtValue();
1896 } else if (FlagName->getString() == "aarch64-elf-pauthabi-version") {
1897 if (const auto *PAV =
1899 PAuthABIVersion = PAV->getZExtValue();
1900 }
1901 }
1902 }
1903
1904 if ((PAuthABIPlatform == uint64_t(-1)) != (PAuthABIVersion == uint64_t(-1)))
1905 CheckFailed("either both or no 'aarch64-elf-pauthabi-platform' and "
1906 "'aarch64-elf-pauthabi-version' module flags must be present");
1907
1908 // Validate that the requirements in the module are valid.
1909 for (const MDNode *Requirement : Requirements) {
1910 const MDString *Flag = cast<MDString>(Requirement->getOperand(0));
1911 const Metadata *ReqValue = Requirement->getOperand(1);
1912
1913 const MDNode *Op = SeenIDs.lookup(Flag);
1914 if (!Op) {
1915 CheckFailed("invalid requirement on flag, flag is not present in module",
1916 Flag);
1917 continue;
1918 }
1919
1920 if (Op->getOperand(2) != ReqValue) {
1921 CheckFailed(("invalid requirement on flag, "
1922 "flag does not have the required value"),
1923 Flag);
1924 continue;
1925 }
1926 }
1927}
1928
1929void
1930Verifier::visitModuleFlag(const MDNode *Op,
1931 DenseMap<const MDString *, const MDNode *> &SeenIDs,
1932 SmallVectorImpl<const MDNode *> &Requirements) {
1933 // Each module flag should have three arguments, the merge behavior (a
1934 // constant int), the flag ID (an MDString), and the value.
1935 Check(Op->getNumOperands() == 3,
1936 "incorrect number of operands in module flag", Op);
1937 Module::ModFlagBehavior MFB;
1938 if (!Module::isValidModFlagBehavior(Op->getOperand(0), MFB)) {
1940 "invalid behavior operand in module flag (expected constant integer)",
1941 Op->getOperand(0));
1942 Check(false,
1943 "invalid behavior operand in module flag (unexpected constant)",
1944 Op->getOperand(0));
1945 }
1946 MDString *ID = dyn_cast_or_null<MDString>(Op->getOperand(1));
1947 Check(ID, "invalid ID operand in module flag (expected metadata string)",
1948 Op->getOperand(1));
1949
1950 // Check the values for behaviors with additional requirements.
1951 switch (MFB) {
1952 case Module::Error:
1953 case Module::Warning:
1954 case Module::Override:
1955 // These behavior types accept any value.
1956 break;
1957
1958 case Module::Min: {
1959 auto *V = mdconst::dyn_extract_or_null<ConstantInt>(Op->getOperand(2));
1960 Check(V && V->getValue().isNonNegative(),
1961 "invalid value for 'min' module flag (expected constant non-negative "
1962 "integer)",
1963 Op->getOperand(2));
1964 break;
1965 }
1966
1967 case Module::Max: {
1969 "invalid value for 'max' module flag (expected constant integer)",
1970 Op->getOperand(2));
1971 break;
1972 }
1973
1974 case Module::Require: {
1975 // The value should itself be an MDNode with two operands, a flag ID (an
1976 // MDString), and a value.
1977 MDNode *Value = dyn_cast<MDNode>(Op->getOperand(2));
1978 Check(Value && Value->getNumOperands() == 2,
1979 "invalid value for 'require' module flag (expected metadata pair)",
1980 Op->getOperand(2));
1981 Check(isa<MDString>(Value->getOperand(0)),
1982 ("invalid value for 'require' module flag "
1983 "(first value operand should be a string)"),
1984 Value->getOperand(0));
1985
1986 // Append it to the list of requirements, to check once all module flags are
1987 // scanned.
1988 Requirements.push_back(Value);
1989 break;
1990 }
1991
1992 case Module::Append:
1993 case Module::AppendUnique: {
1994 // These behavior types require the operand be an MDNode.
1995 Check(isa<MDNode>(Op->getOperand(2)),
1996 "invalid value for 'append'-type module flag "
1997 "(expected a metadata node)",
1998 Op->getOperand(2));
1999 break;
2000 }
2001 }
2002
2003 // Unless this is a "requires" flag, check the ID is unique.
2004 if (MFB != Module::Require) {
2005 bool Inserted = SeenIDs.insert(std::make_pair(ID, Op)).second;
2006 Check(Inserted,
2007 "module flag identifiers must be unique (or of 'require' type)", ID);
2008 }
2009
2010 if (ID->getString() == "wchar_size") {
2011 ConstantInt *Value
2013 Check(Value, "wchar_size metadata requires constant integer argument");
2014 }
2015
2016 if (ID->getString() == "Linker Options") {
2017 // If the llvm.linker.options named metadata exists, we assume that the
2018 // bitcode reader has upgraded the module flag. Otherwise the flag might
2019 // have been created by a client directly.
2020 Check(M.getNamedMetadata("llvm.linker.options"),
2021 "'Linker Options' named metadata no longer supported");
2022 }
2023
2024 if (ID->getString() == "SemanticInterposition") {
2025 ConstantInt *Value =
2027 Check(Value,
2028 "SemanticInterposition metadata requires constant integer argument");
2029 }
2030
2031 if (ID->getString() == "CG Profile") {
2032 for (const MDOperand &MDO : cast<MDNode>(Op->getOperand(2))->operands())
2033 visitModuleFlagCGProfileEntry(MDO);
2034 }
2035}
2036
2037void Verifier::visitModuleFlagCGProfileEntry(const MDOperand &MDO) {
2038 auto CheckFunction = [&](const MDOperand &FuncMDO) {
2039 if (!FuncMDO)
2040 return;
2041 auto F = dyn_cast<ValueAsMetadata>(FuncMDO);
2042 Check(F && isa<Function>(F->getValue()->stripPointerCasts()),
2043 "expected a Function or null", FuncMDO);
2044 };
2045 auto Node = dyn_cast_or_null<MDNode>(MDO);
2046 Check(Node && Node->getNumOperands() == 3, "expected a MDNode triple", MDO);
2047 CheckFunction(Node->getOperand(0));
2048 CheckFunction(Node->getOperand(1));
2049 auto Count = dyn_cast_or_null<ConstantAsMetadata>(Node->getOperand(2));
2050 Check(Count && Count->getType()->isIntegerTy(),
2051 "expected an integer constant", Node->getOperand(2));
2052}
2053
// Verify that each attribute in the set is structurally well-formed: known
// boolean-valued string attributes must carry "", "true" or "false", and
// enum/int attribute kinds must agree on whether they carry an argument.
void Verifier::verifyAttributeTypes(AttributeSet Attrs, const Value *V) {
  for (Attribute A : Attrs) {

    // String attributes: the ATTRIBUTE_STRBOOL expansions generated from
    // Attributes.inc validate each known true/false-valued string attribute;
    // unrecognized string attributes are accepted as-is.
    if (A.isStringAttribute()) {
#define GET_ATTR_NAMES
#define ATTRIBUTE_ENUM(ENUM_NAME, DISPLAY_NAME)
#define ATTRIBUTE_STRBOOL(ENUM_NAME, DISPLAY_NAME) \
  if (A.getKindAsString() == #DISPLAY_NAME) { \
    auto V = A.getValueAsString(); \
    if (!(V.empty() || V == "true" || V == "false")) \
      CheckFailed("invalid value for '" #DISPLAY_NAME "' attribute: " + V + \
                  ""); \
  }

#include "llvm/IR/Attributes.inc"
      continue;
    }

    // Non-string attributes: an int attribute kind must be represented as an
    // int attribute (i.e. carry an argument), and vice versa.
    if (A.isIntAttribute() != Attribute::isIntAttrKind(A.getKindAsEnum())) {
      CheckFailed("Attribute '" + A.getAsString() + "' should have an Argument",
                  V);
      return;
    }
  }
}
2079
2080// VerifyParameterAttrs - Check the given attributes for an argument or return
2081// value of the specified type. The value V is printed in error messages.
2082void Verifier::verifyParameterAttrs(AttributeSet Attrs, Type *Ty,
2083 const Value *V) {
2084 if (!Attrs.hasAttributes())
2085 return;
2086
2087 verifyAttributeTypes(Attrs, V);
2088
2089 for (Attribute Attr : Attrs)
2090 Check(Attr.isStringAttribute() ||
2091 Attribute::canUseAsParamAttr(Attr.getKindAsEnum()),
2092 "Attribute '" + Attr.getAsString() + "' does not apply to parameters",
2093 V);
2094
2095 if (Attrs.hasAttribute(Attribute::ImmArg)) {
2096 unsigned AttrCount =
2097 Attrs.getNumAttributes() - Attrs.hasAttribute(Attribute::Range);
2098 Check(AttrCount == 1,
2099 "Attribute 'immarg' is incompatible with other attributes except the "
2100 "'range' attribute",
2101 V);
2102 }
2103
2104 // Check for mutually incompatible attributes. Only inreg is compatible with
2105 // sret.
2106 unsigned AttrCount = 0;
2107 AttrCount += Attrs.hasAttribute(Attribute::ByVal);
2108 AttrCount += Attrs.hasAttribute(Attribute::InAlloca);
2109 AttrCount += Attrs.hasAttribute(Attribute::Preallocated);
2110 AttrCount += Attrs.hasAttribute(Attribute::StructRet) ||
2111 Attrs.hasAttribute(Attribute::InReg);
2112 AttrCount += Attrs.hasAttribute(Attribute::Nest);
2113 AttrCount += Attrs.hasAttribute(Attribute::ByRef);
2114 Check(AttrCount <= 1,
2115 "Attributes 'byval', 'inalloca', 'preallocated', 'inreg', 'nest', "
2116 "'byref', and 'sret' are incompatible!",
2117 V);
2118
2119 Check(!(Attrs.hasAttribute(Attribute::InAlloca) &&
2120 Attrs.hasAttribute(Attribute::ReadOnly)),
2121 "Attributes "
2122 "'inalloca and readonly' are incompatible!",
2123 V);
2124
2125 Check(!(Attrs.hasAttribute(Attribute::StructRet) &&
2126 Attrs.hasAttribute(Attribute::Returned)),
2127 "Attributes "
2128 "'sret and returned' are incompatible!",
2129 V);
2130
2131 Check(!(Attrs.hasAttribute(Attribute::ZExt) &&
2132 Attrs.hasAttribute(Attribute::SExt)),
2133 "Attributes "
2134 "'zeroext and signext' are incompatible!",
2135 V);
2136
2137 Check(!(Attrs.hasAttribute(Attribute::ReadNone) &&
2138 Attrs.hasAttribute(Attribute::ReadOnly)),
2139 "Attributes "
2140 "'readnone and readonly' are incompatible!",
2141 V);
2142
2143 Check(!(Attrs.hasAttribute(Attribute::ReadNone) &&
2144 Attrs.hasAttribute(Attribute::WriteOnly)),
2145 "Attributes "
2146 "'readnone and writeonly' are incompatible!",
2147 V);
2148
2149 Check(!(Attrs.hasAttribute(Attribute::ReadOnly) &&
2150 Attrs.hasAttribute(Attribute::WriteOnly)),
2151 "Attributes "
2152 "'readonly and writeonly' are incompatible!",
2153 V);
2154
2155 Check(!(Attrs.hasAttribute(Attribute::NoInline) &&
2156 Attrs.hasAttribute(Attribute::AlwaysInline)),
2157 "Attributes "
2158 "'noinline and alwaysinline' are incompatible!",
2159 V);
2160
2161 Check(!(Attrs.hasAttribute(Attribute::Writable) &&
2162 Attrs.hasAttribute(Attribute::ReadNone)),
2163 "Attributes writable and readnone are incompatible!", V);
2164
2165 Check(!(Attrs.hasAttribute(Attribute::Writable) &&
2166 Attrs.hasAttribute(Attribute::ReadOnly)),
2167 "Attributes writable and readonly are incompatible!", V);
2168
2169 AttributeMask IncompatibleAttrs = AttributeFuncs::typeIncompatible(Ty, Attrs);
2170 for (Attribute Attr : Attrs) {
2171 if (!Attr.isStringAttribute() &&
2172 IncompatibleAttrs.contains(Attr.getKindAsEnum())) {
2173 CheckFailed("Attribute '" + Attr.getAsString() +
2174 "' applied to incompatible type!", V);
2175 return;
2176 }
2177 }
2178
2179 if (isa<PointerType>(Ty)) {
2180 if (Attrs.hasAttribute(Attribute::Alignment)) {
2181 Align AttrAlign = Attrs.getAlignment().valueOrOne();
2182 Check(AttrAlign.value() <= Value::MaximumAlignment,
2183 "huge alignment values are unsupported", V);
2184 }
2185 if (Attrs.hasAttribute(Attribute::ByVal)) {
2186 Type *ByValTy = Attrs.getByValType();
2187 SmallPtrSet<Type *, 4> Visited;
2188 Check(ByValTy->isSized(&Visited),
2189 "Attribute 'byval' does not support unsized types!", V);
2190 // Check if it is or contains a target extension type that disallows being
2191 // used on the stack.
2193 "'byval' argument has illegal target extension type", V);
2194 Check(DL.getTypeAllocSize(ByValTy).getKnownMinValue() < (1ULL << 32),
2195 "huge 'byval' arguments are unsupported", V);
2196 }
2197 if (Attrs.hasAttribute(Attribute::ByRef)) {
2198 SmallPtrSet<Type *, 4> Visited;
2199 Check(Attrs.getByRefType()->isSized(&Visited),
2200 "Attribute 'byref' does not support unsized types!", V);
2201 Check(DL.getTypeAllocSize(Attrs.getByRefType()).getKnownMinValue() <
2202 (1ULL << 32),
2203 "huge 'byref' arguments are unsupported", V);
2204 }
2205 if (Attrs.hasAttribute(Attribute::InAlloca)) {
2206 SmallPtrSet<Type *, 4> Visited;
2207 Check(Attrs.getInAllocaType()->isSized(&Visited),
2208 "Attribute 'inalloca' does not support unsized types!", V);
2209 Check(DL.getTypeAllocSize(Attrs.getInAllocaType()).getKnownMinValue() <
2210 (1ULL << 32),
2211 "huge 'inalloca' arguments are unsupported", V);
2212 }
2213 if (Attrs.hasAttribute(Attribute::Preallocated)) {
2214 SmallPtrSet<Type *, 4> Visited;
2215 Check(Attrs.getPreallocatedType()->isSized(&Visited),
2216 "Attribute 'preallocated' does not support unsized types!", V);
2217 Check(
2218 DL.getTypeAllocSize(Attrs.getPreallocatedType()).getKnownMinValue() <
2219 (1ULL << 32),
2220 "huge 'preallocated' arguments are unsupported", V);
2221 }
2222 }
2223
2224 if (Attrs.hasAttribute(Attribute::Initializes)) {
2225 auto Inits = Attrs.getAttribute(Attribute::Initializes).getInitializes();
2226 Check(!Inits.empty(), "Attribute 'initializes' does not support empty list",
2227 V);
2229 "Attribute 'initializes' does not support unordered ranges", V);
2230 }
2231
2232 if (Attrs.hasAttribute(Attribute::NoFPClass)) {
2233 uint64_t Val = Attrs.getAttribute(Attribute::NoFPClass).getValueAsInt();
2234 Check(Val != 0, "Attribute 'nofpclass' must have at least one test bit set",
2235 V);
2236 Check((Val & ~static_cast<unsigned>(fcAllFlags)) == 0,
2237 "Invalid value for 'nofpclass' test mask", V);
2238 }
2239 if (Attrs.hasAttribute(Attribute::Range)) {
2240 const ConstantRange &CR =
2241 Attrs.getAttribute(Attribute::Range).getValueAsConstantRange();
2243 "Range bit width must match type bit width!", V);
2244 }
2245}
2246
2247void Verifier::checkUnsignedBaseTenFuncAttr(AttributeList Attrs, StringRef Attr,
2248 const Value *V) {
2249 if (Attrs.hasFnAttr(Attr)) {
2250 StringRef S = Attrs.getFnAttr(Attr).getValueAsString();
2251 unsigned N;
2252 if (S.getAsInteger(10, N))
2253 CheckFailed("\"" + Attr + "\" takes an unsigned integer: " + S, V);
2254 }
2255}
2256
2257// Check parameter attributes against a function type.
2258// The value V is printed in error messages.
2259void Verifier::verifyFunctionAttrs(FunctionType *FT, AttributeList Attrs,
2260 const Value *V, bool IsIntrinsic,
2261 bool IsInlineAsm) {
2262 if (Attrs.isEmpty())
2263 return;
2264
2265 if (AttributeListsVisited.insert(Attrs.getRawPointer()).second) {
2266 Check(Attrs.hasParentContext(Context),
2267 "Attribute list does not match Module context!", &Attrs, V);
2268 for (const auto &AttrSet : Attrs) {
2269 Check(!AttrSet.hasAttributes() || AttrSet.hasParentContext(Context),
2270 "Attribute set does not match Module context!", &AttrSet, V);
2271 for (const auto &A : AttrSet) {
2272 Check(A.hasParentContext(Context),
2273 "Attribute does not match Module context!", &A, V);
2274 }
2275 }
2276 }
2277
2278 bool SawNest = false;
2279 bool SawReturned = false;
2280 bool SawSRet = false;
2281 bool SawSwiftSelf = false;
2282 bool SawSwiftAsync = false;
2283 bool SawSwiftError = false;
2284
2285 // Verify return value attributes.
2286 AttributeSet RetAttrs = Attrs.getRetAttrs();
2287 for (Attribute RetAttr : RetAttrs)
2288 Check(RetAttr.isStringAttribute() ||
2289 Attribute::canUseAsRetAttr(RetAttr.getKindAsEnum()),
2290 "Attribute '" + RetAttr.getAsString() +
2291 "' does not apply to function return values",
2292 V);
2293
2294 unsigned MaxParameterWidth = 0;
2295 auto GetMaxParameterWidth = [&MaxParameterWidth](Type *Ty) {
2296 if (Ty->isVectorTy()) {
2297 if (auto *VT = dyn_cast<FixedVectorType>(Ty)) {
2298 unsigned Size = VT->getPrimitiveSizeInBits().getFixedValue();
2299 if (Size > MaxParameterWidth)
2300 MaxParameterWidth = Size;
2301 }
2302 }
2303 };
2304 GetMaxParameterWidth(FT->getReturnType());
2305 verifyParameterAttrs(RetAttrs, FT->getReturnType(), V);
2306
2307 // Verify parameter attributes.
2308 for (unsigned i = 0, e = FT->getNumParams(); i != e; ++i) {
2309 Type *Ty = FT->getParamType(i);
2310 AttributeSet ArgAttrs = Attrs.getParamAttrs(i);
2311
2312 if (!IsIntrinsic) {
2313 Check(!ArgAttrs.hasAttribute(Attribute::ImmArg),
2314 "immarg attribute only applies to intrinsics", V);
2315 if (!IsInlineAsm)
2316 Check(!ArgAttrs.hasAttribute(Attribute::ElementType),
2317 "Attribute 'elementtype' can only be applied to intrinsics"
2318 " and inline asm.",
2319 V);
2320 }
2321
2322 verifyParameterAttrs(ArgAttrs, Ty, V);
2323 GetMaxParameterWidth(Ty);
2324
2325 if (ArgAttrs.hasAttribute(Attribute::Nest)) {
2326 Check(!SawNest, "More than one parameter has attribute nest!", V);
2327 SawNest = true;
2328 }
2329
2330 if (ArgAttrs.hasAttribute(Attribute::Returned)) {
2331 Check(!SawReturned, "More than one parameter has attribute returned!", V);
2332 Check(Ty->canLosslesslyBitCastTo(FT->getReturnType()),
2333 "Incompatible argument and return types for 'returned' attribute",
2334 V);
2335 SawReturned = true;
2336 }
2337
2338 if (ArgAttrs.hasAttribute(Attribute::StructRet)) {
2339 Check(!SawSRet, "Cannot have multiple 'sret' parameters!", V);
2340 Check(i == 0 || i == 1,
2341 "Attribute 'sret' is not on first or second parameter!", V);
2342 SawSRet = true;
2343 }
2344
2345 if (ArgAttrs.hasAttribute(Attribute::SwiftSelf)) {
2346 Check(!SawSwiftSelf, "Cannot have multiple 'swiftself' parameters!", V);
2347 SawSwiftSelf = true;
2348 }
2349
2350 if (ArgAttrs.hasAttribute(Attribute::SwiftAsync)) {
2351 Check(!SawSwiftAsync, "Cannot have multiple 'swiftasync' parameters!", V);
2352 SawSwiftAsync = true;
2353 }
2354
2355 if (ArgAttrs.hasAttribute(Attribute::SwiftError)) {
2356 Check(!SawSwiftError, "Cannot have multiple 'swifterror' parameters!", V);
2357 SawSwiftError = true;
2358 }
2359
2360 if (ArgAttrs.hasAttribute(Attribute::InAlloca)) {
2361 Check(i == FT->getNumParams() - 1,
2362 "inalloca isn't on the last parameter!", V);
2363 }
2364 }
2365
2366 if (!Attrs.hasFnAttrs())
2367 return;
2368
2369 verifyAttributeTypes(Attrs.getFnAttrs(), V);
2370 for (Attribute FnAttr : Attrs.getFnAttrs())
2371 Check(FnAttr.isStringAttribute() ||
2372 Attribute::canUseAsFnAttr(FnAttr.getKindAsEnum()),
2373 "Attribute '" + FnAttr.getAsString() +
2374 "' does not apply to functions!",
2375 V);
2376
2377 Check(!(Attrs.hasFnAttr(Attribute::NoInline) &&
2378 Attrs.hasFnAttr(Attribute::AlwaysInline)),
2379 "Attributes 'noinline and alwaysinline' are incompatible!", V);
2380
2381 if (Attrs.hasFnAttr(Attribute::OptimizeNone)) {
2382 Check(Attrs.hasFnAttr(Attribute::NoInline),
2383 "Attribute 'optnone' requires 'noinline'!", V);
2384
2385 Check(!Attrs.hasFnAttr(Attribute::OptimizeForSize),
2386 "Attributes 'optsize and optnone' are incompatible!", V);
2387
2388 Check(!Attrs.hasFnAttr(Attribute::MinSize),
2389 "Attributes 'minsize and optnone' are incompatible!", V);
2390
2391 Check(!Attrs.hasFnAttr(Attribute::OptimizeForDebugging),
2392 "Attributes 'optdebug and optnone' are incompatible!", V);
2393 }
2394
2395 Check(!(Attrs.hasFnAttr(Attribute::SanitizeRealtime) &&
2396 Attrs.hasFnAttr(Attribute::SanitizeRealtimeBlocking)),
2397 "Attributes "
2398 "'sanitize_realtime and sanitize_realtime_blocking' are incompatible!",
2399 V);
2400
2401 if (Attrs.hasFnAttr(Attribute::OptimizeForDebugging)) {
2402 Check(!Attrs.hasFnAttr(Attribute::OptimizeForSize),
2403 "Attributes 'optsize and optdebug' are incompatible!", V);
2404
2405 Check(!Attrs.hasFnAttr(Attribute::MinSize),
2406 "Attributes 'minsize and optdebug' are incompatible!", V);
2407 }
2408
2409 Check(!Attrs.hasAttrSomewhere(Attribute::Writable) ||
2410 isModSet(Attrs.getMemoryEffects().getModRef(IRMemLocation::ArgMem)),
2411 "Attribute writable and memory without argmem: write are incompatible!",
2412 V);
2413
2414 if (Attrs.hasFnAttr("aarch64_pstate_sm_enabled")) {
2415 Check(!Attrs.hasFnAttr("aarch64_pstate_sm_compatible"),
2416 "Attributes 'aarch64_pstate_sm_enabled and "
2417 "aarch64_pstate_sm_compatible' are incompatible!",
2418 V);
2419 }
2420
2421 Check((Attrs.hasFnAttr("aarch64_new_za") + Attrs.hasFnAttr("aarch64_in_za") +
2422 Attrs.hasFnAttr("aarch64_inout_za") +
2423 Attrs.hasFnAttr("aarch64_out_za") +
2424 Attrs.hasFnAttr("aarch64_preserves_za") +
2425 Attrs.hasFnAttr("aarch64_za_state_agnostic")) <= 1,
2426 "Attributes 'aarch64_new_za', 'aarch64_in_za', 'aarch64_out_za', "
2427 "'aarch64_inout_za', 'aarch64_preserves_za' and "
2428 "'aarch64_za_state_agnostic' are mutually exclusive",
2429 V);
2430
2431 Check((Attrs.hasFnAttr("aarch64_new_zt0") +
2432 Attrs.hasFnAttr("aarch64_in_zt0") +
2433 Attrs.hasFnAttr("aarch64_inout_zt0") +
2434 Attrs.hasFnAttr("aarch64_out_zt0") +
2435 Attrs.hasFnAttr("aarch64_preserves_zt0") +
2436 Attrs.hasFnAttr("aarch64_za_state_agnostic")) <= 1,
2437 "Attributes 'aarch64_new_zt0', 'aarch64_in_zt0', 'aarch64_out_zt0', "
2438 "'aarch64_inout_zt0', 'aarch64_preserves_zt0' and "
2439 "'aarch64_za_state_agnostic' are mutually exclusive",
2440 V);
2441
2442 if (Attrs.hasFnAttr(Attribute::JumpTable)) {
2443 const GlobalValue *GV = cast<GlobalValue>(V);
2445 "Attribute 'jumptable' requires 'unnamed_addr'", V);
2446 }
2447
2448 if (auto Args = Attrs.getFnAttrs().getAllocSizeArgs()) {
2449 auto CheckParam = [&](StringRef Name, unsigned ParamNo) {
2450 if (ParamNo >= FT->getNumParams()) {
2451 CheckFailed("'allocsize' " + Name + " argument is out of bounds", V);
2452 return false;
2453 }
2454
2455 if (!FT->getParamType(ParamNo)->isIntegerTy()) {
2456 CheckFailed("'allocsize' " + Name +
2457 " argument must refer to an integer parameter",
2458 V);
2459 return false;
2460 }
2461
2462 return true;
2463 };
2464
2465 if (!CheckParam("element size", Args->first))
2466 return;
2467
2468 if (Args->second && !CheckParam("number of elements", *Args->second))
2469 return;
2470 }
2471
2472 if (Attrs.hasFnAttr(Attribute::AllocKind)) {
2473 AllocFnKind K = Attrs.getAllocKind();
2475 K & (AllocFnKind::Alloc | AllocFnKind::Realloc | AllocFnKind::Free);
2476 if (!is_contained(
2477 {AllocFnKind::Alloc, AllocFnKind::Realloc, AllocFnKind::Free},
2478 Type))
2479 CheckFailed(
2480 "'allockind()' requires exactly one of alloc, realloc, and free");
2481 if ((Type == AllocFnKind::Free) &&
2482 ((K & (AllocFnKind::Uninitialized | AllocFnKind::Zeroed |
2483 AllocFnKind::Aligned)) != AllocFnKind::Unknown))
2484 CheckFailed("'allockind(\"free\")' doesn't allow uninitialized, zeroed, "
2485 "or aligned modifiers.");
2486 AllocFnKind ZeroedUninit = AllocFnKind::Uninitialized | AllocFnKind::Zeroed;
2487 if ((K & ZeroedUninit) == ZeroedUninit)
2488 CheckFailed("'allockind()' can't be both zeroed and uninitialized");
2489 }
2490
2491 if (Attribute A = Attrs.getFnAttr("alloc-variant-zeroed"); A.isValid()) {
2492 StringRef S = A.getValueAsString();
2493 Check(!S.empty(), "'alloc-variant-zeroed' must not be empty");
2494 Function *Variant = M.getFunction(S);
2495 if (Variant) {
2496 Attribute Family = Attrs.getFnAttr("alloc-family");
2497 Attribute VariantFamily = Variant->getFnAttribute("alloc-family");
2498 if (Family.isValid())
2499 Check(VariantFamily.isValid() &&
2500 VariantFamily.getValueAsString() == Family.getValueAsString(),
2501 "'alloc-variant-zeroed' must name a function belonging to the "
2502 "same 'alloc-family'");
2503
2504 Check(Variant->hasFnAttribute(Attribute::AllocKind) &&
2505 (Variant->getFnAttribute(Attribute::AllocKind).getAllocKind() &
2506 AllocFnKind::Zeroed) != AllocFnKind::Unknown,
2507 "'alloc-variant-zeroed' must name a function with "
2508 "'allockind(\"zeroed\")'");
2509
2510 Check(FT == Variant->getFunctionType(),
2511 "'alloc-variant-zeroed' must name a function with the same "
2512 "signature");
2513 }
2514 }
2515
2516 if (Attrs.hasFnAttr(Attribute::VScaleRange)) {
2517 unsigned VScaleMin = Attrs.getFnAttrs().getVScaleRangeMin();
2518 if (VScaleMin == 0)
2519 CheckFailed("'vscale_range' minimum must be greater than 0", V);
2520 else if (!isPowerOf2_32(VScaleMin))
2521 CheckFailed("'vscale_range' minimum must be power-of-two value", V);
2522 std::optional<unsigned> VScaleMax = Attrs.getFnAttrs().getVScaleRangeMax();
2523 if (VScaleMax && VScaleMin > VScaleMax)
2524 CheckFailed("'vscale_range' minimum cannot be greater than maximum", V);
2525 else if (VScaleMax && !isPowerOf2_32(*VScaleMax))
2526 CheckFailed("'vscale_range' maximum must be power-of-two value", V);
2527 }
2528
2529 if (Attribute FPAttr = Attrs.getFnAttr("frame-pointer"); FPAttr.isValid()) {
2530 StringRef FP = FPAttr.getValueAsString();
2531 if (FP != "all" && FP != "non-leaf" && FP != "none" && FP != "reserved" &&
2532 FP != "non-leaf-no-reserve")
2533 CheckFailed("invalid value for 'frame-pointer' attribute: " + FP, V);
2534 }
2535
2536 checkUnsignedBaseTenFuncAttr(Attrs, "patchable-function-prefix", V);
2537 checkUnsignedBaseTenFuncAttr(Attrs, "patchable-function-entry", V);
2538 if (Attrs.hasFnAttr("patchable-function-entry-section"))
2539 Check(!Attrs.getFnAttr("patchable-function-entry-section")
2540 .getValueAsString()
2541 .empty(),
2542 "\"patchable-function-entry-section\" must not be empty");
2543 checkUnsignedBaseTenFuncAttr(Attrs, "warn-stack-size", V);
2544
2545 if (auto A = Attrs.getFnAttr("sign-return-address"); A.isValid()) {
2546 StringRef S = A.getValueAsString();
2547 if (S != "none" && S != "all" && S != "non-leaf")
2548 CheckFailed("invalid value for 'sign-return-address' attribute: " + S, V);
2549 }
2550
2551 if (auto A = Attrs.getFnAttr("sign-return-address-key"); A.isValid()) {
2552 StringRef S = A.getValueAsString();
2553 if (S != "a_key" && S != "b_key")
2554 CheckFailed("invalid value for 'sign-return-address-key' attribute: " + S,
2555 V);
2556 if (auto AA = Attrs.getFnAttr("sign-return-address"); !AA.isValid()) {
2557 CheckFailed(
2558 "'sign-return-address-key' present without `sign-return-address`");
2559 }
2560 }
2561
2562 if (auto A = Attrs.getFnAttr("branch-target-enforcement"); A.isValid()) {
2563 StringRef S = A.getValueAsString();
2564 if (S != "" && S != "true" && S != "false")
2565 CheckFailed(
2566 "invalid value for 'branch-target-enforcement' attribute: " + S, V);
2567 }
2568
2569 if (auto A = Attrs.getFnAttr("branch-protection-pauth-lr"); A.isValid()) {
2570 StringRef S = A.getValueAsString();
2571 if (S != "" && S != "true" && S != "false")
2572 CheckFailed(
2573 "invalid value for 'branch-protection-pauth-lr' attribute: " + S, V);
2574 }
2575
2576 if (auto A = Attrs.getFnAttr("guarded-control-stack"); A.isValid()) {
2577 StringRef S = A.getValueAsString();
2578 if (S != "" && S != "true" && S != "false")
2579 CheckFailed("invalid value for 'guarded-control-stack' attribute: " + S,
2580 V);
2581 }
2582
2583 if (auto A = Attrs.getFnAttr("vector-function-abi-variant"); A.isValid()) {
2584 StringRef S = A.getValueAsString();
2585 const std::optional<VFInfo> Info = VFABI::tryDemangleForVFABI(S, FT);
2586 if (!Info)
2587 CheckFailed("invalid name for a VFABI variant: " + S, V);
2588 }
2589
2590 if (auto A = Attrs.getFnAttr("denormal-fp-math"); A.isValid()) {
2591 StringRef S = A.getValueAsString();
2593 CheckFailed("invalid value for 'denormal-fp-math' attribute: " + S, V);
2594 }
2595
2596 if (auto A = Attrs.getFnAttr("denormal-fp-math-f32"); A.isValid()) {
2597 StringRef S = A.getValueAsString();
2599 CheckFailed("invalid value for 'denormal-fp-math-f32' attribute: " + S,
2600 V);
2601 }
2602
2603 if (auto A = Attrs.getFnAttr("modular-format"); A.isValid()) {
2604 StringRef S = A.getValueAsString();
2606 S.split(Args, ',');
2607 Check(Args.size() >= 5,
2608 "modular-format attribute requires at least 5 arguments", V);
2609 unsigned FirstArgIdx;
2610 Check(!Args[2].getAsInteger(10, FirstArgIdx),
2611 "modular-format attribute first arg index is not an integer", V);
2612 unsigned UpperBound = FT->getNumParams() + (FT->isVarArg() ? 1 : 0);
2613 Check(FirstArgIdx > 0 && FirstArgIdx <= UpperBound,
2614 "modular-format attribute first arg index is out of bounds", V);
2615 }
2616}
// Validate the "unknown" form of a function-level !prof attachment: it must
// carry exactly one extra operand — a non-empty MDString (presumably naming
// the pass that marked the profile unknown; judging by the variable name —
// confirm against callers).
void Verifier::verifyUnknownProfileMetadata(MDNode *MD) {
  // Operand 0 is the marker itself; operand 1 is the single extra operand.
  Check(MD->getNumOperands() == 2,
        "'unknown' !prof should have a single additional operand", MD);
  auto *PassName = dyn_cast<MDString>(MD->getOperand(1));
  // NOTE(review): the dereference below relies on Check bailing out of the
  // function when PassName is null — confirm the Check macro's behavior.
  Check(PassName != nullptr,
        "'unknown' !prof should have an additional operand of type "
        "string");
  Check(!PassName->getString().empty(),
        "the 'unknown' !prof operand should not be an empty string");
}
2627
2628void Verifier::verifyFunctionMetadata(
2629 ArrayRef<std::pair<unsigned, MDNode *>> MDs) {
2630 for (const auto &Pair : MDs) {
2631 if (Pair.first == LLVMContext::MD_prof) {
2632 MDNode *MD = Pair.second;
2633 Check(MD->getNumOperands() >= 2,
2634 "!prof annotations should have no less than 2 operands", MD);
2635 // We may have functions that are synthesized by the compiler, e.g. in
2636 // WPD, that we can't currently determine the entry count.
2637 if (MD->getOperand(0).equalsStr(
2639 verifyUnknownProfileMetadata(MD);
2640 continue;
2641 }
2642
2643 // Check first operand.
2644 Check(MD->getOperand(0) != nullptr, "first operand should not be null",
2645 MD);
2647 "expected string with name of the !prof annotation", MD);
2648 MDString *MDS = cast<MDString>(MD->getOperand(0));
2649 StringRef ProfName = MDS->getString();
2652 "first operand should be 'function_entry_count'"
2653 " or 'synthetic_function_entry_count'",
2654 MD);
2655
2656 // Check second operand.
2657 Check(MD->getOperand(1) != nullptr, "second operand should not be null",
2658 MD);
2660 "expected integer argument to function_entry_count", MD);
2661 } else if (Pair.first == LLVMContext::MD_kcfi_type) {
2662 MDNode *MD = Pair.second;
2663 Check(MD->getNumOperands() == 1,
2664 "!kcfi_type must have exactly one operand", MD);
2665 Check(MD->getOperand(0) != nullptr, "!kcfi_type operand must not be null",
2666 MD);
2668 "expected a constant operand for !kcfi_type", MD);
2669 Constant *C = cast<ConstantAsMetadata>(MD->getOperand(0))->getValue();
2670 Check(isa<ConstantInt>(C) && isa<IntegerType>(C->getType()),
2671 "expected a constant integer operand for !kcfi_type", MD);
2673 "expected a 32-bit integer constant operand for !kcfi_type", MD);
2674 }
2675 }
2676}
2677
2678void Verifier::visitConstantExprsRecursively(const Constant *EntryC) {
2679 if (EntryC->getNumOperands() == 0)
2680 return;
2681
2682 if (!ConstantExprVisited.insert(EntryC).second)
2683 return;
2684
2686 Stack.push_back(EntryC);
2687
2688 while (!Stack.empty()) {
2689 const Constant *C = Stack.pop_back_val();
2690
2691 // Check this constant expression.
2692 if (const auto *CE = dyn_cast<ConstantExpr>(C))
2693 visitConstantExpr(CE);
2694
2695 if (const auto *CPA = dyn_cast<ConstantPtrAuth>(C))
2696 visitConstantPtrAuth(CPA);
2697
2698 if (const auto *GV = dyn_cast<GlobalValue>(C)) {
2699 // Global Values get visited separately, but we do need to make sure
2700 // that the global value is in the correct module
2701 Check(GV->getParent() == &M, "Referencing global in another module!",
2702 EntryC, &M, GV, GV->getParent());
2703 continue;
2704 }
2705
2706 // Visit all sub-expressions.
2707 for (const Use &U : C->operands()) {
2708 const auto *OpC = dyn_cast<Constant>(U);
2709 if (!OpC)
2710 continue;
2711 if (!ConstantExprVisited.insert(OpC).second)
2712 continue;
2713 Stack.push_back(OpC);
2714 }
2715 }
2716}
2717
2718void Verifier::visitConstantExpr(const ConstantExpr *CE) {
2719 if (CE->getOpcode() == Instruction::BitCast)
2720 Check(CastInst::castIsValid(Instruction::BitCast, CE->getOperand(0),
2721 CE->getType()),
2722 "Invalid bitcast", CE);
2723 else if (CE->getOpcode() == Instruction::PtrToAddr)
2724 checkPtrToAddr(CE->getOperand(0)->getType(), CE->getType(), *CE);
2725}
2726
2727void Verifier::visitConstantPtrAuth(const ConstantPtrAuth *CPA) {
2728 Check(CPA->getPointer()->getType()->isPointerTy(),
2729 "signed ptrauth constant base pointer must have pointer type");
2730
2731 Check(CPA->getType() == CPA->getPointer()->getType(),
2732 "signed ptrauth constant must have same type as its base pointer");
2733
2734 Check(CPA->getKey()->getBitWidth() == 32,
2735 "signed ptrauth constant key must be i32 constant integer");
2736
2738 "signed ptrauth constant address discriminator must be a pointer");
2739
2740 Check(CPA->getDiscriminator()->getBitWidth() == 64,
2741 "signed ptrauth constant discriminator must be i64 constant integer");
2742
2744 "signed ptrauth constant deactivation symbol must be a pointer");
2745
2748 "signed ptrauth constant deactivation symbol must be a global value "
2749 "or null");
2750}
2751
2752bool Verifier::verifyAttributeCount(AttributeList Attrs, unsigned Params) {
2753 // There shouldn't be more attribute sets than there are parameters plus the
2754 // function and return value.
2755 return Attrs.getNumAttrSets() <= Params + 2;
2756}
2757
2758void Verifier::verifyInlineAsmCall(const CallBase &Call) {
2759 const InlineAsm *IA = cast<InlineAsm>(Call.getCalledOperand());
2760 unsigned ArgNo = 0;
2761 unsigned LabelNo = 0;
2762 for (const InlineAsm::ConstraintInfo &CI : IA->ParseConstraints()) {
2763 if (CI.Type == InlineAsm::isLabel) {
2764 ++LabelNo;
2765 continue;
2766 }
2767
2768 // Only deal with constraints that correspond to call arguments.
2769 if (!CI.hasArg())
2770 continue;
2771
2772 if (CI.isIndirect) {
2773 const Value *Arg = Call.getArgOperand(ArgNo);
2774 Check(Arg->getType()->isPointerTy(),
2775 "Operand for indirect constraint must have pointer type", &Call);
2776
2778 "Operand for indirect constraint must have elementtype attribute",
2779 &Call);
2780 } else {
2781 Check(!Call.paramHasAttr(ArgNo, Attribute::ElementType),
2782 "Elementtype attribute can only be applied for indirect "
2783 "constraints",
2784 &Call);
2785 }
2786
2787 ArgNo++;
2788 }
2789
2790 if (auto *CallBr = dyn_cast<CallBrInst>(&Call)) {
2791 Check(LabelNo == CallBr->getNumIndirectDests(),
2792 "Number of label constraints does not match number of callbr dests",
2793 &Call);
2794 } else {
2795 Check(LabelNo == 0, "Label constraints can only be used with callbr",
2796 &Call);
2797 }
2798}
2799
2800/// Verify that statepoint intrinsic is well formed.
2801void Verifier::verifyStatepoint(const CallBase &Call) {
2802 assert(Call.getIntrinsicID() == Intrinsic::experimental_gc_statepoint);
2803
2806 "gc.statepoint must read and write all memory to preserve "
2807 "reordering restrictions required by safepoint semantics",
2808 Call);
2809
2810 const int64_t NumPatchBytes =
2811 cast<ConstantInt>(Call.getArgOperand(1))->getSExtValue();
2812 assert(isInt<32>(NumPatchBytes) && "NumPatchBytesV is an i32!");
2813 Check(NumPatchBytes >= 0,
2814 "gc.statepoint number of patchable bytes must be "
2815 "positive",
2816 Call);
2817
2818 Type *TargetElemType = Call.getParamElementType(2);
2819 Check(TargetElemType,
2820 "gc.statepoint callee argument must have elementtype attribute", Call);
2821 FunctionType *TargetFuncType = dyn_cast<FunctionType>(TargetElemType);
2822 Check(TargetFuncType,
2823 "gc.statepoint callee elementtype must be function type", Call);
2824
2825 const int NumCallArgs = cast<ConstantInt>(Call.getArgOperand(3))->getZExtValue();
2826 Check(NumCallArgs >= 0,
2827 "gc.statepoint number of arguments to underlying call "
2828 "must be positive",
2829 Call);
2830 const int NumParams = (int)TargetFuncType->getNumParams();
2831 if (TargetFuncType->isVarArg()) {
2832 Check(NumCallArgs >= NumParams,
2833 "gc.statepoint mismatch in number of vararg call args", Call);
2834
2835 // TODO: Remove this limitation
2836 Check(TargetFuncType->getReturnType()->isVoidTy(),
2837 "gc.statepoint doesn't support wrapping non-void "
2838 "vararg functions yet",
2839 Call);
2840 } else
2841 Check(NumCallArgs == NumParams,
2842 "gc.statepoint mismatch in number of call args", Call);
2843
2844 const uint64_t Flags
2845 = cast<ConstantInt>(Call.getArgOperand(4))->getZExtValue();
2846 Check((Flags & ~(uint64_t)StatepointFlags::MaskAll) == 0,
2847 "unknown flag used in gc.statepoint flags argument", Call);
2848
2849 // Verify that the types of the call parameter arguments match
2850 // the type of the wrapped callee.
2851 AttributeList Attrs = Call.getAttributes();
2852 for (int i = 0; i < NumParams; i++) {
2853 Type *ParamType = TargetFuncType->getParamType(i);
2854 Type *ArgType = Call.getArgOperand(5 + i)->getType();
2855 Check(ArgType == ParamType,
2856 "gc.statepoint call argument does not match wrapped "
2857 "function type",
2858 Call);
2859
2860 if (TargetFuncType->isVarArg()) {
2861 AttributeSet ArgAttrs = Attrs.getParamAttrs(5 + i);
2862 Check(!ArgAttrs.hasAttribute(Attribute::StructRet),
2863 "Attribute 'sret' cannot be used for vararg call arguments!", Call);
2864 }
2865 }
2866
2867 const int EndCallArgsInx = 4 + NumCallArgs;
2868
2869 const Value *NumTransitionArgsV = Call.getArgOperand(EndCallArgsInx + 1);
2870 Check(isa<ConstantInt>(NumTransitionArgsV),
2871 "gc.statepoint number of transition arguments "
2872 "must be constant integer",
2873 Call);
2874 const int NumTransitionArgs =
2875 cast<ConstantInt>(NumTransitionArgsV)->getZExtValue();
2876 Check(NumTransitionArgs == 0,
2877 "gc.statepoint w/inline transition bundle is deprecated", Call);
2878 const int EndTransitionArgsInx = EndCallArgsInx + 1 + NumTransitionArgs;
2879
2880 const Value *NumDeoptArgsV = Call.getArgOperand(EndTransitionArgsInx + 1);
2881 Check(isa<ConstantInt>(NumDeoptArgsV),
2882 "gc.statepoint number of deoptimization arguments "
2883 "must be constant integer",
2884 Call);
2885 const int NumDeoptArgs = cast<ConstantInt>(NumDeoptArgsV)->getZExtValue();
2886 Check(NumDeoptArgs == 0,
2887 "gc.statepoint w/inline deopt operands is deprecated", Call);
2888
2889 const int ExpectedNumArgs = 7 + NumCallArgs;
2890 Check(ExpectedNumArgs == (int)Call.arg_size(),
2891 "gc.statepoint too many arguments", Call);
2892
2893 // Check that the only uses of this gc.statepoint are gc.result or
2894 // gc.relocate calls which are tied to this statepoint and thus part
2895 // of the same statepoint sequence
2896 for (const User *U : Call.users()) {
2897 const CallInst *UserCall = dyn_cast<const CallInst>(U);
2898 Check(UserCall, "illegal use of statepoint token", Call, U);
2899 if (!UserCall)
2900 continue;
2901 Check(isa<GCRelocateInst>(UserCall) || isa<GCResultInst>(UserCall),
2902 "gc.result or gc.relocate are the only value uses "
2903 "of a gc.statepoint",
2904 Call, U);
2905 if (isa<GCResultInst>(UserCall)) {
2906 Check(UserCall->getArgOperand(0) == &Call,
2907 "gc.result connected to wrong gc.statepoint", Call, UserCall);
2908 } else if (isa<GCRelocateInst>(Call)) {
2909 Check(UserCall->getArgOperand(0) == &Call,
2910 "gc.relocate connected to wrong gc.statepoint", Call, UserCall);
2911 }
2912 }
2913
2914 // Note: It is legal for a single derived pointer to be listed multiple
2915 // times. It's non-optimal, but it is legal. It can also happen after
2916 // insertion if we strip a bitcast away.
2917 // Note: It is really tempting to check that each base is relocated and
2918 // that a derived pointer is never reused as a base pointer. This turns
2919 // out to be problematic since optimizations run after safepoint insertion
2920 // can recognize equality properties that the insertion logic doesn't know
2921 // about. See example statepoint.ll in the verifier subdirectory
2922}
2923
2924void Verifier::verifyFrameRecoverIndices() {
2925 for (auto &Counts : FrameEscapeInfo) {
2926 Function *F = Counts.first;
2927 unsigned EscapedObjectCount = Counts.second.first;
2928 unsigned MaxRecoveredIndex = Counts.second.second;
2929 Check(MaxRecoveredIndex <= EscapedObjectCount,
2930 "all indices passed to llvm.localrecover must be less than the "
2931 "number of arguments passed to llvm.localescape in the parent "
2932 "function",
2933 F);
2934 }
2935}
2936
2937static Instruction *getSuccPad(Instruction *Terminator) {
2938 BasicBlock *UnwindDest;
2939 if (auto *II = dyn_cast<InvokeInst>(Terminator))
2940 UnwindDest = II->getUnwindDest();
2941 else if (auto *CSI = dyn_cast<CatchSwitchInst>(Terminator))
2942 UnwindDest = CSI->getUnwindDest();
2943 else
2944 UnwindDest = cast<CleanupReturnInst>(Terminator)->getUnwindDest();
2945 return &*UnwindDest->getFirstNonPHIIt();
2946}
2947
// Walk the sibling-funclet unwind graph recorded in SiblingFuncletInfo and
// reject cycles: EH pads must not (transitively) unwind to each other.
void Verifier::verifySiblingFuncletUnwinds() {
  llvm::TimeTraceScope timeScope("Verifier verify sibling funclet unwinds");
  // Visited: pads whose unwind chain has already been fully checked.
  // Active: pads on the chain currently being walked; hitting one of these
  // again means we found a cycle.
  SmallPtrSet<Instruction *, 8> Visited;
  SmallPtrSet<Instruction *, 8> Active;
  for (const auto &Pair : SiblingFuncletInfo) {
    Instruction *PredPad = Pair.first;
    if (Visited.count(PredPad))
      continue;
    Active.insert(PredPad);
    Instruction *Terminator = Pair.second;
    do {
      Instruction *SuccPad = getSuccPad(Terminator);
      if (Active.count(SuccPad)) {
        // Found a cycle; report error
        // Re-walk the cycle to collect every pad (and its terminator, when
        // distinct) so the diagnostic lists all participants.
        Instruction *CyclePad = SuccPad;
        SmallVector<Instruction *, 8> CycleNodes;
        do {
          CycleNodes.push_back(CyclePad);
          Instruction *CycleTerminator = SiblingFuncletInfo[CyclePad];
          if (CycleTerminator != CyclePad)
            CycleNodes.push_back(CycleTerminator);
          CyclePad = getSuccPad(CycleTerminator);
        } while (CyclePad != SuccPad);
        Check(false, "EH pads can't handle each other's exceptions",
              ArrayRef<Instruction *>(CycleNodes));
      }
      // Don't re-walk a node we've already checked
      if (!Visited.insert(SuccPad).second)
        break;
      // Walk to this successor if it has a map entry.
      PredPad = SuccPad;
      auto TermI = SiblingFuncletInfo.find(PredPad);
      if (TermI == SiblingFuncletInfo.end())
        break;
      Terminator = TermI->second;
      Active.insert(PredPad);
    } while (true);
    // Each node only has one successor, so we've walked all the active
    // nodes' successors.
    Active.clear();
  }
}
2990
2991// visitFunction - Verify that a function is ok.
2992//
2993void Verifier::visitFunction(const Function &F) {
2994 visitGlobalValue(F);
2995
2996 // Check function arguments.
2997 FunctionType *FT = F.getFunctionType();
2998 unsigned NumArgs = F.arg_size();
2999
3000 Check(&Context == &F.getContext(),
3001 "Function context does not match Module context!", &F);
3002
3003 Check(!F.hasCommonLinkage(), "Functions may not have common linkage", &F);
3004 Check(FT->getNumParams() == NumArgs,
3005 "# formal arguments must match # of arguments for function type!", &F,
3006 FT);
3007 Check(F.getReturnType()->isFirstClassType() ||
3008 F.getReturnType()->isVoidTy() || F.getReturnType()->isStructTy(),
3009 "Functions cannot return aggregate values!", &F);
3010
3011 Check(!F.hasStructRetAttr() || F.getReturnType()->isVoidTy(),
3012 "Invalid struct return type!", &F);
3013
3014 if (MaybeAlign A = F.getAlign()) {
3015 Check(A->value() <= Value::MaximumAlignment,
3016 "huge alignment values are unsupported", &F);
3017 }
3018
3019 AttributeList Attrs = F.getAttributes();
3020
3021 Check(verifyAttributeCount(Attrs, FT->getNumParams()),
3022 "Attribute after last parameter!", &F);
3023
3024 bool IsIntrinsic = F.isIntrinsic();
3025
3026 // Check function attributes.
3027 verifyFunctionAttrs(FT, Attrs, &F, IsIntrinsic, /* IsInlineAsm */ false);
3028
3029 // On function declarations/definitions, we do not support the builtin
3030 // attribute. We do not check this in VerifyFunctionAttrs since that is
3031 // checking for Attributes that can/can not ever be on functions.
3032 Check(!Attrs.hasFnAttr(Attribute::Builtin),
3033 "Attribute 'builtin' can only be applied to a callsite.", &F);
3034
3035 Check(!Attrs.hasAttrSomewhere(Attribute::ElementType),
3036 "Attribute 'elementtype' can only be applied to a callsite.", &F);
3037
3038 Check(!Attrs.hasFnAttr("aarch64_zt0_undef"),
3039 "Attribute 'aarch64_zt0_undef' can only be applied to a callsite.");
3040
3041 if (Attrs.hasFnAttr(Attribute::Naked))
3042 for (const Argument &Arg : F.args())
3043 Check(Arg.use_empty(), "cannot use argument of naked function", &Arg);
3044
3045 // Check that this function meets the restrictions on this calling convention.
3046 // Sometimes varargs is used for perfectly forwarding thunks, so some of these
3047 // restrictions can be lifted.
3048 switch (F.getCallingConv()) {
3049 default:
3050 case CallingConv::C:
3051 break;
3052 case CallingConv::X86_INTR: {
3053 Check(F.arg_empty() || Attrs.hasParamAttr(0, Attribute::ByVal),
3054 "Calling convention parameter requires byval", &F);
3055 break;
3056 }
3057 case CallingConv::AMDGPU_KERNEL:
3058 case CallingConv::SPIR_KERNEL:
3059 case CallingConv::AMDGPU_CS_Chain:
3060 case CallingConv::AMDGPU_CS_ChainPreserve:
3061 Check(F.getReturnType()->isVoidTy(),
3062 "Calling convention requires void return type", &F);
3063 [[fallthrough]];
3064 case CallingConv::AMDGPU_VS:
3065 case CallingConv::AMDGPU_HS:
3066 case CallingConv::AMDGPU_GS:
3067 case CallingConv::AMDGPU_PS:
3068 case CallingConv::AMDGPU_CS:
3069 Check(!F.hasStructRetAttr(), "Calling convention does not allow sret", &F);
3070 if (F.getCallingConv() != CallingConv::SPIR_KERNEL) {
3071 const unsigned StackAS = DL.getAllocaAddrSpace();
3072 unsigned i = 0;
3073 for (const Argument &Arg : F.args()) {
3074 Check(!Attrs.hasParamAttr(i, Attribute::ByVal),
3075 "Calling convention disallows byval", &F);
3076 Check(!Attrs.hasParamAttr(i, Attribute::Preallocated),
3077 "Calling convention disallows preallocated", &F);
3078 Check(!Attrs.hasParamAttr(i, Attribute::InAlloca),
3079 "Calling convention disallows inalloca", &F);
3080
3081 if (Attrs.hasParamAttr(i, Attribute::ByRef)) {
3082 // FIXME: Should also disallow LDS and GDS, but we don't have the enum
3083 // value here.
3084 Check(Arg.getType()->getPointerAddressSpace() != StackAS,
3085 "Calling convention disallows stack byref", &F);
3086 }
3087
3088 ++i;
3089 }
3090 }
3091
3092 [[fallthrough]];
3093 case CallingConv::Fast:
3094 case CallingConv::Cold:
3095 case CallingConv::Intel_OCL_BI:
3096 case CallingConv::PTX_Kernel:
3097 case CallingConv::PTX_Device:
3098 Check(!F.isVarArg(),
3099 "Calling convention does not support varargs or "
3100 "perfect forwarding!",
3101 &F);
3102 break;
3103 case CallingConv::AMDGPU_Gfx_WholeWave:
3104 Check(!F.arg_empty() && F.arg_begin()->getType()->isIntegerTy(1),
3105 "Calling convention requires first argument to be i1", &F);
3106 Check(!F.arg_begin()->hasInRegAttr(),
3107 "Calling convention requires first argument to not be inreg", &F);
3108 Check(!F.isVarArg(),
3109 "Calling convention does not support varargs or "
3110 "perfect forwarding!",
3111 &F);
3112 break;
3113 }
3114
3115 // Check that the argument values match the function type for this function...
3116 unsigned i = 0;
3117 for (const Argument &Arg : F.args()) {
3118 Check(Arg.getType() == FT->getParamType(i),
3119 "Argument value does not match function argument type!", &Arg,
3120 FT->getParamType(i));
3121 Check(Arg.getType()->isFirstClassType(),
3122 "Function arguments must have first-class types!", &Arg);
3123 if (!IsIntrinsic) {
3124 Check(!Arg.getType()->isMetadataTy(),
3125 "Function takes metadata but isn't an intrinsic", &Arg, &F);
3126 Check(!Arg.getType()->isTokenLikeTy(),
3127 "Function takes token but isn't an intrinsic", &Arg, &F);
3128 Check(!Arg.getType()->isX86_AMXTy(),
3129 "Function takes x86_amx but isn't an intrinsic", &Arg, &F);
3130 }
3131
3132 // Check that swifterror argument is only used by loads and stores.
3133 if (Attrs.hasParamAttr(i, Attribute::SwiftError)) {
3134 verifySwiftErrorValue(&Arg);
3135 }
3136 ++i;
3137 }
3138
3139 if (!IsIntrinsic) {
3140 Check(!F.getReturnType()->isTokenLikeTy(),
3141 "Function returns a token but isn't an intrinsic", &F);
3142 Check(!F.getReturnType()->isX86_AMXTy(),
3143 "Function returns a x86_amx but isn't an intrinsic", &F);
3144 }
3145
3146 // Get the function metadata attachments.
3148 F.getAllMetadata(MDs);
3149 assert(F.hasMetadata() != MDs.empty() && "Bit out-of-sync");
3150 verifyFunctionMetadata(MDs);
3151
3152 // Check validity of the personality function
3153 if (F.hasPersonalityFn()) {
3154 auto *Per = dyn_cast<Function>(F.getPersonalityFn()->stripPointerCasts());
3155 if (Per)
3156 Check(Per->getParent() == F.getParent(),
3157 "Referencing personality function in another module!", &F,
3158 F.getParent(), Per, Per->getParent());
3159 }
3160
3161 // EH funclet coloring can be expensive, recompute on-demand
3162 BlockEHFuncletColors.clear();
3163
3164 if (F.isMaterializable()) {
3165 // Function has a body somewhere we can't see.
3166 Check(MDs.empty(), "unmaterialized function cannot have metadata", &F,
3167 MDs.empty() ? nullptr : MDs.front().second);
3168 } else if (F.isDeclaration()) {
3169 for (const auto &I : MDs) {
3170 // This is used for call site debug information.
3171 CheckDI(I.first != LLVMContext::MD_dbg ||
3172 !cast<DISubprogram>(I.second)->isDistinct(),
3173 "function declaration may only have a unique !dbg attachment",
3174 &F);
3175 Check(I.first != LLVMContext::MD_prof,
3176 "function declaration may not have a !prof attachment", &F);
3177
3178 // Verify the metadata itself.
3179 visitMDNode(*I.second, AreDebugLocsAllowed::Yes);
3180 }
3181 Check(!F.hasPersonalityFn(),
3182 "Function declaration shouldn't have a personality routine", &F);
3183 } else {
3184 // Verify that this function (which has a body) is not named "llvm.*". It
3185 // is not legal to define intrinsics.
3186 Check(!IsIntrinsic, "llvm intrinsics cannot be defined!", &F);
3187
3188 // Check the entry node
3189 const BasicBlock *Entry = &F.getEntryBlock();
3190 Check(pred_empty(Entry),
3191 "Entry block to function must not have predecessors!", Entry);
3192
3193 // The address of the entry block cannot be taken, unless it is dead.
3194 if (Entry->hasAddressTaken()) {
3195 Check(!BlockAddress::lookup(Entry)->isConstantUsed(),
3196 "blockaddress may not be used with the entry block!", Entry);
3197 }
3198
3199 unsigned NumDebugAttachments = 0, NumProfAttachments = 0,
3200 NumKCFIAttachments = 0;
3201 // Visit metadata attachments.
3202 for (const auto &I : MDs) {
3203 // Verify that the attachment is legal.
3204 auto AllowLocs = AreDebugLocsAllowed::No;
3205 switch (I.first) {
3206 default:
3207 break;
3208 case LLVMContext::MD_dbg: {
3209 ++NumDebugAttachments;
3210 CheckDI(NumDebugAttachments == 1,
3211 "function must have a single !dbg attachment", &F, I.second);
3212 CheckDI(isa<DISubprogram>(I.second),
3213 "function !dbg attachment must be a subprogram", &F, I.second);
3214 CheckDI(cast<DISubprogram>(I.second)->isDistinct(),
3215 "function definition may only have a distinct !dbg attachment",
3216 &F);
3217
3218 auto *SP = cast<DISubprogram>(I.second);
3219 const Function *&AttachedTo = DISubprogramAttachments[SP];
3220 CheckDI(!AttachedTo || AttachedTo == &F,
3221 "DISubprogram attached to more than one function", SP, &F);
3222 AttachedTo = &F;
3223 AllowLocs = AreDebugLocsAllowed::Yes;
3224 break;
3225 }
3226 case LLVMContext::MD_prof:
3227 ++NumProfAttachments;
3228 Check(NumProfAttachments == 1,
3229 "function must have a single !prof attachment", &F, I.second);
3230 break;
3231 case LLVMContext::MD_kcfi_type:
3232 ++NumKCFIAttachments;
3233 Check(NumKCFIAttachments == 1,
3234 "function must have a single !kcfi_type attachment", &F,
3235 I.second);
3236 break;
3237 }
3238
3239 // Verify the metadata itself.
3240 visitMDNode(*I.second, AllowLocs);
3241 }
3242 }
3243
3244 // If this function is actually an intrinsic, verify that it is only used in
3245 // direct call/invokes, never having its "address taken".
3246 // Only do this if the module is materialized, otherwise we don't have all the
3247 // uses.
3248 if (F.isIntrinsic() && F.getParent()->isMaterialized()) {
3249 const User *U;
3250 if (F.hasAddressTaken(&U, false, true, false,
3251 /*IgnoreARCAttachedCall=*/true))
3252 Check(false, "Invalid user of intrinsic instruction!", U);
3253 }
3254
3255 // Check intrinsics' signatures.
3256 switch (F.getIntrinsicID()) {
3257 case Intrinsic::experimental_gc_get_pointer_base: {
3258 FunctionType *FT = F.getFunctionType();
3259 Check(FT->getNumParams() == 1, "wrong number of parameters", F);
3260 Check(isa<PointerType>(F.getReturnType()),
3261 "gc.get.pointer.base must return a pointer", F);
3262 Check(FT->getParamType(0) == F.getReturnType(),
3263 "gc.get.pointer.base operand and result must be of the same type", F);
3264 break;
3265 }
3266 case Intrinsic::experimental_gc_get_pointer_offset: {
3267 FunctionType *FT = F.getFunctionType();
3268 Check(FT->getNumParams() == 1, "wrong number of parameters", F);
3269 Check(isa<PointerType>(FT->getParamType(0)),
3270 "gc.get.pointer.offset operand must be a pointer", F);
3271 Check(F.getReturnType()->isIntegerTy(),
3272 "gc.get.pointer.offset must return integer", F);
3273 break;
3274 }
3275 }
3276
3277 auto *N = F.getSubprogram();
3278 HasDebugInfo = (N != nullptr);
3279 if (!HasDebugInfo)
3280 return;
3281
3282 // Check that all !dbg attachments lead to back to N.
3283 //
3284 // FIXME: Check this incrementally while visiting !dbg attachments.
3285 // FIXME: Only check when N is the canonical subprogram for F.
3286 SmallPtrSet<const MDNode *, 32> Seen;
3287 auto VisitDebugLoc = [&](const Instruction &I, const MDNode *Node) {
3288 // Be careful about using DILocation here since we might be dealing with
3289 // broken code (this is the Verifier after all).
3290 const DILocation *DL = dyn_cast_or_null<DILocation>(Node);
3291 if (!DL)
3292 return;
3293 if (!Seen.insert(DL).second)
3294 return;
3295
3296 Metadata *Parent = DL->getRawScope();
3297 CheckDI(Parent && isa<DILocalScope>(Parent),
3298 "DILocation's scope must be a DILocalScope", N, &F, &I, DL, Parent);
3299
3300 DILocalScope *Scope = DL->getInlinedAtScope();
3301 Check(Scope, "Failed to find DILocalScope", DL);
3302
3303 if (!Seen.insert(Scope).second)
3304 return;
3305
3306 DISubprogram *SP = Scope->getSubprogram();
3307
3308 // Scope and SP could be the same MDNode and we don't want to skip
3309 // validation in that case
3310 if ((Scope != SP) && !Seen.insert(SP).second)
3311 return;
3312
3313 CheckDI(SP->describes(&F),
3314 "!dbg attachment points at wrong subprogram for function", N, &F,
3315 &I, DL, Scope, SP);
3316 };
3317 for (auto &BB : F)
3318 for (auto &I : BB) {
3319 VisitDebugLoc(I, I.getDebugLoc().getAsMDNode());
3320 // The llvm.loop annotations also contain two DILocations.
3321 if (auto MD = I.getMetadata(LLVMContext::MD_loop))
3322 for (unsigned i = 1; i < MD->getNumOperands(); ++i)
3323 VisitDebugLoc(I, dyn_cast_or_null<MDNode>(MD->getOperand(i)));
3324 if (BrokenDebugInfo)
3325 return;
3326 }
3327}
3328
3329// verifyBasicBlock - Verify that a basic block is well formed...
3330//
3331void Verifier::visitBasicBlock(BasicBlock &BB) {
3332 InstsInThisBlock.clear();
3333 ConvergenceVerifyHelper.visit(BB);
3334
3335 // Ensure that basic blocks have terminators!
3336 Check(BB.getTerminator(), "Basic Block does not have terminator!", &BB);
3337
3338 // Check constraints that this basic block imposes on all of the PHI nodes in
3339 // it.
3340 if (isa<PHINode>(BB.front())) {
3341 SmallVector<BasicBlock *, 8> Preds(predecessors(&BB));
3343 llvm::sort(Preds);
3344 for (const PHINode &PN : BB.phis()) {
3345 Check(PN.getNumIncomingValues() == Preds.size(),
3346 "PHINode should have one entry for each predecessor of its "
3347 "parent basic block!",
3348 &PN);
3349
3350 // Get and sort all incoming values in the PHI node...
3351 Values.clear();
3352 Values.reserve(PN.getNumIncomingValues());
3353 for (unsigned i = 0, e = PN.getNumIncomingValues(); i != e; ++i)
3354 Values.push_back(
3355 std::make_pair(PN.getIncomingBlock(i), PN.getIncomingValue(i)));
3356 llvm::sort(Values);
3357
3358 for (unsigned i = 0, e = Values.size(); i != e; ++i) {
3359 // Check to make sure that if there is more than one entry for a
3360 // particular basic block in this PHI node, that the incoming values are
3361 // all identical.
3362 //
3363 Check(i == 0 || Values[i].first != Values[i - 1].first ||
3364 Values[i].second == Values[i - 1].second,
3365 "PHI node has multiple entries for the same basic block with "
3366 "different incoming values!",
3367 &PN, Values[i].first, Values[i].second, Values[i - 1].second);
3368
3369 // Check to make sure that the predecessors and PHI node entries are
3370 // matched up.
3371 Check(Values[i].first == Preds[i],
3372 "PHI node entries do not match predecessors!", &PN,
3373 Values[i].first, Preds[i]);
3374 }
3375 }
3376 }
3377
3378 // Check that all instructions have their parent pointers set up correctly.
3379 for (auto &I : BB)
3380 {
3381 Check(I.getParent() == &BB, "Instruction has bogus parent pointer!");
3382 }
3383
3384 // Confirm that no issues arise from the debug program.
3385 CheckDI(!BB.getTrailingDbgRecords(), "Basic Block has trailing DbgRecords!",
3386 &BB);
3387}
3388
3389void Verifier::visitTerminator(Instruction &I) {
3390 // Ensure that terminators only exist at the end of the basic block.
3391 Check(&I == I.getParent()->getTerminator(),
3392 "Terminator found in the middle of a basic block!", I.getParent());
3393 visitInstruction(I);
3394}
3395
3396void Verifier::visitBranchInst(BranchInst &BI) {
3397 if (BI.isConditional()) {
3399 "Branch condition is not 'i1' type!", &BI, BI.getCondition());
3400 }
3401 visitTerminator(BI);
3402}
3403
3404void Verifier::visitReturnInst(ReturnInst &RI) {
3405 Function *F = RI.getParent()->getParent();
3406 unsigned N = RI.getNumOperands();
3407 if (F->getReturnType()->isVoidTy())
3408 Check(N == 0,
3409 "Found return instr that returns non-void in Function of void "
3410 "return type!",
3411 &RI, F->getReturnType());
3412 else
3413 Check(N == 1 && F->getReturnType() == RI.getOperand(0)->getType(),
3414 "Function return type does not match operand "
3415 "type of return inst!",
3416 &RI, F->getReturnType());
3417
3418 // Check to make sure that the return value has necessary properties for
3419 // terminators...
3420 visitTerminator(RI);
3421}
3422
3423void Verifier::visitSwitchInst(SwitchInst &SI) {
3424 Check(SI.getType()->isVoidTy(), "Switch must have void result type!", &SI);
3425 // Check to make sure that all of the constants in the switch instruction
3426 // have the same type as the switched-on value.
3427 Type *SwitchTy = SI.getCondition()->getType();
3428 SmallPtrSet<ConstantInt*, 32> Constants;
3429 for (auto &Case : SI.cases()) {
3430 Check(isa<ConstantInt>(Case.getCaseValue()),
3431 "Case value is not a constant integer.", &SI);
3432 Check(Case.getCaseValue()->getType() == SwitchTy,
3433 "Switch constants must all be same type as switch value!", &SI);
3434 Check(Constants.insert(Case.getCaseValue()).second,
3435 "Duplicate integer as switch case", &SI, Case.getCaseValue());
3436 }
3437
3438 visitTerminator(SI);
3439}
3440
3441void Verifier::visitIndirectBrInst(IndirectBrInst &BI) {
3443 "Indirectbr operand must have pointer type!", &BI);
3444 for (unsigned i = 0, e = BI.getNumDestinations(); i != e; ++i)
3446 "Indirectbr destinations must all have pointer type!", &BI);
3447
3448 visitTerminator(BI);
3449}
3450
3451void Verifier::visitCallBrInst(CallBrInst &CBI) {
3452 if (!CBI.isInlineAsm()) {
3454 "Callbr: indirect function / invalid signature");
3455 Check(!CBI.hasOperandBundles(),
3456 "Callbr for intrinsics currently doesn't support operand bundles");
3457
3458 switch (CBI.getIntrinsicID()) {
3459 case Intrinsic::amdgcn_kill: {
3460 Check(CBI.getNumIndirectDests() == 1,
3461 "Callbr amdgcn_kill only supports one indirect dest");
3462 bool Unreachable = isa<UnreachableInst>(CBI.getIndirectDest(0)->begin());
3463 CallInst *Call = dyn_cast<CallInst>(CBI.getIndirectDest(0)->begin());
3464 Check(Unreachable || (Call && Call->getIntrinsicID() ==
3465 Intrinsic::amdgcn_unreachable),
3466 "Callbr amdgcn_kill indirect dest needs to be unreachable");
3467 break;
3468 }
3469 default:
3470 CheckFailed(
3471 "Callbr currently only supports asm-goto and selected intrinsics");
3472 }
3473 visitIntrinsicCall(CBI.getIntrinsicID(), CBI);
3474 } else {
3475 const InlineAsm *IA = cast<InlineAsm>(CBI.getCalledOperand());
3476 Check(!IA->canThrow(), "Unwinding from Callbr is not allowed");
3477
3478 verifyInlineAsmCall(CBI);
3479 }
3480 visitTerminator(CBI);
3481}
3482
3483void Verifier::visitSelectInst(SelectInst &SI) {
3484 Check(!SelectInst::areInvalidOperands(SI.getOperand(0), SI.getOperand(1),
3485 SI.getOperand(2)),
3486 "Invalid operands for select instruction!", &SI);
3487
3488 Check(SI.getTrueValue()->getType() == SI.getType(),
3489 "Select values must have same type as select instruction!", &SI);
3490 visitInstruction(SI);
3491}
3492
/// visitUserOp1 - User defined operators shouldn't live beyond the lifetime of
/// a pass, if any exist, it's an error.
///
void Verifier::visitUserOp1(Instruction &I) {
  // UserOp1 is a pass-private placeholder opcode; any instance that survives
  // into verification is a bug in whichever pass created it, so always fail.
  Check(false, "User-defined operators should not live outside of a pass!", &I);
}
3499
3500void Verifier::visitTruncInst(TruncInst &I) {
3501 // Get the source and destination types
3502 Type *SrcTy = I.getOperand(0)->getType();
3503 Type *DestTy = I.getType();
3504
3505 // Get the size of the types in bits, we'll need this later
3506 unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
3507 unsigned DestBitSize = DestTy->getScalarSizeInBits();
3508
3509 Check(SrcTy->isIntOrIntVectorTy(), "Trunc only operates on integer", &I);
3510 Check(DestTy->isIntOrIntVectorTy(), "Trunc only produces integer", &I);
3511 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(),
3512 "trunc source and destination must both be a vector or neither", &I);
3513 Check(SrcBitSize > DestBitSize, "DestTy too big for Trunc", &I);
3514
3515 visitInstruction(I);
3516}
3517
3518void Verifier::visitZExtInst(ZExtInst &I) {
3519 // Get the source and destination types
3520 Type *SrcTy = I.getOperand(0)->getType();
3521 Type *DestTy = I.getType();
3522
3523 // Get the size of the types in bits, we'll need this later
3524 Check(SrcTy->isIntOrIntVectorTy(), "ZExt only operates on integer", &I);
3525 Check(DestTy->isIntOrIntVectorTy(), "ZExt only produces an integer", &I);
3526 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(),
3527 "zext source and destination must both be a vector or neither", &I);
3528 unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
3529 unsigned DestBitSize = DestTy->getScalarSizeInBits();
3530
3531 Check(SrcBitSize < DestBitSize, "Type too small for ZExt", &I);
3532
3533 visitInstruction(I);
3534}
3535
3536void Verifier::visitSExtInst(SExtInst &I) {
3537 // Get the source and destination types
3538 Type *SrcTy = I.getOperand(0)->getType();
3539 Type *DestTy = I.getType();
3540
3541 // Get the size of the types in bits, we'll need this later
3542 unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
3543 unsigned DestBitSize = DestTy->getScalarSizeInBits();
3544
3545 Check(SrcTy->isIntOrIntVectorTy(), "SExt only operates on integer", &I);
3546 Check(DestTy->isIntOrIntVectorTy(), "SExt only produces an integer", &I);
3547 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(),
3548 "sext source and destination must both be a vector or neither", &I);
3549 Check(SrcBitSize < DestBitSize, "Type too small for SExt", &I);
3550
3551 visitInstruction(I);
3552}
3553
3554void Verifier::visitFPTruncInst(FPTruncInst &I) {
3555 // Get the source and destination types
3556 Type *SrcTy = I.getOperand(0)->getType();
3557 Type *DestTy = I.getType();
3558 // Get the size of the types in bits, we'll need this later
3559 unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
3560 unsigned DestBitSize = DestTy->getScalarSizeInBits();
3561
3562 Check(SrcTy->isFPOrFPVectorTy(), "FPTrunc only operates on FP", &I);
3563 Check(DestTy->isFPOrFPVectorTy(), "FPTrunc only produces an FP", &I);
3564 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(),
3565 "fptrunc source and destination must both be a vector or neither", &I);
3566 Check(SrcBitSize > DestBitSize, "DestTy too big for FPTrunc", &I);
3567
3568 visitInstruction(I);
3569}
3570
3571void Verifier::visitFPExtInst(FPExtInst &I) {
3572 // Get the source and destination types
3573 Type *SrcTy = I.getOperand(0)->getType();
3574 Type *DestTy = I.getType();
3575
3576 // Get the size of the types in bits, we'll need this later
3577 unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
3578 unsigned DestBitSize = DestTy->getScalarSizeInBits();
3579
3580 Check(SrcTy->isFPOrFPVectorTy(), "FPExt only operates on FP", &I);
3581 Check(DestTy->isFPOrFPVectorTy(), "FPExt only produces an FP", &I);
3582 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(),
3583 "fpext source and destination must both be a vector or neither", &I);
3584 Check(SrcBitSize < DestBitSize, "DestTy too small for FPExt", &I);
3585
3586 visitInstruction(I);
3587}
3588
3589void Verifier::visitUIToFPInst(UIToFPInst &I) {
3590 // Get the source and destination types
3591 Type *SrcTy = I.getOperand(0)->getType();
3592 Type *DestTy = I.getType();
3593
3594 bool SrcVec = SrcTy->isVectorTy();
3595 bool DstVec = DestTy->isVectorTy();
3596
3597 Check(SrcVec == DstVec,
3598 "UIToFP source and dest must both be vector or scalar", &I);
3599 Check(SrcTy->isIntOrIntVectorTy(),
3600 "UIToFP source must be integer or integer vector", &I);
3601 Check(DestTy->isFPOrFPVectorTy(), "UIToFP result must be FP or FP vector",
3602 &I);
3603
3604 if (SrcVec && DstVec)
3605 Check(cast<VectorType>(SrcTy)->getElementCount() ==
3606 cast<VectorType>(DestTy)->getElementCount(),
3607 "UIToFP source and dest vector length mismatch", &I);
3608
3609 visitInstruction(I);
3610}
3611
3612void Verifier::visitSIToFPInst(SIToFPInst &I) {
3613 // Get the source and destination types
3614 Type *SrcTy = I.getOperand(0)->getType();
3615 Type *DestTy = I.getType();
3616
3617 bool SrcVec = SrcTy->isVectorTy();
3618 bool DstVec = DestTy->isVectorTy();
3619
3620 Check(SrcVec == DstVec,
3621 "SIToFP source and dest must both be vector or scalar", &I);
3622 Check(SrcTy->isIntOrIntVectorTy(),
3623 "SIToFP source must be integer or integer vector", &I);
3624 Check(DestTy->isFPOrFPVectorTy(), "SIToFP result must be FP or FP vector",
3625 &I);
3626
3627 if (SrcVec && DstVec)
3628 Check(cast<VectorType>(SrcTy)->getElementCount() ==
3629 cast<VectorType>(DestTy)->getElementCount(),
3630 "SIToFP source and dest vector length mismatch", &I);
3631
3632 visitInstruction(I);
3633}
3634
3635void Verifier::visitFPToUIInst(FPToUIInst &I) {
3636 // Get the source and destination types
3637 Type *SrcTy = I.getOperand(0)->getType();
3638 Type *DestTy = I.getType();
3639
3640 bool SrcVec = SrcTy->isVectorTy();
3641 bool DstVec = DestTy->isVectorTy();
3642
3643 Check(SrcVec == DstVec,
3644 "FPToUI source and dest must both be vector or scalar", &I);
3645 Check(SrcTy->isFPOrFPVectorTy(), "FPToUI source must be FP or FP vector", &I);
3646 Check(DestTy->isIntOrIntVectorTy(),
3647 "FPToUI result must be integer or integer vector", &I);
3648
3649 if (SrcVec && DstVec)
3650 Check(cast<VectorType>(SrcTy)->getElementCount() ==
3651 cast<VectorType>(DestTy)->getElementCount(),
3652 "FPToUI source and dest vector length mismatch", &I);
3653
3654 visitInstruction(I);
3655}
3656
3657void Verifier::visitFPToSIInst(FPToSIInst &I) {
3658 // Get the source and destination types
3659 Type *SrcTy = I.getOperand(0)->getType();
3660 Type *DestTy = I.getType();
3661
3662 bool SrcVec = SrcTy->isVectorTy();
3663 bool DstVec = DestTy->isVectorTy();
3664
3665 Check(SrcVec == DstVec,
3666 "FPToSI source and dest must both be vector or scalar", &I);
3667 Check(SrcTy->isFPOrFPVectorTy(), "FPToSI source must be FP or FP vector", &I);
3668 Check(DestTy->isIntOrIntVectorTy(),
3669 "FPToSI result must be integer or integer vector", &I);
3670
3671 if (SrcVec && DstVec)
3672 Check(cast<VectorType>(SrcTy)->getElementCount() ==
3673 cast<VectorType>(DestTy)->getElementCount(),
3674 "FPToSI source and dest vector length mismatch", &I);
3675
3676 visitInstruction(I);
3677}
3678
3679void Verifier::checkPtrToAddr(Type *SrcTy, Type *DestTy, const Value &V) {
3680 Check(SrcTy->isPtrOrPtrVectorTy(), "PtrToAddr source must be pointer", V);
3681 Check(DestTy->isIntOrIntVectorTy(), "PtrToAddr result must be integral", V);
3682 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(), "PtrToAddr type mismatch",
3683 V);
3684
3685 if (SrcTy->isVectorTy()) {
3686 auto *VSrc = cast<VectorType>(SrcTy);
3687 auto *VDest = cast<VectorType>(DestTy);
3688 Check(VSrc->getElementCount() == VDest->getElementCount(),
3689 "PtrToAddr vector length mismatch", V);
3690 }
3691
3692 Type *AddrTy = DL.getAddressType(SrcTy);
3693 Check(AddrTy == DestTy, "PtrToAddr result must be address width", V);
3694}
3695
3696void Verifier::visitPtrToAddrInst(PtrToAddrInst &I) {
3697 checkPtrToAddr(I.getOperand(0)->getType(), I.getType(), I);
3698 visitInstruction(I);
3699}
3700
3701void Verifier::visitPtrToIntInst(PtrToIntInst &I) {
3702 // Get the source and destination types
3703 Type *SrcTy = I.getOperand(0)->getType();
3704 Type *DestTy = I.getType();
3705
3706 Check(SrcTy->isPtrOrPtrVectorTy(), "PtrToInt source must be pointer", &I);
3707
3708 Check(DestTy->isIntOrIntVectorTy(), "PtrToInt result must be integral", &I);
3709 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(), "PtrToInt type mismatch",
3710 &I);
3711
3712 if (SrcTy->isVectorTy()) {
3713 auto *VSrc = cast<VectorType>(SrcTy);
3714 auto *VDest = cast<VectorType>(DestTy);
3715 Check(VSrc->getElementCount() == VDest->getElementCount(),
3716 "PtrToInt Vector length mismatch", &I);
3717 }
3718
3719 visitInstruction(I);
3720}
3721
3722void Verifier::visitIntToPtrInst(IntToPtrInst &I) {
3723 // Get the source and destination types
3724 Type *SrcTy = I.getOperand(0)->getType();
3725 Type *DestTy = I.getType();
3726
3727 Check(SrcTy->isIntOrIntVectorTy(), "IntToPtr source must be an integral", &I);
3728 Check(DestTy->isPtrOrPtrVectorTy(), "IntToPtr result must be a pointer", &I);
3729
3730 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(), "IntToPtr type mismatch",
3731 &I);
3732 if (SrcTy->isVectorTy()) {
3733 auto *VSrc = cast<VectorType>(SrcTy);
3734 auto *VDest = cast<VectorType>(DestTy);
3735 Check(VSrc->getElementCount() == VDest->getElementCount(),
3736 "IntToPtr Vector length mismatch", &I);
3737 }
3738 visitInstruction(I);
3739}
3740
3741void Verifier::visitBitCastInst(BitCastInst &I) {
3742 Check(
3743 CastInst::castIsValid(Instruction::BitCast, I.getOperand(0), I.getType()),
3744 "Invalid bitcast", &I);
3745 visitInstruction(I);
3746}
3747
3748void Verifier::visitAddrSpaceCastInst(AddrSpaceCastInst &I) {
3749 Type *SrcTy = I.getOperand(0)->getType();
3750 Type *DestTy = I.getType();
3751
3752 Check(SrcTy->isPtrOrPtrVectorTy(), "AddrSpaceCast source must be a pointer",
3753 &I);
3754 Check(DestTy->isPtrOrPtrVectorTy(), "AddrSpaceCast result must be a pointer",
3755 &I);
3757 "AddrSpaceCast must be between different address spaces", &I);
3758 if (auto *SrcVTy = dyn_cast<VectorType>(SrcTy))
3759 Check(SrcVTy->getElementCount() ==
3760 cast<VectorType>(DestTy)->getElementCount(),
3761 "AddrSpaceCast vector pointer number of elements mismatch", &I);
3762 visitInstruction(I);
3763}
3764
3765/// visitPHINode - Ensure that a PHI node is well formed.
3766///
3767void Verifier::visitPHINode(PHINode &PN) {
3768 // Ensure that the PHI nodes are all grouped together at the top of the block.
3769 // This can be tested by checking whether the instruction before this is
3770 // either nonexistent (because this is begin()) or is a PHI node. If not,
3771 // then there is some other instruction before a PHI.
3772 Check(&PN == &PN.getParent()->front() ||
3774 "PHI nodes not grouped at top of basic block!", &PN, PN.getParent());
3775
3776 // Check that a PHI doesn't yield a Token.
3777 Check(!PN.getType()->isTokenLikeTy(), "PHI nodes cannot have token type!");
3778
3779 // Check that all of the values of the PHI node have the same type as the
3780 // result.
3781 for (Value *IncValue : PN.incoming_values()) {
3782 Check(PN.getType() == IncValue->getType(),
3783 "PHI node operands are not the same type as the result!", &PN);
3784 }
3785
3786 // All other PHI node constraints are checked in the visitBasicBlock method.
3787
3788 visitInstruction(PN);
3789}
3790
3791void Verifier::visitCallBase(CallBase &Call) {
3793 "Called function must be a pointer!", Call);
3794 FunctionType *FTy = Call.getFunctionType();
3795
3796 // Verify that the correct number of arguments are being passed
3797 if (FTy->isVarArg())
3798 Check(Call.arg_size() >= FTy->getNumParams(),
3799 "Called function requires more parameters than were provided!", Call);
3800 else
3801 Check(Call.arg_size() == FTy->getNumParams(),
3802 "Incorrect number of arguments passed to called function!", Call);
3803
3804 // Verify that all arguments to the call match the function type.
3805 for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i)
3806 Check(Call.getArgOperand(i)->getType() == FTy->getParamType(i),
3807 "Call parameter type does not match function signature!",
3808 Call.getArgOperand(i), FTy->getParamType(i), Call);
3809
3810 AttributeList Attrs = Call.getAttributes();
3811
3812 Check(verifyAttributeCount(Attrs, Call.arg_size()),
3813 "Attribute after last parameter!", Call);
3814
3815 Function *Callee =
3817 bool IsIntrinsic = Callee && Callee->isIntrinsic();
3818 if (IsIntrinsic)
3819 Check(Callee->getValueType() == FTy,
3820 "Intrinsic called with incompatible signature", Call);
3821
3822 // Verify if the calling convention of the callee is callable.
3824 "calling convention does not permit calls", Call);
3825
3826 // Disallow passing/returning values with alignment higher than we can
3827 // represent.
3828 // FIXME: Consider making DataLayout cap the alignment, so this isn't
3829 // necessary.
3830 auto VerifyTypeAlign = [&](Type *Ty, const Twine &Message) {
3831 if (!Ty->isSized())
3832 return;
3833 Align ABIAlign = DL.getABITypeAlign(Ty);
3834 Check(ABIAlign.value() <= Value::MaximumAlignment,
3835 "Incorrect alignment of " + Message + " to called function!", Call);
3836 };
3837
3838 if (!IsIntrinsic) {
3839 VerifyTypeAlign(FTy->getReturnType(), "return type");
3840 for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i) {
3841 Type *Ty = FTy->getParamType(i);
3842 VerifyTypeAlign(Ty, "argument passed");
3843 }
3844 }
3845
3846 if (Attrs.hasFnAttr(Attribute::Speculatable)) {
3847 // Don't allow speculatable on call sites, unless the underlying function
3848 // declaration is also speculatable.
3849 Check(Callee && Callee->isSpeculatable(),
3850 "speculatable attribute may not apply to call sites", Call);
3851 }
3852
3853 if (Attrs.hasFnAttr(Attribute::Preallocated)) {
3854 Check(Call.getIntrinsicID() == Intrinsic::call_preallocated_arg,
3855 "preallocated as a call site attribute can only be on "
3856 "llvm.call.preallocated.arg");
3857 }
3858
3859 // Verify call attributes.
3860 verifyFunctionAttrs(FTy, Attrs, &Call, IsIntrinsic, Call.isInlineAsm());
3861
3862 // Conservatively check the inalloca argument.
3863 // We have a bug if we can find that there is an underlying alloca without
3864 // inalloca.
3865 if (Call.hasInAllocaArgument()) {
3866 Value *InAllocaArg = Call.getArgOperand(FTy->getNumParams() - 1);
3867 if (auto AI = dyn_cast<AllocaInst>(InAllocaArg->stripInBoundsOffsets()))
3868 Check(AI->isUsedWithInAlloca(),
3869 "inalloca argument for call has mismatched alloca", AI, Call);
3870 }
3871
3872 // For each argument of the callsite, if it has the swifterror argument,
3873 // make sure the underlying alloca/parameter it comes from has a swifterror as
3874 // well.
3875 for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i) {
3876 if (Call.paramHasAttr(i, Attribute::SwiftError)) {
3877 Value *SwiftErrorArg = Call.getArgOperand(i);
3878 if (auto AI = dyn_cast<AllocaInst>(SwiftErrorArg->stripInBoundsOffsets())) {
3879 Check(AI->isSwiftError(),
3880 "swifterror argument for call has mismatched alloca", AI, Call);
3881 continue;
3882 }
3883 auto ArgI = dyn_cast<Argument>(SwiftErrorArg);
3884 Check(ArgI, "swifterror argument should come from an alloca or parameter",
3885 SwiftErrorArg, Call);
3886 Check(ArgI->hasSwiftErrorAttr(),
3887 "swifterror argument for call has mismatched parameter", ArgI,
3888 Call);
3889 }
3890
3891 if (Attrs.hasParamAttr(i, Attribute::ImmArg)) {
3892 // Don't allow immarg on call sites, unless the underlying declaration
3893 // also has the matching immarg.
3894 Check(Callee && Callee->hasParamAttribute(i, Attribute::ImmArg),
3895 "immarg may not apply only to call sites", Call.getArgOperand(i),
3896 Call);
3897 }
3898
3899 if (Call.paramHasAttr(i, Attribute::ImmArg)) {
3900 Value *ArgVal = Call.getArgOperand(i);
3901 Check(isa<ConstantInt>(ArgVal) || isa<ConstantFP>(ArgVal),
3902 "immarg operand has non-immediate parameter", ArgVal, Call);
3903
3904 // If the imm-arg is an integer and also has a range attached,
3905 // check if the given value is within the range.
3906 if (Call.paramHasAttr(i, Attribute::Range)) {
3907 if (auto *CI = dyn_cast<ConstantInt>(ArgVal)) {
3908 const ConstantRange &CR =
3909 Call.getParamAttr(i, Attribute::Range).getValueAsConstantRange();
3910 Check(CR.contains(CI->getValue()),
3911 "immarg value " + Twine(CI->getValue().getSExtValue()) +
3912 " out of range [" + Twine(CR.getLower().getSExtValue()) +
3913 ", " + Twine(CR.getUpper().getSExtValue()) + ")",
3914 Call);
3915 }
3916 }
3917 }
3918
3919 if (Call.paramHasAttr(i, Attribute::Preallocated)) {
3920 Value *ArgVal = Call.getArgOperand(i);
3921 bool hasOB =
3923 bool isMustTail = Call.isMustTailCall();
3924 Check(hasOB != isMustTail,
3925 "preallocated operand either requires a preallocated bundle or "
3926 "the call to be musttail (but not both)",
3927 ArgVal, Call);
3928 }
3929 }
3930
3931 if (FTy->isVarArg()) {
3932 // FIXME? is 'nest' even legal here?
3933 bool SawNest = false;
3934 bool SawReturned = false;
3935
3936 for (unsigned Idx = 0; Idx < FTy->getNumParams(); ++Idx) {
3937 if (Attrs.hasParamAttr(Idx, Attribute::Nest))
3938 SawNest = true;
3939 if (Attrs.hasParamAttr(Idx, Attribute::Returned))
3940 SawReturned = true;
3941 }
3942
3943 // Check attributes on the varargs part.
3944 for (unsigned Idx = FTy->getNumParams(); Idx < Call.arg_size(); ++Idx) {
3945 Type *Ty = Call.getArgOperand(Idx)->getType();
3946 AttributeSet ArgAttrs = Attrs.getParamAttrs(Idx);
3947 verifyParameterAttrs(ArgAttrs, Ty, &Call);
3948
3949 if (ArgAttrs.hasAttribute(Attribute::Nest)) {
3950 Check(!SawNest, "More than one parameter has attribute nest!", Call);
3951 SawNest = true;
3952 }
3953
3954 if (ArgAttrs.hasAttribute(Attribute::Returned)) {
3955 Check(!SawReturned, "More than one parameter has attribute returned!",
3956 Call);
3957 Check(Ty->canLosslesslyBitCastTo(FTy->getReturnType()),
3958 "Incompatible argument and return types for 'returned' "
3959 "attribute",
3960 Call);
3961 SawReturned = true;
3962 }
3963
3964 // Statepoint intrinsic is vararg but the wrapped function may be not.
3965 // Allow sret here and check the wrapped function in verifyStatepoint.
3966 if (Call.getIntrinsicID() != Intrinsic::experimental_gc_statepoint)
3967 Check(!ArgAttrs.hasAttribute(Attribute::StructRet),
3968 "Attribute 'sret' cannot be used for vararg call arguments!",
3969 Call);
3970
3971 if (ArgAttrs.hasAttribute(Attribute::InAlloca))
3972 Check(Idx == Call.arg_size() - 1,
3973 "inalloca isn't on the last argument!", Call);
3974 }
3975 }
3976
3977 // Verify that there's no metadata unless it's a direct call to an intrinsic.
3978 if (!IsIntrinsic) {
3979 for (Type *ParamTy : FTy->params()) {
3980 Check(!ParamTy->isMetadataTy(),
3981 "Function has metadata parameter but isn't an intrinsic", Call);
3982 Check(!ParamTy->isTokenLikeTy(),
3983 "Function has token parameter but isn't an intrinsic", Call);
3984 }
3985 }
3986
3987 // Verify that indirect calls don't return tokens.
3988 if (!Call.getCalledFunction()) {
3989 Check(!FTy->getReturnType()->isTokenLikeTy(),
3990 "Return type cannot be token for indirect call!");
3991 Check(!FTy->getReturnType()->isX86_AMXTy(),
3992 "Return type cannot be x86_amx for indirect call!");
3993 }
3994
3996 visitIntrinsicCall(ID, Call);
3997
3998 // Verify that a callsite has at most one "deopt", at most one "funclet", at
3999 // most one "gc-transition", at most one "cfguardtarget", at most one
4000 // "preallocated" operand bundle, and at most one "ptrauth" operand bundle.
4001 bool FoundDeoptBundle = false, FoundFuncletBundle = false,
4002 FoundGCTransitionBundle = false, FoundCFGuardTargetBundle = false,
4003 FoundPreallocatedBundle = false, FoundGCLiveBundle = false,
4004 FoundPtrauthBundle = false, FoundKCFIBundle = false,
4005 FoundAttachedCallBundle = false;
4006 for (unsigned i = 0, e = Call.getNumOperandBundles(); i < e; ++i) {
4007 OperandBundleUse BU = Call.getOperandBundleAt(i);
4008 uint32_t Tag = BU.getTagID();
4009 if (Tag == LLVMContext::OB_deopt) {
4010 Check(!FoundDeoptBundle, "Multiple deopt operand bundles", Call);
4011 FoundDeoptBundle = true;
4012 } else if (Tag == LLVMContext::OB_gc_transition) {
4013 Check(!FoundGCTransitionBundle, "Multiple gc-transition operand bundles",
4014 Call);
4015 FoundGCTransitionBundle = true;
4016 } else if (Tag == LLVMContext::OB_funclet) {
4017 Check(!FoundFuncletBundle, "Multiple funclet operand bundles", Call);
4018 FoundFuncletBundle = true;
4019 Check(BU.Inputs.size() == 1,
4020 "Expected exactly one funclet bundle operand", Call);
4021 Check(isa<FuncletPadInst>(BU.Inputs.front()),
4022 "Funclet bundle operands should correspond to a FuncletPadInst",
4023 Call);
4024 } else if (Tag == LLVMContext::OB_cfguardtarget) {
4025 Check(!FoundCFGuardTargetBundle, "Multiple CFGuardTarget operand bundles",
4026 Call);
4027 FoundCFGuardTargetBundle = true;
4028 Check(BU.Inputs.size() == 1,
4029 "Expected exactly one cfguardtarget bundle operand", Call);
4030 } else if (Tag == LLVMContext::OB_ptrauth) {
4031 Check(!FoundPtrauthBundle, "Multiple ptrauth operand bundles", Call);
4032 FoundPtrauthBundle = true;
4033 Check(BU.Inputs.size() == 2,
4034 "Expected exactly two ptrauth bundle operands", Call);
4035 Check(isa<ConstantInt>(BU.Inputs[0]) &&
4036 BU.Inputs[0]->getType()->isIntegerTy(32),
4037 "Ptrauth bundle key operand must be an i32 constant", Call);
4038 Check(BU.Inputs[1]->getType()->isIntegerTy(64),
4039 "Ptrauth bundle discriminator operand must be an i64", Call);
4040 } else if (Tag == LLVMContext::OB_kcfi) {
4041 Check(!FoundKCFIBundle, "Multiple kcfi operand bundles", Call);
4042 FoundKCFIBundle = true;
4043 Check(BU.Inputs.size() == 1, "Expected exactly one kcfi bundle operand",
4044 Call);
4045 Check(isa<ConstantInt>(BU.Inputs[0]) &&
4046 BU.Inputs[0]->getType()->isIntegerTy(32),
4047 "Kcfi bundle operand must be an i32 constant", Call);
4048 } else if (Tag == LLVMContext::OB_preallocated) {
4049 Check(!FoundPreallocatedBundle, "Multiple preallocated operand bundles",
4050 Call);
4051 FoundPreallocatedBundle = true;
4052 Check(BU.Inputs.size() == 1,
4053 "Expected exactly one preallocated bundle operand", Call);
4054 auto Input = dyn_cast<IntrinsicInst>(BU.Inputs.front());
4055 Check(Input &&
4056 Input->getIntrinsicID() == Intrinsic::call_preallocated_setup,
4057 "\"preallocated\" argument must be a token from "
4058 "llvm.call.preallocated.setup",
4059 Call);
4060 } else if (Tag == LLVMContext::OB_gc_live) {
4061 Check(!FoundGCLiveBundle, "Multiple gc-live operand bundles", Call);
4062 FoundGCLiveBundle = true;
4064 Check(!FoundAttachedCallBundle,
4065 "Multiple \"clang.arc.attachedcall\" operand bundles", Call);
4066 FoundAttachedCallBundle = true;
4067 verifyAttachedCallBundle(Call, BU);
4068 }
4069 }
4070
4071 // Verify that callee and callsite agree on whether to use pointer auth.
4072 Check(!(Call.getCalledFunction() && FoundPtrauthBundle),
4073 "Direct call cannot have a ptrauth bundle", Call);
4074
4075 // Verify that each inlinable callsite of a debug-info-bearing function in a
4076 // debug-info-bearing function has a debug location attached to it. Failure to
4077 // do so causes assertion failures when the inliner sets up inline scope info
4078 // (Interposable functions are not inlinable, neither are functions without
4079 // definitions.)
4085 "inlinable function call in a function with "
4086 "debug info must have a !dbg location",
4087 Call);
4088
4089 if (Call.isInlineAsm())
4090 verifyInlineAsmCall(Call);
4091
4092 ConvergenceVerifyHelper.visit(Call);
4093
4094 visitInstruction(Call);
4095}
4096
4097void Verifier::verifyTailCCMustTailAttrs(const AttrBuilder &Attrs,
4098 StringRef Context) {
4099 Check(!Attrs.contains(Attribute::InAlloca),
4100 Twine("inalloca attribute not allowed in ") + Context);
4101 Check(!Attrs.contains(Attribute::InReg),
4102 Twine("inreg attribute not allowed in ") + Context);
4103 Check(!Attrs.contains(Attribute::SwiftError),
4104 Twine("swifterror attribute not allowed in ") + Context);
4105 Check(!Attrs.contains(Attribute::Preallocated),
4106 Twine("preallocated attribute not allowed in ") + Context);
4107 Check(!Attrs.contains(Attribute::ByRef),
4108 Twine("byref attribute not allowed in ") + Context);
4109}
4110
4111/// Two types are "congruent" if they are identical, or if they are both pointer
4112/// types with different pointee types and the same address space.
4113static bool isTypeCongruent(Type *L, Type *R) {
4114 if (L == R)
4115 return true;
4118 if (!PL || !PR)
4119 return false;
4120 return PL->getAddressSpace() == PR->getAddressSpace();
4121}
4122
4123static AttrBuilder getParameterABIAttributes(LLVMContext& C, unsigned I, AttributeList Attrs) {
4124 static const Attribute::AttrKind ABIAttrs[] = {
4125 Attribute::StructRet, Attribute::ByVal, Attribute::InAlloca,
4126 Attribute::InReg, Attribute::StackAlignment, Attribute::SwiftSelf,
4127 Attribute::SwiftAsync, Attribute::SwiftError, Attribute::Preallocated,
4128 Attribute::ByRef};
4129 AttrBuilder Copy(C);
4130 for (auto AK : ABIAttrs) {
4131 Attribute Attr = Attrs.getParamAttrs(I).getAttribute(AK);
4132 if (Attr.isValid())
4133 Copy.addAttribute(Attr);
4134 }
4135
4136 // `align` is ABI-affecting only in combination with `byval` or `byref`.
4137 if (Attrs.hasParamAttr(I, Attribute::Alignment) &&
4138 (Attrs.hasParamAttr(I, Attribute::ByVal) ||
4139 Attrs.hasParamAttr(I, Attribute::ByRef)))
4140 Copy.addAlignmentAttr(Attrs.getParamAlignment(I));
4141 return Copy;
4142}
4143
4144void Verifier::verifyMustTailCall(CallInst &CI) {
4145 Check(!CI.isInlineAsm(), "cannot use musttail call with inline asm", &CI);
4146
4147 Function *F = CI.getParent()->getParent();
4148 FunctionType *CallerTy = F->getFunctionType();
4149 FunctionType *CalleeTy = CI.getFunctionType();
4150 Check(CallerTy->isVarArg() == CalleeTy->isVarArg(),
4151 "cannot guarantee tail call due to mismatched varargs", &CI);
4152 Check(isTypeCongruent(CallerTy->getReturnType(), CalleeTy->getReturnType()),
4153 "cannot guarantee tail call due to mismatched return types", &CI);
4154
4155 // - The calling conventions of the caller and callee must match.
4156 Check(F->getCallingConv() == CI.getCallingConv(),
4157 "cannot guarantee tail call due to mismatched calling conv", &CI);
4158
4159 // - The call must immediately precede a :ref:`ret <i_ret>` instruction,
4160 // or a pointer bitcast followed by a ret instruction.
4161 // - The ret instruction must return the (possibly bitcasted) value
4162 // produced by the call or void.
4163 Value *RetVal = &CI;
4165
4166 // Handle the optional bitcast.
4167 if (BitCastInst *BI = dyn_cast_or_null<BitCastInst>(Next)) {
4168 Check(BI->getOperand(0) == RetVal,
4169 "bitcast following musttail call must use the call", BI);
4170 RetVal = BI;
4171 Next = BI->getNextNode();
4172 }
4173
4174 // Check the return.
4175 ReturnInst *Ret = dyn_cast_or_null<ReturnInst>(Next);
4176 Check(Ret, "musttail call must precede a ret with an optional bitcast", &CI);
4177 Check(!Ret->getReturnValue() || Ret->getReturnValue() == RetVal ||
4179 "musttail call result must be returned", Ret);
4180
4181 AttributeList CallerAttrs = F->getAttributes();
4182 AttributeList CalleeAttrs = CI.getAttributes();
4183 if (CI.getCallingConv() == CallingConv::SwiftTail ||
4184 CI.getCallingConv() == CallingConv::Tail) {
4185 StringRef CCName =
4186 CI.getCallingConv() == CallingConv::Tail ? "tailcc" : "swifttailcc";
4187
4188 // - Only sret, byval, swiftself, and swiftasync ABI-impacting attributes
4189 // are allowed in swifttailcc call
4190 for (unsigned I = 0, E = CallerTy->getNumParams(); I != E; ++I) {
4191 AttrBuilder ABIAttrs = getParameterABIAttributes(F->getContext(), I, CallerAttrs);
4192 SmallString<32> Context{CCName, StringRef(" musttail caller")};
4193 verifyTailCCMustTailAttrs(ABIAttrs, Context);
4194 }
4195 for (unsigned I = 0, E = CalleeTy->getNumParams(); I != E; ++I) {
4196 AttrBuilder ABIAttrs = getParameterABIAttributes(F->getContext(), I, CalleeAttrs);
4197 SmallString<32> Context{CCName, StringRef(" musttail callee")};
4198 verifyTailCCMustTailAttrs(ABIAttrs, Context);
4199 }
4200 // - Varargs functions are not allowed
4201 Check(!CallerTy->isVarArg(), Twine("cannot guarantee ") + CCName +
4202 " tail call for varargs function");
4203 return;
4204 }
4205
4206 // - The caller and callee prototypes must match. Pointer types of
4207 // parameters or return types may differ in pointee type, but not
4208 // address space.
4209 if (!CI.getIntrinsicID()) {
4210 Check(CallerTy->getNumParams() == CalleeTy->getNumParams(),
4211 "cannot guarantee tail call due to mismatched parameter counts", &CI);
4212 for (unsigned I = 0, E = CallerTy->getNumParams(); I != E; ++I) {
4213 Check(
4214 isTypeCongruent(CallerTy->getParamType(I), CalleeTy->getParamType(I)),
4215 "cannot guarantee tail call due to mismatched parameter types", &CI);
4216 }
4217 }
4218
4219 // - All ABI-impacting function attributes, such as sret, byval, inreg,
4220 // returned, preallocated, and inalloca, must match.
4221 for (unsigned I = 0, E = CallerTy->getNumParams(); I != E; ++I) {
4222 AttrBuilder CallerABIAttrs = getParameterABIAttributes(F->getContext(), I, CallerAttrs);
4223 AttrBuilder CalleeABIAttrs = getParameterABIAttributes(F->getContext(), I, CalleeAttrs);
4224 Check(CallerABIAttrs == CalleeABIAttrs,
4225 "cannot guarantee tail call due to mismatched ABI impacting "
4226 "function attributes",
4227 &CI, CI.getOperand(I));
4228 }
4229}
4230
void Verifier::visitCallInst(CallInst &CI) {
  // Run the generic call-site checks shared with invoke/callbr.
  visitCallBase(CI);

  // musttail imposes extra structural constraints (congruent prototypes, a
  // following ret, matching ABI attributes) verified separately.
  if (CI.isMustTailCall())
    verifyMustTailCall(CI);
}
4237
4238void Verifier::visitInvokeInst(InvokeInst &II) {
4239 visitCallBase(II);
4240
4241 // Verify that the first non-PHI instruction of the unwind destination is an
4242 // exception handling instruction.
4243 Check(
4244 II.getUnwindDest()->isEHPad(),
4245 "The unwind destination does not have an exception handling instruction!",
4246 &II);
4247
4248 visitTerminator(II);
4249}
4250
4251/// visitUnaryOperator - Check the argument to the unary operator.
4252///
4253void Verifier::visitUnaryOperator(UnaryOperator &U) {
4254 Check(U.getType() == U.getOperand(0)->getType(),
4255 "Unary operators must have same type for"
4256 "operands and result!",
4257 &U);
4258
4259 switch (U.getOpcode()) {
4260 // Check that floating-point arithmetic operators are only used with
4261 // floating-point operands.
4262 case Instruction::FNeg:
4263 Check(U.getType()->isFPOrFPVectorTy(),
4264 "FNeg operator only works with float types!", &U);
4265 break;
4266 default:
4267 llvm_unreachable("Unknown UnaryOperator opcode!");
4268 }
4269
4270 visitInstruction(U);
4271}
4272
4273/// visitBinaryOperator - Check that both arguments to the binary operator are
4274/// of the same type!
4275///
4276void Verifier::visitBinaryOperator(BinaryOperator &B) {
4277 Check(B.getOperand(0)->getType() == B.getOperand(1)->getType(),
4278 "Both operands to a binary operator are not of the same type!", &B);
4279
4280 switch (B.getOpcode()) {
4281 // Check that integer arithmetic operators are only used with
4282 // integral operands.
4283 case Instruction::Add:
4284 case Instruction::Sub:
4285 case Instruction::Mul:
4286 case Instruction::SDiv:
4287 case Instruction::UDiv:
4288 case Instruction::SRem:
4289 case Instruction::URem:
4290 Check(B.getType()->isIntOrIntVectorTy(),
4291 "Integer arithmetic operators only work with integral types!", &B);
4292 Check(B.getType() == B.getOperand(0)->getType(),
4293 "Integer arithmetic operators must have same type "
4294 "for operands and result!",
4295 &B);
4296 break;
4297 // Check that floating-point arithmetic operators are only used with
4298 // floating-point operands.
4299 case Instruction::FAdd:
4300 case Instruction::FSub:
4301 case Instruction::FMul:
4302 case Instruction::FDiv:
4303 case Instruction::FRem:
4304 Check(B.getType()->isFPOrFPVectorTy(),
4305 "Floating-point arithmetic operators only work with "
4306 "floating-point types!",
4307 &B);
4308 Check(B.getType() == B.getOperand(0)->getType(),
4309 "Floating-point arithmetic operators must have same type "
4310 "for operands and result!",
4311 &B);
4312 break;
4313 // Check that logical operators are only used with integral operands.
4314 case Instruction::And:
4315 case Instruction::Or:
4316 case Instruction::Xor:
4317 Check(B.getType()->isIntOrIntVectorTy(),
4318 "Logical operators only work with integral types!", &B);
4319 Check(B.getType() == B.getOperand(0)->getType(),
4320 "Logical operators must have same type for operands and result!", &B);
4321 break;
4322 case Instruction::Shl:
4323 case Instruction::LShr:
4324 case Instruction::AShr:
4325 Check(B.getType()->isIntOrIntVectorTy(),
4326 "Shifts only work with integral types!", &B);
4327 Check(B.getType() == B.getOperand(0)->getType(),
4328 "Shift return type must be same as operands!", &B);
4329 break;
4330 default:
4331 llvm_unreachable("Unknown BinaryOperator opcode!");
4332 }
4333
4334 visitInstruction(B);
4335}
4336
4337void Verifier::visitICmpInst(ICmpInst &IC) {
4338 // Check that the operands are the same type
4339 Type *Op0Ty = IC.getOperand(0)->getType();
4340 Type *Op1Ty = IC.getOperand(1)->getType();
4341 Check(Op0Ty == Op1Ty,
4342 "Both operands to ICmp instruction are not of the same type!", &IC);
4343 // Check that the operands are the right type
4344 Check(Op0Ty->isIntOrIntVectorTy() || Op0Ty->isPtrOrPtrVectorTy(),
4345 "Invalid operand types for ICmp instruction", &IC);
4346 // Check that the predicate is valid.
4347 Check(IC.isIntPredicate(), "Invalid predicate in ICmp instruction!", &IC);
4348
4349 visitInstruction(IC);
4350}
4351
4352void Verifier::visitFCmpInst(FCmpInst &FC) {
4353 // Check that the operands are the same type
4354 Type *Op0Ty = FC.getOperand(0)->getType();
4355 Type *Op1Ty = FC.getOperand(1)->getType();
4356 Check(Op0Ty == Op1Ty,
4357 "Both operands to FCmp instruction are not of the same type!", &FC);
4358 // Check that the operands are the right type
4359 Check(Op0Ty->isFPOrFPVectorTy(), "Invalid operand types for FCmp instruction",
4360 &FC);
4361 // Check that the predicate is valid.
4362 Check(FC.isFPPredicate(), "Invalid predicate in FCmp instruction!", &FC);
4363
4364 visitInstruction(FC);
4365}
4366
4367void Verifier::visitExtractElementInst(ExtractElementInst &EI) {
4369 "Invalid extractelement operands!", &EI);
4370 visitInstruction(EI);
4371}
4372
4373void Verifier::visitInsertElementInst(InsertElementInst &IE) {
4374 Check(InsertElementInst::isValidOperands(IE.getOperand(0), IE.getOperand(1),
4375 IE.getOperand(2)),
4376 "Invalid insertelement operands!", &IE);
4377 visitInstruction(IE);
4378}
4379
4380void Verifier::visitShuffleVectorInst(ShuffleVectorInst &SV) {
4382 SV.getShuffleMask()),
4383 "Invalid shufflevector operands!", &SV);
4384 visitInstruction(SV);
4385}
4386
4387void Verifier::visitGetElementPtrInst(GetElementPtrInst &GEP) {
4388 Type *TargetTy = GEP.getPointerOperandType()->getScalarType();
4389
4390 Check(isa<PointerType>(TargetTy),
4391 "GEP base pointer is not a vector or a vector of pointers", &GEP);
4392 Check(GEP.getSourceElementType()->isSized(), "GEP into unsized type!", &GEP);
4393
4394 if (auto *STy = dyn_cast<StructType>(GEP.getSourceElementType())) {
4395 Check(!STy->isScalableTy(),
4396 "getelementptr cannot target structure that contains scalable vector"
4397 "type",
4398 &GEP);
4399 }
4400
4401 SmallVector<Value *, 16> Idxs(GEP.indices());
4402 Check(
4403 all_of(Idxs, [](Value *V) { return V->getType()->isIntOrIntVectorTy(); }),
4404 "GEP indexes must be integers", &GEP);
4405 Type *ElTy =
4406 GetElementPtrInst::getIndexedType(GEP.getSourceElementType(), Idxs);
4407 Check(ElTy, "Invalid indices for GEP pointer type!", &GEP);
4408
4409 PointerType *PtrTy = dyn_cast<PointerType>(GEP.getType()->getScalarType());
4410
4411 Check(PtrTy && GEP.getResultElementType() == ElTy,
4412 "GEP is not of right type for indices!", &GEP, ElTy);
4413
4414 if (auto *GEPVTy = dyn_cast<VectorType>(GEP.getType())) {
4415 // Additional checks for vector GEPs.
4416 ElementCount GEPWidth = GEPVTy->getElementCount();
4417 if (GEP.getPointerOperandType()->isVectorTy())
4418 Check(
4419 GEPWidth ==
4420 cast<VectorType>(GEP.getPointerOperandType())->getElementCount(),
4421 "Vector GEP result width doesn't match operand's", &GEP);
4422 for (Value *Idx : Idxs) {
4423 Type *IndexTy = Idx->getType();
4424 if (auto *IndexVTy = dyn_cast<VectorType>(IndexTy)) {
4425 ElementCount IndexWidth = IndexVTy->getElementCount();
4426 Check(IndexWidth == GEPWidth, "Invalid GEP index vector width", &GEP);
4427 }
4428 Check(IndexTy->isIntOrIntVectorTy(),
4429 "All GEP indices should be of integer type");
4430 }
4431 }
4432
4433 Check(GEP.getAddressSpace() == PtrTy->getAddressSpace(),
4434 "GEP address space doesn't match type", &GEP);
4435
4436 visitInstruction(GEP);
4437}
4438
4439static bool isContiguous(const ConstantRange &A, const ConstantRange &B) {
4440 return A.getUpper() == B.getLower() || A.getLower() == B.getUpper();
4441}
4442
/// Verify !range and !absolute_symbol metadata. These have the same
/// restrictions, except !absolute_symbol allows the full set.
///
/// The metadata is a flat list of (low, high) integer pairs. Each pair must
/// be a non-empty half-open interval; pairs must be sorted (by signed low
/// bound), pairwise disjoint, and non-contiguous — adjacent intervals would
/// have been merged. For NoaliasAddrspace the pair type must be i32;
/// otherwise it must match the instruction's scalar type.
void Verifier::verifyRangeLikeMetadata(const Value &I, const MDNode *Range,
                                       Type *Ty, RangeLikeMetadataKind Kind) {
  // Operands come in (low, high) pairs, and at least one pair is required.
  unsigned NumOperands = Range->getNumOperands();
  Check(NumOperands % 2 == 0, "Unfinished range!", Range);
  unsigned NumRanges = NumOperands / 2;
  Check(NumRanges >= 1, "It should have at least one range!", Range);

  ConstantRange LastRange(1, true); // Dummy initial value
  for (unsigned i = 0; i < NumRanges; ++i) {
    ConstantInt *Low =
        mdconst::dyn_extract<ConstantInt>(Range->getOperand(2 * i));
    Check(Low, "The lower limit must be an integer!", Low);
    ConstantInt *High =
        mdconst::dyn_extract<ConstantInt>(Range->getOperand(2 * i + 1));
    Check(High, "The upper limit must be an integer!", High);

    Check(High->getType() == Low->getType(), "Range pair types must match!",
          &I);

    if (Kind == RangeLikeMetadataKind::NoaliasAddrspace) {
      Check(High->getType()->isIntegerTy(32),
            "noalias.addrspace type must be i32!", &I);
    } else {
      Check(High->getType() == Ty->getScalarType(),
            "Range types must match instruction type!", &I);
    }

    APInt HighV = High->getValue();
    APInt LowV = Low->getValue();

    // ConstantRange asserts if the ranges are the same except for the min/max
    // value. Leave the cases it tolerates for the empty range error below.
    Check(LowV != HighV || LowV.isMaxValue() || LowV.isMinValue(),
          "The upper and lower limits cannot be the same value", &I);

    // A full set is only tolerated for !absolute_symbol; everything else must
    // be a proper, non-empty subset.
    ConstantRange CurRange(LowV, HighV);
    Check(!CurRange.isEmptySet() &&
              (Kind == RangeLikeMetadataKind::AbsoluteSymbol ||
               !CurRange.isFullSet()),
          "Range must not be empty!", Range);
    if (i != 0) {
      Check(CurRange.intersectWith(LastRange).isEmptySet(),
            "Intervals are overlapping", Range);
      Check(LowV.sgt(LastRange.getLower()), "Intervals are not in order",
            Range);
      Check(!isContiguous(CurRange, LastRange), "Intervals are contiguous",
            Range);
    }
    LastRange = ConstantRange(LowV, HighV);
  }
  // With 3+ intervals a wrapping last interval could collide with the first
  // one, which the in-loop pairwise checks above cannot see; re-check the
  // (first, last) pair explicitly.
  if (NumRanges > 2) {
    APInt FirstLow =
        mdconst::dyn_extract<ConstantInt>(Range->getOperand(0))->getValue();
    APInt FirstHigh =
        mdconst::dyn_extract<ConstantInt>(Range->getOperand(1))->getValue();
    ConstantRange FirstRange(FirstLow, FirstHigh);
    Check(FirstRange.intersectWith(LastRange).isEmptySet(),
          "Intervals are overlapping", Range);
    Check(!isContiguous(FirstRange, LastRange), "Intervals are contiguous",
          Range);
  }
}
4507
4508void Verifier::visitRangeMetadata(Instruction &I, MDNode *Range, Type *Ty) {
4509 assert(Range && Range == I.getMetadata(LLVMContext::MD_range) &&
4510 "precondition violation");
4511 verifyRangeLikeMetadata(I, Range, Ty, RangeLikeMetadataKind::Range);
4512}
4513
4514void Verifier::visitNoaliasAddrspaceMetadata(Instruction &I, MDNode *Range,
4515 Type *Ty) {
4516 assert(Range && Range == I.getMetadata(LLVMContext::MD_noalias_addrspace) &&
4517 "precondition violation");
4518 verifyRangeLikeMetadata(I, Range, Ty,
4519 RangeLikeMetadataKind::NoaliasAddrspace);
4520}
4521
4522void Verifier::checkAtomicMemAccessSize(Type *Ty, const Instruction *I) {
4523 unsigned Size = DL.getTypeSizeInBits(Ty).getFixedValue();
4524 Check(Size >= 8, "atomic memory access' size must be byte-sized", Ty, I);
4525 Check(!(Size & (Size - 1)),
4526 "atomic memory access' operand must have a power-of-two size", Ty, I);
4527}
4528
4529void Verifier::visitLoadInst(LoadInst &LI) {
4531 Check(PTy, "Load operand must be a pointer.", &LI);
4532 Type *ElTy = LI.getType();
4533 if (MaybeAlign A = LI.getAlign()) {
4534 Check(A->value() <= Value::MaximumAlignment,
4535 "huge alignment values are unsupported", &LI);
4536 }
4537 Check(ElTy->isSized(), "loading unsized types is not allowed", &LI);
4538 if (LI.isAtomic()) {
4539 Check(LI.getOrdering() != AtomicOrdering::Release &&
4540 LI.getOrdering() != AtomicOrdering::AcquireRelease,
4541 "Load cannot have Release ordering", &LI);
4542 Check(ElTy->getScalarType()->isIntOrPtrTy() ||
4544 "atomic load operand must have integer, pointer, floating point, "
4545 "or vector type!",
4546 ElTy, &LI);
4547
4548 checkAtomicMemAccessSize(ElTy, &LI);
4549 } else {
4551 "Non-atomic load cannot have SynchronizationScope specified", &LI);
4552 }
4553
4554 visitInstruction(LI);
4555}
4556
4557void Verifier::visitStoreInst(StoreInst &SI) {
4558 PointerType *PTy = dyn_cast<PointerType>(SI.getOperand(1)->getType());
4559 Check(PTy, "Store operand must be a pointer.", &SI);
4560 Type *ElTy = SI.getOperand(0)->getType();
4561 if (MaybeAlign A = SI.getAlign()) {
4562 Check(A->value() <= Value::MaximumAlignment,
4563 "huge alignment values are unsupported", &SI);
4564 }
4565 Check(ElTy->isSized(), "storing unsized types is not allowed", &SI);
4566 if (SI.isAtomic()) {
4567 Check(SI.getOrdering() != AtomicOrdering::Acquire &&
4568 SI.getOrdering() != AtomicOrdering::AcquireRelease,
4569 "Store cannot have Acquire ordering", &SI);
4570 Check(ElTy->getScalarType()->isIntOrPtrTy() ||
4572 "atomic store operand must have integer, pointer, floating point, "
4573 "or vector type!",
4574 ElTy, &SI);
4575 checkAtomicMemAccessSize(ElTy, &SI);
4576 } else {
4577 Check(SI.getSyncScopeID() == SyncScope::System,
4578 "Non-atomic store cannot have SynchronizationScope specified", &SI);
4579 }
4580 visitInstruction(SI);
4581}
4582
4583/// Check that SwiftErrorVal is used as a swifterror argument in CS.
4584void Verifier::verifySwiftErrorCall(CallBase &Call,
4585 const Value *SwiftErrorVal) {
4586 for (const auto &I : llvm::enumerate(Call.args())) {
4587 if (I.value() == SwiftErrorVal) {
4588 Check(Call.paramHasAttr(I.index(), Attribute::SwiftError),
4589 "swifterror value when used in a callsite should be marked "
4590 "with swifterror attribute",
4591 SwiftErrorVal, Call);
4592 }
4593 }
4594}
4595
4596void Verifier::verifySwiftErrorValue(const Value *SwiftErrorVal) {
4597 // Check that swifterror value is only used by loads, stores, or as
4598 // a swifterror argument.
4599 for (const User *U : SwiftErrorVal->users()) {
4601 isa<InvokeInst>(U),
4602 "swifterror value can only be loaded and stored from, or "
4603 "as a swifterror argument!",
4604 SwiftErrorVal, U);
4605 // If it is used by a store, check it is the second operand.
4606 if (auto StoreI = dyn_cast<StoreInst>(U))
4607 Check(StoreI->getOperand(1) == SwiftErrorVal,
4608 "swifterror value should be the second operand when used "
4609 "by stores",
4610 SwiftErrorVal, U);
4611 if (auto *Call = dyn_cast<CallBase>(U))
4612 verifySwiftErrorCall(*const_cast<CallBase *>(Call), SwiftErrorVal);
4613 }
4614}
4615
4616void Verifier::visitAllocaInst(AllocaInst &AI) {
4617 Type *Ty = AI.getAllocatedType();
4618 SmallPtrSet<Type*, 4> Visited;
4619 Check(Ty->isSized(&Visited), "Cannot allocate unsized type", &AI);
4620 // Check if it's a target extension type that disallows being used on the
4621 // stack.
4623 "Alloca has illegal target extension type", &AI);
4625 "Alloca array size must have integer type", &AI);
4626 if (MaybeAlign A = AI.getAlign()) {
4627 Check(A->value() <= Value::MaximumAlignment,
4628 "huge alignment values are unsupported", &AI);
4629 }
4630
4631 if (AI.isSwiftError()) {
4632 Check(Ty->isPointerTy(), "swifterror alloca must have pointer type", &AI);
4634 "swifterror alloca must not be array allocation", &AI);
4635 verifySwiftErrorValue(&AI);
4636 }
4637
4638 if (TT.isAMDGPU()) {
4640 "alloca on amdgpu must be in addrspace(5)", &AI);
4641 }
4642
4643 visitInstruction(AI);
4644}
4645
4646void Verifier::visitAtomicCmpXchgInst(AtomicCmpXchgInst &CXI) {
4647 Type *ElTy = CXI.getOperand(1)->getType();
4648 Check(ElTy->isIntOrPtrTy(),
4649 "cmpxchg operand must have integer or pointer type", ElTy, &CXI);
4650 checkAtomicMemAccessSize(ElTy, &CXI);
4651 visitInstruction(CXI);
4652}
4653
4654void Verifier::visitAtomicRMWInst(AtomicRMWInst &RMWI) {
4655 Check(RMWI.getOrdering() != AtomicOrdering::Unordered,
4656 "atomicrmw instructions cannot be unordered.", &RMWI);
4657 auto Op = RMWI.getOperation();
4658 Type *ElTy = RMWI.getOperand(1)->getType();
4659 if (Op == AtomicRMWInst::Xchg) {
4660 Check(ElTy->isIntegerTy() || ElTy->isFloatingPointTy() ||
4661 ElTy->isPointerTy(),
4662 "atomicrmw " + AtomicRMWInst::getOperationName(Op) +
4663 " operand must have integer or floating point type!",
4664 &RMWI, ElTy);
4665 } else if (AtomicRMWInst::isFPOperation(Op)) {
4667 "atomicrmw " + AtomicRMWInst::getOperationName(Op) +
4668 " operand must have floating-point or fixed vector of floating-point "
4669 "type!",
4670 &RMWI, ElTy);
4671 } else {
4672 Check(ElTy->isIntegerTy(),
4673 "atomicrmw " + AtomicRMWInst::getOperationName(Op) +
4674 " operand must have integer type!",
4675 &RMWI, ElTy);
4676 }
4677 checkAtomicMemAccessSize(ElTy, &RMWI);
4679 "Invalid binary operation!", &RMWI);
4680 visitInstruction(RMWI);
4681}
4682
4683void Verifier::visitFenceInst(FenceInst &FI) {
4684 const AtomicOrdering Ordering = FI.getOrdering();
4685 Check(Ordering == AtomicOrdering::Acquire ||
4686 Ordering == AtomicOrdering::Release ||
4687 Ordering == AtomicOrdering::AcquireRelease ||
4688 Ordering == AtomicOrdering::SequentiallyConsistent,
4689 "fence instructions may only have acquire, release, acq_rel, or "
4690 "seq_cst ordering.",
4691 &FI);
4692 visitInstruction(FI);
4693}
4694
4695void Verifier::visitExtractValueInst(ExtractValueInst &EVI) {
4697 EVI.getIndices()) == EVI.getType(),
4698 "Invalid ExtractValueInst operands!", &EVI);
4699
4700 visitInstruction(EVI);
4701}
4702
4703void Verifier::visitInsertValueInst(InsertValueInst &IVI) {
4705 IVI.getIndices()) ==
4706 IVI.getOperand(1)->getType(),
4707 "Invalid InsertValueInst operands!", &IVI);
4708
4709 visitInstruction(IVI);
4710}
4711
4712static Value *getParentPad(Value *EHPad) {
4713 if (auto *FPI = dyn_cast<FuncletPadInst>(EHPad))
4714 return FPI->getParentPad();
4715
4716 return cast<CatchSwitchInst>(EHPad)->getParentPad();
4717}
4718
// Verify that every predecessor of an EH pad's block reaches it through a
// legal unwind edge: landingpads only via invoke unwind edges, catchpads only
// from their owning catchswitch, and funclet pads/catchswitches via an unwind
// edge whose funclet-exit path ends at this pad's parent.
void Verifier::visitEHPadPredecessors(Instruction &I) {
  assert(I.isEHPad());

  BasicBlock *BB = I.getParent();
  Function *F = BB->getParent();

  Check(BB != &F->getEntryBlock(), "EH pad cannot be in entry block.", &I);

  if (auto *LPI = dyn_cast<LandingPadInst>(&I)) {
    // The landingpad instruction defines its parent as a landing pad block. The
    // landing pad block may be branched to only by the unwind edge of an
    // invoke.
    for (BasicBlock *PredBB : predecessors(BB)) {
      const auto *II = dyn_cast<InvokeInst>(PredBB->getTerminator());
      Check(II && II->getUnwindDest() == BB && II->getNormalDest() != BB,
            "Block containing LandingPadInst must be jumped to "
            "only by the unwind edge of an invoke.",
            LPI);
    }
    return;
  }
  if (auto *CPI = dyn_cast<CatchPadInst>(&I)) {
    // NOTE(review): "containg" typo below is in a runtime diagnostic string
    // and is preserved; downstream tests may match it verbatim.
    if (!pred_empty(BB))
      Check(BB->getUniquePredecessor() == CPI->getCatchSwitch()->getParent(),
            "Block containg CatchPadInst must be jumped to "
            "only by its catchswitch.",
            CPI);
    Check(BB != CPI->getCatchSwitch()->getUnwindDest(),
          "Catchswitch cannot unwind to one of its catchpads",
          CPI->getCatchSwitch(), CPI);
    return;
  }

  // Verify that each pred has a legal terminator with a legal to/from EH
  // pad relationship.
  Instruction *ToPad = &I;
  Value *ToPadParent = getParentPad(ToPad);
  for (BasicBlock *PredBB : predecessors(BB)) {
    Instruction *TI = PredBB->getTerminator();
    // FromPad is the pad (or none-token) the unwind edge originates in.
    Value *FromPad;
    if (auto *II = dyn_cast<InvokeInst>(TI)) {
      Check(II->getUnwindDest() == BB && II->getNormalDest() != BB,
            "EH pad must be jumped to via an unwind edge", ToPad, II);
      // Nounwind intrinsics that never lower to real calls cannot actually
      // unwind here, so skip the funclet-exit checks for them.
      auto *CalledFn =
          dyn_cast<Function>(II->getCalledOperand()->stripPointerCasts());
      if (CalledFn && CalledFn->isIntrinsic() && II->doesNotThrow() &&
          !IntrinsicInst::mayLowerToFunctionCall(CalledFn->getIntrinsicID()))
        continue;
      if (auto Bundle = II->getOperandBundle(LLVMContext::OB_funclet))
        FromPad = Bundle->Inputs[0];
      else
        FromPad = ConstantTokenNone::get(II->getContext());
    } else if (auto *CRI = dyn_cast<CleanupReturnInst>(TI)) {
      FromPad = CRI->getOperand(0);
      Check(FromPad != ToPadParent, "A cleanupret must exit its cleanup", CRI);
    } else if (auto *CSI = dyn_cast<CatchSwitchInst>(TI)) {
      FromPad = CSI;
    } else {
      Check(false, "EH pad must be jumped to via an unwind edge", ToPad, TI);
    }

    // The edge may exit from zero or more nested pads.
    // Walk up the parent-pad chain from FromPad; it must reach ToPadParent
    // before hitting ToPad itself, the none token, or a cycle.
    SmallPtrSet<Value *, 8> Seen;
    for (;; FromPad = getParentPad(FromPad)) {
      Check(FromPad != ToPad,
            "EH pad cannot handle exceptions raised within it", FromPad, TI);
      if (FromPad == ToPadParent) {
        // This is a legal unwind edge.
        break;
      }
      Check(!isa<ConstantTokenNone>(FromPad),
            "A single unwind edge may only enter one EH pad", TI);
      Check(Seen.insert(FromPad).second, "EH pad jumps through a cycle of pads",
            FromPad);

      // This will be diagnosed on the corresponding instruction already. We
      // need the extra check here to make sure getParentPad() works.
      Check(isa<FuncletPadInst>(FromPad) || isa<CatchSwitchInst>(FromPad),
            "Parent pad must be catchpad/cleanuppad/catchswitch", TI);
    }
  }
}
4801
4802void Verifier::visitLandingPadInst(LandingPadInst &LPI) {
4803 // The landingpad instruction is ill-formed if it doesn't have any clauses and
4804 // isn't a cleanup.
4805 Check(LPI.getNumClauses() > 0 || LPI.isCleanup(),
4806 "LandingPadInst needs at least one clause or to be a cleanup.", &LPI);
4807
4808 visitEHPadPredecessors(LPI);
4809
4810 if (!LandingPadResultTy)
4811 LandingPadResultTy = LPI.getType();
4812 else
4813 Check(LandingPadResultTy == LPI.getType(),
4814 "The landingpad instruction should have a consistent result type "
4815 "inside a function.",
4816 &LPI);
4817
4818 Function *F = LPI.getParent()->getParent();
4819 Check(F->hasPersonalityFn(),
4820 "LandingPadInst needs to be in a function with a personality.", &LPI);
4821
4822 // The landingpad instruction must be the first non-PHI instruction in the
4823 // block.
4824 Check(LPI.getParent()->getLandingPadInst() == &LPI,
4825 "LandingPadInst not the first non-PHI instruction in the block.", &LPI);
4826
4827 for (unsigned i = 0, e = LPI.getNumClauses(); i < e; ++i) {
4828 Constant *Clause = LPI.getClause(i);
4829 if (LPI.isCatch(i)) {
4830 Check(isa<PointerType>(Clause->getType()),
4831 "Catch operand does not have pointer type!", &LPI);
4832 } else {
4833 Check(LPI.isFilter(i), "Clause is neither catch nor filter!", &LPI);
4835 "Filter operand is not an array of constants!", &LPI);
4836 }
4837 }
4838
4839 visitInstruction(LPI);
4840}
4841
4842void Verifier::visitResumeInst(ResumeInst &RI) {
4844 "ResumeInst needs to be in a function with a personality.", &RI);
4845
4846 if (!LandingPadResultTy)
4847 LandingPadResultTy = RI.getValue()->getType();
4848 else
4849 Check(LandingPadResultTy == RI.getValue()->getType(),
4850 "The resume instruction should have a consistent result type "
4851 "inside a function.",
4852 &RI);
4853
4854 visitTerminator(RI);
4855}
4856
4857void Verifier::visitCatchPadInst(CatchPadInst &CPI) {
4858 BasicBlock *BB = CPI.getParent();
4859
4860 Function *F = BB->getParent();
4861 Check(F->hasPersonalityFn(),
4862 "CatchPadInst needs to be in a function with a personality.", &CPI);
4863
4865 "CatchPadInst needs to be directly nested in a CatchSwitchInst.",
4866 CPI.getParentPad());
4867
4868 // The catchpad instruction must be the first non-PHI instruction in the
4869 // block.
4870 Check(&*BB->getFirstNonPHIIt() == &CPI,
4871 "CatchPadInst not the first non-PHI instruction in the block.", &CPI);
4872
4873 visitEHPadPredecessors(CPI);
4874 visitFuncletPadInst(CPI);
4875}
4876
4877void Verifier::visitCatchReturnInst(CatchReturnInst &CatchReturn) {
4878 Check(isa<CatchPadInst>(CatchReturn.getOperand(0)),
4879 "CatchReturnInst needs to be provided a CatchPad", &CatchReturn,
4880 CatchReturn.getOperand(0));
4881
4882 visitTerminator(CatchReturn);
4883}
4884
4885void Verifier::visitCleanupPadInst(CleanupPadInst &CPI) {
4886 BasicBlock *BB = CPI.getParent();
4887
4888 Function *F = BB->getParent();
4889 Check(F->hasPersonalityFn(),
4890 "CleanupPadInst needs to be in a function with a personality.", &CPI);
4891
4892 // The cleanuppad instruction must be the first non-PHI instruction in the
4893 // block.
4894 Check(&*BB->getFirstNonPHIIt() == &CPI,
4895 "CleanupPadInst not the first non-PHI instruction in the block.", &CPI);
4896
4897 auto *ParentPad = CPI.getParentPad();
4898 Check(isa<ConstantTokenNone>(ParentPad) || isa<FuncletPadInst>(ParentPad),
4899 "CleanupPadInst has an invalid parent.", &CPI);
4900
4901 visitEHPadPredecessors(CPI);
4902 visitFuncletPadInst(CPI);
4903}
4904
// Verify the unwind-destination consistency rules for a funclet pad
// (catchpad or cleanuppad): every unwind edge that exits FPI — including
// edges found by recursively searching pads nested inside it — must agree on
// a single unwind destination, and for a catchpad that destination must match
// its parent catchswitch's.
void Verifier::visitFuncletPadInst(FuncletPadInst &FPI) {
  // First FPI-exiting unwind edge found, and the pad it unwinds to; all later
  // exiting edges are checked against these.
  User *FirstUser = nullptr;
  Value *FirstUnwindPad = nullptr;
  SmallVector<FuncletPadInst *, 8> Worklist({&FPI});
  SmallPtrSet<FuncletPadInst *, 8> Seen;

  while (!Worklist.empty()) {
    FuncletPadInst *CurrentPad = Worklist.pop_back_val();
    Check(Seen.insert(CurrentPad).second,
          "FuncletPadInst must not be nested within itself", CurrentPad);
    Value *UnresolvedAncestorPad = nullptr;
    for (User *U : CurrentPad->users()) {
      // Determine where this use unwinds to (nullptr means "to caller").
      BasicBlock *UnwindDest;
      if (auto *CRI = dyn_cast<CleanupReturnInst>(U)) {
        UnwindDest = CRI->getUnwindDest();
      } else if (auto *CSI = dyn_cast<CatchSwitchInst>(U)) {
        // We allow catchswitch unwind to caller to nest
        // within an outer pad that unwinds somewhere else,
        // because catchswitch doesn't have a nounwind variant.
        // See e.g. SimplifyCFGOpt::SimplifyUnreachable.
        if (CSI->unwindsToCaller())
          continue;
        UnwindDest = CSI->getUnwindDest();
      } else if (auto *II = dyn_cast<InvokeInst>(U)) {
        UnwindDest = II->getUnwindDest();
      } else if (isa<CallInst>(U)) {
        // Calls which don't unwind may be found inside funclet
        // pads that unwind somewhere else. We don't *require*
        // such calls to be annotated nounwind.
        continue;
      } else if (auto *CPI = dyn_cast<CleanupPadInst>(U)) {
        // The unwind dest for a cleanup can only be found by
        // recursive search. Add it to the worklist, and we'll
        // search for its first use that determines where it unwinds.
        Worklist.push_back(CPI);
        continue;
      } else {
        Check(isa<CatchReturnInst>(U), "Bogus funclet pad use", U);
        continue;
      }

      Value *UnwindPad;
      bool ExitsFPI;
      if (UnwindDest) {
        UnwindPad = &*UnwindDest->getFirstNonPHIIt();
        if (!cast<Instruction>(UnwindPad)->isEHPad())
          continue;
        Value *UnwindParent = getParentPad(UnwindPad);
        // Ignore unwind edges that don't exit CurrentPad.
        if (UnwindParent == CurrentPad)
          continue;
        // Determine whether the original funclet pad is exited,
        // and if we are scanning nested pads determine how many
        // of them are exited so we can stop searching their
        // children.
        Value *ExitedPad = CurrentPad;
        ExitsFPI = false;
        do {
          if (ExitedPad == &FPI) {
            ExitsFPI = true;
            // Now we can resolve any ancestors of CurrentPad up to
            // FPI, but not including FPI since we need to make sure
            // to check all direct users of FPI for consistency.
            UnresolvedAncestorPad = &FPI;
            break;
          }
          Value *ExitedParent = getParentPad(ExitedPad);
          if (ExitedParent == UnwindParent) {
            // ExitedPad is the ancestor-most pad which this unwind
            // edge exits, so we can resolve up to it, meaning that
            // ExitedParent is the first ancestor still unresolved.
            UnresolvedAncestorPad = ExitedParent;
            break;
          }
          ExitedPad = ExitedParent;
        } while (!isa<ConstantTokenNone>(ExitedPad));
      } else {
        // Unwinding to caller exits all pads.
        UnwindPad = ConstantTokenNone::get(FPI.getContext());
        ExitsFPI = true;
        UnresolvedAncestorPad = &FPI;
      }

      if (ExitsFPI) {
        // This unwind edge exits FPI. Make sure it agrees with other
        // such edges.
        if (FirstUser) {
          Check(UnwindPad == FirstUnwindPad,
                "Unwind edges out of a funclet "
                "pad must have the same unwind "
                "dest",
                &FPI, U, FirstUser);
        } else {
          FirstUser = U;
          FirstUnwindPad = UnwindPad;
          // Record cleanup sibling unwinds for verifySiblingFuncletUnwinds
          if (isa<CleanupPadInst>(&FPI) && !isa<ConstantTokenNone>(UnwindPad) &&
              getParentPad(UnwindPad) == getParentPad(&FPI))
            SiblingFuncletInfo[&FPI] = cast<Instruction>(U);
        }
      }
      // Make sure we visit all uses of FPI, but for nested pads stop as
      // soon as we know where they unwind to.
      if (CurrentPad != &FPI)
        break;
    }
    if (UnresolvedAncestorPad) {
      if (CurrentPad == UnresolvedAncestorPad) {
        // When CurrentPad is FPI itself, we don't mark it as resolved even if
        // we've found an unwind edge that exits it, because we need to verify
        // all direct uses of FPI.
        assert(CurrentPad == &FPI);
        continue;
      }
      // Pop off the worklist any nested pads that we've found an unwind
      // destination for. The pads on the worklist are the uncles,
      // great-uncles, etc. of CurrentPad. We've found an unwind destination
      // for all ancestors of CurrentPad up to but not including
      // UnresolvedAncestorPad.
      Value *ResolvedPad = CurrentPad;
      while (!Worklist.empty()) {
        Value *UnclePad = Worklist.back();
        Value *AncestorPad = getParentPad(UnclePad);
        // Walk ResolvedPad up the ancestor list until we either find the
        // uncle's parent or the last resolved ancestor.
        while (ResolvedPad != AncestorPad) {
          Value *ResolvedParent = getParentPad(ResolvedPad);
          if (ResolvedParent == UnresolvedAncestorPad) {
            break;
          }
          ResolvedPad = ResolvedParent;
        }
        // If the resolved ancestor search didn't find the uncle's parent,
        // then the uncle is not yet resolved.
        if (ResolvedPad != AncestorPad)
          break;
        // This uncle is resolved, so pop it from the worklist.
        Worklist.pop_back();
      }
    }
  }

  if (FirstUnwindPad) {
    // A catch's unwind destination must agree with its parent catchswitch's
    // (the none token models "unwinds to caller").
    if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(FPI.getParentPad())) {
      BasicBlock *SwitchUnwindDest = CatchSwitch->getUnwindDest();
      Value *SwitchUnwindPad;
      if (SwitchUnwindDest)
        SwitchUnwindPad = &*SwitchUnwindDest->getFirstNonPHIIt();
      else
        SwitchUnwindPad = ConstantTokenNone::get(FPI.getContext());
      Check(SwitchUnwindPad == FirstUnwindPad,
            "Unwind edges out of a catch must have the same unwind dest as "
            "the parent catchswitch",
            &FPI, FirstUser, CatchSwitch);
    }
  }

  visitInstruction(FPI);
}
5064
5065void Verifier::visitCatchSwitchInst(CatchSwitchInst &CatchSwitch) {
5066 BasicBlock *BB = CatchSwitch.getParent();
5067
5068 Function *F = BB->getParent();
5069 Check(F->hasPersonalityFn(),
5070 "CatchSwitchInst needs to be in a function with a personality.",
5071 &CatchSwitch);
5072
5073 // The catchswitch instruction must be the first non-PHI instruction in the
5074 // block.
5075 Check(&*BB->getFirstNonPHIIt() == &CatchSwitch,
5076 "CatchSwitchInst not the first non-PHI instruction in the block.",
5077 &CatchSwitch);
5078
5079 auto *ParentPad = CatchSwitch.getParentPad();
5080 Check(isa<ConstantTokenNone>(ParentPad) || isa<FuncletPadInst>(ParentPad),
5081 "CatchSwitchInst has an invalid parent.", ParentPad);
5082
5083 if (BasicBlock *UnwindDest = CatchSwitch.getUnwindDest()) {
5084 BasicBlock::iterator I = UnwindDest->getFirstNonPHIIt();
5085 Check(I->isEHPad() && !isa<LandingPadInst>(I),
5086 "CatchSwitchInst must unwind to an EH block which is not a "
5087 "landingpad.",
5088 &CatchSwitch);
5089
5090 // Record catchswitch sibling unwinds for verifySiblingFuncletUnwinds
5091 if (getParentPad(&*I) == ParentPad)
5092 SiblingFuncletInfo[&CatchSwitch] = &CatchSwitch;
5093 }
5094
5095 Check(CatchSwitch.getNumHandlers() != 0,
5096 "CatchSwitchInst cannot have empty handler list", &CatchSwitch);
5097
5098 for (BasicBlock *Handler : CatchSwitch.handlers()) {
5099 Check(isa<CatchPadInst>(Handler->getFirstNonPHIIt()),
5100 "CatchSwitchInst handlers must be catchpads", &CatchSwitch, Handler);
5101 }
5102
5103 visitEHPadPredecessors(CatchSwitch);
5104 visitTerminator(CatchSwitch);
5105}
5106
5107void Verifier::visitCleanupReturnInst(CleanupReturnInst &CRI) {
5109 "CleanupReturnInst needs to be provided a CleanupPad", &CRI,
5110 CRI.getOperand(0));
5111
5112 if (BasicBlock *UnwindDest = CRI.getUnwindDest()) {
5113 BasicBlock::iterator I = UnwindDest->getFirstNonPHIIt();
5114 Check(I->isEHPad() && !isa<LandingPadInst>(I),
5115 "CleanupReturnInst must unwind to an EH block which is not a "
5116 "landingpad.",
5117 &CRI);
5118 }
5119
5120 visitTerminator(CRI);
5121}
5122
5123void Verifier::verifyDominatesUse(Instruction &I, unsigned i) {
5124 Instruction *Op = cast<Instruction>(I.getOperand(i));
5125 // If the we have an invalid invoke, don't try to compute the dominance.
5126 // We already reject it in the invoke specific checks and the dominance
5127 // computation doesn't handle multiple edges.
5128 if (InvokeInst *II = dyn_cast<InvokeInst>(Op)) {
5129 if (II->getNormalDest() == II->getUnwindDest())
5130 return;
5131 }
5132
5133 // Quick check whether the def has already been encountered in the same block.
5134 // PHI nodes are not checked to prevent accepting preceding PHIs, because PHI
5135 // uses are defined to happen on the incoming edge, not at the instruction.
5136 //
5137 // FIXME: If this operand is a MetadataAsValue (wrapping a LocalAsMetadata)
5138 // wrapping an SSA value, assert that we've already encountered it. See
5139 // related FIXME in Mapper::mapLocalAsMetadata in ValueMapper.cpp.
5140 if (!isa<PHINode>(I) && InstsInThisBlock.count(Op))
5141 return;
5142
5143 const Use &U = I.getOperandUse(i);
5144 Check(DT.dominates(Op, U), "Instruction does not dominate all uses!", Op, &I);
5145}
5146
// Verify !dereferenceable / !dereferenceable_or_null attachments: the
// instruction must produce a pointer and the node must carry exactly one
// i64 ConstantInt operand (the byte count).
// NOTE(review): the Check(...) opener restricting this to load/inttoptr is
// missing from this extraction; only its message is visible below.
5147void Verifier::visitDereferenceableMetadata(Instruction& I, MDNode* MD) {
5148 Check(I.getType()->isPointerTy(),
5149 "dereferenceable, dereferenceable_or_null "
5150 "apply only to pointer types",
5151 &I);
5153 "dereferenceable, dereferenceable_or_null apply only to load"
5154 " and inttoptr instructions, use attributes for calls or invokes",
5155 &I);
5156 Check(MD->getNumOperands() == 1,
5157 "dereferenceable, dereferenceable_or_null "
5158 "take one operand!",
5159 &I);
// The single operand must be an i64 constant; dyn_extract yields null for
// anything else, which the conjunction below rejects.
5160 ConstantInt *CI = mdconst::dyn_extract<ConstantInt>(MD->getOperand(0));
5161 Check(CI && CI->getType()->isIntegerTy(64),
5162 "dereferenceable, "
5163 "dereferenceable_or_null metadata value must be an i64!",
5164 &I);
5165}
5166
5167void Verifier::visitNofreeMetadata(Instruction &I, MDNode *MD) {
5168 Check(I.getType()->isPointerTy(), "nofree applies only to pointer types", &I);
5169 Check((isa<IntToPtrInst>(I)), "nofree applies only to inttoptr instruction",
5170 &I);
5171 Check(MD->getNumOperands() == 0, "nofree metadata must be empty", &I);
5172}
5173
// Verify a !prof attachment. The first operand names the profile kind
// ("branch_weights", "VP", or "unknown"); the remaining operands are
// validated per kind.
// NOTE(review): several Check(...) openers (original lines 5195, 5200,
// 5231, 5249) are missing from this extraction; only their messages and
// arguments are visible below.
5174void Verifier::visitProfMetadata(Instruction &I, MDNode *MD) {
// Returns the successor/operand count branch_weights would need for this
// instruction kind, or 0 if branch_weights is not applicable to it.
5175 auto GetBranchingTerminatorNumOperands = [&]() {
5176 unsigned ExpectedNumOperands = 0;
5177 if (BranchInst *BI = dyn_cast<BranchInst>(&I))
5178 ExpectedNumOperands = BI->getNumSuccessors();
5179 else if (SwitchInst *SI = dyn_cast<SwitchInst>(&I))
5180 ExpectedNumOperands = SI->getNumSuccessors();
5181 else if (isa<CallInst>(&I))
5182 ExpectedNumOperands = 1;
5183 else if (IndirectBrInst *IBI = dyn_cast<IndirectBrInst>(&I))
5184 ExpectedNumOperands = IBI->getNumDestinations();
5185 else if (isa<SelectInst>(&I))
5186 ExpectedNumOperands = 2;
5187 else if (CallBrInst *CI = dyn_cast<CallBrInst>(&I))
5188 ExpectedNumOperands = CI->getNumSuccessors();
5189 return ExpectedNumOperands;
5190 };
5191 Check(MD->getNumOperands() >= 1,
5192 "!prof annotations should have at least 1 operand", MD);
5193 // Check first operand.
5194 Check(MD->getOperand(0) != nullptr, "first operand should not be null", MD);
5196 "expected string with name of the !prof annotation", MD);
5197 MDString *MDS = cast<MDString>(MD->getOperand(0));
5198 StringRef ProfName = MDS->getString();
5199
// This branch handles the "unknown" profile kind (its enclosing 'if' on the
// elided original line 5200); it is only legal where branch_weights would be.
5201 Check(GetBranchingTerminatorNumOperands() != 0 || isa<InvokeInst>(I),
5202 "'unknown' !prof should only appear on instructions on which "
5203 "'branch_weights' would",
5204 MD);
5205 verifyUnknownProfileMetadata(MD);
5206 return;
5207 }
5208
5209 Check(MD->getNumOperands() >= 2,
5210 "!prof annotations should have no less than 2 operands", MD);
5211
5212 // Check consistency of !prof branch_weights metadata.
5213 if (ProfName == MDProfLabels::BranchWeights) {
5214 unsigned NumBranchWeights = getNumBranchWeights(*MD);
// Invokes are special: they may carry 1 (taken) or 2 (taken/unwind) weights.
5215 if (isa<InvokeInst>(&I)) {
5216 Check(NumBranchWeights == 1 || NumBranchWeights == 2,
5217 "Wrong number of InvokeInst branch_weights operands", MD);
5218 } else {
5219 const unsigned ExpectedNumOperands = GetBranchingTerminatorNumOperands();
5220 if (ExpectedNumOperands == 0)
5221 CheckFailed("!prof branch_weights are not allowed for this instruction",
5222 MD);
5223
5224 Check(NumBranchWeights == ExpectedNumOperands, "Wrong number of operands",
5225 MD);
5226 }
// Every weight operand (past the header) must be a constant integer.
// TODO(review): "brunch_weights" in the message below is a typo for
// "branch_weights"; left untouched here since it is runtime output.
5227 for (unsigned i = getBranchWeightOffset(MD); i < MD->getNumOperands();
5228 ++i) {
5229 auto &MDO = MD->getOperand(i);
5230 Check(MDO, "second operand should not be null", MD);
5232 "!prof brunch_weights operand is not a const int");
5233 }
5234 } else if (ProfName == MDProfLabels::ValueProfile) {
5235 Check(isValueProfileMD(MD), "invalid value profiling metadata", MD);
// Operand 1 encodes the value-profiling kind; it must be an in-range
// InstrProfValueKind constant.
5236 ConstantInt *KindInt = mdconst::dyn_extract<ConstantInt>(MD->getOperand(1));
5237 Check(KindInt, "VP !prof missing kind argument", MD);
5238
5239 auto Kind = KindInt->getZExtValue();
5240 Check(Kind >= InstrProfValueKind::IPVK_First &&
5241 Kind <= InstrProfValueKind::IPVK_Last,
5242 "Invalid VP !prof kind", MD);
// Odd total operand count == even number of (value, count) entries after
// the "VP" string and kind operands.
5243 Check(MD->getNumOperands() % 2 == 1,
5244 "VP !prof should have an even number "
5245 "of arguments after 'VP'",
5246 MD);
5247 if (Kind == InstrProfValueKind::IPVK_IndirectCallTarget ||
5248 Kind == InstrProfValueKind::IPVK_MemOPSize)
5250 "VP !prof indirect call or memop size expected to be applied to "
5251 "CallBase instructions only",
5252 MD);
5253 } else {
5254 CheckFailed("expected either branch_weights or VP profile name", MD);
5255 }
5256}
5257
// Verify a !DIAssignID attachment and all of its users: the attachment may
// only sit on memory-writing instructions, and its users must be
// llvm.dbg.assign intrinsics / assign DbgVariableRecords in the same
// function as the annotated instruction.
// NOTE(review): the initializer of ExpectedInstTy and one CheckDI opener
// (original lines 5265, 5272) are missing from this extraction.
5258void Verifier::visitDIAssignIDMetadata(Instruction &I, MDNode *MD) {
5259 assert(I.hasMetadata(LLVMContext::MD_DIAssignID));
5260 // DIAssignID metadata must be attached to either an alloca or some form of
5261 // store/memory-writing instruction.
5262 // FIXME: We allow all intrinsic insts here to avoid trying to enumerate all
5263 // possible store intrinsics.
5264 bool ExpectedInstTy =
5266 CheckDI(ExpectedInstTy, "!DIAssignID attached to unexpected instruction kind",
5267 I, MD);
5268 // Iterate over the MetadataAsValue uses of the DIAssignID - these should
5269 // only be found as DbgAssignIntrinsic operands.
5270 if (auto *AsValue = MetadataAsValue::getIfExists(Context, MD)) {
5271 for (auto *User : AsValue->users()) {
5273 "!DIAssignID should only be used by llvm.dbg.assign intrinsics",
5274 MD, User);
5275 // All of the dbg.assign intrinsics should be in the same function as I.
5276 if (auto *DAI = dyn_cast<DbgAssignIntrinsic>(User))
5277 CheckDI(DAI->getFunction() == I.getFunction(),
5278 "dbg.assign not in same function as inst", DAI, &I);
5279 }
5280 }
// Also verify the non-instruction debug records that reference this ID.
5281 for (DbgVariableRecord *DVR :
5282 cast<DIAssignID>(MD)->getAllDbgVariableRecordUsers()) {
5283 CheckDI(DVR->isDbgAssign(),
5284 "!DIAssignID should only be used by Assign DVRs.", MD, DVR);
5285 CheckDI(DVR->getFunction() == I.getFunction(),
5286 "DVRAssign not in same function as inst", DVR, &I);
5287 }
5288}
5289
// Verify a !mmra attachment: it must be either a single MMRA tag or a tuple
// whose operands are all MMRA tags.
// NOTE(review): the Check(...) opener restricting the instruction kind
// (original line 5291) is missing from this extraction.
5290void Verifier::visitMMRAMetadata(Instruction &I, MDNode *MD) {
5292 "!mmra metadata attached to unexpected instruction kind", I, MD);
5293
5294 // MMRA Metadata should either be a tag, e.g. !{!"foo", !"bar"}, or a
5295 // list of tags such as !2 in the following example:
5296 // !0 = !{!"a", !"b"}
5297 // !1 = !{!"c", !"d"}
5298 // !2 = !{!0, !1}
5299 if (MMRAMetadata::isTagMD(MD))
5300 return;
5301
5302 Check(isa<MDTuple>(MD), "!mmra expected to be a metadata tuple", I, MD);
5303 for (const MDOperand &MDOp : MD->operands())
5304 Check(MMRAMetadata::isTagMD(MDOp.get()),
5305 "!mmra metadata tuple operand is not an MMRA tag", I, MDOp.get());
5306}
5307
// Verify a memprof call-stack node: a non-empty list of operands, each of
// which must be a constant integer (a hash of a call location).
// NOTE(review): the Check(...) opener inside the loop (original line 5315)
// is missing from this extraction; only its message is visible.
5308void Verifier::visitCallStackMetadata(MDNode *MD) {
5309 // Call stack metadata should consist of a list of at least 1 constant int
5310 // (representing a hash of the location).
5311 Check(MD->getNumOperands() >= 1,
5312 "call stack metadata should have at least 1 operand", MD);
5313
5314 for (const auto &Op : MD->operands())
5316 "call stack metadata operand should be constant integer", Op);
5317}
5318
5319void Verifier::visitMemProfMetadata(Instruction &I, MDNode *MD) {
5320 Check(isa<CallBase>(I), "!memprof metadata should only exist on calls", &I);
5321 Check(MD->getNumOperands() >= 1,
5322 "!memprof annotations should have at least 1 metadata operand "
5323 "(MemInfoBlock)",
5324 MD);
5325
5326 // Check each MIB
5327 for (auto &MIBOp : MD->operands()) {
5328 MDNode *MIB = dyn_cast<MDNode>(MIBOp);
5329 // The first operand of an MIB should be the call stack metadata.
5330 // There rest of the operands should be MDString tags, and there should be
5331 // at least one.
5332 Check(MIB->getNumOperands() >= 2,
5333 "Each !memprof MemInfoBlock should have at least 2 operands", MIB);
5334
5335 // Check call stack metadata (first operand).
5336 Check(MIB->getOperand(0) != nullptr,
5337 "!memprof MemInfoBlock first operand should not be null", MIB);
5338 Check(isa<MDNode>(MIB->getOperand(0)),
5339 "!memprof MemInfoBlock first operand should be an MDNode", MIB);
5340 MDNode *StackMD = dyn_cast<MDNode>(MIB->getOperand(0));
5341 visitCallStackMetadata(StackMD);
5342
5343 // The next set of 1 or more operands should be MDString.
5344 unsigned I = 1;
5345 for (; I < MIB->getNumOperands(); ++I) {
5346 if (!isa<MDString>(MIB->getOperand(I))) {
5347 Check(I > 1,
5348 "!memprof MemInfoBlock second operand should be an MDString",
5349 MIB);
5350 break;
5351 }
5352 }
5353
5354 // Any remaining should be MDNode that are pairs of integers
5355 for (; I < MIB->getNumOperands(); ++I) {
5356 MDNode *OpNode = dyn_cast<MDNode>(MIB->getOperand(I));
5357 Check(OpNode, "Not all !memprof MemInfoBlock operands 2 to N are MDNode",
5358 MIB);
5359 Check(OpNode->getNumOperands() == 2,
5360 "Not all !memprof MemInfoBlock operands 2 to N are MDNode with 2 "
5361 "operands",
5362 MIB);
5363 // Check that all of Op's operands are ConstantInt.
5364 Check(llvm::all_of(OpNode->operands(),
5365 [](const MDOperand &Op) {
5366 return mdconst::hasa<ConstantInt>(Op);
5367 }),
5368 "Not all !memprof MemInfoBlock operands 2 to N are MDNode with "
5369 "ConstantInt operands",
5370 MIB);
5371 }
5372 }
5373}
5374
5375void Verifier::visitCallsiteMetadata(Instruction &I, MDNode *MD) {
5376 Check(isa<CallBase>(I), "!callsite metadata should only exist on calls", &I);
5377 // Verify the partial callstack annotated from memprof profiles. This callsite
5378 // is a part of a profiled allocation callstack.
5379 visitCallStackMetadata(MD);
5380}
5381
5382static inline bool isConstantIntMetadataOperand(const Metadata *MD) {
5383 if (auto *VAL = dyn_cast<ValueAsMetadata>(MD))
5384 return isa<ConstantInt>(VAL->getValue());
5385 return false;
5386}
5387
// Verify a !callee_type attachment: a list of generalized type metadata
// nodes, each of the form !{i64 0, !"<generalized type id>"}.
// NOTE(review): the Check(...) opener validating that each operand is an
// MDNode (original line 5392) is missing from this extraction.
5388void Verifier::visitCalleeTypeMetadata(Instruction &I, MDNode *MD) {
5389 Check(isa<CallBase>(I), "!callee_type metadata should only exist on calls",
5390 &I);
5391 for (Metadata *Op : MD->operands()) {
5393 "The callee_type metadata must be a list of type metadata nodes", Op);
5394 auto *TypeMD = cast<MDNode>(Op);
5395 Check(TypeMD->getNumOperands() == 2,
5396 "Well-formed generalized type metadata must contain exactly two "
5397 "operands",
5398 Op);
// Operand 0 must be the constant integer zero (function type metadata
// offset); operand 1 must be a generalized MDString.
5399 Check(isConstantIntMetadataOperand(TypeMD->getOperand(0)) &&
5400 mdconst::extract<ConstantInt>(TypeMD->getOperand(0))->isZero(),
5401 "The first operand of type metadata for functions must be zero", Op);
5402 Check(TypeMD->hasGeneralizedMDString(),
5403 "Only generalized type metadata can be part of the callee_type "
5404 "metadata list",
5405 Op);
5406 }
5407}
5408
5409void Verifier::visitAnnotationMetadata(MDNode *Annotation) {
5410 Check(isa<MDTuple>(Annotation), "annotation must be a tuple");
5411 Check(Annotation->getNumOperands() >= 1,
5412 "annotation must have at least one operand");
5413 for (const MDOperand &Op : Annotation->operands()) {
5414 bool TupleOfStrings =
5415 isa<MDTuple>(Op.get()) &&
5416 all_of(cast<MDTuple>(Op)->operands(), [](auto &Annotation) {
5417 return isa<MDString>(Annotation.get());
5418 });
5419 Check(isa<MDString>(Op.get()) || TupleOfStrings,
5420 "operands must be a string or a tuple of strings");
5421 }
5422}
5423
// Verify a single alias scope node: 2-3 operands where operand 0 is
// self-referential or a string, operand 1 is the domain node (itself 1-2
// operands with the same self-reference/string rule), and an optional
// operand 2 string name.
// NOTE(review): the Check(...) opener for the third-operand rule (original
// line 5431) is missing from this extraction.
5424void Verifier::visitAliasScopeMetadata(const MDNode *MD) {
5425 unsigned NumOps = MD->getNumOperands();
5426 Check(NumOps >= 2 && NumOps <= 3, "scope must have two or three operands",
5427 MD);
5428 Check(MD->getOperand(0).get() == MD || isa<MDString>(MD->getOperand(0)),
5429 "first scope operand must be self-referential or string", MD);
5430 if (NumOps == 3)
5432 "third scope operand must be string (if used)", MD);
5433
// Operand 1 is the scope's domain; validate its structure too.
5434 MDNode *Domain = dyn_cast<MDNode>(MD->getOperand(1));
5435 Check(Domain != nullptr, "second scope operand must be MDNode", MD);
5436
5437 unsigned NumDomainOps = Domain->getNumOperands();
5438 Check(NumDomainOps >= 1 && NumDomainOps <= 2,
5439 "domain must have one or two operands", Domain);
5440 Check(Domain->getOperand(0).get() == Domain ||
5441 isa<MDString>(Domain->getOperand(0)),
5442 "first domain operand must be self-referential or string", Domain);
5443 if (NumDomainOps == 2)
5444 Check(isa<MDString>(Domain->getOperand(1)),
5445 "second domain operand must be string (if used)", Domain);
5446}
5447
5448void Verifier::visitAliasScopeListMetadata(const MDNode *MD) {
5449 for (const MDOperand &Op : MD->operands()) {
5450 const MDNode *OpMD = dyn_cast<MDNode>(Op);
5451 Check(OpMD != nullptr, "scope list must consist of MDNodes", MD);
5452 visitAliasScopeMetadata(OpMD);
5453 }
5454}
5455
5456void Verifier::visitAccessGroupMetadata(const MDNode *MD) {
5457 auto IsValidAccessScope = [](const MDNode *MD) {
5458 return MD->getNumOperands() == 0 && MD->isDistinct();
5459 };
5460
5461 // It must be either an access scope itself...
5462 if (IsValidAccessScope(MD))
5463 return;
5464
5465 // ...or a list of access scopes.
5466 for (const MDOperand &Op : MD->operands()) {
5467 const MDNode *OpMD = dyn_cast<MDNode>(Op);
5468 Check(OpMD != nullptr, "Access scope list must consist of MDNodes", MD);
5469 Check(IsValidAccessScope(OpMD),
5470 "Access scope list contains invalid access scope", MD);
5471 }
5472}
5473
5474void Verifier::visitCapturesMetadata(Instruction &I, const MDNode *Captures) {
5475 static const char *ValidArgs[] = {"address_is_null", "address",
5476 "read_provenance", "provenance"};
5477
5478 auto *SI = dyn_cast<StoreInst>(&I);
5479 Check(SI, "!captures metadata can only be applied to store instructions", &I);
5480 Check(SI->getValueOperand()->getType()->isPointerTy(),
5481 "!captures metadata can only be applied to store with value operand of "
5482 "pointer type",
5483 &I);
5484 Check(Captures->getNumOperands() != 0, "!captures metadata cannot be empty",
5485 &I);
5486
5487 for (Metadata *Op : Captures->operands()) {
5488 auto *Str = dyn_cast<MDString>(Op);
5489 Check(Str, "!captures metadata must be a list of strings", &I);
5490 Check(is_contained(ValidArgs, Str->getString()),
5491 "invalid entry in !captures metadata", &I, Str);
5492 }
5493}
5494
// Verify an !alloc_token attachment: calls only, exactly two operands - a
// string (operand 0) and, per the message below, an integer constant
// (operand 1; its Check opener on original line 5499 is missing from this
// extraction).
5495void Verifier::visitAllocTokenMetadata(Instruction &I, MDNode *MD) {
5496 Check(isa<CallBase>(I), "!alloc_token should only exist on calls", &I);
5497 Check(MD->getNumOperands() == 2, "!alloc_token must have 2 operands", MD);
5498 Check(isa<MDString>(MD->getOperand(0)), "expected string", MD);
5500 "expected integer constant", MD);
5501}
5502
5503/// verifyInstruction - Verify that an instruction is well formed.
5504///
// Generic per-instruction verification: placement in a block, type/name
// legality, operand sanity (module/function locality, intrinsic address
// rules, dominance), and dispatch to the per-kind metadata verifiers.
// NOTE(review): numerous Check(...) openers (original lines 5564, 5626,
// 5638, 5644-5645, 5651, 5658, 5688, 5744, ...) are missing from this
// extraction; only their messages/arguments remain visible.
5505void Verifier::visitInstruction(Instruction &I) {
5506 BasicBlock *BB = I.getParent();
5507 Check(BB, "Instruction not embedded in basic block!", &I);
5508
// Self-reference is only legal for PHIs, or in unreachable code.
5509 if (!isa<PHINode>(I)) { // Check that non-phi nodes are not self referential
5510 for (User *U : I.users()) {
5511 Check(U != (User *)&I || !DT.isReachableFromEntry(BB),
5512 "Only PHI nodes may reference their own value!", &I);
5513 }
5514 }
5515
5516 // Check that void typed values don't have names
5517 Check(!I.getType()->isVoidTy() || !I.hasName(),
5518 "Instruction has a name, but provides a void value!", &I);
5519
5520 // Check that the return value of the instruction is either void or a legal
5521 // value type.
5522 Check(I.getType()->isVoidTy() || I.getType()->isFirstClassType(),
5523 "Instruction returns a non-scalar type!", &I);
5524
5525 // Check that the instruction doesn't produce metadata. Calls are already
5526 // checked against the callee type.
5527 Check(!I.getType()->isMetadataTy() || isa<CallInst>(I) || isa<InvokeInst>(I),
5528 "Invalid use of metadata!", &I);
5529
5530 // Check that all uses of the instruction, if they are instructions
5531 // themselves, actually have parent basic blocks. If the use is not an
5532 // instruction, it is an error!
5533 for (Use &U : I.uses()) {
5534 if (Instruction *Used = dyn_cast<Instruction>(U.getUser()))
5535 Check(Used->getParent() != nullptr,
5536 "Instruction referencing"
5537 " instruction not embedded in a basic block!",
5538 &I, Used);
5539 else {
5540 CheckFailed("Use of instruction is not an instruction!", U);
5541 return;
5542 }
5543 }
5544
5545 // Get a pointer to the call base of the instruction if it is some form of
5546 // call.
5547 const CallBase *CBI = dyn_cast<CallBase>(&I);
5548
// Validate every operand: non-null, first-class, and local to this
// function/module for the kinds checked below.
5549 for (unsigned i = 0, e = I.getNumOperands(); i != e; ++i) {
5550 Check(I.getOperand(i) != nullptr, "Instruction has null operand!", &I);
5551
5552 // Check to make sure that only first-class-values are operands to
5553 // instructions.
5554 if (!I.getOperand(i)->getType()->isFirstClassType()) {
5555 Check(false, "Instruction operands must be first-class values!", &I);
5556 }
5557
5558 if (Function *F = dyn_cast<Function>(I.getOperand(i))) {
5559 // This code checks whether the function is used as the operand of a
5560 // clang_arc_attachedcall operand bundle.
5561 auto IsAttachedCallOperand = [](Function *F, const CallBase *CBI,
5562 int Idx) {
5563 return CBI && CBI->isOperandBundleOfType(
5565 };
5566
5567 // Check to make sure that the "address of" an intrinsic function is never
5568 // taken. Ignore cases where the address of the intrinsic function is used
5569 // as the argument of operand bundle "clang.arc.attachedcall" as those
5570 // cases are handled in verifyAttachedCallBundle.
5571 Check((!F->isIntrinsic() ||
5572 (CBI && &CBI->getCalledOperandUse() == &I.getOperandUse(i)) ||
5573 IsAttachedCallOperand(F, CBI, i)),
5574 "Cannot take the address of an intrinsic!", &I);
// Only a small allowlist of intrinsics may be the callee of non-CallInst
// call sites (e.g. invoke); everything else must be a direct call.
5575 Check(!F->isIntrinsic() || isa<CallInst>(I) || isa<CallBrInst>(I) ||
5576 F->getIntrinsicID() == Intrinsic::donothing ||
5577 F->getIntrinsicID() == Intrinsic::seh_try_begin ||
5578 F->getIntrinsicID() == Intrinsic::seh_try_end ||
5579 F->getIntrinsicID() == Intrinsic::seh_scope_begin ||
5580 F->getIntrinsicID() == Intrinsic::seh_scope_end ||
5581 F->getIntrinsicID() == Intrinsic::coro_resume ||
5582 F->getIntrinsicID() == Intrinsic::coro_destroy ||
5583 F->getIntrinsicID() == Intrinsic::coro_await_suspend_void ||
5584 F->getIntrinsicID() == Intrinsic::coro_await_suspend_bool ||
5585 F->getIntrinsicID() == Intrinsic::coro_await_suspend_handle ||
5586 F->getIntrinsicID() ==
5587 Intrinsic::experimental_patchpoint_void ||
5588 F->getIntrinsicID() == Intrinsic::experimental_patchpoint ||
5589 F->getIntrinsicID() == Intrinsic::fake_use ||
5590 F->getIntrinsicID() == Intrinsic::experimental_gc_statepoint ||
5591 F->getIntrinsicID() == Intrinsic::wasm_throw ||
5592 F->getIntrinsicID() == Intrinsic::wasm_rethrow ||
5593 IsAttachedCallOperand(F, CBI, i),
5594 "Cannot invoke an intrinsic other than donothing, patchpoint, "
5595 "statepoint, coro_resume, coro_destroy, clang.arc.attachedcall or "
5596 "wasm.(re)throw",
5597 &I);
5598 Check(F->getParent() == &M, "Referencing function in another module!", &I,
5599 &M, F, F->getParent());
5600 } else if (BasicBlock *OpBB = dyn_cast<BasicBlock>(I.getOperand(i))) {
5601 Check(OpBB->getParent() == BB->getParent(),
5602 "Referring to a basic block in another function!", &I);
5603 } else if (Argument *OpArg = dyn_cast<Argument>(I.getOperand(i))) {
5604 Check(OpArg->getParent() == BB->getParent(),
5605 "Referring to an argument in another function!", &I);
5606 } else if (GlobalValue *GV = dyn_cast<GlobalValue>(I.getOperand(i))) {
5607 Check(GV->getParent() == &M, "Referencing global in another module!", &I,
5608 &M, GV, GV->getParent());
5609 } else if (Instruction *OpInst = dyn_cast<Instruction>(I.getOperand(i))) {
5610 Check(OpInst->getFunction() == BB->getParent(),
5611 "Referring to an instruction in another function!", &I);
5612 verifyDominatesUse(I, i);
5613 } else if (isa<InlineAsm>(I.getOperand(i))) {
// Inline asm may only appear as the callee of this very call site.
5614 Check(CBI && &CBI->getCalledOperandUse() == &I.getOperandUse(i),
5615 "Cannot take the address of an inline asm!", &I);
5616 } else if (auto *C = dyn_cast<Constant>(I.getOperand(i))) {
5617 visitConstantExprsRecursively(C);
5618 }
5619 }
5620
// From here on: per-kind metadata attachment verification.
5621 if (MDNode *MD = I.getMetadata(LLVMContext::MD_fpmath)) {
5622 Check(I.getType()->isFPOrFPVectorTy(),
5623 "fpmath requires a floating point result!", &I);
5624 Check(MD->getNumOperands() == 1, "fpmath takes one operand!", &I);
5625 if (ConstantFP *CFP0 =
// The single operand must be a finite, positive, float-typed accuracy.
5627 const APFloat &Accuracy = CFP0->getValueAPF();
5628 Check(&Accuracy.getSemantics() == &APFloat::IEEEsingle(),
5629 "fpmath accuracy must have float type", &I);
5630 Check(Accuracy.isFiniteNonZero() && !Accuracy.isNegative(),
5631 "fpmath accuracy not a positive number!", &I);
5632 } else {
5633 Check(false, "invalid fpmath accuracy!", &I);
5634 }
5635 }
5636
5637 if (MDNode *Range = I.getMetadata(LLVMContext::MD_range)) {
5639 "Ranges are only for loads, calls and invokes!", &I);
5640 visitRangeMetadata(I, Range, I.getType());
5641 }
5642
5643 if (MDNode *Range = I.getMetadata(LLVMContext::MD_noalias_addrspace)) {
5646 "noalias.addrspace are only for memory operations!", &I);
5647 visitNoaliasAddrspaceMetadata(I, Range, I.getType());
5648 }
5649
5650 if (I.hasMetadata(LLVMContext::MD_invariant_group)) {
5652 "invariant.group metadata is only for loads and stores", &I);
5653 }
5654
5655 if (MDNode *MD = I.getMetadata(LLVMContext::MD_nonnull)) {
5656 Check(I.getType()->isPointerTy(), "nonnull applies only to pointer types",
5657 &I);
5659 "nonnull applies only to load instructions, use attributes"
5660 " for calls or invokes",
5661 &I);
5662 Check(MD->getNumOperands() == 0, "nonnull metadata must be empty", &I);
5663 }
5664
5665 if (MDNode *MD = I.getMetadata(LLVMContext::MD_dereferenceable))
5666 visitDereferenceableMetadata(I, MD);
5667
5668 if (MDNode *MD = I.getMetadata(LLVMContext::MD_dereferenceable_or_null))
5669 visitDereferenceableMetadata(I, MD);
5670
5671 if (MDNode *MD = I.getMetadata(LLVMContext::MD_nofree))
5672 visitNofreeMetadata(I, MD);
5673
5674 if (MDNode *TBAA = I.getMetadata(LLVMContext::MD_tbaa))
5675 TBAAVerifyHelper.visitTBAAMetadata(&I, TBAA);
5676
5677 if (MDNode *MD = I.getMetadata(LLVMContext::MD_noalias))
5678 visitAliasScopeListMetadata(MD);
5679 if (MDNode *MD = I.getMetadata(LLVMContext::MD_alias_scope))
5680 visitAliasScopeListMetadata(MD);
5681
5682 if (MDNode *MD = I.getMetadata(LLVMContext::MD_access_group))
5683 visitAccessGroupMetadata(MD);
5684
5685 if (MDNode *AlignMD = I.getMetadata(LLVMContext::MD_align)) {
5686 Check(I.getType()->isPointerTy(), "align applies only to pointer types",
5687 &I);
5689 "align applies only to load instructions, "
5690 "use attributes for calls or invokes",
5691 &I);
5692 Check(AlignMD->getNumOperands() == 1, "align takes one operand!", &I);
// The operand must be an i64 power of two within the implementation's
// maximum alignment.
5693 ConstantInt *CI = mdconst::dyn_extract<ConstantInt>(AlignMD->getOperand(0));
5694 Check(CI && CI->getType()->isIntegerTy(64),
5695 "align metadata value must be an i64!", &I);
5696 uint64_t Align = CI->getZExtValue();
5697 Check(isPowerOf2_64(Align), "align metadata value must be a power of 2!",
5698 &I);
5699 Check(Align <= Value::MaximumAlignment,
5700 "alignment is larger that implementation defined limit", &I);
5701 }
5702
5703 if (MDNode *MD = I.getMetadata(LLVMContext::MD_prof))
5704 visitProfMetadata(I, MD);
5705
5706 if (MDNode *MD = I.getMetadata(LLVMContext::MD_memprof))
5707 visitMemProfMetadata(I, MD);
5708
5709 if (MDNode *MD = I.getMetadata(LLVMContext::MD_callsite))
5710 visitCallsiteMetadata(I, MD);
5711
5712 if (MDNode *MD = I.getMetadata(LLVMContext::MD_callee_type))
5713 visitCalleeTypeMetadata(I, MD);
5714
5715 if (MDNode *MD = I.getMetadata(LLVMContext::MD_DIAssignID))
5716 visitDIAssignIDMetadata(I, MD);
5717
5718 if (MDNode *MMRA = I.getMetadata(LLVMContext::MD_mmra))
5719 visitMMRAMetadata(I, MMRA);
5720
5721 if (MDNode *Annotation = I.getMetadata(LLVMContext::MD_annotation))
5722 visitAnnotationMetadata(Annotation);
5723
5724 if (MDNode *Captures = I.getMetadata(LLVMContext::MD_captures))
5725 visitCapturesMetadata(I, Captures);
5726
5727 if (MDNode *MD = I.getMetadata(LLVMContext::MD_alloc_token))
5728 visitAllocTokenMetadata(I, MD);
5729
// The debug location, if any, must be a DILocation; additionally an
// atomGroup requires Key Instructions to be enabled on the subprogram.
5730 if (MDNode *N = I.getDebugLoc().getAsMDNode()) {
5731 CheckDI(isa<DILocation>(N), "invalid !dbg metadata attachment", &I, N);
5732 visitMDNode(*N, AreDebugLocsAllowed::Yes);
5733
5734 if (auto *DL = dyn_cast<DILocation>(N)) {
5735 if (DL->getAtomGroup()) {
5736 CheckDI(DL->getScope()->getSubprogram()->getKeyInstructionsEnabled(),
5737 "DbgLoc uses atomGroup but DISubprogram doesn't have Key "
5738 "Instructions enabled",
5739 DL, DL->getScope()->getSubprogram());
5740 }
5741 }
5742 }
5743
// Finally walk every attachment generically; debug locations are only
// permitted inside !dbg and !llvm.loop attachments.
5745 I.getAllMetadata(MDs);
5746 for (auto Attachment : MDs) {
5747 unsigned Kind = Attachment.first;
5748 auto AllowLocs =
5749 (Kind == LLVMContext::MD_dbg || Kind == LLVMContext::MD_loop)
5750 ? AreDebugLocsAllowed::Yes
5751 : AreDebugLocsAllowed::No;
5752 visitMDNode(*Attachment.second, AllowLocs);
5753 }
5754
// Record the instruction so later defs in this block can take the
// verifyDominatesUse fast path.
5755 InstsInThisBlock.insert(&I);
5756}
5757
5758/// Allow intrinsics to be verified in different ways.
5759void Verifier::visitIntrinsicCall(Intrinsic::ID ID, CallBase &Call) {
5761 Check(IF->isDeclaration(), "Intrinsic functions should never be defined!",
5762 IF);
5763
5764 // Verify that the intrinsic prototype lines up with what the .td files
5765 // describe.
5766 FunctionType *IFTy = IF->getFunctionType();
5767 bool IsVarArg = IFTy->isVarArg();
5768
5772
5773 // Walk the descriptors to extract overloaded types.
5778 "Intrinsic has incorrect return type!", IF);
5780 "Intrinsic has incorrect argument type!", IF);
5781
5782 // Verify if the intrinsic call matches the vararg property.
5783 if (IsVarArg)
5785 "Intrinsic was not defined with variable arguments!", IF);
5786 else
5788 "Callsite was not defined with variable arguments!", IF);
5789
5790 // All descriptors should be absorbed by now.
5791 Check(TableRef.empty(), "Intrinsic has too few arguments!", IF);
5792
5793 // Now that we have the intrinsic ID and the actual argument types (and we
5794 // know they are legal for the intrinsic!) get the intrinsic name through the
5795 // usual means. This allows us to verify the mangling of argument types into
5796 // the name.
5797 const std::string ExpectedName =
5798 Intrinsic::getName(ID, ArgTys, IF->getParent(), IFTy);
5799 Check(ExpectedName == IF->getName(),
5800 "Intrinsic name not mangled correctly for type arguments! "
5801 "Should be: " +
5802 ExpectedName,
5803 IF);
5804
5805 // If the intrinsic takes MDNode arguments, verify that they are either global
5806 // or are local to *this* function.
5807 for (Value *V : Call.args()) {
5808 if (auto *MD = dyn_cast<MetadataAsValue>(V))
5809 visitMetadataAsValue(*MD, Call.getCaller());
5810 if (auto *Const = dyn_cast<Constant>(V))
5811 Check(!Const->getType()->isX86_AMXTy(),
5812 "const x86_amx is not allowed in argument!");
5813 }
5814
5815 switch (ID) {
5816 default:
5817 break;
5818 case Intrinsic::assume: {
5819 if (Call.hasOperandBundles()) {
5821 Check(Cond && Cond->isOne(),
5822 "assume with operand bundles must have i1 true condition", Call);
5823 }
5824 for (auto &Elem : Call.bundle_op_infos()) {
5825 unsigned ArgCount = Elem.End - Elem.Begin;
5826 // Separate storage assumptions are special insofar as they're the only
5827 // operand bundles allowed on assumes that aren't parameter attributes.
5828 if (Elem.Tag->getKey() == "separate_storage") {
5829 Check(ArgCount == 2,
5830 "separate_storage assumptions should have 2 arguments", Call);
5831 Check(Call.getOperand(Elem.Begin)->getType()->isPointerTy() &&
5832 Call.getOperand(Elem.Begin + 1)->getType()->isPointerTy(),
5833 "arguments to separate_storage assumptions should be pointers",
5834 Call);
5835 continue;
5836 }
5837 Check(Elem.Tag->getKey() == "ignore" ||
5838 Attribute::isExistingAttribute(Elem.Tag->getKey()),
5839 "tags must be valid attribute names", Call);
5840 Attribute::AttrKind Kind =
5841 Attribute::getAttrKindFromName(Elem.Tag->getKey());
5842 if (Kind == Attribute::Alignment) {
5843 Check(ArgCount <= 3 && ArgCount >= 2,
5844 "alignment assumptions should have 2 or 3 arguments", Call);
5845 Check(Call.getOperand(Elem.Begin)->getType()->isPointerTy(),
5846 "first argument should be a pointer", Call);
5847 Check(Call.getOperand(Elem.Begin + 1)->getType()->isIntegerTy(),
5848 "second argument should be an integer", Call);
5849 if (ArgCount == 3)
5850 Check(Call.getOperand(Elem.Begin + 2)->getType()->isIntegerTy(),
5851 "third argument should be an integer if present", Call);
5852 continue;
5853 }
5854 if (Kind == Attribute::Dereferenceable) {
5855 Check(ArgCount == 2,
5856 "dereferenceable assumptions should have 2 arguments", Call);
5857 Check(Call.getOperand(Elem.Begin)->getType()->isPointerTy(),
5858 "first argument should be a pointer", Call);
5859 Check(Call.getOperand(Elem.Begin + 1)->getType()->isIntegerTy(),
5860 "second argument should be an integer", Call);
5861 continue;
5862 }
5863 Check(ArgCount <= 2, "too many arguments", Call);
5864 if (Kind == Attribute::None)
5865 break;
5866 if (Attribute::isIntAttrKind(Kind)) {
5867 Check(ArgCount == 2, "this attribute should have 2 arguments", Call);
5868 Check(isa<ConstantInt>(Call.getOperand(Elem.Begin + 1)),
5869 "the second argument should be a constant integral value", Call);
5870 } else if (Attribute::canUseAsParamAttr(Kind)) {
5871 Check((ArgCount) == 1, "this attribute should have one argument", Call);
5872 } else if (Attribute::canUseAsFnAttr(Kind)) {
5873 Check((ArgCount) == 0, "this attribute has no argument", Call);
5874 }
5875 }
5876 break;
5877 }
5878 case Intrinsic::ucmp:
5879 case Intrinsic::scmp: {
5880 Type *SrcTy = Call.getOperand(0)->getType();
5881 Type *DestTy = Call.getType();
5882
5883 Check(DestTy->getScalarSizeInBits() >= 2,
5884 "result type must be at least 2 bits wide", Call);
5885
5886 bool IsDestTypeVector = DestTy->isVectorTy();
5887 Check(SrcTy->isVectorTy() == IsDestTypeVector,
5888 "ucmp/scmp argument and result types must both be either vector or "
5889 "scalar types",
5890 Call);
5891 if (IsDestTypeVector) {
5892 auto SrcVecLen = cast<VectorType>(SrcTy)->getElementCount();
5893 auto DestVecLen = cast<VectorType>(DestTy)->getElementCount();
5894 Check(SrcVecLen == DestVecLen,
5895 "return type and arguments must have the same number of "
5896 "elements",
5897 Call);
5898 }
5899 break;
5900 }
5901 case Intrinsic::coro_id: {
5902 auto *InfoArg = Call.getArgOperand(3)->stripPointerCasts();
5903 if (isa<ConstantPointerNull>(InfoArg))
5904 break;
5905 auto *GV = dyn_cast<GlobalVariable>(InfoArg);
5906 Check(GV && GV->isConstant() && GV->hasDefinitiveInitializer(),
5907 "info argument of llvm.coro.id must refer to an initialized "
5908 "constant");
5909 Constant *Init = GV->getInitializer();
5911 "info argument of llvm.coro.id must refer to either a struct or "
5912 "an array");
5913 break;
5914 }
5915 case Intrinsic::is_fpclass: {
5916 const ConstantInt *TestMask = cast<ConstantInt>(Call.getOperand(1));
5917 Check((TestMask->getZExtValue() & ~static_cast<unsigned>(fcAllFlags)) == 0,
5918 "unsupported bits for llvm.is.fpclass test mask");
5919 break;
5920 }
5921 case Intrinsic::fptrunc_round: {
5922 // Check the rounding mode
5923 Metadata *MD = nullptr;
5925 if (MAV)
5926 MD = MAV->getMetadata();
5927
5928 Check(MD != nullptr, "missing rounding mode argument", Call);
5929
5930 Check(isa<MDString>(MD),
5931 ("invalid value for llvm.fptrunc.round metadata operand"
5932 " (the operand should be a string)"),
5933 MD);
5934
5935 std::optional<RoundingMode> RoundMode =
5936 convertStrToRoundingMode(cast<MDString>(MD)->getString());
5937 Check(RoundMode && *RoundMode != RoundingMode::Dynamic,
5938 "unsupported rounding mode argument", Call);
5939 break;
5940 }
5941#define BEGIN_REGISTER_VP_INTRINSIC(VPID, ...) case Intrinsic::VPID:
5942#include "llvm/IR/VPIntrinsics.def"
5943#undef BEGIN_REGISTER_VP_INTRINSIC
5944 visitVPIntrinsic(cast<VPIntrinsic>(Call));
5945 break;
5946#define INSTRUCTION(NAME, NARGS, ROUND_MODE, INTRINSIC) \
5947 case Intrinsic::INTRINSIC:
5948#include "llvm/IR/ConstrainedOps.def"
5949#undef INSTRUCTION
5950 visitConstrainedFPIntrinsic(cast<ConstrainedFPIntrinsic>(Call));
5951 break;
5952 case Intrinsic::dbg_declare: // llvm.dbg.declare
5953 case Intrinsic::dbg_value: // llvm.dbg.value
5954 case Intrinsic::dbg_assign: // llvm.dbg.assign
5955 case Intrinsic::dbg_label: // llvm.dbg.label
5956 // We no longer interpret debug intrinsics (the old variable-location
5957 // design). They're meaningless as far as LLVM is concerned we could make
5958 // it an error for them to appear, but it's possible we'll have users
5959 // converting back to intrinsics for the forseeable future (such as DXIL),
5960 // so tolerate their existance.
5961 break;
5962 case Intrinsic::memcpy:
5963 case Intrinsic::memcpy_inline:
5964 case Intrinsic::memmove:
5965 case Intrinsic::memset:
5966 case Intrinsic::memset_inline:
5967 break;
5968 case Intrinsic::experimental_memset_pattern: {
5969 const auto Memset = cast<MemSetPatternInst>(&Call);
5970 Check(Memset->getValue()->getType()->isSized(),
5971 "unsized types cannot be used as memset patterns", Call);
5972 break;
5973 }
5974 case Intrinsic::memcpy_element_unordered_atomic:
5975 case Intrinsic::memmove_element_unordered_atomic:
5976 case Intrinsic::memset_element_unordered_atomic: {
5977 const auto *AMI = cast<AnyMemIntrinsic>(&Call);
5978
5979 ConstantInt *ElementSizeCI =
5980 cast<ConstantInt>(AMI->getRawElementSizeInBytes());
5981 const APInt &ElementSizeVal = ElementSizeCI->getValue();
5982 Check(ElementSizeVal.isPowerOf2(),
5983 "element size of the element-wise atomic memory intrinsic "
5984 "must be a power of 2",
5985 Call);
5986
5987 auto IsValidAlignment = [&](MaybeAlign Alignment) {
5988 return Alignment && ElementSizeVal.ule(Alignment->value());
5989 };
5990 Check(IsValidAlignment(AMI->getDestAlign()),
5991 "incorrect alignment of the destination argument", Call);
5992 if (const auto *AMT = dyn_cast<AnyMemTransferInst>(AMI)) {
5993 Check(IsValidAlignment(AMT->getSourceAlign()),
5994 "incorrect alignment of the source argument", Call);
5995 }
5996 break;
5997 }
5998 case Intrinsic::call_preallocated_setup: {
5999 auto *NumArgs = cast<ConstantInt>(Call.getArgOperand(0));
6000 bool FoundCall = false;
6001 for (User *U : Call.users()) {
6002 auto *UseCall = dyn_cast<CallBase>(U);
6003 Check(UseCall != nullptr,
6004 "Uses of llvm.call.preallocated.setup must be calls");
6005 Intrinsic::ID IID = UseCall->getIntrinsicID();
6006 if (IID == Intrinsic::call_preallocated_arg) {
6007 auto *AllocArgIndex = dyn_cast<ConstantInt>(UseCall->getArgOperand(1));
6008 Check(AllocArgIndex != nullptr,
6009 "llvm.call.preallocated.alloc arg index must be a constant");
6010 auto AllocArgIndexInt = AllocArgIndex->getValue();
6011 Check(AllocArgIndexInt.sge(0) &&
6012 AllocArgIndexInt.slt(NumArgs->getValue()),
6013 "llvm.call.preallocated.alloc arg index must be between 0 and "
6014 "corresponding "
6015 "llvm.call.preallocated.setup's argument count");
6016 } else if (IID == Intrinsic::call_preallocated_teardown) {
6017 // nothing to do
6018 } else {
6019 Check(!FoundCall, "Can have at most one call corresponding to a "
6020 "llvm.call.preallocated.setup");
6021 FoundCall = true;
6022 size_t NumPreallocatedArgs = 0;
6023 for (unsigned i = 0; i < UseCall->arg_size(); i++) {
6024 if (UseCall->paramHasAttr(i, Attribute::Preallocated)) {
6025 ++NumPreallocatedArgs;
6026 }
6027 }
6028 Check(NumPreallocatedArgs != 0,
6029 "cannot use preallocated intrinsics on a call without "
6030 "preallocated arguments");
6031 Check(NumArgs->equalsInt(NumPreallocatedArgs),
6032 "llvm.call.preallocated.setup arg size must be equal to number "
6033 "of preallocated arguments "
6034 "at call site",
6035 Call, *UseCall);
6036 // getOperandBundle() cannot be called if more than one of the operand
6037 // bundles exists. There is already a check elsewhere for this, so skip
6038 // here if we see more than one.
6039 if (UseCall->countOperandBundlesOfType(LLVMContext::OB_preallocated) >
6040 1) {
6041 return;
6042 }
6043 auto PreallocatedBundle =
6044 UseCall->getOperandBundle(LLVMContext::OB_preallocated);
6045 Check(PreallocatedBundle,
6046 "Use of llvm.call.preallocated.setup outside intrinsics "
6047 "must be in \"preallocated\" operand bundle");
6048 Check(PreallocatedBundle->Inputs.front().get() == &Call,
6049 "preallocated bundle must have token from corresponding "
6050 "llvm.call.preallocated.setup");
6051 }
6052 }
6053 break;
6054 }
6055 case Intrinsic::call_preallocated_arg: {
6056 auto *Token = dyn_cast<CallBase>(Call.getArgOperand(0));
6057 Check(Token &&
6058 Token->getIntrinsicID() == Intrinsic::call_preallocated_setup,
6059 "llvm.call.preallocated.arg token argument must be a "
6060 "llvm.call.preallocated.setup");
6061 Check(Call.hasFnAttr(Attribute::Preallocated),
6062 "llvm.call.preallocated.arg must be called with a \"preallocated\" "
6063 "call site attribute");
6064 break;
6065 }
6066 case Intrinsic::call_preallocated_teardown: {
6067 auto *Token = dyn_cast<CallBase>(Call.getArgOperand(0));
6068 Check(Token &&
6069 Token->getIntrinsicID() == Intrinsic::call_preallocated_setup,
6070 "llvm.call.preallocated.teardown token argument must be a "
6071 "llvm.call.preallocated.setup");
6072 break;
6073 }
6074 case Intrinsic::gcroot:
6075 case Intrinsic::gcwrite:
6076 case Intrinsic::gcread:
6077 if (ID == Intrinsic::gcroot) {
6078 AllocaInst *AI =
6080 Check(AI, "llvm.gcroot parameter #1 must be an alloca.", Call);
6082 "llvm.gcroot parameter #2 must be a constant.", Call);
6083 if (!AI->getAllocatedType()->isPointerTy()) {
6085 "llvm.gcroot parameter #1 must either be a pointer alloca, "
6086 "or argument #2 must be a non-null constant.",
6087 Call);
6088 }
6089 }
6090
6091 Check(Call.getParent()->getParent()->hasGC(),
6092 "Enclosing function does not use GC.", Call);
6093 break;
6094 case Intrinsic::init_trampoline:
6096 "llvm.init_trampoline parameter #2 must resolve to a function.",
6097 Call);
6098 break;
6099 case Intrinsic::prefetch:
6100 Check(cast<ConstantInt>(Call.getArgOperand(1))->getZExtValue() < 2,
6101 "rw argument to llvm.prefetch must be 0-1", Call);
6102 Check(cast<ConstantInt>(Call.getArgOperand(2))->getZExtValue() < 4,
6103 "locality argument to llvm.prefetch must be 0-3", Call);
6104 Check(cast<ConstantInt>(Call.getArgOperand(3))->getZExtValue() < 2,
6105 "cache type argument to llvm.prefetch must be 0-1", Call);
6106 break;
6107 case Intrinsic::reloc_none: {
6109 cast<MetadataAsValue>(Call.getArgOperand(0))->getMetadata()),
6110 "llvm.reloc.none argument must be a metadata string", &Call);
6111 break;
6112 }
6113 case Intrinsic::stackprotector:
6115 "llvm.stackprotector parameter #2 must resolve to an alloca.", Call);
6116 break;
6117 case Intrinsic::localescape: {
6118 BasicBlock *BB = Call.getParent();
6119 Check(BB->isEntryBlock(), "llvm.localescape used outside of entry block",
6120 Call);
6121 Check(!SawFrameEscape, "multiple calls to llvm.localescape in one function",
6122 Call);
6123 for (Value *Arg : Call.args()) {
6124 if (isa<ConstantPointerNull>(Arg))
6125 continue; // Null values are allowed as placeholders.
6126 auto *AI = dyn_cast<AllocaInst>(Arg->stripPointerCasts());
6127 Check(AI && AI->isStaticAlloca(),
6128 "llvm.localescape only accepts static allocas", Call);
6129 }
6130 FrameEscapeInfo[BB->getParent()].first = Call.arg_size();
6131 SawFrameEscape = true;
6132 break;
6133 }
6134 case Intrinsic::localrecover: {
6136 Function *Fn = dyn_cast<Function>(FnArg);
6137 Check(Fn && !Fn->isDeclaration(),
6138 "llvm.localrecover first "
6139 "argument must be function defined in this module",
6140 Call);
6141 auto *IdxArg = cast<ConstantInt>(Call.getArgOperand(2));
6142 auto &Entry = FrameEscapeInfo[Fn];
6143 Entry.second = unsigned(
6144 std::max(uint64_t(Entry.second), IdxArg->getLimitedValue(~0U) + 1));
6145 break;
6146 }
6147
6148 case Intrinsic::experimental_gc_statepoint:
6149 if (auto *CI = dyn_cast<CallInst>(&Call))
6150 Check(!CI->isInlineAsm(),
6151 "gc.statepoint support for inline assembly unimplemented", CI);
6152 Check(Call.getParent()->getParent()->hasGC(),
6153 "Enclosing function does not use GC.", Call);
6154
6155 verifyStatepoint(Call);
6156 break;
6157 case Intrinsic::experimental_gc_result: {
6158 Check(Call.getParent()->getParent()->hasGC(),
6159 "Enclosing function does not use GC.", Call);
6160
6161 auto *Statepoint = Call.getArgOperand(0);
6162 if (isa<UndefValue>(Statepoint))
6163 break;
6164
6165 // Are we tied to a statepoint properly?
6166 const auto *StatepointCall = dyn_cast<CallBase>(Statepoint);
6167 Check(StatepointCall && StatepointCall->getIntrinsicID() ==
6168 Intrinsic::experimental_gc_statepoint,
6169 "gc.result operand #1 must be from a statepoint", Call,
6170 Call.getArgOperand(0));
6171
6172 // Check that result type matches wrapped callee.
6173 auto *TargetFuncType =
6174 cast<FunctionType>(StatepointCall->getParamElementType(2));
6175 Check(Call.getType() == TargetFuncType->getReturnType(),
6176 "gc.result result type does not match wrapped callee", Call);
6177 break;
6178 }
6179 case Intrinsic::experimental_gc_relocate: {
6180 Check(Call.arg_size() == 3, "wrong number of arguments", Call);
6181
6183 "gc.relocate must return a pointer or a vector of pointers", Call);
6184
6185 // Check that this relocate is correctly tied to the statepoint
6186
6187 // This is case for relocate on the unwinding path of an invoke statepoint
6188 if (LandingPadInst *LandingPad =
6190
6191 const BasicBlock *InvokeBB =
6192 LandingPad->getParent()->getUniquePredecessor();
6193
6194 // Landingpad relocates should have only one predecessor with invoke
6195 // statepoint terminator
6196 Check(InvokeBB, "safepoints should have unique landingpads",
6197 LandingPad->getParent());
6198 Check(InvokeBB->getTerminator(), "safepoint block should be well formed",
6199 InvokeBB);
6201 "gc relocate should be linked to a statepoint", InvokeBB);
6202 } else {
6203 // In all other cases relocate should be tied to the statepoint directly.
6204 // This covers relocates on a normal return path of invoke statepoint and
6205 // relocates of a call statepoint.
6206 auto *Token = Call.getArgOperand(0);
6208 "gc relocate is incorrectly tied to the statepoint", Call, Token);
6209 }
6210
6211 // Verify rest of the relocate arguments.
6212 const Value &StatepointCall = *cast<GCRelocateInst>(Call).getStatepoint();
6213
6214 // Both the base and derived must be piped through the safepoint.
6217 "gc.relocate operand #2 must be integer offset", Call);
6218
6219 Value *Derived = Call.getArgOperand(2);
6220 Check(isa<ConstantInt>(Derived),
6221 "gc.relocate operand #3 must be integer offset", Call);
6222
6223 const uint64_t BaseIndex = cast<ConstantInt>(Base)->getZExtValue();
6224 const uint64_t DerivedIndex = cast<ConstantInt>(Derived)->getZExtValue();
6225
6226 // Check the bounds
6227 if (isa<UndefValue>(StatepointCall))
6228 break;
6229 if (auto Opt = cast<GCStatepointInst>(StatepointCall)
6230 .getOperandBundle(LLVMContext::OB_gc_live)) {
6231 Check(BaseIndex < Opt->Inputs.size(),
6232 "gc.relocate: statepoint base index out of bounds", Call);
6233 Check(DerivedIndex < Opt->Inputs.size(),
6234 "gc.relocate: statepoint derived index out of bounds", Call);
6235 }
6236
6237 // Relocated value must be either a pointer type or vector-of-pointer type,
6238 // but gc_relocate does not need to return the same pointer type as the
6239 // relocated pointer. It can be casted to the correct type later if it's
6240 // desired. However, they must have the same address space and 'vectorness'
6241 GCRelocateInst &Relocate = cast<GCRelocateInst>(Call);
6242 auto *ResultType = Call.getType();
6243 auto *DerivedType = Relocate.getDerivedPtr()->getType();
6244 auto *BaseType = Relocate.getBasePtr()->getType();
6245
6246 Check(BaseType->isPtrOrPtrVectorTy(),
6247 "gc.relocate: relocated value must be a pointer", Call);
6248 Check(DerivedType->isPtrOrPtrVectorTy(),
6249 "gc.relocate: relocated value must be a pointer", Call);
6250
6251 Check(ResultType->isVectorTy() == DerivedType->isVectorTy(),
6252 "gc.relocate: vector relocates to vector and pointer to pointer",
6253 Call);
6254 Check(
6255 ResultType->getPointerAddressSpace() ==
6256 DerivedType->getPointerAddressSpace(),
6257 "gc.relocate: relocating a pointer shouldn't change its address space",
6258 Call);
6259
6260 auto GC = llvm::getGCStrategy(Relocate.getFunction()->getGC());
6261 Check(GC, "gc.relocate: calling function must have GCStrategy",
6262 Call.getFunction());
6263 if (GC) {
6264 auto isGCPtr = [&GC](Type *PTy) {
6265 return GC->isGCManagedPointer(PTy->getScalarType()).value_or(true);
6266 };
6267 Check(isGCPtr(ResultType), "gc.relocate: must return gc pointer", Call);
6268 Check(isGCPtr(BaseType),
6269 "gc.relocate: relocated value must be a gc pointer", Call);
6270 Check(isGCPtr(DerivedType),
6271 "gc.relocate: relocated value must be a gc pointer", Call);
6272 }
6273 break;
6274 }
6275 case Intrinsic::experimental_patchpoint: {
6276 if (Call.getCallingConv() == CallingConv::AnyReg) {
6278 "patchpoint: invalid return type used with anyregcc", Call);
6279 }
6280 break;
6281 }
6282 case Intrinsic::eh_exceptioncode:
6283 case Intrinsic::eh_exceptionpointer: {
6285 "eh.exceptionpointer argument must be a catchpad", Call);
6286 break;
6287 }
6288 case Intrinsic::get_active_lane_mask: {
6290 "get_active_lane_mask: must return a "
6291 "vector",
6292 Call);
6293 auto *ElemTy = Call.getType()->getScalarType();
6294 Check(ElemTy->isIntegerTy(1),
6295 "get_active_lane_mask: element type is not "
6296 "i1",
6297 Call);
6298 break;
6299 }
6300 case Intrinsic::experimental_get_vector_length: {
6301 ConstantInt *VF = cast<ConstantInt>(Call.getArgOperand(1));
6302 Check(!VF->isNegative() && !VF->isZero(),
6303 "get_vector_length: VF must be positive", Call);
6304 break;
6305 }
6306 case Intrinsic::masked_load: {
6307 Check(Call.getType()->isVectorTy(), "masked_load: must return a vector",
6308 Call);
6309
6311 Value *PassThru = Call.getArgOperand(2);
6312 Check(Mask->getType()->isVectorTy(), "masked_load: mask must be vector",
6313 Call);
6314 Check(PassThru->getType() == Call.getType(),
6315 "masked_load: pass through and return type must match", Call);
6316 Check(cast<VectorType>(Mask->getType())->getElementCount() ==
6317 cast<VectorType>(Call.getType())->getElementCount(),
6318 "masked_load: vector mask must be same length as return", Call);
6319 break;
6320 }
6321 case Intrinsic::masked_store: {
6322 Value *Val = Call.getArgOperand(0);
6324 Check(Mask->getType()->isVectorTy(), "masked_store: mask must be vector",
6325 Call);
6326 Check(cast<VectorType>(Mask->getType())->getElementCount() ==
6327 cast<VectorType>(Val->getType())->getElementCount(),
6328 "masked_store: vector mask must be same length as value", Call);
6329 break;
6330 }
6331
6332 case Intrinsic::experimental_guard: {
6333 Check(isa<CallInst>(Call), "experimental_guard cannot be invoked", Call);
6335 "experimental_guard must have exactly one "
6336 "\"deopt\" operand bundle");
6337 break;
6338 }
6339
6340 case Intrinsic::experimental_deoptimize: {
6341 Check(isa<CallInst>(Call), "experimental_deoptimize cannot be invoked",
6342 Call);
6344 "experimental_deoptimize must have exactly one "
6345 "\"deopt\" operand bundle");
6347 "experimental_deoptimize return type must match caller return type");
6348
6349 if (isa<CallInst>(Call)) {
6351 Check(RI,
6352 "calls to experimental_deoptimize must be followed by a return");
6353
6354 if (!Call.getType()->isVoidTy() && RI)
6355 Check(RI->getReturnValue() == &Call,
6356 "calls to experimental_deoptimize must be followed by a return "
6357 "of the value computed by experimental_deoptimize");
6358 }
6359
6360 break;
6361 }
6362 case Intrinsic::vastart: {
6364 "va_start called in a non-varargs function");
6365 break;
6366 }
6367 case Intrinsic::get_dynamic_area_offset: {
6368 auto *IntTy = dyn_cast<IntegerType>(Call.getType());
6369 Check(IntTy && DL.getPointerSizeInBits(DL.getAllocaAddrSpace()) ==
6370 IntTy->getBitWidth(),
6371 "get_dynamic_area_offset result type must be scalar integer matching "
6372 "alloca address space width",
6373 Call);
6374 break;
6375 }
6376 case Intrinsic::vector_reduce_and:
6377 case Intrinsic::vector_reduce_or:
6378 case Intrinsic::vector_reduce_xor:
6379 case Intrinsic::vector_reduce_add:
6380 case Intrinsic::vector_reduce_mul:
6381 case Intrinsic::vector_reduce_smax:
6382 case Intrinsic::vector_reduce_smin:
6383 case Intrinsic::vector_reduce_umax:
6384 case Intrinsic::vector_reduce_umin: {
6385 Type *ArgTy = Call.getArgOperand(0)->getType();
6386 Check(ArgTy->isIntOrIntVectorTy() && ArgTy->isVectorTy(),
6387 "Intrinsic has incorrect argument type!");
6388 break;
6389 }
6390 case Intrinsic::vector_reduce_fmax:
6391 case Intrinsic::vector_reduce_fmin: {
6392 Type *ArgTy = Call.getArgOperand(0)->getType();
6393 Check(ArgTy->isFPOrFPVectorTy() && ArgTy->isVectorTy(),
6394 "Intrinsic has incorrect argument type!");
6395 break;
6396 }
6397 case Intrinsic::vector_reduce_fadd:
6398 case Intrinsic::vector_reduce_fmul: {
6399 // Unlike the other reductions, the first argument is a start value. The
6400 // second argument is the vector to be reduced.
6401 Type *ArgTy = Call.getArgOperand(1)->getType();
6402 Check(ArgTy->isFPOrFPVectorTy() && ArgTy->isVectorTy(),
6403 "Intrinsic has incorrect argument type!");
6404 break;
6405 }
6406 case Intrinsic::smul_fix:
6407 case Intrinsic::smul_fix_sat:
6408 case Intrinsic::umul_fix:
6409 case Intrinsic::umul_fix_sat:
6410 case Intrinsic::sdiv_fix:
6411 case Intrinsic::sdiv_fix_sat:
6412 case Intrinsic::udiv_fix:
6413 case Intrinsic::udiv_fix_sat: {
6414 Value *Op1 = Call.getArgOperand(0);
6415 Value *Op2 = Call.getArgOperand(1);
6417 "first operand of [us][mul|div]_fix[_sat] must be an int type or "
6418 "vector of ints");
6420 "second operand of [us][mul|div]_fix[_sat] must be an int type or "
6421 "vector of ints");
6422
6423 auto *Op3 = cast<ConstantInt>(Call.getArgOperand(2));
6424 Check(Op3->getType()->isIntegerTy(),
6425 "third operand of [us][mul|div]_fix[_sat] must be an int type");
6426 Check(Op3->getBitWidth() <= 32,
6427 "third operand of [us][mul|div]_fix[_sat] must fit within 32 bits");
6428
6429 if (ID == Intrinsic::smul_fix || ID == Intrinsic::smul_fix_sat ||
6430 ID == Intrinsic::sdiv_fix || ID == Intrinsic::sdiv_fix_sat) {
6431 Check(Op3->getZExtValue() < Op1->getType()->getScalarSizeInBits(),
6432 "the scale of s[mul|div]_fix[_sat] must be less than the width of "
6433 "the operands");
6434 } else {
6435 Check(Op3->getZExtValue() <= Op1->getType()->getScalarSizeInBits(),
6436 "the scale of u[mul|div]_fix[_sat] must be less than or equal "
6437 "to the width of the operands");
6438 }
6439 break;
6440 }
6441 case Intrinsic::lrint:
6442 case Intrinsic::llrint:
6443 case Intrinsic::lround:
6444 case Intrinsic::llround: {
6445 Type *ValTy = Call.getArgOperand(0)->getType();
6446 Type *ResultTy = Call.getType();
6447 auto *VTy = dyn_cast<VectorType>(ValTy);
6448 auto *RTy = dyn_cast<VectorType>(ResultTy);
6449 Check(ValTy->isFPOrFPVectorTy() && ResultTy->isIntOrIntVectorTy(),
6450 ExpectedName + ": argument must be floating-point or vector "
6451 "of floating-points, and result must be integer or "
6452 "vector of integers",
6453 &Call);
6454 Check(ValTy->isVectorTy() == ResultTy->isVectorTy(),
6455 ExpectedName + ": argument and result disagree on vector use", &Call);
6456 if (VTy) {
6457 Check(VTy->getElementCount() == RTy->getElementCount(),
6458 ExpectedName + ": argument must be same length as result", &Call);
6459 }
6460 break;
6461 }
6462 case Intrinsic::bswap: {
6463 Type *Ty = Call.getType();
6464 unsigned Size = Ty->getScalarSizeInBits();
6465 Check(Size % 16 == 0, "bswap must be an even number of bytes", &Call);
6466 break;
6467 }
6468 case Intrinsic::invariant_start: {
6469 ConstantInt *InvariantSize = dyn_cast<ConstantInt>(Call.getArgOperand(0));
6470 Check(InvariantSize &&
6471 (!InvariantSize->isNegative() || InvariantSize->isMinusOne()),
6472 "invariant_start parameter must be -1, 0 or a positive number",
6473 &Call);
6474 break;
6475 }
6476 case Intrinsic::matrix_multiply:
6477 case Intrinsic::matrix_transpose:
6478 case Intrinsic::matrix_column_major_load:
6479 case Intrinsic::matrix_column_major_store: {
6481 ConstantInt *Stride = nullptr;
6482 ConstantInt *NumRows;
6483 ConstantInt *NumColumns;
6484 VectorType *ResultTy;
6485 Type *Op0ElemTy = nullptr;
6486 Type *Op1ElemTy = nullptr;
6487 switch (ID) {
6488 case Intrinsic::matrix_multiply: {
6489 NumRows = cast<ConstantInt>(Call.getArgOperand(2));
6490 ConstantInt *N = cast<ConstantInt>(Call.getArgOperand(3));
6491 NumColumns = cast<ConstantInt>(Call.getArgOperand(4));
6493 ->getNumElements() ==
6494 NumRows->getZExtValue() * N->getZExtValue(),
6495 "First argument of a matrix operation does not match specified "
6496 "shape!");
6498 ->getNumElements() ==
6499 N->getZExtValue() * NumColumns->getZExtValue(),
6500 "Second argument of a matrix operation does not match specified "
6501 "shape!");
6502
6503 ResultTy = cast<VectorType>(Call.getType());
6504 Op0ElemTy =
6505 cast<VectorType>(Call.getArgOperand(0)->getType())->getElementType();
6506 Op1ElemTy =
6507 cast<VectorType>(Call.getArgOperand(1)->getType())->getElementType();
6508 break;
6509 }
6510 case Intrinsic::matrix_transpose:
6511 NumRows = cast<ConstantInt>(Call.getArgOperand(1));
6512 NumColumns = cast<ConstantInt>(Call.getArgOperand(2));
6513 ResultTy = cast<VectorType>(Call.getType());
6514 Op0ElemTy =
6515 cast<VectorType>(Call.getArgOperand(0)->getType())->getElementType();
6516 break;
6517 case Intrinsic::matrix_column_major_load: {
6519 NumRows = cast<ConstantInt>(Call.getArgOperand(3));
6520 NumColumns = cast<ConstantInt>(Call.getArgOperand(4));
6521 ResultTy = cast<VectorType>(Call.getType());
6522 break;
6523 }
6524 case Intrinsic::matrix_column_major_store: {
6526 NumRows = cast<ConstantInt>(Call.getArgOperand(4));
6527 NumColumns = cast<ConstantInt>(Call.getArgOperand(5));
6528 ResultTy = cast<VectorType>(Call.getArgOperand(0)->getType());
6529 Op0ElemTy =
6530 cast<VectorType>(Call.getArgOperand(0)->getType())->getElementType();
6531 break;
6532 }
6533 default:
6534 llvm_unreachable("unexpected intrinsic");
6535 }
6536
6537 Check(ResultTy->getElementType()->isIntegerTy() ||
6538 ResultTy->getElementType()->isFloatingPointTy(),
6539 "Result type must be an integer or floating-point type!", IF);
6540
6541 if (Op0ElemTy)
6542 Check(ResultTy->getElementType() == Op0ElemTy,
6543 "Vector element type mismatch of the result and first operand "
6544 "vector!",
6545 IF);
6546
6547 if (Op1ElemTy)
6548 Check(ResultTy->getElementType() == Op1ElemTy,
6549 "Vector element type mismatch of the result and second operand "
6550 "vector!",
6551 IF);
6552
6554 NumRows->getZExtValue() * NumColumns->getZExtValue(),
6555 "Result of a matrix operation does not fit in the returned vector!");
6556
6557 if (Stride) {
6558 Check(Stride->getBitWidth() <= 64, "Stride bitwidth cannot exceed 64!",
6559 IF);
6560 Check(Stride->getZExtValue() >= NumRows->getZExtValue(),
6561 "Stride must be greater or equal than the number of rows!", IF);
6562 }
6563
6564 break;
6565 }
6566 case Intrinsic::vector_splice: {
6568 int64_t Idx = cast<ConstantInt>(Call.getArgOperand(2))->getSExtValue();
6569 int64_t KnownMinNumElements = VecTy->getElementCount().getKnownMinValue();
6570 if (VecTy->isScalableTy() && Call.getParent() &&
6571 Call.getParent()->getParent()) {
6572 AttributeList Attrs = Call.getParent()->getParent()->getAttributes();
6573 if (Attrs.hasFnAttr(Attribute::VScaleRange))
6574 KnownMinNumElements *= Attrs.getFnAttrs().getVScaleRangeMin();
6575 }
6576 Check((Idx < 0 && std::abs(Idx) <= KnownMinNumElements) ||
6577 (Idx >= 0 && Idx < KnownMinNumElements),
6578 "The splice index exceeds the range [-VL, VL-1] where VL is the "
6579 "known minimum number of elements in the vector. For scalable "
6580 "vectors the minimum number of elements is determined from "
6581 "vscale_range.",
6582 &Call);
6583 break;
6584 }
6585 case Intrinsic::stepvector: {
6587 Check(VecTy && VecTy->getScalarType()->isIntegerTy() &&
6588 VecTy->getScalarSizeInBits() >= 8,
6589 "stepvector only supported for vectors of integers "
6590 "with a bitwidth of at least 8.",
6591 &Call);
6592 break;
6593 }
6594 case Intrinsic::experimental_vector_match: {
6595 Value *Op1 = Call.getArgOperand(0);
6596 Value *Op2 = Call.getArgOperand(1);
6598
6599 VectorType *Op1Ty = dyn_cast<VectorType>(Op1->getType());
6600 VectorType *Op2Ty = dyn_cast<VectorType>(Op2->getType());
6601 VectorType *MaskTy = dyn_cast<VectorType>(Mask->getType());
6602
6603 Check(Op1Ty && Op2Ty && MaskTy, "Operands must be vectors.", &Call);
6605 "Second operand must be a fixed length vector.", &Call);
6606 Check(Op1Ty->getElementType()->isIntegerTy(),
6607 "First operand must be a vector of integers.", &Call);
6608 Check(Op1Ty->getElementType() == Op2Ty->getElementType(),
6609 "First two operands must have the same element type.", &Call);
6610 Check(Op1Ty->getElementCount() == MaskTy->getElementCount(),
6611 "First operand and mask must have the same number of elements.",
6612 &Call);
6613 Check(MaskTy->getElementType()->isIntegerTy(1),
6614 "Mask must be a vector of i1's.", &Call);
6615 Check(Call.getType() == MaskTy, "Return type must match the mask type.",
6616 &Call);
6617 break;
6618 }
6619 case Intrinsic::vector_insert: {
6620 Value *Vec = Call.getArgOperand(0);
6621 Value *SubVec = Call.getArgOperand(1);
6622 Value *Idx = Call.getArgOperand(2);
6623 unsigned IdxN = cast<ConstantInt>(Idx)->getZExtValue();
6624
6625 VectorType *VecTy = cast<VectorType>(Vec->getType());
6626 VectorType *SubVecTy = cast<VectorType>(SubVec->getType());
6627
6628 ElementCount VecEC = VecTy->getElementCount();
6629 ElementCount SubVecEC = SubVecTy->getElementCount();
6630 Check(VecTy->getElementType() == SubVecTy->getElementType(),
6631 "vector_insert parameters must have the same element "
6632 "type.",
6633 &Call);
6634 Check(IdxN % SubVecEC.getKnownMinValue() == 0,
6635 "vector_insert index must be a constant multiple of "
6636 "the subvector's known minimum vector length.");
6637
6638 // If this insertion is not the 'mixed' case where a fixed vector is
6639 // inserted into a scalable vector, ensure that the insertion of the
6640 // subvector does not overrun the parent vector.
6641 if (VecEC.isScalable() == SubVecEC.isScalable()) {
6642 Check(IdxN < VecEC.getKnownMinValue() &&
6643 IdxN + SubVecEC.getKnownMinValue() <= VecEC.getKnownMinValue(),
6644 "subvector operand of vector_insert would overrun the "
6645 "vector being inserted into.");
6646 }
6647 break;
6648 }
6649 case Intrinsic::vector_extract: {
6650 Value *Vec = Call.getArgOperand(0);
6651 Value *Idx = Call.getArgOperand(1);
6652 unsigned IdxN = cast<ConstantInt>(Idx)->getZExtValue();
6653
6654 VectorType *ResultTy = cast<VectorType>(Call.getType());
6655 VectorType *VecTy = cast<VectorType>(Vec->getType());
6656
6657 ElementCount VecEC = VecTy->getElementCount();
6658 ElementCount ResultEC = ResultTy->getElementCount();
6659
6660 Check(ResultTy->getElementType() == VecTy->getElementType(),
6661 "vector_extract result must have the same element "
6662 "type as the input vector.",
6663 &Call);
6664 Check(IdxN % ResultEC.getKnownMinValue() == 0,
6665 "vector_extract index must be a constant multiple of "
6666 "the result type's known minimum vector length.");
6667
6668 // If this extraction is not the 'mixed' case where a fixed vector is
6669 // extracted from a scalable vector, ensure that the extraction does not
6670 // overrun the parent vector.
6671 if (VecEC.isScalable() == ResultEC.isScalable()) {
6672 Check(IdxN < VecEC.getKnownMinValue() &&
6673 IdxN + ResultEC.getKnownMinValue() <= VecEC.getKnownMinValue(),
6674 "vector_extract would overrun.");
6675 }
6676 break;
6677 }
6678 case Intrinsic::vector_partial_reduce_fadd:
6679 case Intrinsic::vector_partial_reduce_add: {
6682
6683 unsigned VecWidth = VecTy->getElementCount().getKnownMinValue();
6684 unsigned AccWidth = AccTy->getElementCount().getKnownMinValue();
6685
6686 Check((VecWidth % AccWidth) == 0,
6687 "Invalid vector widths for partial "
6688 "reduction. The width of the input vector "
6689 "must be a positive integer multiple of "
6690 "the width of the accumulator vector.");
6691 break;
6692 }
6693 case Intrinsic::experimental_noalias_scope_decl: {
6694 NoAliasScopeDecls.push_back(cast<IntrinsicInst>(&Call));
6695 break;
6696 }
6697 case Intrinsic::preserve_array_access_index:
6698 case Intrinsic::preserve_struct_access_index:
6699 case Intrinsic::aarch64_ldaxr:
6700 case Intrinsic::aarch64_ldxr:
6701 case Intrinsic::arm_ldaex:
6702 case Intrinsic::arm_ldrex: {
6703 Type *ElemTy = Call.getParamElementType(0);
6704 Check(ElemTy, "Intrinsic requires elementtype attribute on first argument.",
6705 &Call);
6706 break;
6707 }
6708 case Intrinsic::aarch64_stlxr:
6709 case Intrinsic::aarch64_stxr:
6710 case Intrinsic::arm_stlex:
6711 case Intrinsic::arm_strex: {
6712 Type *ElemTy = Call.getAttributes().getParamElementType(1);
6713 Check(ElemTy,
6714 "Intrinsic requires elementtype attribute on second argument.",
6715 &Call);
6716 break;
6717 }
6718 case Intrinsic::aarch64_prefetch: {
6719 Check(cast<ConstantInt>(Call.getArgOperand(1))->getZExtValue() < 2,
6720 "write argument to llvm.aarch64.prefetch must be 0 or 1", Call);
6721 Check(cast<ConstantInt>(Call.getArgOperand(2))->getZExtValue() < 4,
6722 "target argument to llvm.aarch64.prefetch must be 0-3", Call);
6723 Check(cast<ConstantInt>(Call.getArgOperand(3))->getZExtValue() < 2,
6724 "stream argument to llvm.aarch64.prefetch must be 0 or 1", Call);
6725 Check(cast<ConstantInt>(Call.getArgOperand(4))->getZExtValue() < 2,
6726 "isdata argument to llvm.aarch64.prefetch must be 0 or 1", Call);
6727 break;
6728 }
6729 case Intrinsic::callbr_landingpad: {
6730 const auto *CBR = dyn_cast<CallBrInst>(Call.getOperand(0));
6731 Check(CBR, "intrinstic requires callbr operand", &Call);
6732 if (!CBR)
6733 break;
6734
6735 const BasicBlock *LandingPadBB = Call.getParent();
6736 const BasicBlock *PredBB = LandingPadBB->getUniquePredecessor();
6737 if (!PredBB) {
6738 CheckFailed("Intrinsic in block must have 1 unique predecessor", &Call);
6739 break;
6740 }
6741 if (!isa<CallBrInst>(PredBB->getTerminator())) {
6742 CheckFailed("Intrinsic must have corresponding callbr in predecessor",
6743 &Call);
6744 break;
6745 }
6746 Check(llvm::is_contained(CBR->getIndirectDests(), LandingPadBB),
6747 "Intrinsic's corresponding callbr must have intrinsic's parent basic "
6748 "block in indirect destination list",
6749 &Call);
6750 const Instruction &First = *LandingPadBB->begin();
6751 Check(&First == &Call, "No other instructions may proceed intrinsic",
6752 &Call);
6753 break;
6754 }
6755 case Intrinsic::amdgcn_cs_chain: {
6756 auto CallerCC = Call.getCaller()->getCallingConv();
6757 switch (CallerCC) {
6758 case CallingConv::AMDGPU_CS:
6759 case CallingConv::AMDGPU_CS_Chain:
6760 case CallingConv::AMDGPU_CS_ChainPreserve:
6761 case CallingConv::AMDGPU_ES:
6762 case CallingConv::AMDGPU_GS:
6763 case CallingConv::AMDGPU_HS:
6764 case CallingConv::AMDGPU_LS:
6765 case CallingConv::AMDGPU_VS:
6766 break;
6767 default:
6768 CheckFailed("Intrinsic cannot be called from functions with this "
6769 "calling convention",
6770 &Call);
6771 break;
6772 }
6773
6774 Check(Call.paramHasAttr(2, Attribute::InReg),
6775 "SGPR arguments must have the `inreg` attribute", &Call);
6776 Check(!Call.paramHasAttr(3, Attribute::InReg),
6777 "VGPR arguments must not have the `inreg` attribute", &Call);
6778
6779 auto *Next = Call.getNextNode();
6780 bool IsAMDUnreachable = Next && isa<IntrinsicInst>(Next) &&
6781 cast<IntrinsicInst>(Next)->getIntrinsicID() ==
6782 Intrinsic::amdgcn_unreachable;
6783 Check(Next && (isa<UnreachableInst>(Next) || IsAMDUnreachable),
6784 "llvm.amdgcn.cs.chain must be followed by unreachable", &Call);
6785 break;
6786 }
6787 case Intrinsic::amdgcn_init_exec_from_input: {
6788 const Argument *Arg = dyn_cast<Argument>(Call.getOperand(0));
6789 Check(Arg && Arg->hasInRegAttr(),
6790 "only inreg arguments to the parent function are valid as inputs to "
6791 "this intrinsic",
6792 &Call);
6793 break;
6794 }
6795 case Intrinsic::amdgcn_set_inactive_chain_arg: {
6796 auto CallerCC = Call.getCaller()->getCallingConv();
6797 switch (CallerCC) {
6798 case CallingConv::AMDGPU_CS_Chain:
6799 case CallingConv::AMDGPU_CS_ChainPreserve:
6800 break;
6801 default:
6802 CheckFailed("Intrinsic can only be used from functions with the "
6803 "amdgpu_cs_chain or amdgpu_cs_chain_preserve "
6804 "calling conventions",
6805 &Call);
6806 break;
6807 }
6808
6809 unsigned InactiveIdx = 1;
6810 Check(!Call.paramHasAttr(InactiveIdx, Attribute::InReg),
6811 "Value for inactive lanes must not have the `inreg` attribute",
6812 &Call);
6813 Check(isa<Argument>(Call.getArgOperand(InactiveIdx)),
6814 "Value for inactive lanes must be a function argument", &Call);
6815 Check(!cast<Argument>(Call.getArgOperand(InactiveIdx))->hasInRegAttr(),
6816 "Value for inactive lanes must be a VGPR function argument", &Call);
6817 break;
6818 }
6819 case Intrinsic::amdgcn_call_whole_wave: {
6821 Check(F, "Indirect whole wave calls are not allowed", &Call);
6822
6823 CallingConv::ID CC = F->getCallingConv();
6824 Check(CC == CallingConv::AMDGPU_Gfx_WholeWave,
6825 "Callee must have the amdgpu_gfx_whole_wave calling convention",
6826 &Call);
6827
6828 Check(!F->isVarArg(), "Variadic whole wave calls are not allowed", &Call);
6829
6830 Check(Call.arg_size() == F->arg_size(),
6831 "Call argument count must match callee argument count", &Call);
6832
6833 // The first argument of the call is the callee, and the first argument of
6834 // the callee is the active mask. The rest of the arguments must match.
6835 Check(F->arg_begin()->getType()->isIntegerTy(1),
6836 "Callee must have i1 as its first argument", &Call);
6837 for (auto [CallArg, FuncArg] :
6838 drop_begin(zip_equal(Call.args(), F->args()))) {
6839 Check(CallArg->getType() == FuncArg.getType(),
6840 "Argument types must match", &Call);
6841
6842 // Check that inreg attributes match between call site and function
6843 Check(Call.paramHasAttr(FuncArg.getArgNo(), Attribute::InReg) ==
6844 FuncArg.hasInRegAttr(),
6845 "Argument inreg attributes must match", &Call);
6846 }
6847 break;
6848 }
6849 case Intrinsic::amdgcn_s_prefetch_data: {
6850 Check(
6853 "llvm.amdgcn.s.prefetch.data only supports global or constant memory");
6854 break;
6855 }
6856 case Intrinsic::amdgcn_mfma_scale_f32_16x16x128_f8f6f4:
6857 case Intrinsic::amdgcn_mfma_scale_f32_32x32x64_f8f6f4: {
6858 Value *Src0 = Call.getArgOperand(0);
6859 Value *Src1 = Call.getArgOperand(1);
6860
6861 uint64_t CBSZ = cast<ConstantInt>(Call.getArgOperand(3))->getZExtValue();
6862 uint64_t BLGP = cast<ConstantInt>(Call.getArgOperand(4))->getZExtValue();
6863 Check(CBSZ <= 4, "invalid value for cbsz format", Call,
6864 Call.getArgOperand(3));
6865 Check(BLGP <= 4, "invalid value for blgp format", Call,
6866 Call.getArgOperand(4));
6867
6868 // AMDGPU::MFMAScaleFormats values
6869 auto getFormatNumRegs = [](unsigned FormatVal) {
6870 switch (FormatVal) {
6871 case 0:
6872 case 1:
6873 return 8u;
6874 case 2:
6875 case 3:
6876 return 6u;
6877 case 4:
6878 return 4u;
6879 default:
6880 llvm_unreachable("invalid format value");
6881 }
6882 };
6883
6884 auto isValidSrcASrcBVector = [](FixedVectorType *Ty) {
6885 if (!Ty || !Ty->getElementType()->isIntegerTy(32))
6886 return false;
6887 unsigned NumElts = Ty->getNumElements();
6888 return NumElts == 4 || NumElts == 6 || NumElts == 8;
6889 };
6890
6891 auto *Src0Ty = dyn_cast<FixedVectorType>(Src0->getType());
6892 auto *Src1Ty = dyn_cast<FixedVectorType>(Src1->getType());
6893 Check(isValidSrcASrcBVector(Src0Ty),
6894 "operand 0 must be 4, 6 or 8 element i32 vector", &Call, Src0);
6895 Check(isValidSrcASrcBVector(Src1Ty),
6896 "operand 1 must be 4, 6 or 8 element i32 vector", &Call, Src1);
6897
6898 // Permit excess registers for the format.
6899 Check(Src0Ty->getNumElements() >= getFormatNumRegs(CBSZ),
6900 "invalid vector type for format", &Call, Src0, Call.getArgOperand(3));
6901 Check(Src1Ty->getNumElements() >= getFormatNumRegs(BLGP),
6902 "invalid vector type for format", &Call, Src1, Call.getArgOperand(5));
6903 break;
6904 }
6905 case Intrinsic::amdgcn_wmma_f32_16x16x128_f8f6f4:
6906 case Intrinsic::amdgcn_wmma_scale_f32_16x16x128_f8f6f4:
6907 case Intrinsic::amdgcn_wmma_scale16_f32_16x16x128_f8f6f4: {
6908 Value *Src0 = Call.getArgOperand(1);
6909 Value *Src1 = Call.getArgOperand(3);
6910
6911 unsigned FmtA = cast<ConstantInt>(Call.getArgOperand(0))->getZExtValue();
6912 unsigned FmtB = cast<ConstantInt>(Call.getArgOperand(2))->getZExtValue();
6913 Check(FmtA <= 4, "invalid value for matrix format", Call,
6914 Call.getArgOperand(0));
6915 Check(FmtB <= 4, "invalid value for matrix format", Call,
6916 Call.getArgOperand(2));
6917
6918 // AMDGPU::MatrixFMT values
6919 auto getFormatNumRegs = [](unsigned FormatVal) {
6920 switch (FormatVal) {
6921 case 0:
6922 case 1:
6923 return 16u;
6924 case 2:
6925 case 3:
6926 return 12u;
6927 case 4:
6928 return 8u;
6929 default:
6930 llvm_unreachable("invalid format value");
6931 }
6932 };
6933
6934 auto isValidSrcASrcBVector = [](FixedVectorType *Ty) {
6935 if (!Ty || !Ty->getElementType()->isIntegerTy(32))
6936 return false;
6937 unsigned NumElts = Ty->getNumElements();
6938 return NumElts == 16 || NumElts == 12 || NumElts == 8;
6939 };
6940
6941 auto *Src0Ty = dyn_cast<FixedVectorType>(Src0->getType());
6942 auto *Src1Ty = dyn_cast<FixedVectorType>(Src1->getType());
6943 Check(isValidSrcASrcBVector(Src0Ty),
6944 "operand 1 must be 8, 12 or 16 element i32 vector", &Call, Src0);
6945 Check(isValidSrcASrcBVector(Src1Ty),
6946 "operand 3 must be 8, 12 or 16 element i32 vector", &Call, Src1);
6947
6948 // Permit excess registers for the format.
6949 Check(Src0Ty->getNumElements() >= getFormatNumRegs(FmtA),
6950 "invalid vector type for format", &Call, Src0, Call.getArgOperand(0));
6951 Check(Src1Ty->getNumElements() >= getFormatNumRegs(FmtB),
6952 "invalid vector type for format", &Call, Src1, Call.getArgOperand(2));
6953 break;
6954 }
6955 case Intrinsic::amdgcn_cooperative_atomic_load_32x4B:
6956 case Intrinsic::amdgcn_cooperative_atomic_load_16x8B:
6957 case Intrinsic::amdgcn_cooperative_atomic_load_8x16B:
6958 case Intrinsic::amdgcn_cooperative_atomic_store_32x4B:
6959 case Intrinsic::amdgcn_cooperative_atomic_store_16x8B:
6960 case Intrinsic::amdgcn_cooperative_atomic_store_8x16B: {
6961 // Check we only use this intrinsic on the FLAT or GLOBAL address spaces.
6962 Value *PtrArg = Call.getArgOperand(0);
6963 const unsigned AS = PtrArg->getType()->getPointerAddressSpace();
6965 "cooperative atomic intrinsics require a generic or global pointer",
6966 &Call, PtrArg);
6967
6968 // Last argument must be a MD string
6970 MDNode *MD = cast<MDNode>(Op->getMetadata());
6971 Check((MD->getNumOperands() == 1) && isa<MDString>(MD->getOperand(0)),
6972 "cooperative atomic intrinsics require that the last argument is a "
6973 "metadata string",
6974 &Call, Op);
6975 break;
6976 }
6977 case Intrinsic::nvvm_setmaxnreg_inc_sync_aligned_u32:
6978 case Intrinsic::nvvm_setmaxnreg_dec_sync_aligned_u32: {
6979 Value *V = Call.getArgOperand(0);
6980 unsigned RegCount = cast<ConstantInt>(V)->getZExtValue();
6981 Check(RegCount % 8 == 0,
6982 "reg_count argument to nvvm.setmaxnreg must be in multiples of 8");
6983 break;
6984 }
6985 case Intrinsic::experimental_convergence_entry:
6986 case Intrinsic::experimental_convergence_anchor:
6987 break;
6988 case Intrinsic::experimental_convergence_loop:
6989 break;
6990 case Intrinsic::ptrmask: {
6991 Type *Ty0 = Call.getArgOperand(0)->getType();
6992 Type *Ty1 = Call.getArgOperand(1)->getType();
6994 "llvm.ptrmask intrinsic first argument must be pointer or vector "
6995 "of pointers",
6996 &Call);
6997 Check(
6998 Ty0->isVectorTy() == Ty1->isVectorTy(),
6999 "llvm.ptrmask intrinsic arguments must be both scalars or both vectors",
7000 &Call);
7001 if (Ty0->isVectorTy())
7002 Check(cast<VectorType>(Ty0)->getElementCount() ==
7003 cast<VectorType>(Ty1)->getElementCount(),
7004 "llvm.ptrmask intrinsic arguments must have the same number of "
7005 "elements",
7006 &Call);
7007 Check(DL.getIndexTypeSizeInBits(Ty0) == Ty1->getScalarSizeInBits(),
7008 "llvm.ptrmask intrinsic second argument bitwidth must match "
7009 "pointer index type size of first argument",
7010 &Call);
7011 break;
7012 }
7013 case Intrinsic::thread_pointer: {
7015 DL.getDefaultGlobalsAddressSpace(),
7016 "llvm.thread.pointer intrinsic return type must be for the globals "
7017 "address space",
7018 &Call);
7019 break;
7020 }
7021 case Intrinsic::threadlocal_address: {
7022 const Value &Arg0 = *Call.getArgOperand(0);
7023 Check(isa<GlobalValue>(Arg0),
7024 "llvm.threadlocal.address first argument must be a GlobalValue");
7025 Check(cast<GlobalValue>(Arg0).isThreadLocal(),
7026 "llvm.threadlocal.address operand isThreadLocal() must be true");
7027 break;
7028 }
7029 case Intrinsic::lifetime_start:
7030 case Intrinsic::lifetime_end: {
7031 Value *Ptr = Call.getArgOperand(0);
7033 "llvm.lifetime.start/end can only be used on alloca or poison",
7034 &Call);
7035 break;
7036 }
7037 };
7038
7039 // Verify that there aren't any unmediated control transfers between funclets.
7041 Function *F = Call.getParent()->getParent();
7042 if (F->hasPersonalityFn() &&
7043 isScopedEHPersonality(classifyEHPersonality(F->getPersonalityFn()))) {
7044 // Run EH funclet coloring on-demand and cache results for other intrinsic
7045 // calls in this function
7046 if (BlockEHFuncletColors.empty())
7047 BlockEHFuncletColors = colorEHFunclets(*F);
7048
7049 // Check for catch-/cleanup-pad in first funclet block
7050 bool InEHFunclet = false;
7051 BasicBlock *CallBB = Call.getParent();
7052 const ColorVector &CV = BlockEHFuncletColors.find(CallBB)->second;
7053 assert(CV.size() > 0 && "Uncolored block");
7054 for (BasicBlock *ColorFirstBB : CV)
7055 if (auto It = ColorFirstBB->getFirstNonPHIIt();
7056 It != ColorFirstBB->end())
7058 InEHFunclet = true;
7059
7060 // Check for funclet operand bundle
7061 bool HasToken = false;
7062 for (unsigned I = 0, E = Call.getNumOperandBundles(); I != E; ++I)
7064 HasToken = true;
7065
7066 // This would cause silent code truncation in WinEHPrepare
7067 if (InEHFunclet)
7068 Check(HasToken, "Missing funclet token on intrinsic call", &Call);
7069 }
7070 }
7071}
7072
7073/// Carefully grab the subprogram from a local scope.
7074///
7075/// This carefully grabs the subprogram from a local scope, avoiding the
7076/// built-in assertions that would typically fire.
7078 if (!LocalScope)
7079 return nullptr;
7080
7081 if (auto *SP = dyn_cast<DISubprogram>(LocalScope))
7082 return SP;
7083
7084 if (auto *LB = dyn_cast<DILexicalBlockBase>(LocalScope))
7085 return getSubprogram(LB->getRawScope());
7086
7087 // Just return null; broken scope chains are checked elsewhere.
7088 assert(!isa<DILocalScope>(LocalScope) && "Unknown type of local scope");
7089 return nullptr;
7090}
7091
7092void Verifier::visit(DbgLabelRecord &DLR) {
7094 "invalid #dbg_label intrinsic variable", &DLR, DLR.getRawLabel());
7095
7096 // Ignore broken !dbg attachments; they're checked elsewhere.
7097 if (MDNode *N = DLR.getDebugLoc().getAsMDNode())
7098 if (!isa<DILocation>(N))
7099 return;
7100
7101 BasicBlock *BB = DLR.getParent();
7102 Function *F = BB ? BB->getParent() : nullptr;
7103
7104 // The scopes for variables and !dbg attachments must agree.
7105 DILabel *Label = DLR.getLabel();
7106 DILocation *Loc = DLR.getDebugLoc();
7107 CheckDI(Loc, "#dbg_label record requires a !dbg attachment", &DLR, BB, F);
7108
7109 DISubprogram *LabelSP = getSubprogram(Label->getRawScope());
7110 DISubprogram *LocSP = getSubprogram(Loc->getRawScope());
7111 if (!LabelSP || !LocSP)
7112 return;
7113
7114 CheckDI(LabelSP == LocSP,
7115 "mismatched subprogram between #dbg_label label and !dbg attachment",
7116 &DLR, BB, F, Label, Label->getScope()->getSubprogram(), Loc,
7117 Loc->getScope()->getSubprogram());
7118}
7119
7120void Verifier::visit(DbgVariableRecord &DVR) {
7121 BasicBlock *BB = DVR.getParent();
7122 Function *F = BB->getParent();
7123
7124 CheckDI(DVR.getType() == DbgVariableRecord::LocationType::Value ||
7125 DVR.getType() == DbgVariableRecord::LocationType::Declare ||
7126 DVR.getType() == DbgVariableRecord::LocationType::DeclareValue ||
7127 DVR.getType() == DbgVariableRecord::LocationType::Assign,
7128 "invalid #dbg record type", &DVR, DVR.getType(), BB, F);
7129
7130 // The location for a DbgVariableRecord must be either a ValueAsMetadata,
7131 // DIArgList, or an empty MDNode (which is a legacy representation for an
7132 // "undef" location).
7133 auto *MD = DVR.getRawLocation();
7134 CheckDI(MD && (isa<ValueAsMetadata>(MD) || isa<DIArgList>(MD) ||
7135 (isa<MDNode>(MD) && !cast<MDNode>(MD)->getNumOperands())),
7136 "invalid #dbg record address/value", &DVR, MD, BB, F);
7137 if (auto *VAM = dyn_cast<ValueAsMetadata>(MD)) {
7138 visitValueAsMetadata(*VAM, F);
7139 if (DVR.isDbgDeclare()) {
7140 // Allow integers here to support inttoptr salvage.
7141 Type *Ty = VAM->getValue()->getType();
7142 CheckDI(Ty->isPointerTy() || Ty->isIntegerTy(),
7143 "location of #dbg_declare must be a pointer or int", &DVR, MD, BB,
7144 F);
7145 }
7146 } else if (auto *AL = dyn_cast<DIArgList>(MD)) {
7147 visitDIArgList(*AL, F);
7148 }
7149
7151 "invalid #dbg record variable", &DVR, DVR.getRawVariable(), BB, F);
7152 visitMDNode(*DVR.getRawVariable(), AreDebugLocsAllowed::No);
7153
7155 "invalid #dbg record expression", &DVR, DVR.getRawExpression(), BB,
7156 F);
7157 visitMDNode(*DVR.getExpression(), AreDebugLocsAllowed::No);
7158
7159 if (DVR.isDbgAssign()) {
7161 "invalid #dbg_assign DIAssignID", &DVR, DVR.getRawAssignID(), BB,
7162 F);
7163 visitMDNode(*cast<DIAssignID>(DVR.getRawAssignID()),
7164 AreDebugLocsAllowed::No);
7165
7166 const auto *RawAddr = DVR.getRawAddress();
7167 // Similarly to the location above, the address for an assign
7168 // DbgVariableRecord must be a ValueAsMetadata or an empty MDNode, which
7169 // represents an undef address.
7170 CheckDI(
7171 isa<ValueAsMetadata>(RawAddr) ||
7172 (isa<MDNode>(RawAddr) && !cast<MDNode>(RawAddr)->getNumOperands()),
7173 "invalid #dbg_assign address", &DVR, DVR.getRawAddress(), BB, F);
7174 if (auto *VAM = dyn_cast<ValueAsMetadata>(RawAddr))
7175 visitValueAsMetadata(*VAM, F);
7176
7178 "invalid #dbg_assign address expression", &DVR,
7179 DVR.getRawAddressExpression(), BB, F);
7180 visitMDNode(*DVR.getAddressExpression(), AreDebugLocsAllowed::No);
7181
7182 // All of the linked instructions should be in the same function as DVR.
7183 for (Instruction *I : at::getAssignmentInsts(&DVR))
7184 CheckDI(DVR.getFunction() == I->getFunction(),
7185 "inst not in same function as #dbg_assign", I, &DVR, BB, F);
7186 }
7187
7188 // This check is redundant with one in visitLocalVariable().
7189 DILocalVariable *Var = DVR.getVariable();
7190 CheckDI(isType(Var->getRawType()), "invalid type ref", Var, Var->getRawType(),
7191 BB, F);
7192
7193 auto *DLNode = DVR.getDebugLoc().getAsMDNode();
7194 CheckDI(isa_and_nonnull<DILocation>(DLNode), "invalid #dbg record DILocation",
7195 &DVR, DLNode, BB, F);
7196 DILocation *Loc = DVR.getDebugLoc();
7197
7198 // The scopes for variables and !dbg attachments must agree.
7199 DISubprogram *VarSP = getSubprogram(Var->getRawScope());
7200 DISubprogram *LocSP = getSubprogram(Loc->getRawScope());
7201 if (!VarSP || !LocSP)
7202 return; // Broken scope chains are checked elsewhere.
7203
7204 CheckDI(VarSP == LocSP,
7205 "mismatched subprogram between #dbg record variable and DILocation",
7206 &DVR, BB, F, Var, Var->getScope()->getSubprogram(), Loc,
7207 Loc->getScope()->getSubprogram(), BB, F);
7208
7209 verifyFnArgs(DVR);
7210}
7211
7212void Verifier::visitVPIntrinsic(VPIntrinsic &VPI) {
7213 if (auto *VPCast = dyn_cast<VPCastIntrinsic>(&VPI)) {
7214 auto *RetTy = cast<VectorType>(VPCast->getType());
7215 auto *ValTy = cast<VectorType>(VPCast->getOperand(0)->getType());
7216 Check(RetTy->getElementCount() == ValTy->getElementCount(),
7217 "VP cast intrinsic first argument and result vector lengths must be "
7218 "equal",
7219 *VPCast);
7220
7221 switch (VPCast->getIntrinsicID()) {
7222 default:
7223 llvm_unreachable("Unknown VP cast intrinsic");
7224 case Intrinsic::vp_trunc:
7225 Check(RetTy->isIntOrIntVectorTy() && ValTy->isIntOrIntVectorTy(),
7226 "llvm.vp.trunc intrinsic first argument and result element type "
7227 "must be integer",
7228 *VPCast);
7229 Check(RetTy->getScalarSizeInBits() < ValTy->getScalarSizeInBits(),
7230 "llvm.vp.trunc intrinsic the bit size of first argument must be "
7231 "larger than the bit size of the return type",
7232 *VPCast);
7233 break;
7234 case Intrinsic::vp_zext:
7235 case Intrinsic::vp_sext:
7236 Check(RetTy->isIntOrIntVectorTy() && ValTy->isIntOrIntVectorTy(),
7237 "llvm.vp.zext or llvm.vp.sext intrinsic first argument and result "
7238 "element type must be integer",
7239 *VPCast);
7240 Check(RetTy->getScalarSizeInBits() > ValTy->getScalarSizeInBits(),
7241 "llvm.vp.zext or llvm.vp.sext intrinsic the bit size of first "
7242 "argument must be smaller than the bit size of the return type",
7243 *VPCast);
7244 break;
7245 case Intrinsic::vp_fptoui:
7246 case Intrinsic::vp_fptosi:
7247 case Intrinsic::vp_lrint:
7248 case Intrinsic::vp_llrint:
7249 Check(
7250 RetTy->isIntOrIntVectorTy() && ValTy->isFPOrFPVectorTy(),
7251 "llvm.vp.fptoui, llvm.vp.fptosi, llvm.vp.lrint or llvm.vp.llrint" "intrinsic first argument element "
7252 "type must be floating-point and result element type must be integer",
7253 *VPCast);
7254 break;
7255 case Intrinsic::vp_uitofp:
7256 case Intrinsic::vp_sitofp:
7257 Check(
7258 RetTy->isFPOrFPVectorTy() && ValTy->isIntOrIntVectorTy(),
7259 "llvm.vp.uitofp or llvm.vp.sitofp intrinsic first argument element "
7260 "type must be integer and result element type must be floating-point",
7261 *VPCast);
7262 break;
7263 case Intrinsic::vp_fptrunc:
7264 Check(RetTy->isFPOrFPVectorTy() && ValTy->isFPOrFPVectorTy(),
7265 "llvm.vp.fptrunc intrinsic first argument and result element type "
7266 "must be floating-point",
7267 *VPCast);
7268 Check(RetTy->getScalarSizeInBits() < ValTy->getScalarSizeInBits(),
7269 "llvm.vp.fptrunc intrinsic the bit size of first argument must be "
7270 "larger than the bit size of the return type",
7271 *VPCast);
7272 break;
7273 case Intrinsic::vp_fpext:
7274 Check(RetTy->isFPOrFPVectorTy() && ValTy->isFPOrFPVectorTy(),
7275 "llvm.vp.fpext intrinsic first argument and result element type "
7276 "must be floating-point",
7277 *VPCast);
7278 Check(RetTy->getScalarSizeInBits() > ValTy->getScalarSizeInBits(),
7279 "llvm.vp.fpext intrinsic the bit size of first argument must be "
7280 "smaller than the bit size of the return type",
7281 *VPCast);
7282 break;
7283 case Intrinsic::vp_ptrtoint:
7284 Check(RetTy->isIntOrIntVectorTy() && ValTy->isPtrOrPtrVectorTy(),
7285 "llvm.vp.ptrtoint intrinsic first argument element type must be "
7286 "pointer and result element type must be integer",
7287 *VPCast);
7288 break;
7289 case Intrinsic::vp_inttoptr:
7290 Check(RetTy->isPtrOrPtrVectorTy() && ValTy->isIntOrIntVectorTy(),
7291 "llvm.vp.inttoptr intrinsic first argument element type must be "
7292 "integer and result element type must be pointer",
7293 *VPCast);
7294 break;
7295 }
7296 }
7297
7298 switch (VPI.getIntrinsicID()) {
7299 case Intrinsic::vp_fcmp: {
7300 auto Pred = cast<VPCmpIntrinsic>(&VPI)->getPredicate();
7302 "invalid predicate for VP FP comparison intrinsic", &VPI);
7303 break;
7304 }
7305 case Intrinsic::vp_icmp: {
7306 auto Pred = cast<VPCmpIntrinsic>(&VPI)->getPredicate();
7308 "invalid predicate for VP integer comparison intrinsic", &VPI);
7309 break;
7310 }
7311 case Intrinsic::vp_is_fpclass: {
7312 auto TestMask = cast<ConstantInt>(VPI.getOperand(1));
7313 Check((TestMask->getZExtValue() & ~static_cast<unsigned>(fcAllFlags)) == 0,
7314 "unsupported bits for llvm.vp.is.fpclass test mask");
7315 break;
7316 }
7317 case Intrinsic::experimental_vp_splice: {
7318 VectorType *VecTy = cast<VectorType>(VPI.getType());
7319 int64_t Idx = cast<ConstantInt>(VPI.getArgOperand(2))->getSExtValue();
7320 int64_t KnownMinNumElements = VecTy->getElementCount().getKnownMinValue();
7321 if (VPI.getParent() && VPI.getParent()->getParent()) {
7322 AttributeList Attrs = VPI.getParent()->getParent()->getAttributes();
7323 if (Attrs.hasFnAttr(Attribute::VScaleRange))
7324 KnownMinNumElements *= Attrs.getFnAttrs().getVScaleRangeMin();
7325 }
7326 Check((Idx < 0 && std::abs(Idx) <= KnownMinNumElements) ||
7327 (Idx >= 0 && Idx < KnownMinNumElements),
7328 "The splice index exceeds the range [-VL, VL-1] where VL is the "
7329 "known minimum number of elements in the vector. For scalable "
7330 "vectors the minimum number of elements is determined from "
7331 "vscale_range.",
7332 &VPI);
7333 break;
7334 }
7335 }
7336}
7337
7338void Verifier::visitConstrainedFPIntrinsic(ConstrainedFPIntrinsic &FPI) {
7339 unsigned NumOperands = FPI.getNonMetadataArgCount();
7340 bool HasRoundingMD =
7342
7343 // Add the expected number of metadata operands.
7344 NumOperands += (1 + HasRoundingMD);
7345
7346 // Compare intrinsics carry an extra predicate metadata operand.
7348 NumOperands += 1;
7349 Check((FPI.arg_size() == NumOperands),
7350 "invalid arguments for constrained FP intrinsic", &FPI);
7351
7352 switch (FPI.getIntrinsicID()) {
7353 case Intrinsic::experimental_constrained_lrint:
7354 case Intrinsic::experimental_constrained_llrint: {
7355 Type *ValTy = FPI.getArgOperand(0)->getType();
7356 Type *ResultTy = FPI.getType();
7357 Check(!ValTy->isVectorTy() && !ResultTy->isVectorTy(),
7358 "Intrinsic does not support vectors", &FPI);
7359 break;
7360 }
7361
7362 case Intrinsic::experimental_constrained_lround:
7363 case Intrinsic::experimental_constrained_llround: {
7364 Type *ValTy = FPI.getArgOperand(0)->getType();
7365 Type *ResultTy = FPI.getType();
7366 Check(!ValTy->isVectorTy() && !ResultTy->isVectorTy(),
7367 "Intrinsic does not support vectors", &FPI);
7368 break;
7369 }
7370
7371 case Intrinsic::experimental_constrained_fcmp:
7372 case Intrinsic::experimental_constrained_fcmps: {
7373 auto Pred = cast<ConstrainedFPCmpIntrinsic>(&FPI)->getPredicate();
7375 "invalid predicate for constrained FP comparison intrinsic", &FPI);
7376 break;
7377 }
7378
7379 case Intrinsic::experimental_constrained_fptosi:
7380 case Intrinsic::experimental_constrained_fptoui: {
7381 Value *Operand = FPI.getArgOperand(0);
7382 ElementCount SrcEC;
7383 Check(Operand->getType()->isFPOrFPVectorTy(),
7384 "Intrinsic first argument must be floating point", &FPI);
7385 if (auto *OperandT = dyn_cast<VectorType>(Operand->getType())) {
7386 SrcEC = cast<VectorType>(OperandT)->getElementCount();
7387 }
7388
7389 Operand = &FPI;
7390 Check(SrcEC.isNonZero() == Operand->getType()->isVectorTy(),
7391 "Intrinsic first argument and result disagree on vector use", &FPI);
7392 Check(Operand->getType()->isIntOrIntVectorTy(),
7393 "Intrinsic result must be an integer", &FPI);
7394 if (auto *OperandT = dyn_cast<VectorType>(Operand->getType())) {
7395 Check(SrcEC == cast<VectorType>(OperandT)->getElementCount(),
7396 "Intrinsic first argument and result vector lengths must be equal",
7397 &FPI);
7398 }
7399 break;
7400 }
7401
7402 case Intrinsic::experimental_constrained_sitofp:
7403 case Intrinsic::experimental_constrained_uitofp: {
7404 Value *Operand = FPI.getArgOperand(0);
7405 ElementCount SrcEC;
7406 Check(Operand->getType()->isIntOrIntVectorTy(),
7407 "Intrinsic first argument must be integer", &FPI);
7408 if (auto *OperandT = dyn_cast<VectorType>(Operand->getType())) {
7409 SrcEC = cast<VectorType>(OperandT)->getElementCount();
7410 }
7411
7412 Operand = &FPI;
7413 Check(SrcEC.isNonZero() == Operand->getType()->isVectorTy(),
7414 "Intrinsic first argument and result disagree on vector use", &FPI);
7415 Check(Operand->getType()->isFPOrFPVectorTy(),
7416 "Intrinsic result must be a floating point", &FPI);
7417 if (auto *OperandT = dyn_cast<VectorType>(Operand->getType())) {
7418 Check(SrcEC == cast<VectorType>(OperandT)->getElementCount(),
7419 "Intrinsic first argument and result vector lengths must be equal",
7420 &FPI);
7421 }
7422 break;
7423 }
7424
7425 case Intrinsic::experimental_constrained_fptrunc:
7426 case Intrinsic::experimental_constrained_fpext: {
7427 Value *Operand = FPI.getArgOperand(0);
7428 Type *OperandTy = Operand->getType();
7429 Value *Result = &FPI;
7430 Type *ResultTy = Result->getType();
7431 Check(OperandTy->isFPOrFPVectorTy(),
7432 "Intrinsic first argument must be FP or FP vector", &FPI);
7433 Check(ResultTy->isFPOrFPVectorTy(),
7434 "Intrinsic result must be FP or FP vector", &FPI);
7435 Check(OperandTy->isVectorTy() == ResultTy->isVectorTy(),
7436 "Intrinsic first argument and result disagree on vector use", &FPI);
7437 if (OperandTy->isVectorTy()) {
7438 Check(cast<VectorType>(OperandTy)->getElementCount() ==
7439 cast<VectorType>(ResultTy)->getElementCount(),
7440 "Intrinsic first argument and result vector lengths must be equal",
7441 &FPI);
7442 }
7443 if (FPI.getIntrinsicID() == Intrinsic::experimental_constrained_fptrunc) {
7444 Check(OperandTy->getScalarSizeInBits() > ResultTy->getScalarSizeInBits(),
7445 "Intrinsic first argument's type must be larger than result type",
7446 &FPI);
7447 } else {
7448 Check(OperandTy->getScalarSizeInBits() < ResultTy->getScalarSizeInBits(),
7449 "Intrinsic first argument's type must be smaller than result type",
7450 &FPI);
7451 }
7452 break;
7453 }
7454
7455 default:
7456 break;
7457 }
7458
7459 // If a non-metadata argument is passed in a metadata slot then the
7460 // error will be caught earlier when the incorrect argument doesn't
7461 // match the specification in the intrinsic call table. Thus, no
7462 // argument type check is needed here.
7463
7464 Check(FPI.getExceptionBehavior().has_value(),
7465 "invalid exception behavior argument", &FPI);
7466 if (HasRoundingMD) {
7467 Check(FPI.getRoundingMode().has_value(), "invalid rounding mode argument",
7468 &FPI);
7469 }
7470}
7471
7472void Verifier::verifyFragmentExpression(const DbgVariableRecord &DVR) {
7473 DILocalVariable *V = dyn_cast_or_null<DILocalVariable>(DVR.getRawVariable());
7474 DIExpression *E = dyn_cast_or_null<DIExpression>(DVR.getRawExpression());
7475
7476 // We don't know whether this intrinsic verified correctly.
7477 if (!V || !E || !E->isValid())
7478 return;
7479
7480 // Nothing to do if this isn't a DW_OP_LLVM_fragment expression.
7481 auto Fragment = E->getFragmentInfo();
7482 if (!Fragment)
7483 return;
7484
7485 // The frontend helps out GDB by emitting the members of local anonymous
7486 // unions as artificial local variables with shared storage. When SROA splits
7487 // the storage for artificial local variables that are smaller than the entire
7488 // union, the overhang piece will be outside of the allotted space for the
7489 // variable and this check fails.
7490 // FIXME: Remove this check as soon as clang stops doing this; it hides bugs.
7491 if (V->isArtificial())
7492 return;
7493
7494 verifyFragmentExpression(*V, *Fragment, &DVR);
7495}
7496
7497template <typename ValueOrMetadata>
7498void Verifier::verifyFragmentExpression(const DIVariable &V,
7500 ValueOrMetadata *Desc) {
7501 // If there's no size, the type is broken, but that should be checked
7502 // elsewhere.
7503 auto VarSize = V.getSizeInBits();
7504 if (!VarSize)
7505 return;
7506
7507 unsigned FragSize = Fragment.SizeInBits;
7508 unsigned FragOffset = Fragment.OffsetInBits;
7509 CheckDI(FragSize + FragOffset <= *VarSize,
7510 "fragment is larger than or outside of variable", Desc, &V);
7511 CheckDI(FragSize != *VarSize, "fragment covers entire variable", Desc, &V);
7512}
7513
7514void Verifier::verifyFnArgs(const DbgVariableRecord &DVR) {
7515 // This function does not take the scope of noninlined function arguments into
7516 // account. Don't run it if current function is nodebug, because it may
7517 // contain inlined debug intrinsics.
7518 if (!HasDebugInfo)
7519 return;
7520
7521 // For performance reasons only check non-inlined ones.
7522 if (DVR.getDebugLoc()->getInlinedAt())
7523 return;
7524
7525 DILocalVariable *Var = DVR.getVariable();
7526 CheckDI(Var, "#dbg record without variable");
7527
7528 unsigned ArgNo = Var->getArg();
7529 if (!ArgNo)
7530 return;
7531
7532 // Verify there are no duplicate function argument debug info entries.
7533 // These will cause hard-to-debug assertions in the DWARF backend.
7534 if (DebugFnArgs.size() < ArgNo)
7535 DebugFnArgs.resize(ArgNo, nullptr);
7536
7537 auto *Prev = DebugFnArgs[ArgNo - 1];
7538 DebugFnArgs[ArgNo - 1] = Var;
7539 CheckDI(!Prev || (Prev == Var), "conflicting debug info for argument", &DVR,
7540 Prev, Var);
7541}
7542
7543void Verifier::verifyNotEntryValue(const DbgVariableRecord &DVR) {
7544 DIExpression *E = dyn_cast_or_null<DIExpression>(DVR.getRawExpression());
7545
7546 // We don't know whether this intrinsic verified correctly.
7547 if (!E || !E->isValid())
7548 return;
7549
7551 Value *VarValue = DVR.getVariableLocationOp(0);
7552 if (isa<UndefValue>(VarValue) || isa<PoisonValue>(VarValue))
7553 return;
7554 // We allow EntryValues for swift async arguments, as they have an
7555 // ABI-guarantee to be turned into a specific register.
7556 if (auto *ArgLoc = dyn_cast_or_null<Argument>(VarValue);
7557 ArgLoc && ArgLoc->hasAttribute(Attribute::SwiftAsync))
7558 return;
7559 }
7560
7561 CheckDI(!E->isEntryValue(),
7562 "Entry values are only allowed in MIR unless they target a "
7563 "swiftasync Argument",
7564 &DVR);
7565}
7566
7567void Verifier::verifyCompileUnits() {
7568 // When more than one Module is imported into the same context, such as during
7569 // an LTO build before linking the modules, ODR type uniquing may cause types
7570 // to point to a different CU. This check does not make sense in this case.
7571 if (M.getContext().isODRUniquingDebugTypes())
7572 return;
7573 auto *CUs = M.getNamedMetadata("llvm.dbg.cu");
7574 SmallPtrSet<const Metadata *, 2> Listed;
7575 if (CUs)
7576 Listed.insert_range(CUs->operands());
7577 for (const auto *CU : CUVisited)
7578 CheckDI(Listed.count(CU), "DICompileUnit not listed in llvm.dbg.cu", CU);
7579 CUVisited.clear();
7580}
7581
7582void Verifier::verifyDeoptimizeCallingConvs() {
7583 if (DeoptimizeDeclarations.empty())
7584 return;
7585
7586 const Function *First = DeoptimizeDeclarations[0];
7587 for (const auto *F : ArrayRef(DeoptimizeDeclarations).slice(1)) {
7588 Check(First->getCallingConv() == F->getCallingConv(),
7589 "All llvm.experimental.deoptimize declarations must have the same "
7590 "calling convention",
7591 First, F);
7592 }
7593}
7594
7595void Verifier::verifyAttachedCallBundle(const CallBase &Call,
7596 const OperandBundleUse &BU) {
7597 FunctionType *FTy = Call.getFunctionType();
7598
7599 Check((FTy->getReturnType()->isPointerTy() ||
7600 (Call.doesNotReturn() && FTy->getReturnType()->isVoidTy())),
7601 "a call with operand bundle \"clang.arc.attachedcall\" must call a "
7602 "function returning a pointer or a non-returning function that has a "
7603 "void return type",
7604 Call);
7605
7606 Check(BU.Inputs.size() == 1 && isa<Function>(BU.Inputs.front()),
7607 "operand bundle \"clang.arc.attachedcall\" requires one function as "
7608 "an argument",
7609 Call);
7610
7611 auto *Fn = cast<Function>(BU.Inputs.front());
7612 Intrinsic::ID IID = Fn->getIntrinsicID();
7613
7614 if (IID) {
7615 Check((IID == Intrinsic::objc_retainAutoreleasedReturnValue ||
7616 IID == Intrinsic::objc_claimAutoreleasedReturnValue ||
7617 IID == Intrinsic::objc_unsafeClaimAutoreleasedReturnValue),
7618 "invalid function argument", Call);
7619 } else {
7620 StringRef FnName = Fn->getName();
7621 Check((FnName == "objc_retainAutoreleasedReturnValue" ||
7622 FnName == "objc_claimAutoreleasedReturnValue" ||
7623 FnName == "objc_unsafeClaimAutoreleasedReturnValue"),
7624 "invalid function argument", Call);
7625 }
7626}
7627
7628void Verifier::verifyNoAliasScopeDecl() {
7629 if (NoAliasScopeDecls.empty())
7630 return;
7631
7632 // only a single scope must be declared at a time.
7633 for (auto *II : NoAliasScopeDecls) {
7634 assert(II->getIntrinsicID() == Intrinsic::experimental_noalias_scope_decl &&
7635 "Not a llvm.experimental.noalias.scope.decl ?");
7636 const auto *ScopeListMV = dyn_cast<MetadataAsValue>(
7638 Check(ScopeListMV != nullptr,
7639 "llvm.experimental.noalias.scope.decl must have a MetadataAsValue "
7640 "argument",
7641 II);
7642
7643 const auto *ScopeListMD = dyn_cast<MDNode>(ScopeListMV->getMetadata());
7644 Check(ScopeListMD != nullptr, "!id.scope.list must point to an MDNode", II);
7645 Check(ScopeListMD->getNumOperands() == 1,
7646 "!id.scope.list must point to a list with a single scope", II);
7647 visitAliasScopeListMetadata(ScopeListMD);
7648 }
7649
7650 // Only check the domination rule when requested. Once all passes have been
7651 // adapted this option can go away.
7653 return;
7654
7655 // Now sort the intrinsics based on the scope MDNode so that declarations of
7656 // the same scopes are next to each other.
7657 auto GetScope = [](IntrinsicInst *II) {
7658 const auto *ScopeListMV = cast<MetadataAsValue>(
7660 return &cast<MDNode>(ScopeListMV->getMetadata())->getOperand(0);
7661 };
7662
7663 // We are sorting on MDNode pointers here. For valid input IR this is ok.
7664 // TODO: Sort on Metadata ID to avoid non-deterministic error messages.
7665 auto Compare = [GetScope](IntrinsicInst *Lhs, IntrinsicInst *Rhs) {
7666 return GetScope(Lhs) < GetScope(Rhs);
7667 };
7668
7669 llvm::sort(NoAliasScopeDecls, Compare);
7670
7671 // Go over the intrinsics and check that for the same scope, they are not
7672 // dominating each other.
7673 auto ItCurrent = NoAliasScopeDecls.begin();
7674 while (ItCurrent != NoAliasScopeDecls.end()) {
7675 auto CurScope = GetScope(*ItCurrent);
7676 auto ItNext = ItCurrent;
7677 do {
7678 ++ItNext;
7679 } while (ItNext != NoAliasScopeDecls.end() &&
7680 GetScope(*ItNext) == CurScope);
7681
7682 // [ItCurrent, ItNext) represents the declarations for the same scope.
7683 // Ensure they are not dominating each other.. but only if it is not too
7684 // expensive.
7685 if (ItNext - ItCurrent < 32)
7686 for (auto *I : llvm::make_range(ItCurrent, ItNext))
7687 for (auto *J : llvm::make_range(ItCurrent, ItNext))
7688 if (I != J)
7689 Check(!DT.dominates(I, J),
7690 "llvm.experimental.noalias.scope.decl dominates another one "
7691 "with the same scope",
7692 I);
7693 ItCurrent = ItNext;
7694 }
7695}
7696
7697//===----------------------------------------------------------------------===//
7698// Implement the public interfaces to this file...
7699//===----------------------------------------------------------------------===//
7700
7702 Function &F = const_cast<Function &>(f);
7703
7704 // Don't use a raw_null_ostream. Printing IR is expensive.
7705 Verifier V(OS, /*ShouldTreatBrokenDebugInfoAsError=*/true, *f.getParent());
7706
7707 // Note that this function's return value is inverted from what you would
7708 // expect of a function called "verify".
7709 return !V.verify(F);
7710}
7711
7713 bool *BrokenDebugInfo) {
7714 // Don't use a raw_null_ostream. Printing IR is expensive.
7715 Verifier V(OS, /*ShouldTreatBrokenDebugInfoAsError=*/!BrokenDebugInfo, M);
7716
7717 bool Broken = false;
7718 for (const Function &F : M)
7719 Broken |= !V.verify(F);
7720
7721 Broken |= !V.verify();
7722 if (BrokenDebugInfo)
7723 *BrokenDebugInfo = V.hasBrokenDebugInfo();
7724 // Note that this function's return value is inverted from what you would
7725 // expect of a function called "verify".
7726 return Broken;
7727}
7728
7729namespace {
7730
7731struct VerifierLegacyPass : public FunctionPass {
7732 static char ID;
7733
7734 std::unique_ptr<Verifier> V;
7735 bool FatalErrors = true;
7736
7737 VerifierLegacyPass() : FunctionPass(ID) {
7739 }
7740 explicit VerifierLegacyPass(bool FatalErrors)
7741 : FunctionPass(ID),
7742 FatalErrors(FatalErrors) {
7744 }
7745
7746 bool doInitialization(Module &M) override {
7747 V = std::make_unique<Verifier>(
7748 &dbgs(), /*ShouldTreatBrokenDebugInfoAsError=*/false, M);
7749 return false;
7750 }
7751
7752 bool runOnFunction(Function &F) override {
7753 if (!V->verify(F) && FatalErrors) {
7754 errs() << "in function " << F.getName() << '\n';
7755 report_fatal_error("Broken function found, compilation aborted!");
7756 }
7757 return false;
7758 }
7759
7760 bool doFinalization(Module &M) override {
7761 bool HasErrors = false;
7762 for (Function &F : M)
7763 if (F.isDeclaration())
7764 HasErrors |= !V->verify(F);
7765
7766 HasErrors |= !V->verify();
7767 if (FatalErrors && (HasErrors || V->hasBrokenDebugInfo()))
7768 report_fatal_error("Broken module found, compilation aborted!");
7769 return false;
7770 }
7771
7772 void getAnalysisUsage(AnalysisUsage &AU) const override {
7773 AU.setPreservesAll();
7774 }
7775};
7776
7777} // end anonymous namespace
7778
7779/// Helper to issue failure from the TBAA verification
7780template <typename... Tys> void TBAAVerifier::CheckFailed(Tys &&... Args) {
7781 if (Diagnostic)
7782 return Diagnostic->CheckFailed(Args...);
7783}
7784
/// We know that a TBAA condition should be true; if not, report the failure
/// via CheckFailed and abandon the current TBAA check by returning false.
#define CheckTBAA(C, ...)                                                      \
  do {                                                                         \
    if (!(C)) {                                                                \
      CheckFailed(__VA_ARGS__);                                                \
      return false;                                                            \
    }                                                                          \
  } while (false)
7792
7793/// Verify that \p BaseNode can be used as the "base type" in the struct-path
7794/// TBAA scheme. This means \p BaseNode is either a scalar node, or a
7795/// struct-type node describing an aggregate data structure (like a struct).
7796TBAAVerifier::TBAABaseNodeSummary
7797TBAAVerifier::verifyTBAABaseNode(const Instruction *I, const MDNode *BaseNode,
7798 bool IsNewFormat) {
7799 if (BaseNode->getNumOperands() < 2) {
7800 CheckFailed("Base nodes must have at least two operands", I, BaseNode);
7801 return {true, ~0u};
7802 }
7803
7804 auto Itr = TBAABaseNodes.find(BaseNode);
7805 if (Itr != TBAABaseNodes.end())
7806 return Itr->second;
7807
7808 auto Result = verifyTBAABaseNodeImpl(I, BaseNode, IsNewFormat);
7809 auto InsertResult = TBAABaseNodes.insert({BaseNode, Result});
7810 (void)InsertResult;
7811 assert(InsertResult.second && "We just checked!");
7812 return Result;
7813}
7814
7815TBAAVerifier::TBAABaseNodeSummary
7816TBAAVerifier::verifyTBAABaseNodeImpl(const Instruction *I,
7817 const MDNode *BaseNode, bool IsNewFormat) {
7818 const TBAAVerifier::TBAABaseNodeSummary InvalidNode = {true, ~0u};
7819
7820 if (BaseNode->getNumOperands() == 2) {
7821 // Scalar nodes can only be accessed at offset 0.
7822 return isValidScalarTBAANode(BaseNode)
7823 ? TBAAVerifier::TBAABaseNodeSummary({false, 0})
7824 : InvalidNode;
7825 }
7826
7827 if (IsNewFormat) {
7828 if (BaseNode->getNumOperands() % 3 != 0) {
7829 CheckFailed("Access tag nodes must have the number of operands that is a "
7830 "multiple of 3!", BaseNode);
7831 return InvalidNode;
7832 }
7833 } else {
7834 if (BaseNode->getNumOperands() % 2 != 1) {
7835 CheckFailed("Struct tag nodes must have an odd number of operands!",
7836 BaseNode);
7837 return InvalidNode;
7838 }
7839 }
7840
7841 // Check the type size field.
7842 if (IsNewFormat) {
7843 auto *TypeSizeNode = mdconst::dyn_extract_or_null<ConstantInt>(
7844 BaseNode->getOperand(1));
7845 if (!TypeSizeNode) {
7846 CheckFailed("Type size nodes must be constants!", I, BaseNode);
7847 return InvalidNode;
7848 }
7849 }
7850
7851 // Check the type name field. In the new format it can be anything.
7852 if (!IsNewFormat && !isa<MDString>(BaseNode->getOperand(0))) {
7853 CheckFailed("Struct tag nodes have a string as their first operand",
7854 BaseNode);
7855 return InvalidNode;
7856 }
7857
7858 bool Failed = false;
7859
7860 std::optional<APInt> PrevOffset;
7861 unsigned BitWidth = ~0u;
7862
7863 // We've already checked that BaseNode is not a degenerate root node with one
7864 // operand in \c verifyTBAABaseNode, so this loop should run at least once.
7865 unsigned FirstFieldOpNo = IsNewFormat ? 3 : 1;
7866 unsigned NumOpsPerField = IsNewFormat ? 3 : 2;
7867 for (unsigned Idx = FirstFieldOpNo; Idx < BaseNode->getNumOperands();
7868 Idx += NumOpsPerField) {
7869 const MDOperand &FieldTy = BaseNode->getOperand(Idx);
7870 const MDOperand &FieldOffset = BaseNode->getOperand(Idx + 1);
7871 if (!isa<MDNode>(FieldTy)) {
7872 CheckFailed("Incorrect field entry in struct type node!", I, BaseNode);
7873 Failed = true;
7874 continue;
7875 }
7876
7877 auto *OffsetEntryCI =
7879 if (!OffsetEntryCI) {
7880 CheckFailed("Offset entries must be constants!", I, BaseNode);
7881 Failed = true;
7882 continue;
7883 }
7884
7885 if (BitWidth == ~0u)
7886 BitWidth = OffsetEntryCI->getBitWidth();
7887
7888 if (OffsetEntryCI->getBitWidth() != BitWidth) {
7889 CheckFailed(
7890 "Bitwidth between the offsets and struct type entries must match", I,
7891 BaseNode);
7892 Failed = true;
7893 continue;
7894 }
7895
7896 // NB! As far as I can tell, we generate a non-strictly increasing offset
7897 // sequence only from structs that have zero size bit fields. When
7898 // recursing into a contained struct in \c getFieldNodeFromTBAABaseNode we
7899 // pick the field lexically the latest in struct type metadata node. This
7900 // mirrors the actual behavior of the alias analysis implementation.
7901 bool IsAscending =
7902 !PrevOffset || PrevOffset->ule(OffsetEntryCI->getValue());
7903
7904 if (!IsAscending) {
7905 CheckFailed("Offsets must be increasing!", I, BaseNode);
7906 Failed = true;
7907 }
7908
7909 PrevOffset = OffsetEntryCI->getValue();
7910
7911 if (IsNewFormat) {
7912 auto *MemberSizeNode = mdconst::dyn_extract_or_null<ConstantInt>(
7913 BaseNode->getOperand(Idx + 2));
7914 if (!MemberSizeNode) {
7915 CheckFailed("Member size entries must be constants!", I, BaseNode);
7916 Failed = true;
7917 continue;
7918 }
7919 }
7920 }
7921
7922 return Failed ? InvalidNode
7923 : TBAAVerifier::TBAABaseNodeSummary(false, BitWidth);
7924}
7925
7926static bool IsRootTBAANode(const MDNode *MD) {
7927 return MD->getNumOperands() < 2;
7928}
7929
7930static bool IsScalarTBAANodeImpl(const MDNode *MD,
7932 if (MD->getNumOperands() != 2 && MD->getNumOperands() != 3)
7933 return false;
7934
7935 if (!isa<MDString>(MD->getOperand(0)))
7936 return false;
7937
7938 if (MD->getNumOperands() == 3) {
7940 if (!(Offset && Offset->isZero() && isa<MDString>(MD->getOperand(0))))
7941 return false;
7942 }
7943
7944 auto *Parent = dyn_cast_or_null<MDNode>(MD->getOperand(1));
7945 return Parent && Visited.insert(Parent).second &&
7946 (IsRootTBAANode(Parent) || IsScalarTBAANodeImpl(Parent, Visited));
7947}
7948
7949bool TBAAVerifier::isValidScalarTBAANode(const MDNode *MD) {
7950 auto ResultIt = TBAAScalarNodes.find(MD);
7951 if (ResultIt != TBAAScalarNodes.end())
7952 return ResultIt->second;
7953
7954 SmallPtrSet<const MDNode *, 4> Visited;
7955 bool Result = IsScalarTBAANodeImpl(MD, Visited);
7956 auto InsertResult = TBAAScalarNodes.insert({MD, Result});
7957 (void)InsertResult;
7958 assert(InsertResult.second && "Just checked!");
7959
7960 return Result;
7961}
7962
/// Returns the field node at the offset \p Offset in \p BaseNode. Update \p
/// Offset in place to be the offset within the field node returned.
///
/// We assume we've okayed \p BaseNode via \c verifyTBAABaseNode.
MDNode *TBAAVerifier::getFieldNodeFromTBAABaseNode(const Instruction *I,
                                                   const MDNode *BaseNode,
                                                   APInt &Offset,
                                                   bool IsNewFormat) {
  assert(BaseNode->getNumOperands() >= 2 && "Invalid base node!");

  // Scalar nodes have only one possible "field" -- their parent in the access
  // hierarchy. Offset must be zero at this point, but our caller is supposed
  // to check that.
  if (BaseNode->getNumOperands() == 2)
    return cast<MDNode>(BaseNode->getOperand(1));

  // Struct-type node: fields are (type, offset[, size]) tuples with ascending
  // offsets (enforced by verifyTBAABaseNodeImpl). Find the first field that
  // starts strictly past Offset; its predecessor contains the access.
  unsigned FirstFieldOpNo = IsNewFormat ? 3 : 1;
  unsigned NumOpsPerField = IsNewFormat ? 3 : 2;
  for (unsigned Idx = FirstFieldOpNo; Idx < BaseNode->getNumOperands();
       Idx += NumOpsPerField) {
    auto *OffsetEntryCI =
        mdconst::extract<ConstantInt>(BaseNode->getOperand(Idx + 1));
    if (OffsetEntryCI->getValue().ugt(Offset)) {
      if (Idx == FirstFieldOpNo) {
        // Even the first field starts past Offset -- no field contains the
        // access.
        CheckFailed("Could not find TBAA parent in struct type node", I,
                    BaseNode, &Offset);
        return nullptr;
      }

      // Rebase Offset to be relative to the start of the containing
      // (previous) field, then descend into it.
      unsigned PrevIdx = Idx - NumOpsPerField;
      auto *PrevOffsetEntryCI =
          mdconst::extract<ConstantInt>(BaseNode->getOperand(PrevIdx + 1));
      Offset -= PrevOffsetEntryCI->getValue();
      return cast<MDNode>(BaseNode->getOperand(PrevIdx));
    }
  }

  // Offset is at or beyond the last field's start, so it falls into the last
  // field.
  unsigned LastIdx = BaseNode->getNumOperands() - NumOpsPerField;
  auto *LastOffsetEntryCI = mdconst::extract<ConstantInt>(
      BaseNode->getOperand(LastIdx + 1));
  Offset -= LastOffsetEntryCI->getValue();
  return cast<MDNode>(BaseNode->getOperand(LastIdx));
}
8006
8008 if (!Type || Type->getNumOperands() < 3)
8009 return false;
8010
8011 // In the new format type nodes shall have a reference to the parent type as
8012 // its first operand.
8013 return isa_and_nonnull<MDNode>(Type->getOperand(0));
8014}
8015
8017 CheckTBAA(MD->getNumOperands() > 0, "TBAA metadata cannot have 0 operands", I,
8018 MD);
8019
8020 if (I)
8024 "This instruction shall not have a TBAA access tag!", I);
8025
8026 bool IsStructPathTBAA =
8027 isa<MDNode>(MD->getOperand(0)) && MD->getNumOperands() >= 3;
8028
8029 CheckTBAA(IsStructPathTBAA,
8030 "Old-style TBAA is no longer allowed, use struct-path TBAA instead",
8031 I);
8032
8033 MDNode *BaseNode = dyn_cast_or_null<MDNode>(MD->getOperand(0));
8034 MDNode *AccessType = dyn_cast_or_null<MDNode>(MD->getOperand(1));
8035
8036 bool IsNewFormat = isNewFormatTBAATypeNode(AccessType);
8037
8038 if (IsNewFormat) {
8039 CheckTBAA(MD->getNumOperands() == 4 || MD->getNumOperands() == 5,
8040 "Access tag metadata must have either 4 or 5 operands", I, MD);
8041 } else {
8042 CheckTBAA(MD->getNumOperands() < 5,
8043 "Struct tag metadata must have either 3 or 4 operands", I, MD);
8044 }
8045
8046 // Check the access size field.
8047 if (IsNewFormat) {
8048 auto *AccessSizeNode = mdconst::dyn_extract_or_null<ConstantInt>(
8049 MD->getOperand(3));
8050 CheckTBAA(AccessSizeNode, "Access size field must be a constant", I, MD);
8051 }
8052
8053 // Check the immutability flag.
8054 unsigned ImmutabilityFlagOpNo = IsNewFormat ? 4 : 3;
8055 if (MD->getNumOperands() == ImmutabilityFlagOpNo + 1) {
8056 auto *IsImmutableCI = mdconst::dyn_extract_or_null<ConstantInt>(
8057 MD->getOperand(ImmutabilityFlagOpNo));
8058 CheckTBAA(IsImmutableCI,
8059 "Immutability tag on struct tag metadata must be a constant", I,
8060 MD);
8061 CheckTBAA(
8062 IsImmutableCI->isZero() || IsImmutableCI->isOne(),
8063 "Immutability part of the struct tag metadata must be either 0 or 1", I,
8064 MD);
8065 }
8066
8067 CheckTBAA(BaseNode && AccessType,
8068 "Malformed struct tag metadata: base and access-type "
8069 "should be non-null and point to Metadata nodes",
8070 I, MD, BaseNode, AccessType);
8071
8072 if (!IsNewFormat) {
8073 CheckTBAA(isValidScalarTBAANode(AccessType),
8074 "Access type node must be a valid scalar type", I, MD,
8075 AccessType);
8076 }
8077
8079 CheckTBAA(OffsetCI, "Offset must be constant integer", I, MD);
8080
8081 APInt Offset = OffsetCI->getValue();
8082 bool SeenAccessTypeInPath = false;
8083
8084 SmallPtrSet<MDNode *, 4> StructPath;
8085
8086 for (/* empty */; BaseNode && !IsRootTBAANode(BaseNode);
8087 BaseNode =
8088 getFieldNodeFromTBAABaseNode(I, BaseNode, Offset, IsNewFormat)) {
8089 if (!StructPath.insert(BaseNode).second) {
8090 CheckFailed("Cycle detected in struct path", I, MD);
8091 return false;
8092 }
8093
8094 bool Invalid;
8095 unsigned BaseNodeBitWidth;
8096 std::tie(Invalid, BaseNodeBitWidth) =
8097 verifyTBAABaseNode(I, BaseNode, IsNewFormat);
8098
8099 // If the base node is invalid in itself, then we've already printed all the
8100 // errors we wanted to print.
8101 if (Invalid)
8102 return false;
8103
8104 SeenAccessTypeInPath |= BaseNode == AccessType;
8105
8106 if (isValidScalarTBAANode(BaseNode) || BaseNode == AccessType)
8107 CheckTBAA(Offset == 0, "Offset not zero at the point of scalar access", I,
8108 MD, &Offset);
8109
8110 CheckTBAA(BaseNodeBitWidth == Offset.getBitWidth() ||
8111 (BaseNodeBitWidth == 0 && Offset == 0) ||
8112 (IsNewFormat && BaseNodeBitWidth == ~0u),
8113 "Access bit-width not the same as description bit-width", I, MD,
8114 BaseNodeBitWidth, Offset.getBitWidth());
8115
8116 if (IsNewFormat && SeenAccessTypeInPath)
8117 break;
8118 }
8119
8120 CheckTBAA(SeenAccessTypeInPath, "Did not see access type in access path!", I,
8121 MD);
8122 return true;
8123}
8124
8125char VerifierLegacyPass::ID = 0;
8126INITIALIZE_PASS(VerifierLegacyPass, "verify", "Module Verifier", false, false)
8127
8129 return new VerifierLegacyPass(FatalErrors);
8130}
8131
8132AnalysisKey VerifierAnalysis::Key;
8139
8144
8146 auto Res = AM.getResult<VerifierAnalysis>(M);
8147 if (FatalErrors && (Res.IRBroken || Res.DebugInfoBroken))
8148 report_fatal_error("Broken module found, compilation aborted!");
8149
8150 return PreservedAnalyses::all();
8151}
8152
8154 auto res = AM.getResult<VerifierAnalysis>(F);
8155 if (res.IRBroken && FatalErrors)
8156 report_fatal_error("Broken function found, compilation aborted!");
8157
8158 return PreservedAnalyses::all();
8159}
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
AMDGPU address space definition.
ArrayRef< TableEntry > TableRef
This file declares a class to represent arbitrary precision floating point values and provide a varie...
This file implements a class to represent arbitrary precision integral constant values and operations...
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Atomic ordering constants.
@ RetAttr
@ FnAttr
This file contains the simple types necessary to represent the attributes associated with functions a...
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
Analysis containing CSE Info
Definition CSEInfo.cpp:27
This file contains the declarations for the subclasses of Constant, which represent the different fla...
This file declares the LLVM IR specialization of the GenericConvergenceVerifier template.
static DISubprogram * getSubprogram(bool IsDistinct, Ts &&...Args)
dxil translate DXIL Translate Metadata
This file defines the DenseMap class.
This file contains constants used for implementing Dwarf debug support.
static bool runOnFunction(Function &F, bool PostInlining)
#define Check(C,...)
Hexagon Common GEP
This file provides various utilities for inspecting and working with the control flow graph in LLVM I...
Module.h This file contains the declarations for the Module class.
This header defines various interfaces for pass management in LLVM.
This defines the Use class.
const size_t AbstractManglingParser< Derived, Alloc >::NumOps
#define F(x, y, z)
Definition MD5.cpp:54
#define I(x, y, z)
Definition MD5.cpp:57
Machine Check Debug Module
This file implements a map that provides insertion order iteration.
This file provides utility for Memory Model Relaxation Annotations (MMRAs).
static bool isContiguous(const ConstantRange &A, const ConstantRange &B)
This file contains the declarations for metadata subclasses.
#define T
#define T1
ConstantRange Range(APInt(BitWidth, Low), APInt(BitWidth, High))
uint64_t High
uint64_t IntrinsicInst * II
#define P(N)
ppc ctr loops verify
#define INITIALIZE_PASS(passName, arg, name, cfg, analysis)
Definition PassSupport.h:56
This file contains the declarations for profiling metadata utility functions.
const SmallVectorImpl< MachineOperand > & Cond
static bool isValid(const char C)
Returns true if C is a valid mangled character: <0-9a-zA-Z_>.
static unsigned getNumElements(Type *Ty)
void visit(MachineFunction &MF, MachineBasicBlock &Start, std::function< void(MachineBasicBlock *)> op)
This file contains some templates that are useful if you are working with the STL at all.
verify safepoint Safepoint IR Verifier
BaseType
A given derived pointer can have multiple base pointers through phi/selects.
This file defines the SmallPtrSet class.
This file defines the SmallVector class.
This file contains some functions that are useful when dealing with strings.
static unsigned getBitWidth(Type *Ty, const DataLayout &DL)
Returns the bitwidth of the given scalar or pointer type.
static bool IsScalarTBAANodeImpl(const MDNode *MD, SmallPtrSetImpl< const MDNode * > &Visited)
static bool isType(const Metadata *MD)
static Instruction * getSuccPad(Instruction *Terminator)
static bool isMDTuple(const Metadata *MD)
static bool isNewFormatTBAATypeNode(llvm::MDNode *Type)
#define CheckDI(C,...)
We know that a debug info condition should be true, if not print an error message.
Definition Verifier.cpp:681
static void forEachUser(const Value *User, SmallPtrSet< const Value *, 32 > &Visited, llvm::function_ref< bool(const Value *)> Callback)
Definition Verifier.cpp:722
static bool isDINode(const Metadata *MD)
static bool isScope(const Metadata *MD)
static cl::opt< bool > VerifyNoAliasScopeDomination("verify-noalias-scope-decl-dom", cl::Hidden, cl::init(false), cl::desc("Ensure that llvm.experimental.noalias.scope.decl for identical " "scopes are not dominating"))
static bool isTypeCongruent(Type *L, Type *R)
Two types are "congruent" if they are identical, or if they are both pointer types with different poi...
#define CheckTBAA(C,...)
static bool isConstantIntMetadataOperand(const Metadata *MD)
static bool IsRootTBAANode(const MDNode *MD)
static Value * getParentPad(Value *EHPad)
static bool hasConflictingReferenceFlags(unsigned Flags)
Detect mutually exclusive flags.
static AttrBuilder getParameterABIAttributes(LLVMContext &C, unsigned I, AttributeList Attrs)
static const char PassName[]
bool isFiniteNonZero() const
Definition APFloat.h:1441
bool isNegative() const
Definition APFloat.h:1431
const fltSemantics & getSemantics() const
Definition APFloat.h:1439
Class for arbitrary precision integers.
Definition APInt.h:78
bool sgt(const APInt &RHS) const
Signed greater than comparison.
Definition APInt.h:1202
bool isMinValue() const
Determine if this is the smallest unsigned value.
Definition APInt.h:418
bool ule(const APInt &RHS) const
Unsigned less or equal comparison.
Definition APInt.h:1151
bool isPowerOf2() const
Check if this APInt's value is a power of two greater than zero.
Definition APInt.h:441
int64_t getSExtValue() const
Get sign extended value.
Definition APInt.h:1563
bool isMaxValue() const
Determine if this is the largest unsigned value.
Definition APInt.h:400
This class represents a conversion between pointers from one address space to another.
bool isSwiftError() const
Return true if this alloca is used as a swifterror argument to a call.
LLVM_ABI bool isStaticAlloca() const
Return true if this alloca is in the entry block of the function and is a constant size.
Align getAlign() const
Return the alignment of the memory that is being allocated by the instruction.
Type * getAllocatedType() const
Return the type that is being allocated by the instruction.
unsigned getAddressSpace() const
Return the address space for the allocation.
LLVM_ABI bool isArrayAllocation() const
Return true if there is an allocation size parameter to the allocation instruction that is not 1.
const Value * getArraySize() const
Get the number of elements allocated.
PassT::Result & getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs)
Get the result of an analysis pass for a given IR unit.
void setPreservesAll()
Set by analyses that do not transform their input at all.
LLVM_ABI bool hasInRegAttr() const
Return true if this argument has the inreg attribute.
Definition Function.cpp:293
bool empty() const
empty - Check if the array is empty.
Definition ArrayRef.h:137
static bool isFPOperation(BinOp Op)
BinOp getOperation() const
static LLVM_ABI StringRef getOperationName(BinOp Op)
AtomicOrdering getOrdering() const
Returns the ordering constraint of this rmw instruction.
bool contains(Attribute::AttrKind A) const
Return true if the builder has the specified attribute.
LLVM_ABI bool hasAttribute(Attribute::AttrKind Kind) const
Return true if the attribute exists in this set.
LLVM_ABI std::string getAsString(bool InAttrGrp=false) const
Functions, function parameters, and return types can have attributes to indicate how they should be t...
Definition Attributes.h:69
LLVM_ABI const ConstantRange & getValueAsConstantRange() const
Return the attribute's value as a ConstantRange.
LLVM_ABI StringRef getValueAsString() const
Return the attribute's value as a string.
AttrKind
This enumeration lists the attributes that can be associated with parameters, function results,...
Definition Attributes.h:88
bool isValid() const
Return true if the attribute is any kind of attribute.
Definition Attributes.h:223
LLVM Basic Block Representation.
Definition BasicBlock.h:62
iterator begin()
Instruction iterator methods.
Definition BasicBlock.h:459
iterator_range< const_phi_iterator > phis() const
Returns a range that iterates over the phis in the basic block.
Definition BasicBlock.h:528
const Function * getParent() const
Return the enclosing method, or null if none.
Definition BasicBlock.h:213
LLVM_ABI InstListType::const_iterator getFirstNonPHIIt() const
Returns an iterator to the first instruction in this block that is not a PHINode instruction.
LLVM_ABI bool isEntryBlock() const
Return true if this is the entry block of the containing function.
const Instruction & front() const
Definition BasicBlock.h:482
LLVM_ABI const BasicBlock * getUniquePredecessor() const
Return the predecessor of this block if it has a unique predecessor block.
InstListType::iterator iterator
Instruction iterators...
Definition BasicBlock.h:170
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
Definition BasicBlock.h:233
This class represents a no-op cast from one type to another.
static LLVM_ABI BlockAddress * lookup(const BasicBlock *BB)
Lookup an existing BlockAddress constant for the given BasicBlock.
bool isConditional() const
Value * getCondition() const
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
bool isInlineAsm() const
Check if this call is an inline asm statement.
bool hasInAllocaArgument() const
Determine if there are is an inalloca argument.
OperandBundleUse getOperandBundleAt(unsigned Index) const
Return the operand bundle at a specific index.
Function * getCalledFunction() const
Returns the function called, or null if this is an indirect function invocation or the function signa...
bool doesNotAccessMemory(unsigned OpNo) const
bool hasFnAttr(Attribute::AttrKind Kind) const
Determine whether this call has the given attribute.
unsigned getNumOperandBundles() const
Return the number of operand bundles associated with this User.
CallingConv::ID getCallingConv() const
LLVM_ABI bool paramHasAttr(unsigned ArgNo, Attribute::AttrKind Kind) const
Determine whether the argument or parameter has the given attribute.
Attribute getParamAttr(unsigned ArgNo, Attribute::AttrKind Kind) const
Get the attribute of a given kind from a given arg.
iterator_range< bundle_op_iterator > bundle_op_infos()
Return the range [bundle_op_info_begin, bundle_op_info_end).
unsigned countOperandBundlesOfType(StringRef Name) const
Return the number of operand bundles with the tag Name attached to this instruction.
bool onlyReadsMemory(unsigned OpNo) const
Value * getCalledOperand() const
Type * getParamElementType(unsigned ArgNo) const
Extract the elementtype type for a parameter.
Value * getArgOperand(unsigned i) const
FunctionType * getFunctionType() const
LLVM_ABI Intrinsic::ID getIntrinsicID() const
Returns the intrinsic ID of the intrinsic called or Intrinsic::not_intrinsic if the called function i...
iterator_range< User::op_iterator > args()
Iteration adapter for range-for loops.
bool doesNotReturn() const
Determine if the call cannot return.
LLVM_ABI bool onlyAccessesArgMemory() const
Determine if the call can access memmory only using pointers based on its arguments.
unsigned arg_size() const
AttributeList getAttributes() const
Return the attributes for this call.
bool hasOperandBundles() const
Return true if this User has any operand bundles.
LLVM_ABI Function * getCaller()
Helper to get the caller (the parent function).
BasicBlock * getIndirectDest(unsigned i) const
unsigned getNumIndirectDests() const
Return the number of callbr indirect dest labels.
bool isMustTailCall() const
static LLVM_ABI bool castIsValid(Instruction::CastOps op, Type *SrcTy, Type *DstTy)
This method can be used to determine if a cast from SrcTy to DstTy using Opcode op is valid or not.
unsigned getNumHandlers() const
return the number of 'handlers' in this catchswitch instruction, except the default handler
Value * getParentPad() const
BasicBlock * getUnwindDest() const
handler_range handlers()
iteration adapter for range-for loops.
BasicBlock * getUnwindDest() const
bool isFPPredicate() const
Definition InstrTypes.h:782
bool isIntPredicate() const
Definition InstrTypes.h:783
static bool isIntPredicate(Predicate P)
Definition InstrTypes.h:776
bool isMinusOne() const
This function will return true iff every bit in this constant is set to true.
Definition Constants.h:231
bool isNegative() const
Definition Constants.h:214
bool isZero() const
This is just a convenience method to make client code smaller for a common code.
Definition Constants.h:219
unsigned getBitWidth() const
getBitWidth - Return the scalar bitwidth of this constant.
Definition Constants.h:162
uint64_t getZExtValue() const
Return the constant as a 64-bit unsigned integer value after it has been zero extended as appropriate...
Definition Constants.h:168
const APInt & getValue() const
Return the constant as an APInt value reference.
Definition Constants.h:159
Constant * getAddrDiscriminator() const
The address discriminator if any, or the null constant.
Definition Constants.h:1078
Constant * getPointer() const
The pointer that is signed in this ptrauth signed pointer.
Definition Constants.h:1065
ConstantInt * getKey() const
The Key ID, an i32 constant.
Definition Constants.h:1068
Constant * getDeactivationSymbol() const
Definition Constants.h:1087
ConstantInt * getDiscriminator() const
The integer discriminator, an i64 constant, or 0.
Definition Constants.h:1071
static LLVM_ABI bool isOrderedRanges(ArrayRef< ConstantRange > RangesRef)
This class represents a range of values.
const APInt & getLower() const
Return the lower value for this range.
const APInt & getUpper() const
Return the upper value for this range.
LLVM_ABI bool contains(const APInt &Val) const
Return true if the specified value is in the set.
uint32_t getBitWidth() const
Get the bit width of this ConstantRange.
static LLVM_ABI ConstantTokenNone * get(LLVMContext &Context)
Return the ConstantTokenNone.
LLVM_ABI bool isNullValue() const
Return true if this is the value that would be returned by getNullValue.
Definition Constants.cpp:90
LLVM_ABI std::optional< fp::ExceptionBehavior > getExceptionBehavior() const
LLVM_ABI std::optional< RoundingMode > getRoundingMode() const
LLVM_ABI unsigned getNonMetadataArgCount() const
DbgVariableFragmentInfo FragmentInfo
@ FixedPointBinary
Scale factor 2^Factor.
@ FixedPointDecimal
Scale factor 10^Factor.
@ FixedPointRational
Arbitrary rational scale factor.
DIGlobalVariable * getVariable() const
LLVM_ABI DISubprogram * getSubprogram() const
Get the subprogram for this scope.
DILocalScope * getScope() const
Get the local scope for this variable.
Metadata * getRawScope() const
Base class for scope-like contexts.
Subprogram description. Uses SubclassData1.
static const DIScope * getRawRetainedNodeScope(const MDNode *N)
Base class for template parameters.
Base class for variables.
Metadata * getRawType() const
Metadata * getRawScope() const
A parsed version of the target data layout string in and methods for querying it.
Definition DataLayout.h:64
Records a position in IR for a source label (DILabel).
Base class for non-instruction debug metadata records that have positions within IR.
LLVM_ABI void print(raw_ostream &O, bool IsForDebug=false) const
DebugLoc getDebugLoc() const
LLVM_ABI const BasicBlock * getParent() const
LLVM_ABI Function * getFunction()
Record of a variable value-assignment, aka a non instruction representation of the dbg....
LLVM_ABI Value * getVariableLocationOp(unsigned OpIdx) const
DIExpression * getExpression() const
DILocalVariable * getVariable() const
Metadata * getRawLocation() const
Returns the metadata operand for the first location description.
@ End
Marks the end of the concrete types.
@ Any
To indicate all LocationTypes in searches.
DIExpression * getAddressExpression() const
MDNode * getAsMDNode() const
Return this as a bare MDNode.
Definition DebugLoc.h:290
ValueT lookup(const_arg_type_t< KeyT > Val) const
lookup - Return the entry for the specified key, or a default constructed value if no such entry exis...
Definition DenseMap.h:205
iterator find(const_arg_type_t< KeyT > Val)
Definition DenseMap.h:178
bool empty() const
Definition DenseMap.h:109
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Definition DenseMap.h:241
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
Definition Dominators.h:164
This instruction extracts a single (scalar) element from a VectorType value.
static LLVM_ABI bool isValidOperands(const Value *Vec, const Value *Idx)
Return true if an extractelement instruction can be formed with the specified operands.
ArrayRef< unsigned > getIndices() const
static LLVM_ABI Type * getIndexedType(Type *Agg, ArrayRef< unsigned > Idxs)
Returns the type of the element that would be extracted with an extractvalue instruction with the spe...
This instruction compares its operands according to the predicate given to the constructor.
This class represents an extension of floating point types.
This class represents a cast from floating point to signed integer.
This class represents a cast from floating point to unsigned integer.
This class represents a truncation of floating point types.
AtomicOrdering getOrdering() const
Returns the ordering constraint of this fence instruction.
Value * getParentPad() const
Convenience accessors.
FunctionPass class - This class is used to implement most global optimizations.
Definition Pass.h:314
Type * getReturnType() const
FunctionType * getFunctionType() const
Returns the FunctionType for me.
Definition Function.h:209
Intrinsic::ID getIntrinsicID() const LLVM_READONLY
getIntrinsicID - This method returns the ID number of the specified function, or Intrinsic::not_intri...
Definition Function.h:244
DISubprogram * getSubprogram() const
Get the attached subprogram.
CallingConv::ID getCallingConv() const
getCallingConv()/setCallingConv(CC) - These method get and set the calling convention of this functio...
Definition Function.h:270
bool hasPersonalityFn() const
Check whether this function has a personality function.
Definition Function.h:903
const Function & getFunction() const
Definition Function.h:164
const std::string & getGC() const
Definition Function.cpp:834
Type * getReturnType() const
Returns the type of the ret val.
Definition Function.h:214
bool isVarArg() const
isVarArg - Return true if this function takes a variable number of arguments.
Definition Function.h:227
LLVM_ABI Value * getBasePtr() const
LLVM_ABI Value * getDerivedPtr() const
static LLVM_ABI Type * getIndexedType(Type *Ty, ArrayRef< Value * > IdxList)
Returns the result type of a getelementptr with the given source element type and indexes.
static bool isValidLinkage(LinkageTypes L)
Definition GlobalAlias.h:98
const Constant * getAliasee() const
Definition GlobalAlias.h:87
LLVM_ABI const Function * getResolverFunction() const
Definition Globals.cpp:665
static bool isValidLinkage(LinkageTypes L)
Definition GlobalIFunc.h:86
const Constant * getResolver() const
Definition GlobalIFunc.h:73
LLVM_ABI void getAllMetadata(SmallVectorImpl< std::pair< unsigned, MDNode * > > &MDs) const
Appends all metadata attached to this value to MDs, sorting by KindID.
bool hasComdat() const
MDNode * getMetadata(unsigned KindID) const
Get the current metadata attachments for the given kind, if any.
Definition Value.h:576
bool hasExternalLinkage() const
bool isDSOLocal() const
bool isImplicitDSOLocal() const
LLVM_ABI bool isDeclaration() const
Return true if the primary definition of this global value is outside of the current translation unit...
Definition Globals.cpp:328
bool hasValidDeclarationLinkage() const
LinkageTypes getLinkage() const
bool hasDefaultVisibility() const
bool hasPrivateLinkage() const
bool hasHiddenVisibility() const
bool hasExternalWeakLinkage() const
bool hasDLLImportStorageClass() const
bool hasDLLExportStorageClass() const
bool isDeclarationForLinker() const
unsigned getAddressSpace() const
Module * getParent()
Get the module that this global value is contained inside of...
PointerType * getType() const
Global values are always pointers.
LLVM_ABI bool isInterposable() const
Return true if this global's definition can be substituted with an arbitrary definition at link time ...
Definition Globals.cpp:107
bool hasComdat() const
bool hasCommonLinkage() const
bool hasGlobalUnnamedAddr() const
bool hasAppendingLinkage() const
bool hasAvailableExternallyLinkage() const
Type * getValueType() const
const Constant * getInitializer() const
getInitializer - Return the initializer for this global variable.
bool hasInitializer() const
Definitions have initializers, declarations don't.
MaybeAlign getAlign() const
Returns the alignment of the given variable.
bool isConstant() const
If the value is a global constant, its value is immutable throughout the runtime execution of the pro...
bool hasDefinitiveInitializer() const
hasDefinitiveInitializer - Whether the global variable has an initializer, and any other instances of...
This instruction compares its operands according to the predicate given to the constructor.
BasicBlock * getDestination(unsigned i)
Return the specified destination.
unsigned getNumDestinations() const
return the number of possible destinations in this indirectbr instruction.
unsigned getNumSuccessors() const
This instruction inserts a single (scalar) element into a VectorType value.
static LLVM_ABI bool isValidOperands(const Value *Vec, const Value *NewElt, const Value *Idx)
Return true if an insertelement instruction can be formed with the specified operands.
ArrayRef< unsigned > getIndices() const
Base class for instruction visitors.
Definition InstVisitor.h:78
void visit(Iterator Start, Iterator End)
Definition InstVisitor.h:87
LLVM_ABI unsigned getNumSuccessors() const LLVM_READONLY
Return the number of successors that this instruction has.
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
LLVM_ABI bool isAtomic() const LLVM_READONLY
Return true if this instruction has an AtomicOrdering of unordered or higher.
LLVM_ABI const Function * getFunction() const
Return the function this instruction belongs to.
This class represents a cast from an integer to a pointer.
static LLVM_ABI bool mayLowerToFunctionCall(Intrinsic::ID IID)
Check if the intrinsic might lower into a regular function call in the course of IR transformations.
Intrinsic::ID getIntrinsicID() const
Return the intrinsic ID of this intrinsic.
This is an important class for using LLVM in a threaded context.
Definition LLVMContext.h:68
bool isCleanup() const
Return 'true' if this landingpad instruction is a cleanup.
unsigned getNumClauses() const
Get the number of clauses for this landing pad.
bool isCatch(unsigned Idx) const
Return 'true' if the clause and index Idx is a catch clause.
bool isFilter(unsigned Idx) const
Return 'true' if the clause and index Idx is a filter clause.
Constant * getClause(unsigned Idx) const
Get the value of the clause at index Idx.
AtomicOrdering getOrdering() const
Returns the ordering constraint of this load instruction.
SyncScope::ID getSyncScopeID() const
Returns the synchronization scope ID of this load instruction.
Align getAlign() const
Return the alignment of the access that is being performed.
Metadata node.
Definition Metadata.h:1078
const MDOperand & getOperand(unsigned I) const
Definition Metadata.h:1442
bool isTemporary() const
Definition Metadata.h:1262
ArrayRef< MDOperand > operands() const
Definition Metadata.h:1440
unsigned getNumOperands() const
Return number of MDNode operands.
Definition Metadata.h:1448
bool isDistinct() const
Definition Metadata.h:1261
bool isResolved() const
Check if node is fully resolved.
Definition Metadata.h:1258
LLVMContext & getContext() const
Definition Metadata.h:1242
bool equalsStr(StringRef Str) const
Definition Metadata.h:922
Metadata * get() const
Definition Metadata.h:929
LLVM_ABI StringRef getString() const
Definition Metadata.cpp:618
static LLVM_ABI bool isTagMD(const Metadata *MD)
This class implements a map that also provides access to all stored values in a deterministic order.
Definition MapVector.h:36
static LLVM_ABI MetadataAsValue * getIfExists(LLVMContext &Context, Metadata *MD)
Definition Metadata.cpp:112
Metadata * getMetadata() const
Definition Metadata.h:201
Root of the metadata hierarchy.
Definition Metadata.h:64
LLVM_ABI void print(raw_ostream &OS, const Module *M=nullptr, bool IsForDebug=false) const
Print.
unsigned getMetadataID() const
Definition Metadata.h:104
Manage lifetime of a slot tracker for printing IR.
A Module instance is used to store all the information related to an LLVM module.
Definition Module.h:67
LLVM_ABI StringRef getName() const
LLVM_ABI void print(raw_ostream &ROS, bool IsForDebug=false) const
LLVM_ABI unsigned getNumOperands() const
iterator_range< op_iterator > operands()
Definition Metadata.h:1853
op_range incoming_values()
static LLVM_ABI PassRegistry * getPassRegistry()
getPassRegistry - Access the global registry object, which is automatically initialized at applicatio...
A set of analyses that are preserved following a run of a transformation pass.
Definition Analysis.h:112
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
Definition Analysis.h:118
This class represents a cast from a pointer to an address (non-capturing ptrtoint).
This class represents a cast from a pointer to an integer.
Value * getValue() const
Convenience accessor.
Value * getReturnValue() const
Convenience accessor. Returns null if there is no return value.
This class represents a sign extension of integer types.
This class represents a cast from signed integer to floating point.
static LLVM_ABI const char * areInvalidOperands(Value *Cond, Value *True, Value *False)
Return a string if the specified operands are invalid for a select operation, otherwise return null.
This instruction constructs a fixed permutation of two input vectors.
static LLVM_ABI bool isValidOperands(const Value *V1, const Value *V2, const Value *Mask)
Return true if a shufflevector instruction can be formed with the specified operands.
static LLVM_ABI void getShuffleMask(const Constant *Mask, SmallVectorImpl< int > &Result)
Convert the input shuffle mask operand to a vector of integers.
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
void insert_range(Range &&R)
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
void reserve(size_type N)
iterator insert(iterator I, T &&Elt)
void resize(size_type N)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
std::pair< StringRef, StringRef > split(char Separator) const
Split into two substrings around the first occurrence of a separator character.
Definition StringRef.h:702
static constexpr size_t npos
Definition StringRef.h:57
bool getAsInteger(unsigned Radix, T &Result) const
Parse the current string as an integer of the specified radix.
Definition StringRef.h:472
bool starts_with(StringRef Prefix) const
Check if this string starts with the given Prefix.
Definition StringRef.h:261
constexpr bool empty() const
empty - Check if the string is empty.
Definition StringRef.h:143
unsigned getNumElements() const
Random access to the elements.
LLVM_ABI Type * getTypeAtIndex(const Value *V) const
Given an index value into the type, return the type of the element.
Definition Type.cpp:718
LLVM_ABI bool isScalableTy(SmallPtrSetImpl< const Type * > &Visited) const
Returns true if this struct contains a scalable vector.
Definition Type.cpp:440
Verify that the TBAA Metadatas are valid.
Definition Verifier.h:40
LLVM_ABI bool visitTBAAMetadata(const Instruction *I, const MDNode *MD)
Visit an instruction, or a TBAA node itself as part of a metadata, and return true if it is valid,...
unsigned size() const
Triple - Helper class for working with autoconf configuration names.
Definition Triple.h:47
This class represents a truncation of integer types.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition Twine.h:82
The instances of the Type class are immutable: once they are created, they are never changed.
Definition Type.h:45
bool isVectorTy() const
True if this is an instance of VectorType.
Definition Type.h:273
LLVM_ABI bool containsNonGlobalTargetExtType(SmallPtrSetImpl< const Type * > &Visited) const
Return true if this type is or contains a target extension type that disallows being used as a global...
Definition Type.cpp:74
bool isArrayTy() const
True if this is an instance of ArrayType.
Definition Type.h:264
LLVM_ABI bool containsNonLocalTargetExtType(SmallPtrSetImpl< const Type * > &Visited) const
Return true if this type is or contains a target extension type that disallows being used as a local.
Definition Type.cpp:90
LLVM_ABI bool isScalableTy(SmallPtrSetImpl< const Type * > &Visited) const
Return true if this is a type whose size is a known multiple of vscale.
Definition Type.cpp:61
bool isLabelTy() const
Return true if this is 'label'.
Definition Type.h:228
bool isIntOrIntVectorTy() const
Return true if this is an integer type or a vector of integer types.
Definition Type.h:246
bool isPointerTy() const
True if this is an instance of PointerType.
Definition Type.h:267
bool isTokenLikeTy() const
Returns true if this is 'token' or a token-like target type.
Definition Type.cpp:1065
LLVM_ABI unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
bool isSingleValueType() const
Return true if the type is a valid type for a register in codegen.
Definition Type.h:296
LLVM_ABI bool canLosslesslyBitCastTo(Type *Ty) const
Return true if this type could be converted with a lossless BitCast to type 'Ty'.
Definition Type.cpp:153
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
Definition Type.h:352
bool isSized(SmallPtrSetImpl< Type * > *Visited=nullptr) const
Return true if it makes sense to take the size of this type.
Definition Type.h:311
LLVM_ABI unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
Definition Type.cpp:230
bool isFloatingPointTy() const
Return true if this is one of the floating-point types.
Definition Type.h:184
bool isPtrOrPtrVectorTy() const
Return true if this is a pointer type or a vector of pointer types.
Definition Type.h:270
bool isIntOrPtrTy() const
Return true if this is an integer type or a pointer type.
Definition Type.h:255
bool isIntegerTy() const
True if this is an instance of IntegerType.
Definition Type.h:240
bool isFPOrFPVectorTy() const
Return true if this is a FP type or a vector of FP.
Definition Type.h:225
bool isVoidTy() const
Return true if this is 'void'.
Definition Type.h:139
bool isMetadataTy() const
Return true if this is 'metadata'.
Definition Type.h:231
This class represents a cast unsigned integer to floating point.
op_range operands()
Definition User.h:292
Value * getOperand(unsigned i) const
Definition User.h:232
unsigned getNumOperands() const
Definition User.h:254
This class represents the va_arg llvm instruction, which returns an argument of the specified type gi...
Value * getValue() const
Definition Metadata.h:498
LLVM Value Representation.
Definition Value.h:75
iterator_range< user_iterator > materialized_users()
Definition Value.h:420
Type * getType() const
All values are typed, get the type of this value.
Definition Value.h:256
LLVM_ABI const Value * stripInBoundsOffsets(function_ref< void(const Value *)> Func=[](const Value *) {}) const
Strip off pointer casts and inbounds GEPs.
Definition Value.cpp:812
iterator_range< user_iterator > users()
Definition Value.h:426
bool materialized_use_empty() const
Definition Value.h:351
LLVM_ABI const Value * stripPointerCasts() const
Strip off pointer casts, all-zero GEPs and address space casts.
Definition Value.cpp:701
LLVM_ABI LLVMContext & getContext() const
All values hold a context through their type.
Definition Value.cpp:1099
bool hasName() const
Definition Value.h:262
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
Definition Value.cpp:322
Check a module for errors, and report separate error states for IR and debug info errors.
Definition Verifier.h:109
LLVM_ABI Result run(Module &M, ModuleAnalysisManager &)
LLVM_ABI PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM)
This class represents zero extension of integer types.
constexpr bool isNonZero() const
Definition TypeSize.h:155
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
Definition TypeSize.h:168
constexpr ScalarTy getKnownMinValue() const
Returns the minimum value this quantity can represent.
Definition TypeSize.h:165
An efficient, type-erasing, non-owning reference to a callable.
const ParentTy * getParent() const
Definition ilist_node.h:34
NodeTy * getNextNode()
Get the next node, or nullptr for the list tail.
Definition ilist_node.h:348
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition raw_ostream.h:53
CallInst * Call
This file contains the declaration of the Comdat class, which represents a single COMDAT in LLVM.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
@ FLAT_ADDRESS
Address space for flat memory.
@ GLOBAL_ADDRESS
Address space for global memory (RAT0, VTX0).
@ PRIVATE_ADDRESS
Address space for private memory.
constexpr char Align[]
Key for Kernel::Arg::Metadata::mAlign.
constexpr char Args[]
Key for Kernel::Metadata::mArgs.
constexpr char Attrs[]
Key for Kernel::Metadata::mAttrs.
bool isFlatGlobalAddrSpace(unsigned AS)
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
@ Entry
Definition COFF.h:862
unsigned ID
LLVM IR allows using arbitrary numbers as calling convention identifiers.
Definition CallingConv.h:24
@ C
The default llvm calling convention, compatible with C.
Definition CallingConv.h:34
@ BasicBlock
Various leaf nodes.
Definition ISDOpcodes.h:81
LLVM_ABI MatchIntrinsicTypesResult matchIntrinsicSignature(FunctionType *FTy, ArrayRef< IITDescriptor > &Infos, SmallVectorImpl< Type * > &ArgTys)
Match the specified function type with the type constraints specified by the .td file.
LLVM_ABI void getIntrinsicInfoTableEntries(ID id, SmallVectorImpl< IITDescriptor > &T)
Return the IIT table descriptor for the specified intrinsic into an array of IITDescriptors.
@ MatchIntrinsicTypes_NoMatchRet
Definition Intrinsics.h:260
@ MatchIntrinsicTypes_NoMatchArg
Definition Intrinsics.h:261
LLVM_ABI bool hasConstrainedFPRoundingModeOperand(ID QID)
Returns true if the intrinsic ID is for one of the "ConstrainedFloating-Point Intrinsics" that take r...
LLVM_ABI StringRef getName(ID id)
Return the LLVM name for an intrinsic, such as "llvm.ppc.altivec.lvx".
static const int NoAliasScopeDeclScopeArg
Definition Intrinsics.h:41
LLVM_ABI bool matchIntrinsicVarArg(bool isVarArg, ArrayRef< IITDescriptor > &Infos)
Verify if the intrinsic has variable arguments.
std::variant< std::monostate, Loc::Single, Loc::Multi, Loc::MMI, Loc::EntryValue > Variant
Alias for the std::variant specialization base class of DbgVariable.
Definition DwarfDebug.h:189
Flag
These should be considered private to the implementation of the MCInstrDesc class.
@ System
Synchronized with respect to all concurrently executing threads.
Definition LLVMContext.h:58
LLVM_ABI std::optional< VFInfo > tryDemangleForVFABI(StringRef MangledName, const FunctionType *FTy)
Function to construct a VFInfo out of a mangled names in the following format:
@ CE
Windows NT (Windows on ARM)
Definition MCAsmInfo.h:48
LLVM_ABI AssignmentInstRange getAssignmentInsts(DIAssignID *ID)
Return a range of instructions (typically just one) that have ID as an attachment.
initializer< Ty > init(const Ty &Val)
@ DW_MACINFO_undef
Definition Dwarf.h:818
@ DW_MACINFO_start_file
Definition Dwarf.h:819
@ DW_MACINFO_define
Definition Dwarf.h:817
std::enable_if_t< detail::IsValidPointer< X, Y >::value, X * > dyn_extract_or_null(Y &&MD)
Extract a Value from Metadata, if any, allowing null.
Definition Metadata.h:708
std::enable_if_t< detail::IsValidPointer< X, Y >::value, X * > dyn_extract(Y &&MD)
Extract a Value from Metadata, if any.
Definition Metadata.h:695
std::enable_if_t< detail::IsValidPointer< X, Y >::value, X * > extract(Y &&MD)
Extract a Value from Metadata.
Definition Metadata.h:667
@ User
could "use" a pointer
NodeAddr< UseNode * > Use
Definition RDFGraph.h:385
NodeAddr< NodeBase * > Node
Definition RDFGraph.h:381
friend class Instruction
Iterator for Instructions in a `BasicBlock.
Definition BasicBlock.h:73
This is an optimization pass for GlobalISel generic memory operations.
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
Definition STLExtras.h:316
@ Low
Lower the current thread's priority such that it does not affect foreground tasks significantly.
Definition Threading.h:280
@ Offset
Definition DWP.cpp:532
FunctionAddr VTableAddr Value
Definition InstrProf.h:137
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1737
LLVM_ABI bool canInstructionHaveMMRAs(const Instruction &I)
detail::zippy< detail::zip_first, T, U, Args... > zip_equal(T &&t, U &&u, Args &&...args)
zip iterator that assumes that all iteratees have the same length.
Definition STLExtras.h:839
LLVM_ABI unsigned getBranchWeightOffset(const MDNode *ProfileData)
Return the offset to the first branch weight data.
constexpr bool isInt(int64_t x)
Checks if an integer fits into the given bit width.
Definition MathExtras.h:165
auto enumerate(FirstRange &&First, RestRanges &&...Rest)
Given two or more input ranges, returns a new range whose values are tuples (A, B,...
Definition STLExtras.h:2484
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:643
LLVM_ABI bool verifyFunction(const Function &F, raw_ostream *OS=nullptr)
Check a function for errors, useful for use when debugging a pass.
AllocFnKind
Definition Attributes.h:51
testing::Matcher< const detail::ErrorHolder & > Failed()
Definition Error.h:198
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
Definition STLExtras.h:2148
LLVM_ABI DenseMap< BasicBlock *, ColorVector > colorEHFunclets(Function &F)
If an EH funclet personality is in use (see isFuncletEHPersonality), this will recompute which blocks...
constexpr bool isPowerOf2_64(uint64_t Value)
Return true if the argument is a power of two > 0 (64 bit edition.)
Definition MathExtras.h:284
bool isa_and_nonnull(const Y &Val)
Definition Casting.h:676
Op::Description Desc
bool isScopedEHPersonality(EHPersonality Pers)
Returns true if this personality uses scope-style EH IR instructions: catchswitch,...
auto dyn_cast_or_null(const Y &Val)
Definition Casting.h:753
GenericConvergenceVerifier< SSAContext > ConvergenceVerifier
LLVM_ABI void initializeVerifierLegacyPassPass(PassRegistry &)
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
Definition MathExtras.h:279
bool isModSet(const ModRefInfo MRI)
Definition ModRef.h:49
void sort(IteratorTy Start, IteratorTy End)
Definition STLExtras.h:1634
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition Debug.cpp:207
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
Definition Error.cpp:167
FunctionAddr VTableAddr Count
Definition InstrProf.h:139
LLVM_ABI EHPersonality classifyEHPersonality(const Value *Pers)
See if the given exception handling personality function is one that we understand.
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
Definition Casting.h:547
LLVM_ABI bool isValueProfileMD(const MDNode *ProfileData)
Checks if an MDNode contains value profiling Metadata.
LLVM_ABI raw_fd_ostream & errs()
This returns a reference to a raw_ostream for standard error.
LLVM_ABI unsigned getNumBranchWeights(const MDNode &ProfileData)
AtomicOrdering
Atomic ordering for LLVM's memory model.
@ First
Helpers to iterate all locations in the MemoryEffectsBase class.
Definition ModRef.h:74
LLVM_ABI FunctionPass * createVerifierPass(bool FatalErrors=true)
FunctionAddr VTableAddr Next
Definition InstrProf.h:141
DWARFExpression::Operation Op
ArrayRef(const T &OneElt) -> ArrayRef< T >
constexpr unsigned BitWidth
TinyPtrVector< BasicBlock * > ColorVector
LLVM_ABI const char * LLVMLoopEstimatedTripCount
Profile-based loop metadata that should be accessed only by using llvm::getLoopEstimatedTripCount and...
DenormalMode parseDenormalFPAttribute(StringRef Str)
Returns the denormal mode to use for inputs and outputs.
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:559
LLVM_ABI std::optional< RoundingMode > convertStrToRoundingMode(StringRef)
Returns a valid RoundingMode enumerator when given a string that is valid as input in constrained int...
Definition FPEnv.cpp:25
LLVM_ABI std::unique_ptr< GCStrategy > getGCStrategy(const StringRef Name)
Lookup the GCStrategy object associated with the given gc name.
auto predecessors(const MachineBasicBlock *BB)
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Definition STLExtras.h:1909
bool pred_empty(const BasicBlock *BB)
Definition CFG.h:119
bool isHexDigit(char C)
Checks if character C is a hexadecimal numeric character.
AnalysisManager< Function > FunctionAnalysisManager
Convenience typedef for the Function analysis manager.
constexpr bool isCallableCC(CallingConv::ID CC)
LLVM_ABI bool verifyModule(const Module &M, raw_ostream *OS=nullptr, bool *BrokenDebugInfo=nullptr)
Check a module for errors.
AnalysisManager< Module > ModuleAnalysisManager
Convenience typedef for the Module analysis manager.
Definition MIRParser.h:39
#define N
constexpr uint64_t value() const
This is a hole in the type system and should not be abused.
Definition Alignment.h:77
A special type used by analysis passes to provide an address that identifies that particular analysis...
Definition Analysis.h:29
static LLVM_ABI const char * SyntheticFunctionEntryCount
static LLVM_ABI const char * UnknownBranchWeightsMarker
static LLVM_ABI const char * ValueProfile
static LLVM_ABI const char * FunctionEntryCount
static LLVM_ABI const char * BranchWeights
uint32_t getTagID() const
Return the tag of this operand bundle as an integer.
ArrayRef< Use > Inputs
void DebugInfoCheckFailed(const Twine &Message)
A debug info check failed.
Definition Verifier.cpp:305
VerifierSupport(raw_ostream *OS, const Module &M)
Definition Verifier.cpp:154
bool Broken
Track the brokenness of the module while recursively visiting.
Definition Verifier.cpp:148
void CheckFailed(const Twine &Message, const T1 &V1, const Ts &... Vs)
A check failed (with values to print).
Definition Verifier.cpp:298
bool BrokenDebugInfo
Broken debug info can be "recovered" from by stripping the debug info.
Definition Verifier.cpp:150
LLVMContext & Context
Definition Verifier.cpp:145
bool TreatBrokenDebugInfoAsError
Whether to treat broken debug info as an error.
Definition Verifier.cpp:152
void CheckFailed(const Twine &Message)
A check failed, so printout out the condition and the message.
Definition Verifier.cpp:287
const Module & M
Definition Verifier.cpp:141
const DataLayout & DL
Definition Verifier.cpp:144
void DebugInfoCheckFailed(const Twine &Message, const T1 &V1, const Ts &... Vs)
A debug info check failed (with values to print).
Definition Verifier.cpp:314
const Triple & TT
Definition Verifier.cpp:143
ModuleSlotTracker MST
Definition Verifier.cpp:142