1//===-- Verifier.cpp - Implement the Module Verifier -----------------------==//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file defines the function verifier interface, which can be used for some
10// basic correctness checking of input to the system.
11//
12// Note that this does not provide full `Java style' security and verifications,
13// instead it just tries to ensure that code is well-formed.
14//
15// * Both of a binary operator's parameters are of the same type
16// * Verify that the indices of mem access instructions match other operands
17// * Verify that arithmetic and other things are only performed on first-class
18// types. Verify that shifts & logicals only happen on integrals f.e.
19// * All of the constants in a switch statement are of the correct type
20// * The code is in valid SSA form
21// * It should be illegal to put a label into any other type (like a structure)
22// or to return one. [except constant arrays!]
23// * Only phi nodes can be self referential: 'add i32 %0, %0 ; <int>:0' is bad
24// * PHI nodes must have an entry for each predecessor, with no extras.
25// * PHI nodes must be the first thing in a basic block, all grouped together
26// * All basic blocks should only end with terminator insts, not contain them
27// * The entry node to a function must not have predecessors
28// * All Instructions must be embedded into a basic block
29// * Functions cannot take a void-typed parameter
30// * Verify that a function's argument list agrees with its declared type.
31// * It is illegal to specify a name for a void value.
32// * It is illegal to have an internal global value with no initializer
33// * It is illegal to have a ret instruction that returns a value that does not
34// agree with the function return value type.
35// * Function call argument types match the function prototype
36// * A landing pad is defined by a landingpad instruction, and can be jumped to
37// only by the unwind edge of an invoke instruction.
38// * A landingpad instruction must be the first non-PHI instruction in the
39// block.
40// * Landingpad instructions must be in a function with a personality function.
41// * Convergence control intrinsics are introduced in ConvergentOperations.rst.
42// The applied restrictions are too numerous to list here.
43// * The convergence entry intrinsic and the loop heart must be the first
44// non-PHI instruction in their respective block. This does not conflict with
45// the landing pads, since these two kinds cannot occur in the same block.
46// * All other things that are tested by asserts spread about the code...
47//
48//===----------------------------------------------------------------------===//
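//
// Usage sketch (not part of this file, shown for orientation): the machinery
// below is normally reached through the entry points declared in
// llvm/IR/Verifier.h rather than by instantiating the Verifier class
// directly. Assuming an already-populated Module M and Function F:
//
//   #include "llvm/IR/Verifier.h"
//   #include "llvm/Support/raw_ostream.h"
//
//   bool ModuleBroken = llvm::verifyModule(M, &llvm::errs());
//   bool FuncBroken = llvm::verifyFunction(F, &llvm::errs());
//
// Both helpers return true when problems are found; passing nullptr instead
// of a stream suppresses the diagnostics and yields only the boolean result.
//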
49
50#include "llvm/IR/Verifier.h"
51#include "llvm/ADT/APFloat.h"
52#include "llvm/ADT/APInt.h"
53#include "llvm/ADT/ArrayRef.h"
54#include "llvm/ADT/DenseMap.h"
55#include "llvm/ADT/MapVector.h"
56#include "llvm/ADT/STLExtras.h"
60#include "llvm/ADT/StringRef.h"
61#include "llvm/ADT/Twine.h"
63#include "llvm/IR/Argument.h"
65#include "llvm/IR/Attributes.h"
66#include "llvm/IR/BasicBlock.h"
67#include "llvm/IR/CFG.h"
68#include "llvm/IR/CallingConv.h"
69#include "llvm/IR/Comdat.h"
70#include "llvm/IR/Constant.h"
73#include "llvm/IR/Constants.h"
75#include "llvm/IR/DataLayout.h"
76#include "llvm/IR/DebugInfo.h"
78#include "llvm/IR/DebugLoc.h"
80#include "llvm/IR/Dominators.h"
82#include "llvm/IR/Function.h"
83#include "llvm/IR/GCStrategy.h"
84#include "llvm/IR/GlobalAlias.h"
85#include "llvm/IR/GlobalValue.h"
87#include "llvm/IR/InlineAsm.h"
88#include "llvm/IR/InstVisitor.h"
89#include "llvm/IR/InstrTypes.h"
90#include "llvm/IR/Instruction.h"
93#include "llvm/IR/Intrinsics.h"
94#include "llvm/IR/IntrinsicsAArch64.h"
95#include "llvm/IR/IntrinsicsAMDGPU.h"
96#include "llvm/IR/IntrinsicsARM.h"
97#include "llvm/IR/IntrinsicsNVPTX.h"
98#include "llvm/IR/IntrinsicsWebAssembly.h"
99#include "llvm/IR/LLVMContext.h"
101#include "llvm/IR/Metadata.h"
102#include "llvm/IR/Module.h"
104#include "llvm/IR/PassManager.h"
106#include "llvm/IR/Statepoint.h"
107#include "llvm/IR/Type.h"
108#include "llvm/IR/Use.h"
109#include "llvm/IR/User.h"
111#include "llvm/IR/Value.h"
113#include "llvm/Pass.h"
117#include "llvm/Support/Casting.h"
121#include "llvm/Support/ModRef.h"
124#include <algorithm>
125#include <cassert>
126#include <cstdint>
127#include <memory>
128#include <optional>
129#include <string>
130#include <utility>
131
132using namespace llvm;
133
134static cl::opt<bool> VerifyNoAliasScopeDomination(
135 "verify-noalias-scope-decl-dom", cl::Hidden, cl::init(false),
136 cl::desc("Ensure that llvm.experimental.noalias.scope.decl for identical "
137 "scopes are not dominating"));
138
139struct VerifierSupport {
140 raw_ostream *OS;
141 const Module &M;
142 ModuleSlotTracker MST;
143 const Triple &TT;
144 const DataLayout &DL;
145 LLVMContext &Context;
146
147 /// Track the brokenness of the module while recursively visiting.
148 bool Broken = false;
149 /// Broken debug info can be "recovered" from by stripping the debug info.
150 bool BrokenDebugInfo = false;
151 /// Whether to treat broken debug info as an error.
152 bool TreatBrokenDebugInfoAsError = true;
153
154 explicit VerifierSupport(raw_ostream *OS, const Module &M)
155 : OS(OS), M(M), MST(&M), TT(M.getTargetTriple()), DL(M.getDataLayout()),
156 Context(M.getContext()) {}
157
158private:
159 void Write(const Module *M) {
160 *OS << "; ModuleID = '" << M->getModuleIdentifier() << "'\n";
161 }
162
163 void Write(const Value *V) {
164 if (V)
165 Write(*V);
166 }
167
168 void Write(const Value &V) {
169 if (isa<Instruction>(V)) {
170 V.print(*OS, MST);
171 *OS << '\n';
172 } else {
173 V.printAsOperand(*OS, true, MST);
174 *OS << '\n';
175 }
176 }
177
178 void Write(const DbgRecord *DR) {
179 if (DR) {
180 DR->print(*OS, MST, false);
181 *OS << '\n';
182 }
183 }
184
186 switch (Type) {
188 *OS << "value";
189 break;
191 *OS << "declare";
192 break;
194 *OS << "declare_value";
195 break;
197 *OS << "assign";
198 break;
200 *OS << "end";
201 break;
203 *OS << "any";
204 break;
205 };
206 }
207
208 void Write(const Metadata *MD) {
209 if (!MD)
210 return;
211 MD->print(*OS, MST, &M);
212 *OS << '\n';
213 }
214
215 template <class T> void Write(const MDTupleTypedArrayWrapper<T> &MD) {
216 Write(MD.get());
217 }
218
219 void Write(const NamedMDNode *NMD) {
220 if (!NMD)
221 return;
222 NMD->print(*OS, MST);
223 *OS << '\n';
224 }
225
226 void Write(Type *T) {
227 if (!T)
228 return;
229 *OS << ' ' << *T;
230 }
231
232 void Write(const Comdat *C) {
233 if (!C)
234 return;
235 *OS << *C;
236 }
237
238 void Write(const APInt *AI) {
239 if (!AI)
240 return;
241 *OS << *AI << '\n';
242 }
243
244 void Write(const unsigned i) { *OS << i << '\n'; }
245
246 // NOLINTNEXTLINE(readability-identifier-naming)
247 void Write(const Attribute *A) {
248 if (!A)
249 return;
250 *OS << A->getAsString() << '\n';
251 }
252
253 // NOLINTNEXTLINE(readability-identifier-naming)
254 void Write(const AttributeSet *AS) {
255 if (!AS)
256 return;
257 *OS << AS->getAsString() << '\n';
258 }
259
260 // NOLINTNEXTLINE(readability-identifier-naming)
261 void Write(const AttributeList *AL) {
262 if (!AL)
263 return;
264 AL->print(*OS);
265 }
266
267 void Write(Printable P) { *OS << P << '\n'; }
268
269 template <typename T> void Write(ArrayRef<T> Vs) {
270 for (const T &V : Vs)
271 Write(V);
272 }
273
274 template <typename T1, typename... Ts>
275 void WriteTs(const T1 &V1, const Ts &... Vs) {
276 Write(V1);
277 WriteTs(Vs...);
278 }
279
280 template <typename... Ts> void WriteTs() {}
281
282public:
283 /// A check failed, so print out the condition and the message.
284 ///
285 /// This provides a nice place to put a breakpoint if you want to see why
286 /// something is not correct.
287 void CheckFailed(const Twine &Message) {
288 if (OS)
289 *OS << Message << '\n';
290 Broken = true;
291 }
292
293 /// A check failed (with values to print).
294 ///
295 /// This calls the Message-only version so that the above is easier to set a
296 /// breakpoint on.
297 template <typename T1, typename... Ts>
298 void CheckFailed(const Twine &Message, const T1 &V1, const Ts &... Vs) {
299 CheckFailed(Message);
300 if (OS)
301 WriteTs(V1, Vs...);
302 }
303
304 /// A debug info check failed.
305 void DebugInfoCheckFailed(const Twine &Message) {
306 if (OS)
307 *OS << Message << '\n';
308 Broken |= TreatBrokenDebugInfoAsError;
309 BrokenDebugInfo = true;
310 }
311
312 /// A debug info check failed (with values to print).
313 template <typename T1, typename... Ts>
314 void DebugInfoCheckFailed(const Twine &Message, const T1 &V1,
315 const Ts &... Vs) {
316 DebugInfoCheckFailed(Message);
317 if (OS)
318 WriteTs(V1, Vs...);
319 }
320};
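// Usage sketch (illustrative, not code from this file): the Check/CheckDI
// macros defined further down funnel failures into CheckFailed(); any extra
// arguments after the message are printed via the matching Write() overloads
// above, one value per line. For example, a call such as
//
//   CheckFailed("Global is external, but doesn't have external or weak "
//               "linkage!", &GV);
//
// prints the message, then the offending GlobalValue, and marks the module
// as Broken so verification ultimately fails.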
321
322namespace {
323
324class Verifier : public InstVisitor<Verifier>, VerifierSupport {
325 friend class InstVisitor<Verifier>;
326 DominatorTree DT;
327
328 /// When verifying a basic block, keep track of all of the
329 /// instructions we have seen so far.
330 ///
331 /// This allows us to do efficient dominance checks for the case when an
332 /// instruction has an operand that is an instruction in the same block.
333 SmallPtrSet<Instruction *, 16> InstsInThisBlock;
334
335 /// Keep track of the metadata nodes that have been checked already.
336 SmallPtrSet<const Metadata *, 32> MDNodes;
337
338 /// Keep track which DISubprogram is attached to which function.
340
341 /// Track all DICompileUnits visited.
343
344 /// The result type for a landingpad.
345 Type *LandingPadResultTy;
346
347 /// Whether we've seen a call to @llvm.localescape in this function
348 /// already.
349 bool SawFrameEscape;
350
351 /// Whether the current function has a DISubprogram attached to it.
352 bool HasDebugInfo = false;
353
354 /// Stores the count of how many objects were passed to llvm.localescape for a
355 /// given function and the largest index passed to llvm.localrecover.
357
358 // Maps catchswitches and cleanuppads that unwind to siblings to the
359 // terminators that indicate the unwind, used to detect cycles therein.
361
362 /// Cache which blocks are in which funclet, if an EH funclet personality is
363 /// in use. Otherwise empty.
364 DenseMap<BasicBlock *, ColorVector> BlockEHFuncletColors;
365
366 /// Cache of constants visited in search of ConstantExprs.
367 SmallPtrSet<const Constant *, 32> ConstantExprVisited;
368
369 /// Cache of declarations of the llvm.experimental.deoptimize.<ty> intrinsic.
370 SmallVector<const Function *, 4> DeoptimizeDeclarations;
371
372 /// Cache of attribute lists verified.
373 SmallPtrSet<const void *, 32> AttributeListsVisited;
374
375 // Verify that this GlobalValue is only used in this module.
376 // This map is used to avoid visiting uses twice. We can arrive at a user
377 // twice, if they have multiple operands. In particular for very large
378 // constant expressions, we can arrive at a particular user many times.
379 SmallPtrSet<const Value *, 32> GlobalValueVisited;
380
381 // Keeps track of duplicate function argument debug info.
383
384 TBAAVerifier TBAAVerifyHelper;
385 ConvergenceVerifier ConvergenceVerifyHelper;
386
387 SmallVector<IntrinsicInst *, 4> NoAliasScopeDecls;
388
389 void checkAtomicMemAccessSize(Type *Ty, const Instruction *I);
390
391public:
392 explicit Verifier(raw_ostream *OS, bool ShouldTreatBrokenDebugInfoAsError,
393 const Module &M)
394 : VerifierSupport(OS, M), LandingPadResultTy(nullptr),
395 SawFrameEscape(false), TBAAVerifyHelper(this) {
396 TreatBrokenDebugInfoAsError = ShouldTreatBrokenDebugInfoAsError;
397 }
398
399 bool hasBrokenDebugInfo() const { return BrokenDebugInfo; }
400
401 bool verify(const Function &F) {
402 llvm::TimeTraceScope timeScope("Verifier");
403 assert(F.getParent() == &M &&
404 "An instance of this class only works with a specific module!");
405
406 // First ensure the function is well-enough formed to compute dominance
407 // information, and directly compute a dominance tree. We don't rely on the
408 // pass manager to provide this as it isolates us from a potentially
409 // out-of-date dominator tree and makes it significantly more complex to run
410 // this code outside of a pass manager.
411 // FIXME: It's really gross that we have to cast away constness here.
412 if (!F.empty())
413 DT.recalculate(const_cast<Function &>(F));
414
415 for (const BasicBlock &BB : F) {
416 if (!BB.empty() && BB.back().isTerminator())
417 continue;
418
419 if (OS) {
420 *OS << "Basic Block in function '" << F.getName()
421 << "' does not have terminator!\n";
422 BB.printAsOperand(*OS, true, MST);
423 *OS << "\n";
424 }
425 return false;
426 }
427
428 auto FailureCB = [this](const Twine &Message) {
429 this->CheckFailed(Message);
430 };
431 ConvergenceVerifyHelper.initialize(OS, FailureCB, F);
432
433 Broken = false;
434 // FIXME: We strip const here because the inst visitor strips const.
435 visit(const_cast<Function &>(F));
436 verifySiblingFuncletUnwinds();
437
438 if (ConvergenceVerifyHelper.sawTokens())
439 ConvergenceVerifyHelper.verify(DT);
440
441 InstsInThisBlock.clear();
442 DebugFnArgs.clear();
443 LandingPadResultTy = nullptr;
444 SawFrameEscape = false;
445 SiblingFuncletInfo.clear();
446 verifyNoAliasScopeDecl();
447 NoAliasScopeDecls.clear();
448
449 return !Broken;
450 }
451
452 /// Verify the module that this instance of \c Verifier was initialized with.
453 bool verify() {
454 Broken = false;
455
456 // Collect all declarations of the llvm.experimental.deoptimize intrinsic.
457 for (const Function &F : M)
458 if (F.getIntrinsicID() == Intrinsic::experimental_deoptimize)
459 DeoptimizeDeclarations.push_back(&F);
460
461 // Now that we've visited every function, verify that we never asked to
462 // recover a frame index that wasn't escaped.
463 verifyFrameRecoverIndices();
464 for (const GlobalVariable &GV : M.globals())
465 visitGlobalVariable(GV);
466
467 for (const GlobalAlias &GA : M.aliases())
468 visitGlobalAlias(GA);
469
470 for (const GlobalIFunc &GI : M.ifuncs())
471 visitGlobalIFunc(GI);
472
473 for (const NamedMDNode &NMD : M.named_metadata())
474 visitNamedMDNode(NMD);
475
476 for (const StringMapEntry<Comdat> &SMEC : M.getComdatSymbolTable())
477 visitComdat(SMEC.getValue());
478
479 visitModuleFlags();
480 visitModuleIdents();
481 visitModuleCommandLines();
482 visitModuleErrnoTBAA();
483
484 verifyCompileUnits();
485
486 verifyDeoptimizeCallingConvs();
487 DISubprogramAttachments.clear();
488 return !Broken;
489 }
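  // Usage sketch (illustrative): in-tree clients usually schedule this
  // whole-module verification through the pass managers instead of calling
  // verify() directly, e.g. with the new pass manager:
  //
  //   ModulePassManager MPM;
  //   MPM.addPass(VerifierPass());   // declared in llvm/IR/Verifier.h
  //
  // or, with the legacy pass manager, by adding createVerifierPass().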
490
491private:
492 /// Whether a metadata node is allowed to be, or contain, a DILocation.
493 enum class AreDebugLocsAllowed { No, Yes };
494
495 /// Metadata that should be treated as a range, with slightly different
496 /// requirements.
497 enum class RangeLikeMetadataKind {
498 Range, // MD_range
499 AbsoluteSymbol, // MD_absolute_symbol
500 NoaliasAddrspace // MD_noalias_addrspace
501 };
502
503 // Verification methods...
504 void visitGlobalValue(const GlobalValue &GV);
505 void visitGlobalVariable(const GlobalVariable &GV);
506 void visitGlobalAlias(const GlobalAlias &GA);
507 void visitGlobalIFunc(const GlobalIFunc &GI);
508 void visitAliaseeSubExpr(const GlobalAlias &A, const Constant &C);
509 void visitAliaseeSubExpr(SmallPtrSetImpl<const GlobalAlias *> &Visited,
510 const GlobalAlias &A, const Constant &C);
511 void visitNamedMDNode(const NamedMDNode &NMD);
512 void visitMDNode(const MDNode &MD, AreDebugLocsAllowed AllowLocs);
513 void visitMetadataAsValue(const MetadataAsValue &MD, Function *F);
514 void visitValueAsMetadata(const ValueAsMetadata &MD, Function *F);
515 void visitDIArgList(const DIArgList &AL, Function *F);
516 void visitComdat(const Comdat &C);
517 void visitModuleIdents();
518 void visitModuleCommandLines();
519 void visitModuleErrnoTBAA();
520 void visitModuleFlags();
521 void visitModuleFlag(const MDNode *Op,
522 DenseMap<const MDString *, const MDNode *> &SeenIDs,
523 SmallVectorImpl<const MDNode *> &Requirements);
524 void visitModuleFlagCGProfileEntry(const MDOperand &MDO);
525 void visitFunction(const Function &F);
526 void visitBasicBlock(BasicBlock &BB);
527 void verifyRangeLikeMetadata(const Value &V, const MDNode *Range, Type *Ty,
528 RangeLikeMetadataKind Kind);
529 void visitRangeMetadata(Instruction &I, MDNode *Range, Type *Ty);
530 void visitNoaliasAddrspaceMetadata(Instruction &I, MDNode *Range, Type *Ty);
531 void visitDereferenceableMetadata(Instruction &I, MDNode *MD);
532 void visitNofreeMetadata(Instruction &I, MDNode *MD);
533 void visitProfMetadata(Instruction &I, MDNode *MD);
534 void visitCallStackMetadata(MDNode *MD);
535 void visitMemProfMetadata(Instruction &I, MDNode *MD);
536 void visitCallsiteMetadata(Instruction &I, MDNode *MD);
537 void visitCalleeTypeMetadata(Instruction &I, MDNode *MD);
538 void visitDIAssignIDMetadata(Instruction &I, MDNode *MD);
539 void visitMMRAMetadata(Instruction &I, MDNode *MD);
540 void visitAnnotationMetadata(MDNode *Annotation);
541 void visitAliasScopeMetadata(const MDNode *MD);
542 void visitAliasScopeListMetadata(const MDNode *MD);
543 void visitAccessGroupMetadata(const MDNode *MD);
544 void visitCapturesMetadata(Instruction &I, const MDNode *Captures);
545 void visitAllocTokenMetadata(Instruction &I, MDNode *MD);
546
547 template <class Ty> bool isValidMetadataArray(const MDTuple &N);
548#define HANDLE_SPECIALIZED_MDNODE_LEAF(CLASS) void visit##CLASS(const CLASS &N);
549#include "llvm/IR/Metadata.def"
550 void visitDIScope(const DIScope &N);
551 void visitDIVariable(const DIVariable &N);
552 void visitDILexicalBlockBase(const DILexicalBlockBase &N);
553 void visitDITemplateParameter(const DITemplateParameter &N);
554
555 void visitTemplateParams(const MDNode &N, const Metadata &RawParams);
556
557 void visit(DbgLabelRecord &DLR);
558 void visit(DbgVariableRecord &DVR);
559 // InstVisitor overrides...
560 using InstVisitor<Verifier>::visit;
561 void visitDbgRecords(Instruction &I);
562 void visit(Instruction &I);
563
564 void visitTruncInst(TruncInst &I);
565 void visitZExtInst(ZExtInst &I);
566 void visitSExtInst(SExtInst &I);
567 void visitFPTruncInst(FPTruncInst &I);
568 void visitFPExtInst(FPExtInst &I);
569 void visitFPToUIInst(FPToUIInst &I);
570 void visitFPToSIInst(FPToSIInst &I);
571 void visitUIToFPInst(UIToFPInst &I);
572 void visitSIToFPInst(SIToFPInst &I);
573 void visitIntToPtrInst(IntToPtrInst &I);
574 void checkPtrToAddr(Type *SrcTy, Type *DestTy, const Value &V);
575 void visitPtrToAddrInst(PtrToAddrInst &I);
576 void visitPtrToIntInst(PtrToIntInst &I);
577 void visitBitCastInst(BitCastInst &I);
578 void visitAddrSpaceCastInst(AddrSpaceCastInst &I);
579 void visitPHINode(PHINode &PN);
580 void visitCallBase(CallBase &Call);
581 void visitUnaryOperator(UnaryOperator &U);
582 void visitBinaryOperator(BinaryOperator &B);
583 void visitICmpInst(ICmpInst &IC);
584 void visitFCmpInst(FCmpInst &FC);
585 void visitExtractElementInst(ExtractElementInst &EI);
586 void visitInsertElementInst(InsertElementInst &EI);
587 void visitShuffleVectorInst(ShuffleVectorInst &EI);
588 void visitVAArgInst(VAArgInst &VAA) { visitInstruction(VAA); }
589 void visitCallInst(CallInst &CI);
590 void visitInvokeInst(InvokeInst &II);
591 void visitGetElementPtrInst(GetElementPtrInst &GEP);
592 void visitLoadInst(LoadInst &LI);
593 void visitStoreInst(StoreInst &SI);
594 void verifyDominatesUse(Instruction &I, unsigned i);
595 void visitInstruction(Instruction &I);
596 void visitTerminator(Instruction &I);
597 void visitBranchInst(BranchInst &BI);
598 void visitReturnInst(ReturnInst &RI);
599 void visitSwitchInst(SwitchInst &SI);
600 void visitIndirectBrInst(IndirectBrInst &BI);
601 void visitCallBrInst(CallBrInst &CBI);
602 void visitSelectInst(SelectInst &SI);
603 void visitUserOp1(Instruction &I);
604 void visitUserOp2(Instruction &I) { visitUserOp1(I); }
605 void visitIntrinsicCall(Intrinsic::ID ID, CallBase &Call);
606 void visitConstrainedFPIntrinsic(ConstrainedFPIntrinsic &FPI);
607 void visitVPIntrinsic(VPIntrinsic &VPI);
608 void visitDbgLabelIntrinsic(StringRef Kind, DbgLabelInst &DLI);
609 void visitAtomicCmpXchgInst(AtomicCmpXchgInst &CXI);
610 void visitAtomicRMWInst(AtomicRMWInst &RMWI);
611 void visitFenceInst(FenceInst &FI);
612 void visitAllocaInst(AllocaInst &AI);
613 void visitExtractValueInst(ExtractValueInst &EVI);
614 void visitInsertValueInst(InsertValueInst &IVI);
615 void visitEHPadPredecessors(Instruction &I);
616 void visitLandingPadInst(LandingPadInst &LPI);
617 void visitResumeInst(ResumeInst &RI);
618 void visitCatchPadInst(CatchPadInst &CPI);
619 void visitCatchReturnInst(CatchReturnInst &CatchReturn);
620 void visitCleanupPadInst(CleanupPadInst &CPI);
621 void visitFuncletPadInst(FuncletPadInst &FPI);
622 void visitCatchSwitchInst(CatchSwitchInst &CatchSwitch);
623 void visitCleanupReturnInst(CleanupReturnInst &CRI);
624
625 void verifySwiftErrorCall(CallBase &Call, const Value *SwiftErrorVal);
626 void verifySwiftErrorValue(const Value *SwiftErrorVal);
627 void verifyTailCCMustTailAttrs(const AttrBuilder &Attrs, StringRef Context);
628 void verifyMustTailCall(CallInst &CI);
629 bool verifyAttributeCount(AttributeList Attrs, unsigned Params);
630 void verifyAttributeTypes(AttributeSet Attrs, const Value *V);
631 void verifyParameterAttrs(AttributeSet Attrs, Type *Ty, const Value *V);
632 void checkUnsignedBaseTenFuncAttr(AttributeList Attrs, StringRef Attr,
633 const Value *V);
634 void verifyFunctionAttrs(FunctionType *FT, AttributeList Attrs,
635 const Value *V, bool IsIntrinsic, bool IsInlineAsm);
636 void verifyFunctionMetadata(ArrayRef<std::pair<unsigned, MDNode *>> MDs);
637 void verifyUnknownProfileMetadata(MDNode *MD);
638 void visitConstantExprsRecursively(const Constant *EntryC);
639 void visitConstantExpr(const ConstantExpr *CE);
640 void visitConstantPtrAuth(const ConstantPtrAuth *CPA);
641 void verifyInlineAsmCall(const CallBase &Call);
642 void verifyStatepoint(const CallBase &Call);
643 void verifyFrameRecoverIndices();
644 void verifySiblingFuncletUnwinds();
645
646 void verifyFragmentExpression(const DbgVariableRecord &I);
647 template <typename ValueOrMetadata>
648 void verifyFragmentExpression(const DIVariable &V,
650 ValueOrMetadata *Desc);
651 void verifyFnArgs(const DbgVariableRecord &DVR);
652 void verifyNotEntryValue(const DbgVariableRecord &I);
653
654 /// Module-level debug info verification...
655 void verifyCompileUnits();
656
657 /// Module-level verification that all @llvm.experimental.deoptimize
658 /// declarations share the same calling convention.
659 void verifyDeoptimizeCallingConvs();
660
661 void verifyAttachedCallBundle(const CallBase &Call,
662 const OperandBundleUse &BU);
663
664 /// Verify the llvm.experimental.noalias.scope.decl declarations
665 void verifyNoAliasScopeDecl();
666};
667
668} // end anonymous namespace
669
670/// We know that cond should be true; if not, print an error message.
671#define Check(C, ...) \
672 do { \
673 if (!(C)) { \
674 CheckFailed(__VA_ARGS__); \
675 return; \
676 } \
677 } while (false)
678
679/// We know that a debug info condition should be true; if not, print
680/// an error message.
681#define CheckDI(C, ...) \
682 do { \
683 if (!(C)) { \
684 DebugInfoCheckFailed(__VA_ARGS__); \
685 return; \
686 } \
687 } while (false)
688
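// Usage sketch (illustrative): within the visitor methods below, Check()
// reports a hard IR error and returns from the current method on failure,
// while CheckDI() only records broken debug info, which callers may choose
// to strip instead of rejecting the module. Typical calls from this file:
//
//   Check(GA.getType() == Aliasee->getType(),
//         "Alias and aliasee types should match!", &GA);
//   CheckDI(isa<DIFile>(F), "invalid file", &N, F);
//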
689void Verifier::visitDbgRecords(Instruction &I) {
690 if (!I.DebugMarker)
691 return;
692 CheckDI(I.DebugMarker->MarkedInstr == &I,
693 "Instruction has invalid DebugMarker", &I);
694 CheckDI(!isa<PHINode>(&I) || !I.hasDbgRecords(),
695 "PHI Node must not have any attached DbgRecords", &I);
696 for (DbgRecord &DR : I.getDbgRecordRange()) {
697 CheckDI(DR.getMarker() == I.DebugMarker,
698 "DbgRecord had invalid DebugMarker", &I, &DR);
699 if (auto *Loc =
700 dyn_cast_or_null<DILocation>(DR.getDebugLoc().getAsMDNode()))
701 visitMDNode(*Loc, AreDebugLocsAllowed::Yes);
702 if (auto *DVR = dyn_cast<DbgVariableRecord>(&DR)) {
703 visit(*DVR);
704 // These have to appear after `visit` for consistency with existing
705 // intrinsic behaviour.
706 verifyFragmentExpression(*DVR);
707 verifyNotEntryValue(*DVR);
708 } else if (auto *DLR = dyn_cast<DbgLabelRecord>(&DR)) {
709 visit(*DLR);
710 }
711 }
712}
713
714void Verifier::visit(Instruction &I) {
715 visitDbgRecords(I);
716 for (unsigned i = 0, e = I.getNumOperands(); i != e; ++i)
717 Check(I.getOperand(i) != nullptr, "Operand is null", &I);
718 InstVisitor<Verifier>::visit(I);
719}
720
721// Helper to iterate over indirect users. By returning false, the callback can ask to stop traversing further.
722static void forEachUser(const Value *User,
723 SmallPtrSetImpl<const Value *> &Visited,
724 llvm::function_ref<bool(const Value *)> Callback) {
725 if (!Visited.insert(User).second)
726 return;
727
728 SmallVector<const Value *> WorkList(User->materialized_users());
729 while (!WorkList.empty()) {
730 const Value *Cur = WorkList.pop_back_val();
731 if (!Visited.insert(Cur).second)
732 continue;
733 if (Callback(Cur))
734 append_range(WorkList, Cur->materialized_users());
735 }
736}
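// Usage sketch (illustrative): visitGlobalValue() below walks all transitive
// users of a global this way, pruning the traversal once it reaches an
// Instruction or a Function:
//
//   SmallPtrSet<const Value *, 32> Visited;
//   forEachUser(&GV, Visited, [&](const Value *V) -> bool {
//     // Return true to keep descending into V's users, false to stop here.
//     return !isa<Instruction>(V) && !isa<Function>(V);
//   });
//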
737
738void Verifier::visitGlobalValue(const GlobalValue &GV) {
739 Check(!GV.isDeclaration() || GV.hasValidDeclarationLinkage(),
740 "Global is external, but doesn't have external or weak linkage!", &GV);
741
742 if (const GlobalObject *GO = dyn_cast<GlobalObject>(&GV)) {
743 if (const MDNode *Associated =
744 GO->getMetadata(LLVMContext::MD_associated)) {
745 Check(Associated->getNumOperands() == 1,
746 "associated metadata must have one operand", &GV, Associated);
747 const Metadata *Op = Associated->getOperand(0).get();
748 Check(Op, "associated metadata must have a global value", GO, Associated);
749
750 const auto *VM = dyn_cast_or_null<ValueAsMetadata>(Op);
751 Check(VM, "associated metadata must be ValueAsMetadata", GO, Associated);
752 if (VM) {
753 Check(isa<PointerType>(VM->getValue()->getType()),
754 "associated value must be pointer typed", GV, Associated);
755
756 const Value *Stripped = VM->getValue()->stripPointerCastsAndAliases();
757 Check(isa<GlobalObject>(Stripped) || isa<Constant>(Stripped),
758 "associated metadata must point to a GlobalObject", GO, Stripped);
759 Check(Stripped != GO,
760 "global values should not associate to themselves", GO,
761 Associated);
762 }
763 }
764
765 // FIXME: Why is getMetadata on GlobalValue protected?
766 if (const MDNode *AbsoluteSymbol =
767 GO->getMetadata(LLVMContext::MD_absolute_symbol)) {
768 verifyRangeLikeMetadata(*GO, AbsoluteSymbol,
769 DL.getIntPtrType(GO->getType()),
770 RangeLikeMetadataKind::AbsoluteSymbol);
771 }
772 }
773
774 Check(!GV.hasAppendingLinkage() || isa<GlobalVariable>(GV),
775 "Only global variables can have appending linkage!", &GV);
776
777 if (GV.hasAppendingLinkage()) {
778 const GlobalVariable *GVar = dyn_cast<GlobalVariable>(&GV);
779 Check(GVar && GVar->getValueType()->isArrayTy(),
780 "Only global arrays can have appending linkage!", GVar);
781 }
782
783 if (GV.isDeclarationForLinker())
784 Check(!GV.hasComdat(), "Declaration may not be in a Comdat!", &GV);
785
786 if (GV.hasDLLExportStorageClass()) {
787 Check(GV.hasDefaultVisibility() || GV.hasProtectedVisibility(),
788 "dllexport GlobalValue must have default or protected visibility",
789 &GV);
790 }
791 if (GV.hasDLLImportStorageClass()) {
792 Check(GV.hasDefaultVisibility(),
793 "dllimport GlobalValue must have default visibility", &GV);
794 Check(!GV.isDSOLocal(), "GlobalValue with DLLImport Storage is dso_local!",
795 &GV);
796
797 Check((GV.isDeclaration() &&
798 (GV.hasExternalLinkage() || GV.hasExternalWeakLinkage())) ||
799 GV.hasAvailableExternallyLinkage(),
800 "Global is marked as dllimport, but not external", &GV);
801 }
802
803 if (GV.isImplicitDSOLocal())
804 Check(GV.isDSOLocal(),
805 "GlobalValue with local linkage or non-default "
806 "visibility must be dso_local!",
807 &GV);
808
809 forEachUser(&GV, GlobalValueVisited, [&](const Value *V) -> bool {
810 if (const Instruction *I = dyn_cast<Instruction>(V)) {
811 if (!I->getParent() || !I->getParent()->getParent())
812 CheckFailed("Global is referenced by parentless instruction!", &GV, &M,
813 I);
814 else if (I->getParent()->getParent()->getParent() != &M)
815 CheckFailed("Global is referenced in a different module!", &GV, &M, I,
816 I->getParent()->getParent(),
817 I->getParent()->getParent()->getParent());
818 return false;
819 } else if (const Function *F = dyn_cast<Function>(V)) {
820 if (F->getParent() != &M)
821 CheckFailed("Global is used by function in a different module", &GV, &M,
822 F, F->getParent());
823 return false;
824 }
825 return true;
826 });
827}
828
829void Verifier::visitGlobalVariable(const GlobalVariable &GV) {
830 Type *GVType = GV.getValueType();
831
832 if (MaybeAlign A = GV.getAlign()) {
833 Check(A->value() <= Value::MaximumAlignment,
834 "huge alignment values are unsupported", &GV);
835 }
836
837 if (GV.hasInitializer()) {
838 Check(GV.getInitializer()->getType() == GVType,
839 "Global variable initializer type does not match global "
840 "variable type!",
841 &GV);
842 Check(GV.getInitializer()->getType()->isSized(),
843 "Global variable initializer must be sized", &GV);
844 visitConstantExprsRecursively(GV.getInitializer());
845 // If the global has common linkage, it must have a zero initializer and
846 // cannot be constant.
847 if (GV.hasCommonLinkage()) {
848 Check(GV.getInitializer()->isNullValue(),
849 "'common' global must have a zero initializer!", &GV);
850 Check(!GV.isConstant(), "'common' global may not be marked constant!",
851 &GV);
852 Check(!GV.hasComdat(), "'common' global may not be in a Comdat!", &GV);
853 }
854 }
855
856 if (GV.hasName() && (GV.getName() == "llvm.global_ctors" ||
857 GV.getName() == "llvm.global_dtors")) {
859 "invalid linkage for intrinsic global variable", &GV);
861 "invalid uses of intrinsic global variable", &GV);
862
863 // Don't worry about emitting an error for it not being an array,
864 // visitGlobalValue will complain on appending non-array.
865 if (ArrayType *ATy = dyn_cast<ArrayType>(GVType)) {
866 StructType *STy = dyn_cast<StructType>(ATy->getElementType());
867 PointerType *FuncPtrTy =
868 PointerType::get(Context, DL.getProgramAddressSpace());
869 Check(STy && (STy->getNumElements() == 2 || STy->getNumElements() == 3) &&
870 STy->getTypeAtIndex(0u)->isIntegerTy(32) &&
871 STy->getTypeAtIndex(1) == FuncPtrTy,
872 "wrong type for intrinsic global variable", &GV);
873 Check(STy->getNumElements() == 3,
874 "the third field of the element type is mandatory, "
875 "specify ptr null to migrate from the obsoleted 2-field form");
876 Type *ETy = STy->getTypeAtIndex(2);
877 Check(ETy->isPointerTy(), "wrong type for intrinsic global variable",
878 &GV);
879 }
880 }
881
882 if (GV.hasName() && (GV.getName() == "llvm.used" ||
883 GV.getName() == "llvm.compiler.used")) {
885 "invalid linkage for intrinsic global variable", &GV);
887 "invalid uses of intrinsic global variable", &GV);
888
889 if (ArrayType *ATy = dyn_cast<ArrayType>(GVType)) {
890 PointerType *PTy = dyn_cast<PointerType>(ATy->getElementType());
891 Check(PTy, "wrong type for intrinsic global variable", &GV);
892 if (GV.hasInitializer()) {
893 const Constant *Init = GV.getInitializer();
894 const ConstantArray *InitArray = dyn_cast<ConstantArray>(Init);
895 Check(InitArray, "wrong initializer for intrinsic global variable",
896 Init);
897 for (Value *Op : InitArray->operands()) {
898 Value *V = Op->stripPointerCasts();
901 Twine("invalid ") + GV.getName() + " member", V);
902 Check(V->hasName(),
903 Twine("members of ") + GV.getName() + " must be named", V);
904 }
905 }
906 }
907 }
908
909 // Visit any debug info attachments.
910 SmallVector<MDNode *, 1> MDs;
911 GV.getMetadata(LLVMContext::MD_dbg, MDs);
912 for (auto *MD : MDs) {
913 if (auto *GVE = dyn_cast<DIGlobalVariableExpression>(MD))
914 visitDIGlobalVariableExpression(*GVE);
915 else
916 CheckDI(false, "!dbg attachment of global variable must be a "
917 "DIGlobalVariableExpression");
918 }
919
920 // Scalable vectors cannot be global variables, since we don't know
921 // the runtime size.
922 Check(!GVType->isScalableTy(), "Globals cannot contain scalable types", &GV);
923
924 // Check if it is or contains a target extension type that disallows being
925 // used as a global.
927 "Global @" + GV.getName() + " has illegal target extension type",
928 GVType);
929
930 if (!GV.hasInitializer()) {
931 visitGlobalValue(GV);
932 return;
933 }
934
935 // Walk any aggregate initializers looking for bitcasts between address spaces
936 visitConstantExprsRecursively(GV.getInitializer());
937
938 visitGlobalValue(GV);
939}
940
941void Verifier::visitAliaseeSubExpr(const GlobalAlias &GA, const Constant &C) {
942 SmallPtrSet<const GlobalAlias*, 4> Visited;
943 Visited.insert(&GA);
944 visitAliaseeSubExpr(Visited, GA, C);
945}
946
947void Verifier::visitAliaseeSubExpr(SmallPtrSetImpl<const GlobalAlias*> &Visited,
948 const GlobalAlias &GA, const Constant &C) {
949 if (GA.hasAvailableExternallyLinkage()) {
950 Check(isa<GlobalValue>(C) &&
951 cast<GlobalValue>(C).hasAvailableExternallyLinkage(),
952 "available_externally alias must point to available_externally "
953 "global value",
954 &GA);
955 }
956 if (const auto *GV = dyn_cast<GlobalValue>(&C)) {
957 if (!GA.hasAvailableExternallyLinkage()) {
958 Check(!GV->isDeclarationForLinker(), "Alias must point to a definition",
959 &GA);
960 }
961
962 if (const auto *GA2 = dyn_cast<GlobalAlias>(GV)) {
963 Check(Visited.insert(GA2).second, "Aliases cannot form a cycle", &GA);
964
965 Check(!GA2->isInterposable(),
966 "Alias cannot point to an interposable alias", &GA);
967 } else {
968 // Only continue verifying subexpressions of GlobalAliases.
969 // Do not recurse into global initializers.
970 return;
971 }
972 }
973
974 if (const auto *CE = dyn_cast<ConstantExpr>(&C))
975 visitConstantExprsRecursively(CE);
976
977 for (const Use &U : C.operands()) {
978 Value *V = &*U;
979 if (const auto *GA2 = dyn_cast<GlobalAlias>(V))
980 visitAliaseeSubExpr(Visited, GA, *GA2->getAliasee());
981 else if (const auto *C2 = dyn_cast<Constant>(V))
982 visitAliaseeSubExpr(Visited, GA, *C2);
983 }
984}
985
986void Verifier::visitGlobalAlias(const GlobalAlias &GA) {
987 Check(GlobalAlias::isValidLinkage(GA.getLinkage()),
988 "Alias should have private, internal, linkonce, weak, linkonce_odr, "
989 "weak_odr, external, or available_externally linkage!",
990 &GA);
991 const Constant *Aliasee = GA.getAliasee();
992 Check(Aliasee, "Aliasee cannot be NULL!", &GA);
993 Check(GA.getType() == Aliasee->getType(),
994 "Alias and aliasee types should match!", &GA);
995
996 Check(isa<GlobalValue>(Aliasee) || isa<ConstantExpr>(Aliasee),
997 "Aliasee should be either GlobalValue or ConstantExpr", &GA);
998
999 visitAliaseeSubExpr(GA, *Aliasee);
1000
1001 visitGlobalValue(GA);
1002}
1003
1004void Verifier::visitGlobalIFunc(const GlobalIFunc &GI) {
1005 visitGlobalValue(GI);
1006
1007 SmallVector<std::pair<unsigned, MDNode *>, 4> MDs;
1008 GI.getAllMetadata(MDs);
1009 for (const auto &I : MDs) {
1010 CheckDI(I.first != LLVMContext::MD_dbg,
1011 "an ifunc may not have a !dbg attachment", &GI);
1012 Check(I.first != LLVMContext::MD_prof,
1013 "an ifunc may not have a !prof attachment", &GI);
1014 visitMDNode(*I.second, AreDebugLocsAllowed::No);
1015 }
1016
1017 Check(GlobalIFunc::isValidLinkage(GI.getLinkage()),
1018 "IFunc should have private, internal, linkonce, weak, linkonce_odr, "
1019 "weak_odr, or external linkage!",
1020 &GI);
1021 // Pierce through ConstantExprs and GlobalAliases and check that the resolver
1022 // is a Function definition.
1023 const Function *Resolver = GI.getResolverFunction();
1024 Check(Resolver, "IFunc must have a Function resolver", &GI);
1025 Check(!Resolver->isDeclarationForLinker(),
1026 "IFunc resolver must be a definition", &GI);
1027
1028 // Check that the immediate resolver operand (prior to any bitcasts) has the
1029 // correct type.
1030 const Type *ResolverTy = GI.getResolver()->getType();
1031
1033 "IFunc resolver must return a pointer", &GI);
1034
1035 Check(ResolverTy == PointerType::get(Context, GI.getAddressSpace()),
1036 "IFunc resolver has incorrect type", &GI);
1037}
1038
1039void Verifier::visitNamedMDNode(const NamedMDNode &NMD) {
1040 // There used to be various other llvm.dbg.* nodes, but we don't support
1041 // upgrading them and we want to reserve the namespace for future uses.
1042 if (NMD.getName().starts_with("llvm.dbg."))
1043 CheckDI(NMD.getName() == "llvm.dbg.cu",
1044 "unrecognized named metadata node in the llvm.dbg namespace", &NMD);
1045 for (const MDNode *MD : NMD.operands()) {
1046 if (NMD.getName() == "llvm.dbg.cu")
1047 CheckDI(MD && isa<DICompileUnit>(MD), "invalid compile unit", &NMD, MD);
1048
1049 if (!MD)
1050 continue;
1051
1052 visitMDNode(*MD, AreDebugLocsAllowed::Yes);
1053 }
1054}
1055
1056void Verifier::visitMDNode(const MDNode &MD, AreDebugLocsAllowed AllowLocs) {
1057 // Only visit each node once. Metadata can be mutually recursive, so this
1058 // avoids infinite recursion here, as well as being an optimization.
1059 if (!MDNodes.insert(&MD).second)
1060 return;
1061
1062 Check(&MD.getContext() == &Context,
1063 "MDNode context does not match Module context!", &MD);
1064
1065 switch (MD.getMetadataID()) {
1066 default:
1067 llvm_unreachable("Invalid MDNode subclass");
1068 case Metadata::MDTupleKind:
1069 break;
1070#define HANDLE_SPECIALIZED_MDNODE_LEAF(CLASS) \
1071 case Metadata::CLASS##Kind: \
1072 visit##CLASS(cast<CLASS>(MD)); \
1073 break;
1074#include "llvm/IR/Metadata.def"
1075 }
1076
1077 for (const Metadata *Op : MD.operands()) {
1078 if (!Op)
1079 continue;
1080 Check(!isa<LocalAsMetadata>(Op), "Invalid operand for global metadata!",
1081 &MD, Op);
1082 CheckDI(!isa<DILocation>(Op) || AllowLocs == AreDebugLocsAllowed::Yes,
1083 "DILocation not allowed within this metadata node", &MD, Op);
1084 if (auto *N = dyn_cast<MDNode>(Op)) {
1085 visitMDNode(*N, AllowLocs);
1086 continue;
1087 }
1088 if (auto *V = dyn_cast<ValueAsMetadata>(Op)) {
1089 visitValueAsMetadata(*V, nullptr);
1090 continue;
1091 }
1092 }
1093
1094 // Check llvm.loop.estimated_trip_count.
1095 if (MD.getNumOperands() > 0 &&
1097 Check(MD.getNumOperands() == 2, "Expected two operands", &MD);
1099 Check(Count && Count->getType()->isIntegerTy() &&
1100 cast<IntegerType>(Count->getType())->getBitWidth() <= 32,
1101 "Expected second operand to be an integer constant of type i32 or "
1102 "smaller",
1103 &MD);
1104 }
1105
1106 // Check these last, so we diagnose problems in operands first.
1107 Check(!MD.isTemporary(), "Expected no forward declarations!", &MD);
1108 Check(MD.isResolved(), "All nodes should be resolved!", &MD);
1109}
1110
1111void Verifier::visitValueAsMetadata(const ValueAsMetadata &MD, Function *F) {
1112 Check(MD.getValue(), "Expected valid value", &MD);
1113 Check(!MD.getValue()->getType()->isMetadataTy(),
1114 "Unexpected metadata round-trip through values", &MD, MD.getValue());
1115
1116 auto *L = dyn_cast<LocalAsMetadata>(&MD);
1117 if (!L)
1118 return;
1119
1120 Check(F, "function-local metadata used outside a function", L);
1121
1122 // If this was an instruction, bb, or argument, verify that it is in the
1123 // function that we expect.
1124 Function *ActualF = nullptr;
1125 if (Instruction *I = dyn_cast<Instruction>(L->getValue())) {
1126 Check(I->getParent(), "function-local metadata not in basic block", L, I);
1127 ActualF = I->getParent()->getParent();
1128 } else if (BasicBlock *BB = dyn_cast<BasicBlock>(L->getValue()))
1129 ActualF = BB->getParent();
1130 else if (Argument *A = dyn_cast<Argument>(L->getValue()))
1131 ActualF = A->getParent();
1132 assert(ActualF && "Unimplemented function local metadata case!");
1133
1134 Check(ActualF == F, "function-local metadata used in wrong function", L);
1135}
1136
1137void Verifier::visitDIArgList(const DIArgList &AL, Function *F) {
1138 for (const ValueAsMetadata *VAM : AL.getArgs())
1139 visitValueAsMetadata(*VAM, F);
1140}
1141
1142void Verifier::visitMetadataAsValue(const MetadataAsValue &MDV, Function *F) {
1143 Metadata *MD = MDV.getMetadata();
1144 if (auto *N = dyn_cast<MDNode>(MD)) {
1145 visitMDNode(*N, AreDebugLocsAllowed::No);
1146 return;
1147 }
1148
1149 // Only visit each node once. Metadata can be mutually recursive, so this
1150 // avoids infinite recursion here, as well as being an optimization.
1151 if (!MDNodes.insert(MD).second)
1152 return;
1153
1154 if (auto *V = dyn_cast<ValueAsMetadata>(MD))
1155 visitValueAsMetadata(*V, F);
1156
1157 if (auto *AL = dyn_cast<DIArgList>(MD))
1158 visitDIArgList(*AL, F);
1159}
1160
1161static bool isType(const Metadata *MD) { return !MD || isa<DIType>(MD); }
1162static bool isScope(const Metadata *MD) { return !MD || isa<DIScope>(MD); }
1163static bool isDINode(const Metadata *MD) { return !MD || isa<DINode>(MD); }
1164static bool isMDTuple(const Metadata *MD) { return !MD || isa<MDTuple>(MD); }
1165
1166void Verifier::visitDILocation(const DILocation &N) {
1167 CheckDI(N.getRawScope() && isa<DILocalScope>(N.getRawScope()),
1168 "location requires a valid scope", &N, N.getRawScope());
1169 if (auto *IA = N.getRawInlinedAt())
1170 CheckDI(isa<DILocation>(IA), "inlined-at should be a location", &N, IA);
1171 if (auto *SP = dyn_cast<DISubprogram>(N.getRawScope()))
1172 CheckDI(SP->isDefinition(), "scope points into the type hierarchy", &N);
1173}
1174
1175void Verifier::visitGenericDINode(const GenericDINode &N) {
1176 CheckDI(N.getTag(), "invalid tag", &N);
1177}
1178
1179void Verifier::visitDIScope(const DIScope &N) {
1180 if (auto *F = N.getRawFile())
1181 CheckDI(isa<DIFile>(F), "invalid file", &N, F);
1182}
1183
1184void Verifier::visitDISubrangeType(const DISubrangeType &N) {
1185 CheckDI(N.getTag() == dwarf::DW_TAG_subrange_type, "invalid tag", &N);
1186 auto *BaseType = N.getRawBaseType();
1187 CheckDI(!BaseType || isType(BaseType), "BaseType must be a type");
1188 auto *LBound = N.getRawLowerBound();
1189 CheckDI(!LBound || isa<ConstantAsMetadata>(LBound) ||
1190 isa<DIVariable>(LBound) || isa<DIExpression>(LBound),
1191 "LowerBound must be signed constant or DIVariable or DIExpression",
1192 &N);
1193 auto *UBound = N.getRawUpperBound();
1194 CheckDI(!UBound || isa<ConstantAsMetadata>(UBound) ||
1195 isa<DIVariable>(UBound) || isa<DIExpression>(UBound),
1196 "UpperBound must be signed constant or DIVariable or DIExpression",
1197 &N);
1198 auto *Stride = N.getRawStride();
1199 CheckDI(!Stride || isa<ConstantAsMetadata>(Stride) ||
1200 isa<DIVariable>(Stride) || isa<DIExpression>(Stride),
1201 "Stride must be signed constant or DIVariable or DIExpression", &N);
1202 auto *Bias = N.getRawBias();
1203 CheckDI(!Bias || isa<ConstantAsMetadata>(Bias) || isa<DIVariable>(Bias) ||
1204 isa<DIExpression>(Bias),
1205 "Bias must be signed constant or DIVariable or DIExpression", &N);
1206 // Subrange types currently only support constant size.
1207 auto *Size = N.getRawSizeInBits();
1208 CheckDI(!Size || isa<ConstantAsMetadata>(Size),
1209 "SizeInBits must be a constant");
1210}
1211
1212void Verifier::visitDISubrange(const DISubrange &N) {
1213 CheckDI(N.getTag() == dwarf::DW_TAG_subrange_type, "invalid tag", &N);
1214 CheckDI(!N.getRawCountNode() || !N.getRawUpperBound(),
1215 "Subrange can have any one of count or upperBound", &N);
1216 auto *CBound = N.getRawCountNode();
1217 CheckDI(!CBound || isa<ConstantAsMetadata>(CBound) ||
1218 isa<DIVariable>(CBound) || isa<DIExpression>(CBound),
1219 "Count must be signed constant or DIVariable or DIExpression", &N);
1220 auto Count = N.getCount();
1221 CheckDI(!Count || !isa<ConstantInt *>(Count) ||
1222 cast<ConstantInt *>(Count)->getSExtValue() >= -1,
1223 "invalid subrange count", &N);
1224 auto *LBound = N.getRawLowerBound();
1225 CheckDI(!LBound || isa<ConstantAsMetadata>(LBound) ||
1226 isa<DIVariable>(LBound) || isa<DIExpression>(LBound),
1227 "LowerBound must be signed constant or DIVariable or DIExpression",
1228 &N);
1229 auto *UBound = N.getRawUpperBound();
1230 CheckDI(!UBound || isa<ConstantAsMetadata>(UBound) ||
1231 isa<DIVariable>(UBound) || isa<DIExpression>(UBound),
1232 "UpperBound must be signed constant or DIVariable or DIExpression",
1233 &N);
1234 auto *Stride = N.getRawStride();
1235 CheckDI(!Stride || isa<ConstantAsMetadata>(Stride) ||
1236 isa<DIVariable>(Stride) || isa<DIExpression>(Stride),
1237 "Stride must be signed constant or DIVariable or DIExpression", &N);
1238}
1239
1240void Verifier::visitDIGenericSubrange(const DIGenericSubrange &N) {
1241 CheckDI(N.getTag() == dwarf::DW_TAG_generic_subrange, "invalid tag", &N);
1242 CheckDI(!N.getRawCountNode() || !N.getRawUpperBound(),
1243 "GenericSubrange can have any one of count or upperBound", &N);
1244 auto *CBound = N.getRawCountNode();
1245 CheckDI(!CBound || isa<DIVariable>(CBound) || isa<DIExpression>(CBound),
1246 "Count must be signed constant or DIVariable or DIExpression", &N);
1247 auto *LBound = N.getRawLowerBound();
1248 CheckDI(LBound, "GenericSubrange must contain lowerBound", &N);
1249 CheckDI(isa<DIVariable>(LBound) || isa<DIExpression>(LBound),
1250 "LowerBound must be signed constant or DIVariable or DIExpression",
1251 &N);
1252 auto *UBound = N.getRawUpperBound();
1253 CheckDI(!UBound || isa<DIVariable>(UBound) || isa<DIExpression>(UBound),
1254 "UpperBound must be signed constant or DIVariable or DIExpression",
1255 &N);
1256 auto *Stride = N.getRawStride();
1257 CheckDI(Stride, "GenericSubrange must contain stride", &N);
1258 CheckDI(isa<DIVariable>(Stride) || isa<DIExpression>(Stride),
1259 "Stride must be signed constant or DIVariable or DIExpression", &N);
1260}
1261
1262void Verifier::visitDIEnumerator(const DIEnumerator &N) {
1263 CheckDI(N.getTag() == dwarf::DW_TAG_enumerator, "invalid tag", &N);
1264}
1265
1266void Verifier::visitDIBasicType(const DIBasicType &N) {
1267 CheckDI(N.getTag() == dwarf::DW_TAG_base_type ||
1268 N.getTag() == dwarf::DW_TAG_unspecified_type ||
1269 N.getTag() == dwarf::DW_TAG_string_type,
1270 "invalid tag", &N);
1271 // Basic types currently only support constant size.
1272 auto *Size = N.getRawSizeInBits();
1273 CheckDI(!Size || isa<ConstantAsMetadata>(Size),
1274 "SizeInBits must be a constant");
1275}
1276
1277void Verifier::visitDIFixedPointType(const DIFixedPointType &N) {
1278 visitDIBasicType(N);
1279
1280 CheckDI(N.getTag() == dwarf::DW_TAG_base_type, "invalid tag", &N);
1281 CheckDI(N.getEncoding() == dwarf::DW_ATE_signed_fixed ||
1282 N.getEncoding() == dwarf::DW_ATE_unsigned_fixed,
1283 "invalid encoding", &N);
1287 "invalid kind", &N);
1289 N.getFactorRaw() == 0,
1290 "factor should be 0 for rationals", &N);
1292 (N.getNumeratorRaw() == 0 && N.getDenominatorRaw() == 0),
1293 "numerator and denominator should be 0 for non-rationals", &N);
1294}
1295
1296void Verifier::visitDIStringType(const DIStringType &N) {
1297 CheckDI(N.getTag() == dwarf::DW_TAG_string_type, "invalid tag", &N);
1298 CheckDI(!(N.isBigEndian() && N.isLittleEndian()), "has conflicting flags",
1299 &N);
1300}
1301
1302void Verifier::visitDIDerivedType(const DIDerivedType &N) {
1303 // Common scope checks.
1304 visitDIScope(N);
1305
1306 CheckDI(N.getTag() == dwarf::DW_TAG_typedef ||
1307 N.getTag() == dwarf::DW_TAG_pointer_type ||
1308 N.getTag() == dwarf::DW_TAG_ptr_to_member_type ||
1309 N.getTag() == dwarf::DW_TAG_reference_type ||
1310 N.getTag() == dwarf::DW_TAG_rvalue_reference_type ||
1311 N.getTag() == dwarf::DW_TAG_const_type ||
1312 N.getTag() == dwarf::DW_TAG_immutable_type ||
1313 N.getTag() == dwarf::DW_TAG_volatile_type ||
1314 N.getTag() == dwarf::DW_TAG_restrict_type ||
1315 N.getTag() == dwarf::DW_TAG_atomic_type ||
1316 N.getTag() == dwarf::DW_TAG_LLVM_ptrauth_type ||
1317 N.getTag() == dwarf::DW_TAG_member ||
1318 (N.getTag() == dwarf::DW_TAG_variable && N.isStaticMember()) ||
1319 N.getTag() == dwarf::DW_TAG_inheritance ||
1320 N.getTag() == dwarf::DW_TAG_friend ||
1321 N.getTag() == dwarf::DW_TAG_set_type ||
1322 N.getTag() == dwarf::DW_TAG_template_alias,
1323 "invalid tag", &N);
1324 if (N.getTag() == dwarf::DW_TAG_ptr_to_member_type) {
1325 CheckDI(isType(N.getRawExtraData()), "invalid pointer to member type", &N,
1326 N.getRawExtraData());
1327 } else if (N.getTag() == dwarf::DW_TAG_template_alias) {
1328 CheckDI(isMDTuple(N.getRawExtraData()), "invalid template parameters", &N,
1329 N.getRawExtraData());
1330 } else if (N.getTag() == dwarf::DW_TAG_inheritance ||
1331 N.getTag() == dwarf::DW_TAG_member ||
1332 N.getTag() == dwarf::DW_TAG_variable) {
1333 auto *ExtraData = N.getRawExtraData();
1334 auto IsValidExtraData = [&]() {
1335 if (ExtraData == nullptr)
1336 return true;
1337 if (isa<ConstantAsMetadata>(ExtraData) || isa<MDString>(ExtraData) ||
1338 isa<DIObjCProperty>(ExtraData))
1339 return true;
1340 if (auto *Tuple = dyn_cast<MDTuple>(ExtraData)) {
1341 if (Tuple->getNumOperands() != 1)
1342 return false;
1343 return isa_and_nonnull<ConstantAsMetadata>(Tuple->getOperand(0).get());
1344 }
1345 return false;
1346 };
1347 CheckDI(IsValidExtraData(),
1348 "extraData must be ConstantAsMetadata, MDString, DIObjCProperty, "
1349 "or MDTuple with single ConstantAsMetadata operand",
1350 &N, ExtraData);
1351 }
1352
1353 if (N.getTag() == dwarf::DW_TAG_set_type) {
1354 if (auto *T = N.getRawBaseType()) {
1358 CheckDI(
1359 (Enum && Enum->getTag() == dwarf::DW_TAG_enumeration_type) ||
1360 (Subrange && Subrange->getTag() == dwarf::DW_TAG_subrange_type) ||
1361 (Basic && (Basic->getEncoding() == dwarf::DW_ATE_unsigned ||
1362 Basic->getEncoding() == dwarf::DW_ATE_signed ||
1363 Basic->getEncoding() == dwarf::DW_ATE_unsigned_char ||
1364 Basic->getEncoding() == dwarf::DW_ATE_signed_char ||
1365 Basic->getEncoding() == dwarf::DW_ATE_boolean)),
1366 "invalid set base type", &N, T);
1367 }
1368 }
1369
1370 CheckDI(isScope(N.getRawScope()), "invalid scope", &N, N.getRawScope());
1371 CheckDI(isType(N.getRawBaseType()), "invalid base type", &N,
1372 N.getRawBaseType());
1373
1374 if (N.getDWARFAddressSpace()) {
1375 CheckDI(N.getTag() == dwarf::DW_TAG_pointer_type ||
1376 N.getTag() == dwarf::DW_TAG_reference_type ||
1377 N.getTag() == dwarf::DW_TAG_rvalue_reference_type,
1378 "DWARF address space only applies to pointer or reference types",
1379 &N);
1380 }
1381
1382 auto *Size = N.getRawSizeInBits();
1383 CheckDI(!Size || isa<ConstantAsMetadata>(Size) || isa<DIVariable>(Size) ||
1384 isa<DIExpression>(Size),
1385 "SizeInBits must be a constant or DIVariable or DIExpression");
1386}
1387
1388/// Detect mutually exclusive flags.
1389static bool hasConflictingReferenceFlags(unsigned Flags) {
1390 return ((Flags & DINode::FlagLValueReference) &&
1391 (Flags & DINode::FlagRValueReference)) ||
1392 ((Flags & DINode::FlagTypePassByValue) &&
1393 (Flags & DINode::FlagTypePassByReference));
1394}
1395
1396void Verifier::visitTemplateParams(const MDNode &N, const Metadata &RawParams) {
1397 auto *Params = dyn_cast<MDTuple>(&RawParams);
1398 CheckDI(Params, "invalid template params", &N, &RawParams);
1399 for (Metadata *Op : Params->operands()) {
1400 CheckDI(Op && isa<DITemplateParameter>(Op), "invalid template parameter",
1401 &N, Params, Op);
1402 }
1403}
1404
1405void Verifier::visitDICompositeType(const DICompositeType &N) {
1406 // Common scope checks.
1407 visitDIScope(N);
1408
1409 CheckDI(N.getTag() == dwarf::DW_TAG_array_type ||
1410 N.getTag() == dwarf::DW_TAG_structure_type ||
1411 N.getTag() == dwarf::DW_TAG_union_type ||
1412 N.getTag() == dwarf::DW_TAG_enumeration_type ||
1413 N.getTag() == dwarf::DW_TAG_class_type ||
1414 N.getTag() == dwarf::DW_TAG_variant_part ||
1415 N.getTag() == dwarf::DW_TAG_variant ||
1416 N.getTag() == dwarf::DW_TAG_namelist,
1417 "invalid tag", &N);
1418
1419 CheckDI(isScope(N.getRawScope()), "invalid scope", &N, N.getRawScope());
1420 CheckDI(isType(N.getRawBaseType()), "invalid base type", &N,
1421 N.getRawBaseType());
1422
1423 CheckDI(!N.getRawElements() || isa<MDTuple>(N.getRawElements()),
1424 "invalid composite elements", &N, N.getRawElements());
1425 CheckDI(isType(N.getRawVTableHolder()), "invalid vtable holder", &N,
1426 N.getRawVTableHolder());
1427 CheckDI(!hasConflictingReferenceFlags(N.getFlags()),
1428 "invalid reference flags", &N);
1429 unsigned DIBlockByRefStruct = 1 << 4;
1430 CheckDI((N.getFlags() & DIBlockByRefStruct) == 0,
1431 "DIBlockByRefStruct on DICompositeType is no longer supported", &N);
1432 CheckDI(llvm::all_of(N.getElements(), [](const DINode *N) { return N; }),
1433 "DISubprogram contains null entry in `elements` field", &N);
1434
1435 if (N.isVector()) {
1436 const DINodeArray Elements = N.getElements();
1437 CheckDI(Elements.size() == 1 &&
1438 Elements[0]->getTag() == dwarf::DW_TAG_subrange_type,
1439 "invalid vector, expected one element of type subrange", &N);
1440 }
1441
1442 if (auto *Params = N.getRawTemplateParams())
1443 visitTemplateParams(N, *Params);
1444
1445 if (auto *D = N.getRawDiscriminator()) {
1446 CheckDI(isa<DIDerivedType>(D) && N.getTag() == dwarf::DW_TAG_variant_part,
1447 "discriminator can only appear on variant part");
1448 }
1449
1450 if (N.getRawDataLocation()) {
1451 CheckDI(N.getTag() == dwarf::DW_TAG_array_type,
1452 "dataLocation can only appear in array type");
1453 }
1454
1455 if (N.getRawAssociated()) {
1456 CheckDI(N.getTag() == dwarf::DW_TAG_array_type,
1457 "associated can only appear in array type");
1458 }
1459
1460 if (N.getRawAllocated()) {
1461 CheckDI(N.getTag() == dwarf::DW_TAG_array_type,
1462 "allocated can only appear in array type");
1463 }
1464
1465 if (N.getRawRank()) {
1466 CheckDI(N.getTag() == dwarf::DW_TAG_array_type,
1467 "rank can only appear in array type");
1468 }
1469
1470 if (N.getTag() == dwarf::DW_TAG_array_type) {
1471 CheckDI(N.getRawBaseType(), "array types must have a base type", &N);
1472 }
1473
1474 auto *Size = N.getRawSizeInBits();
1475 CheckDI(!Size || isa<ConstantAsMetadata>(Size) || isa<DIVariable>(Size) ||
1476 isa<DIExpression>(Size),
1477 "SizeInBits must be a constant or DIVariable or DIExpression");
1478}
1479
1480void Verifier::visitDISubroutineType(const DISubroutineType &N) {
1481 CheckDI(N.getTag() == dwarf::DW_TAG_subroutine_type, "invalid tag", &N);
1482 if (auto *Types = N.getRawTypeArray()) {
1483 CheckDI(isa<MDTuple>(Types), "invalid composite elements", &N, Types);
1484 for (Metadata *Ty : N.getTypeArray()->operands()) {
1485 CheckDI(isType(Ty), "invalid subroutine type ref", &N, Types, Ty);
1486 }
1487 }
1488 CheckDI(!hasConflictingReferenceFlags(N.getFlags()),
1489 "invalid reference flags", &N);
1490}
1491
1492void Verifier::visitDIFile(const DIFile &N) {
1493 CheckDI(N.getTag() == dwarf::DW_TAG_file_type, "invalid tag", &N);
1494 std::optional<DIFile::ChecksumInfo<StringRef>> Checksum = N.getChecksum();
1495 if (Checksum) {
1496 CheckDI(Checksum->Kind <= DIFile::ChecksumKind::CSK_Last,
1497 "invalid checksum kind", &N);
1498 size_t Size;
1499 switch (Checksum->Kind) {
1500 case DIFile::CSK_MD5:
1501 Size = 32;
1502 break;
1503 case DIFile::CSK_SHA1:
1504 Size = 40;
1505 break;
1506 case DIFile::CSK_SHA256:
1507 Size = 64;
1508 break;
1509 }
1510 CheckDI(Checksum->Value.size() == Size, "invalid checksum length", &N);
1511 CheckDI(Checksum->Value.find_if_not(llvm::isHexDigit) == StringRef::npos,
1512 "invalid checksum", &N);
1513 }
1514}
1515
1516void Verifier::visitDICompileUnit(const DICompileUnit &N) {
1517 CheckDI(N.isDistinct(), "compile units must be distinct", &N);
1518 CheckDI(N.getTag() == dwarf::DW_TAG_compile_unit, "invalid tag", &N);
1519
1520 // Don't bother verifying the compilation directory or producer string
1521 // as those could be empty.
1522 CheckDI(N.getRawFile() && isa<DIFile>(N.getRawFile()), "invalid file", &N,
1523 N.getRawFile());
1524 CheckDI(!N.getFile()->getFilename().empty(), "invalid filename", &N,
1525 N.getFile());
1526
1527 CheckDI((N.getEmissionKind() <= DICompileUnit::LastEmissionKind),
1528 "invalid emission kind", &N);
1529
1530 if (auto *Array = N.getRawEnumTypes()) {
1531 CheckDI(isa<MDTuple>(Array), "invalid enum list", &N, Array);
1532 for (Metadata *Op : N.getEnumTypes()->operands()) {
1534 CheckDI(Enum && Enum->getTag() == dwarf::DW_TAG_enumeration_type,
1535 "invalid enum type", &N, N.getEnumTypes(), Op);
1536 }
1537 }
1538 if (auto *Array = N.getRawRetainedTypes()) {
1539 CheckDI(isa<MDTuple>(Array), "invalid retained type list", &N, Array);
1540 for (Metadata *Op : N.getRetainedTypes()->operands()) {
1541 CheckDI(
1542 Op && (isa<DIType>(Op) || (isa<DISubprogram>(Op) &&
1543 !cast<DISubprogram>(Op)->isDefinition())),
1544 "invalid retained type", &N, Op);
1545 }
1546 }
1547 if (auto *Array = N.getRawGlobalVariables()) {
1548 CheckDI(isa<MDTuple>(Array), "invalid global variable list", &N, Array);
1549 for (Metadata *Op : N.getGlobalVariables()->operands()) {
1550 CheckDI(Op && isa<DIGlobalVariableExpression>(Op),
1551 "invalid global variable ref", &N, Op);
1552 }
1553 }
1554 if (auto *Array = N.getRawImportedEntities()) {
1555 CheckDI(isa<MDTuple>(Array), "invalid imported entity list", &N, Array);
1556 for (Metadata *Op : N.getImportedEntities()->operands()) {
1557 CheckDI(Op && isa<DIImportedEntity>(Op), "invalid imported entity ref",
1558 &N, Op);
1559 }
1560 }
1561 if (auto *Array = N.getRawMacros()) {
1562 CheckDI(isa<MDTuple>(Array), "invalid macro list", &N, Array);
1563 for (Metadata *Op : N.getMacros()->operands()) {
1564 CheckDI(Op && isa<DIMacroNode>(Op), "invalid macro ref", &N, Op);
1565 }
1566 }
1567 CUVisited.insert(&N);
1568}
1569
1570void Verifier::visitDISubprogram(const DISubprogram &N) {
1571 CheckDI(N.getTag() == dwarf::DW_TAG_subprogram, "invalid tag", &N);
1572 CheckDI(isScope(N.getRawScope()), "invalid scope", &N, N.getRawScope());
1573 if (auto *F = N.getRawFile())
1574 CheckDI(isa<DIFile>(F), "invalid file", &N, F);
1575 else
1576 CheckDI(N.getLine() == 0, "line specified with no file", &N, N.getLine());
1577 if (auto *T = N.getRawType())
1578 CheckDI(isa<DISubroutineType>(T), "invalid subroutine type", &N, T);
1579 CheckDI(isType(N.getRawContainingType()), "invalid containing type", &N,
1580 N.getRawContainingType());
1581 if (auto *Params = N.getRawTemplateParams())
1582 visitTemplateParams(N, *Params);
1583 if (auto *S = N.getRawDeclaration())
1584 CheckDI(isa<DISubprogram>(S) && !cast<DISubprogram>(S)->isDefinition(),
1585 "invalid subprogram declaration", &N, S);
1586 if (auto *RawNode = N.getRawRetainedNodes()) {
1587 auto *Node = dyn_cast<MDTuple>(RawNode);
1588 CheckDI(Node, "invalid retained nodes list", &N, RawNode);
1589 for (Metadata *Op : Node->operands()) {
1590 CheckDI(Op, "nullptr in retained nodes", &N, Node);
1591
1592 auto True = [](const Metadata *) { return true; };
1593 auto False = [](const Metadata *) { return false; };
1594 bool IsTypeCorrect =
1595 DISubprogram::visitRetainedNode<bool>(Op, True, True, True, False);
1596 CheckDI(IsTypeCorrect,
1597 "invalid retained nodes, expected DILocalVariable, DILabel or "
1598 "DIImportedEntity",
1599 &N, Node, Op);
1600
1601 auto *RetainedNode = cast<DINode>(Op);
1602 auto *RetainedNodeScope = dyn_cast_or_null<DILocalScope>(
1604 CheckDI(RetainedNodeScope,
1605 "invalid retained nodes, retained node is not local", &N, Node,
1606 RetainedNode);
1607 CheckDI(
1608 RetainedNodeScope->getSubprogram() == &N,
1609 "invalid retained nodes, retained node does not belong to subprogram",
1610 &N, Node, RetainedNode, RetainedNodeScope);
1611 }
1612 }
1614 "invalid reference flags", &N);
1615
1616 auto *Unit = N.getRawUnit();
1617 if (N.isDefinition()) {
1618 // Subprogram definitions (not part of the type hierarchy).
1619 CheckDI(N.isDistinct(), "subprogram definitions must be distinct", &N);
1620 CheckDI(Unit, "subprogram definitions must have a compile unit", &N);
1621 CheckDI(isa<DICompileUnit>(Unit), "invalid unit type", &N, Unit);
1622 // There's no good way to cross the CU boundary to insert a nested
1623 // DISubprogram definition in one CU into a type defined in another CU.
1624 auto *CT = dyn_cast_or_null<DICompositeType>(N.getRawScope());
1625 if (CT && CT->getRawIdentifier() &&
1626 M.getContext().isODRUniquingDebugTypes())
1627 CheckDI(N.getDeclaration(),
1628 "definition subprograms cannot be nested within DICompositeType "
1629 "when enabling ODR",
1630 &N);
1631 } else {
1632 // Subprogram declarations (part of the type hierarchy).
1633 CheckDI(!Unit, "subprogram declarations must not have a compile unit", &N);
1634 CheckDI(!N.getRawDeclaration(),
1635 "subprogram declaration must not have a declaration field");
1636 }
1637
1638 if (auto *RawThrownTypes = N.getRawThrownTypes()) {
1639 auto *ThrownTypes = dyn_cast<MDTuple>(RawThrownTypes);
1640 CheckDI(ThrownTypes, "invalid thrown types list", &N, RawThrownTypes);
1641 for (Metadata *Op : ThrownTypes->operands())
1642 CheckDI(Op && isa<DIType>(Op), "invalid thrown type", &N, ThrownTypes,
1643 Op);
1644 }
1645
1646 if (N.areAllCallsDescribed())
1647 CheckDI(N.isDefinition(),
1648 "DIFlagAllCallsDescribed must be attached to a definition");
1649}
1650
1651void Verifier::visitDILexicalBlockBase(const DILexicalBlockBase &N) {
1652 CheckDI(N.getTag() == dwarf::DW_TAG_lexical_block, "invalid tag", &N);
1653 CheckDI(N.getRawScope() && isa<DILocalScope>(N.getRawScope()),
1654 "invalid local scope", &N, N.getRawScope());
1655 if (auto *SP = dyn_cast<DISubprogram>(N.getRawScope()))
1656 CheckDI(SP->isDefinition(), "scope points into the type hierarchy", &N);
1657}
1658
1659void Verifier::visitDILexicalBlock(const DILexicalBlock &N) {
1660 visitDILexicalBlockBase(N);
1661
1662 CheckDI(N.getLine() || !N.getColumn(),
1663 "cannot have column info without line info", &N);
1664}
1665
1666void Verifier::visitDILexicalBlockFile(const DILexicalBlockFile &N) {
1667 visitDILexicalBlockBase(N);
1668}
1669
1670void Verifier::visitDICommonBlock(const DICommonBlock &N) {
1671 CheckDI(N.getTag() == dwarf::DW_TAG_common_block, "invalid tag", &N);
1672 if (auto *S = N.getRawScope())
1673 CheckDI(isa<DIScope>(S), "invalid scope ref", &N, S);
1674 if (auto *S = N.getRawDecl())
1675 CheckDI(isa<DIGlobalVariable>(S), "invalid declaration", &N, S);
1676}
1677
1678void Verifier::visitDINamespace(const DINamespace &N) {
1679 CheckDI(N.getTag() == dwarf::DW_TAG_namespace, "invalid tag", &N);
1680 if (auto *S = N.getRawScope())
1681 CheckDI(isa<DIScope>(S), "invalid scope ref", &N, S);
1682}
1683
1684void Verifier::visitDIMacro(const DIMacro &N) {
1685 CheckDI(N.getMacinfoType() == dwarf::DW_MACINFO_define ||
1686 N.getMacinfoType() == dwarf::DW_MACINFO_undef,
1687 "invalid macinfo type", &N);
1688 CheckDI(!N.getName().empty(), "anonymous macro", &N);
1689 if (!N.getValue().empty()) {
1690 assert(N.getValue().data()[0] != ' ' && "Macro value has a space prefix");
1691 }
1692}
1693
1694void Verifier::visitDIMacroFile(const DIMacroFile &N) {
1695 CheckDI(N.getMacinfoType() == dwarf::DW_MACINFO_start_file,
1696 "invalid macinfo type", &N);
1697 if (auto *F = N.getRawFile())
1698 CheckDI(isa<DIFile>(F), "invalid file", &N, F);
1699
1700 if (auto *Array = N.getRawElements()) {
1701 CheckDI(isa<MDTuple>(Array), "invalid macro list", &N, Array);
1702 for (Metadata *Op : N.getElements()->operands()) {
1703 CheckDI(Op && isa<DIMacroNode>(Op), "invalid macro ref", &N, Op);
1704 }
1705 }
1706}
1707
1708void Verifier::visitDIModule(const DIModule &N) {
1709 CheckDI(N.getTag() == dwarf::DW_TAG_module, "invalid tag", &N);
1710 CheckDI(!N.getName().empty(), "anonymous module", &N);
1711}
1712
1713void Verifier::visitDITemplateParameter(const DITemplateParameter &N) {
1714 CheckDI(isType(N.getRawType()), "invalid type ref", &N, N.getRawType());
1715}
1716
1717void Verifier::visitDITemplateTypeParameter(const DITemplateTypeParameter &N) {
1718 visitDITemplateParameter(N);
1719
1720 CheckDI(N.getTag() == dwarf::DW_TAG_template_type_parameter, "invalid tag",
1721 &N);
1722}
1723
1724void Verifier::visitDITemplateValueParameter(
1725 const DITemplateValueParameter &N) {
1726 visitDITemplateParameter(N);
1727
1728 CheckDI(N.getTag() == dwarf::DW_TAG_template_value_parameter ||
1729 N.getTag() == dwarf::DW_TAG_GNU_template_template_param ||
1730 N.getTag() == dwarf::DW_TAG_GNU_template_parameter_pack,
1731 "invalid tag", &N);
1732}
1733
1734void Verifier::visitDIVariable(const DIVariable &N) {
1735 if (auto *S = N.getRawScope())
1736 CheckDI(isa<DIScope>(S), "invalid scope", &N, S);
1737 if (auto *F = N.getRawFile())
1738 CheckDI(isa<DIFile>(F), "invalid file", &N, F);
1739}
1740
1741void Verifier::visitDIGlobalVariable(const DIGlobalVariable &N) {
1742 // Checks common to all variables.
1743 visitDIVariable(N);
1744
1745 CheckDI(N.getTag() == dwarf::DW_TAG_variable, "invalid tag", &N);
1746 CheckDI(isType(N.getRawType()), "invalid type ref", &N, N.getRawType());
1747 // Check only if the global variable is not an extern
1748 if (N.isDefinition())
1749 CheckDI(N.getType(), "missing global variable type", &N);
1750 if (auto *Member = N.getRawStaticDataMemberDeclaration()) {
1752 "invalid static data member declaration", &N, Member);
1753 }
1754}
1755
1756void Verifier::visitDILocalVariable(const DILocalVariable &N) {
1757 // Checks common to all variables.
1758 visitDIVariable(N);
1759
1760 CheckDI(isType(N.getRawType()), "invalid type ref", &N, N.getRawType());
1761 CheckDI(N.getTag() == dwarf::DW_TAG_variable, "invalid tag", &N);
1762 CheckDI(N.getRawScope() && isa<DILocalScope>(N.getRawScope()),
1763 "local variable requires a valid scope", &N, N.getRawScope());
1764 if (auto Ty = N.getType())
1765 CheckDI(!isa<DISubroutineType>(Ty), "invalid type", &N, N.getType());
1766}
1767
1768void Verifier::visitDIAssignID(const DIAssignID &N) {
1769 CheckDI(!N.getNumOperands(), "DIAssignID has no arguments", &N);
1770 CheckDI(N.isDistinct(), "DIAssignID must be distinct", &N);
1771}
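// As a reference point for the two checks above, a well-formed DIAssignID in
// textual IR is simply a distinct node with no operands, e.g.:
//
//   !0 = distinct !DIAssignID()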
1772
1773void Verifier::visitDILabel(const DILabel &N) {
1774 if (auto *S = N.getRawScope())
1775 CheckDI(isa<DIScope>(S), "invalid scope", &N, S);
1776 if (auto *F = N.getRawFile())
1777 CheckDI(isa<DIFile>(F), "invalid file", &N, F);
1778
1779 CheckDI(N.getTag() == dwarf::DW_TAG_label, "invalid tag", &N);
1780 CheckDI(N.getRawScope() && isa<DILocalScope>(N.getRawScope()),
1781 "label requires a valid scope", &N, N.getRawScope());
1782}
1783
1784void Verifier::visitDIExpression(const DIExpression &N) {
1785 CheckDI(N.isValid(), "invalid expression", &N);
1786}
1787
1788void Verifier::visitDIGlobalVariableExpression(
1789 const DIGlobalVariableExpression &GVE) {
1790 CheckDI(GVE.getVariable(), "missing variable");
1791 if (auto *Var = GVE.getVariable())
1792 visitDIGlobalVariable(*Var);
1793 if (auto *Expr = GVE.getExpression()) {
1794 visitDIExpression(*Expr);
1795 if (auto Fragment = Expr->getFragmentInfo())
1796 verifyFragmentExpression(*GVE.getVariable(), *Fragment, &GVE);
1797 }
1798}
1799
1800void Verifier::visitDIObjCProperty(const DIObjCProperty &N) {
1801 CheckDI(N.getTag() == dwarf::DW_TAG_APPLE_property, "invalid tag", &N);
1802 if (auto *T = N.getRawType())
1803 CheckDI(isType(T), "invalid type ref", &N, T);
1804 if (auto *F = N.getRawFile())
1805 CheckDI(isa<DIFile>(F), "invalid file", &N, F);
1806}
1807
1808void Verifier::visitDIImportedEntity(const DIImportedEntity &N) {
1809 CheckDI(N.getTag() == dwarf::DW_TAG_imported_module ||
1810 N.getTag() == dwarf::DW_TAG_imported_declaration,
1811 "invalid tag", &N);
1812 if (auto *S = N.getRawScope())
1813 CheckDI(isa<DIScope>(S), "invalid scope for imported entity", &N, S);
1814 CheckDI(isDINode(N.getRawEntity()), "invalid imported entity", &N,
1815 N.getRawEntity());
1816}
1817
1818void Verifier::visitComdat(const Comdat &C) {
1819 // In COFF the Module is invalid if the GlobalValue has private linkage.
1820 // Entities with private linkage don't have entries in the symbol table.
1821 if (TT.isOSBinFormatCOFF())
1822 if (const GlobalValue *GV = M.getNamedValue(C.getName()))
1823 Check(!GV->hasPrivateLinkage(), "comdat global value has private linkage",
1824 GV);
1825}
1826
1827void Verifier::visitModuleIdents() {
1828 const NamedMDNode *Idents = M.getNamedMetadata("llvm.ident");
1829 if (!Idents)
1830 return;
1831
1832 // llvm.ident takes a list of metadata entries. Each entry has only one string.
1833 // Scan each llvm.ident entry and make sure that this requirement is met.
1834 for (const MDNode *N : Idents->operands()) {
1835 Check(N->getNumOperands() == 1,
1836 "incorrect number of operands in llvm.ident metadata", N);
1837 Check(dyn_cast_or_null<MDString>(N->getOperand(0)),
1838 ("invalid value for llvm.ident metadata entry operand"
1839 "(the operand should be a string)"),
1840 N->getOperand(0));
1841 }
1842}
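// For reference, a minimal sketch of !llvm.ident metadata that satisfies the
// one-string-per-entry requirement checked above (the version string is
// illustrative):
//
//   !llvm.ident = !{!0}
//   !0 = !{!"clang version 22.0.0"}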
1843
1844void Verifier::visitModuleCommandLines() {
1845 const NamedMDNode *CommandLines = M.getNamedMetadata("llvm.commandline");
1846 if (!CommandLines)
1847 return;
1848
1849 // llvm.commandline takes a list of metadata entries. Each entry has only one
1850 // string. Scan each llvm.commandline entry and make sure that this
1851 // requirement is met.
1852 for (const MDNode *N : CommandLines->operands()) {
1853 Check(N->getNumOperands() == 1,
1854 "incorrect number of operands in llvm.commandline metadata", N);
1855 Check(dyn_cast_or_null<MDString>(N->getOperand(0)),
1856 ("invalid value for llvm.commandline metadata entry operand"
1857 "(the operand should be a string)"),
1858 N->getOperand(0));
1859 }
1860}
1861
1862void Verifier::visitModuleErrnoTBAA() {
1863 const NamedMDNode *ErrnoTBAA = M.getNamedMetadata("llvm.errno.tbaa");
1864 if (!ErrnoTBAA)
1865 return;
1866
1867 Check(ErrnoTBAA->getNumOperands() >= 1,
1868 "llvm.errno.tbaa must have at least one operand", ErrnoTBAA);
1869
1870 for (const MDNode *N : ErrnoTBAA->operands())
1871 TBAAVerifyHelper.visitTBAAMetadata(nullptr, N);
1872}
1873
1874void Verifier::visitModuleFlags() {
1875 const NamedMDNode *Flags = M.getModuleFlagsMetadata();
1876 if (!Flags) return;
1877
1878 // Scan each flag, and track the flags and requirements.
1879 DenseMap<const MDString*, const MDNode*> SeenIDs;
1880 SmallVector<const MDNode*, 16> Requirements;
1881 uint64_t PAuthABIPlatform = -1;
1882 uint64_t PAuthABIVersion = -1;
1883 for (const MDNode *MDN : Flags->operands()) {
1884 visitModuleFlag(MDN, SeenIDs, Requirements);
1885 if (MDN->getNumOperands() != 3)
1886 continue;
1887 if (const auto *FlagName = dyn_cast_or_null<MDString>(MDN->getOperand(1))) {
1888 if (FlagName->getString() == "aarch64-elf-pauthabi-platform") {
1889 if (const auto *PAP =
1890 mdconst::dyn_extract_or_null<ConstantInt>(MDN->getOperand(2)))
1891 PAuthABIPlatform = PAP->getZExtValue();
1892 } else if (FlagName->getString() == "aarch64-elf-pauthabi-version") {
1893 if (const auto *PAV =
1894 mdconst::dyn_extract_or_null<ConstantInt>(MDN->getOperand(2)))
1895 PAuthABIVersion = PAV->getZExtValue();
1896 }
1897 }
1898 }
1899
1900 if ((PAuthABIPlatform == uint64_t(-1)) != (PAuthABIVersion == uint64_t(-1)))
1901 CheckFailed("either both or no 'aarch64-elf-pauthabi-platform' and "
1902 "'aarch64-elf-pauthabi-version' module flags must be present");
1903
1904 // Validate that the requirements in the module are valid.
1905 for (const MDNode *Requirement : Requirements) {
1906 const MDString *Flag = cast<MDString>(Requirement->getOperand(0));
1907 const Metadata *ReqValue = Requirement->getOperand(1);
1908
1909 const MDNode *Op = SeenIDs.lookup(Flag);
1910 if (!Op) {
1911 CheckFailed("invalid requirement on flag, flag is not present in module",
1912 Flag);
1913 continue;
1914 }
1915
1916 if (Op->getOperand(2) != ReqValue) {
1917 CheckFailed(("invalid requirement on flag, "
1918 "flag does not have the required value"),
1919 Flag);
1920 continue;
1921 }
1922 }
1923}
1924
1925void
1926Verifier::visitModuleFlag(const MDNode *Op,
1927 DenseMap<const MDString *, const MDNode *> &SeenIDs,
1928 SmallVectorImpl<const MDNode *> &Requirements) {
1929 // Each module flag should have three arguments, the merge behavior (a
1930 // constant int), the flag ID (an MDString), and the value.
1931 Check(Op->getNumOperands() == 3,
1932 "incorrect number of operands in module flag", Op);
1933 Module::ModFlagBehavior MFB;
1934 if (!Module::isValidModFlagBehavior(Op->getOperand(0), MFB)) {
1936 "invalid behavior operand in module flag (expected constant integer)",
1937 Op->getOperand(0));
1938 Check(false,
1939 "invalid behavior operand in module flag (unexpected constant)",
1940 Op->getOperand(0));
1941 }
1942 MDString *ID = dyn_cast_or_null<MDString>(Op->getOperand(1));
1943 Check(ID, "invalid ID operand in module flag (expected metadata string)",
1944 Op->getOperand(1));
1945
1946 // Check the values for behaviors with additional requirements.
1947 switch (MFB) {
1948 case Module::Error:
1949 case Module::Warning:
1950 case Module::Override:
1951 // These behavior types accept any value.
1952 break;
1953
1954 case Module::Min: {
1955 auto *V = mdconst::dyn_extract_or_null<ConstantInt>(Op->getOperand(2));
1956 Check(V && V->getValue().isNonNegative(),
1957 "invalid value for 'min' module flag (expected constant non-negative "
1958 "integer)",
1959 Op->getOperand(2));
1960 break;
1961 }
1962
1963 case Module::Max: {
1965 "invalid value for 'max' module flag (expected constant integer)",
1966 Op->getOperand(2));
1967 break;
1968 }
1969
1970 case Module::Require: {
1971 // The value should itself be an MDNode with two operands, a flag ID (an
1972 // MDString), and a value.
1973 MDNode *Value = dyn_cast<MDNode>(Op->getOperand(2));
1974 Check(Value && Value->getNumOperands() == 2,
1975 "invalid value for 'require' module flag (expected metadata pair)",
1976 Op->getOperand(2));
1977 Check(isa<MDString>(Value->getOperand(0)),
1978 ("invalid value for 'require' module flag "
1979 "(first value operand should be a string)"),
1980 Value->getOperand(0));
1981
1982 // Append it to the list of requirements, to check once all module flags are
1983 // scanned.
1984 Requirements.push_back(Value);
1985 break;
1986 }
1987
1988 case Module::Append:
1989 case Module::AppendUnique: {
1990 // These behavior types require the operand be an MDNode.
1991 Check(isa<MDNode>(Op->getOperand(2)),
1992 "invalid value for 'append'-type module flag "
1993 "(expected a metadata node)",
1994 Op->getOperand(2));
1995 break;
1996 }
1997 }
1998
1999 // Unless this is a "requires" flag, check the ID is unique.
2000 if (MFB != Module::Require) {
2001 bool Inserted = SeenIDs.insert(std::make_pair(ID, Op)).second;
2002 Check(Inserted,
2003 "module flag identifiers must be unique (or of 'require' type)", ID);
2004 }
2005
2006 if (ID->getString() == "wchar_size") {
2007 ConstantInt *Value
2009 Check(Value, "wchar_size metadata requires constant integer argument");
2010 }
2011
2012 if (ID->getString() == "Linker Options") {
2013 // If the llvm.linker.options named metadata exists, we assume that the
2014 // bitcode reader has upgraded the module flag. Otherwise the flag might
2015 // have been created by a client directly.
2016 Check(M.getNamedMetadata("llvm.linker.options"),
2017 "'Linker Options' named metadata no longer supported");
2018 }
2019
2020 if (ID->getString() == "SemanticInterposition") {
2021 ConstantInt *Value =
2022 mdconst::dyn_extract_or_null<ConstantInt>(Op->getOperand(2));
2023 Check(Value,
2024 "SemanticInterposition metadata requires constant integer argument");
2025 }
2026
2027 if (ID->getString() == "CG Profile") {
2028 for (const MDOperand &MDO : cast<MDNode>(Op->getOperand(2))->operands())
2029 visitModuleFlagCGProfileEntry(MDO);
2030 }
2031}
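// A hedged sketch of module flags exercising some of the behaviors verified
// above: behavior 1 (Error) carries an arbitrary value, and behavior 3
// (Require) carries a {flag, value} pair that must match another flag. The
// "require-wchar" flag name is illustrative:
//
//   !llvm.module.flags = !{!0, !1}
//   !0 = !{i32 1, !"wchar_size", i32 4}
//   !1 = !{i32 3, !"require-wchar", !2}
//   !2 = !{!"wchar_size", i32 4}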
2032
2033void Verifier::visitModuleFlagCGProfileEntry(const MDOperand &MDO) {
2034 auto CheckFunction = [&](const MDOperand &FuncMDO) {
2035 if (!FuncMDO)
2036 return;
2037 auto F = dyn_cast<ValueAsMetadata>(FuncMDO);
2038 Check(F && isa<Function>(F->getValue()->stripPointerCasts()),
2039 "expected a Function or null", FuncMDO);
2040 };
2041 auto Node = dyn_cast_or_null<MDNode>(MDO);
2042 Check(Node && Node->getNumOperands() == 3, "expected a MDNode triple", MDO);
2043 CheckFunction(Node->getOperand(0));
2044 CheckFunction(Node->getOperand(1));
2045 auto Count = dyn_cast_or_null<ConstantAsMetadata>(Node->getOperand(2));
2046 Check(Count && Count->getType()->isIntegerTy(),
2047 "expected an integer constant", Node->getOperand(2));
2048}
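// A hedged sketch of a "CG Profile" entry accepted by the checks above: each
// list element is an MDNode triple of caller, callee, and an integer call
// count (the symbols and count are illustrative):
//
//   !0 = !{i32 5, !"CG Profile", !1}
//   !1 = !{!2}
//   !2 = !{ptr @caller, ptr @callee, i64 1024}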
2049
2050void Verifier::verifyAttributeTypes(AttributeSet Attrs, const Value *V) {
2051 for (Attribute A : Attrs) {
2052
2053 if (A.isStringAttribute()) {
2054#define GET_ATTR_NAMES
2055#define ATTRIBUTE_ENUM(ENUM_NAME, DISPLAY_NAME)
2056#define ATTRIBUTE_STRBOOL(ENUM_NAME, DISPLAY_NAME) \
2057 if (A.getKindAsString() == #DISPLAY_NAME) { \
2058 auto V = A.getValueAsString(); \
2059 if (!(V.empty() || V == "true" || V == "false")) \
2060 CheckFailed("invalid value for '" #DISPLAY_NAME "' attribute: " + V + \
2061 ""); \
2062 }
2063
2064#include "llvm/IR/Attributes.inc"
2065 continue;
2066 }
2067
2068 if (A.isIntAttribute() != Attribute::isIntAttrKind(A.getKindAsEnum())) {
2069 CheckFailed("Attribute '" + A.getAsString() + "' should have an Argument",
2070 V);
2071 return;
2072 }
2073 }
2074}
2075
2076// VerifyParameterAttrs - Check the given attributes for an argument or return
2077// value of the specified type. The value V is printed in error messages.
2078void Verifier::verifyParameterAttrs(AttributeSet Attrs, Type *Ty,
2079 const Value *V) {
2080 if (!Attrs.hasAttributes())
2081 return;
2082
2083 verifyAttributeTypes(Attrs, V);
2084
2085 for (Attribute Attr : Attrs)
2086 Check(Attr.isStringAttribute() ||
2087 Attribute::canUseAsParamAttr(Attr.getKindAsEnum()),
2088 "Attribute '" + Attr.getAsString() + "' does not apply to parameters",
2089 V);
2090
2091 if (Attrs.hasAttribute(Attribute::ImmArg)) {
2092 unsigned AttrCount =
2093 Attrs.getNumAttributes() - Attrs.hasAttribute(Attribute::Range);
2094 Check(AttrCount == 1,
2095 "Attribute 'immarg' is incompatible with other attributes except the "
2096 "'range' attribute",
2097 V);
2098 }
2099
2100 // Check for mutually incompatible attributes. Only inreg is compatible with
2101 // sret.
2102 unsigned AttrCount = 0;
2103 AttrCount += Attrs.hasAttribute(Attribute::ByVal);
2104 AttrCount += Attrs.hasAttribute(Attribute::InAlloca);
2105 AttrCount += Attrs.hasAttribute(Attribute::Preallocated);
2106 AttrCount += Attrs.hasAttribute(Attribute::StructRet) ||
2107 Attrs.hasAttribute(Attribute::InReg);
2108 AttrCount += Attrs.hasAttribute(Attribute::Nest);
2109 AttrCount += Attrs.hasAttribute(Attribute::ByRef);
2110 Check(AttrCount <= 1,
2111 "Attributes 'byval', 'inalloca', 'preallocated', 'inreg', 'nest', "
2112 "'byref', and 'sret' are incompatible!",
2113 V);
2114
2115 Check(!(Attrs.hasAttribute(Attribute::InAlloca) &&
2116 Attrs.hasAttribute(Attribute::ReadOnly)),
2117 "Attributes "
2118 "'inalloca and readonly' are incompatible!",
2119 V);
2120
2121 Check(!(Attrs.hasAttribute(Attribute::StructRet) &&
2122 Attrs.hasAttribute(Attribute::Returned)),
2123 "Attributes "
2124 "'sret and returned' are incompatible!",
2125 V);
2126
2127 Check(!(Attrs.hasAttribute(Attribute::ZExt) &&
2128 Attrs.hasAttribute(Attribute::SExt)),
2129 "Attributes "
2130 "'zeroext and signext' are incompatible!",
2131 V);
2132
2133 Check(!(Attrs.hasAttribute(Attribute::ReadNone) &&
2134 Attrs.hasAttribute(Attribute::ReadOnly)),
2135 "Attributes "
2136 "'readnone and readonly' are incompatible!",
2137 V);
2138
2139 Check(!(Attrs.hasAttribute(Attribute::ReadNone) &&
2140 Attrs.hasAttribute(Attribute::WriteOnly)),
2141 "Attributes "
2142 "'readnone and writeonly' are incompatible!",
2143 V);
2144
2145 Check(!(Attrs.hasAttribute(Attribute::ReadOnly) &&
2146 Attrs.hasAttribute(Attribute::WriteOnly)),
2147 "Attributes "
2148 "'readonly and writeonly' are incompatible!",
2149 V);
2150
2151 Check(!(Attrs.hasAttribute(Attribute::NoInline) &&
2152 Attrs.hasAttribute(Attribute::AlwaysInline)),
2153 "Attributes "
2154 "'noinline and alwaysinline' are incompatible!",
2155 V);
2156
2157 Check(!(Attrs.hasAttribute(Attribute::Writable) &&
2158 Attrs.hasAttribute(Attribute::ReadNone)),
2159 "Attributes writable and readnone are incompatible!", V);
2160
2161 Check(!(Attrs.hasAttribute(Attribute::Writable) &&
2162 Attrs.hasAttribute(Attribute::ReadOnly)),
2163 "Attributes writable and readonly are incompatible!", V);
2164
2165 AttributeMask IncompatibleAttrs = AttributeFuncs::typeIncompatible(Ty, Attrs);
2166 for (Attribute Attr : Attrs) {
2167 if (!Attr.isStringAttribute() &&
2168 IncompatibleAttrs.contains(Attr.getKindAsEnum())) {
2169 CheckFailed("Attribute '" + Attr.getAsString() +
2170 "' applied to incompatible type!", V);
2171 return;
2172 }
2173 }
2174
2175 if (isa<PointerType>(Ty)) {
2176 if (Attrs.hasAttribute(Attribute::Alignment)) {
2177 Align AttrAlign = Attrs.getAlignment().valueOrOne();
2178 Check(AttrAlign.value() <= Value::MaximumAlignment,
2179 "huge alignment values are unsupported", V);
2180 }
2181 if (Attrs.hasAttribute(Attribute::ByVal)) {
2182 Type *ByValTy = Attrs.getByValType();
2183 SmallPtrSet<Type *, 4> Visited;
2184 Check(ByValTy->isSized(&Visited),
2185 "Attribute 'byval' does not support unsized types!", V);
2186 // Check if it is or contains a target extension type that disallows being
2187 // used on the stack.
2189 "'byval' argument has illegal target extension type", V);
2190 Check(DL.getTypeAllocSize(ByValTy).getKnownMinValue() < (1ULL << 32),
2191 "huge 'byval' arguments are unsupported", V);
2192 }
2193 if (Attrs.hasAttribute(Attribute::ByRef)) {
2194 SmallPtrSet<Type *, 4> Visited;
2195 Check(Attrs.getByRefType()->isSized(&Visited),
2196 "Attribute 'byref' does not support unsized types!", V);
2197 Check(DL.getTypeAllocSize(Attrs.getByRefType()).getKnownMinValue() <
2198 (1ULL << 32),
2199 "huge 'byref' arguments are unsupported", V);
2200 }
2201 if (Attrs.hasAttribute(Attribute::InAlloca)) {
2202 SmallPtrSet<Type *, 4> Visited;
2203 Check(Attrs.getInAllocaType()->isSized(&Visited),
2204 "Attribute 'inalloca' does not support unsized types!", V);
2205 Check(DL.getTypeAllocSize(Attrs.getInAllocaType()).getKnownMinValue() <
2206 (1ULL << 32),
2207 "huge 'inalloca' arguments are unsupported", V);
2208 }
2209 if (Attrs.hasAttribute(Attribute::Preallocated)) {
2210 SmallPtrSet<Type *, 4> Visited;
2211 Check(Attrs.getPreallocatedType()->isSized(&Visited),
2212 "Attribute 'preallocated' does not support unsized types!", V);
2213 Check(
2214 DL.getTypeAllocSize(Attrs.getPreallocatedType()).getKnownMinValue() <
2215 (1ULL << 32),
2216 "huge 'preallocated' arguments are unsupported", V);
2217 }
2218 }
2219
2220 if (Attrs.hasAttribute(Attribute::Initializes)) {
2221 auto Inits = Attrs.getAttribute(Attribute::Initializes).getInitializes();
2222 Check(!Inits.empty(), "Attribute 'initializes' does not support empty list",
2223 V);
2225 "Attribute 'initializes' does not support unordered ranges", V);
2226 }
2227
2228 if (Attrs.hasAttribute(Attribute::NoFPClass)) {
2229 uint64_t Val = Attrs.getAttribute(Attribute::NoFPClass).getValueAsInt();
2230 Check(Val != 0, "Attribute 'nofpclass' must have at least one test bit set",
2231 V);
2232 Check((Val & ~static_cast<unsigned>(fcAllFlags)) == 0,
2233 "Invalid value for 'nofpclass' test mask", V);
2234 }
2235 if (Attrs.hasAttribute(Attribute::Range)) {
2236 const ConstantRange &CR =
2237 Attrs.getAttribute(Attribute::Range).getValueAsConstantRange();
2239 "Range bit width must match type bit width!", V);
2240 }
2241}
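// A hedged IR sketch of the mutual-exclusion rule above: only 'inreg' may be
// combined with 'sret', while mixing members of the byval/inalloca/
// preallocated/nest/byref/sret group is rejected (declarations are
// illustrative):
//
//   declare void @accepted(ptr sret(i32) inreg)
//   declare void @rejected(ptr byval(i32) nest)   ; fails verification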
2242
2243void Verifier::checkUnsignedBaseTenFuncAttr(AttributeList Attrs, StringRef Attr,
2244 const Value *V) {
2245 if (Attrs.hasFnAttr(Attr)) {
2246 StringRef S = Attrs.getFnAttr(Attr).getValueAsString();
2247 unsigned N;
2248 if (S.getAsInteger(10, N))
2249 CheckFailed("\"" + Attr + "\" takes an unsigned integer: " + S, V);
2250 }
2251}
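// A hedged sketch of string function attributes validated by this helper;
// each value must parse as an unsigned base-10 integer (the numbers are
// illustrative):
//
//   declare void @patched() #0
//   attributes #0 = { "patchable-function-entry"="2" "warn-stack-size"="512" }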
2252
2253// Check parameter attributes against a function type.
2254// The value V is printed in error messages.
2255void Verifier::verifyFunctionAttrs(FunctionType *FT, AttributeList Attrs,
2256 const Value *V, bool IsIntrinsic,
2257 bool IsInlineAsm) {
2258 if (Attrs.isEmpty())
2259 return;
2260
2261 if (AttributeListsVisited.insert(Attrs.getRawPointer()).second) {
2262 Check(Attrs.hasParentContext(Context),
2263 "Attribute list does not match Module context!", &Attrs, V);
2264 for (const auto &AttrSet : Attrs) {
2265 Check(!AttrSet.hasAttributes() || AttrSet.hasParentContext(Context),
2266 "Attribute set does not match Module context!", &AttrSet, V);
2267 for (const auto &A : AttrSet) {
2268 Check(A.hasParentContext(Context),
2269 "Attribute does not match Module context!", &A, V);
2270 }
2271 }
2272 }
2273
2274 bool SawNest = false;
2275 bool SawReturned = false;
2276 bool SawSRet = false;
2277 bool SawSwiftSelf = false;
2278 bool SawSwiftAsync = false;
2279 bool SawSwiftError = false;
2280
2281 // Verify return value attributes.
2282 AttributeSet RetAttrs = Attrs.getRetAttrs();
2283 for (Attribute RetAttr : RetAttrs)
2284 Check(RetAttr.isStringAttribute() ||
2285 Attribute::canUseAsRetAttr(RetAttr.getKindAsEnum()),
2286 "Attribute '" + RetAttr.getAsString() +
2287 "' does not apply to function return values",
2288 V);
2289
2290 unsigned MaxParameterWidth = 0;
2291 auto GetMaxParameterWidth = [&MaxParameterWidth](Type *Ty) {
2292 if (Ty->isVectorTy()) {
2293 if (auto *VT = dyn_cast<FixedVectorType>(Ty)) {
2294 unsigned Size = VT->getPrimitiveSizeInBits().getFixedValue();
2295 if (Size > MaxParameterWidth)
2296 MaxParameterWidth = Size;
2297 }
2298 }
2299 };
2300 GetMaxParameterWidth(FT->getReturnType());
2301 verifyParameterAttrs(RetAttrs, FT->getReturnType(), V);
2302
2303 // Verify parameter attributes.
2304 for (unsigned i = 0, e = FT->getNumParams(); i != e; ++i) {
2305 Type *Ty = FT->getParamType(i);
2306 AttributeSet ArgAttrs = Attrs.getParamAttrs(i);
2307
2308 if (!IsIntrinsic) {
2309 Check(!ArgAttrs.hasAttribute(Attribute::ImmArg),
2310 "immarg attribute only applies to intrinsics", V);
2311 if (!IsInlineAsm)
2312 Check(!ArgAttrs.hasAttribute(Attribute::ElementType),
2313 "Attribute 'elementtype' can only be applied to intrinsics"
2314 " and inline asm.",
2315 V);
2316 }
2317
2318 verifyParameterAttrs(ArgAttrs, Ty, V);
2319 GetMaxParameterWidth(Ty);
2320
2321 if (ArgAttrs.hasAttribute(Attribute::Nest)) {
2322 Check(!SawNest, "More than one parameter has attribute nest!", V);
2323 SawNest = true;
2324 }
2325
2326 if (ArgAttrs.hasAttribute(Attribute::Returned)) {
2327 Check(!SawReturned, "More than one parameter has attribute returned!", V);
2328 Check(Ty->canLosslesslyBitCastTo(FT->getReturnType()),
2329 "Incompatible argument and return types for 'returned' attribute",
2330 V);
2331 SawReturned = true;
2332 }
2333
2334 if (ArgAttrs.hasAttribute(Attribute::StructRet)) {
2335 Check(!SawSRet, "Cannot have multiple 'sret' parameters!", V);
2336 Check(i == 0 || i == 1,
2337 "Attribute 'sret' is not on first or second parameter!", V);
2338 SawSRet = true;
2339 }
2340
2341 if (ArgAttrs.hasAttribute(Attribute::SwiftSelf)) {
2342 Check(!SawSwiftSelf, "Cannot have multiple 'swiftself' parameters!", V);
2343 SawSwiftSelf = true;
2344 }
2345
2346 if (ArgAttrs.hasAttribute(Attribute::SwiftAsync)) {
2347 Check(!SawSwiftAsync, "Cannot have multiple 'swiftasync' parameters!", V);
2348 SawSwiftAsync = true;
2349 }
2350
2351 if (ArgAttrs.hasAttribute(Attribute::SwiftError)) {
2352 Check(!SawSwiftError, "Cannot have multiple 'swifterror' parameters!", V);
2353 SawSwiftError = true;
2354 }
2355
2356 if (ArgAttrs.hasAttribute(Attribute::InAlloca)) {
2357 Check(i == FT->getNumParams() - 1,
2358 "inalloca isn't on the last parameter!", V);
2359 }
2360 }
2361
2362 if (!Attrs.hasFnAttrs())
2363 return;
2364
2365 verifyAttributeTypes(Attrs.getFnAttrs(), V);
2366 for (Attribute FnAttr : Attrs.getFnAttrs())
2367 Check(FnAttr.isStringAttribute() ||
2368 Attribute::canUseAsFnAttr(FnAttr.getKindAsEnum()),
2369 "Attribute '" + FnAttr.getAsString() +
2370 "' does not apply to functions!",
2371 V);
2372
2373 Check(!(Attrs.hasFnAttr(Attribute::NoInline) &&
2374 Attrs.hasFnAttr(Attribute::AlwaysInline)),
2375 "Attributes 'noinline and alwaysinline' are incompatible!", V);
2376
2377 if (Attrs.hasFnAttr(Attribute::OptimizeNone)) {
2378 Check(Attrs.hasFnAttr(Attribute::NoInline),
2379 "Attribute 'optnone' requires 'noinline'!", V);
2380
2381 Check(!Attrs.hasFnAttr(Attribute::OptimizeForSize),
2382 "Attributes 'optsize and optnone' are incompatible!", V);
2383
2384 Check(!Attrs.hasFnAttr(Attribute::MinSize),
2385 "Attributes 'minsize and optnone' are incompatible!", V);
2386
2387 Check(!Attrs.hasFnAttr(Attribute::OptimizeForDebugging),
2388 "Attributes 'optdebug and optnone' are incompatible!", V);
2389 }
2390
2391 Check(!(Attrs.hasFnAttr(Attribute::SanitizeRealtime) &&
2392 Attrs.hasFnAttr(Attribute::SanitizeRealtimeBlocking)),
2393 "Attributes "
2394 "'sanitize_realtime and sanitize_realtime_blocking' are incompatible!",
2395 V);
2396
2397 if (Attrs.hasFnAttr(Attribute::OptimizeForDebugging)) {
2398 Check(!Attrs.hasFnAttr(Attribute::OptimizeForSize),
2399 "Attributes 'optsize and optdebug' are incompatible!", V);
2400
2401 Check(!Attrs.hasFnAttr(Attribute::MinSize),
2402 "Attributes 'minsize and optdebug' are incompatible!", V);
2403 }
2404
2405 Check(!Attrs.hasAttrSomewhere(Attribute::Writable) ||
2406 isModSet(Attrs.getMemoryEffects().getModRef(IRMemLocation::ArgMem)),
2407 "Attribute writable and memory without argmem: write are incompatible!",
2408 V);
2409
2410 if (Attrs.hasFnAttr("aarch64_pstate_sm_enabled")) {
2411 Check(!Attrs.hasFnAttr("aarch64_pstate_sm_compatible"),
2412 "Attributes 'aarch64_pstate_sm_enabled and "
2413 "aarch64_pstate_sm_compatible' are incompatible!",
2414 V);
2415 }
2416
2417 Check((Attrs.hasFnAttr("aarch64_new_za") + Attrs.hasFnAttr("aarch64_in_za") +
2418 Attrs.hasFnAttr("aarch64_inout_za") +
2419 Attrs.hasFnAttr("aarch64_out_za") +
2420 Attrs.hasFnAttr("aarch64_preserves_za") +
2421 Attrs.hasFnAttr("aarch64_za_state_agnostic")) <= 1,
2422 "Attributes 'aarch64_new_za', 'aarch64_in_za', 'aarch64_out_za', "
2423 "'aarch64_inout_za', 'aarch64_preserves_za' and "
2424 "'aarch64_za_state_agnostic' are mutually exclusive",
2425 V);
2426
2427 Check((Attrs.hasFnAttr("aarch64_new_zt0") +
2428 Attrs.hasFnAttr("aarch64_in_zt0") +
2429 Attrs.hasFnAttr("aarch64_inout_zt0") +
2430 Attrs.hasFnAttr("aarch64_out_zt0") +
2431 Attrs.hasFnAttr("aarch64_preserves_zt0") +
2432 Attrs.hasFnAttr("aarch64_za_state_agnostic")) <= 1,
2433 "Attributes 'aarch64_new_zt0', 'aarch64_in_zt0', 'aarch64_out_zt0', "
2434 "'aarch64_inout_zt0', 'aarch64_preserves_zt0' and "
2435 "'aarch64_za_state_agnostic' are mutually exclusive",
2436 V);
2437
2438 if (Attrs.hasFnAttr(Attribute::JumpTable)) {
2439 const GlobalValue *GV = cast<GlobalValue>(V);
2441 "Attribute 'jumptable' requires 'unnamed_addr'", V);
2442 }
2443
2444 if (auto Args = Attrs.getFnAttrs().getAllocSizeArgs()) {
2445 auto CheckParam = [&](StringRef Name, unsigned ParamNo) {
2446 if (ParamNo >= FT->getNumParams()) {
2447 CheckFailed("'allocsize' " + Name + " argument is out of bounds", V);
2448 return false;
2449 }
2450
2451 if (!FT->getParamType(ParamNo)->isIntegerTy()) {
2452 CheckFailed("'allocsize' " + Name +
2453 " argument must refer to an integer parameter",
2454 V);
2455 return false;
2456 }
2457
2458 return true;
2459 };
2460
2461 if (!CheckParam("element size", Args->first))
2462 return;
2463
2464 if (Args->second && !CheckParam("number of elements", *Args->second))
2465 return;
2466 }
2467
2468 if (Attrs.hasFnAttr(Attribute::AllocKind)) {
2469 AllocFnKind K = Attrs.getAllocKind();
2470 AllocFnKind Type =
2471 K & (AllocFnKind::Alloc | AllocFnKind::Realloc | AllocFnKind::Free);
2472 if (!is_contained(
2473 {AllocFnKind::Alloc, AllocFnKind::Realloc, AllocFnKind::Free},
2474 Type))
2475 CheckFailed(
2476 "'allockind()' requires exactly one of alloc, realloc, and free");
2477 if ((Type == AllocFnKind::Free) &&
2478 ((K & (AllocFnKind::Uninitialized | AllocFnKind::Zeroed |
2479 AllocFnKind::Aligned)) != AllocFnKind::Unknown))
2480 CheckFailed("'allockind(\"free\")' doesn't allow uninitialized, zeroed, "
2481 "or aligned modifiers.");
2482 AllocFnKind ZeroedUninit = AllocFnKind::Uninitialized | AllocFnKind::Zeroed;
2483 if ((K & ZeroedUninit) == ZeroedUninit)
2484 CheckFailed("'allockind()' can't be both zeroed and uninitialized");
2485 }
2486
2487 if (Attribute A = Attrs.getFnAttr("alloc-variant-zeroed"); A.isValid()) {
2488 StringRef S = A.getValueAsString();
2489 Check(!S.empty(), "'alloc-variant-zeroed' must not be empty");
2490 Function *Variant = M.getFunction(S);
2491 if (Variant) {
2492 Attribute Family = Attrs.getFnAttr("alloc-family");
2493 Attribute VariantFamily = Variant->getFnAttribute("alloc-family");
2494 if (Family.isValid())
2495 Check(VariantFamily.isValid() &&
2496 VariantFamily.getValueAsString() == Family.getValueAsString(),
2497 "'alloc-variant-zeroed' must name a function belonging to the "
2498 "same 'alloc-family'");
2499
2500 Check(Variant->hasFnAttribute(Attribute::AllocKind) &&
2501 (Variant->getFnAttribute(Attribute::AllocKind).getAllocKind() &
2502 AllocFnKind::Zeroed) != AllocFnKind::Unknown,
2503 "'alloc-variant-zeroed' must name a function with "
2504 "'allockind(\"zeroed\")'");
2505
2506 Check(FT == Variant->getFunctionType(),
2507 "'alloc-variant-zeroed' must name a function with the same "
2508 "signature");
2509 }
2510 }
2511
2512 if (Attrs.hasFnAttr(Attribute::VScaleRange)) {
2513 unsigned VScaleMin = Attrs.getFnAttrs().getVScaleRangeMin();
2514 if (VScaleMin == 0)
2515 CheckFailed("'vscale_range' minimum must be greater than 0", V);
2516 else if (!isPowerOf2_32(VScaleMin))
2517 CheckFailed("'vscale_range' minimum must be power-of-two value", V);
2518 std::optional<unsigned> VScaleMax = Attrs.getFnAttrs().getVScaleRangeMax();
2519 if (VScaleMax && VScaleMin > VScaleMax)
2520 CheckFailed("'vscale_range' minimum cannot be greater than maximum", V);
2521 else if (VScaleMax && !isPowerOf2_32(*VScaleMax))
2522 CheckFailed("'vscale_range' maximum must be power-of-two value", V);
2523 }
2524
2525 if (Attribute FPAttr = Attrs.getFnAttr("frame-pointer"); FPAttr.isValid()) {
2526 StringRef FP = FPAttr.getValueAsString();
2527 if (FP != "all" && FP != "non-leaf" && FP != "none" && FP != "reserved" &&
2528 FP != "non-leaf-no-reserve")
2529 CheckFailed("invalid value for 'frame-pointer' attribute: " + FP, V);
2530 }
2531
2532 checkUnsignedBaseTenFuncAttr(Attrs, "patchable-function-prefix", V);
2533 checkUnsignedBaseTenFuncAttr(Attrs, "patchable-function-entry", V);
2534 if (Attrs.hasFnAttr("patchable-function-entry-section"))
2535 Check(!Attrs.getFnAttr("patchable-function-entry-section")
2536 .getValueAsString()
2537 .empty(),
2538 "\"patchable-function-entry-section\" must not be empty");
2539 checkUnsignedBaseTenFuncAttr(Attrs, "warn-stack-size", V);
2540
2541 if (auto A = Attrs.getFnAttr("sign-return-address"); A.isValid()) {
2542 StringRef S = A.getValueAsString();
2543 if (S != "none" && S != "all" && S != "non-leaf")
2544 CheckFailed("invalid value for 'sign-return-address' attribute: " + S, V);
2545 }
2546
2547 if (auto A = Attrs.getFnAttr("sign-return-address-key"); A.isValid()) {
2548 StringRef S = A.getValueAsString();
2549 if (S != "a_key" && S != "b_key")
2550 CheckFailed("invalid value for 'sign-return-address-key' attribute: " + S,
2551 V);
2552 if (auto AA = Attrs.getFnAttr("sign-return-address"); !AA.isValid()) {
2553 CheckFailed(
2554 "'sign-return-address-key' present without `sign-return-address`");
2555 }
2556 }
2557
2558 if (auto A = Attrs.getFnAttr("branch-target-enforcement"); A.isValid()) {
2559 StringRef S = A.getValueAsString();
2560 if (S != "" && S != "true" && S != "false")
2561 CheckFailed(
2562 "invalid value for 'branch-target-enforcement' attribute: " + S, V);
2563 }
2564
2565 if (auto A = Attrs.getFnAttr("branch-protection-pauth-lr"); A.isValid()) {
2566 StringRef S = A.getValueAsString();
2567 if (S != "" && S != "true" && S != "false")
2568 CheckFailed(
2569 "invalid value for 'branch-protection-pauth-lr' attribute: " + S, V);
2570 }
2571
2572 if (auto A = Attrs.getFnAttr("guarded-control-stack"); A.isValid()) {
2573 StringRef S = A.getValueAsString();
2574 if (S != "" && S != "true" && S != "false")
2575 CheckFailed("invalid value for 'guarded-control-stack' attribute: " + S,
2576 V);
2577 }
2578
2579 if (auto A = Attrs.getFnAttr("vector-function-abi-variant"); A.isValid()) {
2580 StringRef S = A.getValueAsString();
2581 const std::optional<VFInfo> Info = VFABI::tryDemangleForVFABI(S, FT);
2582 if (!Info)
2583 CheckFailed("invalid name for a VFABI variant: " + S, V);
2584 }
2585
2586 if (auto A = Attrs.getFnAttr("denormal-fp-math"); A.isValid()) {
2587 StringRef S = A.getValueAsString();
2589 CheckFailed("invalid value for 'denormal-fp-math' attribute: " + S, V);
2590 }
2591
2592 if (auto A = Attrs.getFnAttr("denormal-fp-math-f32"); A.isValid()) {
2593 StringRef S = A.getValueAsString();
2595 CheckFailed("invalid value for 'denormal-fp-math-f32' attribute: " + S,
2596 V);
2597 }
2598
2599 if (auto A = Attrs.getFnAttr("modular-format"); A.isValid()) {
2600 StringRef S = A.getValueAsString();
2601 SmallVector<StringRef> Args;
2602 S.split(Args, ',');
2603 Check(Args.size() >= 5,
2604 "modular-format attribute requires at least 5 arguments", V);
2605 unsigned FirstArgIdx;
2606 Check(!Args[2].getAsInteger(10, FirstArgIdx),
2607 "modular-format attribute first arg index is not an integer", V);
2608 unsigned UpperBound = FT->getNumParams() + (FT->isVarArg() ? 1 : 0);
2609 Check(FirstArgIdx > 0 && FirstArgIdx <= UpperBound,
2610 "modular-format attribute first arg index is out of bounds", V);
2611 }
2612}
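// A hedged sketch of function attributes covered by the checks above:
// 'allocsize' indices must name integer parameters, and 'vscale_range'
// bounds must be non-zero powers of two (declarations are illustrative):
//
//   declare ptr @my_calloc(i64, i64) allocsize(0, 1)
//   declare void @scalable_kernel() vscale_range(1, 16)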
2613void Verifier::verifyUnknownProfileMetadata(MDNode *MD) {
2614 Check(MD->getNumOperands() == 2,
2615 "'unknown' !prof should have a single additional operand", MD);
2616 auto *PassName = dyn_cast<MDString>(MD->getOperand(1));
2617 Check(PassName != nullptr,
2618 "'unknown' !prof should have an additional operand of type "
2619 "string");
2620 Check(!PassName->getString().empty(),
2621 "the 'unknown' !prof operand should not be an empty string");
2622}
2623
2624void Verifier::verifyFunctionMetadata(
2625 ArrayRef<std::pair<unsigned, MDNode *>> MDs) {
2626 for (const auto &Pair : MDs) {
2627 if (Pair.first == LLVMContext::MD_prof) {
2628 MDNode *MD = Pair.second;
2629 Check(MD->getNumOperands() >= 2,
2630 "!prof annotations should have no less than 2 operands", MD);
2631 // We may have functions that are synthesized by the compiler, e.g. in
2632 // WPD, for which we can't currently determine the entry count.
2633 if (MD->getOperand(0).equalsStr(
2635 verifyUnknownProfileMetadata(MD);
2636 continue;
2637 }
2638
2639 // Check first operand.
2640 Check(MD->getOperand(0) != nullptr, "first operand should not be null",
2641 MD);
2643 "expected string with name of the !prof annotation", MD);
2644 MDString *MDS = cast<MDString>(MD->getOperand(0));
2645 StringRef ProfName = MDS->getString();
2648 "first operand should be 'function_entry_count'"
2649 " or 'synthetic_function_entry_count'",
2650 MD);
2651
2652 // Check second operand.
2653 Check(MD->getOperand(1) != nullptr, "second operand should not be null",
2654 MD);
2656 "expected integer argument to function_entry_count", MD);
2657 } else if (Pair.first == LLVMContext::MD_kcfi_type) {
2658 MDNode *MD = Pair.second;
2659 Check(MD->getNumOperands() == 1,
2660 "!kcfi_type must have exactly one operand", MD);
2661 Check(MD->getOperand(0) != nullptr, "!kcfi_type operand must not be null",
2662 MD);
2664 "expected a constant operand for !kcfi_type", MD);
2665 Constant *C = cast<ConstantAsMetadata>(MD->getOperand(0))->getValue();
2666 Check(isa<ConstantInt>(C) && isa<IntegerType>(C->getType()),
2667 "expected a constant integer operand for !kcfi_type", MD);
2669 "expected a 32-bit integer constant operand for !kcfi_type", MD);
2670 }
2671 }
2672}
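// A hedged sketch of function-level !prof metadata accepted above; the entry
// count is illustrative:
//
//   define void @hot() !prof !0 {
//     ret void
//   }
//   !0 = !{!"function_entry_count", i64 1000}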
2673
2674void Verifier::visitConstantExprsRecursively(const Constant *EntryC) {
2675 if (!ConstantExprVisited.insert(EntryC).second)
2676 return;
2677
2678 SmallVector<const Constant *, 16> Stack;
2679 Stack.push_back(EntryC);
2680
2681 while (!Stack.empty()) {
2682 const Constant *C = Stack.pop_back_val();
2683
2684 // Check this constant expression.
2685 if (const auto *CE = dyn_cast<ConstantExpr>(C))
2686 visitConstantExpr(CE);
2687
2688 if (const auto *CPA = dyn_cast<ConstantPtrAuth>(C))
2689 visitConstantPtrAuth(CPA);
2690
2691 if (const auto *GV = dyn_cast<GlobalValue>(C)) {
2692 // Global Values get visited separately, but we do need to make sure
2693 // that the global value is in the correct module
2694 Check(GV->getParent() == &M, "Referencing global in another module!",
2695 EntryC, &M, GV, GV->getParent());
2696 continue;
2697 }
2698
2699 // Visit all sub-expressions.
2700 for (const Use &U : C->operands()) {
2701 const auto *OpC = dyn_cast<Constant>(U);
2702 if (!OpC)
2703 continue;
2704 if (!ConstantExprVisited.insert(OpC).second)
2705 continue;
2706 Stack.push_back(OpC);
2707 }
2708 }
2709}
2710
2711void Verifier::visitConstantExpr(const ConstantExpr *CE) {
2712 if (CE->getOpcode() == Instruction::BitCast)
2713 Check(CastInst::castIsValid(Instruction::BitCast, CE->getOperand(0),
2714 CE->getType()),
2715 "Invalid bitcast", CE);
2716 else if (CE->getOpcode() == Instruction::PtrToAddr)
2717 checkPtrToAddr(CE->getOperand(0)->getType(), CE->getType(), *CE);
2718}
2719
2720void Verifier::visitConstantPtrAuth(const ConstantPtrAuth *CPA) {
2721 Check(CPA->getPointer()->getType()->isPointerTy(),
2722 "signed ptrauth constant base pointer must have pointer type");
2723
2724 Check(CPA->getType() == CPA->getPointer()->getType(),
2725 "signed ptrauth constant must have same type as its base pointer");
2726
2727 Check(CPA->getKey()->getBitWidth() == 32,
2728 "signed ptrauth constant key must be i32 constant integer");
2729
2731 "signed ptrauth constant address discriminator must be a pointer");
2732
2733 Check(CPA->getDiscriminator()->getBitWidth() == 64,
2734 "signed ptrauth constant discriminator must be i64 constant integer");
2735}
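// A hedged sketch of a signed ptrauth constant satisfying the checks above:
// an i32 key, an i64 discriminator, and a pointer-typed address
// discriminator (the values are illustrative):
//
//   @fptr = global ptr ptrauth (ptr @callee, i32 0, i64 42, ptr null)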
2736
2737bool Verifier::verifyAttributeCount(AttributeList Attrs, unsigned Params) {
2738 // There shouldn't be more attribute sets than there are parameters plus the
2739 // function and return value.
2740 return Attrs.getNumAttrSets() <= Params + 2;
2741}
2742
2743void Verifier::verifyInlineAsmCall(const CallBase &Call) {
2744 const InlineAsm *IA = cast<InlineAsm>(Call.getCalledOperand());
2745 unsigned ArgNo = 0;
2746 unsigned LabelNo = 0;
2747 for (const InlineAsm::ConstraintInfo &CI : IA->ParseConstraints()) {
2748 if (CI.Type == InlineAsm::isLabel) {
2749 ++LabelNo;
2750 continue;
2751 }
2752
2753 // Only deal with constraints that correspond to call arguments.
2754 if (!CI.hasArg())
2755 continue;
2756
2757 if (CI.isIndirect) {
2758 const Value *Arg = Call.getArgOperand(ArgNo);
2759 Check(Arg->getType()->isPointerTy(),
2760 "Operand for indirect constraint must have pointer type", &Call);
2761
2763 "Operand for indirect constraint must have elementtype attribute",
2764 &Call);
2765 } else {
2766 Check(!Call.paramHasAttr(ArgNo, Attribute::ElementType),
2767 "Elementtype attribute can only be applied for indirect "
2768 "constraints",
2769 &Call);
2770 }
2771
2772 ArgNo++;
2773 }
2774
2775 if (auto *CallBr = dyn_cast<CallBrInst>(&Call)) {
2776 Check(LabelNo == CallBr->getNumIndirectDests(),
2777 "Number of label constraints does not match number of callbr dests",
2778 &Call);
2779 } else {
2780 Check(LabelNo == 0, "Label constraints can only be used with callbr",
2781 &Call);
2782 }
2783}
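// A hedged sketch of an inline asm call with an indirect ("*m") constraint;
// the pointer operand must carry an elementtype attribute, as checked above
// (the asm string is illustrative x86):
//
//   call void asm sideeffect "movl $$1, $0", "=*m"(ptr elementtype(i32) %p)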
2784
2785/// Verify that statepoint intrinsic is well formed.
2786void Verifier::verifyStatepoint(const CallBase &Call) {
2787 assert(Call.getIntrinsicID() == Intrinsic::experimental_gc_statepoint);
2788
2791 "gc.statepoint must read and write all memory to preserve "
2792 "reordering restrictions required by safepoint semantics",
2793 Call);
2794
2795 const int64_t NumPatchBytes =
2796 cast<ConstantInt>(Call.getArgOperand(1))->getSExtValue();
2797 assert(isInt<32>(NumPatchBytes) && "NumPatchBytesV is an i32!");
2798 Check(NumPatchBytes >= 0,
2799 "gc.statepoint number of patchable bytes must be "
2800 "positive",
2801 Call);
2802
2803 Type *TargetElemType = Call.getParamElementType(2);
2804 Check(TargetElemType,
2805 "gc.statepoint callee argument must have elementtype attribute", Call);
2806 FunctionType *TargetFuncType = dyn_cast<FunctionType>(TargetElemType);
2807 Check(TargetFuncType,
2808 "gc.statepoint callee elementtype must be function type", Call);
2809
2810 const int NumCallArgs = cast<ConstantInt>(Call.getArgOperand(3))->getZExtValue();
2811 Check(NumCallArgs >= 0,
2812 "gc.statepoint number of arguments to underlying call "
2813 "must be positive",
2814 Call);
2815 const int NumParams = (int)TargetFuncType->getNumParams();
2816 if (TargetFuncType->isVarArg()) {
2817 Check(NumCallArgs >= NumParams,
2818 "gc.statepoint mismatch in number of vararg call args", Call);
2819
2820 // TODO: Remove this limitation
2821 Check(TargetFuncType->getReturnType()->isVoidTy(),
2822 "gc.statepoint doesn't support wrapping non-void "
2823 "vararg functions yet",
2824 Call);
2825 } else
2826 Check(NumCallArgs == NumParams,
2827 "gc.statepoint mismatch in number of call args", Call);
2828
2829 const uint64_t Flags
2830 = cast<ConstantInt>(Call.getArgOperand(4))->getZExtValue();
2831 Check((Flags & ~(uint64_t)StatepointFlags::MaskAll) == 0,
2832 "unknown flag used in gc.statepoint flags argument", Call);
2833
2834 // Verify that the types of the call parameter arguments match
2835 // the type of the wrapped callee.
2836 AttributeList Attrs = Call.getAttributes();
2837 for (int i = 0; i < NumParams; i++) {
2838 Type *ParamType = TargetFuncType->getParamType(i);
2839 Type *ArgType = Call.getArgOperand(5 + i)->getType();
2840 Check(ArgType == ParamType,
2841 "gc.statepoint call argument does not match wrapped "
2842 "function type",
2843 Call);
2844
2845 if (TargetFuncType->isVarArg()) {
2846 AttributeSet ArgAttrs = Attrs.getParamAttrs(5 + i);
2847 Check(!ArgAttrs.hasAttribute(Attribute::StructRet),
2848 "Attribute 'sret' cannot be used for vararg call arguments!", Call);
2849 }
2850 }
2851
2852 const int EndCallArgsInx = 4 + NumCallArgs;
2853
2854 const Value *NumTransitionArgsV = Call.getArgOperand(EndCallArgsInx + 1);
2855 Check(isa<ConstantInt>(NumTransitionArgsV),
2856 "gc.statepoint number of transition arguments "
2857 "must be constant integer",
2858 Call);
2859 const int NumTransitionArgs =
2860 cast<ConstantInt>(NumTransitionArgsV)->getZExtValue();
2861 Check(NumTransitionArgs == 0,
2862 "gc.statepoint w/inline transition bundle is deprecated", Call);
2863 const int EndTransitionArgsInx = EndCallArgsInx + 1 + NumTransitionArgs;
2864
2865 const Value *NumDeoptArgsV = Call.getArgOperand(EndTransitionArgsInx + 1);
2866 Check(isa<ConstantInt>(NumDeoptArgsV),
2867 "gc.statepoint number of deoptimization arguments "
2868 "must be constant integer",
2869 Call);
2870 const int NumDeoptArgs = cast<ConstantInt>(NumDeoptArgsV)->getZExtValue();
2871 Check(NumDeoptArgs == 0,
2872 "gc.statepoint w/inline deopt operands is deprecated", Call);
2873
2874 const int ExpectedNumArgs = 7 + NumCallArgs;
2875 Check(ExpectedNumArgs == (int)Call.arg_size(),
2876 "gc.statepoint too many arguments", Call);
2877
2878 // Check that the only uses of this gc.statepoint are gc.result or
2879 // gc.relocate calls which are tied to this statepoint and thus part
2880 // of the same statepoint sequence
2881 for (const User *U : Call.users()) {
2882 const CallInst *UserCall = dyn_cast<const CallInst>(U);
2883 Check(UserCall, "illegal use of statepoint token", Call, U);
2884 if (!UserCall)
2885 continue;
2886 Check(isa<GCRelocateInst>(UserCall) || isa<GCResultInst>(UserCall),
2887 "gc.result or gc.relocate are the only value uses "
2888 "of a gc.statepoint",
2889 Call, U);
2890 if (isa<GCResultInst>(UserCall)) {
2891 Check(UserCall->getArgOperand(0) == &Call,
2892 "gc.result connected to wrong gc.statepoint", Call, UserCall);
2893 } else if (isa<GCRelocateInst>(UserCall)) {
2894 Check(UserCall->getArgOperand(0) == &Call,
2895 "gc.relocate connected to wrong gc.statepoint", Call, UserCall);
2896 }
2897 }
2898
2899 // Note: It is legal for a single derived pointer to be listed multiple
2900 // times. It's non-optimal, but it is legal. It can also happen after
2901 // insertion if we strip a bitcast away.
2902 // Note: It is really tempting to check that each base is relocated and
2903 // that a derived pointer is never reused as a base pointer. This turns
2904 // out to be problematic since optimizations run after safepoint insertion
2905 // can recognize equality properties that the insertion logic doesn't know
2906 // about. See example statepoint.ll in the verifier subdirectory
2907}
2908
2909void Verifier::verifyFrameRecoverIndices() {
2910 for (auto &Counts : FrameEscapeInfo) {
2911 Function *F = Counts.first;
2912 unsigned EscapedObjectCount = Counts.second.first;
2913 unsigned MaxRecoveredIndex = Counts.second.second;
2914 Check(MaxRecoveredIndex <= EscapedObjectCount,
2915 "all indices passed to llvm.localrecover must be less than the "
2916 "number of arguments passed to llvm.localescape in the parent "
2917 "function",
2918 F);
2919 }
2920}
2921
2922static Instruction *getSuccPad(Instruction *Terminator) {
2923 BasicBlock *UnwindDest;
2924 if (auto *II = dyn_cast<InvokeInst>(Terminator))
2925 UnwindDest = II->getUnwindDest();
2926 else if (auto *CSI = dyn_cast<CatchSwitchInst>(Terminator))
2927 UnwindDest = CSI->getUnwindDest();
2928 else
2929 UnwindDest = cast<CleanupReturnInst>(Terminator)->getUnwindDest();
2930 return &*UnwindDest->getFirstNonPHIIt();
2931}
2932
2933void Verifier::verifySiblingFuncletUnwinds() {
2934 llvm::TimeTraceScope timeScope("Verifier verify sibling funclet unwinds");
2935 SmallPtrSet<Instruction *, 8> Visited;
2936 SmallPtrSet<Instruction *, 8> Active;
2937 for (const auto &Pair : SiblingFuncletInfo) {
2938 Instruction *PredPad = Pair.first;
2939 if (Visited.count(PredPad))
2940 continue;
2941 Active.insert(PredPad);
2942 Instruction *Terminator = Pair.second;
2943 do {
2944 Instruction *SuccPad = getSuccPad(Terminator);
2945 if (Active.count(SuccPad)) {
2946 // Found a cycle; report error
2947 Instruction *CyclePad = SuccPad;
2948 SmallVector<Instruction *, 8> CycleNodes;
2949 do {
2950 CycleNodes.push_back(CyclePad);
2951 Instruction *CycleTerminator = SiblingFuncletInfo[CyclePad];
2952 if (CycleTerminator != CyclePad)
2953 CycleNodes.push_back(CycleTerminator);
2954 CyclePad = getSuccPad(CycleTerminator);
2955 } while (CyclePad != SuccPad);
2956 Check(false, "EH pads can't handle each other's exceptions",
2957 ArrayRef<Instruction *>(CycleNodes));
2958 }
2959 // Don't re-walk a node we've already checked
2960 if (!Visited.insert(SuccPad).second)
2961 break;
2962 // Walk to this successor if it has a map entry.
2963 PredPad = SuccPad;
2964 auto TermI = SiblingFuncletInfo.find(PredPad);
2965 if (TermI == SiblingFuncletInfo.end())
2966 break;
2967 Terminator = TermI->second;
2968 Active.insert(PredPad);
2969 } while (true);
2970 // Each node only has one successor, so we've walked all the active
2971 // nodes' successors.
2972 Active.clear();
2973 }
2974}
2975
2976// visitFunction - Verify that a function is ok.
2977//
2978void Verifier::visitFunction(const Function &F) {
2979 visitGlobalValue(F);
2980
2981 // Check function arguments.
2982 FunctionType *FT = F.getFunctionType();
2983 unsigned NumArgs = F.arg_size();
2984
2985 Check(&Context == &F.getContext(),
2986 "Function context does not match Module context!", &F);
2987
2988 Check(!F.hasCommonLinkage(), "Functions may not have common linkage", &F);
2989 Check(FT->getNumParams() == NumArgs,
2990 "# formal arguments must match # of arguments for function type!", &F,
2991 FT);
2992 Check(F.getReturnType()->isFirstClassType() ||
2993 F.getReturnType()->isVoidTy() || F.getReturnType()->isStructTy(),
2994 "Functions cannot return aggregate values!", &F);
2995
2996 Check(!F.hasStructRetAttr() || F.getReturnType()->isVoidTy(),
2997 "Invalid struct return type!", &F);
2998
2999 if (MaybeAlign A = F.getAlign()) {
3000 Check(A->value() <= Value::MaximumAlignment,
3001 "huge alignment values are unsupported", &F);
3002 }
3003
3004 AttributeList Attrs = F.getAttributes();
3005
3006 Check(verifyAttributeCount(Attrs, FT->getNumParams()),
3007 "Attribute after last parameter!", &F);
3008
3009 bool IsIntrinsic = F.isIntrinsic();
3010
3011 // Check function attributes.
3012 verifyFunctionAttrs(FT, Attrs, &F, IsIntrinsic, /* IsInlineAsm */ false);
3013
3014 // On function declarations/definitions, we do not support the builtin
3015 // attribute. We do not check this in VerifyFunctionAttrs since that is
3016 // checking for Attributes that can/can not ever be on functions.
3017 Check(!Attrs.hasFnAttr(Attribute::Builtin),
3018 "Attribute 'builtin' can only be applied to a callsite.", &F);
3019
3020 Check(!Attrs.hasAttrSomewhere(Attribute::ElementType),
3021 "Attribute 'elementtype' can only be applied to a callsite.", &F);
3022
3023 Check(!Attrs.hasFnAttr("aarch64_zt0_undef"),
3024 "Attribute 'aarch64_zt0_undef' can only be applied to a callsite.");
3025
3026 if (Attrs.hasFnAttr(Attribute::Naked))
3027 for (const Argument &Arg : F.args())
3028 Check(Arg.use_empty(), "cannot use argument of naked function", &Arg);
3029
3030 // Check that this function meets the restrictions on this calling convention.
3031 // Sometimes varargs is used for perfectly forwarding thunks, so some of these
3032 // restrictions can be lifted.
3033 switch (F.getCallingConv()) {
3034 default:
3035 case CallingConv::C:
3036 break;
3037 case CallingConv::X86_INTR: {
3038 Check(F.arg_empty() || Attrs.hasParamAttr(0, Attribute::ByVal),
3039 "Calling convention parameter requires byval", &F);
3040 break;
3041 }
3042 case CallingConv::AMDGPU_KERNEL:
3043 case CallingConv::SPIR_KERNEL:
3044 case CallingConv::AMDGPU_CS_Chain:
3045 case CallingConv::AMDGPU_CS_ChainPreserve:
3046 Check(F.getReturnType()->isVoidTy(),
3047 "Calling convention requires void return type", &F);
3048 [[fallthrough]];
3049 case CallingConv::AMDGPU_VS:
3050 case CallingConv::AMDGPU_HS:
3051 case CallingConv::AMDGPU_GS:
3052 case CallingConv::AMDGPU_PS:
3053 case CallingConv::AMDGPU_CS:
3054 Check(!F.hasStructRetAttr(), "Calling convention does not allow sret", &F);
3055 if (F.getCallingConv() != CallingConv::SPIR_KERNEL) {
3056 const unsigned StackAS = DL.getAllocaAddrSpace();
3057 unsigned i = 0;
3058 for (const Argument &Arg : F.args()) {
3059 Check(!Attrs.hasParamAttr(i, Attribute::ByVal),
3060 "Calling convention disallows byval", &F);
3061 Check(!Attrs.hasParamAttr(i, Attribute::Preallocated),
3062 "Calling convention disallows preallocated", &F);
3063 Check(!Attrs.hasParamAttr(i, Attribute::InAlloca),
3064 "Calling convention disallows inalloca", &F);
3065
3066 if (Attrs.hasParamAttr(i, Attribute::ByRef)) {
3067 // FIXME: Should also disallow LDS and GDS, but we don't have the enum
3068 // value here.
3069 Check(Arg.getType()->getPointerAddressSpace() != StackAS,
3070 "Calling convention disallows stack byref", &F);
3071 }
3072
3073 ++i;
3074 }
3075 }
3076
3077 [[fallthrough]];
3078 case CallingConv::Fast:
3079 case CallingConv::Cold:
3080 case CallingConv::Intel_OCL_BI:
3081 case CallingConv::PTX_Kernel:
3082 case CallingConv::PTX_Device:
3083 Check(!F.isVarArg(),
3084 "Calling convention does not support varargs or "
3085 "perfect forwarding!",
3086 &F);
3087 break;
3088 case CallingConv::AMDGPU_Gfx_WholeWave:
3089 Check(!F.arg_empty() && F.arg_begin()->getType()->isIntegerTy(1),
3090 "Calling convention requires first argument to be i1", &F);
3091 Check(!F.arg_begin()->hasInRegAttr(),
3092 "Calling convention requires first argument to not be inreg", &F);
3093 Check(!F.isVarArg(),
3094 "Calling convention does not support varargs or "
3095 "perfect forwarding!",
3096 &F);
3097 break;
3098 }
3099
3100 // Check that the argument values match the function type for this function...
3101 unsigned i = 0;
3102 for (const Argument &Arg : F.args()) {
3103 Check(Arg.getType() == FT->getParamType(i),
3104 "Argument value does not match function argument type!", &Arg,
3105 FT->getParamType(i));
3106 Check(Arg.getType()->isFirstClassType(),
3107 "Function arguments must have first-class types!", &Arg);
3108 if (!IsIntrinsic) {
3109 Check(!Arg.getType()->isMetadataTy(),
3110 "Function takes metadata but isn't an intrinsic", &Arg, &F);
3111 Check(!Arg.getType()->isTokenLikeTy(),
3112 "Function takes token but isn't an intrinsic", &Arg, &F);
3113 Check(!Arg.getType()->isX86_AMXTy(),
3114 "Function takes x86_amx but isn't an intrinsic", &Arg, &F);
3115 }
3116
3117 // Check that swifterror argument is only used by loads and stores.
3118 if (Attrs.hasParamAttr(i, Attribute::SwiftError)) {
3119 verifySwiftErrorValue(&Arg);
3120 }
3121 ++i;
3122 }
3123
3124 if (!IsIntrinsic) {
3125 Check(!F.getReturnType()->isTokenLikeTy(),
3126 "Function returns a token but isn't an intrinsic", &F);
3127 Check(!F.getReturnType()->isX86_AMXTy(),
3128 "Function returns a x86_amx but isn't an intrinsic", &F);
3129 }
3130
3131 // Get the function metadata attachments.
3132 SmallVector<std::pair<unsigned, MDNode *>, 4> MDs;
3133 F.getAllMetadata(MDs);
3134 assert(F.hasMetadata() != MDs.empty() && "Bit out-of-sync");
3135 verifyFunctionMetadata(MDs);
3136
3137 // Check validity of the personality function
3138 if (F.hasPersonalityFn()) {
3139 auto *Per = dyn_cast<Function>(F.getPersonalityFn()->stripPointerCasts());
3140 if (Per)
3141 Check(Per->getParent() == F.getParent(),
3142 "Referencing personality function in another module!", &F,
3143 F.getParent(), Per, Per->getParent());
3144 }
3145
3146 // EH funclet coloring can be expensive, recompute on-demand
3147 BlockEHFuncletColors.clear();
3148
3149 if (F.isMaterializable()) {
3150 // Function has a body somewhere we can't see.
3151 Check(MDs.empty(), "unmaterialized function cannot have metadata", &F,
3152 MDs.empty() ? nullptr : MDs.front().second);
3153 } else if (F.isDeclaration()) {
3154 for (const auto &I : MDs) {
3155 // This is used for call site debug information.
3156 CheckDI(I.first != LLVMContext::MD_dbg ||
3157 !cast<DISubprogram>(I.second)->isDistinct(),
3158 "function declaration may only have a unique !dbg attachment",
3159 &F);
3160 Check(I.first != LLVMContext::MD_prof,
3161 "function declaration may not have a !prof attachment", &F);
3162
3163 // Verify the metadata itself.
3164 visitMDNode(*I.second, AreDebugLocsAllowed::Yes);
3165 }
3166 Check(!F.hasPersonalityFn(),
3167 "Function declaration shouldn't have a personality routine", &F);
3168 } else {
3169 // Verify that this function (which has a body) is not named "llvm.*". It
3170 // is not legal to define intrinsics.
3171 Check(!IsIntrinsic, "llvm intrinsics cannot be defined!", &F);
3172
3173 // Check the entry node
3174 const BasicBlock *Entry = &F.getEntryBlock();
3175 Check(pred_empty(Entry),
3176 "Entry block to function must not have predecessors!", Entry);
3177
3178 // The address of the entry block cannot be taken, unless it is dead.
3179 if (Entry->hasAddressTaken()) {
3180 Check(!BlockAddress::lookup(Entry)->isConstantUsed(),
3181 "blockaddress may not be used with the entry block!", Entry);
3182 }
3183
3184 unsigned NumDebugAttachments = 0, NumProfAttachments = 0,
3185 NumKCFIAttachments = 0;
3186 // Visit metadata attachments.
3187 for (const auto &I : MDs) {
3188 // Verify that the attachment is legal.
3189 auto AllowLocs = AreDebugLocsAllowed::No;
3190 switch (I.first) {
3191 default:
3192 break;
3193 case LLVMContext::MD_dbg: {
3194 ++NumDebugAttachments;
3195 CheckDI(NumDebugAttachments == 1,
3196 "function must have a single !dbg attachment", &F, I.second);
3197 CheckDI(isa<DISubprogram>(I.second),
3198 "function !dbg attachment must be a subprogram", &F, I.second);
3199 CheckDI(cast<DISubprogram>(I.second)->isDistinct(),
3200 "function definition may only have a distinct !dbg attachment",
3201 &F);
3202
3203 auto *SP = cast<DISubprogram>(I.second);
3204 const Function *&AttachedTo = DISubprogramAttachments[SP];
3205 CheckDI(!AttachedTo || AttachedTo == &F,
3206 "DISubprogram attached to more than one function", SP, &F);
3207 AttachedTo = &F;
3208 AllowLocs = AreDebugLocsAllowed::Yes;
3209 break;
3210 }
3211 case LLVMContext::MD_prof:
3212 ++NumProfAttachments;
3213 Check(NumProfAttachments == 1,
3214 "function must have a single !prof attachment", &F, I.second);
3215 break;
3216 case LLVMContext::MD_kcfi_type:
3217 ++NumKCFIAttachments;
3218 Check(NumKCFIAttachments == 1,
3219 "function must have a single !kcfi_type attachment", &F,
3220 I.second);
3221 break;
3222 }
3223
3224 // Verify the metadata itself.
3225 visitMDNode(*I.second, AllowLocs);
3226 }
3227 }
3228
3229 // If this function is actually an intrinsic, verify that it is only used in
3230 // direct call/invokes, never having its "address taken".
3231 // Only do this if the module is materialized, otherwise we don't have all the
3232 // uses.
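// Illustrative example (not part of the original source): IR that escapes an
// intrinsic's address, e.g.
//   store ptr @llvm.donothing, ptr %slot
// trips the check below, because intrinsics may only appear as the callee of
// a direct call or invoke.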
3233 if (F.isIntrinsic() && F.getParent()->isMaterialized()) {
3234 const User *U;
3235 if (F.hasAddressTaken(&U, false, true, false,
3236 /*IgnoreARCAttachedCall=*/true))
3237 Check(false, "Invalid user of intrinsic instruction!", U);
3238 }
3239
3240 // Check intrinsics' signatures.
3241 switch (F.getIntrinsicID()) {
3242 case Intrinsic::experimental_gc_get_pointer_base: {
3243 FunctionType *FT = F.getFunctionType();
3244 Check(FT->getNumParams() == 1, "wrong number of parameters", F);
3245 Check(isa<PointerType>(F.getReturnType()),
3246 "gc.get.pointer.base must return a pointer", F);
3247 Check(FT->getParamType(0) == F.getReturnType(),
3248 "gc.get.pointer.base operand and result must be of the same type", F);
3249 break;
3250 }
3251 case Intrinsic::experimental_gc_get_pointer_offset: {
3252 FunctionType *FT = F.getFunctionType();
3253 Check(FT->getNumParams() == 1, "wrong number of parameters", F);
3254 Check(isa<PointerType>(FT->getParamType(0)),
3255 "gc.get.pointer.offset operand must be a pointer", F);
3256 Check(F.getReturnType()->isIntegerTy(),
3257 "gc.get.pointer.offset must return integer", F);
3258 break;
3259 }
3260 }
3261
3262 auto *N = F.getSubprogram();
3263 HasDebugInfo = (N != nullptr);
3264 if (!HasDebugInfo)
3265 return;
3266
3267 // Check that all !dbg attachments lead back to N.
3268 //
3269 // FIXME: Check this incrementally while visiting !dbg attachments.
3270 // FIXME: Only check when N is the canonical subprogram for F.
3271 SmallPtrSet<const MDNode *, 32> Seen;
3272 auto VisitDebugLoc = [&](const Instruction &I, const MDNode *Node) {
3273 // Be careful about using DILocation here since we might be dealing with
3274 // broken code (this is the Verifier after all).
3275 const DILocation *DL = dyn_cast_or_null<DILocation>(Node);
3276 if (!DL)
3277 return;
3278 if (!Seen.insert(DL).second)
3279 return;
3280
3281 Metadata *Parent = DL->getRawScope();
3282 CheckDI(Parent && isa<DILocalScope>(Parent),
3283 "DILocation's scope must be a DILocalScope", N, &F, &I, DL, Parent);
3284
3285 DILocalScope *Scope = DL->getInlinedAtScope();
3286 Check(Scope, "Failed to find DILocalScope", DL);
3287
3288 if (!Seen.insert(Scope).second)
3289 return;
3290
3291 DISubprogram *SP = Scope->getSubprogram();
3292
3293 // Scope and SP could be the same MDNode and we don't want to skip
3294 // validation in that case
3295 if ((Scope != SP) && !Seen.insert(SP).second)
3296 return;
3297
3298 CheckDI(SP->describes(&F),
3299 "!dbg attachment points at wrong subprogram for function", N, &F,
3300 &I, DL, Scope, SP);
3301 };
3302 for (auto &BB : F)
3303 for (auto &I : BB) {
3304 VisitDebugLoc(I, I.getDebugLoc().getAsMDNode());
3305 // The llvm.loop annotations also contain two DILocations.
3306 if (auto MD = I.getMetadata(LLVMContext::MD_loop))
3307 for (unsigned i = 1; i < MD->getNumOperands(); ++i)
3308 VisitDebugLoc(I, dyn_cast_or_null<MDNode>(MD->getOperand(i)));
3309 if (BrokenDebugInfo)
3310 return;
3311 }
3312}
3313
3314 // visitBasicBlock - Verify that a basic block is well formed...
3315//
3316void Verifier::visitBasicBlock(BasicBlock &BB) {
3317 InstsInThisBlock.clear();
3318 ConvergenceVerifyHelper.visit(BB);
3319
3320 // Ensure that basic blocks have terminators!
3321 Check(BB.getTerminator(), "Basic Block does not have terminator!", &BB);
3322
3323 // Check constraints that this basic block imposes on all of the PHI nodes in
3324 // it.
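// Illustrative example (not part of the original source): in a block whose
// only predecessor is %pre, the node
//   %v = phi i32 [ 0, %pre ], [ 1, %other ]
// fails the checks below, both because its entry count does not match the
// single predecessor and because %other is not a predecessor of the block.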
3325 if (isa<PHINode>(BB.front())) {
3326 SmallVector<BasicBlock *, 8> Preds(predecessors(&BB));
3327 SmallVector<std::pair<BasicBlock *, Value *>, 8> Values;
3328 llvm::sort(Preds);
3329 for (const PHINode &PN : BB.phis()) {
3330 Check(PN.getNumIncomingValues() == Preds.size(),
3331 "PHINode should have one entry for each predecessor of its "
3332 "parent basic block!",
3333 &PN);
3334
3335 // Get and sort all incoming values in the PHI node...
3336 Values.clear();
3337 Values.reserve(PN.getNumIncomingValues());
3338 for (unsigned i = 0, e = PN.getNumIncomingValues(); i != e; ++i)
3339 Values.push_back(
3340 std::make_pair(PN.getIncomingBlock(i), PN.getIncomingValue(i)));
3341 llvm::sort(Values);
3342
3343 for (unsigned i = 0, e = Values.size(); i != e; ++i) {
3344 // Check to make sure that if there is more than one entry for a
3345 // particular basic block in this PHI node, that the incoming values are
3346 // all identical.
3347 //
3348 Check(i == 0 || Values[i].first != Values[i - 1].first ||
3349 Values[i].second == Values[i - 1].second,
3350 "PHI node has multiple entries for the same basic block with "
3351 "different incoming values!",
3352 &PN, Values[i].first, Values[i].second, Values[i - 1].second);
3353
3354 // Check to make sure that the predecessors and PHI node entries are
3355 // matched up.
3356 Check(Values[i].first == Preds[i],
3357 "PHI node entries do not match predecessors!", &PN,
3358 Values[i].first, Preds[i]);
3359 }
3360 }
3361 }
3362
3363 // Check that all instructions have their parent pointers set up correctly.
3364 for (auto &I : BB)
3365 {
3366 Check(I.getParent() == &BB, "Instruction has bogus parent pointer!");
3367 }
3368
3369 // Confirm that no issues arise from the debug program.
3370 CheckDI(!BB.getTrailingDbgRecords(), "Basic Block has trailing DbgRecords!",
3371 &BB);
3372}
3373
3374void Verifier::visitTerminator(Instruction &I) {
3375 // Ensure that terminators only exist at the end of the basic block.
3376 Check(&I == I.getParent()->getTerminator(),
3377 "Terminator found in the middle of a basic block!", I.getParent());
3378 visitInstruction(I);
3379}
3380
3381void Verifier::visitBranchInst(BranchInst &BI) {
3382 if (BI.isConditional()) {
3383 Check(BI.getCondition()->getType()->isIntegerTy(1),
3384 "Branch condition is not 'i1' type!", &BI, BI.getCondition());
3385 }
3386 visitTerminator(BI);
3387}
3388
3389void Verifier::visitReturnInst(ReturnInst &RI) {
3390 Function *F = RI.getParent()->getParent();
3391 unsigned N = RI.getNumOperands();
3392 if (F->getReturnType()->isVoidTy())
3393 Check(N == 0,
3394 "Found return instr that returns non-void in Function of void "
3395 "return type!",
3396 &RI, F->getReturnType());
3397 else
3398 Check(N == 1 && F->getReturnType() == RI.getOperand(0)->getType(),
3399 "Function return type does not match operand "
3400 "type of return inst!",
3401 &RI, F->getReturnType());
3402
3403 // Check to make sure that the return value has necessary properties for
3404 // terminators...
3405 visitTerminator(RI);
3406}
3407
3408void Verifier::visitSwitchInst(SwitchInst &SI) {
3409 Check(SI.getType()->isVoidTy(), "Switch must have void result type!", &SI);
3410 // Check to make sure that all of the constants in the switch instruction
3411 // have the same type as the switched-on value.
3412 Type *SwitchTy = SI.getCondition()->getType();
3413 SmallPtrSet<ConstantInt*, 32> Constants;
3414 for (auto &Case : SI.cases()) {
3415 Check(isa<ConstantInt>(SI.getOperand(Case.getCaseIndex() * 2 + 2)),
3416 "Case value is not a constant integer.", &SI);
3417 Check(Case.getCaseValue()->getType() == SwitchTy,
3418 "Switch constants must all be same type as switch value!", &SI);
3419 Check(Constants.insert(Case.getCaseValue()).second,
3420 "Duplicate integer as switch case", &SI, Case.getCaseValue());
3421 }
3422
3423 visitTerminator(SI);
3424}
3425
3426 void Verifier::visitIndirectBrInst(IndirectBrInst &BI) {
3427 Check(BI.getAddress()->getType()->isPointerTy(),
3428 "Indirectbr operand must have pointer type!", &BI);
3429 for (unsigned i = 0, e = BI.getNumDestinations(); i != e; ++i)
3430 Check(BI.getDestination(i)->getType()->isPointerTy(),
3431 "Indirectbr destinations must all have pointer type!", &BI);
3432
3433 visitTerminator(BI);
3434}
3435
3436void Verifier::visitCallBrInst(CallBrInst &CBI) {
3437 Check(CBI.isInlineAsm(), "Callbr is currently only used for asm-goto!", &CBI);
3438 const InlineAsm *IA = cast<InlineAsm>(CBI.getCalledOperand());
3439 Check(!IA->canThrow(), "Unwinding from Callbr is not allowed");
3440
3441 verifyInlineAsmCall(CBI);
3442 visitTerminator(CBI);
3443}
3444
3445void Verifier::visitSelectInst(SelectInst &SI) {
3446 Check(!SelectInst::areInvalidOperands(SI.getOperand(0), SI.getOperand(1),
3447 SI.getOperand(2)),
3448 "Invalid operands for select instruction!", &SI);
3449
3450 Check(SI.getTrueValue()->getType() == SI.getType(),
3451 "Select values must have same type as select instruction!", &SI);
3452 visitInstruction(SI);
3453}
3454
3455 /// visitUserOp1 - User-defined operators shouldn't live beyond the lifetime
3456 /// of a pass; if any exist, it's an error.
3457///
3458void Verifier::visitUserOp1(Instruction &I) {
3459 Check(false, "User-defined operators should not live outside of a pass!", &I);
3460}
3461
3462void Verifier::visitTruncInst(TruncInst &I) {
3463 // Get the source and destination types
3464 Type *SrcTy = I.getOperand(0)->getType();
3465 Type *DestTy = I.getType();
3466
3467 // Get the size of the types in bits, we'll need this later
3468 unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
3469 unsigned DestBitSize = DestTy->getScalarSizeInBits();
3470
3471 Check(SrcTy->isIntOrIntVectorTy(), "Trunc only operates on integer", &I);
3472 Check(DestTy->isIntOrIntVectorTy(), "Trunc only produces integer", &I);
3473 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(),
3474 "trunc source and destination must both be a vector or neither", &I);
3475 Check(SrcBitSize > DestBitSize, "DestTy too big for Trunc", &I);
3476
3477 visitInstruction(I);
3478}
3479
3480void Verifier::visitZExtInst(ZExtInst &I) {
3481 // Get the source and destination types
3482 Type *SrcTy = I.getOperand(0)->getType();
3483 Type *DestTy = I.getType();
3484
3485 // Get the size of the types in bits, we'll need this later
3486 Check(SrcTy->isIntOrIntVectorTy(), "ZExt only operates on integer", &I);
3487 Check(DestTy->isIntOrIntVectorTy(), "ZExt only produces an integer", &I);
3488 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(),
3489 "zext source and destination must both be a vector or neither", &I);
3490 unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
3491 unsigned DestBitSize = DestTy->getScalarSizeInBits();
3492
3493 Check(SrcBitSize < DestBitSize, "Type too small for ZExt", &I);
3494
3495 visitInstruction(I);
3496}
3497
3498void Verifier::visitSExtInst(SExtInst &I) {
3499 // Get the source and destination types
3500 Type *SrcTy = I.getOperand(0)->getType();
3501 Type *DestTy = I.getType();
3502
3503 // Get the size of the types in bits, we'll need this later
3504 unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
3505 unsigned DestBitSize = DestTy->getScalarSizeInBits();
3506
3507 Check(SrcTy->isIntOrIntVectorTy(), "SExt only operates on integer", &I);
3508 Check(DestTy->isIntOrIntVectorTy(), "SExt only produces an integer", &I);
3509 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(),
3510 "sext source and destination must both be a vector or neither", &I);
3511 Check(SrcBitSize < DestBitSize, "Type too small for SExt", &I);
3512
3513 visitInstruction(I);
3514}
3515
3516void Verifier::visitFPTruncInst(FPTruncInst &I) {
3517 // Get the source and destination types
3518 Type *SrcTy = I.getOperand(0)->getType();
3519 Type *DestTy = I.getType();
3520 // Get the size of the types in bits, we'll need this later
3521 unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
3522 unsigned DestBitSize = DestTy->getScalarSizeInBits();
3523
3524 Check(SrcTy->isFPOrFPVectorTy(), "FPTrunc only operates on FP", &I);
3525 Check(DestTy->isFPOrFPVectorTy(), "FPTrunc only produces an FP", &I);
3526 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(),
3527 "fptrunc source and destination must both be a vector or neither", &I);
3528 Check(SrcBitSize > DestBitSize, "DestTy too big for FPTrunc", &I);
3529
3530 visitInstruction(I);
3531}
3532
3533void Verifier::visitFPExtInst(FPExtInst &I) {
3534 // Get the source and destination types
3535 Type *SrcTy = I.getOperand(0)->getType();
3536 Type *DestTy = I.getType();
3537
3538 // Get the size of the types in bits, we'll need this later
3539 unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
3540 unsigned DestBitSize = DestTy->getScalarSizeInBits();
3541
3542 Check(SrcTy->isFPOrFPVectorTy(), "FPExt only operates on FP", &I);
3543 Check(DestTy->isFPOrFPVectorTy(), "FPExt only produces an FP", &I);
3544 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(),
3545 "fpext source and destination must both be a vector or neither", &I);
3546 Check(SrcBitSize < DestBitSize, "DestTy too small for FPExt", &I);
3547
3548 visitInstruction(I);
3549}
3550
3551void Verifier::visitUIToFPInst(UIToFPInst &I) {
3552 // Get the source and destination types
3553 Type *SrcTy = I.getOperand(0)->getType();
3554 Type *DestTy = I.getType();
3555
3556 bool SrcVec = SrcTy->isVectorTy();
3557 bool DstVec = DestTy->isVectorTy();
3558
3559 Check(SrcVec == DstVec,
3560 "UIToFP source and dest must both be vector or scalar", &I);
3561 Check(SrcTy->isIntOrIntVectorTy(),
3562 "UIToFP source must be integer or integer vector", &I);
3563 Check(DestTy->isFPOrFPVectorTy(), "UIToFP result must be FP or FP vector",
3564 &I);
3565
3566 if (SrcVec && DstVec)
3567 Check(cast<VectorType>(SrcTy)->getElementCount() ==
3568 cast<VectorType>(DestTy)->getElementCount(),
3569 "UIToFP source and dest vector length mismatch", &I);
3570
3571 visitInstruction(I);
3572}
3573
3574void Verifier::visitSIToFPInst(SIToFPInst &I) {
3575 // Get the source and destination types
3576 Type *SrcTy = I.getOperand(0)->getType();
3577 Type *DestTy = I.getType();
3578
3579 bool SrcVec = SrcTy->isVectorTy();
3580 bool DstVec = DestTy->isVectorTy();
3581
3582 Check(SrcVec == DstVec,
3583 "SIToFP source and dest must both be vector or scalar", &I);
3584 Check(SrcTy->isIntOrIntVectorTy(),
3585 "SIToFP source must be integer or integer vector", &I);
3586 Check(DestTy->isFPOrFPVectorTy(), "SIToFP result must be FP or FP vector",
3587 &I);
3588
3589 if (SrcVec && DstVec)
3590 Check(cast<VectorType>(SrcTy)->getElementCount() ==
3591 cast<VectorType>(DestTy)->getElementCount(),
3592 "SIToFP source and dest vector length mismatch", &I);
3593
3594 visitInstruction(I);
3595}
3596
3597void Verifier::visitFPToUIInst(FPToUIInst &I) {
3598 // Get the source and destination types
3599 Type *SrcTy = I.getOperand(0)->getType();
3600 Type *DestTy = I.getType();
3601
3602 bool SrcVec = SrcTy->isVectorTy();
3603 bool DstVec = DestTy->isVectorTy();
3604
3605 Check(SrcVec == DstVec,
3606 "FPToUI source and dest must both be vector or scalar", &I);
3607 Check(SrcTy->isFPOrFPVectorTy(), "FPToUI source must be FP or FP vector", &I);
3608 Check(DestTy->isIntOrIntVectorTy(),
3609 "FPToUI result must be integer or integer vector", &I);
3610
3611 if (SrcVec && DstVec)
3612 Check(cast<VectorType>(SrcTy)->getElementCount() ==
3613 cast<VectorType>(DestTy)->getElementCount(),
3614 "FPToUI source and dest vector length mismatch", &I);
3615
3616 visitInstruction(I);
3617}
3618
3619void Verifier::visitFPToSIInst(FPToSIInst &I) {
3620 // Get the source and destination types
3621 Type *SrcTy = I.getOperand(0)->getType();
3622 Type *DestTy = I.getType();
3623
3624 bool SrcVec = SrcTy->isVectorTy();
3625 bool DstVec = DestTy->isVectorTy();
3626
3627 Check(SrcVec == DstVec,
3628 "FPToSI source and dest must both be vector or scalar", &I);
3629 Check(SrcTy->isFPOrFPVectorTy(), "FPToSI source must be FP or FP vector", &I);
3630 Check(DestTy->isIntOrIntVectorTy(),
3631 "FPToSI result must be integer or integer vector", &I);
3632
3633 if (SrcVec && DstVec)
3634 Check(cast<VectorType>(SrcTy)->getElementCount() ==
3635 cast<VectorType>(DestTy)->getElementCount(),
3636 "FPToSI source and dest vector length mismatch", &I);
3637
3638 visitInstruction(I);
3639}
3640
3641void Verifier::checkPtrToAddr(Type *SrcTy, Type *DestTy, const Value &V) {
3642 Check(SrcTy->isPtrOrPtrVectorTy(), "PtrToAddr source must be pointer", V);
3643 Check(DestTy->isIntOrIntVectorTy(), "PtrToAddr result must be integral", V);
3644 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(), "PtrToAddr type mismatch",
3645 V);
3646
3647 if (SrcTy->isVectorTy()) {
3648 auto *VSrc = cast<VectorType>(SrcTy);
3649 auto *VDest = cast<VectorType>(DestTy);
3650 Check(VSrc->getElementCount() == VDest->getElementCount(),
3651 "PtrToAddr vector length mismatch", V);
3652 }
3653
3654 Type *AddrTy = DL.getAddressType(SrcTy);
3655 Check(AddrTy == DestTy, "PtrToAddr result must be address width", V);
3656}
3657
3658void Verifier::visitPtrToAddrInst(PtrToAddrInst &I) {
3659 checkPtrToAddr(I.getOperand(0)->getType(), I.getType(), I);
3660 visitInstruction(I);
3661}
3662
3663void Verifier::visitPtrToIntInst(PtrToIntInst &I) {
3664 // Get the source and destination types
3665 Type *SrcTy = I.getOperand(0)->getType();
3666 Type *DestTy = I.getType();
3667
3668 Check(SrcTy->isPtrOrPtrVectorTy(), "PtrToInt source must be pointer", &I);
3669
3670 Check(DestTy->isIntOrIntVectorTy(), "PtrToInt result must be integral", &I);
3671 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(), "PtrToInt type mismatch",
3672 &I);
3673
3674 if (SrcTy->isVectorTy()) {
3675 auto *VSrc = cast<VectorType>(SrcTy);
3676 auto *VDest = cast<VectorType>(DestTy);
3677 Check(VSrc->getElementCount() == VDest->getElementCount(),
3678 "PtrToInt Vector length mismatch", &I);
3679 }
3680
3681 visitInstruction(I);
3682}
3683
3684void Verifier::visitIntToPtrInst(IntToPtrInst &I) {
3685 // Get the source and destination types
3686 Type *SrcTy = I.getOperand(0)->getType();
3687 Type *DestTy = I.getType();
3688
3689 Check(SrcTy->isIntOrIntVectorTy(), "IntToPtr source must be an integral", &I);
3690 Check(DestTy->isPtrOrPtrVectorTy(), "IntToPtr result must be a pointer", &I);
3691
3692 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(), "IntToPtr type mismatch",
3693 &I);
3694 if (SrcTy->isVectorTy()) {
3695 auto *VSrc = cast<VectorType>(SrcTy);
3696 auto *VDest = cast<VectorType>(DestTy);
3697 Check(VSrc->getElementCount() == VDest->getElementCount(),
3698 "IntToPtr Vector length mismatch", &I);
3699 }
3700 visitInstruction(I);
3701}
3702
3703void Verifier::visitBitCastInst(BitCastInst &I) {
3704 Check(
3705 CastInst::castIsValid(Instruction::BitCast, I.getOperand(0), I.getType()),
3706 "Invalid bitcast", &I);
3707 visitInstruction(I);
3708}
3709
3710void Verifier::visitAddrSpaceCastInst(AddrSpaceCastInst &I) {
3711 Type *SrcTy = I.getOperand(0)->getType();
3712 Type *DestTy = I.getType();
3713
3714 Check(SrcTy->isPtrOrPtrVectorTy(), "AddrSpaceCast source must be a pointer",
3715 &I);
3716 Check(DestTy->isPtrOrPtrVectorTy(), "AddrSpaceCast result must be a pointer",
3717 &I);
3718 Check(SrcTy->getPointerAddressSpace() != DestTy->getPointerAddressSpace(),
3719 "AddrSpaceCast must be between different address spaces", &I);
3720 if (auto *SrcVTy = dyn_cast<VectorType>(SrcTy))
3721 Check(SrcVTy->getElementCount() ==
3722 cast<VectorType>(DestTy)->getElementCount(),
3723 "AddrSpaceCast vector pointer number of elements mismatch", &I);
3724 visitInstruction(I);
3725}
3726
3727/// visitPHINode - Ensure that a PHI node is well formed.
3728///
3729void Verifier::visitPHINode(PHINode &PN) {
3730 // Ensure that the PHI nodes are all grouped together at the top of the block.
3731 // This can be tested by checking whether the instruction before this is
3732 // either nonexistent (because this is begin()) or is a PHI node. If not,
3733 // then there is some other instruction before a PHI.
3734 Check(&PN == &PN.getParent()->front() ||
3735 isa<PHINode>(--BasicBlock::iterator(&PN)),
3736 "PHI nodes not grouped at top of basic block!", &PN, PN.getParent());
3737
3738 // Check that a PHI doesn't yield a Token.
3739 Check(!PN.getType()->isTokenLikeTy(), "PHI nodes cannot have token type!");
3740
3741 // Check that all of the values of the PHI node have the same type as the
3742 // result.
3743 for (Value *IncValue : PN.incoming_values()) {
3744 Check(PN.getType() == IncValue->getType(),
3745 "PHI node operands are not the same type as the result!", &PN);
3746 }
3747
3748 // All other PHI node constraints are checked in the visitBasicBlock method.
3749
3750 visitInstruction(PN);
3751}
3752
3753 void Verifier::visitCallBase(CallBase &Call) {
3754 Check(Call.getCalledOperand()->getType()->isPointerTy(),
3755 "Called function must be a pointer!", Call);
3756 FunctionType *FTy = Call.getFunctionType();
3757
3758 // Verify that the correct number of arguments are being passed
3759 if (FTy->isVarArg())
3760 Check(Call.arg_size() >= FTy->getNumParams(),
3761 "Called function requires more parameters than were provided!", Call);
3762 else
3763 Check(Call.arg_size() == FTy->getNumParams(),
3764 "Incorrect number of arguments passed to called function!", Call);
3765
3766 // Verify that all arguments to the call match the function type.
3767 for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i)
3768 Check(Call.getArgOperand(i)->getType() == FTy->getParamType(i),
3769 "Call parameter type does not match function signature!",
3770 Call.getArgOperand(i), FTy->getParamType(i), Call);
3771
3772 AttributeList Attrs = Call.getAttributes();
3773
3774 Check(verifyAttributeCount(Attrs, Call.arg_size()),
3775 "Attribute after last parameter!", Call);
3776
3777 Function *Callee =
3778 dyn_cast<Function>(Call.getCalledOperand()->stripPointerCasts());
3779 bool IsIntrinsic = Callee && Callee->isIntrinsic();
3780 if (IsIntrinsic)
3781 Check(Callee->getValueType() == FTy,
3782 "Intrinsic called with incompatible signature", Call);
3783
3784 // Verify if the calling convention of the callee is callable.
3785 Check(isCallableCC(Call.getCallingConv()),
3786 "calling convention does not permit calls", Call);
3787
3788 // Disallow passing/returning values with alignment higher than we can
3789 // represent.
3790 // FIXME: Consider making DataLayout cap the alignment, so this isn't
3791 // necessary.
3792 auto VerifyTypeAlign = [&](Type *Ty, const Twine &Message) {
3793 if (!Ty->isSized())
3794 return;
3795 Align ABIAlign = DL.getABITypeAlign(Ty);
3796 Check(ABIAlign.value() <= Value::MaximumAlignment,
3797 "Incorrect alignment of " + Message + " to called function!", Call);
3798 };
3799
3800 if (!IsIntrinsic) {
3801 VerifyTypeAlign(FTy->getReturnType(), "return type");
3802 for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i) {
3803 Type *Ty = FTy->getParamType(i);
3804 VerifyTypeAlign(Ty, "argument passed");
3805 }
3806 }
3807
3808 if (Attrs.hasFnAttr(Attribute::Speculatable)) {
3809 // Don't allow speculatable on call sites, unless the underlying function
3810 // declaration is also speculatable.
3811 Check(Callee && Callee->isSpeculatable(),
3812 "speculatable attribute may not apply to call sites", Call);
3813 }
3814
3815 if (Attrs.hasFnAttr(Attribute::Preallocated)) {
3816 Check(Call.getIntrinsicID() == Intrinsic::call_preallocated_arg,
3817 "preallocated as a call site attribute can only be on "
3818 "llvm.call.preallocated.arg");
3819 }
3820
3821 // Verify call attributes.
3822 verifyFunctionAttrs(FTy, Attrs, &Call, IsIntrinsic, Call.isInlineAsm());
3823
3824 // Conservatively check the inalloca argument.
3825 // We have a bug if we can find that there is an underlying alloca without
3826 // inalloca.
3827 if (Call.hasInAllocaArgument()) {
3828 Value *InAllocaArg = Call.getArgOperand(FTy->getNumParams() - 1);
3829 if (auto AI = dyn_cast<AllocaInst>(InAllocaArg->stripInBoundsOffsets()))
3830 Check(AI->isUsedWithInAlloca(),
3831 "inalloca argument for call has mismatched alloca", AI, Call);
3832 }
3833
3834 // For each argument of the callsite, if it has the swifterror argument,
3835 // make sure the underlying alloca/parameter it comes from has a swifterror as
3836 // well.
3837 for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i) {
3838 if (Call.paramHasAttr(i, Attribute::SwiftError)) {
3839 Value *SwiftErrorArg = Call.getArgOperand(i);
3840 if (auto AI = dyn_cast<AllocaInst>(SwiftErrorArg->stripInBoundsOffsets())) {
3841 Check(AI->isSwiftError(),
3842 "swifterror argument for call has mismatched alloca", AI, Call);
3843 continue;
3844 }
3845 auto ArgI = dyn_cast<Argument>(SwiftErrorArg);
3846 Check(ArgI, "swifterror argument should come from an alloca or parameter",
3847 SwiftErrorArg, Call);
3848 Check(ArgI->hasSwiftErrorAttr(),
3849 "swifterror argument for call has mismatched parameter", ArgI,
3850 Call);
3851 }
3852
3853 if (Attrs.hasParamAttr(i, Attribute::ImmArg)) {
3854 // Don't allow immarg on call sites, unless the underlying declaration
3855 // also has the matching immarg.
3856 Check(Callee && Callee->hasParamAttribute(i, Attribute::ImmArg),
3857 "immarg may not apply only to call sites", Call.getArgOperand(i),
3858 Call);
3859 }
3860
3861 if (Call.paramHasAttr(i, Attribute::ImmArg)) {
3862 Value *ArgVal = Call.getArgOperand(i);
3863 Check(isa<ConstantInt>(ArgVal) || isa<ConstantFP>(ArgVal),
3864 "immarg operand has non-immediate parameter", ArgVal, Call);
3865
3866 // If the imm-arg is an integer and also has a range attached,
3867 // check if the given value is within the range.
3868 if (Call.paramHasAttr(i, Attribute::Range)) {
3869 if (auto *CI = dyn_cast<ConstantInt>(ArgVal)) {
3870 const ConstantRange &CR =
3871 Call.getParamAttr(i, Attribute::Range).getValueAsConstantRange();
3872 Check(CR.contains(CI->getValue()),
3873 "immarg value " + Twine(CI->getValue().getSExtValue()) +
3874 " out of range [" + Twine(CR.getLower().getSExtValue()) +
3875 ", " + Twine(CR.getUpper().getSExtValue()) + ")",
3876 Call);
3877 }
3878 }
3879 }
3880
3881 if (Call.paramHasAttr(i, Attribute::Preallocated)) {
3882 Value *ArgVal = Call.getArgOperand(i);
3883 bool hasOB =
3884 Call.countOperandBundlesOfType(LLVMContext::OB_preallocated) != 0;
3885 bool isMustTail = Call.isMustTailCall();
3886 Check(hasOB != isMustTail,
3887 "preallocated operand either requires a preallocated bundle or "
3888 "the call to be musttail (but not both)",
3889 ArgVal, Call);
3890 }
3891 }
3892
3893 if (FTy->isVarArg()) {
3894 // FIXME? is 'nest' even legal here?
3895 bool SawNest = false;
3896 bool SawReturned = false;
3897
3898 for (unsigned Idx = 0; Idx < FTy->getNumParams(); ++Idx) {
3899 if (Attrs.hasParamAttr(Idx, Attribute::Nest))
3900 SawNest = true;
3901 if (Attrs.hasParamAttr(Idx, Attribute::Returned))
3902 SawReturned = true;
3903 }
3904
3905 // Check attributes on the varargs part.
3906 for (unsigned Idx = FTy->getNumParams(); Idx < Call.arg_size(); ++Idx) {
3907 Type *Ty = Call.getArgOperand(Idx)->getType();
3908 AttributeSet ArgAttrs = Attrs.getParamAttrs(Idx);
3909 verifyParameterAttrs(ArgAttrs, Ty, &Call);
3910
3911 if (ArgAttrs.hasAttribute(Attribute::Nest)) {
3912 Check(!SawNest, "More than one parameter has attribute nest!", Call);
3913 SawNest = true;
3914 }
3915
3916 if (ArgAttrs.hasAttribute(Attribute::Returned)) {
3917 Check(!SawReturned, "More than one parameter has attribute returned!",
3918 Call);
3919 Check(Ty->canLosslesslyBitCastTo(FTy->getReturnType()),
3920 "Incompatible argument and return types for 'returned' "
3921 "attribute",
3922 Call);
3923 SawReturned = true;
3924 }
3925
3926 // Statepoint intrinsic is vararg but the wrapped function may be not.
3927 // Allow sret here and check the wrapped function in verifyStatepoint.
3928 if (Call.getIntrinsicID() != Intrinsic::experimental_gc_statepoint)
3929 Check(!ArgAttrs.hasAttribute(Attribute::StructRet),
3930 "Attribute 'sret' cannot be used for vararg call arguments!",
3931 Call);
3932
3933 if (ArgAttrs.hasAttribute(Attribute::InAlloca))
3934 Check(Idx == Call.arg_size() - 1,
3935 "inalloca isn't on the last argument!", Call);
3936 }
3937 }
3938
3939 // Verify that there's no metadata unless it's a direct call to an intrinsic.
3940 if (!IsIntrinsic) {
3941 for (Type *ParamTy : FTy->params()) {
3942 Check(!ParamTy->isMetadataTy(),
3943 "Function has metadata parameter but isn't an intrinsic", Call);
3944 Check(!ParamTy->isTokenLikeTy(),
3945 "Function has token parameter but isn't an intrinsic", Call);
3946 }
3947 }
3948
3949 // Verify that indirect calls don't return tokens.
3950 if (!Call.getCalledFunction()) {
3951 Check(!FTy->getReturnType()->isTokenLikeTy(),
3952 "Return type cannot be token for indirect call!");
3953 Check(!FTy->getReturnType()->isX86_AMXTy(),
3954 "Return type cannot be x86_amx for indirect call!");
3955 }
3956
3957 if (Intrinsic::ID ID = Call.getIntrinsicID())
3958 visitIntrinsicCall(ID, Call);
3959
3960 // Verify that a callsite has at most one "deopt", at most one "funclet", at
3961 // most one "gc-transition", at most one "cfguardtarget", at most one
3962 // "preallocated" operand bundle, and at most one "ptrauth" operand bundle.
3963 bool FoundDeoptBundle = false, FoundFuncletBundle = false,
3964 FoundGCTransitionBundle = false, FoundCFGuardTargetBundle = false,
3965 FoundPreallocatedBundle = false, FoundGCLiveBundle = false,
3966 FoundPtrauthBundle = false, FoundKCFIBundle = false,
3967 FoundAttachedCallBundle = false;
3968 for (unsigned i = 0, e = Call.getNumOperandBundles(); i < e; ++i) {
3969 OperandBundleUse BU = Call.getOperandBundleAt(i);
3970 uint32_t Tag = BU.getTagID();
3971 if (Tag == LLVMContext::OB_deopt) {
3972 Check(!FoundDeoptBundle, "Multiple deopt operand bundles", Call);
3973 FoundDeoptBundle = true;
3974 } else if (Tag == LLVMContext::OB_gc_transition) {
3975 Check(!FoundGCTransitionBundle, "Multiple gc-transition operand bundles",
3976 Call);
3977 FoundGCTransitionBundle = true;
3978 } else if (Tag == LLVMContext::OB_funclet) {
3979 Check(!FoundFuncletBundle, "Multiple funclet operand bundles", Call);
3980 FoundFuncletBundle = true;
3981 Check(BU.Inputs.size() == 1,
3982 "Expected exactly one funclet bundle operand", Call);
3983 Check(isa<FuncletPadInst>(BU.Inputs.front()),
3984 "Funclet bundle operands should correspond to a FuncletPadInst",
3985 Call);
3986 } else if (Tag == LLVMContext::OB_cfguardtarget) {
3987 Check(!FoundCFGuardTargetBundle, "Multiple CFGuardTarget operand bundles",
3988 Call);
3989 FoundCFGuardTargetBundle = true;
3990 Check(BU.Inputs.size() == 1,
3991 "Expected exactly one cfguardtarget bundle operand", Call);
3992 } else if (Tag == LLVMContext::OB_ptrauth) {
3993 Check(!FoundPtrauthBundle, "Multiple ptrauth operand bundles", Call);
3994 FoundPtrauthBundle = true;
3995 Check(BU.Inputs.size() == 2,
3996 "Expected exactly two ptrauth bundle operands", Call);
3997 Check(isa<ConstantInt>(BU.Inputs[0]) &&
3998 BU.Inputs[0]->getType()->isIntegerTy(32),
3999 "Ptrauth bundle key operand must be an i32 constant", Call);
4000 Check(BU.Inputs[1]->getType()->isIntegerTy(64),
4001 "Ptrauth bundle discriminator operand must be an i64", Call);
4002 } else if (Tag == LLVMContext::OB_kcfi) {
4003 Check(!FoundKCFIBundle, "Multiple kcfi operand bundles", Call);
4004 FoundKCFIBundle = true;
4005 Check(BU.Inputs.size() == 1, "Expected exactly one kcfi bundle operand",
4006 Call);
4007 Check(isa<ConstantInt>(BU.Inputs[0]) &&
4008 BU.Inputs[0]->getType()->isIntegerTy(32),
4009 "Kcfi bundle operand must be an i32 constant", Call);
4010 } else if (Tag == LLVMContext::OB_preallocated) {
4011 Check(!FoundPreallocatedBundle, "Multiple preallocated operand bundles",
4012 Call);
4013 FoundPreallocatedBundle = true;
4014 Check(BU.Inputs.size() == 1,
4015 "Expected exactly one preallocated bundle operand", Call);
4016 auto Input = dyn_cast<IntrinsicInst>(BU.Inputs.front());
4017 Check(Input &&
4018 Input->getIntrinsicID() == Intrinsic::call_preallocated_setup,
4019 "\"preallocated\" argument must be a token from "
4020 "llvm.call.preallocated.setup",
4021 Call);
4022 } else if (Tag == LLVMContext::OB_gc_live) {
4023 Check(!FoundGCLiveBundle, "Multiple gc-live operand bundles", Call);
4024 FoundGCLiveBundle = true;
4025 } else if (Tag == LLVMContext::OB_clang_arc_attachedcall) {
4026 Check(!FoundAttachedCallBundle,
4027 "Multiple \"clang.arc.attachedcall\" operand bundles", Call);
4028 FoundAttachedCallBundle = true;
4029 verifyAttachedCallBundle(Call, BU);
4030 }
4031 }
4032
4033 // Verify that callee and callsite agree on whether to use pointer auth.
4034 Check(!(Call.getCalledFunction() && FoundPtrauthBundle),
4035 "Direct call cannot have a ptrauth bundle", Call);
4036
4037 // Verify that each inlinable callsite of a debug-info-bearing function in a
4038 // debug-info-bearing function has a debug location attached to it. Failure to
4039 // do so causes assertion failures when the inliner sets up inline scope info
4040 // (Interposable functions are not inlinable, neither are functions without
4041 // definitions.)
4042 if (Call.getFunction()->getSubprogram() && Call.getCalledFunction() &&
4043 Call.getCalledFunction()->getSubprogram() &&
4044 !Call.getCalledFunction()->isInterposable() &&
4045 !Call.getCalledFunction()->isDeclaration())
4046 CheckDI(Call.getDebugLoc(),
4047 "inlinable function call in a function with "
4048 "debug info must have a !dbg location",
4049 Call);
4050
4051 if (Call.isInlineAsm())
4052 verifyInlineAsmCall(Call);
4053
4054 ConvergenceVerifyHelper.visit(Call);
4055
4056 visitInstruction(Call);
4057}
4058
4059void Verifier::verifyTailCCMustTailAttrs(const AttrBuilder &Attrs,
4060 StringRef Context) {
4061 Check(!Attrs.contains(Attribute::InAlloca),
4062 Twine("inalloca attribute not allowed in ") + Context);
4063 Check(!Attrs.contains(Attribute::InReg),
4064 Twine("inreg attribute not allowed in ") + Context);
4065 Check(!Attrs.contains(Attribute::SwiftError),
4066 Twine("swifterror attribute not allowed in ") + Context);
4067 Check(!Attrs.contains(Attribute::Preallocated),
4068 Twine("preallocated attribute not allowed in ") + Context);
4069 Check(!Attrs.contains(Attribute::ByRef),
4070 Twine("byref attribute not allowed in ") + Context);
4071}
4072
4073/// Two types are "congruent" if they are identical, or if they are both pointer
4074/// types with different pointee types and the same address space.
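/// For instance (illustrative, not from the original source), two
/// "ptr addrspace(1)" values are congruent with each other, while "ptr" and
/// "ptr addrspace(1)" are not, because their address spaces differ.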
4075static bool isTypeCongruent(Type *L, Type *R) {
4076 if (L == R)
4077 return true;
4078 PointerType *PL = dyn_cast<PointerType>(L);
4079 PointerType *PR = dyn_cast<PointerType>(R);
4080 if (!PL || !PR)
4081 return false;
4082 return PL->getAddressSpace() == PR->getAddressSpace();
4083}
4084
4085static AttrBuilder getParameterABIAttributes(LLVMContext& C, unsigned I, AttributeList Attrs) {
4086 static const Attribute::AttrKind ABIAttrs[] = {
4087 Attribute::StructRet, Attribute::ByVal, Attribute::InAlloca,
4088 Attribute::InReg, Attribute::StackAlignment, Attribute::SwiftSelf,
4089 Attribute::SwiftAsync, Attribute::SwiftError, Attribute::Preallocated,
4090 Attribute::ByRef};
4091 AttrBuilder Copy(C);
4092 for (auto AK : ABIAttrs) {
4093 Attribute Attr = Attrs.getParamAttrs(I).getAttribute(AK);
4094 if (Attr.isValid())
4095 Copy.addAttribute(Attr);
4096 }
4097
4098 // `align` is ABI-affecting only in combination with `byval` or `byref`.
4099 if (Attrs.hasParamAttr(I, Attribute::Alignment) &&
4100 (Attrs.hasParamAttr(I, Attribute::ByVal) ||
4101 Attrs.hasParamAttr(I, Attribute::ByRef)))
4102 Copy.addAlignmentAttr(Attrs.getParamAlignment(I));
4103 return Copy;
4104}
4105
4106void Verifier::verifyMustTailCall(CallInst &CI) {
4107 Check(!CI.isInlineAsm(), "cannot use musttail call with inline asm", &CI);
4108
4109 Function *F = CI.getParent()->getParent();
4110 FunctionType *CallerTy = F->getFunctionType();
4111 FunctionType *CalleeTy = CI.getFunctionType();
4112 Check(CallerTy->isVarArg() == CalleeTy->isVarArg(),
4113 "cannot guarantee tail call due to mismatched varargs", &CI);
4114 Check(isTypeCongruent(CallerTy->getReturnType(), CalleeTy->getReturnType()),
4115 "cannot guarantee tail call due to mismatched return types", &CI);
4116
4117 // - The calling conventions of the caller and callee must match.
4118 Check(F->getCallingConv() == CI.getCallingConv(),
4119 "cannot guarantee tail call due to mismatched calling conv", &CI);
4120
4121 // - The call must immediately precede a :ref:`ret <i_ret>` instruction,
4122 // or a pointer bitcast followed by a ret instruction.
4123 // - The ret instruction must return the (possibly bitcasted) value
4124 // produced by the call or void.
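// A conforming sequence looks like (illustrative IR, not from the original
// source):
//   %r = musttail call i32 @callee(i32 %x)
//   ret i32 %r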
4125 Value *RetVal = &CI;
4126 Instruction *Next = CI.getNextNode();
4127
4128 // Handle the optional bitcast.
4129 if (BitCastInst *BI = dyn_cast_or_null<BitCastInst>(Next)) {
4130 Check(BI->getOperand(0) == RetVal,
4131 "bitcast following musttail call must use the call", BI);
4132 RetVal = BI;
4133 Next = BI->getNextNode();
4134 }
4135
4136 // Check the return.
4137 ReturnInst *Ret = dyn_cast_or_null<ReturnInst>(Next);
4138 Check(Ret, "musttail call must precede a ret with an optional bitcast", &CI);
4139 Check(!Ret->getReturnValue() || Ret->getReturnValue() == RetVal ||
4140 isa<UndefValue>(Ret->getReturnValue()),
4141 "musttail call result must be returned", Ret);
4142
4143 AttributeList CallerAttrs = F->getAttributes();
4144 AttributeList CalleeAttrs = CI.getAttributes();
4145 if (CI.getCallingConv() == CallingConv::SwiftTail ||
4146 CI.getCallingConv() == CallingConv::Tail) {
4147 StringRef CCName =
4148 CI.getCallingConv() == CallingConv::Tail ? "tailcc" : "swifttailcc";
4149
4150 // - Only sret, byval, swiftself, and swiftasync ABI-impacting attributes
4151 // are allowed in swifttailcc call
4152 for (unsigned I = 0, E = CallerTy->getNumParams(); I != E; ++I) {
4153 AttrBuilder ABIAttrs = getParameterABIAttributes(F->getContext(), I, CallerAttrs);
4154 SmallString<32> Context{CCName, StringRef(" musttail caller")};
4155 verifyTailCCMustTailAttrs(ABIAttrs, Context);
4156 }
4157 for (unsigned I = 0, E = CalleeTy->getNumParams(); I != E; ++I) {
4158 AttrBuilder ABIAttrs = getParameterABIAttributes(F->getContext(), I, CalleeAttrs);
4159 SmallString<32> Context{CCName, StringRef(" musttail callee")};
4160 verifyTailCCMustTailAttrs(ABIAttrs, Context);
4161 }
4162 // - Varargs functions are not allowed
4163 Check(!CallerTy->isVarArg(), Twine("cannot guarantee ") + CCName +
4164 " tail call for varargs function");
4165 return;
4166 }
4167
4168 // - The caller and callee prototypes must match. Pointer types of
4169 // parameters or return types may differ in pointee type, but not
4170 // address space.
4171 if (!CI.getIntrinsicID()) {
4172 Check(CallerTy->getNumParams() == CalleeTy->getNumParams(),
4173 "cannot guarantee tail call due to mismatched parameter counts", &CI);
4174 for (unsigned I = 0, E = CallerTy->getNumParams(); I != E; ++I) {
4175 Check(
4176 isTypeCongruent(CallerTy->getParamType(I), CalleeTy->getParamType(I)),
4177 "cannot guarantee tail call due to mismatched parameter types", &CI);
4178 }
4179 }
4180
4181 // - All ABI-impacting function attributes, such as sret, byval, inreg,
4182 // returned, preallocated, and inalloca, must match.
4183 for (unsigned I = 0, E = CallerTy->getNumParams(); I != E; ++I) {
4184 AttrBuilder CallerABIAttrs = getParameterABIAttributes(F->getContext(), I, CallerAttrs);
4185 AttrBuilder CalleeABIAttrs = getParameterABIAttributes(F->getContext(), I, CalleeAttrs);
4186 Check(CallerABIAttrs == CalleeABIAttrs,
4187 "cannot guarantee tail call due to mismatched ABI impacting "
4188 "function attributes",
4189 &CI, CI.getOperand(I));
4190 }
4191}
4192
4193void Verifier::visitCallInst(CallInst &CI) {
4194 visitCallBase(CI);
4195
4196 if (CI.isMustTailCall())
4197 verifyMustTailCall(CI);
4198}
4199
4200void Verifier::visitInvokeInst(InvokeInst &II) {
4201 visitCallBase(II);
4202
4203 // Verify that the first non-PHI instruction of the unwind destination is an
4204 // exception handling instruction.
4205 Check(
4206 II.getUnwindDest()->isEHPad(),
4207 "The unwind destination does not have an exception handling instruction!",
4208 &II);
4209
4210 visitTerminator(II);
4211}
4212
4213/// visitUnaryOperator - Check the argument to the unary operator.
4214///
4215void Verifier::visitUnaryOperator(UnaryOperator &U) {
4216 Check(U.getType() == U.getOperand(0)->getType(),
4217 "Unary operators must have same type for"
4218 "operands and result!",
4219 &U);
4220
4221 switch (U.getOpcode()) {
4222 // Check that floating-point arithmetic operators are only used with
4223 // floating-point operands.
4224 case Instruction::FNeg:
4225 Check(U.getType()->isFPOrFPVectorTy(),
4226 "FNeg operator only works with float types!", &U);
4227 break;
4228 default:
4229 llvm_unreachable("Unknown UnaryOperator opcode!");
4230 }
4231
4232 visitInstruction(U);
4233}
4234
4235/// visitBinaryOperator - Check that both arguments to the binary operator are
4236/// of the same type!
4237///
4238void Verifier::visitBinaryOperator(BinaryOperator &B) {
4239 Check(B.getOperand(0)->getType() == B.getOperand(1)->getType(),
4240 "Both operands to a binary operator are not of the same type!", &B);
4241
4242 switch (B.getOpcode()) {
4243 // Check that integer arithmetic operators are only used with
4244 // integral operands.
4245 case Instruction::Add:
4246 case Instruction::Sub:
4247 case Instruction::Mul:
4248 case Instruction::SDiv:
4249 case Instruction::UDiv:
4250 case Instruction::SRem:
4251 case Instruction::URem:
4252 Check(B.getType()->isIntOrIntVectorTy(),
4253 "Integer arithmetic operators only work with integral types!", &B);
4254 Check(B.getType() == B.getOperand(0)->getType(),
4255 "Integer arithmetic operators must have same type "
4256 "for operands and result!",
4257 &B);
4258 break;
4259 // Check that floating-point arithmetic operators are only used with
4260 // floating-point operands.
4261 case Instruction::FAdd:
4262 case Instruction::FSub:
4263 case Instruction::FMul:
4264 case Instruction::FDiv:
4265 case Instruction::FRem:
4266 Check(B.getType()->isFPOrFPVectorTy(),
4267 "Floating-point arithmetic operators only work with "
4268 "floating-point types!",
4269 &B);
4270 Check(B.getType() == B.getOperand(0)->getType(),
4271 "Floating-point arithmetic operators must have same type "
4272 "for operands and result!",
4273 &B);
4274 break;
4275 // Check that logical operators are only used with integral operands.
4276 case Instruction::And:
4277 case Instruction::Or:
4278 case Instruction::Xor:
4279 Check(B.getType()->isIntOrIntVectorTy(),
4280 "Logical operators only work with integral types!", &B);
4281 Check(B.getType() == B.getOperand(0)->getType(),
4282 "Logical operators must have same type for operands and result!", &B);
4283 break;
4284 case Instruction::Shl:
4285 case Instruction::LShr:
4286 case Instruction::AShr:
4287 Check(B.getType()->isIntOrIntVectorTy(),
4288 "Shifts only work with integral types!", &B);
4289 Check(B.getType() == B.getOperand(0)->getType(),
4290 "Shift return type must be same as operands!", &B);
4291 break;
4292 default:
4293 llvm_unreachable("Unknown BinaryOperator opcode!");
4294 }
4295
4296 visitInstruction(B);
4297}
4298
4299void Verifier::visitICmpInst(ICmpInst &IC) {
4300 // Check that the operands are the same type
4301 Type *Op0Ty = IC.getOperand(0)->getType();
4302 Type *Op1Ty = IC.getOperand(1)->getType();
4303 Check(Op0Ty == Op1Ty,
4304 "Both operands to ICmp instruction are not of the same type!", &IC);
4305 // Check that the operands are the right type
4306 Check(Op0Ty->isIntOrIntVectorTy() || Op0Ty->isPtrOrPtrVectorTy(),
4307 "Invalid operand types for ICmp instruction", &IC);
4308 // Check that the predicate is valid.
4309 Check(IC.isIntPredicate(), "Invalid predicate in ICmp instruction!", &IC);
4310
4311 visitInstruction(IC);
4312}
4313
4314void Verifier::visitFCmpInst(FCmpInst &FC) {
4315 // Check that the operands are the same type
4316 Type *Op0Ty = FC.getOperand(0)->getType();
4317 Type *Op1Ty = FC.getOperand(1)->getType();
4318 Check(Op0Ty == Op1Ty,
4319 "Both operands to FCmp instruction are not of the same type!", &FC);
4320 // Check that the operands are the right type
4321 Check(Op0Ty->isFPOrFPVectorTy(), "Invalid operand types for FCmp instruction",
4322 &FC);
4323 // Check that the predicate is valid.
4324 Check(FC.isFPPredicate(), "Invalid predicate in FCmp instruction!", &FC);
4325
4326 visitInstruction(FC);
4327}
4328
4329 void Verifier::visitExtractElementInst(ExtractElementInst &EI) {
4330 Check(ExtractElementInst::isValidOperands(EI.getOperand(0), EI.getOperand(1)),
4331 "Invalid extractelement operands!", &EI);
4332 visitInstruction(EI);
4333}
4334
4335void Verifier::visitInsertElementInst(InsertElementInst &IE) {
4336 Check(InsertElementInst::isValidOperands(IE.getOperand(0), IE.getOperand(1),
4337 IE.getOperand(2)),
4338 "Invalid insertelement operands!", &IE);
4339 visitInstruction(IE);
4340}
4341
4342 void Verifier::visitShuffleVectorInst(ShuffleVectorInst &SV) {
4343 Check(ShuffleVectorInst::isValidOperands(SV.getOperand(0), SV.getOperand(1),
4344 SV.getShuffleMask()),
4345 "Invalid shufflevector operands!", &SV);
4346 visitInstruction(SV);
4347}
4348
4349void Verifier::visitGetElementPtrInst(GetElementPtrInst &GEP) {
4350 Type *TargetTy = GEP.getPointerOperandType()->getScalarType();
4351
4352 Check(isa<PointerType>(TargetTy),
4353 "GEP base pointer is not a vector or a vector of pointers", &GEP);
4354 Check(GEP.getSourceElementType()->isSized(), "GEP into unsized type!", &GEP);
4355
4356 if (auto *STy = dyn_cast<StructType>(GEP.getSourceElementType())) {
4357 Check(!STy->isScalableTy(),
4358 "getelementptr cannot target structure that contains scalable vector"
4359 "type",
4360 &GEP);
4361 }
4362
4363 SmallVector<Value *, 16> Idxs(GEP.indices());
4364 Check(
4365 all_of(Idxs, [](Value *V) { return V->getType()->isIntOrIntVectorTy(); }),
4366 "GEP indexes must be integers", &GEP);
4367 Type *ElTy =
4368 GetElementPtrInst::getIndexedType(GEP.getSourceElementType(), Idxs);
4369 Check(ElTy, "Invalid indices for GEP pointer type!", &GEP);
4370
4371 PointerType *PtrTy = dyn_cast<PointerType>(GEP.getType()->getScalarType());
4372
4373 Check(PtrTy && GEP.getResultElementType() == ElTy,
4374 "GEP is not of right type for indices!", &GEP, ElTy);
4375
4376 if (auto *GEPVTy = dyn_cast<VectorType>(GEP.getType())) {
4377 // Additional checks for vector GEPs.
4378 ElementCount GEPWidth = GEPVTy->getElementCount();
4379 if (GEP.getPointerOperandType()->isVectorTy())
4380 Check(
4381 GEPWidth ==
4382 cast<VectorType>(GEP.getPointerOperandType())->getElementCount(),
4383 "Vector GEP result width doesn't match operand's", &GEP);
4384 for (Value *Idx : Idxs) {
4385 Type *IndexTy = Idx->getType();
4386 if (auto *IndexVTy = dyn_cast<VectorType>(IndexTy)) {
4387 ElementCount IndexWidth = IndexVTy->getElementCount();
4388 Check(IndexWidth == GEPWidth, "Invalid GEP index vector width", &GEP);
4389 }
4390 Check(IndexTy->isIntOrIntVectorTy(),
4391 "All GEP indices should be of integer type");
4392 }
4393 }
4394
4395 Check(GEP.getAddressSpace() == PtrTy->getAddressSpace(),
4396 "GEP address space doesn't match type", &GEP);
4397
4398 visitInstruction(GEP);
4399}
4400
4401static bool isContiguous(const ConstantRange &A, const ConstantRange &B) {
4402 return A.getUpper() == B.getLower() || A.getLower() == B.getUpper();
4403}
4404
4405/// Verify !range and !absolute_symbol metadata. These have the same
4406/// restrictions, except !absolute_symbol allows the full set.
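/// For example (illustrative, not from the original source), the attachment
///   !0 = !{i32 0, i32 10, i32 20, i32 30}
/// encodes the two ranges [0, 10) and [20, 30); the loop below checks that
/// such pairs are well-typed, non-empty, ordered, non-overlapping, and not
/// contiguous.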
4407void Verifier::verifyRangeLikeMetadata(const Value &I, const MDNode *Range,
4408 Type *Ty, RangeLikeMetadataKind Kind) {
4409 unsigned NumOperands = Range->getNumOperands();
4410 Check(NumOperands % 2 == 0, "Unfinished range!", Range);
4411 unsigned NumRanges = NumOperands / 2;
4412 Check(NumRanges >= 1, "It should have at least one range!", Range);
4413
4414 ConstantRange LastRange(1, true); // Dummy initial value
4415 for (unsigned i = 0; i < NumRanges; ++i) {
4416 ConstantInt *Low =
4417 mdconst::dyn_extract<ConstantInt>(Range->getOperand(2 * i));
4418 Check(Low, "The lower limit must be an integer!", Low);
4419 ConstantInt *High =
4420 mdconst::dyn_extract<ConstantInt>(Range->getOperand(2 * i + 1));
4421 Check(High, "The upper limit must be an integer!", High);
4422
4423 Check(High->getType() == Low->getType(), "Range pair types must match!",
4424 &I);
4425
4426 if (Kind == RangeLikeMetadataKind::NoaliasAddrspace) {
4427 Check(High->getType()->isIntegerTy(32),
4428 "noalias.addrspace type must be i32!", &I);
4429 } else {
4430 Check(High->getType() == Ty->getScalarType(),
4431 "Range types must match instruction type!", &I);
4432 }
4433
4434 APInt HighV = High->getValue();
4435 APInt LowV = Low->getValue();
4436
4437 // ConstantRange asserts if the ranges are the same except for the min/max
4438 // value. Leave the cases it tolerates for the empty range error below.
4439 Check(LowV != HighV || LowV.isMaxValue() || LowV.isMinValue(),
4440 "The upper and lower limits cannot be the same value", &I);
4441
4442 ConstantRange CurRange(LowV, HighV);
4443 Check(!CurRange.isEmptySet() &&
4444 (Kind == RangeLikeMetadataKind::AbsoluteSymbol ||
4445 !CurRange.isFullSet()),
4446 "Range must not be empty!", Range);
4447 if (i != 0) {
4448 Check(CurRange.intersectWith(LastRange).isEmptySet(),
4449 "Intervals are overlapping", Range);
4450 Check(LowV.sgt(LastRange.getLower()), "Intervals are not in order",
4451 Range);
4452 Check(!isContiguous(CurRange, LastRange), "Intervals are contiguous",
4453 Range);
4454 }
4455 LastRange = ConstantRange(LowV, HighV);
4456 }
4457 if (NumRanges > 2) {
4458 APInt FirstLow =
4459 mdconst::dyn_extract<ConstantInt>(Range->getOperand(0))->getValue();
4460 APInt FirstHigh =
4461 mdconst::dyn_extract<ConstantInt>(Range->getOperand(1))->getValue();
4462 ConstantRange FirstRange(FirstLow, FirstHigh);
4463 Check(FirstRange.intersectWith(LastRange).isEmptySet(),
4464 "Intervals are overlapping", Range);
4465 Check(!isContiguous(FirstRange, LastRange), "Intervals are contiguous",
4466 Range);
4467 }
4468}
4469
4470void Verifier::visitRangeMetadata(Instruction &I, MDNode *Range, Type *Ty) {
4471 assert(Range && Range == I.getMetadata(LLVMContext::MD_range) &&
4472 "precondition violation");
4473 verifyRangeLikeMetadata(I, Range, Ty, RangeLikeMetadataKind::Range);
4474}
4475
4476void Verifier::visitNoaliasAddrspaceMetadata(Instruction &I, MDNode *Range,
4477 Type *Ty) {
4478 assert(Range && Range == I.getMetadata(LLVMContext::MD_noalias_addrspace) &&
4479 "precondition violation");
4480 verifyRangeLikeMetadata(I, Range, Ty,
4481 RangeLikeMetadataKind::NoaliasAddrspace);
4482}
4483
4484void Verifier::checkAtomicMemAccessSize(Type *Ty, const Instruction *I) {
4485 unsigned Size = DL.getTypeSizeInBits(Ty).getFixedValue();
4486 Check(Size >= 8, "atomic memory access' size must be byte-sized", Ty, I);
4487 Check(!(Size & (Size - 1)),
4488 "atomic memory access' operand must have a power-of-two size", Ty, I);
4489}
4490
4491 void Verifier::visitLoadInst(LoadInst &LI) {
4492 PointerType *PTy = dyn_cast<PointerType>(LI.getOperand(0)->getType());
4493 Check(PTy, "Load operand must be a pointer.", &LI);
4494 Type *ElTy = LI.getType();
4495 if (MaybeAlign A = LI.getAlign()) {
4496 Check(A->value() <= Value::MaximumAlignment,
4497 "huge alignment values are unsupported", &LI);
4498 }
4499 Check(ElTy->isSized(), "loading unsized types is not allowed", &LI);
4500 if (LI.isAtomic()) {
4501 Check(LI.getOrdering() != AtomicOrdering::Release &&
4502 LI.getOrdering() != AtomicOrdering::AcquireRelease,
4503 "Load cannot have Release ordering", &LI);
4504 Check(ElTy->getScalarType()->isIntOrPtrTy() ||
4505 ElTy->getScalarType()->isFloatingPointTy(),
4506 "atomic load operand must have integer, pointer, floating point, "
4507 "or vector type!",
4508 ElTy, &LI);
4509
4510 checkAtomicMemAccessSize(ElTy, &LI);
4511 } else {
4512 Check(LI.getSyncScopeID() == SyncScope::System,
4513 "Non-atomic load cannot have SynchronizationScope specified", &LI);
4514 }
4515
4516 visitInstruction(LI);
4517}
4518
4519void Verifier::visitStoreInst(StoreInst &SI) {
4520 PointerType *PTy = dyn_cast<PointerType>(SI.getOperand(1)->getType());
4521 Check(PTy, "Store operand must be a pointer.", &SI);
4522 Type *ElTy = SI.getOperand(0)->getType();
4523 if (MaybeAlign A = SI.getAlign()) {
4524 Check(A->value() <= Value::MaximumAlignment,
4525 "huge alignment values are unsupported", &SI);
4526 }
4527 Check(ElTy->isSized(), "storing unsized types is not allowed", &SI);
4528 if (SI.isAtomic()) {
4529 Check(SI.getOrdering() != AtomicOrdering::Acquire &&
4530 SI.getOrdering() != AtomicOrdering::AcquireRelease,
4531 "Store cannot have Acquire ordering", &SI);
4532 Check(ElTy->getScalarType()->isIntOrPtrTy() ||
4533 ElTy->getScalarType()->isFloatingPointTy(),
4534 "atomic store operand must have integer, pointer, floating point, "
4535 "or vector type!",
4536 ElTy, &SI);
4537 checkAtomicMemAccessSize(ElTy, &SI);
4538 } else {
4539 Check(SI.getSyncScopeID() == SyncScope::System,
4540 "Non-atomic store cannot have SynchronizationScope specified", &SI);
4541 }
4542 visitInstruction(SI);
4543}
4544
4545/// Check that SwiftErrorVal is used as a swifterror argument in CS.
4546void Verifier::verifySwiftErrorCall(CallBase &Call,
4547 const Value *SwiftErrorVal) {
4548 for (const auto &I : llvm::enumerate(Call.args())) {
4549 if (I.value() == SwiftErrorVal) {
4550 Check(Call.paramHasAttr(I.index(), Attribute::SwiftError),
4551 "swifterror value when used in a callsite should be marked "
4552 "with swifterror attribute",
4553 SwiftErrorVal, Call);
4554 }
4555 }
4556}
4557
4558void Verifier::verifySwiftErrorValue(const Value *SwiftErrorVal) {
4559 // Check that swifterror value is only used by loads, stores, or as
4560 // a swifterror argument.
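// Illustrative example (not part of the original source): given
//   %err = alloca swifterror ptr
// every use of %err must be a load from it, a store where %err is the
// pointer operand, or a call argument carrying the swifterror attribute.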
4561 for (const User *U : SwiftErrorVal->users()) {
4562 Check(isa<LoadInst>(U) || isa<StoreInst>(U) || isa<CallInst>(U) ||
4563 isa<InvokeInst>(U),
4564 "swifterror value can only be loaded and stored from, or "
4565 "as a swifterror argument!",
4566 SwiftErrorVal, U);
4567 // If it is used by a store, check it is the second operand.
4568 if (auto StoreI = dyn_cast<StoreInst>(U))
4569 Check(StoreI->getOperand(1) == SwiftErrorVal,
4570 "swifterror value should be the second operand when used "
4571 "by stores",
4572 SwiftErrorVal, U);
4573 if (auto *Call = dyn_cast<CallBase>(U))
4574 verifySwiftErrorCall(*const_cast<CallBase *>(Call), SwiftErrorVal);
4575 }
4576}
4577
4578void Verifier::visitAllocaInst(AllocaInst &AI) {
4579 Type *Ty = AI.getAllocatedType();
4580 SmallPtrSet<Type*, 4> Visited;
4581 Check(Ty->isSized(&Visited), "Cannot allocate unsized type", &AI);
4582 // Check if it's a target extension type that disallows being used on the
4583   // stack.
4584   Check(!Ty->containsNonLocalTargetExtType(),
4585         "Alloca has illegal target extension type", &AI);
4586   Check(AI.getArraySize()->getType()->isIntegerTy(),
4587         "Alloca array size must have integer type", &AI);
4588 if (MaybeAlign A = AI.getAlign()) {
4589 Check(A->value() <= Value::MaximumAlignment,
4590 "huge alignment values are unsupported", &AI);
4591 }
4592
4593 if (AI.isSwiftError()) {
4594     Check(Ty->isPointerTy(), "swifterror alloca must have pointer type", &AI);
4595     Check(!AI.isArrayAllocation(),
4596           "swifterror alloca must not be array allocation", &AI);
4597 verifySwiftErrorValue(&AI);
4598 }
4599
4600   if (TT.isAMDGPU()) {
4601     Check(AI.getAddressSpace() == DL.getAllocaAddrSpace(),
4602           "alloca on amdgpu must be in addrspace(5)", &AI);
4603 }
4604
4605 visitInstruction(AI);
4606}
4607
4608void Verifier::visitAtomicCmpXchgInst(AtomicCmpXchgInst &CXI) {
4609 Type *ElTy = CXI.getOperand(1)->getType();
4610 Check(ElTy->isIntOrPtrTy(),
4611 "cmpxchg operand must have integer or pointer type", ElTy, &CXI);
4612 checkAtomicMemAccessSize(ElTy, &CXI);
4613 visitInstruction(CXI);
4614}
4615
4616void Verifier::visitAtomicRMWInst(AtomicRMWInst &RMWI) {
4617 Check(RMWI.getOrdering() != AtomicOrdering::Unordered,
4618 "atomicrmw instructions cannot be unordered.", &RMWI);
4619 auto Op = RMWI.getOperation();
4620 Type *ElTy = RMWI.getOperand(1)->getType();
4621 if (Op == AtomicRMWInst::Xchg) {
4622 Check(ElTy->isIntegerTy() || ElTy->isFloatingPointTy() ||
4623 ElTy->isPointerTy(),
4624 "atomicrmw " + AtomicRMWInst::getOperationName(Op) +
4625 " operand must have integer or floating point type!",
4626 &RMWI, ElTy);
4627   } else if (AtomicRMWInst::isFPOperation(Op)) {
4628     Check(ElTy->isFPOrFPVectorTy() && !isa<ScalableVectorType>(ElTy),
4629           "atomicrmw " + AtomicRMWInst::getOperationName(Op) +
4630 " operand must have floating-point or fixed vector of floating-point "
4631 "type!",
4632 &RMWI, ElTy);
4633 } else {
4634 Check(ElTy->isIntegerTy(),
4635 "atomicrmw " + AtomicRMWInst::getOperationName(Op) +
4636 " operand must have integer type!",
4637 &RMWI, ElTy);
4638 }
4639   checkAtomicMemAccessSize(ElTy, &RMWI);
4640   Check(AtomicRMWInst::FIRST_BINOP <= Op && Op <= AtomicRMWInst::LAST_BINOP,
4641         "Invalid binary operation!", &RMWI);
4642 visitInstruction(RMWI);
4643}
4644
4645void Verifier::visitFenceInst(FenceInst &FI) {
4646 const AtomicOrdering Ordering = FI.getOrdering();
4647 Check(Ordering == AtomicOrdering::Acquire ||
4648 Ordering == AtomicOrdering::Release ||
4649 Ordering == AtomicOrdering::AcquireRelease ||
4650 Ordering == AtomicOrdering::SequentiallyConsistent,
4651 "fence instructions may only have acquire, release, acq_rel, or "
4652 "seq_cst ordering.",
4653 &FI);
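  // For example, "fence acquire" and "fence seq_cst" are accepted here, while
  // a monotonic or unordered fence is rejected.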
4654 visitInstruction(FI);
4655}
4656
4657 void Verifier::visitExtractValueInst(ExtractValueInst &EVI) {
4658   Check(ExtractValueInst::getIndexedType(EVI.getAggregateOperand()->getType(),
4659                                          EVI.getIndices()) == EVI.getType(),
4660 "Invalid ExtractValueInst operands!", &EVI);
4661
4662 visitInstruction(EVI);
4663}
4664
4665 void Verifier::visitInsertValueInst(InsertValueInst &IVI) {
4666   Check(ExtractValueInst::getIndexedType(IVI.getAggregateOperand()->getType(),
4667                                          IVI.getIndices()) ==
4668 IVI.getOperand(1)->getType(),
4669 "Invalid InsertValueInst operands!", &IVI);
4670
4671 visitInstruction(IVI);
4672}
4673
4674static Value *getParentPad(Value *EHPad) {
4675 if (auto *FPI = dyn_cast<FuncletPadInst>(EHPad))
4676 return FPI->getParentPad();
4677
4678 return cast<CatchSwitchInst>(EHPad)->getParentPad();
4679}
4680
4681void Verifier::visitEHPadPredecessors(Instruction &I) {
4682 assert(I.isEHPad());
4683
4684 BasicBlock *BB = I.getParent();
4685 Function *F = BB->getParent();
4686
4687 Check(BB != &F->getEntryBlock(), "EH pad cannot be in entry block.", &I);
4688
4689 if (auto *LPI = dyn_cast<LandingPadInst>(&I)) {
4690 // The landingpad instruction defines its parent as a landing pad block. The
4691 // landing pad block may be branched to only by the unwind edge of an
4692 // invoke.
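    // For example, the only legal way to reach such a block is:
    //   invoke void @g() to label %normal unwind label %lpad
    // where %lpad begins with the landingpad instruction; a plain branch to
    // %lpad would be rejected below. (@g, %normal and %lpad are placeholders.)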
4693 for (BasicBlock *PredBB : predecessors(BB)) {
4694 const auto *II = dyn_cast<InvokeInst>(PredBB->getTerminator());
4695 Check(II && II->getUnwindDest() == BB && II->getNormalDest() != BB,
4696 "Block containing LandingPadInst must be jumped to "
4697 "only by the unwind edge of an invoke.",
4698 LPI);
4699 }
4700 return;
4701 }
4702 if (auto *CPI = dyn_cast<CatchPadInst>(&I)) {
4703 if (!pred_empty(BB))
4704 Check(BB->getUniquePredecessor() == CPI->getCatchSwitch()->getParent(),
4705             "Block containing CatchPadInst must be jumped to "
4706 "only by its catchswitch.",
4707 CPI);
4708 Check(BB != CPI->getCatchSwitch()->getUnwindDest(),
4709 "Catchswitch cannot unwind to one of its catchpads",
4710 CPI->getCatchSwitch(), CPI);
4711 return;
4712 }
4713
4714 // Verify that each pred has a legal terminator with a legal to/from EH
4715 // pad relationship.
4716 Instruction *ToPad = &I;
4717 Value *ToPadParent = getParentPad(ToPad);
4718 for (BasicBlock *PredBB : predecessors(BB)) {
4719 Instruction *TI = PredBB->getTerminator();
4720 Value *FromPad;
4721 if (auto *II = dyn_cast<InvokeInst>(TI)) {
4722 Check(II->getUnwindDest() == BB && II->getNormalDest() != BB,
4723 "EH pad must be jumped to via an unwind edge", ToPad, II);
4724 auto *CalledFn =
4725 dyn_cast<Function>(II->getCalledOperand()->stripPointerCasts());
4726 if (CalledFn && CalledFn->isIntrinsic() && II->doesNotThrow() &&
4727 !IntrinsicInst::mayLowerToFunctionCall(CalledFn->getIntrinsicID()))
4728 continue;
4729 if (auto Bundle = II->getOperandBundle(LLVMContext::OB_funclet))
4730 FromPad = Bundle->Inputs[0];
4731 else
4732 FromPad = ConstantTokenNone::get(II->getContext());
4733 } else if (auto *CRI = dyn_cast<CleanupReturnInst>(TI)) {
4734 FromPad = CRI->getOperand(0);
4735 Check(FromPad != ToPadParent, "A cleanupret must exit its cleanup", CRI);
4736 } else if (auto *CSI = dyn_cast<CatchSwitchInst>(TI)) {
4737 FromPad = CSI;
4738 } else {
4739 Check(false, "EH pad must be jumped to via an unwind edge", ToPad, TI);
4740 }
4741
4742 // The edge may exit from zero or more nested pads.
4743 SmallPtrSet<Value *, 8> Seen;
4744 for (;; FromPad = getParentPad(FromPad)) {
4745 Check(FromPad != ToPad,
4746 "EH pad cannot handle exceptions raised within it", FromPad, TI);
4747 if (FromPad == ToPadParent) {
4748 // This is a legal unwind edge.
4749 break;
4750 }
4751 Check(!isa<ConstantTokenNone>(FromPad),
4752 "A single unwind edge may only enter one EH pad", TI);
4753 Check(Seen.insert(FromPad).second, "EH pad jumps through a cycle of pads",
4754 FromPad);
4755
4756 // This will be diagnosed on the corresponding instruction already. We
4757 // need the extra check here to make sure getParentPad() works.
4758 Check(isa<FuncletPadInst>(FromPad) || isa<CatchSwitchInst>(FromPad),
4759 "Parent pad must be catchpad/cleanuppad/catchswitch", TI);
4760 }
4761 }
4762}
4763
4764void Verifier::visitLandingPadInst(LandingPadInst &LPI) {
4765 // The landingpad instruction is ill-formed if it doesn't have any clauses and
4766 // isn't a cleanup.
4767 Check(LPI.getNumClauses() > 0 || LPI.isCleanup(),
4768 "LandingPadInst needs at least one clause or to be a cleanup.", &LPI);
4769
4770 visitEHPadPredecessors(LPI);
4771
4772 if (!LandingPadResultTy)
4773 LandingPadResultTy = LPI.getType();
4774 else
4775 Check(LandingPadResultTy == LPI.getType(),
4776 "The landingpad instruction should have a consistent result type "
4777 "inside a function.",
4778 &LPI);
4779
4780 Function *F = LPI.getParent()->getParent();
4781 Check(F->hasPersonalityFn(),
4782 "LandingPadInst needs to be in a function with a personality.", &LPI);
4783
4784 // The landingpad instruction must be the first non-PHI instruction in the
4785 // block.
4786 Check(LPI.getParent()->getLandingPadInst() == &LPI,
4787 "LandingPadInst not the first non-PHI instruction in the block.", &LPI);
4788
4789 for (unsigned i = 0, e = LPI.getNumClauses(); i < e; ++i) {
4790 Constant *Clause = LPI.getClause(i);
4791 if (LPI.isCatch(i)) {
4792 Check(isa<PointerType>(Clause->getType()),
4793 "Catch operand does not have pointer type!", &LPI);
4794 } else {
4795       Check(LPI.isFilter(i), "Clause is neither catch nor filter!", &LPI);
4796       Check(isa<ArrayType>(Clause->getType()),
4797             "Filter operand is not an array of constants!", &LPI);
4798 }
4799 }
4800
4801 visitInstruction(LPI);
4802}
4803
4804 void Verifier::visitResumeInst(ResumeInst &RI) {
4805   Check(RI.getFunction()->hasPersonalityFn(),
4806         "ResumeInst needs to be in a function with a personality.", &RI);
4807
4808 if (!LandingPadResultTy)
4809 LandingPadResultTy = RI.getValue()->getType();
4810 else
4811 Check(LandingPadResultTy == RI.getValue()->getType(),
4812 "The resume instruction should have a consistent result type "
4813 "inside a function.",
4814 &RI);
4815
4816 visitTerminator(RI);
4817}
4818
4819void Verifier::visitCatchPadInst(CatchPadInst &CPI) {
4820 BasicBlock *BB = CPI.getParent();
4821
4822 Function *F = BB->getParent();
4823 Check(F->hasPersonalityFn(),
4824 "CatchPadInst needs to be in a function with a personality.", &CPI);
4825 
4826   Check(isa<CatchSwitchInst>(CPI.getParentPad()),
4827         "CatchPadInst needs to be directly nested in a CatchSwitchInst.",
4828 CPI.getParentPad());
4829
4830 // The catchpad instruction must be the first non-PHI instruction in the
4831 // block.
4832 Check(&*BB->getFirstNonPHIIt() == &CPI,
4833 "CatchPadInst not the first non-PHI instruction in the block.", &CPI);
4834
4835 visitEHPadPredecessors(CPI);
4836 visitFuncletPadInst(CPI);
4837}
4838
4839void Verifier::visitCatchReturnInst(CatchReturnInst &CatchReturn) {
4840 Check(isa<CatchPadInst>(CatchReturn.getOperand(0)),
4841 "CatchReturnInst needs to be provided a CatchPad", &CatchReturn,
4842 CatchReturn.getOperand(0));
4843
4844 visitTerminator(CatchReturn);
4845}
4846
4847void Verifier::visitCleanupPadInst(CleanupPadInst &CPI) {
4848 BasicBlock *BB = CPI.getParent();
4849
4850 Function *F = BB->getParent();
4851 Check(F->hasPersonalityFn(),
4852 "CleanupPadInst needs to be in a function with a personality.", &CPI);
4853
4854 // The cleanuppad instruction must be the first non-PHI instruction in the
4855 // block.
4856 Check(&*BB->getFirstNonPHIIt() == &CPI,
4857 "CleanupPadInst not the first non-PHI instruction in the block.", &CPI);
4858
4859 auto *ParentPad = CPI.getParentPad();
4860 Check(isa<ConstantTokenNone>(ParentPad) || isa<FuncletPadInst>(ParentPad),
4861 "CleanupPadInst has an invalid parent.", &CPI);
4862
4863 visitEHPadPredecessors(CPI);
4864 visitFuncletPadInst(CPI);
4865}
4866
4867void Verifier::visitFuncletPadInst(FuncletPadInst &FPI) {
4868 User *FirstUser = nullptr;
4869 Value *FirstUnwindPad = nullptr;
4870 SmallVector<FuncletPadInst *, 8> Worklist({&FPI});
4871 SmallPtrSet<FuncletPadInst *, 8> Seen;
4872
4873 while (!Worklist.empty()) {
4874 FuncletPadInst *CurrentPad = Worklist.pop_back_val();
4875 Check(Seen.insert(CurrentPad).second,
4876 "FuncletPadInst must not be nested within itself", CurrentPad);
4877 Value *UnresolvedAncestorPad = nullptr;
4878 for (User *U : CurrentPad->users()) {
4879 BasicBlock *UnwindDest;
4880 if (auto *CRI = dyn_cast<CleanupReturnInst>(U)) {
4881 UnwindDest = CRI->getUnwindDest();
4882 } else if (auto *CSI = dyn_cast<CatchSwitchInst>(U)) {
4883 // We allow catchswitch unwind to caller to nest
4884 // within an outer pad that unwinds somewhere else,
4885 // because catchswitch doesn't have a nounwind variant.
4886 // See e.g. SimplifyCFGOpt::SimplifyUnreachable.
4887 if (CSI->unwindsToCaller())
4888 continue;
4889 UnwindDest = CSI->getUnwindDest();
4890 } else if (auto *II = dyn_cast<InvokeInst>(U)) {
4891 UnwindDest = II->getUnwindDest();
4892 } else if (isa<CallInst>(U)) {
4893 // Calls which don't unwind may be found inside funclet
4894 // pads that unwind somewhere else. We don't *require*
4895 // such calls to be annotated nounwind.
4896 continue;
4897 } else if (auto *CPI = dyn_cast<CleanupPadInst>(U)) {
4898 // The unwind dest for a cleanup can only be found by
4899 // recursive search. Add it to the worklist, and we'll
4900 // search for its first use that determines where it unwinds.
4901 Worklist.push_back(CPI);
4902 continue;
4903 } else {
4904 Check(isa<CatchReturnInst>(U), "Bogus funclet pad use", U);
4905 continue;
4906 }
4907
4908 Value *UnwindPad;
4909 bool ExitsFPI;
4910 if (UnwindDest) {
4911 UnwindPad = &*UnwindDest->getFirstNonPHIIt();
4912 if (!cast<Instruction>(UnwindPad)->isEHPad())
4913 continue;
4914 Value *UnwindParent = getParentPad(UnwindPad);
4915 // Ignore unwind edges that don't exit CurrentPad.
4916 if (UnwindParent == CurrentPad)
4917 continue;
4918 // Determine whether the original funclet pad is exited,
4919 // and if we are scanning nested pads determine how many
4920 // of them are exited so we can stop searching their
4921 // children.
4922 Value *ExitedPad = CurrentPad;
4923 ExitsFPI = false;
4924 do {
4925 if (ExitedPad == &FPI) {
4926 ExitsFPI = true;
4927 // Now we can resolve any ancestors of CurrentPad up to
4928 // FPI, but not including FPI since we need to make sure
4929 // to check all direct users of FPI for consistency.
4930 UnresolvedAncestorPad = &FPI;
4931 break;
4932 }
4933 Value *ExitedParent = getParentPad(ExitedPad);
4934 if (ExitedParent == UnwindParent) {
4935 // ExitedPad is the ancestor-most pad which this unwind
4936 // edge exits, so we can resolve up to it, meaning that
4937 // ExitedParent is the first ancestor still unresolved.
4938 UnresolvedAncestorPad = ExitedParent;
4939 break;
4940 }
4941 ExitedPad = ExitedParent;
4942 } while (!isa<ConstantTokenNone>(ExitedPad));
4943 } else {
4944 // Unwinding to caller exits all pads.
4945 UnwindPad = ConstantTokenNone::get(FPI.getContext());
4946 ExitsFPI = true;
4947 UnresolvedAncestorPad = &FPI;
4948 }
4949
4950 if (ExitsFPI) {
4951 // This unwind edge exits FPI. Make sure it agrees with other
4952 // such edges.
4953 if (FirstUser) {
4954 Check(UnwindPad == FirstUnwindPad,
4955 "Unwind edges out of a funclet "
4956 "pad must have the same unwind "
4957 "dest",
4958 &FPI, U, FirstUser);
4959 } else {
4960 FirstUser = U;
4961 FirstUnwindPad = UnwindPad;
4962 // Record cleanup sibling unwinds for verifySiblingFuncletUnwinds
4963 if (isa<CleanupPadInst>(&FPI) && !isa<ConstantTokenNone>(UnwindPad) &&
4964 getParentPad(UnwindPad) == getParentPad(&FPI))
4965 SiblingFuncletInfo[&FPI] = cast<Instruction>(U);
4966 }
4967 }
4968 // Make sure we visit all uses of FPI, but for nested pads stop as
4969 // soon as we know where they unwind to.
4970 if (CurrentPad != &FPI)
4971 break;
4972 }
4973 if (UnresolvedAncestorPad) {
4974 if (CurrentPad == UnresolvedAncestorPad) {
4975 // When CurrentPad is FPI itself, we don't mark it as resolved even if
4976 // we've found an unwind edge that exits it, because we need to verify
4977 // all direct uses of FPI.
4978 assert(CurrentPad == &FPI);
4979 continue;
4980 }
4981 // Pop off the worklist any nested pads that we've found an unwind
4982 // destination for. The pads on the worklist are the uncles,
4983 // great-uncles, etc. of CurrentPad. We've found an unwind destination
4984 // for all ancestors of CurrentPad up to but not including
4985 // UnresolvedAncestorPad.
4986 Value *ResolvedPad = CurrentPad;
4987 while (!Worklist.empty()) {
4988 Value *UnclePad = Worklist.back();
4989 Value *AncestorPad = getParentPad(UnclePad);
4990 // Walk ResolvedPad up the ancestor list until we either find the
4991 // uncle's parent or the last resolved ancestor.
4992 while (ResolvedPad != AncestorPad) {
4993 Value *ResolvedParent = getParentPad(ResolvedPad);
4994 if (ResolvedParent == UnresolvedAncestorPad) {
4995 break;
4996 }
4997 ResolvedPad = ResolvedParent;
4998 }
4999 // If the resolved ancestor search didn't find the uncle's parent,
5000 // then the uncle is not yet resolved.
5001 if (ResolvedPad != AncestorPad)
5002 break;
5003 // This uncle is resolved, so pop it from the worklist.
5004 Worklist.pop_back();
5005 }
5006 }
5007 }
5008
5009 if (FirstUnwindPad) {
5010 if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(FPI.getParentPad())) {
5011 BasicBlock *SwitchUnwindDest = CatchSwitch->getUnwindDest();
5012 Value *SwitchUnwindPad;
5013 if (SwitchUnwindDest)
5014 SwitchUnwindPad = &*SwitchUnwindDest->getFirstNonPHIIt();
5015 else
5016 SwitchUnwindPad = ConstantTokenNone::get(FPI.getContext());
5017 Check(SwitchUnwindPad == FirstUnwindPad,
5018 "Unwind edges out of a catch must have the same unwind dest as "
5019 "the parent catchswitch",
5020 &FPI, FirstUser, CatchSwitch);
5021 }
5022 }
5023
5024 visitInstruction(FPI);
5025}
5026
5027void Verifier::visitCatchSwitchInst(CatchSwitchInst &CatchSwitch) {
5028 BasicBlock *BB = CatchSwitch.getParent();
5029
5030 Function *F = BB->getParent();
5031 Check(F->hasPersonalityFn(),
5032 "CatchSwitchInst needs to be in a function with a personality.",
5033 &CatchSwitch);
5034
5035 // The catchswitch instruction must be the first non-PHI instruction in the
5036 // block.
5037 Check(&*BB->getFirstNonPHIIt() == &CatchSwitch,
5038 "CatchSwitchInst not the first non-PHI instruction in the block.",
5039 &CatchSwitch);
5040
5041 auto *ParentPad = CatchSwitch.getParentPad();
5042 Check(isa<ConstantTokenNone>(ParentPad) || isa<FuncletPadInst>(ParentPad),
5043 "CatchSwitchInst has an invalid parent.", ParentPad);
5044
5045 if (BasicBlock *UnwindDest = CatchSwitch.getUnwindDest()) {
5046 BasicBlock::iterator I = UnwindDest->getFirstNonPHIIt();
5047 Check(I->isEHPad() && !isa<LandingPadInst>(I),
5048 "CatchSwitchInst must unwind to an EH block which is not a "
5049 "landingpad.",
5050 &CatchSwitch);
5051
5052 // Record catchswitch sibling unwinds for verifySiblingFuncletUnwinds
5053 if (getParentPad(&*I) == ParentPad)
5054 SiblingFuncletInfo[&CatchSwitch] = &CatchSwitch;
5055 }
5056
5057 Check(CatchSwitch.getNumHandlers() != 0,
5058 "CatchSwitchInst cannot have empty handler list", &CatchSwitch);
5059
5060 for (BasicBlock *Handler : CatchSwitch.handlers()) {
5061 Check(isa<CatchPadInst>(Handler->getFirstNonPHIIt()),
5062 "CatchSwitchInst handlers must be catchpads", &CatchSwitch, Handler);
5063 }
5064
5065 visitEHPadPredecessors(CatchSwitch);
5066 visitTerminator(CatchSwitch);
5067}
5068
5069 void Verifier::visitCleanupReturnInst(CleanupReturnInst &CRI) {
5070   Check(isa<CleanupPadInst>(CRI.getOperand(0)),
5071         "CleanupReturnInst needs to be provided a CleanupPad", &CRI,
5072 CRI.getOperand(0));
5073
5074 if (BasicBlock *UnwindDest = CRI.getUnwindDest()) {
5075 BasicBlock::iterator I = UnwindDest->getFirstNonPHIIt();
5076 Check(I->isEHPad() && !isa<LandingPadInst>(I),
5077 "CleanupReturnInst must unwind to an EH block which is not a "
5078 "landingpad.",
5079 &CRI);
5080 }
5081
5082 visitTerminator(CRI);
5083}
5084
5085void Verifier::verifyDominatesUse(Instruction &I, unsigned i) {
5086 Instruction *Op = cast<Instruction>(I.getOperand(i));
5087   // If we have an invalid invoke, don't try to compute the dominance.
5088 // We already reject it in the invoke specific checks and the dominance
5089 // computation doesn't handle multiple edges.
5090 if (InvokeInst *II = dyn_cast<InvokeInst>(Op)) {
5091 if (II->getNormalDest() == II->getUnwindDest())
5092 return;
5093 }
5094
5095 // Quick check whether the def has already been encountered in the same block.
5096 // PHI nodes are not checked to prevent accepting preceding PHIs, because PHI
5097 // uses are defined to happen on the incoming edge, not at the instruction.
5098 //
5099 // FIXME: If this operand is a MetadataAsValue (wrapping a LocalAsMetadata)
5100 // wrapping an SSA value, assert that we've already encountered it. See
5101 // related FIXME in Mapper::mapLocalAsMetadata in ValueMapper.cpp.
5102 if (!isa<PHINode>(I) && InstsInThisBlock.count(Op))
5103 return;
5104
5105 const Use &U = I.getOperandUse(i);
5106 Check(DT.dominates(Op, U), "Instruction does not dominate all uses!", Op, &I);
5107}
5108
5109void Verifier::visitDereferenceableMetadata(Instruction& I, MDNode* MD) {
5110 Check(I.getType()->isPointerTy(),
5111 "dereferenceable, dereferenceable_or_null "
5112 "apply only to pointer types",
5113         &I);
5114   Check(isa<LoadInst>(I) || isa<IntToPtrInst>(I),
5115         "dereferenceable, dereferenceable_or_null apply only to load"
5116 " and inttoptr instructions, use attributes for calls or invokes",
5117 &I);
5118 Check(MD->getNumOperands() == 1,
5119 "dereferenceable, dereferenceable_or_null "
5120 "take one operand!",
5121 &I);
5122 ConstantInt *CI = mdconst::dyn_extract<ConstantInt>(MD->getOperand(0));
5123 Check(CI && CI->getType()->isIntegerTy(64),
5124 "dereferenceable, "
5125 "dereferenceable_or_null metadata value must be an i64!",
5126 &I);
5127}
5128
5129void Verifier::visitNofreeMetadata(Instruction &I, MDNode *MD) {
5130 Check(I.getType()->isPointerTy(), "nofree applies only to pointer types", &I);
5131 Check((isa<IntToPtrInst>(I)), "nofree applies only to inttoptr instruction",
5132 &I);
5133 Check(MD->getNumOperands() == 0, "nofree metadata must be empty", &I);
5134}
5135
5136void Verifier::visitProfMetadata(Instruction &I, MDNode *MD) {
5137 auto GetBranchingTerminatorNumOperands = [&]() {
5138 unsigned ExpectedNumOperands = 0;
5139 if (BranchInst *BI = dyn_cast<BranchInst>(&I))
5140 ExpectedNumOperands = BI->getNumSuccessors();
5141 else if (SwitchInst *SI = dyn_cast<SwitchInst>(&I))
5142 ExpectedNumOperands = SI->getNumSuccessors();
5143 else if (isa<CallInst>(&I))
5144 ExpectedNumOperands = 1;
5145 else if (IndirectBrInst *IBI = dyn_cast<IndirectBrInst>(&I))
5146 ExpectedNumOperands = IBI->getNumDestinations();
5147 else if (isa<SelectInst>(&I))
5148 ExpectedNumOperands = 2;
5149 else if (CallBrInst *CI = dyn_cast<CallBrInst>(&I))
5150 ExpectedNumOperands = CI->getNumSuccessors();
5151 return ExpectedNumOperands;
5152 };
5153 Check(MD->getNumOperands() >= 1,
5154 "!prof annotations should have at least 1 operand", MD);
5155 // Check first operand.
5156   Check(MD->getOperand(0) != nullptr, "first operand should not be null", MD);
5157   Check(isa<MDString>(MD->getOperand(0)),
5158         "expected string with name of the !prof annotation", MD);
5159 MDString *MDS = cast<MDString>(MD->getOperand(0));
5160 StringRef ProfName = MDS->getString();
5161 
5162   if (ProfName == MDProfLabels::UnknownBranchWeightsMarker) {
5163     Check(GetBranchingTerminatorNumOperands() != 0 || isa<InvokeInst>(I),
5164 "'unknown' !prof should only appear on instructions on which "
5165 "'branch_weights' would",
5166 MD);
5167 verifyUnknownProfileMetadata(MD);
5168 return;
5169 }
5170
5171 Check(MD->getNumOperands() >= 2,
5172 "!prof annotations should have no less than 2 operands", MD);
5173
5174 // Check consistency of !prof branch_weights metadata.
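  // For illustration, a conditional branch typically carries one weight per
  // successor:
  //   br i1 %cond, label %taken, label %untaken, !prof !0
  //   !0 = !{!"branch_weights", i32 200, i32 1}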
5175 if (ProfName == MDProfLabels::BranchWeights) {
5176 unsigned NumBranchWeights = getNumBranchWeights(*MD);
5177 if (isa<InvokeInst>(&I)) {
5178 Check(NumBranchWeights == 1 || NumBranchWeights == 2,
5179 "Wrong number of InvokeInst branch_weights operands", MD);
5180 } else {
5181 const unsigned ExpectedNumOperands = GetBranchingTerminatorNumOperands();
5182 if (ExpectedNumOperands == 0)
5183 CheckFailed("!prof branch_weights are not allowed for this instruction",
5184 MD);
5185
5186 Check(NumBranchWeights == ExpectedNumOperands, "Wrong number of operands",
5187 MD);
5188 }
5189 for (unsigned i = getBranchWeightOffset(MD); i < MD->getNumOperands();
5190 ++i) {
5191 auto &MDO = MD->getOperand(i);
5192       Check(MDO, "second operand should not be null", MD);
5193       Check(mdconst::dyn_extract<ConstantInt>(MDO),
5194             "!prof branch_weights operand is not a const int");
5195 }
5196 } else if (ProfName == MDProfLabels::ValueProfile) {
5197 Check(isValueProfileMD(MD), "invalid value profiling metadata", MD);
5198 ConstantInt *KindInt = mdconst::dyn_extract<ConstantInt>(MD->getOperand(1));
5199 Check(KindInt, "VP !prof missing kind argument", MD);
5200
5201 auto Kind = KindInt->getZExtValue();
5202 Check(Kind >= InstrProfValueKind::IPVK_First &&
5203 Kind <= InstrProfValueKind::IPVK_Last,
5204 "Invalid VP !prof kind", MD);
5205 Check(MD->getNumOperands() % 2 == 1,
5206 "VP !prof should have an even number "
5207 "of arguments after 'VP'",
5208 MD);
5209 if (Kind == InstrProfValueKind::IPVK_IndirectCallTarget ||
5210         Kind == InstrProfValueKind::IPVK_MemOPSize)
5211       Check(isa<CallBase>(I),
5212             "VP !prof indirect call or memop size expected to be applied to "
5213 "CallBase instructions only",
5214 MD);
5215 } else {
5216 CheckFailed("expected either branch_weights or VP profile name", MD);
5217 }
5218}
5219
5220void Verifier::visitDIAssignIDMetadata(Instruction &I, MDNode *MD) {
5221 assert(I.hasMetadata(LLVMContext::MD_DIAssignID));
5222 // DIAssignID metadata must be attached to either an alloca or some form of
5223 // store/memory-writing instruction.
5224 // FIXME: We allow all intrinsic insts here to avoid trying to enumerate all
5225 // possible store intrinsics.
5226   bool ExpectedInstTy =
5227       isa<AllocaInst>(I) || isa<StoreInst>(I) || isa<IntrinsicInst>(I);
5228   CheckDI(ExpectedInstTy, "!DIAssignID attached to unexpected instruction kind",
5229 I, MD);
5230 // Iterate over the MetadataAsValue uses of the DIAssignID - these should
5231 // only be found as DbgAssignIntrinsic operands.
5232 if (auto *AsValue = MetadataAsValue::getIfExists(Context, MD)) {
5233     for (auto *User : AsValue->users()) {
5234       CheckDI(isa<DbgAssignIntrinsic>(User),
5235               "!DIAssignID should only be used by llvm.dbg.assign intrinsics",
5236 MD, User);
5237 // All of the dbg.assign intrinsics should be in the same function as I.
5238 if (auto *DAI = dyn_cast<DbgAssignIntrinsic>(User))
5239 CheckDI(DAI->getFunction() == I.getFunction(),
5240 "dbg.assign not in same function as inst", DAI, &I);
5241 }
5242 }
5243 for (DbgVariableRecord *DVR :
5244 cast<DIAssignID>(MD)->getAllDbgVariableRecordUsers()) {
5245 CheckDI(DVR->isDbgAssign(),
5246 "!DIAssignID should only be used by Assign DVRs.", MD, DVR);
5247 CheckDI(DVR->getFunction() == I.getFunction(),
5248 "DVRAssign not in same function as inst", DVR, &I);
5249 }
5250}
5251
5252 void Verifier::visitMMRAMetadata(Instruction &I, MDNode *MD) {
5253   Check(canInstructionHaveMMRAs(I),
5254         "!mmra metadata attached to unexpected instruction kind", I, MD);
5255
5256 // MMRA Metadata should either be a tag, e.g. !{!"foo", !"bar"}, or a
5257 // list of tags such as !2 in the following example:
5258 // !0 = !{!"a", !"b"}
5259 // !1 = !{!"c", !"d"}
5260 // !2 = !{!0, !1}
5261 if (MMRAMetadata::isTagMD(MD))
5262 return;
5263
5264 Check(isa<MDTuple>(MD), "!mmra expected to be a metadata tuple", I, MD);
5265 for (const MDOperand &MDOp : MD->operands())
5266 Check(MMRAMetadata::isTagMD(MDOp.get()),
5267 "!mmra metadata tuple operand is not an MMRA tag", I, MDOp.get());
5268}
5269
5270void Verifier::visitCallStackMetadata(MDNode *MD) {
5271 // Call stack metadata should consist of a list of at least 1 constant int
5272 // (representing a hash of the location).
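  // For example (the values are illustrative hashed frame ids):
  //   !0 = !{i64 123456789, i64 987654321}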
5273 Check(MD->getNumOperands() >= 1,
5274 "call stack metadata should have at least 1 operand", MD);
5275
5276   for (const auto &Op : MD->operands())
5277     Check(mdconst::dyn_extract_or_null<ConstantInt>(Op),
5278           "call stack metadata operand should be constant integer", Op);
5279}
5280
5281void Verifier::visitMemProfMetadata(Instruction &I, MDNode *MD) {
5282 Check(isa<CallBase>(I), "!memprof metadata should only exist on calls", &I);
5283 Check(MD->getNumOperands() >= 1,
5284 "!memprof annotations should have at least 1 metadata operand "
5285 "(MemInfoBlock)",
5286 MD);
5287
5288 // Check each MIB
5289 for (auto &MIBOp : MD->operands()) {
5290 MDNode *MIB = dyn_cast<MDNode>(MIBOp);
5291 // The first operand of an MIB should be the call stack metadata.
5292     // The rest of the operands should be MDString tags, and there should be
5293 // at least one.
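    // An illustrative MIB (the tag strings are examples; memprof emits tags
    // such as "notcold" or "cold"):
    //   !1 = !{!2, !"notcold"}    ; MIB: call stack followed by tag(s)
    //   !2 = !{i64 123, i64 456}  ; call stack of hashed frame ids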
5294 Check(MIB->getNumOperands() >= 2,
5295 "Each !memprof MemInfoBlock should have at least 2 operands", MIB);
5296
5297 // Check call stack metadata (first operand).
5298 Check(MIB->getOperand(0) != nullptr,
5299 "!memprof MemInfoBlock first operand should not be null", MIB);
5300 Check(isa<MDNode>(MIB->getOperand(0)),
5301 "!memprof MemInfoBlock first operand should be an MDNode", MIB);
5302 MDNode *StackMD = dyn_cast<MDNode>(MIB->getOperand(0));
5303 visitCallStackMetadata(StackMD);
5304
5305 // The next set of 1 or more operands should be MDString.
5306 unsigned I = 1;
5307 for (; I < MIB->getNumOperands(); ++I) {
5308 if (!isa<MDString>(MIB->getOperand(I))) {
5309 Check(I > 1,
5310 "!memprof MemInfoBlock second operand should be an MDString",
5311 MIB);
5312 break;
5313 }
5314 }
5315
5316 // Any remaining should be MDNode that are pairs of integers
5317 for (; I < MIB->getNumOperands(); ++I) {
5318 MDNode *OpNode = dyn_cast<MDNode>(MIB->getOperand(I));
5319 Check(OpNode, "Not all !memprof MemInfoBlock operands 2 to N are MDNode",
5320 MIB);
5321 Check(OpNode->getNumOperands() == 2,
5322 "Not all !memprof MemInfoBlock operands 2 to N are MDNode with 2 "
5323 "operands",
5324 MIB);
5325 // Check that all of Op's operands are ConstantInt.
5326 Check(llvm::all_of(OpNode->operands(),
5327 [](const MDOperand &Op) {
5328 return mdconst::hasa<ConstantInt>(Op);
5329 }),
5330 "Not all !memprof MemInfoBlock operands 2 to N are MDNode with "
5331 "ConstantInt operands",
5332 MIB);
5333 }
5334 }
5335}
5336
5337void Verifier::visitCallsiteMetadata(Instruction &I, MDNode *MD) {
5338 Check(isa<CallBase>(I), "!callsite metadata should only exist on calls", &I);
5339 // Verify the partial callstack annotated from memprof profiles. This callsite
5340 // is a part of a profiled allocation callstack.
5341 visitCallStackMetadata(MD);
5342}
5343
5344static inline bool isConstantIntMetadataOperand(const Metadata *MD) {
5345 if (auto *VAL = dyn_cast<ValueAsMetadata>(MD))
5346 return isa<ConstantInt>(VAL->getValue());
5347 return false;
5348}
5349
5350void Verifier::visitCalleeTypeMetadata(Instruction &I, MDNode *MD) {
5351 Check(isa<CallBase>(I), "!callee_type metadata should only exist on calls",
5352 &I);
5353   for (Metadata *Op : MD->operands()) {
5354     Check(isa<MDNode>(Op),
5355           "The callee_type metadata must be a list of type metadata nodes", Op);
5356 auto *TypeMD = cast<MDNode>(Op);
5357 Check(TypeMD->getNumOperands() == 2,
5358 "Well-formed generalized type metadata must contain exactly two "
5359 "operands",
5360 Op);
5361 Check(isConstantIntMetadataOperand(TypeMD->getOperand(0)) &&
5362 mdconst::extract<ConstantInt>(TypeMD->getOperand(0))->isZero(),
5363 "The first operand of type metadata for functions must be zero", Op);
5364 Check(TypeMD->hasGeneralizedMDString(),
5365 "Only generalized type metadata can be part of the callee_type "
5366 "metadata list",
5367 Op);
5368 }
5369}
5370
5371void Verifier::visitAnnotationMetadata(MDNode *Annotation) {
5372 Check(isa<MDTuple>(Annotation), "annotation must be a tuple");
5373 Check(Annotation->getNumOperands() >= 1,
5374 "annotation must have at least one operand");
5375 for (const MDOperand &Op : Annotation->operands()) {
5376 bool TupleOfStrings =
5377 isa<MDTuple>(Op.get()) &&
5378 all_of(cast<MDTuple>(Op)->operands(), [](auto &Annotation) {
5379 return isa<MDString>(Annotation.get());
5380 });
5381 Check(isa<MDString>(Op.get()) || TupleOfStrings,
5382 "operands must be a string or a tuple of strings");
5383 }
5384}
5385
5386void Verifier::visitAliasScopeMetadata(const MDNode *MD) {
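  // For illustration, a well-formed scope and its domain look like:
  //   !0 = distinct !{!0, !1, !"a scope"}   ; scope: self-ref, domain, name
  //   !1 = distinct !{!1, !"a domain"}      ; domain: self-ref, name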
5387 unsigned NumOps = MD->getNumOperands();
5388 Check(NumOps >= 2 && NumOps <= 3, "scope must have two or three operands",
5389 MD);
5390 Check(MD->getOperand(0).get() == MD || isa<MDString>(MD->getOperand(0)),
5391 "first scope operand must be self-referential or string", MD);
5392   if (NumOps == 3)
5393     Check(isa<MDString>(MD->getOperand(2)),
5394           "third scope operand must be string (if used)", MD);
5395
5396 MDNode *Domain = dyn_cast<MDNode>(MD->getOperand(1));
5397 Check(Domain != nullptr, "second scope operand must be MDNode", MD);
5398
5399 unsigned NumDomainOps = Domain->getNumOperands();
5400 Check(NumDomainOps >= 1 && NumDomainOps <= 2,
5401 "domain must have one or two operands", Domain);
5402 Check(Domain->getOperand(0).get() == Domain ||
5403 isa<MDString>(Domain->getOperand(0)),
5404 "first domain operand must be self-referential or string", Domain);
5405 if (NumDomainOps == 2)
5406 Check(isa<MDString>(Domain->getOperand(1)),
5407 "second domain operand must be string (if used)", Domain);
5408}
5409
5410void Verifier::visitAliasScopeListMetadata(const MDNode *MD) {
5411 for (const MDOperand &Op : MD->operands()) {
5412 const MDNode *OpMD = dyn_cast<MDNode>(Op);
5413 Check(OpMD != nullptr, "scope list must consist of MDNodes", MD);
5414 visitAliasScopeMetadata(OpMD);
5415 }
5416}
5417
5418void Verifier::visitAccessGroupMetadata(const MDNode *MD) {
5419 auto IsValidAccessScope = [](const MDNode *MD) {
5420 return MD->getNumOperands() == 0 && MD->isDistinct();
5421 };
5422
5423 // It must be either an access scope itself...
5424 if (IsValidAccessScope(MD))
5425 return;
5426
5427 // ...or a list of access scopes.
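  // For illustration:
  //   !0 = distinct !{}      ; a single access group
  //   !1 = !{!0, !2}         ; a list of access groups (!2 defined likewise)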
5428 for (const MDOperand &Op : MD->operands()) {
5429 const MDNode *OpMD = dyn_cast<MDNode>(Op);
5430 Check(OpMD != nullptr, "Access scope list must consist of MDNodes", MD);
5431 Check(IsValidAccessScope(OpMD),
5432 "Access scope list contains invalid access scope", MD);
5433 }
5434}
5435
5436void Verifier::visitCapturesMetadata(Instruction &I, const MDNode *Captures) {
5437 static const char *ValidArgs[] = {"address_is_null", "address",
5438 "read_provenance", "provenance"};
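  // For illustration, a well-formed attachment looks like:
  //   store ptr %p, ptr %slot, !captures !0
  //   !0 = !{!"address", !"read_provenance"}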
5439
5440 auto *SI = dyn_cast<StoreInst>(&I);
5441 Check(SI, "!captures metadata can only be applied to store instructions", &I);
5442 Check(SI->getValueOperand()->getType()->isPointerTy(),
5443 "!captures metadata can only be applied to store with value operand of "
5444 "pointer type",
5445 &I);
5446 Check(Captures->getNumOperands() != 0, "!captures metadata cannot be empty",
5447 &I);
5448
5449 for (Metadata *Op : Captures->operands()) {
5450 auto *Str = dyn_cast<MDString>(Op);
5451 Check(Str, "!captures metadata must be a list of strings", &I);
5452 Check(is_contained(ValidArgs, Str->getString()),
5453 "invalid entry in !captures metadata", &I, Str);
5454 }
5455}
5456
5457void Verifier::visitAllocTokenMetadata(Instruction &I, MDNode *MD) {
5458 Check(isa<CallBase>(I), "!alloc_token should only exist on calls", &I);
5459 Check(MD->getNumOperands() == 2, "!alloc_token must have 2 operands", MD);
5460   Check(isa<MDString>(MD->getOperand(0)), "expected string", MD);
5461   Check(mdconst::dyn_extract_or_null<ConstantInt>(MD->getOperand(1)),
5462         "expected integer constant", MD);
5463}
5464
5465/// verifyInstruction - Verify that an instruction is well formed.
5466///
5467void Verifier::visitInstruction(Instruction &I) {
5468 BasicBlock *BB = I.getParent();
5469 Check(BB, "Instruction not embedded in basic block!", &I);
5470
5471 if (!isa<PHINode>(I)) { // Check that non-phi nodes are not self referential
5472 for (User *U : I.users()) {
5473 Check(U != (User *)&I || !DT.isReachableFromEntry(BB),
5474 "Only PHI nodes may reference their own value!", &I);
5475 }
5476 }
5477
5478 // Check that void typed values don't have names
5479 Check(!I.getType()->isVoidTy() || !I.hasName(),
5480 "Instruction has a name, but provides a void value!", &I);
5481
5482 // Check that the return value of the instruction is either void or a legal
5483 // value type.
5484 Check(I.getType()->isVoidTy() || I.getType()->isFirstClassType(),
5485 "Instruction returns a non-scalar type!", &I);
5486
5487 // Check that the instruction doesn't produce metadata. Calls are already
5488 // checked against the callee type.
5489 Check(!I.getType()->isMetadataTy() || isa<CallInst>(I) || isa<InvokeInst>(I),
5490 "Invalid use of metadata!", &I);
5491
5492 // Check that all uses of the instruction, if they are instructions
5493 // themselves, actually have parent basic blocks. If the use is not an
5494 // instruction, it is an error!
5495 for (Use &U : I.uses()) {
5496 if (Instruction *Used = dyn_cast<Instruction>(U.getUser()))
5497 Check(Used->getParent() != nullptr,
5498 "Instruction referencing"
5499 " instruction not embedded in a basic block!",
5500 &I, Used);
5501 else {
5502 CheckFailed("Use of instruction is not an instruction!", U);
5503 return;
5504 }
5505 }
5506
5507 // Get a pointer to the call base of the instruction if it is some form of
5508 // call.
5509 const CallBase *CBI = dyn_cast<CallBase>(&I);
5510
5511 for (unsigned i = 0, e = I.getNumOperands(); i != e; ++i) {
5512 Check(I.getOperand(i) != nullptr, "Instruction has null operand!", &I);
5513
5514 // Check to make sure that only first-class-values are operands to
5515 // instructions.
5516 if (!I.getOperand(i)->getType()->isFirstClassType()) {
5517 Check(false, "Instruction operands must be first-class values!", &I);
5518 }
5519
5520 if (Function *F = dyn_cast<Function>(I.getOperand(i))) {
5521 // This code checks whether the function is used as the operand of a
5522 // clang_arc_attachedcall operand bundle.
5523 auto IsAttachedCallOperand = [](Function *F, const CallBase *CBI,
5524 int Idx) {
5525         return CBI && CBI->isOperandBundleOfType(
5526                           LLVMContext::OB_clang_arc_attachedcall, Idx);
5527       };
5528
5529 // Check to make sure that the "address of" an intrinsic function is never
5530 // taken. Ignore cases where the address of the intrinsic function is used
5531 // as the argument of operand bundle "clang.arc.attachedcall" as those
5532 // cases are handled in verifyAttachedCallBundle.
5533 Check((!F->isIntrinsic() ||
5534 (CBI && &CBI->getCalledOperandUse() == &I.getOperandUse(i)) ||
5535 IsAttachedCallOperand(F, CBI, i)),
5536 "Cannot take the address of an intrinsic!", &I);
5537 Check(!F->isIntrinsic() || isa<CallInst>(I) ||
5538 F->getIntrinsicID() == Intrinsic::donothing ||
5539 F->getIntrinsicID() == Intrinsic::seh_try_begin ||
5540 F->getIntrinsicID() == Intrinsic::seh_try_end ||
5541 F->getIntrinsicID() == Intrinsic::seh_scope_begin ||
5542 F->getIntrinsicID() == Intrinsic::seh_scope_end ||
5543 F->getIntrinsicID() == Intrinsic::coro_resume ||
5544 F->getIntrinsicID() == Intrinsic::coro_destroy ||
5545 F->getIntrinsicID() == Intrinsic::coro_await_suspend_void ||
5546 F->getIntrinsicID() == Intrinsic::coro_await_suspend_bool ||
5547 F->getIntrinsicID() == Intrinsic::coro_await_suspend_handle ||
5548 F->getIntrinsicID() ==
5549 Intrinsic::experimental_patchpoint_void ||
5550 F->getIntrinsicID() == Intrinsic::experimental_patchpoint ||
5551 F->getIntrinsicID() == Intrinsic::fake_use ||
5552 F->getIntrinsicID() == Intrinsic::experimental_gc_statepoint ||
5553 F->getIntrinsicID() == Intrinsic::wasm_throw ||
5554 F->getIntrinsicID() == Intrinsic::wasm_rethrow ||
5555 IsAttachedCallOperand(F, CBI, i),
5556 "Cannot invoke an intrinsic other than donothing, patchpoint, "
5557 "statepoint, coro_resume, coro_destroy, clang.arc.attachedcall or "
5558 "wasm.(re)throw",
5559 &I);
5560 Check(F->getParent() == &M, "Referencing function in another module!", &I,
5561 &M, F, F->getParent());
5562 } else if (BasicBlock *OpBB = dyn_cast<BasicBlock>(I.getOperand(i))) {
5563 Check(OpBB->getParent() == BB->getParent(),
5564 "Referring to a basic block in another function!", &I);
5565 } else if (Argument *OpArg = dyn_cast<Argument>(I.getOperand(i))) {
5566 Check(OpArg->getParent() == BB->getParent(),
5567 "Referring to an argument in another function!", &I);
5568 } else if (GlobalValue *GV = dyn_cast<GlobalValue>(I.getOperand(i))) {
5569 Check(GV->getParent() == &M, "Referencing global in another module!", &I,
5570 &M, GV, GV->getParent());
5571 } else if (Instruction *OpInst = dyn_cast<Instruction>(I.getOperand(i))) {
5572 Check(OpInst->getFunction() == BB->getParent(),
5573 "Referring to an instruction in another function!", &I);
5574 verifyDominatesUse(I, i);
5575 } else if (isa<InlineAsm>(I.getOperand(i))) {
5576 Check(CBI && &CBI->getCalledOperandUse() == &I.getOperandUse(i),
5577 "Cannot take the address of an inline asm!", &I);
5578 } else if (auto *CPA = dyn_cast<ConstantPtrAuth>(I.getOperand(i))) {
5579 visitConstantExprsRecursively(CPA);
5580 } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(I.getOperand(i))) {
5581 if (CE->getType()->isPtrOrPtrVectorTy()) {
5582 // If we have a ConstantExpr pointer, we need to see if it came from an
5583 // illegal bitcast.
5584 visitConstantExprsRecursively(CE);
5585 }
5586 }
5587 }
5588
5589 if (MDNode *MD = I.getMetadata(LLVMContext::MD_fpmath)) {
5590 Check(I.getType()->isFPOrFPVectorTy(),
5591 "fpmath requires a floating point result!", &I);
5592 Check(MD->getNumOperands() == 1, "fpmath takes one operand!", &I);
5593     if (ConstantFP *CFP0 =
5594             mdconst::dyn_extract_or_null<ConstantFP>(MD->getOperand(0))) {
5595       const APFloat &Accuracy = CFP0->getValueAPF();
5596 Check(&Accuracy.getSemantics() == &APFloat::IEEEsingle(),
5597 "fpmath accuracy must have float type", &I);
5598 Check(Accuracy.isFiniteNonZero() && !Accuracy.isNegative(),
5599 "fpmath accuracy not a positive number!", &I);
5600 } else {
5601 Check(false, "invalid fpmath accuracy!", &I);
5602 }
5603 }
5604
5605   if (MDNode *Range = I.getMetadata(LLVMContext::MD_range)) {
5606     Check(isa<LoadInst>(I) || isa<CallInst>(I) || isa<InvokeInst>(I),
5607           "Ranges are only for loads, calls and invokes!", &I);
5608 visitRangeMetadata(I, Range, I.getType());
5609 }
5610
5611   if (MDNode *Range = I.getMetadata(LLVMContext::MD_noalias_addrspace)) {
5612     Check(isa<LoadInst>(I) || isa<StoreInst>(I) || isa<AtomicRMWInst>(I) ||
5613               isa<AtomicCmpXchgInst>(I) || isa<CallInst>(I),
5614           "noalias.addrspace are only for memory operations!", &I);
5615 visitNoaliasAddrspaceMetadata(I, Range, I.getType());
5616 }
5617
5618   if (I.hasMetadata(LLVMContext::MD_invariant_group)) {
5619     Check(isa<LoadInst>(I) || isa<StoreInst>(I),
5620           "invariant.group metadata is only for loads and stores", &I);
5621 }
5622
5623 if (MDNode *MD = I.getMetadata(LLVMContext::MD_nonnull)) {
5624 Check(I.getType()->isPointerTy(), "nonnull applies only to pointer types",
5625           &I);
5626     Check(isa<LoadInst>(I),
5627           "nonnull applies only to load instructions, use attributes"
5628 " for calls or invokes",
5629 &I);
5630 Check(MD->getNumOperands() == 0, "nonnull metadata must be empty", &I);
5631 }
5632
5633 if (MDNode *MD = I.getMetadata(LLVMContext::MD_dereferenceable))
5634 visitDereferenceableMetadata(I, MD);
5635
5636 if (MDNode *MD = I.getMetadata(LLVMContext::MD_dereferenceable_or_null))
5637 visitDereferenceableMetadata(I, MD);
5638
5639 if (MDNode *MD = I.getMetadata(LLVMContext::MD_nofree))
5640 visitNofreeMetadata(I, MD);
5641
5642 if (MDNode *TBAA = I.getMetadata(LLVMContext::MD_tbaa))
5643 TBAAVerifyHelper.visitTBAAMetadata(&I, TBAA);
5644
5645 if (MDNode *MD = I.getMetadata(LLVMContext::MD_noalias))
5646 visitAliasScopeListMetadata(MD);
5647 if (MDNode *MD = I.getMetadata(LLVMContext::MD_alias_scope))
5648 visitAliasScopeListMetadata(MD);
5649
5650 if (MDNode *MD = I.getMetadata(LLVMContext::MD_access_group))
5651 visitAccessGroupMetadata(MD);
5652
5653 if (MDNode *AlignMD = I.getMetadata(LLVMContext::MD_align)) {
5654 Check(I.getType()->isPointerTy(), "align applies only to pointer types",
5655           &I);
5656     Check(isa<LoadInst>(I),
5657           "align applies only to load instructions, "
5658 "use attributes for calls or invokes",
5659 &I);
5660 Check(AlignMD->getNumOperands() == 1, "align takes one operand!", &I);
5661 ConstantInt *CI = mdconst::dyn_extract<ConstantInt>(AlignMD->getOperand(0));
5662 Check(CI && CI->getType()->isIntegerTy(64),
5663 "align metadata value must be an i64!", &I);
5664 uint64_t Align = CI->getZExtValue();
5665 Check(isPowerOf2_64(Align), "align metadata value must be a power of 2!",
5666 &I);
5667 Check(Align <= Value::MaximumAlignment,
5668           "alignment is larger than the implementation-defined limit", &I);
5669 }
5670
5671 if (MDNode *MD = I.getMetadata(LLVMContext::MD_prof))
5672 visitProfMetadata(I, MD);
5673
5674 if (MDNode *MD = I.getMetadata(LLVMContext::MD_memprof))
5675 visitMemProfMetadata(I, MD);
5676
5677 if (MDNode *MD = I.getMetadata(LLVMContext::MD_callsite))
5678 visitCallsiteMetadata(I, MD);
5679
5680 if (MDNode *MD = I.getMetadata(LLVMContext::MD_callee_type))
5681 visitCalleeTypeMetadata(I, MD);
5682
5683 if (MDNode *MD = I.getMetadata(LLVMContext::MD_DIAssignID))
5684 visitDIAssignIDMetadata(I, MD);
5685
5686 if (MDNode *MMRA = I.getMetadata(LLVMContext::MD_mmra))
5687 visitMMRAMetadata(I, MMRA);
5688
5689 if (MDNode *Annotation = I.getMetadata(LLVMContext::MD_annotation))
5690 visitAnnotationMetadata(Annotation);
5691
5692 if (MDNode *Captures = I.getMetadata(LLVMContext::MD_captures))
5693 visitCapturesMetadata(I, Captures);
5694
5695 if (MDNode *MD = I.getMetadata(LLVMContext::MD_alloc_token))
5696 visitAllocTokenMetadata(I, MD);
5697
5698 if (MDNode *N = I.getDebugLoc().getAsMDNode()) {
5699 CheckDI(isa<DILocation>(N), "invalid !dbg metadata attachment", &I, N);
5700 visitMDNode(*N, AreDebugLocsAllowed::Yes);
5701
5702 if (auto *DL = dyn_cast<DILocation>(N)) {
5703 if (DL->getAtomGroup()) {
5704 CheckDI(DL->getScope()->getSubprogram()->getKeyInstructionsEnabled(),
5705 "DbgLoc uses atomGroup but DISubprogram doesn't have Key "
5706 "Instructions enabled",
5707 DL, DL->getScope()->getSubprogram());
5708 }
5709 }
5710 }
5711
5712   SmallVector<std::pair<unsigned, MDNode *>, 4> MDs;
5713   I.getAllMetadata(MDs);
5714 for (auto Attachment : MDs) {
5715 unsigned Kind = Attachment.first;
5716 auto AllowLocs =
5717 (Kind == LLVMContext::MD_dbg || Kind == LLVMContext::MD_loop)
5718 ? AreDebugLocsAllowed::Yes
5719 : AreDebugLocsAllowed::No;
5720 visitMDNode(*Attachment.second, AllowLocs);
5721 }
5722
5723 InstsInThisBlock.insert(&I);
5724}
5725
5726/// Allow intrinsics to be verified in different ways.
5727 void Verifier::visitIntrinsicCall(Intrinsic::ID ID, CallBase &Call) {
5728   Function *IF = Call.getCalledFunction();
5729   Check(IF->isDeclaration(), "Intrinsic functions should never be defined!",
5730 IF);
5731
5732 // Verify that the intrinsic prototype lines up with what the .td files
5733 // describe.
5734 FunctionType *IFTy = IF->getFunctionType();
5735 bool IsVarArg = IFTy->isVarArg();
5736 
5737   SmallVector<Intrinsic::IITDescriptor, 8> Table;
5738   getIntrinsicInfoTableEntries(ID, Table);
5739   ArrayRef<Intrinsic::IITDescriptor> TableRef = Table;
5740 
5741   // Walk the descriptors to extract overloaded types.
5742   SmallVector<Type *, 4> ArgTys;
5743   Intrinsic::MatchIntrinsicTypesResult Res =
5744       Intrinsic::matchIntrinsicSignature(IFTy, TableRef, ArgTys);
5745   Check(Res != Intrinsic::MatchIntrinsicTypes_NoMatchRet,
5746         "Intrinsic has incorrect return type!", IF);
5747   Check(Res != Intrinsic::MatchIntrinsicTypes_NoMatchArg,
5748         "Intrinsic has incorrect argument type!", IF);
5749
5750 // Verify if the intrinsic call matches the vararg property.
5751   if (IsVarArg)
5752     Check(!Intrinsic::matchIntrinsicVarArg(IsVarArg, TableRef),
5753           "Intrinsic was not defined with variable arguments!", IF);
5754   else
5755     Check(!Intrinsic::matchIntrinsicVarArg(IsVarArg, TableRef),
5756           "Callsite was not defined with variable arguments!", IF);
5757
5758 // All descriptors should be absorbed by now.
5759 Check(TableRef.empty(), "Intrinsic has too few arguments!", IF);
5760
5761 // Now that we have the intrinsic ID and the actual argument types (and we
5762 // know they are legal for the intrinsic!) get the intrinsic name through the
5763 // usual means. This allows us to verify the mangling of argument types into
5764 // the name.
5765 const std::string ExpectedName =
5766 Intrinsic::getName(ID, ArgTys, IF->getParent(), IFTy);
5767 Check(ExpectedName == IF->getName(),
5768 "Intrinsic name not mangled correctly for type arguments! "
5769 "Should be: " +
5770 ExpectedName,
5771 IF);
5772
5773 // If the intrinsic takes MDNode arguments, verify that they are either global
5774 // or are local to *this* function.
5775 for (Value *V : Call.args()) {
5776 if (auto *MD = dyn_cast<MetadataAsValue>(V))
5777 visitMetadataAsValue(*MD, Call.getCaller());
5778 if (auto *Const = dyn_cast<Constant>(V))
5779 Check(!Const->getType()->isX86_AMXTy(),
5780 "const x86_amx is not allowed in argument!");
5781 }
5782
5783 switch (ID) {
5784 default:
5785 break;
5786 case Intrinsic::assume: {
5787     if (Call.hasOperandBundles()) {
5788       auto *Cond = dyn_cast<ConstantInt>(Call.getArgOperand(0));
5789       Check(Cond && Cond->isOne(),
5790 "assume with operand bundles must have i1 true condition", Call);
5791 }
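    // Illustrative well-formed assumption bundles checked in the loop below:
    //   call void @llvm.assume(i1 true) ["align"(ptr %p, i64 16)]
    //   call void @llvm.assume(i1 true) ["separate_storage"(ptr %a, ptr %b)]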
5792 for (auto &Elem : Call.bundle_op_infos()) {
5793 unsigned ArgCount = Elem.End - Elem.Begin;
5794 // Separate storage assumptions are special insofar as they're the only
5795 // operand bundles allowed on assumes that aren't parameter attributes.
5796 if (Elem.Tag->getKey() == "separate_storage") {
5797 Check(ArgCount == 2,
5798 "separate_storage assumptions should have 2 arguments", Call);
5799 Check(Call.getOperand(Elem.Begin)->getType()->isPointerTy() &&
5800 Call.getOperand(Elem.Begin + 1)->getType()->isPointerTy(),
5801 "arguments to separate_storage assumptions should be pointers",
5802 Call);
5803 continue;
5804 }
5805 Check(Elem.Tag->getKey() == "ignore" ||
5806 Attribute::isExistingAttribute(Elem.Tag->getKey()),
5807 "tags must be valid attribute names", Call);
5808 Attribute::AttrKind Kind =
5809 Attribute::getAttrKindFromName(Elem.Tag->getKey());
5810 if (Kind == Attribute::Alignment) {
5811 Check(ArgCount <= 3 && ArgCount >= 2,
5812 "alignment assumptions should have 2 or 3 arguments", Call);
5813 Check(Call.getOperand(Elem.Begin)->getType()->isPointerTy(),
5814 "first argument should be a pointer", Call);
5815 Check(Call.getOperand(Elem.Begin + 1)->getType()->isIntegerTy(),
5816 "second argument should be an integer", Call);
5817 if (ArgCount == 3)
5818 Check(Call.getOperand(Elem.Begin + 2)->getType()->isIntegerTy(),
5819 "third argument should be an integer if present", Call);
5820 continue;
5821 }
5822 if (Kind == Attribute::Dereferenceable) {
5823 Check(ArgCount == 2,
5824 "dereferenceable assumptions should have 2 arguments", Call);
5825 Check(Call.getOperand(Elem.Begin)->getType()->isPointerTy(),
5826 "first argument should be a pointer", Call);
5827 Check(Call.getOperand(Elem.Begin + 1)->getType()->isIntegerTy(),
5828 "second argument should be an integer", Call);
5829 continue;
5830 }
5831 Check(ArgCount <= 2, "too many arguments", Call);
5832 if (Kind == Attribute::None)
5833 break;
5834 if (Attribute::isIntAttrKind(Kind)) {
5835 Check(ArgCount == 2, "this attribute should have 2 arguments", Call);
5836 Check(isa<ConstantInt>(Call.getOperand(Elem.Begin + 1)),
5837 "the second argument should be a constant integral value", Call);
5838 } else if (Attribute::canUseAsParamAttr(Kind)) {
5839 Check((ArgCount) == 1, "this attribute should have one argument", Call);
5840 } else if (Attribute::canUseAsFnAttr(Kind)) {
5841 Check((ArgCount) == 0, "this attribute has no argument", Call);
5842 }
5843 }
5844 break;
5845 }
5846 case Intrinsic::ucmp:
5847 case Intrinsic::scmp: {
5848 Type *SrcTy = Call.getOperand(0)->getType();
5849 Type *DestTy = Call.getType();
5850
5851 Check(DestTy->getScalarSizeInBits() >= 2,
5852 "result type must be at least 2 bits wide", Call);
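    // e.g. call i2 @llvm.scmp.i2.i32(i32 %a, i32 %b) uses the narrowest legal
    // result type; an i1 result would be rejected by the check above.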
5853
5854 bool IsDestTypeVector = DestTy->isVectorTy();
5855 Check(SrcTy->isVectorTy() == IsDestTypeVector,
5856 "ucmp/scmp argument and result types must both be either vector or "
5857 "scalar types",
5858 Call);
5859 if (IsDestTypeVector) {
5860 auto SrcVecLen = cast<VectorType>(SrcTy)->getElementCount();
5861 auto DestVecLen = cast<VectorType>(DestTy)->getElementCount();
5862 Check(SrcVecLen == DestVecLen,
5863 "return type and arguments must have the same number of "
5864 "elements",
5865 Call);
5866 }
5867 break;
5868 }
5869 case Intrinsic::coro_id: {
5870 auto *InfoArg = Call.getArgOperand(3)->stripPointerCasts();
5871 if (isa<ConstantPointerNull>(InfoArg))
5872 break;
5873 auto *GV = dyn_cast<GlobalVariable>(InfoArg);
5874 Check(GV && GV->isConstant() && GV->hasDefinitiveInitializer(),
5875 "info argument of llvm.coro.id must refer to an initialized "
5876 "constant");
5877     Constant *Init = GV->getInitializer();
5878     Check(isa<ConstantStruct>(Init) || isa<ConstantArray>(Init),
5879           "info argument of llvm.coro.id must refer to either a struct or "
5880 "an array");
5881 break;
5882 }
5883 case Intrinsic::is_fpclass: {
5884 const ConstantInt *TestMask = cast<ConstantInt>(Call.getOperand(1));
5885 Check((TestMask->getZExtValue() & ~static_cast<unsigned>(fcAllFlags)) == 0,
5886 "unsupported bits for llvm.is.fpclass test mask");
5887 break;
5888 }
5889 case Intrinsic::fptrunc_round: {
5890 // Check the rounding mode
5891     Metadata *MD = nullptr;
5892     auto *MAV = dyn_cast<MetadataAsValue>(Call.getOperand(1));
5893     if (MAV)
5894 MD = MAV->getMetadata();
5895
5896 Check(MD != nullptr, "missing rounding mode argument", Call);
5897
5898 Check(isa<MDString>(MD),
5899 ("invalid value for llvm.fptrunc.round metadata operand"
5900 " (the operand should be a string)"),
5901 MD);
5902
5903 std::optional<RoundingMode> RoundMode =
5904 convertStrToRoundingMode(cast<MDString>(MD)->getString());
5905 Check(RoundMode && *RoundMode != RoundingMode::Dynamic,
5906 "unsupported rounding mode argument", Call);
5907 break;
5908 }
5909#define BEGIN_REGISTER_VP_INTRINSIC(VPID, ...) case Intrinsic::VPID:
5910#include "llvm/IR/VPIntrinsics.def"
5911#undef BEGIN_REGISTER_VP_INTRINSIC
5912 visitVPIntrinsic(cast<VPIntrinsic>(Call));
5913 break;
5914#define INSTRUCTION(NAME, NARGS, ROUND_MODE, INTRINSIC) \
5915 case Intrinsic::INTRINSIC:
5916#include "llvm/IR/ConstrainedOps.def"
5917#undef INSTRUCTION
5918 visitConstrainedFPIntrinsic(cast<ConstrainedFPIntrinsic>(Call));
5919 break;
5920 case Intrinsic::dbg_declare: // llvm.dbg.declare
5921 case Intrinsic::dbg_value: // llvm.dbg.value
5922 case Intrinsic::dbg_assign: // llvm.dbg.assign
5923 case Intrinsic::dbg_label: // llvm.dbg.label
5924     // We no longer interpret debug intrinsics (the old variable-location
5925     // design). They're meaningless as far as LLVM is concerned; we could make
5926     // it an error for them to appear, but it's possible we'll have users
5927     // converting back to intrinsics for the foreseeable future (such as DXIL),
5928     // so tolerate their existence.
5929 break;
5930 case Intrinsic::memcpy:
5931 case Intrinsic::memcpy_inline:
5932 case Intrinsic::memmove:
5933 case Intrinsic::memset:
5934 case Intrinsic::memset_inline:
5935 break;
5936 case Intrinsic::experimental_memset_pattern: {
5937 const auto Memset = cast<MemSetPatternInst>(&Call);
5938 Check(Memset->getValue()->getType()->isSized(),
5939 "unsized types cannot be used as memset patterns", Call);
5940 break;
5941 }
5942 case Intrinsic::memcpy_element_unordered_atomic:
5943 case Intrinsic::memmove_element_unordered_atomic:
5944 case Intrinsic::memset_element_unordered_atomic: {
5945 const auto *AMI = cast<AnyMemIntrinsic>(&Call);
5946
5947 ConstantInt *ElementSizeCI =
5948 cast<ConstantInt>(AMI->getRawElementSizeInBytes());
5949 const APInt &ElementSizeVal = ElementSizeCI->getValue();
5950 Check(ElementSizeVal.isPowerOf2(),
5951 "element size of the element-wise atomic memory intrinsic "
5952 "must be a power of 2",
5953 Call);
5954
5955 auto IsValidAlignment = [&](MaybeAlign Alignment) {
5956 return Alignment && ElementSizeVal.ule(Alignment->value());
5957 };
5958 Check(IsValidAlignment(AMI->getDestAlign()),
5959 "incorrect alignment of the destination argument", Call);
5960 if (const auto *AMT = dyn_cast<AnyMemTransferInst>(AMI)) {
5961 Check(IsValidAlignment(AMT->getSourceAlign()),
5962 "incorrect alignment of the source argument", Call);
5963 }
5964 break;
5965 }
5966 case Intrinsic::call_preallocated_setup: {
5967 auto *NumArgs = cast<ConstantInt>(Call.getArgOperand(0));
5968 bool FoundCall = false;
5969 for (User *U : Call.users()) {
5970 auto *UseCall = dyn_cast<CallBase>(U);
5971 Check(UseCall != nullptr,
5972 "Uses of llvm.call.preallocated.setup must be calls");
5973 Intrinsic::ID IID = UseCall->getIntrinsicID();
5974 if (IID == Intrinsic::call_preallocated_arg) {
5975 auto *AllocArgIndex = dyn_cast<ConstantInt>(UseCall->getArgOperand(1));
5976 Check(AllocArgIndex != nullptr,
5977 "llvm.call.preallocated.alloc arg index must be a constant");
5978 auto AllocArgIndexInt = AllocArgIndex->getValue();
5979 Check(AllocArgIndexInt.sge(0) &&
5980 AllocArgIndexInt.slt(NumArgs->getValue()),
5981 "llvm.call.preallocated.alloc arg index must be between 0 and "
5982 "corresponding "
5983 "llvm.call.preallocated.setup's argument count");
5984 } else if (IID == Intrinsic::call_preallocated_teardown) {
5985 // nothing to do
5986 } else {
5987 Check(!FoundCall, "Can have at most one call corresponding to a "
5988 "llvm.call.preallocated.setup");
5989 FoundCall = true;
5990 size_t NumPreallocatedArgs = 0;
5991 for (unsigned i = 0; i < UseCall->arg_size(); i++) {
5992 if (UseCall->paramHasAttr(i, Attribute::Preallocated)) {
5993 ++NumPreallocatedArgs;
5994 }
5995 }
5996 Check(NumPreallocatedArgs != 0,
5997 "cannot use preallocated intrinsics on a call without "
5998 "preallocated arguments");
5999 Check(NumArgs->equalsInt(NumPreallocatedArgs),
6000 "llvm.call.preallocated.setup arg size must be equal to number "
6001 "of preallocated arguments "
6002 "at call site",
6003 Call, *UseCall);
6004 // getOperandBundle() cannot be called if more than one of the operand
6005 // bundle exists. There is already a check elsewhere for this, so skip
6006 // here if we see more than one.
6007 if (UseCall->countOperandBundlesOfType(LLVMContext::OB_preallocated) >
6008 1) {
6009 return;
6010 }
6011 auto PreallocatedBundle =
6012 UseCall->getOperandBundle(LLVMContext::OB_preallocated);
6013 Check(PreallocatedBundle,
6014 "Use of llvm.call.preallocated.setup outside intrinsics "
6015 "must be in \"preallocated\" operand bundle");
6016 Check(PreallocatedBundle->Inputs.front().get() == &Call,
6017 "preallocated bundle must have token from corresponding "
6018 "llvm.call.preallocated.setup");
6019 }
6020 }
6021 break;
6022 }
6023 case Intrinsic::call_preallocated_arg: {
6024 auto *Token = dyn_cast<CallBase>(Call.getArgOperand(0));
6025 Check(Token &&
6026 Token->getIntrinsicID() == Intrinsic::call_preallocated_setup,
6027 "llvm.call.preallocated.arg token argument must be a "
6028 "llvm.call.preallocated.setup");
6029 Check(Call.hasFnAttr(Attribute::Preallocated),
6030 "llvm.call.preallocated.arg must be called with a \"preallocated\" "
6031 "call site attribute");
6032 break;
6033 }
6034 case Intrinsic::call_preallocated_teardown: {
6035 auto *Token = dyn_cast<CallBase>(Call.getArgOperand(0));
6036 Check(Token &&
6037 Token->getIntrinsicID() == Intrinsic::call_preallocated_setup,
6038 "llvm.call.preallocated.teardown token argument must be a "
6039 "llvm.call.preallocated.setup");
6040 break;
6041 }
6042 case Intrinsic::gcroot:
6043 case Intrinsic::gcwrite:
6044 case Intrinsic::gcread:
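// Editor's note (illustrative, assumed example): a typical well-formed use in
// a function with a GC strategy is
//   %root = alloca ptr
//   call void @llvm.gcroot(ptr %root, ptr null)
// The alloca must hold a pointer unless the second argument is a non-null
// constant.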
6045 if (ID == Intrinsic::gcroot) {
6046 AllocaInst *AI =
6047 dyn_cast<AllocaInst>(Call.getArgOperand(0)->stripPointerCasts());
6048 Check(AI, "llvm.gcroot parameter #1 must be an alloca.", Call);
6049 Check(isa<Constant>(Call.getArgOperand(1)),
6050 "llvm.gcroot parameter #2 must be a constant.", Call);
6051 if (!AI->getAllocatedType()->isPointerTy()) {
6052 Check(!isa<ConstantPointerNull>(Call.getArgOperand(1)),
6053 "llvm.gcroot parameter #1 must either be a pointer alloca, "
6054 "or argument #2 must be a non-null constant.",
6055 Call);
6056 }
6057 }
6058
6059 Check(Call.getParent()->getParent()->hasGC(),
6060 "Enclosing function does not use GC.", Call);
6061 break;
6062 case Intrinsic::init_trampoline:
6063 Check(isa<Function>(Call.getArgOperand(1)->stripPointerCasts()),
6064 "llvm.init_trampoline parameter #2 must resolve to a function.",
6065 Call);
6066 break;
6067 case Intrinsic::prefetch:
6068 Check(cast<ConstantInt>(Call.getArgOperand(1))->getZExtValue() < 2,
6069 "rw argument to llvm.prefetch must be 0-1", Call);
6070 Check(cast<ConstantInt>(Call.getArgOperand(2))->getZExtValue() < 4,
6071 "locality argument to llvm.prefetch must be 0-3", Call);
6072 Check(cast<ConstantInt>(Call.getArgOperand(3))->getZExtValue() < 2,
6073 "cache type argument to llvm.prefetch must be 0-1", Call);
6074 break;
6075 case Intrinsic::reloc_none: {
6076 Check(isa<MDString>(
6077 cast<MetadataAsValue>(Call.getArgOperand(0))->getMetadata()),
6078 "llvm.reloc.none argument must be a metadata string", &Call);
6079 break;
6080 }
6081 case Intrinsic::stackprotector:
6082 Check(isa<AllocaInst>(Call.getArgOperand(1)->stripPointerCasts()),
6083 "llvm.stackprotector parameter #2 must resolve to an alloca.", Call);
6084 break;
6085 case Intrinsic::localescape: {
6086 BasicBlock *BB = Call.getParent();
6087 Check(BB->isEntryBlock(), "llvm.localescape used outside of entry block",
6088 Call);
6089 Check(!SawFrameEscape, "multiple calls to llvm.localescape in one function",
6090 Call);
6091 for (Value *Arg : Call.args()) {
6092 if (isa<ConstantPointerNull>(Arg))
6093 continue; // Null values are allowed as placeholders.
6094 auto *AI = dyn_cast<AllocaInst>(Arg->stripPointerCasts());
6095 Check(AI && AI->isStaticAlloca(),
6096 "llvm.localescape only accepts static allocas", Call);
6097 }
6098 FrameEscapeInfo[BB->getParent()].first = Call.arg_size();
6099 SawFrameEscape = true;
6100 break;
6101 }
6102 case Intrinsic::localrecover: {
6103 Value *FnArg = Call.getArgOperand(0)->stripPointerCasts();
6104 Function *Fn = dyn_cast<Function>(FnArg);
6105 Check(Fn && !Fn->isDeclaration(),
6106 "llvm.localrecover first "
6107 "argument must be function defined in this module",
6108 Call);
6109 auto *IdxArg = cast<ConstantInt>(Call.getArgOperand(2));
6110 auto &Entry = FrameEscapeInfo[Fn];
6111 Entry.second = unsigned(
6112 std::max(uint64_t(Entry.second), IdxArg->getLimitedValue(~0U) + 1));
6113 break;
6114 }
6115
6116 case Intrinsic::experimental_gc_statepoint:
6117 if (auto *CI = dyn_cast<CallInst>(&Call))
6118 Check(!CI->isInlineAsm(),
6119 "gc.statepoint support for inline assembly unimplemented", CI);
6120 Check(Call.getParent()->getParent()->hasGC(),
6121 "Enclosing function does not use GC.", Call);
6122
6123 verifyStatepoint(Call);
6124 break;
6125 case Intrinsic::experimental_gc_result: {
6126 Check(Call.getParent()->getParent()->hasGC(),
6127 "Enclosing function does not use GC.", Call);
6128
6129 auto *Statepoint = Call.getArgOperand(0);
6130 if (isa<UndefValue>(Statepoint))
6131 break;
6132
6133 // Are we tied to a statepoint properly?
6134 const auto *StatepointCall = dyn_cast<CallBase>(Statepoint);
6135 Check(StatepointCall && StatepointCall->getIntrinsicID() ==
6136 Intrinsic::experimental_gc_statepoint,
6137 "gc.result operand #1 must be from a statepoint", Call,
6138 Call.getArgOperand(0));
6139
6140 // Check that result type matches wrapped callee.
6141 auto *TargetFuncType =
6142 cast<FunctionType>(StatepointCall->getParamElementType(2));
6143 Check(Call.getType() == TargetFuncType->getReturnType(),
6144 "gc.result result type does not match wrapped callee", Call);
6145 break;
6146 }
6147 case Intrinsic::experimental_gc_relocate: {
6148 Check(Call.arg_size() == 3, "wrong number of arguments", Call);
6149
6150 Check(Call.getType()->isPtrOrPtrVectorTy(),
6151 "gc.relocate must return a pointer or a vector of pointers", Call);
6152
6153 // Check that this relocate is correctly tied to the statepoint
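// Editor's sketch (illustrative, operands elided): for a call statepoint the
// relocate consumes the statepoint token directly, e.g.
//   %tok = call token @llvm.experimental.gc.statepoint.p0(...)
//                     [ "gc-live"(ptr addrspace(1) %obj) ]
//   %rel = call ptr addrspace(1) @llvm.experimental.gc.relocate(token %tok, i32 0, i32 0)
// while on the unwind edge of an invoke statepoint it goes through the
// landingpad's token instead.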
6154
6155 // This is case for relocate on the unwinding path of an invoke statepoint
6156 if (LandingPadInst *LandingPad =
6157 dyn_cast<LandingPadInst>(Call.getArgOperand(0))) {
6158
6159 const BasicBlock *InvokeBB =
6160 LandingPad->getParent()->getUniquePredecessor();
6161
6162 // Landingpad relocates should have only one predecessor with invoke
6163 // statepoint terminator
6164 Check(InvokeBB, "safepoints should have unique landingpads",
6165 LandingPad->getParent());
6166 Check(InvokeBB->getTerminator(), "safepoint block should be well formed",
6167 InvokeBB);
6168 Check(isa<GCStatepointInst>(InvokeBB->getTerminator()),
6169 "gc relocate should be linked to a statepoint", InvokeBB);
6170 } else {
6171 // In all other cases relocate should be tied to the statepoint directly.
6172 // This covers relocates on a normal return path of invoke statepoint and
6173 // relocates of a call statepoint.
6174 auto *Token = Call.getArgOperand(0);
6175 Check(isa<GCStatepointInst>(Token) || isa<UndefValue>(Token),
6176 "gc relocate is incorrectly tied to the statepoint", Call, Token);
6177 }
6178
6179 // Verify rest of the relocate arguments.
6180 const Value &StatepointCall = *cast<GCRelocateInst>(Call).getStatepoint();
6181
6182 // Both the base and derived must be piped through the safepoint.
6183 Value *Base = Call.getArgOperand(1);
6184 Check(isa<ConstantInt>(Base),
6185 "gc.relocate operand #2 must be integer offset", Call);
6186
6187 Value *Derived = Call.getArgOperand(2);
6188 Check(isa<ConstantInt>(Derived),
6189 "gc.relocate operand #3 must be integer offset", Call);
6190
6191 const uint64_t BaseIndex = cast<ConstantInt>(Base)->getZExtValue();
6192 const uint64_t DerivedIndex = cast<ConstantInt>(Derived)->getZExtValue();
6193
6194 // Check the bounds
6195 if (isa<UndefValue>(StatepointCall))
6196 break;
6197 if (auto Opt = cast<GCStatepointInst>(StatepointCall)
6198 .getOperandBundle(LLVMContext::OB_gc_live)) {
6199 Check(BaseIndex < Opt->Inputs.size(),
6200 "gc.relocate: statepoint base index out of bounds", Call);
6201 Check(DerivedIndex < Opt->Inputs.size(),
6202 "gc.relocate: statepoint derived index out of bounds", Call);
6203 }
6204
6205 // Relocated value must be either a pointer type or vector-of-pointer type,
6206 // but gc_relocate does not need to return the same pointer type as the
6207 // relocated pointer. It can be casted to the correct type later if it's
6208 // desired. However, they must have the same address space and 'vectorness'
6209 GCRelocateInst &Relocate = cast<GCRelocateInst>(Call);
6210 auto *ResultType = Call.getType();
6211 auto *DerivedType = Relocate.getDerivedPtr()->getType();
6212 auto *BaseType = Relocate.getBasePtr()->getType();
6213
6214 Check(BaseType->isPtrOrPtrVectorTy(),
6215 "gc.relocate: relocated value must be a pointer", Call);
6216 Check(DerivedType->isPtrOrPtrVectorTy(),
6217 "gc.relocate: relocated value must be a pointer", Call);
6218
6219 Check(ResultType->isVectorTy() == DerivedType->isVectorTy(),
6220 "gc.relocate: vector relocates to vector and pointer to pointer",
6221 Call);
6222 Check(
6223 ResultType->getPointerAddressSpace() ==
6224 DerivedType->getPointerAddressSpace(),
6225 "gc.relocate: relocating a pointer shouldn't change its address space",
6226 Call);
6227
6228 auto GC = llvm::getGCStrategy(Relocate.getFunction()->getGC());
6229 Check(GC, "gc.relocate: calling function must have GCStrategy",
6230 Call.getFunction());
6231 if (GC) {
6232 auto isGCPtr = [&GC](Type *PTy) {
6233 return GC->isGCManagedPointer(PTy->getScalarType()).value_or(true);
6234 };
6235 Check(isGCPtr(ResultType), "gc.relocate: must return gc pointer", Call);
6236 Check(isGCPtr(BaseType),
6237 "gc.relocate: relocated value must be a gc pointer", Call);
6238 Check(isGCPtr(DerivedType),
6239 "gc.relocate: relocated value must be a gc pointer", Call);
6240 }
6241 break;
6242 }
6243 case Intrinsic::experimental_patchpoint: {
6244 if (Call.getCallingConv() == CallingConv::AnyReg) {
6245 Check(Call.getType()->isSingleValueType(),
6246 "patchpoint: invalid return type used with anyregcc", Call);
6247 }
6248 break;
6249 }
6250 case Intrinsic::eh_exceptioncode:
6251 case Intrinsic::eh_exceptionpointer: {
6252 Check(isa<CatchPadInst>(Call.getArgOperand(0)),
6253 "eh.exceptionpointer argument must be a catchpad", Call);
6254 break;
6255 }
6256 case Intrinsic::get_active_lane_mask: {
6257 Check(Call.getType()->isVectorTy(),
6258 "get_active_lane_mask: must return a "
6259 "vector",
6260 Call);
6261 auto *ElemTy = Call.getType()->getScalarType();
6262 Check(ElemTy->isIntegerTy(1),
6263 "get_active_lane_mask: element type is not "
6264 "i1",
6265 Call);
6266 break;
6267 }
6268 case Intrinsic::experimental_get_vector_length: {
6269 ConstantInt *VF = cast<ConstantInt>(Call.getArgOperand(1));
6270 Check(!VF->isNegative() && !VF->isZero(),
6271 "get_vector_length: VF must be positive", Call);
6272 break;
6273 }
6274 case Intrinsic::masked_load: {
6275 Check(Call.getType()->isVectorTy(), "masked_load: must return a vector",
6276 Call);
6277
6278 Value *Mask = Call.getArgOperand(1);
6279 Value *PassThru = Call.getArgOperand(2);
6280 Check(Mask->getType()->isVectorTy(), "masked_load: mask must be vector",
6281 Call);
6282 Check(PassThru->getType() == Call.getType(),
6283 "masked_load: pass through and return type must match", Call);
6284 Check(cast<VectorType>(Mask->getType())->getElementCount() ==
6285 cast<VectorType>(Call.getType())->getElementCount(),
6286 "masked_load: vector mask must be same length as return", Call);
6287 break;
6288 }
6289 case Intrinsic::masked_store: {
6290 Value *Val = Call.getArgOperand(0);
6291 Value *Mask = Call.getArgOperand(2);
6292 Check(Mask->getType()->isVectorTy(), "masked_store: mask must be vector",
6293 Call);
6294 Check(cast<VectorType>(Mask->getType())->getElementCount() ==
6295 cast<VectorType>(Val->getType())->getElementCount(),
6296 "masked_store: vector mask must be same length as value", Call);
6297 break;
6298 }
6299
6300 case Intrinsic::experimental_guard: {
6301 Check(isa<CallInst>(Call), "experimental_guard cannot be invoked", Call);
6302 Check(Call.countOperandBundlesOfType(LLVMContext::OB_deopt) == 1,
6303 "experimental_guard must have exactly one "
6304 "\"deopt\" operand bundle");
6305 break;
6306 }
6307
6308 case Intrinsic::experimental_deoptimize: {
6309 Check(isa<CallInst>(Call), "experimental_deoptimize cannot be invoked",
6310 Call);
6311 Check(Call.countOperandBundlesOfType(LLVMContext::OB_deopt) == 1,
6312 "experimental_deoptimize must have exactly one "
6313 "\"deopt\" operand bundle");
6314 Check(Call.getType() == Call.getFunction()->getReturnType(),
6315 "experimental_deoptimize return type must match caller return type");
6316
6317 if (isa<CallInst>(Call)) {
6318 auto *RI = dyn_cast<ReturnInst>(Call.getNextNode());
6319 Check(RI,
6320 "calls to experimental_deoptimize must be followed by a return");
6321
6322 if (!Call.getType()->isVoidTy() && RI)
6323 Check(RI->getReturnValue() == &Call,
6324 "calls to experimental_deoptimize must be followed by a return "
6325 "of the value computed by experimental_deoptimize");
6326 }
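// Editor's note (illustrative): the required shape at a deoptimizing call site is
//   %v = call i32 @llvm.experimental.deoptimize.i32(...) [ "deopt"(...) ]
//   ret i32 %v
// i.e. exactly one "deopt" bundle, a return type equal to the caller's, and an
// immediately following return of the produced value.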
6327
6328 break;
6329 }
6330 case Intrinsic::vastart: {
6331 Check(Call.getCaller()->isVarArg(),
6332 "va_start called in a non-varargs function");
6333 break;
6334 }
6335 case Intrinsic::get_dynamic_area_offset: {
6336 auto *IntTy = dyn_cast<IntegerType>(Call.getType());
6337 Check(IntTy && DL.getPointerSizeInBits(DL.getAllocaAddrSpace()) ==
6338 IntTy->getBitWidth(),
6339 "get_dynamic_area_offset result type must be scalar integer matching "
6340 "alloca address space width",
6341 Call);
6342 break;
6343 }
6344 case Intrinsic::vector_reduce_and:
6345 case Intrinsic::vector_reduce_or:
6346 case Intrinsic::vector_reduce_xor:
6347 case Intrinsic::vector_reduce_add:
6348 case Intrinsic::vector_reduce_mul:
6349 case Intrinsic::vector_reduce_smax:
6350 case Intrinsic::vector_reduce_smin:
6351 case Intrinsic::vector_reduce_umax:
6352 case Intrinsic::vector_reduce_umin: {
6353 Type *ArgTy = Call.getArgOperand(0)->getType();
6354 Check(ArgTy->isIntOrIntVectorTy() && ArgTy->isVectorTy(),
6355 "Intrinsic has incorrect argument type!");
6356 break;
6357 }
6358 case Intrinsic::vector_reduce_fmax:
6359 case Intrinsic::vector_reduce_fmin: {
6360 Type *ArgTy = Call.getArgOperand(0)->getType();
6361 Check(ArgTy->isFPOrFPVectorTy() && ArgTy->isVectorTy(),
6362 "Intrinsic has incorrect argument type!");
6363 break;
6364 }
6365 case Intrinsic::vector_reduce_fadd:
6366 case Intrinsic::vector_reduce_fmul: {
6367 // Unlike the other reductions, the first argument is a start value. The
6368 // second argument is the vector to be reduced.
6369 Type *ArgTy = Call.getArgOperand(1)->getType();
6370 Check(ArgTy->isFPOrFPVectorTy() && ArgTy->isVectorTy(),
6371 "Intrinsic has incorrect argument type!");
6372 break;
6373 }
6374 case Intrinsic::smul_fix:
6375 case Intrinsic::smul_fix_sat:
6376 case Intrinsic::umul_fix:
6377 case Intrinsic::umul_fix_sat:
6378 case Intrinsic::sdiv_fix:
6379 case Intrinsic::sdiv_fix_sat:
6380 case Intrinsic::udiv_fix:
6381 case Intrinsic::udiv_fix_sat: {
6382 Value *Op1 = Call.getArgOperand(0);
6383 Value *Op2 = Call.getArgOperand(1);
6384 Check(Op1->getType()->isIntOrIntVectorTy(),
6385 "first operand of [us][mul|div]_fix[_sat] must be an int type or "
6386 "vector of ints");
6387 Check(Op2->getType()->isIntOrIntVectorTy(),
6388 "second operand of [us][mul|div]_fix[_sat] must be an int type or "
6389 "vector of ints");
6390
6391 auto *Op3 = cast<ConstantInt>(Call.getArgOperand(2));
6392 Check(Op3->getType()->isIntegerTy(),
6393 "third operand of [us][mul|div]_fix[_sat] must be an int type");
6394 Check(Op3->getBitWidth() <= 32,
6395 "third operand of [us][mul|div]_fix[_sat] must fit within 32 bits");
6396
6397 if (ID == Intrinsic::smul_fix || ID == Intrinsic::smul_fix_sat ||
6398 ID == Intrinsic::sdiv_fix || ID == Intrinsic::sdiv_fix_sat) {
6399 Check(Op3->getZExtValue() < Op1->getType()->getScalarSizeInBits(),
6400 "the scale of s[mul|div]_fix[_sat] must be less than the width of "
6401 "the operands");
6402 } else {
6403 Check(Op3->getZExtValue() <= Op1->getType()->getScalarSizeInBits(),
6404 "the scale of u[mul|div]_fix[_sat] must be less than or equal "
6405 "to the width of the operands");
6406 }
6407 break;
6408 }
6409 case Intrinsic::lrint:
6410 case Intrinsic::llrint:
6411 case Intrinsic::lround:
6412 case Intrinsic::llround: {
6413 Type *ValTy = Call.getArgOperand(0)->getType();
6414 Type *ResultTy = Call.getType();
6415 auto *VTy = dyn_cast<VectorType>(ValTy);
6416 auto *RTy = dyn_cast<VectorType>(ResultTy);
6417 Check(ValTy->isFPOrFPVectorTy() && ResultTy->isIntOrIntVectorTy(),
6418 ExpectedName + ": argument must be floating-point or vector "
6419 "of floating-points, and result must be integer or "
6420 "vector of integers",
6421 &Call);
6422 Check(ValTy->isVectorTy() == ResultTy->isVectorTy(),
6423 ExpectedName + ": argument and result disagree on vector use", &Call);
6424 if (VTy) {
6425 Check(VTy->getElementCount() == RTy->getElementCount(),
6426 ExpectedName + ": argument must be same length as result", &Call);
6427 }
6428 break;
6429 }
6430 case Intrinsic::bswap: {
6431 Type *Ty = Call.getType();
6432 unsigned Size = Ty->getScalarSizeInBits();
6433 Check(Size % 16 == 0, "bswap must be an even number of bytes", &Call);
6434 break;
6435 }
6436 case Intrinsic::invariant_start: {
6437 ConstantInt *InvariantSize = dyn_cast<ConstantInt>(Call.getArgOperand(0));
6438 Check(InvariantSize &&
6439 (!InvariantSize->isNegative() || InvariantSize->isMinusOne()),
6440 "invariant_start parameter must be -1, 0 or a positive number",
6441 &Call);
6442 break;
6443 }
6444 case Intrinsic::matrix_multiply:
6445 case Intrinsic::matrix_transpose:
6446 case Intrinsic::matrix_column_major_load:
6447 case Intrinsic::matrix_column_major_store: {
6448 Function *IF = Call.getCalledFunction();
6449 ConstantInt *Stride = nullptr;
6450 ConstantInt *NumRows;
6451 ConstantInt *NumColumns;
6452 VectorType *ResultTy;
6453 Type *Op0ElemTy = nullptr;
6454 Type *Op1ElemTy = nullptr;
6455 switch (ID) {
6456 case Intrinsic::matrix_multiply: {
6457 NumRows = cast<ConstantInt>(Call.getArgOperand(2));
6458 ConstantInt *N = cast<ConstantInt>(Call.getArgOperand(3));
6459 NumColumns = cast<ConstantInt>(Call.getArgOperand(4));
6460 Check(cast<FixedVectorType>(Call.getArgOperand(0)->getType())
6461 ->getNumElements() ==
6462 NumRows->getZExtValue() * N->getZExtValue(),
6463 "First argument of a matrix operation does not match specified "
6464 "shape!");
6465 Check(cast<FixedVectorType>(Call.getArgOperand(1)->getType())
6466 ->getNumElements() ==
6467 N->getZExtValue() * NumColumns->getZExtValue(),
6468 "Second argument of a matrix operation does not match specified "
6469 "shape!");
6470
6471 ResultTy = cast<VectorType>(Call.getType());
6472 Op0ElemTy =
6473 cast<VectorType>(Call.getArgOperand(0)->getType())->getElementType();
6474 Op1ElemTy =
6475 cast<VectorType>(Call.getArgOperand(1)->getType())->getElementType();
6476 break;
6477 }
6478 case Intrinsic::matrix_transpose:
6479 NumRows = cast<ConstantInt>(Call.getArgOperand(1));
6480 NumColumns = cast<ConstantInt>(Call.getArgOperand(2));
6481 ResultTy = cast<VectorType>(Call.getType());
6482 Op0ElemTy =
6483 cast<VectorType>(Call.getArgOperand(0)->getType())->getElementType();
6484 break;
6485 case Intrinsic::matrix_column_major_load: {
6486 Stride = cast<ConstantInt>(Call.getArgOperand(1));
6487 NumRows = cast<ConstantInt>(Call.getArgOperand(3));
6488 NumColumns = cast<ConstantInt>(Call.getArgOperand(4));
6489 ResultTy = cast<VectorType>(Call.getType());
6490 break;
6491 }
6492 case Intrinsic::matrix_column_major_store: {
6493 Stride = cast<ConstantInt>(Call.getArgOperand(2));
6494 NumRows = cast<ConstantInt>(Call.getArgOperand(4));
6495 NumColumns = cast<ConstantInt>(Call.getArgOperand(5));
6496 ResultTy = cast<VectorType>(Call.getArgOperand(0)->getType());
6497 Op0ElemTy =
6498 cast<VectorType>(Call.getArgOperand(0)->getType())->getElementType();
6499 break;
6500 }
6501 default:
6502 llvm_unreachable("unexpected intrinsic");
6503 }
6504
6505 Check(ResultTy->getElementType()->isIntegerTy() ||
6506 ResultTy->getElementType()->isFloatingPointTy(),
6507 "Result type must be an integer or floating-point type!", IF);
6508
6509 if (Op0ElemTy)
6510 Check(ResultTy->getElementType() == Op0ElemTy,
6511 "Vector element type mismatch of the result and first operand "
6512 "vector!",
6513 IF);
6514
6515 if (Op1ElemTy)
6516 Check(ResultTy->getElementType() == Op1ElemTy,
6517 "Vector element type mismatch of the result and second operand "
6518 "vector!",
6519 IF);
6520
6521 Check(cast<FixedVectorType>(ResultTy)->getNumElements() ==
6522 NumRows->getZExtValue() * NumColumns->getZExtValue(),
6523 "Result of a matrix operation does not fit in the returned vector!");
6524
6525 if (Stride) {
6526 Check(Stride->getBitWidth() <= 64, "Stride bitwidth cannot exceed 64!",
6527 IF);
6528 Check(Stride->getZExtValue() >= NumRows->getZExtValue(),
6529 "Stride must be greater than or equal to the number of rows!", IF);
6530 }
6531
6532 break;
6533 }
6534 case Intrinsic::vector_splice: {
6535 VectorType *VecTy = cast<VectorType>(Call.getType());
6536 int64_t Idx = cast<ConstantInt>(Call.getArgOperand(2))->getSExtValue();
6537 int64_t KnownMinNumElements = VecTy->getElementCount().getKnownMinValue();
6538 if (Call.getParent() && Call.getParent()->getParent()) {
6539 AttributeList Attrs = Call.getParent()->getParent()->getAttributes();
6540 if (Attrs.hasFnAttr(Attribute::VScaleRange))
6541 KnownMinNumElements *= Attrs.getFnAttrs().getVScaleRangeMin();
6542 }
6543 Check((Idx < 0 && std::abs(Idx) <= KnownMinNumElements) ||
6544 (Idx >= 0 && Idx < KnownMinNumElements),
6545 "The splice index exceeds the range [-VL, VL-1] where VL is the "
6546 "known minimum number of elements in the vector. For scalable "
6547 "vectors the minimum number of elements is determined from "
6548 "vscale_range.",
6549 &Call);
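// Editor's example (illustrative): for <4 x i32> the accepted splice indices
// are -4..3; for <vscale x 4 x i32> in a function with vscale_range(2,2) the
// known minimum length becomes 8, so the range widens to -8..7.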
6550 break;
6551 }
6552 case Intrinsic::stepvector: {
6553 VectorType *VecTy = dyn_cast<VectorType>(Call.getType());
6554 Check(VecTy && VecTy->getScalarType()->isIntegerTy() &&
6555 VecTy->getScalarSizeInBits() >= 8,
6556 "stepvector only supported for vectors of integers "
6557 "with a bitwidth of at least 8.",
6558 &Call);
6559 break;
6560 }
6561 case Intrinsic::experimental_vector_match: {
6562 Value *Op1 = Call.getArgOperand(0);
6563 Value *Op2 = Call.getArgOperand(1);
6564 Value *Mask = Call.getArgOperand(2);
6565
6566 VectorType *Op1Ty = dyn_cast<VectorType>(Op1->getType());
6567 VectorType *Op2Ty = dyn_cast<VectorType>(Op2->getType());
6568 VectorType *MaskTy = dyn_cast<VectorType>(Mask->getType());
6569
6570 Check(Op1Ty && Op2Ty && MaskTy, "Operands must be vectors.", &Call);
6571 Check(isa<FixedVectorType>(Op2Ty),
6572 "Second operand must be a fixed length vector.", &Call);
6573 Check(Op1Ty->getElementType()->isIntegerTy(),
6574 "First operand must be a vector of integers.", &Call);
6575 Check(Op1Ty->getElementType() == Op2Ty->getElementType(),
6576 "First two operands must have the same element type.", &Call);
6577 Check(Op1Ty->getElementCount() == MaskTy->getElementCount(),
6578 "First operand and mask must have the same number of elements.",
6579 &Call);
6580 Check(MaskTy->getElementType()->isIntegerTy(1),
6581 "Mask must be a vector of i1's.", &Call);
6582 Check(Call.getType() == MaskTy, "Return type must match the mask type.",
6583 &Call);
6584 break;
6585 }
6586 case Intrinsic::vector_insert: {
6587 Value *Vec = Call.getArgOperand(0);
6588 Value *SubVec = Call.getArgOperand(1);
6589 Value *Idx = Call.getArgOperand(2);
6590 unsigned IdxN = cast<ConstantInt>(Idx)->getZExtValue();
6591
6592 VectorType *VecTy = cast<VectorType>(Vec->getType());
6593 VectorType *SubVecTy = cast<VectorType>(SubVec->getType());
6594
6595 ElementCount VecEC = VecTy->getElementCount();
6596 ElementCount SubVecEC = SubVecTy->getElementCount();
6597 Check(VecTy->getElementType() == SubVecTy->getElementType(),
6598 "vector_insert parameters must have the same element "
6599 "type.",
6600 &Call);
6601 Check(IdxN % SubVecEC.getKnownMinValue() == 0,
6602 "vector_insert index must be a constant multiple of "
6603 "the subvector's known minimum vector length.");
6604
6605 // If this insertion is not the 'mixed' case where a fixed vector is
6606 // inserted into a scalable vector, ensure that the insertion of the
6607 // subvector does not overrun the parent vector.
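// Editor's example (illustrative): inserting <2 x i32> into <8 x i32> is
// accepted at indices 0, 2, 4 and 6; inserting a fixed <4 x i32> into
// <vscale x 4 x i32> skips this bound check because the scalable vector's
// runtime length is unknown here.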
6608 if (VecEC.isScalable() == SubVecEC.isScalable()) {
6609 Check(IdxN < VecEC.getKnownMinValue() &&
6610 IdxN + SubVecEC.getKnownMinValue() <= VecEC.getKnownMinValue(),
6611 "subvector operand of vector_insert would overrun the "
6612 "vector being inserted into.");
6613 }
6614 break;
6615 }
6616 case Intrinsic::vector_extract: {
6617 Value *Vec = Call.getArgOperand(0);
6618 Value *Idx = Call.getArgOperand(1);
6619 unsigned IdxN = cast<ConstantInt>(Idx)->getZExtValue();
6620
6621 VectorType *ResultTy = cast<VectorType>(Call.getType());
6622 VectorType *VecTy = cast<VectorType>(Vec->getType());
6623
6624 ElementCount VecEC = VecTy->getElementCount();
6625 ElementCount ResultEC = ResultTy->getElementCount();
6626
6627 Check(ResultTy->getElementType() == VecTy->getElementType(),
6628 "vector_extract result must have the same element "
6629 "type as the input vector.",
6630 &Call);
6631 Check(IdxN % ResultEC.getKnownMinValue() == 0,
6632 "vector_extract index must be a constant multiple of "
6633 "the result type's known minimum vector length.");
6634
6635 // If this extraction is not the 'mixed' case where a fixed vector is
6636 // extracted from a scalable vector, ensure that the extraction does not
6637 // overrun the parent vector.
6638 if (VecEC.isScalable() == ResultEC.isScalable()) {
6639 Check(IdxN < VecEC.getKnownMinValue() &&
6640 IdxN + ResultEC.getKnownMinValue() <= VecEC.getKnownMinValue(),
6641 "vector_extract would overrun.");
6642 }
6643 break;
6644 }
6645 case Intrinsic::vector_partial_reduce_fadd:
6646 case Intrinsic::vector_partial_reduce_add: {
6647 VectorType *AccTy = cast<VectorType>(Call.getArgOperand(0)->getType());
6648 VectorType *VecTy = cast<VectorType>(Call.getArgOperand(1)->getType());
6649
6650 unsigned VecWidth = VecTy->getElementCount().getKnownMinValue();
6651 unsigned AccWidth = AccTy->getElementCount().getKnownMinValue();
6652
6653 Check((VecWidth % AccWidth) == 0,
6654 "Invalid vector widths for partial "
6655 "reduction. The width of the input vector "
6656 "must be a positive integer multiple of "
6657 "the width of the accumulator vector.");
6658 break;
6659 }
6660 case Intrinsic::experimental_noalias_scope_decl: {
6661 NoAliasScopeDecls.push_back(cast<IntrinsicInst>(&Call));
6662 break;
6663 }
6664 case Intrinsic::preserve_array_access_index:
6665 case Intrinsic::preserve_struct_access_index:
6666 case Intrinsic::aarch64_ldaxr:
6667 case Intrinsic::aarch64_ldxr:
6668 case Intrinsic::arm_ldaex:
6669 case Intrinsic::arm_ldrex: {
6670 Type *ElemTy = Call.getParamElementType(0);
6671 Check(ElemTy, "Intrinsic requires elementtype attribute on first argument.",
6672 &Call);
6673 break;
6674 }
6675 case Intrinsic::aarch64_stlxr:
6676 case Intrinsic::aarch64_stxr:
6677 case Intrinsic::arm_stlex:
6678 case Intrinsic::arm_strex: {
6679 Type *ElemTy = Call.getAttributes().getParamElementType(1);
6680 Check(ElemTy,
6681 "Intrinsic requires elementtype attribute on second argument.",
6682 &Call);
6683 break;
6684 }
6685 case Intrinsic::aarch64_prefetch: {
6686 Check(cast<ConstantInt>(Call.getArgOperand(1))->getZExtValue() < 2,
6687 "write argument to llvm.aarch64.prefetch must be 0 or 1", Call);
6688 Check(cast<ConstantInt>(Call.getArgOperand(2))->getZExtValue() < 4,
6689 "target argument to llvm.aarch64.prefetch must be 0-3", Call);
6690 Check(cast<ConstantInt>(Call.getArgOperand(3))->getZExtValue() < 2,
6691 "stream argument to llvm.aarch64.prefetch must be 0 or 1", Call);
6692 Check(cast<ConstantInt>(Call.getArgOperand(4))->getZExtValue() < 2,
6693 "isdata argument to llvm.aarch64.prefetch must be 0 or 1", Call);
6694 break;
6695 }
6696 case Intrinsic::callbr_landingpad: {
6697 const auto *CBR = dyn_cast<CallBrInst>(Call.getOperand(0));
6698 Check(CBR, "intrinsic requires callbr operand", &Call);
6699 if (!CBR)
6700 break;
6701
6702 const BasicBlock *LandingPadBB = Call.getParent();
6703 const BasicBlock *PredBB = LandingPadBB->getUniquePredecessor();
6704 if (!PredBB) {
6705 CheckFailed("Intrinsic in block must have 1 unique predecessor", &Call);
6706 break;
6707 }
6708 if (!isa<CallBrInst>(PredBB->getTerminator())) {
6709 CheckFailed("Intrinsic must have corresponding callbr in predecessor",
6710 &Call);
6711 break;
6712 }
6713 Check(llvm::is_contained(CBR->getIndirectDests(), LandingPadBB),
6714 "Intrinsic's corresponding callbr must have intrinsic's parent basic "
6715 "block in indirect destination list",
6716 &Call);
6717 const Instruction &First = *LandingPadBB->begin();
6718 Check(&First == &Call, "No other instructions may precede intrinsic",
6719 &Call);
6720 break;
6721 }
6722 case Intrinsic::amdgcn_cs_chain: {
6723 auto CallerCC = Call.getCaller()->getCallingConv();
6724 switch (CallerCC) {
6725 case CallingConv::AMDGPU_CS:
6726 case CallingConv::AMDGPU_CS_Chain:
6727 case CallingConv::AMDGPU_CS_ChainPreserve:
6728 break;
6729 default:
6730 CheckFailed("Intrinsic can only be used from functions with the "
6731 "amdgpu_cs, amdgpu_cs_chain or amdgpu_cs_chain_preserve "
6732 "calling conventions",
6733 &Call);
6734 break;
6735 }
6736
6737 Check(Call.paramHasAttr(2, Attribute::InReg),
6738 "SGPR arguments must have the `inreg` attribute", &Call);
6739 Check(!Call.paramHasAttr(3, Attribute::InReg),
6740 "VGPR arguments must not have the `inreg` attribute", &Call);
6741
6742 auto *Next = Call.getNextNode();
6743 bool IsAMDUnreachable = Next && isa<IntrinsicInst>(Next) &&
6744 cast<IntrinsicInst>(Next)->getIntrinsicID() ==
6745 Intrinsic::amdgcn_unreachable;
6746 Check(Next && (isa<UnreachableInst>(Next) || IsAMDUnreachable),
6747 "llvm.amdgcn.cs.chain must be followed by unreachable", &Call);
6748 break;
6749 }
6750 case Intrinsic::amdgcn_init_exec_from_input: {
6751 const Argument *Arg = dyn_cast<Argument>(Call.getOperand(0));
6752 Check(Arg && Arg->hasInRegAttr(),
6753 "only inreg arguments to the parent function are valid as inputs to "
6754 "this intrinsic",
6755 &Call);
6756 break;
6757 }
6758 case Intrinsic::amdgcn_set_inactive_chain_arg: {
6759 auto CallerCC = Call.getCaller()->getCallingConv();
6760 switch (CallerCC) {
6761 case CallingConv::AMDGPU_CS_Chain:
6762 case CallingConv::AMDGPU_CS_ChainPreserve:
6763 break;
6764 default:
6765 CheckFailed("Intrinsic can only be used from functions with the "
6766 "amdgpu_cs_chain or amdgpu_cs_chain_preserve "
6767 "calling conventions",
6768 &Call);
6769 break;
6770 }
6771
6772 unsigned InactiveIdx = 1;
6773 Check(!Call.paramHasAttr(InactiveIdx, Attribute::InReg),
6774 "Value for inactive lanes must not have the `inreg` attribute",
6775 &Call);
6776 Check(isa<Argument>(Call.getArgOperand(InactiveIdx)),
6777 "Value for inactive lanes must be a function argument", &Call);
6778 Check(!cast<Argument>(Call.getArgOperand(InactiveIdx))->hasInRegAttr(),
6779 "Value for inactive lanes must be a VGPR function argument", &Call);
6780 break;
6781 }
6782 case Intrinsic::amdgcn_call_whole_wave: {
6783 auto F = dyn_cast<Function>(Call.getArgOperand(0));
6784 Check(F, "Indirect whole wave calls are not allowed", &Call);
6785
6786 CallingConv::ID CC = F->getCallingConv();
6787 Check(CC == CallingConv::AMDGPU_Gfx_WholeWave,
6788 "Callee must have the amdgpu_gfx_whole_wave calling convention",
6789 &Call);
6790
6791 Check(!F->isVarArg(), "Variadic whole wave calls are not allowed", &Call);
6792
6793 Check(Call.arg_size() == F->arg_size(),
6794 "Call argument count must match callee argument count", &Call);
6795
6796 // The first argument of the call is the callee, and the first argument of
6797 // the callee is the active mask. The rest of the arguments must match.
6798 Check(F->arg_begin()->getType()->isIntegerTy(1),
6799 "Callee must have i1 as its first argument", &Call);
6800 for (auto [CallArg, FuncArg] :
6801 drop_begin(zip_equal(Call.args(), F->args()))) {
6802 Check(CallArg->getType() == FuncArg.getType(),
6803 "Argument types must match", &Call);
6804
6805 // Check that inreg attributes match between call site and function
6806 Check(Call.paramHasAttr(FuncArg.getArgNo(), Attribute::InReg) ==
6807 FuncArg.hasInRegAttr(),
6808 "Argument inreg attributes must match", &Call);
6809 }
6810 break;
6811 }
6812 case Intrinsic::amdgcn_s_prefetch_data: {
6813 Check(
6814 AMDGPU::isFlatGlobalAddrSpace(
6815 Call.getArgOperand(0)->getType()->getPointerAddressSpace()),
6816 "llvm.amdgcn.s.prefetch.data only supports global or constant memory");
6817 break;
6818 }
6819 case Intrinsic::amdgcn_mfma_scale_f32_16x16x128_f8f6f4:
6820 case Intrinsic::amdgcn_mfma_scale_f32_32x32x64_f8f6f4: {
6821 Value *Src0 = Call.getArgOperand(0);
6822 Value *Src1 = Call.getArgOperand(1);
6823
6824 uint64_t CBSZ = cast<ConstantInt>(Call.getArgOperand(3))->getZExtValue();
6825 uint64_t BLGP = cast<ConstantInt>(Call.getArgOperand(4))->getZExtValue();
6826 Check(CBSZ <= 4, "invalid value for cbsz format", Call,
6827 Call.getArgOperand(3));
6828 Check(BLGP <= 4, "invalid value for blgp format", Call,
6829 Call.getArgOperand(4));
6830
6831 // AMDGPU::MFMAScaleFormats values
6832 auto getFormatNumRegs = [](unsigned FormatVal) {
6833 switch (FormatVal) {
6834 case 0:
6835 case 1:
6836 return 8u;
6837 case 2:
6838 case 3:
6839 return 6u;
6840 case 4:
6841 return 4u;
6842 default:
6843 llvm_unreachable("invalid format value");
6844 }
6845 };
6846
6847 auto isValidSrcASrcBVector = [](FixedVectorType *Ty) {
6848 if (!Ty || !Ty->getElementType()->isIntegerTy(32))
6849 return false;
6850 unsigned NumElts = Ty->getNumElements();
6851 return NumElts == 4 || NumElts == 6 || NumElts == 8;
6852 };
6853
6854 auto *Src0Ty = dyn_cast<FixedVectorType>(Src0->getType());
6855 auto *Src1Ty = dyn_cast<FixedVectorType>(Src1->getType());
6856 Check(isValidSrcASrcBVector(Src0Ty),
6857 "operand 0 must be 4, 6 or 8 element i32 vector", &Call, Src0);
6858 Check(isValidSrcASrcBVector(Src1Ty),
6859 "operand 1 must be 4, 6 or 8 element i32 vector", &Call, Src1);
6860
6861 // Permit excess registers for the format.
6862 Check(Src0Ty->getNumElements() >= getFormatNumRegs(CBSZ),
6863 "invalid vector type for format", &Call, Src0, Call.getArgOperand(3));
6864 Check(Src1Ty->getNumElements() >= getFormatNumRegs(BLGP),
6865 "invalid vector type for format", &Call, Src1, Call.getArgOperand(5));
6866 break;
6867 }
6868 case Intrinsic::amdgcn_wmma_f32_16x16x128_f8f6f4:
6869 case Intrinsic::amdgcn_wmma_scale_f32_16x16x128_f8f6f4:
6870 case Intrinsic::amdgcn_wmma_scale16_f32_16x16x128_f8f6f4: {
6871 Value *Src0 = Call.getArgOperand(1);
6872 Value *Src1 = Call.getArgOperand(3);
6873
6874 unsigned FmtA = cast<ConstantInt>(Call.getArgOperand(0))->getZExtValue();
6875 unsigned FmtB = cast<ConstantInt>(Call.getArgOperand(2))->getZExtValue();
6876 Check(FmtA <= 4, "invalid value for matrix format", Call,
6877 Call.getArgOperand(0));
6878 Check(FmtB <= 4, "invalid value for matrix format", Call,
6879 Call.getArgOperand(2));
6880
6881 // AMDGPU::MatrixFMT values
6882 auto getFormatNumRegs = [](unsigned FormatVal) {
6883 switch (FormatVal) {
6884 case 0:
6885 case 1:
6886 return 16u;
6887 case 2:
6888 case 3:
6889 return 12u;
6890 case 4:
6891 return 8u;
6892 default:
6893 llvm_unreachable("invalid format value");
6894 }
6895 };
6896
6897 auto isValidSrcASrcBVector = [](FixedVectorType *Ty) {
6898 if (!Ty || !Ty->getElementType()->isIntegerTy(32))
6899 return false;
6900 unsigned NumElts = Ty->getNumElements();
6901 return NumElts == 16 || NumElts == 12 || NumElts == 8;
6902 };
6903
6904 auto *Src0Ty = dyn_cast<FixedVectorType>(Src0->getType());
6905 auto *Src1Ty = dyn_cast<FixedVectorType>(Src1->getType());
6906 Check(isValidSrcASrcBVector(Src0Ty),
6907 "operand 1 must be 8, 12 or 16 element i32 vector", &Call, Src0);
6908 Check(isValidSrcASrcBVector(Src1Ty),
6909 "operand 3 must be 8, 12 or 16 element i32 vector", &Call, Src1);
6910
6911 // Permit excess registers for the format.
6912 Check(Src0Ty->getNumElements() >= getFormatNumRegs(FmtA),
6913 "invalid vector type for format", &Call, Src0, Call.getArgOperand(0));
6914 Check(Src1Ty->getNumElements() >= getFormatNumRegs(FmtB),
6915 "invalid vector type for format", &Call, Src1, Call.getArgOperand(2));
6916 break;
6917 }
6918 case Intrinsic::amdgcn_cooperative_atomic_load_32x4B:
6919 case Intrinsic::amdgcn_cooperative_atomic_load_16x8B:
6920 case Intrinsic::amdgcn_cooperative_atomic_load_8x16B:
6921 case Intrinsic::amdgcn_cooperative_atomic_store_32x4B:
6922 case Intrinsic::amdgcn_cooperative_atomic_store_16x8B:
6923 case Intrinsic::amdgcn_cooperative_atomic_store_8x16B: {
6924 // Check we only use this intrinsic on the FLAT or GLOBAL address spaces.
6925 Value *PtrArg = Call.getArgOperand(0);
6926 const unsigned AS = PtrArg->getType()->getPointerAddressSpace();
6928 "cooperative atomic intrinsics require a generic or global pointer",
6929 &Call, PtrArg);
6930
6931 // Last argument must be a MD string
6933 MDNode *MD = cast<MDNode>(Op->getMetadata());
6934 Check((MD->getNumOperands() == 1) && isa<MDString>(MD->getOperand(0)),
6935 "cooperative atomic intrinsics require that the last argument is a "
6936 "metadata string",
6937 &Call, Op);
6938 break;
6939 }
6940 case Intrinsic::nvvm_setmaxnreg_inc_sync_aligned_u32:
6941 case Intrinsic::nvvm_setmaxnreg_dec_sync_aligned_u32: {
6942 Value *V = Call.getArgOperand(0);
6943 unsigned RegCount = cast<ConstantInt>(V)->getZExtValue();
6944 Check(RegCount % 8 == 0,
6945 "reg_count argument to nvvm.setmaxnreg must be in multiples of 8");
6946 break;
6947 }
6948 case Intrinsic::experimental_convergence_entry:
6949 case Intrinsic::experimental_convergence_anchor:
6950 break;
6951 case Intrinsic::experimental_convergence_loop:
6952 break;
6953 case Intrinsic::ptrmask: {
6954 Type *Ty0 = Call.getArgOperand(0)->getType();
6955 Type *Ty1 = Call.getArgOperand(1)->getType();
6956 Check(Ty0->isPtrOrPtrVectorTy(),
6957 "llvm.ptrmask intrinsic first argument must be pointer or vector "
6958 "of pointers",
6959 &Call);
6960 Check(
6961 Ty0->isVectorTy() == Ty1->isVectorTy(),
6962 "llvm.ptrmask intrinsic arguments must be both scalars or both vectors",
6963 &Call);
6964 if (Ty0->isVectorTy())
6965 Check(cast<VectorType>(Ty0)->getElementCount() ==
6966 cast<VectorType>(Ty1)->getElementCount(),
6967 "llvm.ptrmask intrinsic arguments must have the same number of "
6968 "elements",
6969 &Call);
6970 Check(DL.getIndexTypeSizeInBits(Ty0) == Ty1->getScalarSizeInBits(),
6971 "llvm.ptrmask intrinsic second argument bitwidth must match "
6972 "pointer index type size of first argument",
6973 &Call);
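// Editor's example (illustrative): with 64-bit pointer indices, clearing the
// low four bits of a pointer is written as
//   %aligned = call ptr @llvm.ptrmask.p0.i64(ptr %p, i64 -16)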
6974 break;
6975 }
6976 case Intrinsic::thread_pointer: {
6977 Check(Call.getType()->getPointerAddressSpace() ==
6978 DL.getDefaultGlobalsAddressSpace(),
6979 "llvm.thread.pointer intrinsic return type must be for the globals "
6980 "address space",
6981 &Call);
6982 break;
6983 }
6984 case Intrinsic::threadlocal_address: {
6985 const Value &Arg0 = *Call.getArgOperand(0);
6986 Check(isa<GlobalValue>(Arg0),
6987 "llvm.threadlocal.address first argument must be a GlobalValue");
6988 Check(cast<GlobalValue>(Arg0).isThreadLocal(),
6989 "llvm.threadlocal.address operand isThreadLocal() must be true");
6990 break;
6991 }
6992 case Intrinsic::lifetime_start:
6993 case Intrinsic::lifetime_end: {
6994 Value *Ptr = Call.getArgOperand(0);
6995 Check(isa<AllocaInst>(Ptr) || isa<PoisonValue>(Ptr),
6996 "llvm.lifetime.start/end can only be used on alloca or poison",
6997 &Call);
6998 break;
6999 }
7000 };
7001
7002 // Verify that there aren't any unmediated control transfers between funclets.
7003 if (IntrinsicInst::mayLowerToFunctionCall(ID)) {
7004 Function *F = Call.getParent()->getParent();
7005 if (F->hasPersonalityFn() &&
7006 isScopedEHPersonality(classifyEHPersonality(F->getPersonalityFn()))) {
7007 // Run EH funclet coloring on-demand and cache results for other intrinsic
7008 // calls in this function
7009 if (BlockEHFuncletColors.empty())
7010 BlockEHFuncletColors = colorEHFunclets(*F);
7011
7012 // Check for catch-/cleanup-pad in first funclet block
7013 bool InEHFunclet = false;
7014 BasicBlock *CallBB = Call.getParent();
7015 const ColorVector &CV = BlockEHFuncletColors.find(CallBB)->second;
7016 assert(CV.size() > 0 && "Uncolored block");
7017 for (BasicBlock *ColorFirstBB : CV)
7018 if (auto It = ColorFirstBB->getFirstNonPHIIt();
7019 It != ColorFirstBB->end())
7020 if (isa<FuncletPadInst>(&*It))
7021 InEHFunclet = true;
7022
7023 // Check for funclet operand bundle
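// Editor's note (illustrative, names assumed): inside a funclet such a call
// must carry the funclet token, e.g.
//   %cp = catchpad within %cs [ptr null, i32 64, ptr null]
//   call void @some.intrinsic() [ "funclet"(token %cp) ]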
7024 bool HasToken = false;
7025 for (unsigned I = 0, E = Call.getNumOperandBundles(); I != E; ++I)
7026 if (Call.getOperandBundleAt(I).getTagID() == LLVMContext::OB_funclet)
7027 HasToken = true;
7028
7029 // This would cause silent code truncation in WinEHPrepare
7030 if (InEHFunclet)
7031 Check(HasToken, "Missing funclet token on intrinsic call", &Call);
7032 }
7033 }
7034}
7035
7036/// Carefully grab the subprogram from a local scope.
7037///
7038/// This carefully grabs the subprogram from a local scope, avoiding the
7039/// built-in assertions that would typically fire.
7041 if (!LocalScope)
7042 return nullptr;
7043
7044 if (auto *SP = dyn_cast<DISubprogram>(LocalScope))
7045 return SP;
7046
7047 if (auto *LB = dyn_cast<DILexicalBlockBase>(LocalScope))
7048 return getSubprogram(LB->getRawScope());
7049
7050 // Just return null; broken scope chains are checked elsewhere.
7051 assert(!isa<DILocalScope>(LocalScope) && "Unknown type of local scope");
7052 return nullptr;
7053}
7054
7055 void Verifier::visit(DbgLabelRecord &DLR) {
7056 CheckDI(isa<DILabel>(DLR.getRawLabel()),
7057 "invalid #dbg_label intrinsic variable", &DLR, DLR.getRawLabel());
7058
7059 // Ignore broken !dbg attachments; they're checked elsewhere.
7060 if (MDNode *N = DLR.getDebugLoc().getAsMDNode())
7061 if (!isa<DILocation>(N))
7062 return;
7063
7064 BasicBlock *BB = DLR.getParent();
7065 Function *F = BB ? BB->getParent() : nullptr;
7066
7067 // The scopes for variables and !dbg attachments must agree.
7068 DILabel *Label = DLR.getLabel();
7069 DILocation *Loc = DLR.getDebugLoc();
7070 CheckDI(Loc, "#dbg_label record requires a !dbg attachment", &DLR, BB, F);
7071
7072 DISubprogram *LabelSP = getSubprogram(Label->getRawScope());
7073 DISubprogram *LocSP = getSubprogram(Loc->getRawScope());
7074 if (!LabelSP || !LocSP)
7075 return;
7076
7077 CheckDI(LabelSP == LocSP,
7078 "mismatched subprogram between #dbg_label label and !dbg attachment",
7079 &DLR, BB, F, Label, Label->getScope()->getSubprogram(), Loc,
7080 Loc->getScope()->getSubprogram());
7081}
7082
7083void Verifier::visit(DbgVariableRecord &DVR) {
7084 BasicBlock *BB = DVR.getParent();
7085 Function *F = BB->getParent();
7086
7087 CheckDI(DVR.getType() == DbgVariableRecord::LocationType::Value ||
7088 DVR.getType() == DbgVariableRecord::LocationType::Declare ||
7089 DVR.getType() == DbgVariableRecord::LocationType::DeclareValue ||
7090 DVR.getType() == DbgVariableRecord::LocationType::Assign,
7091 "invalid #dbg record type", &DVR, DVR.getType(), BB, F);
7092
7093 // The location for a DbgVariableRecord must be either a ValueAsMetadata,
7094 // DIArgList, or an empty MDNode (which is a legacy representation for an
7095 // "undef" location).
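// Editor's sketch (illustrative textual form): a typical record is
//   #dbg_value(i32 %x, !10, !DIExpression(), !15)
// where %x is wrapped as ValueAsMetadata, !10 is a DILocalVariable and !15 a
// DILocation.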
7096 auto *MD = DVR.getRawLocation();
7097 CheckDI(MD && (isa<ValueAsMetadata>(MD) || isa<DIArgList>(MD) ||
7098 (isa<MDNode>(MD) && !cast<MDNode>(MD)->getNumOperands())),
7099 "invalid #dbg record address/value", &DVR, MD, BB, F);
7100 if (auto *VAM = dyn_cast<ValueAsMetadata>(MD)) {
7101 visitValueAsMetadata(*VAM, F);
7102 if (DVR.isDbgDeclare()) {
7103 // Allow integers here to support inttoptr salvage.
7104 Type *Ty = VAM->getValue()->getType();
7105 CheckDI(Ty->isPointerTy() || Ty->isIntegerTy(),
7106 "location of #dbg_declare must be a pointer or int", &DVR, MD, BB,
7107 F);
7108 }
7109 } else if (auto *AL = dyn_cast<DIArgList>(MD)) {
7110 visitDIArgList(*AL, F);
7111 }
7112
7113 CheckDI(isa_and_nonnull<DILocalVariable>(DVR.getRawVariable()),
7114 "invalid #dbg record variable", &DVR, DVR.getRawVariable(), BB, F);
7115 visitMDNode(*DVR.getRawVariable(), AreDebugLocsAllowed::No);
7116
7117 CheckDI(isa<DIExpression>(DVR.getRawExpression()),
7118 "invalid #dbg record expression", &DVR, DVR.getRawExpression(), BB,
7119 F);
7120 visitMDNode(*DVR.getExpression(), AreDebugLocsAllowed::No);
7121
7122 if (DVR.isDbgAssign()) {
7123 CheckDI(isa<DIAssignID>(DVR.getRawAssignID()),
7124 "invalid #dbg_assign DIAssignID", &DVR, DVR.getRawAssignID(), BB,
7125 F);
7126 visitMDNode(*cast<DIAssignID>(DVR.getRawAssignID()),
7127 AreDebugLocsAllowed::No);
7128
7129 const auto *RawAddr = DVR.getRawAddress();
7130 // Similarly to the location above, the address for an assign
7131 // DbgVariableRecord must be a ValueAsMetadata or an empty MDNode, which
7132 // represents an undef address.
7133 CheckDI(
7134 isa<ValueAsMetadata>(RawAddr) ||
7135 (isa<MDNode>(RawAddr) && !cast<MDNode>(RawAddr)->getNumOperands()),
7136 "invalid #dbg_assign address", &DVR, DVR.getRawAddress(), BB, F);
7137 if (auto *VAM = dyn_cast<ValueAsMetadata>(RawAddr))
7138 visitValueAsMetadata(*VAM, F);
7139
7140 CheckDI(isa<DIExpression>(DVR.getRawAddressExpression()),
7141 "invalid #dbg_assign address expression", &DVR,
7142 DVR.getRawAddressExpression(), BB, F);
7143 visitMDNode(*DVR.getAddressExpression(), AreDebugLocsAllowed::No);
7144
7145 // All of the linked instructions should be in the same function as DVR.
7146 for (Instruction *I : at::getAssignmentInsts(&DVR))
7147 CheckDI(DVR.getFunction() == I->getFunction(),
7148 "inst not in same function as #dbg_assign", I, &DVR, BB, F);
7149 }
7150
7151 // This check is redundant with one in visitLocalVariable().
7152 DILocalVariable *Var = DVR.getVariable();
7153 CheckDI(isType(Var->getRawType()), "invalid type ref", Var, Var->getRawType(),
7154 BB, F);
7155
7156 auto *DLNode = DVR.getDebugLoc().getAsMDNode();
7157 CheckDI(isa_and_nonnull<DILocation>(DLNode), "invalid #dbg record DILocation",
7158 &DVR, DLNode, BB, F);
7159 DILocation *Loc = DVR.getDebugLoc();
7160
7161 // The scopes for variables and !dbg attachments must agree.
7162 DISubprogram *VarSP = getSubprogram(Var->getRawScope());
7163 DISubprogram *LocSP = getSubprogram(Loc->getRawScope());
7164 if (!VarSP || !LocSP)
7165 return; // Broken scope chains are checked elsewhere.
7166
7167 CheckDI(VarSP == LocSP,
7168 "mismatched subprogram between #dbg record variable and DILocation",
7169 &DVR, BB, F, Var, Var->getScope()->getSubprogram(), Loc,
7170 Loc->getScope()->getSubprogram(), BB, F);
7171
7172 verifyFnArgs(DVR);
7173}
7174
7175void Verifier::visitVPIntrinsic(VPIntrinsic &VPI) {
7176 if (auto *VPCast = dyn_cast<VPCastIntrinsic>(&VPI)) {
7177 auto *RetTy = cast<VectorType>(VPCast->getType());
7178 auto *ValTy = cast<VectorType>(VPCast->getOperand(0)->getType());
7179 Check(RetTy->getElementCount() == ValTy->getElementCount(),
7180 "VP cast intrinsic first argument and result vector lengths must be "
7181 "equal",
7182 *VPCast);
7183
7184 switch (VPCast->getIntrinsicID()) {
7185 default:
7186 llvm_unreachable("Unknown VP cast intrinsic");
7187 case Intrinsic::vp_trunc:
7188 Check(RetTy->isIntOrIntVectorTy() && ValTy->isIntOrIntVectorTy(),
7189 "llvm.vp.trunc intrinsic first argument and result element type "
7190 "must be integer",
7191 *VPCast);
7192 Check(RetTy->getScalarSizeInBits() < ValTy->getScalarSizeInBits(),
7193 "llvm.vp.trunc intrinsic the bit size of first argument must be "
7194 "larger than the bit size of the return type",
7195 *VPCast);
7196 break;
7197 case Intrinsic::vp_zext:
7198 case Intrinsic::vp_sext:
7199 Check(RetTy->isIntOrIntVectorTy() && ValTy->isIntOrIntVectorTy(),
7200 "llvm.vp.zext or llvm.vp.sext intrinsic first argument and result "
7201 "element type must be integer",
7202 *VPCast);
7203 Check(RetTy->getScalarSizeInBits() > ValTy->getScalarSizeInBits(),
7204 "llvm.vp.zext or llvm.vp.sext intrinsic the bit size of first "
7205 "argument must be smaller than the bit size of the return type",
7206 *VPCast);
7207 break;
7208 case Intrinsic::vp_fptoui:
7209 case Intrinsic::vp_fptosi:
7210 case Intrinsic::vp_lrint:
7211 case Intrinsic::vp_llrint:
7212 Check(
7213 RetTy->isIntOrIntVectorTy() && ValTy->isFPOrFPVectorTy(),
7214 "llvm.vp.fptoui, llvm.vp.fptosi, llvm.vp.lrint or llvm.vp.llrint " "intrinsic first argument element "
7215 "type must be floating-point and result element type must be integer",
7216 *VPCast);
7217 break;
7218 case Intrinsic::vp_uitofp:
7219 case Intrinsic::vp_sitofp:
7220 Check(
7221 RetTy->isFPOrFPVectorTy() && ValTy->isIntOrIntVectorTy(),
7222 "llvm.vp.uitofp or llvm.vp.sitofp intrinsic first argument element "
7223 "type must be integer and result element type must be floating-point",
7224 *VPCast);
7225 break;
7226 case Intrinsic::vp_fptrunc:
7227 Check(RetTy->isFPOrFPVectorTy() && ValTy->isFPOrFPVectorTy(),
7228 "llvm.vp.fptrunc intrinsic first argument and result element type "
7229 "must be floating-point",
7230 *VPCast);
7231 Check(RetTy->getScalarSizeInBits() < ValTy->getScalarSizeInBits(),
7232 "llvm.vp.fptrunc intrinsic the bit size of first argument must be "
7233 "larger than the bit size of the return type",
7234 *VPCast);
7235 break;
7236 case Intrinsic::vp_fpext:
7237 Check(RetTy->isFPOrFPVectorTy() && ValTy->isFPOrFPVectorTy(),
7238 "llvm.vp.fpext intrinsic first argument and result element type "
7239 "must be floating-point",
7240 *VPCast);
7241 Check(RetTy->getScalarSizeInBits() > ValTy->getScalarSizeInBits(),
7242 "llvm.vp.fpext intrinsic the bit size of first argument must be "
7243 "smaller than the bit size of the return type",
7244 *VPCast);
7245 break;
7246 case Intrinsic::vp_ptrtoint:
7247 Check(RetTy->isIntOrIntVectorTy() && ValTy->isPtrOrPtrVectorTy(),
7248 "llvm.vp.ptrtoint intrinsic first argument element type must be "
7249 "pointer and result element type must be integer",
7250 *VPCast);
7251 break;
7252 case Intrinsic::vp_inttoptr:
7253 Check(RetTy->isPtrOrPtrVectorTy() && ValTy->isIntOrIntVectorTy(),
7254 "llvm.vp.inttoptr intrinsic first argument element type must be "
7255 "integer and result element type must be pointer",
7256 *VPCast);
7257 break;
7258 }
7259 }
7260
7261 switch (VPI.getIntrinsicID()) {
7262 case Intrinsic::vp_fcmp: {
7263 auto Pred = cast<VPCmpIntrinsic>(&VPI)->getPredicate();
7264 Check(CmpInst::isFPPredicate(Pred),
7265 "invalid predicate for VP FP comparison intrinsic", &VPI);
7266 break;
7267 }
7268 case Intrinsic::vp_icmp: {
7269 auto Pred = cast<VPCmpIntrinsic>(&VPI)->getPredicate();
7270 Check(CmpInst::isIntPredicate(Pred),
7271 "invalid predicate for VP integer comparison intrinsic", &VPI);
7272 break;
7273 }
7274 case Intrinsic::vp_is_fpclass: {
7275 auto TestMask = cast<ConstantInt>(VPI.getOperand(1));
7276 Check((TestMask->getZExtValue() & ~static_cast<unsigned>(fcAllFlags)) == 0,
7277 "unsupported bits for llvm.vp.is.fpclass test mask");
7278 break;
7279 }
7280 case Intrinsic::experimental_vp_splice: {
7281 VectorType *VecTy = cast<VectorType>(VPI.getType());
7282 int64_t Idx = cast<ConstantInt>(VPI.getArgOperand(2))->getSExtValue();
7283 int64_t KnownMinNumElements = VecTy->getElementCount().getKnownMinValue();
7284 if (VPI.getParent() && VPI.getParent()->getParent()) {
7285 AttributeList Attrs = VPI.getParent()->getParent()->getAttributes();
7286 if (Attrs.hasFnAttr(Attribute::VScaleRange))
7287 KnownMinNumElements *= Attrs.getFnAttrs().getVScaleRangeMin();
7288 }
7289 Check((Idx < 0 && std::abs(Idx) <= KnownMinNumElements) ||
7290 (Idx >= 0 && Idx < KnownMinNumElements),
7291 "The splice index exceeds the range [-VL, VL-1] where VL is the "
7292 "known minimum number of elements in the vector. For scalable "
7293 "vectors the minimum number of elements is determined from "
7294 "vscale_range.",
7295 &VPI);
7296 break;
7297 }
7298 }
7299}
7300
7301void Verifier::visitConstrainedFPIntrinsic(ConstrainedFPIntrinsic &FPI) {
7302 unsigned NumOperands = FPI.getNonMetadataArgCount();
7303 bool HasRoundingMD =
7304 Intrinsic::hasConstrainedFPRoundingModeOperand(FPI.getIntrinsicID());
7305
7306 // Add the expected number of metadata operands.
7307 NumOperands += (1 + HasRoundingMD);
7308
7309 // Compare intrinsics carry an extra predicate metadata operand.
7310 if (isa<ConstrainedFPCmpIntrinsic>(FPI))
7311 NumOperands += 1;
7312 Check((FPI.arg_size() == NumOperands),
7313 "invalid arguments for constrained FP intrinsic", &FPI);
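// Editor's example (illustrative): a rounding intrinsic such as
//   call double @llvm.experimental.constrained.fadd.f64(double %a, double %b,
//            metadata !"round.dynamic", metadata !"fpexcept.strict")
// carries two trailing metadata operands; compare intrinsics carry a predicate
// and an exception-behavior operand but no rounding mode.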
7314
7315 switch (FPI.getIntrinsicID()) {
7316 case Intrinsic::experimental_constrained_lrint:
7317 case Intrinsic::experimental_constrained_llrint: {
7318 Type *ValTy = FPI.getArgOperand(0)->getType();
7319 Type *ResultTy = FPI.getType();
7320 Check(!ValTy->isVectorTy() && !ResultTy->isVectorTy(),
7321 "Intrinsic does not support vectors", &FPI);
7322 break;
7323 }
7324
7325 case Intrinsic::experimental_constrained_lround:
7326 case Intrinsic::experimental_constrained_llround: {
7327 Type *ValTy = FPI.getArgOperand(0)->getType();
7328 Type *ResultTy = FPI.getType();
7329 Check(!ValTy->isVectorTy() && !ResultTy->isVectorTy(),
7330 "Intrinsic does not support vectors", &FPI);
7331 break;
7332 }
7333
7334 case Intrinsic::experimental_constrained_fcmp:
7335 case Intrinsic::experimental_constrained_fcmps: {
7336 auto Pred = cast<ConstrainedFPCmpIntrinsic>(&FPI)->getPredicate();
7337 Check(CmpInst::isFPPredicate(Pred),
7338 "invalid predicate for constrained FP comparison intrinsic", &FPI);
7339 break;
7340 }
7341
7342 case Intrinsic::experimental_constrained_fptosi:
7343 case Intrinsic::experimental_constrained_fptoui: {
7344 Value *Operand = FPI.getArgOperand(0);
7345 ElementCount SrcEC;
7346 Check(Operand->getType()->isFPOrFPVectorTy(),
7347 "Intrinsic first argument must be floating point", &FPI);
7348 if (auto *OperandT = dyn_cast<VectorType>(Operand->getType())) {
7349 SrcEC = cast<VectorType>(OperandT)->getElementCount();
7350 }
7351
7352 Operand = &FPI;
7353 Check(SrcEC.isNonZero() == Operand->getType()->isVectorTy(),
7354 "Intrinsic first argument and result disagree on vector use", &FPI);
7355 Check(Operand->getType()->isIntOrIntVectorTy(),
7356 "Intrinsic result must be an integer", &FPI);
7357 if (auto *OperandT = dyn_cast<VectorType>(Operand->getType())) {
7358 Check(SrcEC == cast<VectorType>(OperandT)->getElementCount(),
7359 "Intrinsic first argument and result vector lengths must be equal",
7360 &FPI);
7361 }
7362 break;
7363 }
7364
7365 case Intrinsic::experimental_constrained_sitofp:
7366 case Intrinsic::experimental_constrained_uitofp: {
7367 Value *Operand = FPI.getArgOperand(0);
7368 ElementCount SrcEC;
7369 Check(Operand->getType()->isIntOrIntVectorTy(),
7370 "Intrinsic first argument must be integer", &FPI);
7371 if (auto *OperandT = dyn_cast<VectorType>(Operand->getType())) {
7372 SrcEC = cast<VectorType>(OperandT)->getElementCount();
7373 }
7374
7375 Operand = &FPI;
7376 Check(SrcEC.isNonZero() == Operand->getType()->isVectorTy(),
7377 "Intrinsic first argument and result disagree on vector use", &FPI);
7378 Check(Operand->getType()->isFPOrFPVectorTy(),
7379 "Intrinsic result must be a floating point", &FPI);
7380 if (auto *OperandT = dyn_cast<VectorType>(Operand->getType())) {
7381 Check(SrcEC == cast<VectorType>(OperandT)->getElementCount(),
7382 "Intrinsic first argument and result vector lengths must be equal",
7383 &FPI);
7384 }
7385 break;
7386 }
7387
7388 case Intrinsic::experimental_constrained_fptrunc:
7389 case Intrinsic::experimental_constrained_fpext: {
7390 Value *Operand = FPI.getArgOperand(0);
7391 Type *OperandTy = Operand->getType();
7392 Value *Result = &FPI;
7393 Type *ResultTy = Result->getType();
7394 Check(OperandTy->isFPOrFPVectorTy(),
7395 "Intrinsic first argument must be FP or FP vector", &FPI);
7396 Check(ResultTy->isFPOrFPVectorTy(),
7397 "Intrinsic result must be FP or FP vector", &FPI);
7398 Check(OperandTy->isVectorTy() == ResultTy->isVectorTy(),
7399 "Intrinsic first argument and result disagree on vector use", &FPI);
7400 if (OperandTy->isVectorTy()) {
7401 Check(cast<VectorType>(OperandTy)->getElementCount() ==
7402 cast<VectorType>(ResultTy)->getElementCount(),
7403 "Intrinsic first argument and result vector lengths must be equal",
7404 &FPI);
7405 }
7406 if (FPI.getIntrinsicID() == Intrinsic::experimental_constrained_fptrunc) {
7407 Check(OperandTy->getScalarSizeInBits() > ResultTy->getScalarSizeInBits(),
7408 "Intrinsic first argument's type must be larger than result type",
7409 &FPI);
7410 } else {
7411 Check(OperandTy->getScalarSizeInBits() < ResultTy->getScalarSizeInBits(),
7412 "Intrinsic first argument's type must be smaller than result type",
7413 &FPI);
7414 }
7415 break;
7416 }
7417
7418 default:
7419 break;
7420 }
7421
7422 // If a non-metadata argument is passed in a metadata slot then the
7423 // error will be caught earlier when the incorrect argument doesn't
7424 // match the specification in the intrinsic call table. Thus, no
7425 // argument type check is needed here.
7426
7427 Check(FPI.getExceptionBehavior().has_value(),
7428 "invalid exception behavior argument", &FPI);
7429 if (HasRoundingMD) {
7430 Check(FPI.getRoundingMode().has_value(), "invalid rounding mode argument",
7431 &FPI);
7432 }
7433}
7434
7435void Verifier::verifyFragmentExpression(const DbgVariableRecord &DVR) {
7436 DILocalVariable *V = dyn_cast_or_null<DILocalVariable>(DVR.getRawVariable());
7437 DIExpression *E = dyn_cast_or_null<DIExpression>(DVR.getRawExpression());
7438
7439 // We don't know whether this intrinsic verified correctly.
7440 if (!V || !E || !E->isValid())
7441 return;
7442
7443 // Nothing to do if this isn't a DW_OP_LLVM_fragment expression.
7444 auto Fragment = E->getFragmentInfo();
7445 if (!Fragment)
7446 return;
7447
7448 // The frontend helps out GDB by emitting the members of local anonymous
7449 // unions as artificial local variables with shared storage. When SROA splits
7450 // the storage for artificial local variables that are smaller than the entire
7451 // union, the overhang piece will be outside of the allotted space for the
7452 // variable and this check fails.
7453 // FIXME: Remove this check as soon as clang stops doing this; it hides bugs.
7454 if (V->isArtificial())
7455 return;
7456
7457 verifyFragmentExpression(*V, *Fragment, &DVR);
7458}
7459
7460template <typename ValueOrMetadata>
7461 void Verifier::verifyFragmentExpression(const DIVariable &V,
7462 DIExpression::FragmentInfo Fragment,
7463 ValueOrMetadata *Desc) {
7464 // If there's no size, the type is broken, but that should be checked
7465 // elsewhere.
7466 auto VarSize = V.getSizeInBits();
7467 if (!VarSize)
7468 return;
7469
7470 unsigned FragSize = Fragment.SizeInBits;
7471 unsigned FragOffset = Fragment.OffsetInBits;
7472 CheckDI(FragSize + FragOffset <= *VarSize,
7473 "fragment is larger than or outside of variable", Desc, &V);
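// Editor's example (illustrative): for a 64-bit variable a
// DW_OP_LLVM_fragment with offset 32 and size 32 (its upper half) is fine,
// offset 48/size 32 extends past the variable, and offset 0/size 64 would
// redundantly cover the whole variable; the two checks here reject the
// latter two.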
7474 CheckDI(FragSize != *VarSize, "fragment covers entire variable", Desc, &V);
7475}
7476
7477void Verifier::verifyFnArgs(const DbgVariableRecord &DVR) {
7478 // This function does not take the scope of noninlined function arguments into
7479 // account. Don't run it if the current function is nodebug, because it may
7480 // contain inlined debug intrinsics.
7481 if (!HasDebugInfo)
7482 return;
7483
7484 // For performance reasons only check non-inlined ones.
7485 if (DVR.getDebugLoc()->getInlinedAt())
7486 return;
7487
7488 DILocalVariable *Var = DVR.getVariable();
7489 CheckDI(Var, "#dbg record without variable");
7490
7491 unsigned ArgNo = Var->getArg();
7492 if (!ArgNo)
7493 return;
7494
7495 // Verify there are no duplicate function argument debug info entries.
7496 // These will cause hard-to-debug assertions in the DWARF backend.
7497 if (DebugFnArgs.size() < ArgNo)
7498 DebugFnArgs.resize(ArgNo, nullptr);
7499
7500 auto *Prev = DebugFnArgs[ArgNo - 1];
7501 DebugFnArgs[ArgNo - 1] = Var;
7502 CheckDI(!Prev || (Prev == Var), "conflicting debug info for argument", &DVR,
7503 Prev, Var);
7504}
7505
7506void Verifier::verifyNotEntryValue(const DbgVariableRecord &DVR) {
7507 DIExpression *E = dyn_cast_or_null<DIExpression>(DVR.getRawExpression());
7508
7509 // We don't know whether this intrinsic verified correctly.
7510 if (!E || !E->isValid())
7511 return;
7512
7513 if (isa<ValueAsMetadata>(DVR.getRawLocation())) {
7514 Value *VarValue = DVR.getVariableLocationOp(0);
7515 if (isa<UndefValue>(VarValue) || isa<PoisonValue>(VarValue))
7516 return;
7517 // We allow EntryValues for swift async arguments, as they have an
7518 // ABI-guarantee to be turned into a specific register.
7519 if (auto *ArgLoc = dyn_cast_or_null<Argument>(VarValue);
7520 ArgLoc && ArgLoc->hasAttribute(Attribute::SwiftAsync))
7521 return;
7522 }
7523
7524 CheckDI(!E->isEntryValue(),
7525 "Entry values are only allowed in MIR unless they target a "
7526 "swiftasync Argument",
7527 &DVR);
7528}
7529
7530void Verifier::verifyCompileUnits() {
7531 // When more than one Module is imported into the same context, such as during
7532 // an LTO build before linking the modules, ODR type uniquing may cause types
7533 // to point to a different CU. This check does not make sense in this case.
7534 if (M.getContext().isODRUniquingDebugTypes())
7535 return;
7536 auto *CUs = M.getNamedMetadata("llvm.dbg.cu");
7537 SmallPtrSet<const Metadata *, 2> Listed;
7538 if (CUs)
7539 Listed.insert_range(CUs->operands());
7540 for (const auto *CU : CUVisited)
7541 CheckDI(Listed.count(CU), "DICompileUnit not listed in llvm.dbg.cu", CU);
7542 CUVisited.clear();
7543}
7544
7545void Verifier::verifyDeoptimizeCallingConvs() {
7546 if (DeoptimizeDeclarations.empty())
7547 return;
7548
7549 const Function *First = DeoptimizeDeclarations[0];
7550 for (const auto *F : ArrayRef(DeoptimizeDeclarations).slice(1)) {
7551 Check(First->getCallingConv() == F->getCallingConv(),
7552 "All llvm.experimental.deoptimize declarations must have the same "
7553 "calling convention",
7554 First, F);
7555 }
7556}
7557
7558void Verifier::verifyAttachedCallBundle(const CallBase &Call,
7559 const OperandBundleUse &BU) {
7560 FunctionType *FTy = Call.getFunctionType();
7561
7562 Check((FTy->getReturnType()->isPointerTy() ||
7563 (Call.doesNotReturn() && FTy->getReturnType()->isVoidTy())),
7564 "a call with operand bundle \"clang.arc.attachedcall\" must call a "
7565 "function returning a pointer or a non-returning function that has a "
7566 "void return type",
7567 Call);
7568
7569 Check(BU.Inputs.size() == 1 && isa<Function>(BU.Inputs.front()),
7570 "operand bundle \"clang.arc.attachedcall\" requires one function as "
7571 "an argument",
7572 Call);
7573
7574 auto *Fn = cast<Function>(BU.Inputs.front());
7575 Intrinsic::ID IID = Fn->getIntrinsicID();
7576
7577 if (IID) {
7578 Check((IID == Intrinsic::objc_retainAutoreleasedReturnValue ||
7579 IID == Intrinsic::objc_claimAutoreleasedReturnValue ||
7580 IID == Intrinsic::objc_unsafeClaimAutoreleasedReturnValue),
7581 "invalid function argument", Call);
7582 } else {
7583 StringRef FnName = Fn->getName();
7584 Check((FnName == "objc_retainAutoreleasedReturnValue" ||
7585 FnName == "objc_claimAutoreleasedReturnValue" ||
7586 FnName == "objc_unsafeClaimAutoreleasedReturnValue"),
7587 "invalid function argument", Call);
7588 }
7589}
7590
7591void Verifier::verifyNoAliasScopeDecl() {
7592 if (NoAliasScopeDecls.empty())
7593 return;
7594
7595 // only a single scope must be declared at a time.
7596 for (auto *II : NoAliasScopeDecls) {
7597 assert(II->getIntrinsicID() == Intrinsic::experimental_noalias_scope_decl &&
7598 "Not a llvm.experimental.noalias.scope.decl ?");
7599 const auto *ScopeListMV = dyn_cast<MetadataAsValue>(
7600 II->getOperand(Intrinsic::NoAliasScopeDeclScopeArg));
7601 Check(ScopeListMV != nullptr,
7602 "llvm.experimental.noalias.scope.decl must have a MetadataAsValue "
7603 "argument",
7604 II);
7605
7606 const auto *ScopeListMD = dyn_cast<MDNode>(ScopeListMV->getMetadata());
7607 Check(ScopeListMD != nullptr, "!id.scope.list must point to an MDNode", II);
7608 Check(ScopeListMD->getNumOperands() == 1,
7609 "!id.scope.list must point to a list with a single scope", II);
7610 visitAliasScopeListMetadata(ScopeListMD);
7611 }
7612
7613 // Only check the domination rule when requested. Once all passes have been
7614 // adapted this option can go away.
7615 if (!VerifyNoAliasScopeDomination)
7616 return;
7617
7618 // Now sort the intrinsics based on the scope MDNode so that declarations of
7619 // the same scopes are next to each other.
7620 auto GetScope = [](IntrinsicInst *II) {
7621 const auto *ScopeListMV = cast<MetadataAsValue>(
7622 II->getOperand(Intrinsic::NoAliasScopeDeclScopeArg));
7623 return &cast<MDNode>(ScopeListMV->getMetadata())->getOperand(0);
7624 };
7625
7626 // We are sorting on MDNode pointers here. For valid input IR this is ok.
7627 // TODO: Sort on Metadata ID to avoid non-deterministic error messages.
7628 auto Compare = [GetScope](IntrinsicInst *Lhs, IntrinsicInst *Rhs) {
7629 return GetScope(Lhs) < GetScope(Rhs);
7630 };
7631
7632 llvm::sort(NoAliasScopeDecls, Compare);
7633
7634 // Go over the intrinsics and check that for the same scope, they are not
7635 // dominating each other.
7636 auto ItCurrent = NoAliasScopeDecls.begin();
7637 while (ItCurrent != NoAliasScopeDecls.end()) {
7638 auto CurScope = GetScope(*ItCurrent);
7639 auto ItNext = ItCurrent;
7640 do {
7641 ++ItNext;
7642 } while (ItNext != NoAliasScopeDecls.end() &&
7643 GetScope(*ItNext) == CurScope);
7644
7645 // [ItCurrent, ItNext) represents the declarations for the same scope.
7646 // Ensure they are not dominating each other.. but only if it is not too
7647 // expensive.
7648 if (ItNext - ItCurrent < 32)
7649 for (auto *I : llvm::make_range(ItCurrent, ItNext))
7650 for (auto *J : llvm::make_range(ItCurrent, ItNext))
7651 if (I != J)
7652 Check(!DT.dominates(I, J),
7653 "llvm.experimental.noalias.scope.decl dominates another one "
7654 "with the same scope",
7655 I);
7656 ItCurrent = ItNext;
7657 }
7658}
7659
7660//===----------------------------------------------------------------------===//
7661// Implement the public interfaces to this file...
7662//===----------------------------------------------------------------------===//
7663
7664bool llvm::verifyFunction(const Function &f, raw_ostream *OS) {
7665 Function &F = const_cast<Function &>(f);
7666
7667 // Don't use a raw_null_ostream. Printing IR is expensive.
7668 Verifier V(OS, /*ShouldTreatBrokenDebugInfoAsError=*/true, *f.getParent());
7669
7670 // Note that this function's return value is inverted from what you would
7671 // expect of a function called "verify".
7672 return !V.verify(F);
7673}
7674
7675bool llvm::verifyModule(const Module &M, raw_ostream *OS,
7676 bool *BrokenDebugInfo) {
7677 // Don't use a raw_null_ostream. Printing IR is expensive.
7678 Verifier V(OS, /*ShouldTreatBrokenDebugInfoAsError=*/!BrokenDebugInfo, M);
7679
7680 bool Broken = false;
7681 for (const Function &F : M)
7682 Broken |= !V.verify(F);
7683
7684 Broken |= !V.verify();
7685 if (BrokenDebugInfo)
7686 *BrokenDebugInfo = V.hasBrokenDebugInfo();
7687 // Note that this function's return value is inverted from what you would
7688 // expect of a function called "verify".
7689 return Broken;
7690}
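A minimal client-side sketch (not part of Verifier.cpp) of how these two entry points are typically called. Note that both return true when the IR is *broken*, the inverse of what the name "verify" suggests; the wrapper name checkModuleIsWellFormed below is hypothetical.

// Sketch only: exercise llvm::verifyFunction / llvm::verifyModule.
#include "llvm/IR/Module.h"
#include "llvm/IR/Verifier.h"
#include "llvm/Support/raw_ostream.h"

static bool checkModuleIsWellFormed(llvm::Module &M) {
  bool BrokenDebugInfo = false;
  // Returns true if the module is malformed; diagnostics go to errs().
  if (llvm::verifyModule(M, &llvm::errs(), &BrokenDebugInfo))
    return false;
  if (BrokenDebugInfo)
    llvm::errs() << "warning: debug info is malformed (IR itself is fine)\n";
  return true;
}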
7691
7692namespace {
7693
7694struct VerifierLegacyPass : public FunctionPass {
7695 static char ID;
7696
7697 std::unique_ptr<Verifier> V;
7698 bool FatalErrors = true;
7699
7700 VerifierLegacyPass() : FunctionPass(ID) {
7701 initializeVerifierLegacyPassPass(*PassRegistry::getPassRegistry());
7702 }
7703 explicit VerifierLegacyPass(bool FatalErrors)
7704 : FunctionPass(ID),
7705 FatalErrors(FatalErrors) {
7706 initializeVerifierLegacyPassPass(*PassRegistry::getPassRegistry());
7707 }
7708
7709 bool doInitialization(Module &M) override {
7710 V = std::make_unique<Verifier>(
7711 &dbgs(), /*ShouldTreatBrokenDebugInfoAsError=*/false, M);
7712 return false;
7713 }
7714
7715 bool runOnFunction(Function &F) override {
7716 if (!V->verify(F) && FatalErrors) {
7717 errs() << "in function " << F.getName() << '\n';
7718 report_fatal_error("Broken function found, compilation aborted!");
7719 }
7720 return false;
7721 }
7722
7723 bool doFinalization(Module &M) override {
7724 bool HasErrors = false;
7725 for (Function &F : M)
7726 if (F.isDeclaration())
7727 HasErrors |= !V->verify(F);
7728
7729 HasErrors |= !V->verify();
7730 if (FatalErrors && (HasErrors || V->hasBrokenDebugInfo()))
7731 report_fatal_error("Broken module found, compilation aborted!");
7732 return false;
7733 }
7734
7735 void getAnalysisUsage(AnalysisUsage &AU) const override {
7736 AU.setPreservesAll();
7737 }
7738};
7739
7740} // end anonymous namespace
7741
7742/// Helper to issue failure from the TBAA verification
7743template <typename... Tys> void TBAAVerifier::CheckFailed(Tys &&... Args) {
7744 if (Diagnostic)
7745 return Diagnostic->CheckFailed(Args...);
7746}
7747
7748#define CheckTBAA(C, ...) \
7749 do { \
7750 if (!(C)) { \
7751 CheckFailed(__VA_ARGS__); \
7752 return false; \
7753 } \
7754 } while (false)
7755
7756/// Verify that \p BaseNode can be used as the "base type" in the struct-path
7757/// TBAA scheme. This means \p BaseNode is either a scalar node, or a
7758/// struct-type node describing an aggregate data structure (like a struct).
7759TBAAVerifier::TBAABaseNodeSummary
7760TBAAVerifier::verifyTBAABaseNode(const Instruction *I, const MDNode *BaseNode,
7761 bool IsNewFormat) {
7762 if (BaseNode->getNumOperands() < 2) {
7763 CheckFailed("Base nodes must have at least two operands", I, BaseNode);
7764 return {true, ~0u};
7765 }
7766
7767 auto Itr = TBAABaseNodes.find(BaseNode);
7768 if (Itr != TBAABaseNodes.end())
7769 return Itr->second;
7770
7771 auto Result = verifyTBAABaseNodeImpl(I, BaseNode, IsNewFormat);
7772 auto InsertResult = TBAABaseNodes.insert({BaseNode, Result});
7773 (void)InsertResult;
7774 assert(InsertResult.second && "We just checked!");
7775 return Result;
7776}
7777
7778TBAAVerifier::TBAABaseNodeSummary
7779TBAAVerifier::verifyTBAABaseNodeImpl(const Instruction *I,
7780 const MDNode *BaseNode, bool IsNewFormat) {
7781 const TBAAVerifier::TBAABaseNodeSummary InvalidNode = {true, ~0u};
7782
7783 if (BaseNode->getNumOperands() == 2) {
7784 // Scalar nodes can only be accessed at offset 0.
7785 return isValidScalarTBAANode(BaseNode)
7786 ? TBAAVerifier::TBAABaseNodeSummary({false, 0})
7787 : InvalidNode;
7788 }
7789
7790 if (IsNewFormat) {
7791 if (BaseNode->getNumOperands() % 3 != 0) {
7792 CheckFailed("Access tag nodes must have the number of operands that is a "
7793 "multiple of 3!", BaseNode);
7794 return InvalidNode;
7795 }
7796 } else {
7797 if (BaseNode->getNumOperands() % 2 != 1) {
7798 CheckFailed("Struct tag nodes must have an odd number of operands!",
7799 BaseNode);
7800 return InvalidNode;
7801 }
7802 }
7803
7804 // Check the type size field.
7805 if (IsNewFormat) {
7806 auto *TypeSizeNode = mdconst::dyn_extract_or_null<ConstantInt>(
7807 BaseNode->getOperand(1));
7808 if (!TypeSizeNode) {
7809 CheckFailed("Type size nodes must be constants!", I, BaseNode);
7810 return InvalidNode;
7811 }
7812 }
7813
7814 // Check the type name field. In the new format it can be anything.
7815 if (!IsNewFormat && !isa<MDString>(BaseNode->getOperand(0))) {
7816 CheckFailed("Struct tag nodes have a string as their first operand",
7817 BaseNode);
7818 return InvalidNode;
7819 }
7820
7821 bool Failed = false;
7822
7823 std::optional<APInt> PrevOffset;
7824 unsigned BitWidth = ~0u;
7825
7826 // We've already checked that BaseNode is not a degenerate root node with one
7827 // operand in \c verifyTBAABaseNode, so this loop should run at least once.
7828 unsigned FirstFieldOpNo = IsNewFormat ? 3 : 1;
7829 unsigned NumOpsPerField = IsNewFormat ? 3 : 2;
7830 for (unsigned Idx = FirstFieldOpNo; Idx < BaseNode->getNumOperands();
7831 Idx += NumOpsPerField) {
7832 const MDOperand &FieldTy = BaseNode->getOperand(Idx);
7833 const MDOperand &FieldOffset = BaseNode->getOperand(Idx + 1);
7834 if (!isa<MDNode>(FieldTy)) {
7835 CheckFailed("Incorrect field entry in struct type node!", I, BaseNode);
7836 Failed = true;
7837 continue;
7838 }
7839
7840 auto *OffsetEntryCI =
7841 mdconst::dyn_extract_or_null<ConstantInt>(FieldOffset);
7842 if (!OffsetEntryCI) {
7843 CheckFailed("Offset entries must be constants!", I, BaseNode);
7844 Failed = true;
7845 continue;
7846 }
7847
7848 if (BitWidth == ~0u)
7849 BitWidth = OffsetEntryCI->getBitWidth();
7850
7851 if (OffsetEntryCI->getBitWidth() != BitWidth) {
7852 CheckFailed(
7853 "Bitwidth between the offsets and struct type entries must match", I,
7854 BaseNode);
7855 Failed = true;
7856 continue;
7857 }
7858
7859 // NB! As far as I can tell, we generate a non-strictly increasing offset
7860 // sequence only from structs that have zero size bit fields. When
7861 // recursing into a contained struct in \c getFieldNodeFromTBAABaseNode we
7862 // pick the field lexically the latest in struct type metadata node. This
7863 // mirrors the actual behavior of the alias analysis implementation.
7864 bool IsAscending =
7865 !PrevOffset || PrevOffset->ule(OffsetEntryCI->getValue());
7866
7867 if (!IsAscending) {
7868 CheckFailed("Offsets must be increasing!", I, BaseNode);
7869 Failed = true;
7870 }
7871
7872 PrevOffset = OffsetEntryCI->getValue();
7873
7874 if (IsNewFormat) {
7875 auto *MemberSizeNode = mdconst::dyn_extract_or_null<ConstantInt>(
7876 BaseNode->getOperand(Idx + 2));
7877 if (!MemberSizeNode) {
7878 CheckFailed("Member size entries must be constants!", I, BaseNode);
7879 Failed = true;
7880 continue;
7881 }
7882 }
7883 }
7884
7885 return Failed ? InvalidNode
7886 : TBAAVerifier::TBAABaseNodeSummary(false, BitWidth);
7887}
7888
7889static bool IsRootTBAANode(const MDNode *MD) {
7890 return MD->getNumOperands() < 2;
7891}
7892
7893static bool IsScalarTBAANodeImpl(const MDNode *MD,
7894 SmallPtrSetImpl<const MDNode *> &Visited) {
7895 if (MD->getNumOperands() != 2 && MD->getNumOperands() != 3)
7896 return false;
7897
7898 if (!isa<MDString>(MD->getOperand(0)))
7899 return false;
7900
7901 if (MD->getNumOperands() == 3) {
7902 ConstantInt *Offset = mdconst::dyn_extract<ConstantInt>(MD->getOperand(2));
7903 if (!(Offset && Offset->isZero() && isa<MDString>(MD->getOperand(0))))
7904 return false;
7905 }
7906
7907 auto *Parent = dyn_cast_or_null<MDNode>(MD->getOperand(1));
7908 return Parent && Visited.insert(Parent).second &&
7909 (IsRootTBAANode(Parent) || IsScalarTBAANodeImpl(Parent, Visited));
7910}
7911
7912bool TBAAVerifier::isValidScalarTBAANode(const MDNode *MD) {
7913 auto ResultIt = TBAAScalarNodes.find(MD);
7914 if (ResultIt != TBAAScalarNodes.end())
7915 return ResultIt->second;
7916
7917 SmallPtrSet<const MDNode *, 4> Visited;
7918 bool Result = IsScalarTBAANodeImpl(MD, Visited);
7919 auto InsertResult = TBAAScalarNodes.insert({MD, Result});
7920 (void)InsertResult;
7921 assert(InsertResult.second && "Just checked!");
7922
7923 return Result;
7924}
7925
7926/// Returns the field node at the offset \p Offset in \p BaseNode. Update \p
7927/// Offset in place to be the offset within the field node returned.
7928///
7929/// We assume we've okayed \p BaseNode via \c verifyTBAABaseNode.
7930MDNode *TBAAVerifier::getFieldNodeFromTBAABaseNode(const Instruction *I,
7931 const MDNode *BaseNode,
7932 APInt &Offset,
7933 bool IsNewFormat) {
7934 assert(BaseNode->getNumOperands() >= 2 && "Invalid base node!");
7935
7936 // Scalar nodes have only one possible "field" -- their parent in the access
7937 // hierarchy. Offset must be zero at this point, but our caller is supposed
7938 // to check that.
7939 if (BaseNode->getNumOperands() == 2)
7940 return cast<MDNode>(BaseNode->getOperand(1));
7941
7942 unsigned FirstFieldOpNo = IsNewFormat ? 3 : 1;
7943 unsigned NumOpsPerField = IsNewFormat ? 3 : 2;
7944 for (unsigned Idx = FirstFieldOpNo; Idx < BaseNode->getNumOperands();
7945 Idx += NumOpsPerField) {
7946 auto *OffsetEntryCI =
7947 mdconst::extract<ConstantInt>(BaseNode->getOperand(Idx + 1));
7948 if (OffsetEntryCI->getValue().ugt(Offset)) {
7949 if (Idx == FirstFieldOpNo) {
7950 CheckFailed("Could not find TBAA parent in struct type node", I,
7951 BaseNode, &Offset);
7952 return nullptr;
7953 }
7954
7955 unsigned PrevIdx = Idx - NumOpsPerField;
7956 auto *PrevOffsetEntryCI =
7957 mdconst::extract<ConstantInt>(BaseNode->getOperand(PrevIdx + 1));
7958 Offset -= PrevOffsetEntryCI->getValue();
7959 return cast<MDNode>(BaseNode->getOperand(PrevIdx));
7960 }
7961 }
7962
7963 unsigned LastIdx = BaseNode->getNumOperands() - NumOpsPerField;
7964 auto *LastOffsetEntryCI = mdconst::extract<ConstantInt>(
7965 BaseNode->getOperand(LastIdx + 1));
7966 Offset -= LastOffsetEntryCI->getValue();
7967 return cast<MDNode>(BaseNode->getOperand(LastIdx));
7968}
7969
7970static bool isNewFormatTBAATypeNode(llvm::MDNode *Type) {
7971 if (!Type || Type->getNumOperands() < 3)
7972 return false;
7973
7974 // In the new format type nodes shall have a reference to the parent type as
7975 // its first operand.
7976 return isa_and_nonnull<MDNode>(Type->getOperand(0));
7977}
7978
7979bool TBAAVerifier::visitTBAAMetadata(const Instruction *I, const MDNode *MD) {
7980 CheckTBAA(MD->getNumOperands() > 0, "TBAA metadata cannot have 0 operands", I,
7981 MD);
7982
7983 if (I)
7984 CheckTBAA(isa<LoadInst>(I) || isa<StoreInst>(I) || isa<CallInst>(I) ||
7985 isa<VAArgInst>(I) || isa<AtomicRMWInst>(I) ||
7986 isa<AtomicCmpXchgInst>(I),
7987 "This instruction shall not have a TBAA access tag!", I);
7988
7989 bool IsStructPathTBAA =
7990 isa<MDNode>(MD->getOperand(0)) && MD->getNumOperands() >= 3;
7991
7992 CheckTBAA(IsStructPathTBAA,
7993 "Old-style TBAA is no longer allowed, use struct-path TBAA instead",
7994 I);
7995
7996 MDNode *BaseNode = dyn_cast_or_null<MDNode>(MD->getOperand(0));
7997 MDNode *AccessType = dyn_cast_or_null<MDNode>(MD->getOperand(1));
7998
7999 bool IsNewFormat = isNewFormatTBAATypeNode(AccessType);
8000
8001 if (IsNewFormat) {
8002 CheckTBAA(MD->getNumOperands() == 4 || MD->getNumOperands() == 5,
8003 "Access tag metadata must have either 4 or 5 operands", I, MD);
8004 } else {
8005 CheckTBAA(MD->getNumOperands() < 5,
8006 "Struct tag metadata must have either 3 or 4 operands", I, MD);
8007 }
8008
8009 // Check the access size field.
8010 if (IsNewFormat) {
8011 auto *AccessSizeNode = mdconst::dyn_extract_or_null<ConstantInt>(
8012 MD->getOperand(3));
8013 CheckTBAA(AccessSizeNode, "Access size field must be a constant", I, MD);
8014 }
8015
8016 // Check the immutability flag.
8017 unsigned ImmutabilityFlagOpNo = IsNewFormat ? 4 : 3;
8018 if (MD->getNumOperands() == ImmutabilityFlagOpNo + 1) {
8019 auto *IsImmutableCI = mdconst::dyn_extract_or_null<ConstantInt>(
8020 MD->getOperand(ImmutabilityFlagOpNo));
8021 CheckTBAA(IsImmutableCI,
8022 "Immutability tag on struct tag metadata must be a constant", I,
8023 MD);
8024 CheckTBAA(
8025 IsImmutableCI->isZero() || IsImmutableCI->isOne(),
8026 "Immutability part of the struct tag metadata must be either 0 or 1", I,
8027 MD);
8028 }
8029
8030 CheckTBAA(BaseNode && AccessType,
8031 "Malformed struct tag metadata: base and access-type "
8032 "should be non-null and point to Metadata nodes",
8033 I, MD, BaseNode, AccessType);
8034
8035 if (!IsNewFormat) {
8036 CheckTBAA(isValidScalarTBAANode(AccessType),
8037 "Access type node must be a valid scalar type", I, MD,
8038 AccessType);
8039 }
8040
8041 auto *OffsetCI = mdconst::dyn_extract_or_null<ConstantInt>(MD->getOperand(2));
8042 CheckTBAA(OffsetCI, "Offset must be constant integer", I, MD);
8043
8044 APInt Offset = OffsetCI->getValue();
8045 bool SeenAccessTypeInPath = false;
8046
8047 SmallPtrSet<MDNode *, 4> StructPath;
8048
8049 for (/* empty */; BaseNode && !IsRootTBAANode(BaseNode);
8050 BaseNode =
8051 getFieldNodeFromTBAABaseNode(I, BaseNode, Offset, IsNewFormat)) {
8052 if (!StructPath.insert(BaseNode).second) {
8053 CheckFailed("Cycle detected in struct path", I, MD);
8054 return false;
8055 }
8056
8057 bool Invalid;
8058 unsigned BaseNodeBitWidth;
8059 std::tie(Invalid, BaseNodeBitWidth) =
8060 verifyTBAABaseNode(I, BaseNode, IsNewFormat);
8061
8062 // If the base node is invalid in itself, then we've already printed all the
8063 // errors we wanted to print.
8064 if (Invalid)
8065 return false;
8066
8067 SeenAccessTypeInPath |= BaseNode == AccessType;
8068
8069 if (isValidScalarTBAANode(BaseNode) || BaseNode == AccessType)
8070 CheckTBAA(Offset == 0, "Offset not zero at the point of scalar access", I,
8071 MD, &Offset);
8072
8073 CheckTBAA(BaseNodeBitWidth == Offset.getBitWidth() ||
8074 (BaseNodeBitWidth == 0 && Offset == 0) ||
8075 (IsNewFormat && BaseNodeBitWidth == ~0u),
8076 "Access bit-width not the same as description bit-width", I, MD,
8077 BaseNodeBitWidth, Offset.getBitWidth());
8078
8079 if (IsNewFormat && SeenAccessTypeInPath)
8080 break;
8081 }
8082
8083 CheckTBAA(SeenAccessTypeInPath, "Did not see access type in access path!", I,
8084 MD);
8085 return true;
8086}
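The struct-path access tags accepted by visitTBAAMetadata are normally produced through llvm::MDBuilder rather than assembled by hand. A hedged sketch (not part of this file; the helper name buildIntAccessTag is hypothetical) of a well-formed old-format tag, mirroring the base-type/access-type/offset layout checked above:

// Sketch only: build root -> scalar type nodes -> access tag with MDBuilder.
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Metadata.h"

static llvm::MDNode *buildIntAccessTag(llvm::LLVMContext &Ctx) {
  llvm::MDBuilder MDB(Ctx);
  llvm::MDNode *Root = MDB.createTBAARoot("Simple C/C++ TBAA");
  llvm::MDNode *Char = MDB.createTBAAScalarTypeNode("omnipotent char", Root);
  llvm::MDNode *Int  = MDB.createTBAAScalarTypeNode("int", Char);
  // Access tag operands: base type, access type, constant offset (optionally a
  // 0/1 immutability flag) -- the shape the checks above expect.
  return MDB.createTBAAStructTagNode(Int, Int, /*Offset=*/0);
}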
8087
8088char VerifierLegacyPass::ID = 0;
8089INITIALIZE_PASS(VerifierLegacyPass, "verify", "Module Verifier", false, false)
8090
8091FunctionPass *llvm::createVerifierPass(bool FatalErrors) {
8092 return new VerifierLegacyPass(FatalErrors);
8093}
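A hedged usage sketch (not part of this file; the wrapper name runLegacyVerifier is hypothetical) of the factory above with the legacy pass manager. With FatalErrors left at its default of true, broken IR aborts compilation through report_fatal_error, so the sketch passes false to only print diagnostics.

// Sketch only: run the legacy verifier pass over a module.
#include "llvm/IR/LegacyPassManager.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Verifier.h"

static void runLegacyVerifier(llvm::Module &M) {
  llvm::legacy::PassManager PM;
  PM.add(llvm::createVerifierPass(/*FatalErrors=*/false));
  PM.run(M);
}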
8094
8095AnalysisKey VerifierAnalysis::Key;
8096VerifierAnalysis::Result VerifierAnalysis::run(Module &M,
8097 ModuleAnalysisManager &) {
8098 Result Res;
8099 Res.IRBroken = llvm::verifyModule(M, &dbgs(), &Res.DebugInfoBroken);
8100 return Res;
8101}
8102
8103VerifierAnalysis::Result VerifierAnalysis::run(Function &F,
8104 FunctionAnalysisManager &) {
8105 return {llvm::verifyFunction(F, &dbgs()), false};
8106}
8107
8108PreservedAnalyses VerifierPass::run(Module &M, ModuleAnalysisManager &AM) {
8109 auto Res = AM.getResult<VerifierAnalysis>(M);
8110 if (FatalErrors && (Res.IRBroken || Res.DebugInfoBroken))
8111 report_fatal_error("Broken module found, compilation aborted!");
8112
8113 return PreservedAnalyses::all();
8114}
8115
8116PreservedAnalyses VerifierPass::run(Function &F, FunctionAnalysisManager &AM) {
8117 auto res = AM.getResult<VerifierAnalysis>(F);
8118 if (res.IRBroken && FatalErrors)
8119 report_fatal_error("Broken function found, compilation aborted!");
8120
8121 return PreservedAnalyses::all();
8122}
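Finally, a hedged sketch (not part of this file; the wrapper name runNewPMVerifier is hypothetical) of scheduling VerifierPass in the new pass manager. The analysis managers are registered through a PassBuilder so that the VerifierAnalysis result queried in the run() methods above is available.

// Sketch only: schedule VerifierPass in the new pass manager.
#include "llvm/Analysis/CGSCCPassManager.h"
#include "llvm/Analysis/LoopAnalysisManager.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/Verifier.h"
#include "llvm/Passes/PassBuilder.h"

static void runNewPMVerifier(llvm::Module &M) {
  llvm::PassBuilder PB;
  llvm::LoopAnalysisManager LAM;
  llvm::FunctionAnalysisManager FAM;
  llvm::CGSCCAnalysisManager CGAM;
  llvm::ModuleAnalysisManager MAM;
  PB.registerModuleAnalyses(MAM);
  PB.registerCGSCCAnalyses(CGAM);
  PB.registerFunctionAnalyses(FAM);
  PB.registerLoopAnalyses(LAM);
  PB.crossRegisterProxies(LAM, FAM, CGAM, MAM);

  llvm::ModulePassManager MPM;
  MPM.addPass(llvm::VerifierPass(/*FatalErrors=*/false));
  MPM.run(M, MAM);
}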
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
AMDGPU address space definition.
ArrayRef< TableEntry > TableRef
This file declares a class to represent arbitrary precision floating point values and provide a varie...
This file implements a class to represent arbitrary precision integral constant values and operations...
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Atomic ordering constants.
@ RetAttr
@ FnAttr
This file contains the simple types necessary to represent the attributes associated with functions a...
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
Analysis containing CSE Info
Definition CSEInfo.cpp:27
This file contains the declarations for the subclasses of Constant, which represent the different fla...
This file declares the LLVM IR specialization of the GenericConvergenceVerifier template.
static DISubprogram * getSubprogram(bool IsDistinct, Ts &&...Args)
dxil translate DXIL Translate Metadata
This file defines the DenseMap class.
This file contains constants used for implementing Dwarf debug support.
static bool runOnFunction(Function &F, bool PostInlining)
#define Check(C,...)
Hexagon Common GEP
This file provides various utilities for inspecting and working with the control flow graph in LLVM I...
Module.h This file contains the declarations for the Module class.
This header defines various interfaces for pass management in LLVM.
This defines the Use class.
const size_t AbstractManglingParser< Derived, Alloc >::NumOps
#define F(x, y, z)
Definition MD5.cpp:54
#define I(x, y, z)
Definition MD5.cpp:57
Machine Check Debug Module
This file implements a map that provides insertion order iteration.
This file provides utility for Memory Model Relaxation Annotations (MMRAs).
static bool isContiguous(const ConstantRange &A, const ConstantRange &B)
This file contains the declarations for metadata subclasses.
#define T
#define T1
ConstantRange Range(APInt(BitWidth, Low), APInt(BitWidth, High))
uint64_t High
uint64_t IntrinsicInst * II
#define P(N)
ppc ctr loops verify
#define INITIALIZE_PASS(passName, arg, name, cfg, analysis)
Definition PassSupport.h:56
This file contains the declarations for profiling metadata utility functions.
const SmallVectorImpl< MachineOperand > & Cond
static bool isValid(const char C)
Returns true if C is a valid mangled character: <0-9a-zA-Z_>.
static unsigned getNumElements(Type *Ty)
void visit(MachineFunction &MF, MachineBasicBlock &Start, std::function< void(MachineBasicBlock *)> op)
This file contains some templates that are useful if you are working with the STL at all.
verify safepoint Safepoint IR Verifier
BaseType
A given derived pointer can have multiple base pointers through phi/selects.
This file defines the SmallPtrSet class.
This file defines the SmallVector class.
This file contains some functions that are useful when dealing with strings.
static unsigned getBitWidth(Type *Ty, const DataLayout &DL)
Returns the bitwidth of the given scalar or pointer type.
static bool IsScalarTBAANodeImpl(const MDNode *MD, SmallPtrSetImpl< const MDNode * > &Visited)
static bool isType(const Metadata *MD)
static Instruction * getSuccPad(Instruction *Terminator)
static bool isMDTuple(const Metadata *MD)
static bool isNewFormatTBAATypeNode(llvm::MDNode *Type)
#define CheckDI(C,...)
We know that a debug info condition should be true, if not print an error message.
Definition Verifier.cpp:681
static void forEachUser(const Value *User, SmallPtrSet< const Value *, 32 > &Visited, llvm::function_ref< bool(const Value *)> Callback)
Definition Verifier.cpp:722
static bool isDINode(const Metadata *MD)
static bool isScope(const Metadata *MD)
static cl::opt< bool > VerifyNoAliasScopeDomination("verify-noalias-scope-decl-dom", cl::Hidden, cl::init(false), cl::desc("Ensure that llvm.experimental.noalias.scope.decl for identical " "scopes are not dominating"))
static bool isTypeCongruent(Type *L, Type *R)
Two types are "congruent" if they are identical, or if they are both pointer types with different poi...
#define CheckTBAA(C,...)
static bool isConstantIntMetadataOperand(const Metadata *MD)
static bool IsRootTBAANode(const MDNode *MD)
static Value * getParentPad(Value *EHPad)
static bool hasConflictingReferenceFlags(unsigned Flags)
Detect mutually exclusive flags.
static AttrBuilder getParameterABIAttributes(LLVMContext &C, unsigned I, AttributeList Attrs)
static const char PassName[]
bool isFiniteNonZero() const
Definition APFloat.h:1441
bool isNegative() const
Definition APFloat.h:1431
const fltSemantics & getSemantics() const
Definition APFloat.h:1439
Class for arbitrary precision integers.
Definition APInt.h:78
bool sgt(const APInt &RHS) const
Signed greater than comparison.
Definition APInt.h:1202
bool isMinValue() const
Determine if this is the smallest unsigned value.
Definition APInt.h:418
bool ule(const APInt &RHS) const
Unsigned less or equal comparison.
Definition APInt.h:1151
bool isPowerOf2() const
Check if this APInt's value is a power of two greater than zero.
Definition APInt.h:441
int64_t getSExtValue() const
Get sign extended value.
Definition APInt.h:1563
bool isMaxValue() const
Determine if this is the largest unsigned value.
Definition APInt.h:400
This class represents a conversion between pointers from one address space to another.
bool isSwiftError() const
Return true if this alloca is used as a swifterror argument to a call.
LLVM_ABI bool isStaticAlloca() const
Return true if this alloca is in the entry block of the function and is a constant size.
Align getAlign() const
Return the alignment of the memory that is being allocated by the instruction.
Type * getAllocatedType() const
Return the type that is being allocated by the instruction.
unsigned getAddressSpace() const
Return the address space for the allocation.
LLVM_ABI bool isArrayAllocation() const
Return true if there is an allocation size parameter to the allocation instruction that is not 1.
const Value * getArraySize() const
Get the number of elements allocated.
PassT::Result & getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs)
Get the result of an analysis pass for a given IR unit.
void setPreservesAll()
Set by analyses that do not transform their input at all.
LLVM_ABI bool hasInRegAttr() const
Return true if this argument has the inreg attribute.
Definition Function.cpp:293
bool empty() const
empty - Check if the array is empty.
Definition ArrayRef.h:137
static bool isFPOperation(BinOp Op)
BinOp getOperation() const
static LLVM_ABI StringRef getOperationName(BinOp Op)
AtomicOrdering getOrdering() const
Returns the ordering constraint of this rmw instruction.
bool contains(Attribute::AttrKind A) const
Return true if the builder has the specified attribute.
LLVM_ABI bool hasAttribute(Attribute::AttrKind Kind) const
Return true if the attribute exists in this set.
LLVM_ABI std::string getAsString(bool InAttrGrp=false) const
Functions, function parameters, and return types can have attributes to indicate how they should be t...
Definition Attributes.h:69
LLVM_ABI const ConstantRange & getValueAsConstantRange() const
Return the attribute's value as a ConstantRange.
LLVM_ABI StringRef getValueAsString() const
Return the attribute's value as a string.
AttrKind
This enumeration lists the attributes that can be associated with parameters, function results,...
Definition Attributes.h:88
bool isValid() const
Return true if the attribute is any kind of attribute.
Definition Attributes.h:223
LLVM Basic Block Representation.
Definition BasicBlock.h:62
iterator begin()
Instruction iterator methods.
Definition BasicBlock.h:459
iterator_range< const_phi_iterator > phis() const
Returns a range that iterates over the phis in the basic block.
Definition BasicBlock.h:528
const Function * getParent() const
Return the enclosing method, or null if none.
Definition BasicBlock.h:213
LLVM_ABI InstListType::const_iterator getFirstNonPHIIt() const
Returns an iterator to the first instruction in this block that is not a PHINode instruction.
LLVM_ABI bool isEntryBlock() const
Return true if this is the entry block of the containing function.
const Instruction & front() const
Definition BasicBlock.h:482
LLVM_ABI const BasicBlock * getUniquePredecessor() const
Return the predecessor of this block if it has a unique predecessor block.
InstListType::iterator iterator
Instruction iterators...
Definition BasicBlock.h:170
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
Definition BasicBlock.h:233
This class represents a no-op cast from one type to another.
static LLVM_ABI BlockAddress * lookup(const BasicBlock *BB)
Lookup an existing BlockAddress constant for the given BasicBlock.
bool isConditional() const
Value * getCondition() const
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
bool isInlineAsm() const
Check if this call is an inline asm statement.
bool hasInAllocaArgument() const
Determine if there are is an inalloca argument.
OperandBundleUse getOperandBundleAt(unsigned Index) const
Return the operand bundle at a specific index.
Function * getCalledFunction() const
Returns the function called, or null if this is an indirect function invocation or the function signa...
bool doesNotAccessMemory(unsigned OpNo) const
bool hasFnAttr(Attribute::AttrKind Kind) const
Determine whether this call has the given attribute.
unsigned getNumOperandBundles() const
Return the number of operand bundles associated with this User.
CallingConv::ID getCallingConv() const
LLVM_ABI bool paramHasAttr(unsigned ArgNo, Attribute::AttrKind Kind) const
Determine whether the argument or parameter has the given attribute.
Attribute getParamAttr(unsigned ArgNo, Attribute::AttrKind Kind) const
Get the attribute of a given kind from a given arg.
iterator_range< bundle_op_iterator > bundle_op_infos()
Return the range [bundle_op_info_begin, bundle_op_info_end).
unsigned countOperandBundlesOfType(StringRef Name) const
Return the number of operand bundles with the tag Name attached to this instruction.
bool onlyReadsMemory(unsigned OpNo) const
Value * getCalledOperand() const
Type * getParamElementType(unsigned ArgNo) const
Extract the elementtype type for a parameter.
Value * getArgOperand(unsigned i) const
FunctionType * getFunctionType() const
LLVM_ABI Intrinsic::ID getIntrinsicID() const
Returns the intrinsic ID of the intrinsic called or Intrinsic::not_intrinsic if the called function i...
iterator_range< User::op_iterator > args()
Iteration adapter for range-for loops.
bool doesNotReturn() const
Determine if the call cannot return.
LLVM_ABI bool onlyAccessesArgMemory() const
Determine if the call can access memmory only using pointers based on its arguments.
unsigned arg_size() const
AttributeList getAttributes() const
Return the attributes for this call.
bool hasOperandBundles() const
Return true if this User has any operand bundles.
LLVM_ABI Function * getCaller()
Helper to get the caller (the parent function).
bool isMustTailCall() const
static LLVM_ABI bool castIsValid(Instruction::CastOps op, Type *SrcTy, Type *DstTy)
This method can be used to determine if a cast from SrcTy to DstTy using Opcode op is valid or not.
unsigned getNumHandlers() const
return the number of 'handlers' in this catchswitch instruction, except the default handler
Value * getParentPad() const
BasicBlock * getUnwindDest() const
handler_range handlers()
iteration adapter for range-for loops.
BasicBlock * getUnwindDest() const
bool isFPPredicate() const
Definition InstrTypes.h:782
bool isIntPredicate() const
Definition InstrTypes.h:783
static bool isIntPredicate(Predicate P)
Definition InstrTypes.h:776
bool isMinusOne() const
This function will return true iff every bit in this constant is set to true.
Definition Constants.h:226
bool isNegative() const
Definition Constants.h:209
bool isZero() const
This is just a convenience method to make client code smaller for a common code.
Definition Constants.h:214
unsigned getBitWidth() const
getBitWidth - Return the scalar bitwidth of this constant.
Definition Constants.h:157
uint64_t getZExtValue() const
Return the constant as a 64-bit unsigned integer value after it has been zero extended as appropriate...
Definition Constants.h:163
const APInt & getValue() const
Return the constant as an APInt value reference.
Definition Constants.h:154
Constant * getAddrDiscriminator() const
The address discriminator if any, or the null constant.
Definition Constants.h:1072
Constant * getPointer() const
The pointer that is signed in this ptrauth signed pointer.
Definition Constants.h:1059
ConstantInt * getKey() const
The Key ID, an i32 constant.
Definition Constants.h:1062
ConstantInt * getDiscriminator() const
The integer discriminator, an i64 constant, or 0.
Definition Constants.h:1065
static LLVM_ABI bool isOrderedRanges(ArrayRef< ConstantRange > RangesRef)
This class represents a range of values.
const APInt & getLower() const
Return the lower value for this range.
const APInt & getUpper() const
Return the upper value for this range.
LLVM_ABI bool contains(const APInt &Val) const
Return true if the specified value is in the set.
uint32_t getBitWidth() const
Get the bit width of this ConstantRange.
static LLVM_ABI ConstantTokenNone * get(LLVMContext &Context)
Return the ConstantTokenNone.
LLVM_ABI bool isNullValue() const
Return true if this is the value that would be returned by getNullValue.
Definition Constants.cpp:90
LLVM_ABI std::optional< fp::ExceptionBehavior > getExceptionBehavior() const
LLVM_ABI std::optional< RoundingMode > getRoundingMode() const
LLVM_ABI unsigned getNonMetadataArgCount() const
DbgVariableFragmentInfo FragmentInfo
@ FixedPointBinary
Scale factor 2^Factor.
@ FixedPointDecimal
Scale factor 10^Factor.
@ FixedPointRational
Arbitrary rational scale factor.
DIGlobalVariable * getVariable() const
LLVM_ABI DISubprogram * getSubprogram() const
Get the subprogram for this scope.
DILocalScope * getScope() const
Get the local scope for this variable.
Metadata * getRawScope() const
Base class for scope-like contexts.
Subprogram description. Uses SubclassData1.
static const DIScope * getRawRetainedNodeScope(const MDNode *N)
Base class for template parameters.
Base class for variables.
Metadata * getRawType() const
Metadata * getRawScope() const
A parsed version of the target data layout string in and methods for querying it.
Definition DataLayout.h:63
Records a position in IR for a source label (DILabel).
Base class for non-instruction debug metadata records that have positions within IR.
LLVM_ABI void print(raw_ostream &O, bool IsForDebug=false) const
DebugLoc getDebugLoc() const
LLVM_ABI const BasicBlock * getParent() const
LLVM_ABI Function * getFunction()
Record of a variable value-assignment, aka a non instruction representation of the dbg....
LLVM_ABI Value * getVariableLocationOp(unsigned OpIdx) const
DIExpression * getExpression() const
DILocalVariable * getVariable() const
Metadata * getRawLocation() const
Returns the metadata operand for the first location description.
@ End
Marks the end of the concrete types.
@ Any
To indicate all LocationTypes in searches.
DIExpression * getAddressExpression() const
MDNode * getAsMDNode() const
Return this as a bar MDNode.
Definition DebugLoc.h:291
ValueT lookup(const_arg_type_t< KeyT > Val) const
lookup - Return the entry for the specified key, or a default constructed value if no such entry exis...
Definition DenseMap.h:205
iterator find(const_arg_type_t< KeyT > Val)
Definition DenseMap.h:178
bool empty() const
Definition DenseMap.h:109
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Definition DenseMap.h:241
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
Definition Dominators.h:164
This instruction extracts a single (scalar) element from a VectorType value.
static LLVM_ABI bool isValidOperands(const Value *Vec, const Value *Idx)
Return true if an extractelement instruction can be formed with the specified operands.
ArrayRef< unsigned > getIndices() const
static LLVM_ABI Type * getIndexedType(Type *Agg, ArrayRef< unsigned > Idxs)
Returns the type of the element that would be extracted with an extractvalue instruction with the spe...
This instruction compares its operands according to the predicate given to the constructor.
This class represents an extension of floating point types.
This class represents a cast from floating point to signed integer.
This class represents a cast from floating point to unsigned integer.
This class represents a truncation of floating point types.
AtomicOrdering getOrdering() const
Returns the ordering constraint of this fence instruction.
Value * getParentPad() const
Convenience accessors.
FunctionPass class - This class is used to implement most global optimizations.
Definition Pass.h:314
Type * getReturnType() const
FunctionType * getFunctionType() const
Returns the FunctionType for me.
Definition Function.h:209
Intrinsic::ID getIntrinsicID() const LLVM_READONLY
getIntrinsicID - This method returns the ID number of the specified function, or Intrinsic::not_intri...
Definition Function.h:244
DISubprogram * getSubprogram() const
Get the attached subprogram.
CallingConv::ID getCallingConv() const
getCallingConv()/setCallingConv(CC) - These method get and set the calling convention of this functio...
Definition Function.h:270
bool hasPersonalityFn() const
Check whether this function has a personality function.
Definition Function.h:903
const Function & getFunction() const
Definition Function.h:164
const std::string & getGC() const
Definition Function.cpp:834
Type * getReturnType() const
Returns the type of the ret val.
Definition Function.h:214
bool isVarArg() const
isVarArg - Return true if this function takes a variable number of arguments.
Definition Function.h:227
LLVM_ABI Value * getBasePtr() const
LLVM_ABI Value * getDerivedPtr() const
static LLVM_ABI Type * getIndexedType(Type *Ty, ArrayRef< Value * > IdxList)
Returns the result type of a getelementptr with the given source element type and indexes.
static bool isValidLinkage(LinkageTypes L)
Definition GlobalAlias.h:98
const Constant * getAliasee() const
Definition GlobalAlias.h:87
LLVM_ABI const Function * getResolverFunction() const
Definition Globals.cpp:665
static bool isValidLinkage(LinkageTypes L)
Definition GlobalIFunc.h:86
const Constant * getResolver() const
Definition GlobalIFunc.h:73
LLVM_ABI void getAllMetadata(SmallVectorImpl< std::pair< unsigned, MDNode * > > &MDs) const
Appends all metadata attached to this value to MDs, sorting by KindID.
bool hasComdat() const
MDNode * getMetadata(unsigned KindID) const
Get the current metadata attachments for the given kind, if any.
Definition Value.h:576
bool hasExternalLinkage() const
bool isDSOLocal() const
bool isImplicitDSOLocal() const
LLVM_ABI bool isDeclaration() const
Return true if the primary definition of this global value is outside of the current translation unit...
Definition Globals.cpp:328
bool hasValidDeclarationLinkage() const
LinkageTypes getLinkage() const
bool hasDefaultVisibility() const
bool hasPrivateLinkage() const
bool hasHiddenVisibility() const
bool hasExternalWeakLinkage() const
bool hasDLLImportStorageClass() const
bool hasDLLExportStorageClass() const
bool isDeclarationForLinker() const
unsigned getAddressSpace() const
Module * getParent()
Get the module that this global value is contained inside of...
PointerType * getType() const
Global values are always pointers.
LLVM_ABI bool isInterposable() const
Return true if this global's definition can be substituted with an arbitrary definition at link time ...
Definition Globals.cpp:107
bool hasComdat() const
bool hasCommonLinkage() const
bool hasGlobalUnnamedAddr() const
bool hasAppendingLinkage() const
bool hasAvailableExternallyLinkage() const
Type * getValueType() const
const Constant * getInitializer() const
getInitializer - Return the initializer for this global variable.
bool hasInitializer() const
Definitions have initializers, declarations don't.
MaybeAlign getAlign() const
Returns the alignment of the given variable.
bool isConstant() const
If the value is a global constant, its value is immutable throughout the runtime execution of the pro...
bool hasDefinitiveInitializer() const
hasDefinitiveInitializer - Whether the global variable has an initializer, and any other instances of...
This instruction compares its operands according to the predicate given to the constructor.
BasicBlock * getDestination(unsigned i)
Return the specified destination.
unsigned getNumDestinations() const
return the number of possible destinations in this indirectbr instruction.
unsigned getNumSuccessors() const
This instruction inserts a single (scalar) element into a VectorType value.
static LLVM_ABI bool isValidOperands(const Value *Vec, const Value *NewElt, const Value *Idx)
Return true if an insertelement instruction can be formed with the specified operands.
ArrayRef< unsigned > getIndices() const
Base class for instruction visitors.
Definition InstVisitor.h:78
void visit(Iterator Start, Iterator End)
Definition InstVisitor.h:87
LLVM_ABI unsigned getNumSuccessors() const LLVM_READONLY
Return the number of successors that this instruction has.
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
LLVM_ABI bool isAtomic() const LLVM_READONLY
Return true if this instruction has an AtomicOrdering of unordered or higher.
LLVM_ABI const Function * getFunction() const
Return the function this instruction belongs to.
This class represents a cast from an integer to a pointer.
static LLVM_ABI bool mayLowerToFunctionCall(Intrinsic::ID IID)
Check if the intrinsic might lower into a regular function call in the course of IR transformations.
Intrinsic::ID getIntrinsicID() const
Return the intrinsic ID of this intrinsic.
This is an important class for using LLVM in a threaded context.
Definition LLVMContext.h:68
bool isCleanup() const
Return 'true' if this landingpad instruction is a cleanup.
unsigned getNumClauses() const
Get the number of clauses for this landing pad.
bool isCatch(unsigned Idx) const
Return 'true' if the clause and index Idx is a catch clause.
bool isFilter(unsigned Idx) const
Return 'true' if the clause and index Idx is a filter clause.
Constant * getClause(unsigned Idx) const
Get the value of the clause at index Idx.
AtomicOrdering getOrdering() const
Returns the ordering constraint of this load instruction.
SyncScope::ID getSyncScopeID() const
Returns the synchronization scope ID of this load instruction.
Align getAlign() const
Return the alignment of the access that is being performed.
Metadata node.
Definition Metadata.h:1078
const MDOperand & getOperand(unsigned I) const
Definition Metadata.h:1442
bool isTemporary() const
Definition Metadata.h:1262
ArrayRef< MDOperand > operands() const
Definition Metadata.h:1440
unsigned getNumOperands() const
Return number of MDNode operands.
Definition Metadata.h:1448
bool isDistinct() const
Definition Metadata.h:1261
bool isResolved() const
Check if node is fully resolved.
Definition Metadata.h:1258
LLVMContext & getContext() const
Definition Metadata.h:1242
bool equalsStr(StringRef Str) const
Definition Metadata.h:922
Metadata * get() const
Definition Metadata.h:929
LLVM_ABI StringRef getString() const
Definition Metadata.cpp:618
static LLVM_ABI bool isTagMD(const Metadata *MD)
This class implements a map that also provides access to all stored values in a deterministic order.
Definition MapVector.h:36
static LLVM_ABI MetadataAsValue * getIfExists(LLVMContext &Context, Metadata *MD)
Definition Metadata.cpp:112
Metadata * getMetadata() const
Definition Metadata.h:201
Root of the metadata hierarchy.
Definition Metadata.h:64
LLVM_ABI void print(raw_ostream &OS, const Module *M=nullptr, bool IsForDebug=false) const
Print.
unsigned getMetadataID() const
Definition Metadata.h:104
Manage lifetime of a slot tracker for printing IR.
A Module instance is used to store all the information related to an LLVM module.
Definition Module.h:67
LLVM_ABI StringRef getName() const
LLVM_ABI void print(raw_ostream &ROS, bool IsForDebug=false) const
LLVM_ABI unsigned getNumOperands() const
iterator_range< op_iterator > operands()
Definition Metadata.h:1853
op_range incoming_values()
static LLVM_ABI PassRegistry * getPassRegistry()
getPassRegistry - Access the global registry object, which is automatically initialized at applicatio...
A set of analyses that are preserved following a run of a transformation pass.
Definition Analysis.h:112
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
Definition Analysis.h:118
This class represents a cast from a pointer to an address (non-capturing ptrtoint).
This class represents a cast from a pointer to an integer.
Value * getValue() const
Convenience accessor.
This class represents a sign extension of integer types.
This class represents a cast from signed integer to floating point.
static LLVM_ABI const char * areInvalidOperands(Value *Cond, Value *True, Value *False)
Return a string if the specified operands are invalid for a select operation, otherwise return null.
This instruction constructs a fixed permutation of two input vectors.
static LLVM_ABI bool isValidOperands(const Value *V1, const Value *V2, const Value *Mask)
Return true if a shufflevector instruction can be formed with the specified operands.
static LLVM_ABI void getShuffleMask(const Constant *Mask, SmallVectorImpl< int > &Result)
Convert the input shuffle mask operand to a vector of integers.
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
void insert_range(Range &&R)
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
void reserve(size_type N)
iterator insert(iterator I, T &&Elt)
void resize(size_type N)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
std::pair< StringRef, StringRef > split(char Separator) const
Split into two substrings around the first occurrence of a separator character.
Definition StringRef.h:702
static constexpr size_t npos
Definition StringRef.h:57
bool getAsInteger(unsigned Radix, T &Result) const
Parse the current string as an integer of the specified radix.
Definition StringRef.h:472
bool starts_with(StringRef Prefix) const
Check if this string starts with the given Prefix.
Definition StringRef.h:261
constexpr bool empty() const
empty - Check if the string is empty.
Definition StringRef.h:143
unsigned getNumElements() const
Random access to the elements.
LLVM_ABI Type * getTypeAtIndex(const Value *V) const
Given an index value into the type, return the type of the element.
Definition Type.cpp:718
LLVM_ABI bool isScalableTy(SmallPtrSetImpl< const Type * > &Visited) const
Returns true if this struct contains a scalable vector.
Definition Type.cpp:440
Verify that the TBAA Metadatas are valid.
Definition Verifier.h:40
LLVM_ABI bool visitTBAAMetadata(const Instruction *I, const MDNode *MD)
Visit an instruction, or a TBAA node itself as part of a metadata, and return true if it is valid,...
unsigned size() const
Triple - Helper class for working with autoconf configuration names.
Definition Triple.h:47
This class represents a truncation of integer types.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition Twine.h:82
The instances of the Type class are immutable: once they are created, they are never changed.
Definition Type.h:45
bool isVectorTy() const
True if this is an instance of VectorType.
Definition Type.h:273
LLVM_ABI bool containsNonGlobalTargetExtType(SmallPtrSetImpl< const Type * > &Visited) const
Return true if this type is or contains a target extension type that disallows being used as a global...
Definition Type.cpp:74
bool isArrayTy() const
True if this is an instance of ArrayType.
Definition Type.h:264
LLVM_ABI bool containsNonLocalTargetExtType(SmallPtrSetImpl< const Type * > &Visited) const
Return true if this type is or contains a target extension type that disallows being used as a local.
Definition Type.cpp:90
LLVM_ABI bool isScalableTy(SmallPtrSetImpl< const Type * > &Visited) const
Return true if this is a type whose size is a known multiple of vscale.
Definition Type.cpp:61
bool isLabelTy() const
Return true if this is 'label'.
Definition Type.h:228
bool isIntOrIntVectorTy() const
Return true if this is an integer type or a vector of integer types.
Definition Type.h:246
bool isPointerTy() const
True if this is an instance of PointerType.
Definition Type.h:267
bool isTokenLikeTy() const
Returns true if this is 'token' or a token-like target type.s.
Definition Type.cpp:1065
LLVM_ABI unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
bool isSingleValueType() const
Return true if the type is a valid type for a register in codegen.
Definition Type.h:296
LLVM_ABI bool canLosslesslyBitCastTo(Type *Ty) const
Return true if this type could be converted with a lossless BitCast to type 'Ty'.
Definition Type.cpp:153
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
Definition Type.h:352
bool isSized(SmallPtrSetImpl< Type * > *Visited=nullptr) const
Return true if it makes sense to take the size of this type.
Definition Type.h:311
LLVM_ABI unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
Definition Type.cpp:230
bool isFloatingPointTy() const
Return true if this is one of the floating-point types.
Definition Type.h:184
bool isPtrOrPtrVectorTy() const
Return true if this is a pointer type or a vector of pointer types.
Definition Type.h:270
bool isIntOrPtrTy() const
Return true if this is an integer type or a pointer type.
Definition Type.h:255
bool isIntegerTy() const
True if this is an instance of IntegerType.
Definition Type.h:240
bool isFPOrFPVectorTy() const
Return true if this is a FP type or a vector of FP.
Definition Type.h:225
bool isVoidTy() const
Return true if this is 'void'.
Definition Type.h:139
bool isMetadataTy() const
Return true if this is 'metadata'.
Definition Type.h:231
This class represents a cast unsigned integer to floating point.
op_range operands()
Definition User.h:292
Value * getOperand(unsigned i) const
Definition User.h:232
unsigned getNumOperands() const
Definition User.h:254
This class represents the va_arg llvm instruction, which returns an argument of the specified type gi...
Value * getValue() const
Definition Metadata.h:498
LLVM Value Representation.
Definition Value.h:75
iterator_range< user_iterator > materialized_users()
Definition Value.h:420
Type * getType() const
All values are typed, get the type of this value.
Definition Value.h:256
LLVM_ABI const Value * stripInBoundsOffsets(function_ref< void(const Value *)> Func=[](const Value *) {}) const
Strip off pointer casts and inbounds GEPs.
Definition Value.cpp:812
iterator_range< user_iterator > users()
Definition Value.h:426
bool materialized_use_empty() const
Definition Value.h:351
LLVM_ABI const Value * stripPointerCasts() const
Strip off pointer casts, all-zero GEPs and address space casts.
Definition Value.cpp:701
LLVM_ABI LLVMContext & getContext() const
All values hold a context through their type.
Definition Value.cpp:1099
bool hasName() const
Definition Value.h:262
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
Definition Value.cpp:322
Check a module for errors, and report separate error states for IR and debug info errors.
Definition Verifier.h:109
LLVM_ABI Result run(Module &M, ModuleAnalysisManager &)
LLVM_ABI PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM)
This class represents zero extension of integer types.
constexpr bool isNonZero() const
Definition TypeSize.h:155
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
Definition TypeSize.h:168
constexpr ScalarTy getKnownMinValue() const
Returns the minimum value this quantity can represent.
Definition TypeSize.h:165
An efficient, type-erasing, non-owning reference to a callable.
const ParentTy * getParent() const
Definition ilist_node.h:34
NodeTy * getNextNode()
Get the next node, or nullptr for the list tail.
Definition ilist_node.h:348
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition raw_ostream.h:53
CallInst * Call
This file contains the declaration of the Comdat class, which represents a single COMDAT in LLVM.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
@ FLAT_ADDRESS
Address space for flat memory.
@ GLOBAL_ADDRESS
Address space for global memory (RAT0, VTX0).
@ PRIVATE_ADDRESS
Address space for private memory.
constexpr char Align[]
Key for Kernel::Arg::Metadata::mAlign.
constexpr char Args[]
Key for Kernel::Metadata::mArgs.
constexpr char Attrs[]
Key for Kernel::Metadata::mAttrs.
bool isFlatGlobalAddrSpace(unsigned AS)
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
@ Entry
Definition COFF.h:862
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
Definition CallingConv.h:24
@ C
The default llvm calling convention, compatible with C.
Definition CallingConv.h:34
@ BasicBlock
Various leaf nodes.
Definition ISDOpcodes.h:81
LLVM_ABI MatchIntrinsicTypesResult matchIntrinsicSignature(FunctionType *FTy, ArrayRef< IITDescriptor > &Infos, SmallVectorImpl< Type * > &ArgTys)
Match the specified function type with the type constraints specified by the .td file.
LLVM_ABI void getIntrinsicInfoTableEntries(ID id, SmallVectorImpl< IITDescriptor > &T)
Return the IIT table descriptor for the specified intrinsic into an array of IITDescriptors.
@ MatchIntrinsicTypes_NoMatchRet
Definition Intrinsics.h:245
@ MatchIntrinsicTypes_NoMatchArg
Definition Intrinsics.h:246
LLVM_ABI bool hasConstrainedFPRoundingModeOperand(ID QID)
Returns true if the intrinsic ID is for one of the "ConstrainedFloating-Point Intrinsics" that take r...
LLVM_ABI StringRef getName(ID id)
Return the LLVM name for an intrinsic, such as "llvm.ppc.altivec.lvx".
static const int NoAliasScopeDeclScopeArg
Definition Intrinsics.h:41
LLVM_ABI bool matchIntrinsicVarArg(bool isVarArg, ArrayRef< IITDescriptor > &Infos)
Verify if the intrinsic has variable arguments.
std::variant< std::monostate, Loc::Single, Loc::Multi, Loc::MMI, Loc::EntryValue > Variant
Alias for the std::variant specialization base class of DbgVariable.
Definition DwarfDebug.h:189
Flag
These should be considered private to the implementation of the MCInstrDesc class.
@ System
Synchronized with respect to all concurrently executing threads.
Definition LLVMContext.h:58
LLVM_ABI std::optional< VFInfo > tryDemangleForVFABI(StringRef MangledName, const FunctionType *FTy)
Function to construct a VFInfo out of a mangled names in the following format:
@ CE
Windows NT (Windows on ARM)
Definition MCAsmInfo.h:48
LLVM_ABI AssignmentInstRange getAssignmentInsts(DIAssignID *ID)
Return a range of instructions (typically just one) that have ID as an attachment.
initializer< Ty > init(const Ty &Val)
@ DW_MACINFO_undef
Definition Dwarf.h:818
@ DW_MACINFO_start_file
Definition Dwarf.h:819
@ DW_MACINFO_define
Definition Dwarf.h:817
std::enable_if_t< detail::IsValidPointer< X, Y >::value, X * > dyn_extract_or_null(Y &&MD)
Extract a Value from Metadata, if any, allowing null.
Definition Metadata.h:708
std::enable_if_t< detail::IsValidPointer< X, Y >::value, X * > dyn_extract(Y &&MD)
Extract a Value from Metadata, if any.
Definition Metadata.h:695
std::enable_if_t< detail::IsValidPointer< X, Y >::value, X * > extract(Y &&MD)
Extract a Value from Metadata.
Definition Metadata.h:667
@ User
could "use" a pointer
NodeAddr< UseNode * > Use
Definition RDFGraph.h:385
NodeAddr< NodeBase * > Node
Definition RDFGraph.h:381
friend class Instruction
Iterator for Instructions in a `BasicBlock.
Definition BasicBlock.h:73
This is an optimization pass for GlobalISel generic memory operations.
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
Definition STLExtras.h:316
@ Low
Lower the current thread's priority such that it does not affect foreground tasks significantly.
Definition Threading.h:280
@ Offset
Definition DWP.cpp:532
FunctionAddr VTableAddr Value
Definition InstrProf.h:137
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1725
LLVM_ABI bool canInstructionHaveMMRAs(const Instruction &I)
detail::zippy< detail::zip_first, T, U, Args... > zip_equal(T &&t, U &&u, Args &&...args)
zip iterator that assumes that all iteratees have the same length.
Definition STLExtras.h:839
LLVM_ABI unsigned getBranchWeightOffset(const MDNode *ProfileData)
Return the offset to the first branch weight data.
constexpr bool isInt(int64_t x)
Checks if an integer fits into the given bit width.
Definition MathExtras.h:165
auto enumerate(FirstRange &&First, RestRanges &&...Rest)
Given two or more input ranges, returns a new range whose values are tuples (A, B,...
Definition STLExtras.h:2472
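all_of and enumerate are the STLExtras range helpers that this kind of per-operand checking is usually written with. A small sketch (the PHI-type check itself is illustrative):

#include "llvm/ADT/STLExtras.h"
#include "llvm/IR/Instructions.h"
using namespace llvm;

// Illustrative: every incoming value of a PHI should have the PHI's type.
static bool incomingTypesMatch(const PHINode &PN) {
  return all_of(PN.incoming_values(),
                [&](const Use &U) { return U->getType() == PN.getType(); });
}

// enumerate pairs each incoming value with its index, handy for reporting
// which operand is the offending one.
static int firstMismatchedIncoming(const PHINode &PN) {
  for (const auto &[Idx, V] : enumerate(PN.incoming_values()))
    if (V->getType() != PN.getType())
      return static_cast<int>(Idx);
  return -1;
}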
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:643
LLVM_ABI bool verifyFunction(const Function &F, raw_ostream *OS=nullptr)
Check a function for errors, useful for use when debugging a pass.
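verifyFunction returns true when the function is broken, so the usual pattern after a transformation is to print to errs() and bail out. A minimal sketch (the fatal-error wording is illustrative):

#include "llvm/IR/Function.h"
#include "llvm/IR/Verifier.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

// Illustrative: abort with a diagnostic if F fails verification.
static void checkOrDie(const Function &F) {
  if (verifyFunction(F, &errs()))
    report_fatal_error("broken function found, compilation aborted");
}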
AllocFnKind
Definition Attributes.h:51
testing::Matcher< const detail::ErrorHolder & > Failed()
Definition Error.h:198
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
Definition STLExtras.h:2136
LLVM_ABI DenseMap< BasicBlock *, ColorVector > colorEHFunclets(Function &F)
If an EH funclet personality is in use (see isFuncletEHPersonality), this will recompute which blocks...
constexpr bool isPowerOf2_64(uint64_t Value)
Return true if the argument is a power of two > 0 (64-bit edition).
Definition MathExtras.h:284
bool isa_and_nonnull(const Y &Val)
Definition Casting.h:676
Op::Description Desc
bool isScopedEHPersonality(EHPersonality Pers)
Returns true if this personality uses scope-style EH IR instructions: catchswitch,...
auto dyn_cast_or_null(const Y &Val)
Definition Casting.h:753
GenericConvergenceVerifier< SSAContext > ConvergenceVerifier
LLVM_ABI void initializeVerifierLegacyPassPass(PassRegistry &)
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
Definition MathExtras.h:279
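isPowerOf2_32/isPowerOf2_64 (together with isInt above) are the MathExtras helpers such structural checks lean on, e.g. for alignments and immediate offsets. A small sketch with illustrative limits:

#include "llvm/Support/MathExtras.h"
#include <cstdint>
using namespace llvm;

// Illustrative: a raw byte-count alignment must be a power of two
// (isPowerOf2_64 already rejects zero).
static bool isPlausibleAlignment(uint64_t AlignBytes) {
  return isPowerOf2_64(AlignBytes);
}

// isInt<N> asks whether a signed value fits in N bits, e.g. a 32-bit offset.
static bool fitsInI32(int64_t Offset) { return isInt<32>(Offset); }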
bool isModSet(const ModRefInfo MRI)
Definition ModRef.h:49
void sort(IteratorTy Start, IteratorTy End)
Definition STLExtras.h:1622
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition Debug.cpp:207
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
Definition Error.cpp:167
FunctionAddr VTableAddr Count
Definition InstrProf.h:139
LLVM_ABI EHPersonality classifyEHPersonality(const Value *Pers)
See if the given exception handling personality function is one that we understand.
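classifyEHPersonality, isScopedEHPersonality and colorEHFunclets (above) are typically used together when checking funclet-based EH. A hedged sketch, assuming the declarations from llvm/IR/EHPersonalities.h:

#include "llvm/ADT/DenseMap.h"
#include "llvm/IR/EHPersonalities.h"
#include "llvm/IR/Function.h"
using namespace llvm;

// Illustrative: for a scoped (funclet-style) personality, compute the funclet
// coloring so later checks can ask which funclet each block belongs to.
static void inspectFuncletColors(Function &F) {
  if (!F.hasPersonalityFn())
    return;
  EHPersonality Pers = classifyEHPersonality(F.getPersonalityFn());
  if (!isScopedEHPersonality(Pers))
    return;
  DenseMap<BasicBlock *, ColorVector> Colors = colorEHFunclets(F);
  (void)Colors; // a real check would walk Colors here
}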
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
Definition Casting.h:547
LLVM_ABI bool isValueProfileMD(const MDNode *ProfileData)
Checks if an MDNode contains value profiling Metadata.
LLVM_ABI raw_fd_ostream & errs()
This returns a reference to a raw_ostream for standard error.
LLVM_ABI unsigned getNumBranchWeights(const MDNode &ProfileData)
AtomicOrdering
Atomic ordering for LLVM's memory model.
@ First
Helpers to iterate all locations in the MemoryEffectsBase class.
Definition ModRef.h:74
LLVM_ABI FunctionPass * createVerifierPass(bool FatalErrors=true)
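createVerifierPass wraps the same checks as a pass; with FatalErrors=true a broken function aborts compilation. A usage sketch with the legacy pass manager (with the new pass manager the rough equivalent is adding VerifierPass to the pipeline):

#include "llvm/IR/LegacyPassManager.h"
#include "llvm/IR/Verifier.h"
using namespace llvm;

// Illustrative: run the verifier at the end of a hand-built legacy pipeline.
static void addVerifier(legacy::PassManager &PM) {
  PM.add(createVerifierPass(/*FatalErrors=*/true));
}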
FunctionAddr VTableAddr Next
Definition InstrProf.h:141
DWARFExpression::Operation Op
ArrayRef(const T &OneElt) -> ArrayRef< T >
constexpr unsigned BitWidth
TinyPtrVector< BasicBlock * > ColorVector
LLVM_ABI const char * LLVMLoopEstimatedTripCount
Profile-based loop metadata that should be accessed only by using llvm::getLoopEstimatedTripCount and...
DenormalMode parseDenormalFPAttribute(StringRef Str)
Returns the denormal mode to use for inputs and outputs.
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:559
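isa<>, cast<> and dyn_cast<> (listed above and here) are the casting idioms such checks are written in: isa<> only answers the type question, cast<> asserts on a mismatch, and dyn_cast<> returns null instead. A short illustration on IR values:

#include "llvm/IR/Constants.h"
#include "llvm/IR/Instructions.h"
#include "llvm/Support/Casting.h"
using namespace llvm;

// Illustrative: is this instruction a conditional branch on a constant?
static bool hasConstantCondition(const Instruction &I) {
  if (const auto *BI = dyn_cast<BranchInst>(&I))
    return BI->isConditional() && isa<Constant>(BI->getCondition());
  return false;
}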
LLVM_ABI std::optional< RoundingMode > convertStrToRoundingMode(StringRef)
Returns a valid RoundingMode enumerator when given a string that is valid as input in constrained int...
Definition FPEnv.cpp:25
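convertStrToRoundingMode maps the metadata strings used by the constrained FP intrinsics onto RoundingMode values, yielding std::nullopt for an unrecognized spelling. A small sketch (the "round.tonearest" spelling in the comment is an assumed example):

#include "llvm/ADT/StringRef.h"
#include "llvm/IR/FPEnv.h"
#include <optional>
using namespace llvm;

// Illustrative: validate the rounding-mode string operand of a constrained
// intrinsic call, e.g. "round.tonearest".
static bool isKnownRoundingModeString(StringRef S) {
  return convertStrToRoundingMode(S).has_value();
}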
LLVM_ABI std::unique_ptr< GCStrategy > getGCStrategy(const StringRef Name)
Lookup the GCStrategy object associated with the given gc name.
auto predecessors(const MachineBasicBlock *BB)
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Definition STLExtras.h:1897
bool pred_empty(const BasicBlock *BB)
Definition CFG.h:119
bool isHexDigit(char C)
Checks if character C is a hexadecimal numeric character.
AnalysisManager< Function > FunctionAnalysisManager
Convenience typedef for the Function analysis manager.
constexpr bool isCallableCC(CallingConv::ID CC)
LLVM_ABI bool verifyModule(const Module &M, raw_ostream *OS=nullptr, bool *BrokenDebugInfo=nullptr)
Check a module for errors.
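verifyModule returns true if the module is broken; the optional BrokenDebugInfo out-parameter lets a caller treat debug-info-only damage separately, since that can be repaired by stripping the debug info rather than rejecting the module. A minimal sketch:

#include "llvm/IR/Module.h"
#include "llvm/IR/Verifier.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

// Illustrative: reject structurally broken modules, but only warn when the
// sole problem is malformed debug info.
static bool moduleIsUsable(const Module &M) {
  bool BrokenDbg = false;
  if (verifyModule(M, &errs(), &BrokenDbg))
    return false;
  if (BrokenDbg)
    errs() << "warning: module has broken debug info\n";
  return true;
}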
AnalysisManager< Module > ModuleAnalysisManager
Convenience typedef for the Module analysis manager.
Definition MIRParser.h:39
#define N
constexpr uint64_t value() const
This is a hole in the type system and should not be abused.
Definition Alignment.h:77
A special type used by analysis passes to provide an address that identifies that particular analysis...
Definition Analysis.h:29
static LLVM_ABI const char * SyntheticFunctionEntryCount
static LLVM_ABI const char * UnknownBranchWeightsMarker
static LLVM_ABI const char * ValueProfile
static LLVM_ABI const char * FunctionEntryCount
static LLVM_ABI const char * BranchWeights
uint32_t getTagID() const
Return the tag of this operand bundle as an integer.
ArrayRef< Use > Inputs
void DebugInfoCheckFailed(const Twine &Message)
A debug info check failed.
Definition Verifier.cpp:305
VerifierSupport(raw_ostream *OS, const Module &M)
Definition Verifier.cpp:154
bool Broken
Track the brokenness of the module while recursively visiting.
Definition Verifier.cpp:148
void CheckFailed(const Twine &Message, const T1 &V1, const Ts &... Vs)
A check failed (with values to print).
Definition Verifier.cpp:298
bool BrokenDebugInfo
Broken debug info can be "recovered" from by stripping the debug info.
Definition Verifier.cpp:150
LLVMContext & Context
Definition Verifier.cpp:145
bool TreatBrokenDebugInfoAsError
Whether to treat broken debug info as an error.
Definition Verifier.cpp:152
void CheckFailed(const Twine &Message)
A check failed, so print out the condition and the message.
Definition Verifier.cpp:287
const Module & M
Definition Verifier.cpp:141
const DataLayout & DL
Definition Verifier.cpp:144
void DebugInfoCheckFailed(const Twine &Message, const T1 &V1, const Ts &... Vs)
A debug info check failed (with values to print).
Definition Verifier.cpp:314
const Triple & TT
Definition Verifier.cpp:143
ModuleSlotTracker MST
Definition Verifier.cpp:142