LLVM 23.0.0git
Verifier.cpp
Go to the documentation of this file.
1//===-- Verifier.cpp - Implement the Module Verifier -----------------------==//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file defines the function verifier interface, that can be used for some
10// basic correctness checking of input to the system.
11//
12// Note that this does not provide full `Java style' security and verifications,
13// instead it just tries to ensure that code is well-formed.
14//
15// * Both of a binary operator's parameters are of the same type
16// * Verify that the indices of mem access instructions match other operands
17// * Verify that arithmetic and other things are only performed on first-class
18// types. Verify that shifts & logicals only happen on integrals f.e.
19// * All of the constants in a switch statement are of the correct type
20// * The code is in valid SSA form
21// * It should be illegal to put a label into any other type (like a structure)
22// or to return one. [except constant arrays!]
23// * Only phi nodes can be self referential: 'add i32 %0, %0 ; <int>:0' is bad
24// * PHI nodes must have an entry for each predecessor, with no extras.
25// * PHI nodes must be the first thing in a basic block, all grouped together
26// * All basic blocks should only end with terminator insts, not contain them
27// * The entry node to a function must not have predecessors
28// * All Instructions must be embedded into a basic block
29// * Functions cannot take a void-typed parameter
// * Verify that a function's argument list agrees with its declared type.
31// * It is illegal to specify a name for a void value.
// * It is illegal to have an internal global value with no initializer
33// * It is illegal to have a ret instruction that returns a value that does not
34// agree with the function return value type.
35// * Function call argument types match the function prototype
36// * A landing pad is defined by a landingpad instruction, and can be jumped to
37// only by the unwind edge of an invoke instruction.
38// * A landingpad instruction must be the first non-PHI instruction in the
39// block.
40// * Landingpad instructions must be in a function with a personality function.
41// * Convergence control intrinsics are introduced in ConvergentOperations.rst.
42// The applied restrictions are too numerous to list here.
43// * The convergence entry intrinsic and the loop heart must be the first
44// non-PHI instruction in their respective block. This does not conflict with
45// the landing pads, since these two kinds cannot occur in the same block.
46// * All other things that are tested by asserts spread about the code...
47//
48//===----------------------------------------------------------------------===//
49
50#include "llvm/IR/Verifier.h"
51#include "llvm/ADT/APFloat.h"
52#include "llvm/ADT/APInt.h"
53#include "llvm/ADT/ArrayRef.h"
54#include "llvm/ADT/DenseMap.h"
55#include "llvm/ADT/MapVector.h"
56#include "llvm/ADT/STLExtras.h"
60#include "llvm/ADT/StringRef.h"
61#include "llvm/ADT/Twine.h"
63#include "llvm/IR/Argument.h"
65#include "llvm/IR/Attributes.h"
66#include "llvm/IR/BasicBlock.h"
67#include "llvm/IR/CFG.h"
68#include "llvm/IR/CallingConv.h"
69#include "llvm/IR/Comdat.h"
70#include "llvm/IR/Constant.h"
73#include "llvm/IR/Constants.h"
75#include "llvm/IR/DataLayout.h"
76#include "llvm/IR/DebugInfo.h"
78#include "llvm/IR/DebugLoc.h"
80#include "llvm/IR/Dominators.h"
82#include "llvm/IR/FPEnv.h"
83#include "llvm/IR/Function.h"
84#include "llvm/IR/GCStrategy.h"
86#include "llvm/IR/GlobalAlias.h"
87#include "llvm/IR/GlobalValue.h"
89#include "llvm/IR/InlineAsm.h"
90#include "llvm/IR/InstVisitor.h"
91#include "llvm/IR/InstrTypes.h"
92#include "llvm/IR/Instruction.h"
95#include "llvm/IR/Intrinsics.h"
96#include "llvm/IR/IntrinsicsAArch64.h"
97#include "llvm/IR/IntrinsicsAMDGPU.h"
98#include "llvm/IR/IntrinsicsARM.h"
99#include "llvm/IR/IntrinsicsNVPTX.h"
100#include "llvm/IR/IntrinsicsWebAssembly.h"
101#include "llvm/IR/LLVMContext.h"
103#include "llvm/IR/Metadata.h"
104#include "llvm/IR/Module.h"
106#include "llvm/IR/PassManager.h"
108#include "llvm/IR/Statepoint.h"
109#include "llvm/IR/Type.h"
110#include "llvm/IR/Use.h"
111#include "llvm/IR/User.h"
113#include "llvm/IR/Value.h"
115#include "llvm/Pass.h"
119#include "llvm/Support/Casting.h"
123#include "llvm/Support/ModRef.h"
127#include <algorithm>
128#include <cassert>
129#include <cstdint>
130#include <memory>
131#include <optional>
132#include <queue>
133#include <string>
134#include <utility>
135
136using namespace llvm;
137
139 "verify-noalias-scope-decl-dom", cl::Hidden, cl::init(false),
140 cl::desc("Ensure that llvm.experimental.noalias.scope.decl for identical "
141 "scopes are not dominating"));
142
145 const Module &M;
147 const Triple &TT;
150
151 /// Track the brokenness of the module while recursively visiting.
152 bool Broken = false;
153 /// Broken debug info can be "recovered" from by stripping the debug info.
154 bool BrokenDebugInfo = false;
155 /// Whether to treat broken debug info as an error.
157
159 : OS(OS), M(M), MST(&M), TT(M.getTargetTriple()), DL(M.getDataLayout()),
160 Context(M.getContext()) {}
161
162private:
163 void Write(const Module *M) {
164 *OS << "; ModuleID = '" << M->getModuleIdentifier() << "'\n";
165 }
166
167 void Write(const Value *V) {
168 if (V)
169 Write(*V);
170 }
171
172 void Write(const Value &V) {
173 if (isa<Instruction>(V)) {
174 V.print(*OS, MST);
175 *OS << '\n';
176 } else {
177 V.printAsOperand(*OS, true, MST);
178 *OS << '\n';
179 }
180 }
181
182 void Write(const DbgRecord *DR) {
183 if (DR) {
184 DR->print(*OS, MST, false);
185 *OS << '\n';
186 }
187 }
188
190 switch (Type) {
192 *OS << "value";
193 break;
195 *OS << "declare";
196 break;
198 *OS << "declare_value";
199 break;
201 *OS << "assign";
202 break;
204 *OS << "end";
205 break;
207 *OS << "any";
208 break;
209 };
210 }
211
212 void Write(const Metadata *MD) {
213 if (!MD)
214 return;
215 MD->print(*OS, MST, &M);
216 *OS << '\n';
217 }
218
  /// Print a typed MD-tuple wrapper by unwrapping it to the raw node and
  /// forwarding to the Metadata overload.
  template <class T> void Write(const MDTupleTypedArrayWrapper<T> &MD) {
    Write(MD.get());
  }
222
223 void Write(const NamedMDNode *NMD) {
224 if (!NMD)
225 return;
226 NMD->print(*OS, MST);
227 *OS << '\n';
228 }
229
230 void Write(Type *T) {
231 if (!T)
232 return;
233 *OS << ' ' << *T;
234 }
235
236 void Write(const Comdat *C) {
237 if (!C)
238 return;
239 *OS << *C;
240 }
241
242 void Write(const APInt *AI) {
243 if (!AI)
244 return;
245 *OS << *AI << '\n';
246 }
247
248 void Write(const unsigned i) { *OS << i << '\n'; }
249
250 // NOLINTNEXTLINE(readability-identifier-naming)
251 void Write(const Attribute *A) {
252 if (!A)
253 return;
254 *OS << A->getAsString() << '\n';
255 }
256
257 // NOLINTNEXTLINE(readability-identifier-naming)
258 void Write(const AttributeSet *AS) {
259 if (!AS)
260 return;
261 *OS << AS->getAsString() << '\n';
262 }
263
264 // NOLINTNEXTLINE(readability-identifier-naming)
265 void Write(const AttributeList *AL) {
266 if (!AL)
267 return;
268 AL->print(*OS);
269 }
270
271 void Write(Printable P) { *OS << P << '\n'; }
272
273 template <typename T> void Write(ArrayRef<T> Vs) {
274 for (const T &V : Vs)
275 Write(V);
276 }
277
  /// Recursively print a heterogeneous pack of diagnostic values, one
  /// Write call per argument.
  template <typename T1, typename... Ts>
  void WriteTs(const T1 &V1, const Ts &... Vs) {
    Write(V1);
    WriteTs(Vs...);
  }

  /// Recursion base case: nothing left to print.
  template <typename... Ts> void WriteTs() {}
285
286public:
287 /// A check failed, so printout out the condition and the message.
288 ///
289 /// This provides a nice place to put a breakpoint if you want to see why
290 /// something is not correct.
291 void CheckFailed(const Twine &Message) {
292 if (OS)
293 *OS << Message << '\n';
294 Broken = true;
295 }
296
297 /// A check failed (with values to print).
298 ///
299 /// This calls the Message-only version so that the above is easier to set a
300 /// breakpoint on.
301 template <typename T1, typename... Ts>
302 void CheckFailed(const Twine &Message, const T1 &V1, const Ts &... Vs) {
303 CheckFailed(Message);
304 if (OS)
305 WriteTs(V1, Vs...);
306 }
307
308 /// A debug info check failed.
309 void DebugInfoCheckFailed(const Twine &Message) {
310 if (OS)
311 *OS << Message << '\n';
313 BrokenDebugInfo = true;
314 }
315
316 /// A debug info check failed (with values to print).
317 template <typename T1, typename... Ts>
318 void DebugInfoCheckFailed(const Twine &Message, const T1 &V1,
319 const Ts &... Vs) {
320 DebugInfoCheckFailed(Message);
321 if (OS)
322 WriteTs(V1, Vs...);
323 }
324};
325
326namespace {
327
328class Verifier : public InstVisitor<Verifier>, VerifierSupport {
329 friend class InstVisitor<Verifier>;
330 DominatorTree DT;
331
332 /// When verifying a basic block, keep track of all of the
333 /// instructions we have seen so far.
334 ///
335 /// This allows us to do efficient dominance checks for the case when an
336 /// instruction has an operand that is an instruction in the same block.
337 SmallPtrSet<Instruction *, 16> InstsInThisBlock;
338
339 /// Keep track of the metadata nodes that have been checked already.
341
342 /// Keep track which DISubprogram is attached to which function.
344
345 /// Track all DICompileUnits visited.
347
348 /// The result type for a landingpad.
349 Type *LandingPadResultTy;
350
351 /// Whether we've seen a call to @llvm.localescape in this function
352 /// already.
353 bool SawFrameEscape;
354
355 /// Whether the current function has a DISubprogram attached to it.
356 bool HasDebugInfo = false;
357
358 /// Stores the count of how many objects were passed to llvm.localescape for a
359 /// given function and the largest index passed to llvm.localrecover.
361
362 // Maps catchswitches and cleanuppads that unwind to siblings to the
363 // terminators that indicate the unwind, used to detect cycles therein.
365
366 /// Cache which blocks are in which funclet, if an EH funclet personality is
367 /// in use. Otherwise empty.
368 DenseMap<BasicBlock *, ColorVector> BlockEHFuncletColors;
369
370 /// Cache of constants visited in search of ConstantExprs.
371 SmallPtrSet<const Constant *, 32> ConstantExprVisited;
372
373 /// Cache of declarations of the llvm.experimental.deoptimize.<ty> intrinsic.
374 SmallVector<const Function *, 4> DeoptimizeDeclarations;
375
376 /// Cache of attribute lists verified.
377 SmallPtrSet<const void *, 32> AttributeListsVisited;
378
379 // Verify that this GlobalValue is only used in this module.
380 // This map is used to avoid visiting uses twice. We can arrive at a user
381 // twice, if they have multiple operands. In particular for very large
382 // constant expressions, we can arrive at a particular user many times.
383 SmallPtrSet<const Value *, 32> GlobalValueVisited;
384
385 // Keeps track of duplicate function argument debug info.
387
388 TBAAVerifier TBAAVerifyHelper;
389 ConvergenceVerifier ConvergenceVerifyHelper;
390
391 SmallVector<IntrinsicInst *, 4> NoAliasScopeDecls;
392
393 void checkAtomicMemAccessSize(Type *Ty, const Instruction *I);
394
395public:
  /// Build a verifier bound to module \p M. Diagnostics are written to
  /// \p OS (which may be null to suppress printing); when
  /// \p ShouldTreatBrokenDebugInfoAsError is set, broken debug info fails
  /// verification instead of merely being reported as recoverable.
  explicit Verifier(raw_ostream *OS, bool ShouldTreatBrokenDebugInfoAsError,
                    const Module &M)
      : VerifierSupport(OS, M), LandingPadResultTy(nullptr),
        SawFrameEscape(false), TBAAVerifyHelper(this) {
    TreatBrokenDebugInfoAsError = ShouldTreatBrokenDebugInfoAsError;
  }
402
403 bool hasBrokenDebugInfo() const { return BrokenDebugInfo; }
404
  /// Run per-function verification on \p F and return true if it is
  /// well-formed. Per-function scratch state is reset before returning so
  /// this instance can be reused for the next function.
  bool verify(const Function &F) {
    llvm::TimeTraceScope timeScope("Verifier");
    assert(F.getParent() == &M &&
           "An instance of this class only works with a specific module!");

    // First ensure the function is well-enough formed to compute dominance
    // information, and directly compute a dominance tree. We don't rely on the
    // pass manager to provide this as it isolates us from a potentially
    // out-of-date dominator tree and makes it significantly more complex to run
    // this code outside of a pass manager.

    // First check that every basic block has a terminator, otherwise we can't
    // even inspect the CFG.
    for (const BasicBlock &BB : F) {
      if (!BB.empty() && BB.back().isTerminator())
        continue;

      // Report the offending block (when a stream was provided) and bail out
      // before attempting any CFG-based analysis.
      if (OS) {
        *OS << "Basic Block in function '" << F.getName()
            << "' does not have terminator!\n";
        BB.printAsOperand(*OS, true, MST);
        *OS << "\n";
      }
      return false;
    }

    // FIXME: It's really gross that we have to cast away constness here.
    if (!F.empty())
      DT.recalculate(const_cast<Function &>(F));

    // Any convergence-verifier failure is funneled into the common
    // CheckFailed path so it marks the module broken like any other check.
    auto FailureCB = [this](const Twine &Message) {
      this->CheckFailed(Message);
    };
    ConvergenceVerifyHelper.initialize(OS, FailureCB, F);

    Broken = false;
    // FIXME: We strip const here because the inst visitor strips const.
    visit(const_cast<Function &>(F));
    verifySiblingFuncletUnwinds();

    // Convergence checks need the dominator tree, so they run after the walk.
    if (ConvergenceVerifyHelper.sawTokens())
      ConvergenceVerifyHelper.verify(DT);

    // Reset per-function state for the next invocation; the noalias scope
    // declarations collected during the walk are checked before clearing.
    InstsInThisBlock.clear();
    DebugFnArgs.clear();
    LandingPadResultTy = nullptr;
    SawFrameEscape = false;
    SiblingFuncletInfo.clear();
    verifyNoAliasScopeDecl();
    NoAliasScopeDecls.clear();

    return !Broken;
  }
458
  /// Verify the module that this instance of \c Verifier was initialized
  /// with: globals, aliases, ifuncs, named metadata, comdats, module flags,
  /// and module-level debug-info invariants. Returns true if everything
  /// checked out.
  bool verify() {
    Broken = false;

    // Collect all declarations of the llvm.experimental.deoptimize intrinsic.
    for (const Function &F : M)
      if (F.getIntrinsicID() == Intrinsic::experimental_deoptimize)
        DeoptimizeDeclarations.push_back(&F);

    // Now that we've visited every function, verify that we never asked to
    // recover a frame index that wasn't escaped.
    verifyFrameRecoverIndices();
    for (const GlobalVariable &GV : M.globals())
      visitGlobalVariable(GV);

    for (const GlobalAlias &GA : M.aliases())
      visitGlobalAlias(GA);

    for (const GlobalIFunc &GI : M.ifuncs())
      visitGlobalIFunc(GI);

    for (const NamedMDNode &NMD : M.named_metadata())
      visitNamedMDNode(NMD);

    // Comdats live in their own symbol table rather than the global lists.
    for (const StringMapEntry<Comdat> &SMEC : M.getComdatSymbolTable())
      visitComdat(SMEC.getValue());

    // Module-level metadata checks.
    visitModuleFlags();
    visitModuleIdents();
    visitModuleCommandLines();
    visitModuleErrnoTBAA();

    verifyCompileUnits();

    // All collected deoptimize declarations must agree on one calling
    // convention.
    verifyDeoptimizeCallingConvs();
    DISubprogramAttachments.clear();
    return !Broken;
  }
497
498private:
499 /// Whether a metadata node is allowed to be, or contain, a DILocation.
500 enum class AreDebugLocsAllowed { No, Yes };
501
502 /// Metadata that should be treated as a range, with slightly different
503 /// requirements.
504 enum class RangeLikeMetadataKind {
505 Range, // MD_range
506 AbsoluteSymbol, // MD_absolute_symbol
507 NoaliasAddrspace // MD_noalias_addrspace
508 };
509
510 // Verification methods...
511 void visitGlobalValue(const GlobalValue &GV);
512 void visitGlobalVariable(const GlobalVariable &GV);
513 void visitGlobalAlias(const GlobalAlias &GA);
514 void visitGlobalIFunc(const GlobalIFunc &GI);
515 void visitAliaseeSubExpr(const GlobalAlias &A, const Constant &C);
516 void visitAliaseeSubExpr(SmallPtrSetImpl<const GlobalAlias *> &Visited,
517 const GlobalAlias &A, const Constant &C);
518 void visitNamedMDNode(const NamedMDNode &NMD);
519 void visitMDNode(const MDNode &MD, AreDebugLocsAllowed AllowLocs);
520 void visitMetadataAsValue(const MetadataAsValue &MD, Function *F);
521 void visitValueAsMetadata(const ValueAsMetadata &MD, Function *F);
522 void visitDIArgList(const DIArgList &AL, Function *F);
523 void visitComdat(const Comdat &C);
524 void visitModuleIdents();
525 void visitModuleCommandLines();
526 void visitModuleErrnoTBAA();
527 void visitModuleFlags();
528 void visitModuleFlag(const MDNode *Op,
529 DenseMap<const MDString *, const MDNode *> &SeenIDs,
530 SmallVectorImpl<const MDNode *> &Requirements);
531 void visitModuleFlagCGProfileEntry(const MDOperand &MDO);
532 void visitFunction(const Function &F);
533 void visitBasicBlock(BasicBlock &BB);
534 void verifyRangeLikeMetadata(const Value &V, const MDNode *Range, Type *Ty,
535 RangeLikeMetadataKind Kind);
536 void visitRangeMetadata(Instruction &I, MDNode *Range, Type *Ty);
537 void visitNoFPClassMetadata(Instruction &I, MDNode *Range, Type *Ty);
538 void visitNoaliasAddrspaceMetadata(Instruction &I, MDNode *Range, Type *Ty);
539 void visitDereferenceableMetadata(Instruction &I, MDNode *MD);
540 void visitNofreeMetadata(Instruction &I, MDNode *MD);
541 void visitProfMetadata(Instruction &I, MDNode *MD);
542 void visitCallStackMetadata(MDNode *MD);
543 void visitMemProfMetadata(Instruction &I, MDNode *MD);
544 void visitCallsiteMetadata(Instruction &I, MDNode *MD);
545 void visitCalleeTypeMetadata(Instruction &I, MDNode *MD);
546 void visitDIAssignIDMetadata(Instruction &I, MDNode *MD);
547 void visitMMRAMetadata(Instruction &I, MDNode *MD);
548 void visitAnnotationMetadata(MDNode *Annotation);
549 void visitAliasScopeMetadata(const MDNode *MD);
550 void visitAliasScopeListMetadata(const MDNode *MD);
551 void visitAccessGroupMetadata(const MDNode *MD);
552 void visitCapturesMetadata(Instruction &I, const MDNode *Captures);
553 void visitAllocTokenMetadata(Instruction &I, MDNode *MD);
554 void visitInlineHistoryMetadata(Instruction &I, MDNode *MD);
555
556 template <class Ty> bool isValidMetadataArray(const MDTuple &N);
557#define HANDLE_SPECIALIZED_MDNODE_LEAF(CLASS) void visit##CLASS(const CLASS &N);
558#include "llvm/IR/Metadata.def"
559 void visitDIType(const DIType &N);
560 void visitDIScope(const DIScope &N);
561 void visitDIVariable(const DIVariable &N);
562 void visitDILexicalBlockBase(const DILexicalBlockBase &N);
563 void visitDITemplateParameter(const DITemplateParameter &N);
564
565 void visitTemplateParams(const MDNode &N, const Metadata &RawParams);
566
567 void visit(DbgLabelRecord &DLR);
568 void visit(DbgVariableRecord &DVR);
569 // InstVisitor overrides...
570 using InstVisitor<Verifier>::visit;
571 void visitDbgRecords(Instruction &I);
572 void visit(Instruction &I);
573
574 void visitTruncInst(TruncInst &I);
575 void visitZExtInst(ZExtInst &I);
576 void visitSExtInst(SExtInst &I);
577 void visitFPTruncInst(FPTruncInst &I);
578 void visitFPExtInst(FPExtInst &I);
579 void visitFPToUIInst(FPToUIInst &I);
580 void visitFPToSIInst(FPToSIInst &I);
581 void visitUIToFPInst(UIToFPInst &I);
582 void visitSIToFPInst(SIToFPInst &I);
583 void visitIntToPtrInst(IntToPtrInst &I);
584 void checkPtrToAddr(Type *SrcTy, Type *DestTy, const Value &V);
585 void visitPtrToAddrInst(PtrToAddrInst &I);
586 void visitPtrToIntInst(PtrToIntInst &I);
587 void visitBitCastInst(BitCastInst &I);
588 void visitAddrSpaceCastInst(AddrSpaceCastInst &I);
589 void visitPHINode(PHINode &PN);
590 void visitCallBase(CallBase &Call);
591 void visitUnaryOperator(UnaryOperator &U);
592 void visitBinaryOperator(BinaryOperator &B);
593 void visitICmpInst(ICmpInst &IC);
594 void visitFCmpInst(FCmpInst &FC);
595 void visitExtractElementInst(ExtractElementInst &EI);
596 void visitInsertElementInst(InsertElementInst &EI);
597 void visitShuffleVectorInst(ShuffleVectorInst &EI);
598 void visitVAArgInst(VAArgInst &VAA) { visitInstruction(VAA); }
599 void visitCallInst(CallInst &CI);
600 void visitInvokeInst(InvokeInst &II);
601 void visitGetElementPtrInst(GetElementPtrInst &GEP);
602 void visitLoadInst(LoadInst &LI);
603 void visitStoreInst(StoreInst &SI);
604 void verifyDominatesUse(Instruction &I, unsigned i);
605 void visitInstruction(Instruction &I);
606 void visitTerminator(Instruction &I);
607 void visitCondBrInst(CondBrInst &BI);
608 void visitReturnInst(ReturnInst &RI);
609 void visitSwitchInst(SwitchInst &SI);
610 void visitIndirectBrInst(IndirectBrInst &BI);
611 void visitCallBrInst(CallBrInst &CBI);
612 void visitSelectInst(SelectInst &SI);
613 void visitUserOp1(Instruction &I);
614 void visitUserOp2(Instruction &I) { visitUserOp1(I); }
615 void visitIntrinsicCall(Intrinsic::ID ID, CallBase &Call);
616 void visitConstrainedFPIntrinsic(ConstrainedFPIntrinsic &FPI);
617 void visitVPIntrinsic(VPIntrinsic &VPI);
618 void visitDbgLabelIntrinsic(StringRef Kind, DbgLabelInst &DLI);
619 void visitAtomicCmpXchgInst(AtomicCmpXchgInst &CXI);
620 void visitAtomicRMWInst(AtomicRMWInst &RMWI);
621 void visitFenceInst(FenceInst &FI);
622 void visitAllocaInst(AllocaInst &AI);
623 void visitExtractValueInst(ExtractValueInst &EVI);
624 void visitInsertValueInst(InsertValueInst &IVI);
625 void visitEHPadPredecessors(Instruction &I);
626 void visitLandingPadInst(LandingPadInst &LPI);
627 void visitResumeInst(ResumeInst &RI);
628 void visitCatchPadInst(CatchPadInst &CPI);
629 void visitCatchReturnInst(CatchReturnInst &CatchReturn);
630 void visitCleanupPadInst(CleanupPadInst &CPI);
631 void visitFuncletPadInst(FuncletPadInst &FPI);
632 void visitCatchSwitchInst(CatchSwitchInst &CatchSwitch);
633 void visitCleanupReturnInst(CleanupReturnInst &CRI);
634
635 void verifySwiftErrorCall(CallBase &Call, const Value *SwiftErrorVal);
636 void verifySwiftErrorValue(const Value *SwiftErrorVal);
637 void verifyTailCCMustTailAttrs(const AttrBuilder &Attrs, StringRef Context);
638 void verifyMustTailCall(CallInst &CI);
639 bool verifyAttributeCount(AttributeList Attrs, unsigned Params);
640 void verifyAttributeTypes(AttributeSet Attrs, const Value *V);
641 void verifyParameterAttrs(AttributeSet Attrs, Type *Ty, const Value *V);
642 void checkUnsignedBaseTenFuncAttr(AttributeList Attrs, StringRef Attr,
643 const Value *V);
644 void verifyFunctionAttrs(FunctionType *FT, AttributeList Attrs,
645 const Value *V, bool IsIntrinsic, bool IsInlineAsm);
646 void verifyFunctionMetadata(ArrayRef<std::pair<unsigned, MDNode *>> MDs);
647 void verifyUnknownProfileMetadata(MDNode *MD);
648 void visitConstantExprsRecursively(const Constant *EntryC);
649 void visitConstantExpr(const ConstantExpr *CE);
650 void visitConstantPtrAuth(const ConstantPtrAuth *CPA);
651 void verifyInlineAsmCall(const CallBase &Call);
652 void verifyStatepoint(const CallBase &Call);
653 void verifyFrameRecoverIndices();
654 void verifySiblingFuncletUnwinds();
655
656 void verifyFragmentExpression(const DbgVariableRecord &I);
657 template <typename ValueOrMetadata>
658 void verifyFragmentExpression(const DIVariable &V,
660 ValueOrMetadata *Desc);
661 void verifyFnArgs(const DbgVariableRecord &DVR);
662 void verifyNotEntryValue(const DbgVariableRecord &I);
663
664 /// Module-level debug info verification...
665 void verifyCompileUnits();
666
667 /// Module-level verification that all @llvm.experimental.deoptimize
668 /// declarations share the same calling convention.
669 void verifyDeoptimizeCallingConvs();
670
671 void verifyAttachedCallBundle(const CallBase &Call,
672 const OperandBundleUse &BU);
673
674 /// Verify the llvm.experimental.noalias.scope.decl declarations
675 void verifyNoAliasScopeDecl();
676};
677
678} // end anonymous namespace
679
680/// We know that cond should be true, if not print an error message.
681#define Check(C, ...) \
682 do { \
683 if (!(C)) { \
684 CheckFailed(__VA_ARGS__); \
685 return; \
686 } \
687 } while (false)
688
689/// We know that a debug info condition should be true, if not print
690/// an error message.
691#define CheckDI(C, ...) \
692 do { \
693 if (!(C)) { \
694 DebugInfoCheckFailed(__VA_ARGS__); \
695 return; \
696 } \
697 } while (false)
698
// Verify the debug records attached to \p I: the marker must point back at
// the instruction, PHIs must carry no records, and each record's location
// and contents are checked in turn.
void Verifier::visitDbgRecords(Instruction &I) {
  // Instructions with no attached debug records have no marker at all.
  if (!I.DebugMarker)
    return;
  CheckDI(I.DebugMarker->MarkedInstr == &I,
          "Instruction has invalid DebugMarker", &I);
  CheckDI(!isa<PHINode>(&I) || !I.hasDbgRecords(),
          "PHI Node must not have any attached DbgRecords", &I);
  for (DbgRecord &DR : I.getDbgRecordRange()) {
    CheckDI(DR.getMarker() == I.DebugMarker,
            "DbgRecord had invalid DebugMarker", &I, &DR);
    // NOTE(review): the initializer of 'Loc' appears truncated here — the
    // expression producing a DILocation from the record's debug location
    // seems to be missing. Confirm against the upstream source.
    if (auto *Loc =
        visitMDNode(*Loc, AreDebugLocsAllowed::Yes);
    if (auto *DVR = dyn_cast<DbgVariableRecord>(&DR)) {
      visit(*DVR);
      // These have to appear after `visit` for consistency with existing
      // intrinsic behaviour.
      verifyFragmentExpression(*DVR);
      verifyNotEntryValue(*DVR);
    } else if (auto *DLR = dyn_cast<DbgLabelRecord>(&DR)) {
      visit(*DLR);
    }
  }
}
723
724void Verifier::visit(Instruction &I) {
725 visitDbgRecords(I);
726 for (unsigned i = 0, e = I.getNumOperands(); i != e; ++i)
727 Check(I.getOperand(i) != nullptr, "Operand is null", &I);
729}
730
731// Helper to iterate over indirect users. By returning false, the callback can ask to stop traversing further.
static void forEachUser(const Value *User,
                        // NOTE(review): a 'Visited' set parameter appears to
                        // be missing from this signature — the body reads and
                        // inserts into 'Visited'. Confirm against upstream.
                        llvm::function_ref<bool(const Value *)> Callback) {
  // Skip users we have already walked; large constant expressions can reach
  // the same user through many operand paths.
  if (!Visited.insert(User).second)
    return;

  // NOTE(review): the worklist declaration and its initial seeding appear
  // truncated here — confirm against upstream.
  while (!WorkList.empty()) {
    const Value *Cur = WorkList.pop_back_val();
    if (!Visited.insert(Cur).second)
      continue;
    // A true return from the callback means "keep walking through this
    // user's own users"; false prunes the traversal at this node.
    if (Callback(Cur))
      append_range(WorkList, Cur->materialized_users());
  }
}
747
// Verify invariants common to every GlobalValue: linkage/visibility
// consistency, the !associated, !implicit_ref (ref) and !absolute_symbol
// metadata attachments, DLL storage-class rules, dso_local implications,
// and that every user of the global lives in this module.
void Verifier::visitGlobalValue(const GlobalValue &GV) {
      // NOTE(review): the opening 'Check(' line of this diagnostic appears
      // truncated — only the message/arguments survive. Confirm upstream.
      "Global is external, but doesn't have external or weak linkage!", &GV);

  if (const GlobalObject *GO = dyn_cast<GlobalObject>(&GV)) {
    // !associated must name exactly one pointer-typed global, and not the
    // global itself.
    if (const MDNode *Associated =
            GO->getMetadata(LLVMContext::MD_associated)) {
      Check(Associated->getNumOperands() == 1,
            "associated metadata must have one operand", &GV, Associated);
      const Metadata *Op = Associated->getOperand(0).get();
      Check(Op, "associated metadata must have a global value", GO, Associated);

      const auto *VM = dyn_cast_or_null<ValueAsMetadata>(Op);
      Check(VM, "associated metadata must be ValueAsMetadata", GO, Associated);
      if (VM) {
        Check(isa<PointerType>(VM->getValue()->getType()),
              "associated value must be pointer typed", GV, Associated);

        const Value *Stripped = VM->getValue()->stripPointerCastsAndAliases();
        Check(isa<GlobalObject>(Stripped) || isa<Constant>(Stripped),
              "associated metadata must point to a GlobalObject", GO, Stripped);
        Check(Stripped != GO,
              "global values should not associate to themselves", GO,
              Associated);
      }
    }

    // FIXME: Why is getMetadata on GlobalValue protected?
    if (const MDNode *AbsoluteSymbol =
            GO->getMetadata(LLVMContext::MD_absolute_symbol)) {
      verifyRangeLikeMetadata(*GO, AbsoluteSymbol,
                              DL.getIntPtrType(GO->getType()),
                              RangeLikeMetadataKind::AbsoluteSymbol);
    }

    // !implicit_ref attachments: each must be a single pointer-typed
    // ValueAsMetadata operand naming some other GlobalObject.
    if (GO->hasMetadata(LLVMContext::MD_implicit_ref)) {
      Check(!GO->isDeclaration(),
            "ref metadata must not be placed on a declaration", GO);

      // NOTE(review): the declaration of 'MDs' (a SmallVector of MDNode*)
      // appears to be missing just above this call. Confirm upstream.
      GO->getMetadata(LLVMContext::MD_implicit_ref, MDs);
      for (const MDNode *MD : MDs) {
        Check(MD->getNumOperands() == 1, "ref metadata must have one operand",
              &GV, MD);
        const Metadata *Op = MD->getOperand(0).get();
        const auto *VM = dyn_cast_or_null<ValueAsMetadata>(Op);
        Check(VM, "ref metadata must be ValueAsMetadata", GO, MD);
        if (VM) {
          Check(isa<PointerType>(VM->getValue()->getType()),
                "ref value must be pointer typed", GV, MD);

          const Value *Stripped = VM->getValue()->stripPointerCastsAndAliases();
          Check(isa<GlobalObject>(Stripped) || isa<Constant>(Stripped),
                "ref metadata must point to a GlobalObject", GO, Stripped);
          Check(Stripped != GO, "values should not reference themselves", GO,
                MD);
        }
      }
    }

    // !elf_section_properties carries a (type, entsize) pair of integers.
    if (auto *Props = GO->getMetadata(LLVMContext::MD_elf_section_properties)) {
      Check(Props->getNumOperands() == 2,
            "elf_section_properties metadata must have two operands", GO,
            Props);
      if (Props->getNumOperands() == 2) {
        auto *Type = dyn_cast<ConstantAsMetadata>(Props->getOperand(0));
        Check(Type, "type field must be ConstantAsMetadata", GO, Props);
        auto *TypeInt = dyn_cast<ConstantInt>(Type->getValue());
        Check(TypeInt, "type field must be ConstantInt", GO, Props);

        auto *Entsize = dyn_cast<ConstantAsMetadata>(Props->getOperand(1));
        Check(Entsize, "entsize field must be ConstantAsMetadata", GO, Props);
        auto *EntsizeInt = dyn_cast<ConstantInt>(Entsize->getValue());
        Check(EntsizeInt, "entsize field must be ConstantInt", GO, Props);
      }
    }
  }

      // NOTE(review): another 'Check(' opening line appears truncated here.
      "Only global variables can have appending linkage!", &GV);

  if (GV.hasAppendingLinkage()) {
    const GlobalVariable *GVar = dyn_cast<GlobalVariable>(&GV);
    Check(GVar && GVar->getValueType()->isArrayTy(),
          "Only global arrays can have appending linkage!", GVar);
  }

  if (GV.isDeclarationForLinker())
    Check(!GV.hasComdat(), "Declaration may not be in a Comdat!", &GV);

  if (GV.hasDLLExportStorageClass()) {
        // NOTE(review): the 'Check(' opening line of this diagnostic appears
        // truncated.
        "dllexport GlobalValue must have default or protected visibility",
        &GV);
  }
  if (GV.hasDLLImportStorageClass()) {
        // NOTE(review): the 'Check(' opening line of this diagnostic appears
        // truncated.
        "dllimport GlobalValue must have default visibility", &GV);
    Check(!GV.isDSOLocal(), "GlobalValue with DLLImport Storage is dso_local!",
          &GV);

    // NOTE(review): the remainder of this condition appears truncated.
    Check((GV.isDeclaration() &&
          "Global is marked as dllimport, but not external", &GV);
  }

  if (GV.isImplicitDSOLocal())
    Check(GV.isDSOLocal(),
          "GlobalValue with local linkage or non-default "
          "visibility must be dso_local!",
          &GV);

  // Every transitive user of the global must live inside this module;
  // instructions must additionally be attached to a function.
  forEachUser(&GV, GlobalValueVisited, [&](const Value *V) -> bool {
    if (const Instruction *I = dyn_cast<Instruction>(V)) {
      if (!I->getParent() || !I->getParent()->getParent())
        CheckFailed("Global is referenced by parentless instruction!", &GV, &M,
                    I);
      else if (I->getParent()->getParent()->getParent() != &M)
        CheckFailed("Global is referenced in a different module!", &GV, &M, I,
                    I->getParent()->getParent(),
                    I->getParent()->getParent()->getParent());
      return false;
    } else if (const Function *F = dyn_cast<Function>(V)) {
      if (F->getParent() != &M)
        CheckFailed("Global is used by function in a different module", &GV, &M,
                    F, F->getParent());
      return false;
    }
    return true;
  });
}
880
// Verify a global variable: alignment bounds, initializer shape, the special
// llvm.global_ctors/llvm.global_dtors and llvm.used/llvm.compiler.used
// intrinsic globals, debug-info attachments, and type/address-space limits.
void Verifier::visitGlobalVariable(const GlobalVariable &GV) {
  Type *GVType = GV.getValueType();

  if (MaybeAlign A = GV.getAlign()) {
    Check(A->value() <= Value::MaximumAlignment,
          "huge alignment values are unsupported", &GV);
  }

  if (GV.hasInitializer()) {
    Check(GV.getInitializer()->getType() == GVType,
          "Global variable initializer type does not match global "
          "variable type!",
          &GV);
    // NOTE(review): the Check(...) condition line for the "must be sized"
    // diagnostic is missing from this excerpt.
          "Global variable initializer must be sized", &GV);
    visitConstantExprsRecursively(GV.getInitializer());
    // If the global has common linkage, it must have a zero initializer and
    // cannot be constant.
    if (GV.hasCommonLinkage()) {
      // NOTE(review): the Check(...) condition line is missing from this
      // excerpt.
            "'common' global must have a zero initializer!", &GV);
      Check(!GV.isConstant(), "'common' global may not be marked constant!",
            &GV);
      Check(!GV.hasComdat(), "'common' global may not be in a Comdat!", &GV);
    }
  }

  if (GV.hasName() && (GV.getName() == "llvm.global_ctors" ||
                       GV.getName() == "llvm.global_dtors")) {
    // NOTE(review): the Check(...) condition lines for the next two
    // diagnostics are missing from this excerpt.
          "invalid linkage for intrinsic global variable", &GV);
          "invalid uses of intrinsic global variable", &GV);

    // Don't worry about emitting an error for it not being an array,
    // visitGlobalValue will complain on appending non-array.
    if (ArrayType *ATy = dyn_cast<ArrayType>(GVType)) {
      StructType *STy = dyn_cast<StructType>(ATy->getElementType());
      PointerType *FuncPtrTy =
          PointerType::get(Context, DL.getProgramAddressSpace());
      // Each element must be a 2- or 3-field struct: i32 at index 0 and a
      // program-address-space pointer at index 1.
      Check(STy && (STy->getNumElements() == 2 || STy->getNumElements() == 3) &&
                STy->getTypeAtIndex(0u)->isIntegerTy(32) &&
                STy->getTypeAtIndex(1) == FuncPtrTy,
            "wrong type for intrinsic global variable", &GV);
      Check(STy->getNumElements() == 3,
            "the third field of the element type is mandatory, "
            "specify ptr null to migrate from the obsoleted 2-field form");
      Type *ETy = STy->getTypeAtIndex(2);
      Check(ETy->isPointerTy(), "wrong type for intrinsic global variable",
            &GV);
    }
  }

  if (GV.hasName() && (GV.getName() == "llvm.used" ||
                       GV.getName() == "llvm.compiler.used")) {
    // NOTE(review): the Check(...) condition lines for the next two
    // diagnostics are missing from this excerpt.
          "invalid linkage for intrinsic global variable", &GV);
          "invalid uses of intrinsic global variable", &GV);

    if (ArrayType *ATy = dyn_cast<ArrayType>(GVType)) {
      PointerType *PTy = dyn_cast<PointerType>(ATy->getElementType());
      Check(PTy, "wrong type for intrinsic global variable", &GV);
      if (GV.hasInitializer()) {
        const Constant *Init = GV.getInitializer();
        const ConstantArray *InitArray = dyn_cast<ConstantArray>(Init);
        Check(InitArray, "wrong initializer for intrinsic global variable",
              Init);
        for (Value *Op : InitArray->operands()) {
          Value *V = Op->stripPointerCasts();
          // NOTE(review): the Check(...) condition line is missing from this
          // excerpt.
                Twine("invalid ") + GV.getName() + " member", V);
          Check(V->hasName(),
                Twine("members of ") + GV.getName() + " must be named", V);
        }
      }
    }
  }

  // Visit any debug info attachments.
  // NOTE(review): the declaration of the MDs container is missing from this
  // excerpt.
  GV.getMetadata(LLVMContext::MD_dbg, MDs);
  for (auto *MD : MDs) {
    if (auto *GVE = dyn_cast<DIGlobalVariableExpression>(MD))
      visitDIGlobalVariableExpression(*GVE);
    else
      CheckDI(false, "!dbg attachment of global variable must be a "
                     "DIGlobalVariableExpression");
  }

  // Scalable vectors cannot be global variables, since we don't know
  // the runtime size.
  Check(!GVType->isScalableTy(), "Globals cannot contain scalable types", &GV);

  // Check if it is or contains a target extension type that disallows being
  // used as a global.
  // NOTE(review): the Check(...) condition line is missing from this excerpt.
        "Global @" + GV.getName() + " has illegal target extension type",
        GVType);

  // Check that the address space can hold all bits of the type, recognized
  // by an access in the address space being able to reach all bytes of the
  // type.
  Check(!GVType->isSized() ||
            isUIntN(DL.getAddressSizeInBits(GV.getAddressSpace()),
                    GV.getGlobalSize(DL)),
        "Global variable is too large to fit into the address space", &GV,
        GVType);

  if (!GV.hasInitializer()) {
    visitGlobalValue(GV);
    return;
  }

  // Walk any aggregate initializers looking for bitcasts between address spaces
  visitConstantExprsRecursively(GV.getInitializer());

  visitGlobalValue(GV);
}
1001
1002void Verifier::visitAliaseeSubExpr(const GlobalAlias &GA, const Constant &C) {
1003 SmallPtrSet<const GlobalAlias*, 4> Visited;
1004 Visited.insert(&GA);
1005 visitAliaseeSubExpr(Visited, GA, C);
1006}
1007
// Recursive worker for alias verification: walks the aliasee expression,
// detecting cycles through other aliases and rejecting interposable or
// declaration-only targets.
void Verifier::visitAliaseeSubExpr(SmallPtrSetImpl<const GlobalAlias*> &Visited,
                                   const GlobalAlias &GA, const Constant &C) {
  // NOTE(review): the opening guard and Check(...) condition line for the
  // available_externally diagnostic are missing from this excerpt.
          cast<GlobalValue>(C).hasAvailableExternallyLinkage(),
          "available_externally alias must point to available_externally "
          "global value",
          &GA);
  }
  if (const auto *GV = dyn_cast<GlobalValue>(&C)) {
    // NOTE(review): the guard line preceding this Check is missing from this
    // excerpt.
      Check(!GV->isDeclarationForLinker(), "Alias must point to a definition",
            &GA);
    }

    if (const auto *GA2 = dyn_cast<GlobalAlias>(GV)) {
      // Re-visiting an alias already in Visited means the chain loops.
      Check(Visited.insert(GA2).second, "Aliases cannot form a cycle", &GA);

      Check(!GA2->isInterposable(),
            "Alias cannot point to an interposable alias", &GA);
    } else {
      // Only continue verifying subexpressions of GlobalAliases.
      // Do not recurse into global initializers.
      return;
    }
  }

  if (const auto *CE = dyn_cast<ConstantExpr>(&C))
    visitConstantExprsRecursively(CE);

  // Recurse through the operands: aliases are followed to their aliasee,
  // other constants are walked directly.
  for (const Use &U : C.operands()) {
    Value *V = &*U;
    if (const auto *GA2 = dyn_cast<GlobalAlias>(V))
      visitAliaseeSubExpr(Visited, GA, *GA2->getAliasee());
    else if (const auto *C2 = dyn_cast<Constant>(V))
      visitAliaseeSubExpr(Visited, GA, *C2);
  }
}
1046
// Verify a GlobalAlias: legal linkage, a non-null aliasee whose type matches
// the alias, and an aliasee expression free of cycles and interposable
// targets (checked via visitAliaseeSubExpr).
void Verifier::visitGlobalAlias(const GlobalAlias &GA) {
  // NOTE(review): the Check(...) condition line validating the alias linkage
  // is missing from this excerpt.
        "Alias should have private, internal, linkonce, weak, linkonce_odr, "
        "weak_odr, external, or available_externally linkage!",
        &GA);
  const Constant *Aliasee = GA.getAliasee();
  Check(Aliasee, "Aliasee cannot be NULL!", &GA);
  Check(GA.getType() == Aliasee->getType(),
        "Alias and aliasee types should match!", &GA);

  Check(isa<GlobalValue>(Aliasee) || isa<ConstantExpr>(Aliasee),
        "Aliasee should be either GlobalValue or ConstantExpr", &GA);

  // Walk the aliasee expression for cycle/interposability violations.
  visitAliaseeSubExpr(GA, *Aliasee);

  visitGlobalValue(GA);
}
1064
// Verify a GlobalIFunc: no !dbg/!prof metadata attachments, legal linkage,
// and a resolver that is a Function definition returning a pointer in the
// ifunc's address space.
void Verifier::visitGlobalIFunc(const GlobalIFunc &GI) {
  visitGlobalValue(GI);

  // NOTE(review): the declaration of the MDs container is missing from this
  // excerpt.
  GI.getAllMetadata(MDs);
  for (const auto &I : MDs) {
    // An ifunc may carry metadata, but !dbg and !prof are disallowed.
    CheckDI(I.first != LLVMContext::MD_dbg,
            "an ifunc may not have a !dbg attachment", &GI);
    Check(I.first != LLVMContext::MD_prof,
          "an ifunc may not have a !prof attachment", &GI);
    visitMDNode(*I.second, AreDebugLocsAllowed::No);
  }

  // NOTE(review): the Check(...) condition line validating the linkage is
  // missing from this excerpt.
        "IFunc should have private, internal, linkonce, weak, linkonce_odr, "
        "weak_odr, or external linkage!",
        &GI);
  // Pierce through ConstantExprs and GlobalAliases and check that the resolver
  // is a Function definition.
  const Function *Resolver = GI.getResolverFunction();
  Check(Resolver, "IFunc must have a Function resolver", &GI);
  Check(!Resolver->isDeclarationForLinker(),
        "IFunc resolver must be a definition", &GI);

  // Check that the immediate resolver operand (prior to any bitcasts) has the
  // correct type.
  const Type *ResolverTy = GI.getResolver()->getType();

  // NOTE(review): the Check(...) condition line is missing from this excerpt.
        "IFunc resolver must return a pointer", &GI);

  Check(ResolverTy == PointerType::get(Context, GI.getAddressSpace()),
        "IFunc resolver has incorrect type", &GI);
}
1099
1100void Verifier::visitNamedMDNode(const NamedMDNode &NMD) {
1101 // There used to be various other llvm.dbg.* nodes, but we don't support
1102 // upgrading them and we want to reserve the namespace for future uses.
1103 if (NMD.getName().starts_with("llvm.dbg."))
1104 CheckDI(NMD.getName() == "llvm.dbg.cu",
1105 "unrecognized named metadata node in the llvm.dbg namespace", &NMD);
1106 for (const MDNode *MD : NMD.operands()) {
1107 if (NMD.getName() == "llvm.dbg.cu")
1108 CheckDI(MD && isa<DICompileUnit>(MD), "invalid compile unit", &NMD, MD);
1109
1110 if (!MD)
1111 continue;
1112
1113 visitMDNode(*MD, AreDebugLocsAllowed::Yes);
1114 }
1115}
1116
// Verify an MDNode and everything reachable from it. Uses an explicit
// worklist (metadata graphs can be deep and mutually recursive) and the
// MDNodes set to visit each node at most once.
void Verifier::visitMDNode(const MDNode &BaseMD,
                           AreDebugLocsAllowed AllowLocs) {
  // Only visit each node once. Metadata can be mutually recursive, so this
  // avoids infinite recursion here, as well as being an optimization.
  if (!MDNodes.insert(&BaseMD).second)
    return;

  std::queue<const MDNode *> Worklist;
  Worklist.push(&BaseMD);

  while (!Worklist.empty()) {
    const MDNode *CurrentMD = Worklist.front();
    Worklist.pop();
    Check(&CurrentMD->getContext() == &Context,
          "MDNode context does not match Module context!", CurrentMD);

    // Dispatch to the specialized visitor for each MDNode subclass; plain
    // tuples need no extra checks.
    switch (CurrentMD->getMetadataID()) {
    default:
      llvm_unreachable("Invalid MDNode subclass");
    case Metadata::MDTupleKind:
      break;
#define HANDLE_SPECIALIZED_MDNODE_LEAF(CLASS)                                  \
  case Metadata::CLASS##Kind:                                                  \
    visit##CLASS(cast<CLASS>(*CurrentMD));                                     \
    break;
#include "llvm/IR/Metadata.def"
    }

    for (const Metadata *Op : CurrentMD->operands()) {
      if (!Op)
        continue;
      Check(!isa<LocalAsMetadata>(Op), "Invalid operand for global metadata!",
            CurrentMD, Op);
      CheckDI(!isa<DILocation>(Op) || AllowLocs == AreDebugLocsAllowed::Yes,
              "DILocation not allowed within this metadata node", CurrentMD,
              Op);
      // Unvisited child nodes go on the worklist rather than being recursed
      // into.
      if (auto *N = dyn_cast<MDNode>(Op)) {
        if (MDNodes.insert(N).second)
          Worklist.push(N);
        continue;
      }
      if (auto *V = dyn_cast<ValueAsMetadata>(Op)) {
        visitValueAsMetadata(*V, nullptr);
        continue;
      }
    }

    // Check llvm.loop.estimated_trip_count.
    if (CurrentMD->getNumOperands() > 0 &&
        // NOTE(review): the remainder of this condition (matching the node
        // against llvm.loop.estimated_trip_count) is missing from this
        // excerpt.
      Check(CurrentMD->getNumOperands() == 2, "Expected two operands",
            CurrentMD);
      auto *Count =
          // NOTE(review): the initializer expression extracting the second
          // operand is missing from this excerpt.
      Check(Count && Count->getType()->isIntegerTy() &&
                cast<IntegerType>(Count->getType())->getBitWidth() <= 32,
            "Expected second operand to be an integer constant of type i32 or "
            "smaller",
            CurrentMD);
    }

    // Check these last, so we diagnose problems in operands first.
    Check(!CurrentMD->isTemporary(), "Expected no forward declarations!",
          CurrentMD);
    Check(CurrentMD->isResolved(), "All nodes should be resolved!", CurrentMD);
  }
}
1184
1185void Verifier::visitValueAsMetadata(const ValueAsMetadata &MD, Function *F) {
1186 Check(MD.getValue(), "Expected valid value", &MD);
1187 Check(!MD.getValue()->getType()->isMetadataTy(),
1188 "Unexpected metadata round-trip through values", &MD, MD.getValue());
1189
1190 auto *L = dyn_cast<LocalAsMetadata>(&MD);
1191 if (!L)
1192 return;
1193
1194 Check(F, "function-local metadata used outside a function", L);
1195
1196 // If this was an instruction, bb, or argument, verify that it is in the
1197 // function that we expect.
1198 Function *ActualF = nullptr;
1199 if (Instruction *I = dyn_cast<Instruction>(L->getValue())) {
1200 Check(I->getParent(), "function-local metadata not in basic block", L, I);
1201 ActualF = I->getParent()->getParent();
1202 } else if (BasicBlock *BB = dyn_cast<BasicBlock>(L->getValue()))
1203 ActualF = BB->getParent();
1204 else if (Argument *A = dyn_cast<Argument>(L->getValue()))
1205 ActualF = A->getParent();
1206 assert(ActualF && "Unimplemented function local metadata case!");
1207
1208 Check(ActualF == F, "function-local metadata used in wrong function", L);
1209}
1210
1211void Verifier::visitDIArgList(const DIArgList &AL, Function *F) {
1212 for (const ValueAsMetadata *VAM : AL.getArgs())
1213 visitValueAsMetadata(*VAM, F);
1214}
1215
1216void Verifier::visitMetadataAsValue(const MetadataAsValue &MDV, Function *F) {
1217 Metadata *MD = MDV.getMetadata();
1218 if (auto *N = dyn_cast<MDNode>(MD)) {
1219 visitMDNode(*N, AreDebugLocsAllowed::No);
1220 return;
1221 }
1222
1223 // Only visit each node once. Metadata can be mutually recursive, so this
1224 // avoids infinite recursion here, as well as being an optimization.
1225 if (!MDNodes.insert(MD).second)
1226 return;
1227
1228 if (auto *V = dyn_cast<ValueAsMetadata>(MD))
1229 visitValueAsMetadata(*V, F);
1230
1231 if (auto *AL = dyn_cast<DIArgList>(MD))
1232 visitDIArgList(*AL, F);
1233}
1234
1235static bool isType(const Metadata *MD) { return !MD || isa<DIType>(MD); }
1236static bool isScope(const Metadata *MD) { return !MD || isa<DIScope>(MD); }
1237static bool isDINode(const Metadata *MD) { return !MD || isa<DINode>(MD); }
1238static bool isMDTuple(const Metadata *MD) { return !MD || isa<MDTuple>(MD); }
1239
1240void Verifier::visitDILocation(const DILocation &N) {
1241 CheckDI(N.getRawScope() && isa<DILocalScope>(N.getRawScope()),
1242 "location requires a valid scope", &N, N.getRawScope());
1243 if (auto *IA = N.getRawInlinedAt())
1244 CheckDI(isa<DILocation>(IA), "inlined-at should be a location", &N, IA);
1245 if (auto *SP = dyn_cast<DISubprogram>(N.getRawScope()))
1246 CheckDI(SP->isDefinition(), "scope points into the type hierarchy", &N);
1247}
1248
1249void Verifier::visitGenericDINode(const GenericDINode &N) {
1250 CheckDI(N.getTag(), "invalid tag", &N);
1251}
1252
1253void Verifier::visitDIScope(const DIScope &N) {
1254 if (auto *F = N.getRawFile())
1255 CheckDI(isa<DIFile>(F), "invalid file", &N, F);
1256}
1257
1258void Verifier::visitDIType(const DIType &N) {
1259 CheckDI(isScope(N.getRawScope()), "invalid scope", &N, N.getRawScope());
1260 visitDIScope(N);
1261 CheckDI(N.getRawFile() || N.getLine() == 0, "line specified with no file", &N,
1262 N.getLine());
1263}
1264
// Verify a DISubrangeType: each bound/stride/bias operand may be a signed
// constant, DIVariable, or DIExpression (bounds also allow DIDerivedType);
// the size must be a constant.
void Verifier::visitDISubrangeType(const DISubrangeType &N) {
  visitDIType(N);

  CheckDI(N.getTag() == dwarf::DW_TAG_subrange_type, "invalid tag", &N);
  auto *BaseType = N.getRawBaseType();
  CheckDI(!BaseType || isType(BaseType), "BaseType must be a type");
  auto *LBound = N.getRawLowerBound();
  CheckDI(!LBound || isa<ConstantAsMetadata>(LBound) ||
              isa<DIVariable>(LBound) || isa<DIExpression>(LBound) ||
              isa<DIDerivedType>(LBound),
          "LowerBound must be signed constant or DIVariable or DIExpression or "
          "DIDerivedType",
          &N);
  auto *UBound = N.getRawUpperBound();
  CheckDI(!UBound || isa<ConstantAsMetadata>(UBound) ||
              isa<DIVariable>(UBound) || isa<DIExpression>(UBound) ||
              isa<DIDerivedType>(UBound),
          "UpperBound must be signed constant or DIVariable or DIExpression or "
          "DIDerivedType",
          &N);
  auto *Stride = N.getRawStride();
  CheckDI(!Stride || isa<ConstantAsMetadata>(Stride) ||
              isa<DIVariable>(Stride) || isa<DIExpression>(Stride),
          "Stride must be signed constant or DIVariable or DIExpression", &N);
  auto *Bias = N.getRawBias();
  CheckDI(!Bias || isa<ConstantAsMetadata>(Bias) || isa<DIVariable>(Bias) ||
              isa<DIExpression>(Bias),
          "Bias must be signed constant or DIVariable or DIExpression", &N);
  // Subrange types currently only support constant size.
  auto *Size = N.getRawSizeInBits();
  // NOTE(review): the CheckDI condition line for this diagnostic is missing
  // from this excerpt.
          "SizeInBits must be a constant");
}
1298
// Verify a DISubrange: at most one of count/upperBound may be present, and
// count/bounds/stride must each be a signed constant, DIVariable, or
// DIExpression.
void Verifier::visitDISubrange(const DISubrange &N) {
  CheckDI(N.getTag() == dwarf::DW_TAG_subrange_type, "invalid tag", &N);
  CheckDI(!N.getRawCountNode() || !N.getRawUpperBound(),
          "Subrange can have any one of count or upperBound", &N);
  auto *CBound = N.getRawCountNode();
  CheckDI(!CBound || isa<ConstantAsMetadata>(CBound) ||
              isa<DIVariable>(CBound) || isa<DIExpression>(CBound),
          "Count must be signed constant or DIVariable or DIExpression", &N);
  auto Count = N.getCount();
  // NOTE(review): the first line of this CheckDI condition is missing from
  // this excerpt; the visible part requires a constant count to be >= -1.
          cast<ConstantInt *>(Count)->getSExtValue() >= -1,
          "invalid subrange count", &N);
  auto *LBound = N.getRawLowerBound();
  CheckDI(!LBound || isa<ConstantAsMetadata>(LBound) ||
              isa<DIVariable>(LBound) || isa<DIExpression>(LBound),
          "LowerBound must be signed constant or DIVariable or DIExpression",
          &N);
  auto *UBound = N.getRawUpperBound();
  CheckDI(!UBound || isa<ConstantAsMetadata>(UBound) ||
              isa<DIVariable>(UBound) || isa<DIExpression>(UBound),
          "UpperBound must be signed constant or DIVariable or DIExpression",
          &N);
  auto *Stride = N.getRawStride();
  CheckDI(!Stride || isa<ConstantAsMetadata>(Stride) ||
              isa<DIVariable>(Stride) || isa<DIExpression>(Stride),
          "Stride must be signed constant or DIVariable or DIExpression", &N);
}
1326
1327void Verifier::visitDIGenericSubrange(const DIGenericSubrange &N) {
1328 CheckDI(N.getTag() == dwarf::DW_TAG_generic_subrange, "invalid tag", &N);
1329 CheckDI(!N.getRawCountNode() || !N.getRawUpperBound(),
1330 "GenericSubrange can have any one of count or upperBound", &N);
1331 auto *CBound = N.getRawCountNode();
1332 CheckDI(!CBound || isa<DIVariable>(CBound) || isa<DIExpression>(CBound),
1333 "Count must be signed constant or DIVariable or DIExpression", &N);
1334 auto *LBound = N.getRawLowerBound();
1335 CheckDI(LBound, "GenericSubrange must contain lowerBound", &N);
1336 CheckDI(isa<DIVariable>(LBound) || isa<DIExpression>(LBound),
1337 "LowerBound must be signed constant or DIVariable or DIExpression",
1338 &N);
1339 auto *UBound = N.getRawUpperBound();
1340 CheckDI(!UBound || isa<DIVariable>(UBound) || isa<DIExpression>(UBound),
1341 "UpperBound must be signed constant or DIVariable or DIExpression",
1342 &N);
1343 auto *Stride = N.getRawStride();
1344 CheckDI(Stride, "GenericSubrange must contain stride", &N);
1345 CheckDI(isa<DIVariable>(Stride) || isa<DIExpression>(Stride),
1346 "Stride must be signed constant or DIVariable or DIExpression", &N);
1347}
1348
1349void Verifier::visitDIEnumerator(const DIEnumerator &N) {
1350 CheckDI(N.getTag() == dwarf::DW_TAG_enumerator, "invalid tag", &N);
1351}
1352
// Verify a DIBasicType: one of the base/unspecified/string tags on top of
// the common DIType checks; the size must be a constant.
void Verifier::visitDIBasicType(const DIBasicType &N) {
  visitDIType(N);

  CheckDI(N.getTag() == dwarf::DW_TAG_base_type ||
              N.getTag() == dwarf::DW_TAG_unspecified_type ||
              N.getTag() == dwarf::DW_TAG_string_type,
          "invalid tag", &N);
  // Basic types currently only support constant size.
  auto *Size = N.getRawSizeInBits();
  // NOTE(review): the CheckDI condition line for this diagnostic is missing
  // from this excerpt.
          "SizeInBits must be a constant");
}
1365
// Verify a DIFixedPointType: base-type tag, a signed/unsigned fixed
// encoding, and kind-dependent constraints on factor/numerator/denominator.
void Verifier::visitDIFixedPointType(const DIFixedPointType &N) {
  visitDIBasicType(N);

  CheckDI(N.getTag() == dwarf::DW_TAG_base_type, "invalid tag", &N);
  CheckDI(N.getEncoding() == dwarf::DW_ATE_signed_fixed ||
              N.getEncoding() == dwarf::DW_ATE_unsigned_fixed,
          "invalid encoding", &N);
  // NOTE(review): the CheckDI condition lines for the kind check and the two
  // rational/non-rational checks below are missing from this excerpt.
          "invalid kind", &N);
          N.getFactorRaw() == 0,
          "factor should be 0 for rationals", &N);
          (N.getNumeratorRaw() == 0 && N.getDenominatorRaw() == 0),
          "numerator and denominator should be 0 for non-rationals", &N);
}
1384
1385void Verifier::visitDIStringType(const DIStringType &N) {
1386 visitDIType(N);
1387
1388 CheckDI(N.getTag() == dwarf::DW_TAG_string_type, "invalid tag", &N);
1389 CheckDI(!(N.isBigEndian() && N.isLittleEndian()), "has conflicting flags",
1390 &N);
1391}
1392
// Verify a DIDerivedType: legal tag, tag-specific extraData shape, set-type
// base-type constraints, DWARF address-space applicability, and size form.
void Verifier::visitDIDerivedType(const DIDerivedType &N) {
  // Common type checks.
  visitDIType(N);

  CheckDI(N.getTag() == dwarf::DW_TAG_typedef ||
              N.getTag() == dwarf::DW_TAG_pointer_type ||
              N.getTag() == dwarf::DW_TAG_ptr_to_member_type ||
              N.getTag() == dwarf::DW_TAG_reference_type ||
              N.getTag() == dwarf::DW_TAG_rvalue_reference_type ||
              N.getTag() == dwarf::DW_TAG_const_type ||
              N.getTag() == dwarf::DW_TAG_immutable_type ||
              N.getTag() == dwarf::DW_TAG_volatile_type ||
              N.getTag() == dwarf::DW_TAG_restrict_type ||
              N.getTag() == dwarf::DW_TAG_atomic_type ||
              N.getTag() == dwarf::DW_TAG_LLVM_ptrauth_type ||
              N.getTag() == dwarf::DW_TAG_member ||
              (N.getTag() == dwarf::DW_TAG_variable && N.isStaticMember()) ||
              N.getTag() == dwarf::DW_TAG_inheritance ||
              N.getTag() == dwarf::DW_TAG_friend ||
              N.getTag() == dwarf::DW_TAG_set_type ||
              N.getTag() == dwarf::DW_TAG_template_alias,
          "invalid tag", &N);
  // The expected shape of extraData depends on the tag.
  if (N.getTag() == dwarf::DW_TAG_ptr_to_member_type) {
    CheckDI(isType(N.getRawExtraData()), "invalid pointer to member type", &N,
            N.getRawExtraData());
  } else if (N.getTag() == dwarf::DW_TAG_template_alias) {
    CheckDI(isMDTuple(N.getRawExtraData()), "invalid template parameters", &N,
            N.getRawExtraData());
  } else if (N.getTag() == dwarf::DW_TAG_inheritance ||
             N.getTag() == dwarf::DW_TAG_member ||
             N.getTag() == dwarf::DW_TAG_variable) {
    auto *ExtraData = N.getRawExtraData();
    // Accept absent extraData, a constant/string/ObjC property, or a
    // single-element tuple wrapping a constant.
    auto IsValidExtraData = [&]() {
      if (ExtraData == nullptr)
        return true;
      if (isa<ConstantAsMetadata>(ExtraData) || isa<MDString>(ExtraData) ||
          isa<DIObjCProperty>(ExtraData))
        return true;
      if (auto *Tuple = dyn_cast<MDTuple>(ExtraData)) {
        if (Tuple->getNumOperands() != 1)
          return false;
        return isa_and_nonnull<ConstantAsMetadata>(Tuple->getOperand(0).get());
      }
      return false;
    };
    CheckDI(IsValidExtraData(),
            "extraData must be ConstantAsMetadata, MDString, DIObjCProperty, "
            "or MDTuple with single ConstantAsMetadata operand",
            &N, ExtraData);
  }

  if (N.getTag() == dwarf::DW_TAG_set_type) {
    if (auto *T = N.getRawBaseType()) {
      // NOTE(review): the dyn_cast lines defining Enum, Subrange, and Basic
      // from T are missing from this excerpt.
      CheckDI(
          (Enum && Enum->getTag() == dwarf::DW_TAG_enumeration_type) ||
              (Subrange && Subrange->getTag() == dwarf::DW_TAG_subrange_type) ||
              (Basic && (Basic->getEncoding() == dwarf::DW_ATE_unsigned ||
                         Basic->getEncoding() == dwarf::DW_ATE_signed ||
                         Basic->getEncoding() == dwarf::DW_ATE_unsigned_char ||
                         Basic->getEncoding() == dwarf::DW_ATE_signed_char ||
                         Basic->getEncoding() == dwarf::DW_ATE_boolean)),
          "invalid set base type", &N, T);
    }
  }

  CheckDI(isType(N.getRawBaseType()), "invalid base type", &N,
          N.getRawBaseType());

  if (N.getDWARFAddressSpace()) {
    CheckDI(N.getTag() == dwarf::DW_TAG_pointer_type ||
                N.getTag() == dwarf::DW_TAG_reference_type ||
                N.getTag() == dwarf::DW_TAG_rvalue_reference_type,
            "DWARF address space only applies to pointer or reference types",
            &N);
  }

  auto *Size = N.getRawSizeInBits();
  // NOTE(review): the CheckDI condition lines for this diagnostic are missing
  // from this excerpt.
          "SizeInBits must be a constant or DIVariable or DIExpression");
}
1477
1478/// Detect mutually exclusive flags.
1479static bool hasConflictingReferenceFlags(unsigned Flags) {
1480 return ((Flags & DINode::FlagLValueReference) &&
1481 (Flags & DINode::FlagRValueReference)) ||
1482 ((Flags & DINode::FlagTypePassByValue) &&
1483 (Flags & DINode::FlagTypePassByReference));
1484}
1485
1486void Verifier::visitTemplateParams(const MDNode &N, const Metadata &RawParams) {
1487 auto *Params = dyn_cast<MDTuple>(&RawParams);
1488 CheckDI(Params, "invalid template params", &N, &RawParams);
1489 for (Metadata *Op : Params->operands()) {
1490 CheckDI(Op && isa<DITemplateParameter>(Op), "invalid template parameter",
1491 &N, Params, Op);
1492 }
1493}
1494
// Verify a DICompositeType: legal tag, element/holder shapes, vector and
// array-specific constraints, and attributes that only apply to array types.
void Verifier::visitDICompositeType(const DICompositeType &N) {
  // Common type checks.
  visitDIType(N);

  CheckDI(N.getTag() == dwarf::DW_TAG_array_type ||
              N.getTag() == dwarf::DW_TAG_structure_type ||
              N.getTag() == dwarf::DW_TAG_union_type ||
              N.getTag() == dwarf::DW_TAG_enumeration_type ||
              N.getTag() == dwarf::DW_TAG_class_type ||
              N.getTag() == dwarf::DW_TAG_variant_part ||
              N.getTag() == dwarf::DW_TAG_variant ||
              N.getTag() == dwarf::DW_TAG_namelist,
          "invalid tag", &N);

  CheckDI(isType(N.getRawBaseType()), "invalid base type", &N,
          N.getRawBaseType());

  CheckDI(!N.getRawElements() || isa<MDTuple>(N.getRawElements()),
          "invalid composite elements", &N, N.getRawElements());
  CheckDI(isType(N.getRawVTableHolder()), "invalid vtable holder", &N,
          N.getRawVTableHolder());
  // NOTE(review): the CheckDI condition line (reference-flag conflict check)
  // is missing from this excerpt.
          "invalid reference flags", &N);
  unsigned DIBlockByRefStruct = 1 << 4;
  CheckDI((N.getFlags() & DIBlockByRefStruct) == 0,
          "DIBlockByRefStruct on DICompositeType is no longer supported", &N);
  // NOTE(review): the diagnostic text says "DISubprogram" although this
  // checks a DICompositeType's elements — looks like a copy/paste slip in
  // the message; confirm upstream before changing.
  CheckDI(llvm::all_of(N.getElements(), [](const DINode *N) { return N; }),
          "DISubprogram contains null entry in `elements` field", &N);

  if (N.isVector()) {
    const DINodeArray Elements = N.getElements();
    CheckDI(Elements.size() == 1 &&
                Elements[0]->getTag() == dwarf::DW_TAG_subrange_type,
            "invalid vector, expected one element of type subrange", &N);
  }

  if (auto *Params = N.getRawTemplateParams())
    visitTemplateParams(N, *Params);

  if (auto *D = N.getRawDiscriminator()) {
    CheckDI(isa<DIDerivedType>(D) && N.getTag() == dwarf::DW_TAG_variant_part,
            "discriminator can only appear on variant part");
  }

  // dataLocation/associated/allocated/rank are only valid on array types.
  if (N.getRawDataLocation()) {
    CheckDI(N.getTag() == dwarf::DW_TAG_array_type,
            "dataLocation can only appear in array type");
  }

  if (N.getRawAssociated()) {
    CheckDI(N.getTag() == dwarf::DW_TAG_array_type,
            "associated can only appear in array type");
  }

  if (N.getRawAllocated()) {
    CheckDI(N.getTag() == dwarf::DW_TAG_array_type,
            "allocated can only appear in array type");
  }

  if (N.getRawRank()) {
    CheckDI(N.getTag() == dwarf::DW_TAG_array_type,
            "rank can only appear in array type");
  }

  if (N.getTag() == dwarf::DW_TAG_array_type) {
    CheckDI(N.getRawBaseType(), "array types must have a base type", &N);
  }

  auto *Size = N.getRawSizeInBits();
  // NOTE(review): the CheckDI condition lines for this diagnostic are missing
  // from this excerpt.
          "SizeInBits must be a constant or DIVariable or DIExpression");
}
1568
// Verify a DISubroutineType: subroutine tag, a type array whose entries are
// all types (or null), and no conflicting reference flags.
void Verifier::visitDISubroutineType(const DISubroutineType &N) {
  visitDIType(N);
  CheckDI(N.getTag() == dwarf::DW_TAG_subroutine_type, "invalid tag", &N);
  if (auto *Types = N.getRawTypeArray()) {
    CheckDI(isa<MDTuple>(Types), "invalid composite elements", &N, Types);
    for (Metadata *Ty : N.getTypeArray()->operands()) {
      CheckDI(isType(Ty), "invalid subroutine type ref", &N, Types, Ty);
    }
  }
  // NOTE(review): the CheckDI condition line (reference-flag conflict check)
  // is missing from this excerpt.
          "invalid reference flags", &N);
}
1581
1582void Verifier::visitDIFile(const DIFile &N) {
1583 CheckDI(N.getTag() == dwarf::DW_TAG_file_type, "invalid tag", &N);
1584 std::optional<DIFile::ChecksumInfo<StringRef>> Checksum = N.getChecksum();
1585 if (Checksum) {
1586 CheckDI(Checksum->Kind <= DIFile::ChecksumKind::CSK_Last,
1587 "invalid checksum kind", &N);
1588 size_t Size;
1589 switch (Checksum->Kind) {
1590 case DIFile::CSK_MD5:
1591 Size = 32;
1592 break;
1593 case DIFile::CSK_SHA1:
1594 Size = 40;
1595 break;
1596 case DIFile::CSK_SHA256:
1597 Size = 64;
1598 break;
1599 }
1600 CheckDI(Checksum->Value.size() == Size, "invalid checksum length", &N);
1601 CheckDI(Checksum->Value.find_if_not(llvm::isHexDigit) == StringRef::npos,
1602 "invalid checksum", &N);
1603 }
1604}
1605
// Verify a DICompileUnit: must be distinct, have a named DIFile and a valid
// emission kind, and carry well-formed enum / retained-type / global /
// imported-entity / macro lists. Records the unit in CUVisited for later
// cross-checking.
void Verifier::visitDICompileUnit(const DICompileUnit &N) {
  CheckDI(N.isDistinct(), "compile units must be distinct", &N);
  CheckDI(N.getTag() == dwarf::DW_TAG_compile_unit, "invalid tag", &N);

  // Don't bother verifying the compilation directory or producer string
  // as those could be empty.
  CheckDI(N.getRawFile() && isa<DIFile>(N.getRawFile()), "invalid file", &N,
          N.getRawFile());
  CheckDI(!N.getFile()->getFilename().empty(), "invalid filename", &N,
          N.getFile());

  CheckDI((N.getEmissionKind() <= DICompileUnit::LastEmissionKind),
          "invalid emission kind", &N);

  if (auto *Array = N.getRawEnumTypes()) {
    CheckDI(isa<MDTuple>(Array), "invalid enum list", &N, Array);
    for (Metadata *Op : N.getEnumTypes()->operands()) {
      // NOTE(review): the line defining Enum (a cast of Op) is missing from
      // this excerpt.
      CheckDI(Enum && Enum->getTag() == dwarf::DW_TAG_enumeration_type,
              "invalid enum type", &N, N.getEnumTypes(), Op);
      CheckDI(!Enum->getScope() || !isa<DILocalScope>(Enum->getScope()),
              "function-local enum in a DICompileUnit's enum list", &N,
              N.getEnumTypes(), Op);
    }
  }
  if (auto *Array = N.getRawRetainedTypes()) {
    CheckDI(isa<MDTuple>(Array), "invalid retained type list", &N, Array);
    for (Metadata *Op : N.getRetainedTypes()->operands()) {
      // Retained entries are types, or subprogram declarations (never
      // definitions).
      CheckDI(
          Op && (isa<DIType>(Op) || (isa<DISubprogram>(Op) &&
                                     !cast<DISubprogram>(Op)->isDefinition())),
          "invalid retained type", &N, Op);
    }
  }
  if (auto *Array = N.getRawGlobalVariables()) {
    CheckDI(isa<MDTuple>(Array), "invalid global variable list", &N, Array);
    for (Metadata *Op : N.getGlobalVariables()->operands()) {
      // NOTE(review): the CheckDI condition line for this diagnostic is
      // missing from this excerpt.
          "invalid global variable ref", &N, Op);
    }
  }
  if (auto *Array = N.getRawImportedEntities()) {
    CheckDI(isa<MDTuple>(Array), "invalid imported entity list", &N, Array);
    for (Metadata *Op : N.getImportedEntities()->operands()) {
      // NOTE(review): the line defining IE (a cast of Op) and the opening of
      // the second CheckDI below are missing from this excerpt.
      CheckDI(IE, "invalid imported entity ref", &N, Op);
          "function-local imports are not allowed in a DICompileUnit's "
          "imported entities list",
          &N, Op);
    }
  }
  if (auto *Array = N.getRawMacros()) {
    CheckDI(isa<MDTuple>(Array), "invalid macro list", &N, Array);
    for (Metadata *Op : N.getMacros()->operands()) {
      CheckDI(Op && isa<DIMacroNode>(Op), "invalid macro ref", &N, Op);
    }
  }
  CUVisited.insert(&N);
}
1666
// Verify a DISubprogram: tag/scope/file/type shape, retained-node list
// invariants (allowed kinds, locality, ownership by this subprogram, unique
// argument indices), and the definition-vs-declaration split (only
// definitions carry a compile unit).
void Verifier::visitDISubprogram(const DISubprogram &N) {
  CheckDI(N.getTag() == dwarf::DW_TAG_subprogram, "invalid tag", &N);
  CheckDI(isScope(N.getRawScope()), "invalid scope", &N, N.getRawScope());
  if (auto *F = N.getRawFile())
    CheckDI(isa<DIFile>(F), "invalid file", &N, F);
  else
    CheckDI(N.getLine() == 0, "line specified with no file", &N, N.getLine());
  auto *T = N.getRawType();
  CheckDI(T, "DISubprogram requires a non-null type", &N);
  CheckDI(isa<DISubroutineType>(T), "invalid subroutine type", &N, T);
  CheckDI(isType(N.getRawContainingType()), "invalid containing type", &N,
          N.getRawContainingType());
  if (auto *Params = N.getRawTemplateParams())
    visitTemplateParams(N, *Params);
  if (auto *S = N.getRawDeclaration())
    CheckDI(isa<DISubprogram>(S) && !cast<DISubprogram>(S)->isDefinition(),
            "invalid subprogram declaration", &N, S);
  if (auto *RawNode = N.getRawRetainedNodes()) {
    auto *Node = dyn_cast<MDTuple>(RawNode);
    CheckDI(Node, "invalid retained nodes list", &N, RawNode);

    // Maps an argument index to the first DILocalVariable seen with it, so
    // duplicate indices can be diagnosed.
    DenseMap<unsigned, DILocalVariable *> Args;
    for (Metadata *Op : Node->operands()) {
      CheckDI(Op, "nullptr in retained nodes", &N, Node);

      // The first four visitor callbacks accept; the final catch-all rejects
      // (the accepted kinds are the four listed in the diagnostic below).
      auto True = [](const Metadata *) { return true; };
      auto False = [](const Metadata *) { return false; };
      bool IsTypeCorrect = DISubprogram::visitRetainedNode<bool>(
          Op, True, True, True, True, False);
      CheckDI(IsTypeCorrect,
              "invalid retained nodes, expected DILocalVariable, DILabel, "
              "DIImportedEntity or DIType",
              &N, Node, Op);

      auto *RetainedNode = cast<DINode>(Op);
      auto *RetainedNodeScope = dyn_cast_or_null<DILocalScope>(
          // NOTE(review): the argument expression of this dyn_cast_or_null is
          // missing from this excerpt.
      CheckDI(RetainedNodeScope,
              "invalid retained nodes, retained node is not local", &N, Node,
              RetainedNode);

      DISubprogram *RetainedNodeSP = RetainedNodeScope->getSubprogram();
      DICompileUnit *RetainedNodeUnit =
          RetainedNodeSP ? RetainedNodeSP->getUnit() : nullptr;
      CheckDI(
          RetainedNodeSP == &N,
          "invalid retained nodes, retained node does not belong to subprogram",
          &N, Node, RetainedNode, RetainedNodeScope, RetainedNodeSP,
          RetainedNodeUnit);

      auto *DV = dyn_cast<DILocalVariable>(RetainedNode);
      if (!DV)
        continue;
      // Only variables with a non-zero argument index are checked for
      // duplicates (zero presumably means "not a parameter" — confirm).
      if (unsigned ArgNum = DV->getArg()) {
        auto [ArgI, Inserted] = Args.insert({ArgNum, DV});
        CheckDI(Inserted || DV == ArgI->second,
                "invalid retained nodes, more than one local variable with the "
                "same argument index",
                &N, N.getUnit(), Node, RetainedNode, Args[ArgNum]);
      }
    }
  }
  // NOTE(review): the CheckDI condition line (reference-flag conflict check)
  // is missing from this excerpt.
          "invalid reference flags", &N);

  auto *Unit = N.getRawUnit();
  if (N.isDefinition()) {
    // Subprogram definitions (not part of the type hierarchy).
    CheckDI(N.isDistinct(), "subprogram definitions must be distinct", &N);
    CheckDI(Unit, "subprogram definitions must have a compile unit", &N);
    CheckDI(isa<DICompileUnit>(Unit), "invalid unit type", &N, Unit);
    // There's no good way to cross the CU boundary to insert a nested
    // DISubprogram definition in one CU into a type defined in another CU.
    auto *CT = dyn_cast_or_null<DICompositeType>(N.getRawScope());
    if (CT && CT->getRawIdentifier() &&
        M.getContext().isODRUniquingDebugTypes())
      CheckDI(N.getDeclaration(),
              "definition subprograms cannot be nested within DICompositeType "
              "when enabling ODR",
              &N);
  } else {
    // Subprogram declarations (part of the type hierarchy).
    CheckDI(!Unit, "subprogram declarations must not have a compile unit", &N);
    CheckDI(!N.getRawDeclaration(),
            "subprogram declaration must not have a declaration field");
  }

  if (auto *RawThrownTypes = N.getRawThrownTypes()) {
    auto *ThrownTypes = dyn_cast<MDTuple>(RawThrownTypes);
    CheckDI(ThrownTypes, "invalid thrown types list", &N, RawThrownTypes);
    for (Metadata *Op : ThrownTypes->operands())
      CheckDI(Op && isa<DIType>(Op), "invalid thrown type", &N, ThrownTypes,
              Op);
  }

  if (N.areAllCallsDescribed())
    CheckDI(N.isDefinition(),
            "DIFlagAllCallsDescribed must be attached to a definition");
}
1766
1767void Verifier::visitDILexicalBlockBase(const DILexicalBlockBase &N) {
1768 CheckDI(N.getTag() == dwarf::DW_TAG_lexical_block, "invalid tag", &N);
1769 CheckDI(N.getRawScope() && isa<DILocalScope>(N.getRawScope()),
1770 "invalid local scope", &N, N.getRawScope());
1771 if (auto *SP = dyn_cast<DISubprogram>(N.getRawScope()))
1772 CheckDI(SP->isDefinition(), "scope points into the type hierarchy", &N);
1773}
1774
1775void Verifier::visitDILexicalBlock(const DILexicalBlock &N) {
1776 visitDILexicalBlockBase(N);
1777
1778 CheckDI(N.getLine() || !N.getColumn(),
1779 "cannot have column info without line info", &N);
1780}
1781
1782void Verifier::visitDILexicalBlockFile(const DILexicalBlockFile &N) {
1783 visitDILexicalBlockBase(N);
1784}
1785
1786void Verifier::visitDICommonBlock(const DICommonBlock &N) {
1787 CheckDI(N.getTag() == dwarf::DW_TAG_common_block, "invalid tag", &N);
1788 if (auto *S = N.getRawScope())
1789 CheckDI(isa<DIScope>(S), "invalid scope ref", &N, S);
1790 if (auto *S = N.getRawDecl())
1791 CheckDI(isa<DIGlobalVariable>(S), "invalid declaration", &N, S);
1792}
1793
1794void Verifier::visitDINamespace(const DINamespace &N) {
1795 CheckDI(N.getTag() == dwarf::DW_TAG_namespace, "invalid tag", &N);
1796 if (auto *S = N.getRawScope())
1797 CheckDI(isa<DIScope>(S), "invalid scope ref", &N, S);
1798}
1799
1800void Verifier::visitDIMacro(const DIMacro &N) {
1801 CheckDI(N.getMacinfoType() == dwarf::DW_MACINFO_define ||
1802 N.getMacinfoType() == dwarf::DW_MACINFO_undef,
1803 "invalid macinfo type", &N);
1804 CheckDI(!N.getName().empty(), "anonymous macro", &N);
1805 if (!N.getValue().empty()) {
1806 assert(N.getValue().data()[0] != ' ' && "Macro value has a space prefix");
1807 }
1808}
1809
1810void Verifier::visitDIMacroFile(const DIMacroFile &N) {
1811 CheckDI(N.getMacinfoType() == dwarf::DW_MACINFO_start_file,
1812 "invalid macinfo type", &N);
1813 if (auto *F = N.getRawFile())
1814 CheckDI(isa<DIFile>(F), "invalid file", &N, F);
1815
1816 if (auto *Array = N.getRawElements()) {
1817 CheckDI(isa<MDTuple>(Array), "invalid macro list", &N, Array);
1818 for (Metadata *Op : N.getElements()->operands()) {
1819 CheckDI(Op && isa<DIMacroNode>(Op), "invalid macro ref", &N, Op);
1820 }
1821 }
1822}
1823
1824void Verifier::visitDIModule(const DIModule &N) {
1825 CheckDI(N.getTag() == dwarf::DW_TAG_module, "invalid tag", &N);
1826 CheckDI(!N.getName().empty(), "anonymous module", &N);
1827}
1828
1829void Verifier::visitDITemplateParameter(const DITemplateParameter &N) {
1830 CheckDI(isType(N.getRawType()), "invalid type ref", &N, N.getRawType());
1831}
1832
1833void Verifier::visitDITemplateTypeParameter(const DITemplateTypeParameter &N) {
1834 visitDITemplateParameter(N);
1835
1836 CheckDI(N.getTag() == dwarf::DW_TAG_template_type_parameter, "invalid tag",
1837 &N);
1838}
1839
1840void Verifier::visitDITemplateValueParameter(
1841 const DITemplateValueParameter &N) {
1842 visitDITemplateParameter(N);
1843
1844 CheckDI(N.getTag() == dwarf::DW_TAG_template_value_parameter ||
1845 N.getTag() == dwarf::DW_TAG_GNU_template_template_param ||
1846 N.getTag() == dwarf::DW_TAG_GNU_template_parameter_pack,
1847 "invalid tag", &N);
1848}
1849
1850void Verifier::visitDIVariable(const DIVariable &N) {
1851 if (auto *S = N.getRawScope())
1852 CheckDI(isa<DIScope>(S), "invalid scope", &N, S);
1853 if (auto *F = N.getRawFile())
1854 CheckDI(isa<DIFile>(F), "invalid file", &N, F);
1855}
1856
1857void Verifier::visitDIGlobalVariable(const DIGlobalVariable &N) {
1858 // Checks common to all variables.
1859 visitDIVariable(N);
1860
1861 CheckDI(N.getTag() == dwarf::DW_TAG_variable, "invalid tag", &N);
1862 CheckDI(isType(N.getRawType()), "invalid type ref", &N, N.getRawType());
1863 // Check only if the global variable is not an extern
1864 if (N.isDefinition())
1865 CheckDI(N.getType(), "missing global variable type", &N);
1866 if (auto *Member = N.getRawStaticDataMemberDeclaration()) {
1868 "invalid static data member declaration", &N, Member);
1869 }
1870}
1871
1872void Verifier::visitDILocalVariable(const DILocalVariable &N) {
1873 // Checks common to all variables.
1874 visitDIVariable(N);
1875
1876 CheckDI(isType(N.getRawType()), "invalid type ref", &N, N.getRawType());
1877 CheckDI(N.getTag() == dwarf::DW_TAG_variable, "invalid tag", &N);
1878 CheckDI(N.getRawScope() && isa<DILocalScope>(N.getRawScope()),
1879 "local variable requires a valid scope", &N, N.getRawScope());
1880 if (auto Ty = N.getType())
1881 CheckDI(!isa<DISubroutineType>(Ty), "invalid type", &N, N.getType());
1882}
1883
1884void Verifier::visitDIAssignID(const DIAssignID &N) {
1885 CheckDI(!N.getNumOperands(), "DIAssignID has no arguments", &N);
1886 CheckDI(N.isDistinct(), "DIAssignID must be distinct", &N);
1887}
1888
1889void Verifier::visitDILabel(const DILabel &N) {
1890 if (auto *S = N.getRawScope())
1891 CheckDI(isa<DIScope>(S), "invalid scope", &N, S);
1892 if (auto *F = N.getRawFile())
1893 CheckDI(isa<DIFile>(F), "invalid file", &N, F);
1894
1895 CheckDI(N.getTag() == dwarf::DW_TAG_label, "invalid tag", &N);
1896 CheckDI(N.getRawScope() && isa<DILocalScope>(N.getRawScope()),
1897 "label requires a valid scope", &N, N.getRawScope());
1898}
1899
1900void Verifier::visitDIExpression(const DIExpression &N) {
1901 CheckDI(N.isValid(), "invalid expression", &N);
1902}
1903
1904void Verifier::visitDIGlobalVariableExpression(
1905 const DIGlobalVariableExpression &GVE) {
1906 CheckDI(GVE.getVariable(), "missing variable");
1907 if (auto *Var = GVE.getVariable())
1908 visitDIGlobalVariable(*Var);
1909 if (auto *Expr = GVE.getExpression()) {
1910 visitDIExpression(*Expr);
1911 if (auto Fragment = Expr->getFragmentInfo())
1912 verifyFragmentExpression(*GVE.getVariable(), *Fragment, &GVE);
1913 }
1914}
1915
1916void Verifier::visitDIObjCProperty(const DIObjCProperty &N) {
1917 CheckDI(N.getTag() == dwarf::DW_TAG_APPLE_property, "invalid tag", &N);
1918 if (auto *T = N.getRawType())
1919 CheckDI(isType(T), "invalid type ref", &N, T);
1920 if (auto *F = N.getRawFile())
1921 CheckDI(isa<DIFile>(F), "invalid file", &N, F);
1922}
1923
1924void Verifier::visitDIImportedEntity(const DIImportedEntity &N) {
1925 CheckDI(N.getTag() == dwarf::DW_TAG_imported_module ||
1926 N.getTag() == dwarf::DW_TAG_imported_declaration,
1927 "invalid tag", &N);
1928 if (auto *S = N.getRawScope())
1929 CheckDI(isa<DIScope>(S), "invalid scope for imported entity", &N, S);
1930 CheckDI(isDINode(N.getRawEntity()), "invalid imported entity", &N,
1931 N.getRawEntity());
1932}
1933
1934void Verifier::visitComdat(const Comdat &C) {
1935 // In COFF the Module is invalid if the GlobalValue has private linkage.
1936 // Entities with private linkage don't have entries in the symbol table.
1937 if (TT.isOSBinFormatCOFF())
1938 if (const GlobalValue *GV = M.getNamedValue(C.getName()))
1939 Check(!GV->hasPrivateLinkage(), "comdat global value has private linkage",
1940 GV);
1941}
1942
1943void Verifier::visitModuleIdents() {
1944 const NamedMDNode *Idents = M.getNamedMetadata("llvm.ident");
1945 if (!Idents)
1946 return;
1947
1948 // llvm.ident takes a list of metadata entry. Each entry has only one string.
1949 // Scan each llvm.ident entry and make sure that this requirement is met.
1950 for (const MDNode *N : Idents->operands()) {
1951 Check(N->getNumOperands() == 1,
1952 "incorrect number of operands in llvm.ident metadata", N);
1953 Check(dyn_cast_or_null<MDString>(N->getOperand(0)),
1954 ("invalid value for llvm.ident metadata entry operand"
1955 "(the operand should be a string)"),
1956 N->getOperand(0));
1957 }
1958}
1959
1960void Verifier::visitModuleCommandLines() {
1961 const NamedMDNode *CommandLines = M.getNamedMetadata("llvm.commandline");
1962 if (!CommandLines)
1963 return;
1964
1965 // llvm.commandline takes a list of metadata entry. Each entry has only one
1966 // string. Scan each llvm.commandline entry and make sure that this
1967 // requirement is met.
1968 for (const MDNode *N : CommandLines->operands()) {
1969 Check(N->getNumOperands() == 1,
1970 "incorrect number of operands in llvm.commandline metadata", N);
1971 Check(dyn_cast_or_null<MDString>(N->getOperand(0)),
1972 ("invalid value for llvm.commandline metadata entry operand"
1973 "(the operand should be a string)"),
1974 N->getOperand(0));
1975 }
1976}
1977
1978void Verifier::visitModuleErrnoTBAA() {
1979 const NamedMDNode *ErrnoTBAA = M.getNamedMetadata("llvm.errno.tbaa");
1980 if (!ErrnoTBAA)
1981 return;
1982
1983 Check(ErrnoTBAA->getNumOperands() >= 1,
1984 "llvm.errno.tbaa must have at least one operand", ErrnoTBAA);
1985
1986 for (const MDNode *N : ErrnoTBAA->operands())
1987 TBAAVerifyHelper.visitTBAAMetadata(nullptr, N);
1988}
1989
1990void Verifier::visitModuleFlags() {
1991 const NamedMDNode *Flags = M.getModuleFlagsMetadata();
1992 if (!Flags) return;
1993
1994 // Scan each flag, and track the flags and requirements.
1995 DenseMap<const MDString*, const MDNode*> SeenIDs;
1996 SmallVector<const MDNode*, 16> Requirements;
1997 uint64_t PAuthABIPlatform = -1;
1998 uint64_t PAuthABIVersion = -1;
1999 for (const MDNode *MDN : Flags->operands()) {
2000 visitModuleFlag(MDN, SeenIDs, Requirements);
2001 if (MDN->getNumOperands() != 3)
2002 continue;
2003 if (const auto *FlagName = dyn_cast_or_null<MDString>(MDN->getOperand(1))) {
2004 if (FlagName->getString() == "aarch64-elf-pauthabi-platform") {
2005 if (const auto *PAP =
2007 PAuthABIPlatform = PAP->getZExtValue();
2008 } else if (FlagName->getString() == "aarch64-elf-pauthabi-version") {
2009 if (const auto *PAV =
2011 PAuthABIVersion = PAV->getZExtValue();
2012 }
2013 }
2014 }
2015
2016 if ((PAuthABIPlatform == uint64_t(-1)) != (PAuthABIVersion == uint64_t(-1)))
2017 CheckFailed("either both or no 'aarch64-elf-pauthabi-platform' and "
2018 "'aarch64-elf-pauthabi-version' module flags must be present");
2019
2020 // Validate that the requirements in the module are valid.
2021 for (const MDNode *Requirement : Requirements) {
2022 const MDString *Flag = cast<MDString>(Requirement->getOperand(0));
2023 const Metadata *ReqValue = Requirement->getOperand(1);
2024
2025 const MDNode *Op = SeenIDs.lookup(Flag);
2026 if (!Op) {
2027 CheckFailed("invalid requirement on flag, flag is not present in module",
2028 Flag);
2029 continue;
2030 }
2031
2032 if (Op->getOperand(2) != ReqValue) {
2033 CheckFailed(("invalid requirement on flag, "
2034 "flag does not have the required value"),
2035 Flag);
2036 continue;
2037 }
2038 }
2039}
2040
2041void
2042Verifier::visitModuleFlag(const MDNode *Op,
2043 DenseMap<const MDString *, const MDNode *> &SeenIDs,
2044 SmallVectorImpl<const MDNode *> &Requirements) {
2045 // Each module flag should have three arguments, the merge behavior (a
2046 // constant int), the flag ID (an MDString), and the value.
2047 Check(Op->getNumOperands() == 3,
2048 "incorrect number of operands in module flag", Op);
2049 Module::ModFlagBehavior MFB;
2050 if (!Module::isValidModFlagBehavior(Op->getOperand(0), MFB)) {
2052 "invalid behavior operand in module flag (expected constant integer)",
2053 Op->getOperand(0));
2054 Check(false,
2055 "invalid behavior operand in module flag (unexpected constant)",
2056 Op->getOperand(0));
2057 }
2058 MDString *ID = dyn_cast_or_null<MDString>(Op->getOperand(1));
2059 Check(ID, "invalid ID operand in module flag (expected metadata string)",
2060 Op->getOperand(1));
2061
2062 // Check the values for behaviors with additional requirements.
2063 switch (MFB) {
2064 case Module::Error:
2065 case Module::Warning:
2066 case Module::Override:
2067 // These behavior types accept any value.
2068 break;
2069
2070 case Module::Min: {
2071 auto *V = mdconst::dyn_extract_or_null<ConstantInt>(Op->getOperand(2));
2072 Check(V && V->getValue().isNonNegative(),
2073 "invalid value for 'min' module flag (expected constant non-negative "
2074 "integer)",
2075 Op->getOperand(2));
2076 break;
2077 }
2078
2079 case Module::Max: {
2081 "invalid value for 'max' module flag (expected constant integer)",
2082 Op->getOperand(2));
2083 break;
2084 }
2085
2086 case Module::Require: {
2087 // The value should itself be an MDNode with two operands, a flag ID (an
2088 // MDString), and a value.
2089 MDNode *Value = dyn_cast<MDNode>(Op->getOperand(2));
2090 Check(Value && Value->getNumOperands() == 2,
2091 "invalid value for 'require' module flag (expected metadata pair)",
2092 Op->getOperand(2));
2093 Check(isa<MDString>(Value->getOperand(0)),
2094 ("invalid value for 'require' module flag "
2095 "(first value operand should be a string)"),
2096 Value->getOperand(0));
2097
2098 // Append it to the list of requirements, to check once all module flags are
2099 // scanned.
2100 Requirements.push_back(Value);
2101 break;
2102 }
2103
2104 case Module::Append:
2105 case Module::AppendUnique: {
2106 // These behavior types require the operand be an MDNode.
2107 Check(isa<MDNode>(Op->getOperand(2)),
2108 "invalid value for 'append'-type module flag "
2109 "(expected a metadata node)",
2110 Op->getOperand(2));
2111 break;
2112 }
2113 }
2114
2115 // Unless this is a "requires" flag, check the ID is unique.
2116 if (MFB != Module::Require) {
2117 bool Inserted = SeenIDs.insert(std::make_pair(ID, Op)).second;
2118 Check(Inserted,
2119 "module flag identifiers must be unique (or of 'require' type)", ID);
2120 }
2121
2122 if (ID->getString() == "wchar_size") {
2123 ConstantInt *Value
2125 Check(Value, "wchar_size metadata requires constant integer argument");
2126 }
2127
2128 if (ID->getString() == "Linker Options") {
2129 // If the llvm.linker.options named metadata exists, we assume that the
2130 // bitcode reader has upgraded the module flag. Otherwise the flag might
2131 // have been created by a client directly.
2132 Check(M.getNamedMetadata("llvm.linker.options"),
2133 "'Linker Options' named metadata no longer supported");
2134 }
2135
2136 if (ID->getString() == "SemanticInterposition") {
2137 ConstantInt *Value =
2139 Check(Value,
2140 "SemanticInterposition metadata requires constant integer argument");
2141 }
2142
2143 if (ID->getString() == "CG Profile") {
2144 for (const MDOperand &MDO : cast<MDNode>(Op->getOperand(2))->operands())
2145 visitModuleFlagCGProfileEntry(MDO);
2146 }
2147}
2148
2149void Verifier::visitModuleFlagCGProfileEntry(const MDOperand &MDO) {
2150 auto CheckFunction = [&](const MDOperand &FuncMDO) {
2151 if (!FuncMDO)
2152 return;
2153 auto F = dyn_cast<ValueAsMetadata>(FuncMDO);
2154 Check(F && isa<Function>(F->getValue()->stripPointerCasts()),
2155 "expected a Function or null", FuncMDO);
2156 };
2157 auto Node = dyn_cast_or_null<MDNode>(MDO);
2158 Check(Node && Node->getNumOperands() == 3, "expected a MDNode triple", MDO);
2159 CheckFunction(Node->getOperand(0));
2160 CheckFunction(Node->getOperand(1));
2161 auto Count = dyn_cast_or_null<ConstantAsMetadata>(Node->getOperand(2));
2162 Check(Count && Count->getType()->isIntegerTy(),
2163 "expected an integer constant", Node->getOperand(2));
2164}
2165
// Verify that every attribute in \p Attrs is structurally well-formed for its
// kind; \p V is used only in diagnostics.
void Verifier::verifyAttributeTypes(AttributeSet Attrs, const Value *V) {
  for (Attribute A : Attrs) {

    if (A.isStringAttribute()) {
      // For string attributes declared as "strbool" in Attributes.td, the
      // value must be empty, "true", or "false". Including Attributes.inc
      // below expands ATTRIBUTE_STRBOOL once per known strbool attribute,
      // producing one `if` check each; other entries expand to nothing.
#define GET_ATTR_NAMES
#define ATTRIBUTE_ENUM(ENUM_NAME, DISPLAY_NAME)
#define ATTRIBUTE_STRBOOL(ENUM_NAME, DISPLAY_NAME)                             \
  if (A.getKindAsString() == #DISPLAY_NAME) {                                  \
    auto V = A.getValueAsString();                                             \
    if (!(V.empty() || V == "true" || V == "false"))                           \
      CheckFailed("invalid value for '" #DISPLAY_NAME "' attribute: " + V +    \
                  "");                                                         \
  }

#include "llvm/IR/Attributes.inc"
      continue;
    }

    // Enum/int attributes: the presence of an integer argument must agree
    // with whether the attribute kind takes one.
    if (A.isIntAttribute() != Attribute::isIntAttrKind(A.getKindAsEnum())) {
      CheckFailed("Attribute '" + A.getAsString() + "' should have an Argument",
                  V);
      return;
    }
  }
}
2191
2192// VerifyParameterAttrs - Check the given attributes for an argument or return
2193// value of the specified type. The value V is printed in error messages.
2194void Verifier::verifyParameterAttrs(AttributeSet Attrs, Type *Ty,
2195 const Value *V) {
2196 if (!Attrs.hasAttributes())
2197 return;
2198
2199 verifyAttributeTypes(Attrs, V);
2200
2201 for (Attribute Attr : Attrs)
2202 Check(Attr.isStringAttribute() ||
2203 Attribute::canUseAsParamAttr(Attr.getKindAsEnum()),
2204 "Attribute '" + Attr.getAsString() + "' does not apply to parameters",
2205 V);
2206
2207 if (Attrs.hasAttribute(Attribute::ImmArg)) {
2208 unsigned AttrCount =
2209 Attrs.getNumAttributes() - Attrs.hasAttribute(Attribute::Range);
2210 Check(AttrCount == 1,
2211 "Attribute 'immarg' is incompatible with other attributes except the "
2212 "'range' attribute",
2213 V);
2214 }
2215
2216 // Check for mutually incompatible attributes. Only inreg is compatible with
2217 // sret.
2218 unsigned AttrCount = 0;
2219 AttrCount += Attrs.hasAttribute(Attribute::ByVal);
2220 AttrCount += Attrs.hasAttribute(Attribute::InAlloca);
2221 AttrCount += Attrs.hasAttribute(Attribute::Preallocated);
2222 AttrCount += Attrs.hasAttribute(Attribute::StructRet) ||
2223 Attrs.hasAttribute(Attribute::InReg);
2224 AttrCount += Attrs.hasAttribute(Attribute::Nest);
2225 AttrCount += Attrs.hasAttribute(Attribute::ByRef);
2226 Check(AttrCount <= 1,
2227 "Attributes 'byval', 'inalloca', 'preallocated', 'inreg', 'nest', "
2228 "'byref', and 'sret' are incompatible!",
2229 V);
2230
2231 Check(!(Attrs.hasAttribute(Attribute::InAlloca) &&
2232 Attrs.hasAttribute(Attribute::ReadOnly)),
2233 "Attributes "
2234 "'inalloca and readonly' are incompatible!",
2235 V);
2236
2237 Check(!(Attrs.hasAttribute(Attribute::StructRet) &&
2238 Attrs.hasAttribute(Attribute::Returned)),
2239 "Attributes "
2240 "'sret and returned' are incompatible!",
2241 V);
2242
2243 Check(!(Attrs.hasAttribute(Attribute::ZExt) &&
2244 Attrs.hasAttribute(Attribute::SExt)),
2245 "Attributes "
2246 "'zeroext and signext' are incompatible!",
2247 V);
2248
2249 Check(!(Attrs.hasAttribute(Attribute::ReadNone) &&
2250 Attrs.hasAttribute(Attribute::ReadOnly)),
2251 "Attributes "
2252 "'readnone and readonly' are incompatible!",
2253 V);
2254
2255 Check(!(Attrs.hasAttribute(Attribute::ReadNone) &&
2256 Attrs.hasAttribute(Attribute::WriteOnly)),
2257 "Attributes "
2258 "'readnone and writeonly' are incompatible!",
2259 V);
2260
2261 Check(!(Attrs.hasAttribute(Attribute::ReadOnly) &&
2262 Attrs.hasAttribute(Attribute::WriteOnly)),
2263 "Attributes "
2264 "'readonly and writeonly' are incompatible!",
2265 V);
2266
2267 Check(!(Attrs.hasAttribute(Attribute::NoInline) &&
2268 Attrs.hasAttribute(Attribute::AlwaysInline)),
2269 "Attributes "
2270 "'noinline and alwaysinline' are incompatible!",
2271 V);
2272
2273 Check(!(Attrs.hasAttribute(Attribute::Writable) &&
2274 Attrs.hasAttribute(Attribute::ReadNone)),
2275 "Attributes writable and readnone are incompatible!", V);
2276
2277 Check(!(Attrs.hasAttribute(Attribute::Writable) &&
2278 Attrs.hasAttribute(Attribute::ReadOnly)),
2279 "Attributes writable and readonly are incompatible!", V);
2280
2281 AttributeMask IncompatibleAttrs = AttributeFuncs::typeIncompatible(Ty, Attrs);
2282 for (Attribute Attr : Attrs) {
2283 if (!Attr.isStringAttribute() &&
2284 IncompatibleAttrs.contains(Attr.getKindAsEnum())) {
2285 CheckFailed("Attribute '" + Attr.getAsString() +
2286 "' applied to incompatible type!", V);
2287 return;
2288 }
2289 }
2290
2291 if (isa<PointerType>(Ty)) {
2292 if (Attrs.hasAttribute(Attribute::Alignment)) {
2293 Align AttrAlign = Attrs.getAlignment().valueOrOne();
2294 Check(AttrAlign.value() <= Value::MaximumAlignment,
2295 "huge alignment values are unsupported", V);
2296 }
2297 if (Attrs.hasAttribute(Attribute::ByVal)) {
2298 Type *ByValTy = Attrs.getByValType();
2299 SmallPtrSet<Type *, 4> Visited;
2300 Check(ByValTy->isSized(&Visited),
2301 "Attribute 'byval' does not support unsized types!", V);
2302 // Check if it is or contains a target extension type that disallows being
2303 // used on the stack.
2305 "'byval' argument has illegal target extension type", V);
2306 Check(DL.getTypeAllocSize(ByValTy).getKnownMinValue() < (1ULL << 32),
2307 "huge 'byval' arguments are unsupported", V);
2308 }
2309 if (Attrs.hasAttribute(Attribute::ByRef)) {
2310 SmallPtrSet<Type *, 4> Visited;
2311 Check(Attrs.getByRefType()->isSized(&Visited),
2312 "Attribute 'byref' does not support unsized types!", V);
2313 Check(DL.getTypeAllocSize(Attrs.getByRefType()).getKnownMinValue() <
2314 (1ULL << 32),
2315 "huge 'byref' arguments are unsupported", V);
2316 }
2317 if (Attrs.hasAttribute(Attribute::InAlloca)) {
2318 SmallPtrSet<Type *, 4> Visited;
2319 Check(Attrs.getInAllocaType()->isSized(&Visited),
2320 "Attribute 'inalloca' does not support unsized types!", V);
2321 Check(DL.getTypeAllocSize(Attrs.getInAllocaType()).getKnownMinValue() <
2322 (1ULL << 32),
2323 "huge 'inalloca' arguments are unsupported", V);
2324 }
2325 if (Attrs.hasAttribute(Attribute::Preallocated)) {
2326 SmallPtrSet<Type *, 4> Visited;
2327 Check(Attrs.getPreallocatedType()->isSized(&Visited),
2328 "Attribute 'preallocated' does not support unsized types!", V);
2329 Check(
2330 DL.getTypeAllocSize(Attrs.getPreallocatedType()).getKnownMinValue() <
2331 (1ULL << 32),
2332 "huge 'preallocated' arguments are unsupported", V);
2333 }
2334 }
2335
2336 if (Attrs.hasAttribute(Attribute::Initializes)) {
2337 auto Inits = Attrs.getAttribute(Attribute::Initializes).getInitializes();
2338 Check(!Inits.empty(), "Attribute 'initializes' does not support empty list",
2339 V);
2341 "Attribute 'initializes' does not support unordered ranges", V);
2342 }
2343
2344 if (Attrs.hasAttribute(Attribute::NoFPClass)) {
2345 uint64_t Val = Attrs.getAttribute(Attribute::NoFPClass).getValueAsInt();
2346 Check(Val != 0, "Attribute 'nofpclass' must have at least one test bit set",
2347 V);
2348 Check((Val & ~static_cast<unsigned>(fcAllFlags)) == 0,
2349 "Invalid value for 'nofpclass' test mask", V);
2350 }
2351 if (Attrs.hasAttribute(Attribute::Range)) {
2352 const ConstantRange &CR =
2353 Attrs.getAttribute(Attribute::Range).getValueAsConstantRange();
2355 "Range bit width must match type bit width!", V);
2356 }
2357}
2358
2359void Verifier::checkUnsignedBaseTenFuncAttr(AttributeList Attrs, StringRef Attr,
2360 const Value *V) {
2361 if (Attrs.hasFnAttr(Attr)) {
2362 StringRef S = Attrs.getFnAttr(Attr).getValueAsString();
2363 unsigned N;
2364 if (S.getAsInteger(10, N))
2365 CheckFailed("\"" + Attr + "\" takes an unsigned integer: " + S, V);
2366 }
2367}
2368
2369// Check parameter attributes against a function type.
2370// The value V is printed in error messages.
2371void Verifier::verifyFunctionAttrs(FunctionType *FT, AttributeList Attrs,
2372 const Value *V, bool IsIntrinsic,
2373 bool IsInlineAsm) {
2374 if (Attrs.isEmpty())
2375 return;
2376
2377 if (AttributeListsVisited.insert(Attrs.getRawPointer()).second) {
2378 Check(Attrs.hasParentContext(Context),
2379 "Attribute list does not match Module context!", &Attrs, V);
2380 for (const auto &AttrSet : Attrs) {
2381 Check(!AttrSet.hasAttributes() || AttrSet.hasParentContext(Context),
2382 "Attribute set does not match Module context!", &AttrSet, V);
2383 for (const auto &A : AttrSet) {
2384 Check(A.hasParentContext(Context),
2385 "Attribute does not match Module context!", &A, V);
2386 }
2387 }
2388 }
2389
2390 bool SawNest = false;
2391 bool SawReturned = false;
2392 bool SawSRet = false;
2393 bool SawSwiftSelf = false;
2394 bool SawSwiftAsync = false;
2395 bool SawSwiftError = false;
2396
2397 // Verify return value attributes.
2398 AttributeSet RetAttrs = Attrs.getRetAttrs();
2399 for (Attribute RetAttr : RetAttrs)
2400 Check(RetAttr.isStringAttribute() ||
2401 Attribute::canUseAsRetAttr(RetAttr.getKindAsEnum()),
2402 "Attribute '" + RetAttr.getAsString() +
2403 "' does not apply to function return values",
2404 V);
2405
2406 unsigned MaxParameterWidth = 0;
2407 auto GetMaxParameterWidth = [&MaxParameterWidth](Type *Ty) {
2408 if (Ty->isVectorTy()) {
2409 if (auto *VT = dyn_cast<FixedVectorType>(Ty)) {
2410 unsigned Size = VT->getPrimitiveSizeInBits().getFixedValue();
2411 if (Size > MaxParameterWidth)
2412 MaxParameterWidth = Size;
2413 }
2414 }
2415 };
2416 GetMaxParameterWidth(FT->getReturnType());
2417 verifyParameterAttrs(RetAttrs, FT->getReturnType(), V);
2418
2419 // Verify parameter attributes.
2420 for (unsigned i = 0, e = FT->getNumParams(); i != e; ++i) {
2421 Type *Ty = FT->getParamType(i);
2422 AttributeSet ArgAttrs = Attrs.getParamAttrs(i);
2423
2424 if (!IsIntrinsic) {
2425 Check(!ArgAttrs.hasAttribute(Attribute::ImmArg),
2426 "immarg attribute only applies to intrinsics", V);
2427 if (!IsInlineAsm)
2428 Check(!ArgAttrs.hasAttribute(Attribute::ElementType),
2429 "Attribute 'elementtype' can only be applied to intrinsics"
2430 " and inline asm.",
2431 V);
2432 }
2433
2434 verifyParameterAttrs(ArgAttrs, Ty, V);
2435 GetMaxParameterWidth(Ty);
2436
2437 if (ArgAttrs.hasAttribute(Attribute::Nest)) {
2438 Check(!SawNest, "More than one parameter has attribute nest!", V);
2439 SawNest = true;
2440 }
2441
2442 if (ArgAttrs.hasAttribute(Attribute::Returned)) {
2443 Check(!SawReturned, "More than one parameter has attribute returned!", V);
2444 Check(Ty->canLosslesslyBitCastTo(FT->getReturnType()),
2445 "Incompatible argument and return types for 'returned' attribute",
2446 V);
2447 SawReturned = true;
2448 }
2449
2450 if (ArgAttrs.hasAttribute(Attribute::StructRet)) {
2451 Check(!SawSRet, "Cannot have multiple 'sret' parameters!", V);
2452 Check(i == 0 || i == 1,
2453 "Attribute 'sret' is not on first or second parameter!", V);
2454 SawSRet = true;
2455 }
2456
2457 if (ArgAttrs.hasAttribute(Attribute::SwiftSelf)) {
2458 Check(!SawSwiftSelf, "Cannot have multiple 'swiftself' parameters!", V);
2459 SawSwiftSelf = true;
2460 }
2461
2462 if (ArgAttrs.hasAttribute(Attribute::SwiftAsync)) {
2463 Check(!SawSwiftAsync, "Cannot have multiple 'swiftasync' parameters!", V);
2464 SawSwiftAsync = true;
2465 }
2466
2467 if (ArgAttrs.hasAttribute(Attribute::SwiftError)) {
2468 Check(!SawSwiftError, "Cannot have multiple 'swifterror' parameters!", V);
2469 SawSwiftError = true;
2470 }
2471
2472 if (ArgAttrs.hasAttribute(Attribute::InAlloca)) {
2473 Check(i == FT->getNumParams() - 1,
2474 "inalloca isn't on the last parameter!", V);
2475 }
2476 }
2477
2478 if (!Attrs.hasFnAttrs())
2479 return;
2480
2481 verifyAttributeTypes(Attrs.getFnAttrs(), V);
2482 for (Attribute FnAttr : Attrs.getFnAttrs())
2483 Check(FnAttr.isStringAttribute() ||
2484 Attribute::canUseAsFnAttr(FnAttr.getKindAsEnum()),
2485 "Attribute '" + FnAttr.getAsString() +
2486 "' does not apply to functions!",
2487 V);
2488
2489 Check(!(Attrs.hasFnAttr(Attribute::NoInline) &&
2490 Attrs.hasFnAttr(Attribute::AlwaysInline)),
2491 "Attributes 'noinline and alwaysinline' are incompatible!", V);
2492
2493 if (Attrs.hasFnAttr(Attribute::OptimizeNone)) {
2494 Check(Attrs.hasFnAttr(Attribute::NoInline),
2495 "Attribute 'optnone' requires 'noinline'!", V);
2496
2497 Check(!Attrs.hasFnAttr(Attribute::OptimizeForSize),
2498 "Attributes 'optsize and optnone' are incompatible!", V);
2499
2500 Check(!Attrs.hasFnAttr(Attribute::MinSize),
2501 "Attributes 'minsize and optnone' are incompatible!", V);
2502
2503 Check(!Attrs.hasFnAttr(Attribute::OptimizeForDebugging),
2504 "Attributes 'optdebug and optnone' are incompatible!", V);
2505 }
2506
2507 Check(!(Attrs.hasFnAttr(Attribute::SanitizeRealtime) &&
2508 Attrs.hasFnAttr(Attribute::SanitizeRealtimeBlocking)),
2509 "Attributes "
2510 "'sanitize_realtime and sanitize_realtime_blocking' are incompatible!",
2511 V);
2512
2513 if (Attrs.hasFnAttr(Attribute::OptimizeForDebugging)) {
2514 Check(!Attrs.hasFnAttr(Attribute::OptimizeForSize),
2515 "Attributes 'optsize and optdebug' are incompatible!", V);
2516
2517 Check(!Attrs.hasFnAttr(Attribute::MinSize),
2518 "Attributes 'minsize and optdebug' are incompatible!", V);
2519 }
2520
2521 Check(!Attrs.hasAttrSomewhere(Attribute::Writable) ||
2522 isModSet(Attrs.getMemoryEffects().getModRef(IRMemLocation::ArgMem)),
2523 "Attribute writable and memory without argmem: write are incompatible!",
2524 V);
2525
2526 if (Attrs.hasFnAttr("aarch64_pstate_sm_enabled")) {
2527 Check(!Attrs.hasFnAttr("aarch64_pstate_sm_compatible"),
2528 "Attributes 'aarch64_pstate_sm_enabled and "
2529 "aarch64_pstate_sm_compatible' are incompatible!",
2530 V);
2531 }
2532
2533 Check((Attrs.hasFnAttr("aarch64_new_za") + Attrs.hasFnAttr("aarch64_in_za") +
2534 Attrs.hasFnAttr("aarch64_inout_za") +
2535 Attrs.hasFnAttr("aarch64_out_za") +
2536 Attrs.hasFnAttr("aarch64_preserves_za") +
2537 Attrs.hasFnAttr("aarch64_za_state_agnostic")) <= 1,
2538 "Attributes 'aarch64_new_za', 'aarch64_in_za', 'aarch64_out_za', "
2539 "'aarch64_inout_za', 'aarch64_preserves_za' and "
2540 "'aarch64_za_state_agnostic' are mutually exclusive",
2541 V);
2542
2543 Check((Attrs.hasFnAttr("aarch64_new_zt0") +
2544 Attrs.hasFnAttr("aarch64_in_zt0") +
2545 Attrs.hasFnAttr("aarch64_inout_zt0") +
2546 Attrs.hasFnAttr("aarch64_out_zt0") +
2547 Attrs.hasFnAttr("aarch64_preserves_zt0") +
2548 Attrs.hasFnAttr("aarch64_za_state_agnostic")) <= 1,
2549 "Attributes 'aarch64_new_zt0', 'aarch64_in_zt0', 'aarch64_out_zt0', "
2550 "'aarch64_inout_zt0', 'aarch64_preserves_zt0' and "
2551 "'aarch64_za_state_agnostic' are mutually exclusive",
2552 V);
2553
2554 if (Attrs.hasFnAttr(Attribute::JumpTable)) {
2555 const GlobalValue *GV = cast<GlobalValue>(V);
2557 "Attribute 'jumptable' requires 'unnamed_addr'", V);
2558 }
2559
2560 if (auto Args = Attrs.getFnAttrs().getAllocSizeArgs()) {
2561 auto CheckParam = [&](StringRef Name, unsigned ParamNo) {
2562 if (ParamNo >= FT->getNumParams()) {
2563 CheckFailed("'allocsize' " + Name + " argument is out of bounds", V);
2564 return false;
2565 }
2566
2567 if (!FT->getParamType(ParamNo)->isIntegerTy()) {
2568 CheckFailed("'allocsize' " + Name +
2569 " argument must refer to an integer parameter",
2570 V);
2571 return false;
2572 }
2573
2574 return true;
2575 };
2576
2577 if (!CheckParam("element size", Args->first))
2578 return;
2579
2580 if (Args->second && !CheckParam("number of elements", *Args->second))
2581 return;
2582 }
2583
2584 if (Attrs.hasFnAttr(Attribute::AllocKind)) {
2585 AllocFnKind K = Attrs.getAllocKind();
2587 K & (AllocFnKind::Alloc | AllocFnKind::Realloc | AllocFnKind::Free);
2588 if (!is_contained(
2589 {AllocFnKind::Alloc, AllocFnKind::Realloc, AllocFnKind::Free},
2590 Type))
2591 CheckFailed(
2592 "'allockind()' requires exactly one of alloc, realloc, and free");
2593 if ((Type == AllocFnKind::Free) &&
2594 ((K & (AllocFnKind::Uninitialized | AllocFnKind::Zeroed |
2595 AllocFnKind::Aligned)) != AllocFnKind::Unknown))
2596 CheckFailed("'allockind(\"free\")' doesn't allow uninitialized, zeroed, "
2597 "or aligned modifiers.");
2598 AllocFnKind ZeroedUninit = AllocFnKind::Uninitialized | AllocFnKind::Zeroed;
2599 if ((K & ZeroedUninit) == ZeroedUninit)
2600 CheckFailed("'allockind()' can't be both zeroed and uninitialized");
2601 }
2602
2603 if (Attribute A = Attrs.getFnAttr("alloc-variant-zeroed"); A.isValid()) {
2604 StringRef S = A.getValueAsString();
2605 Check(!S.empty(), "'alloc-variant-zeroed' must not be empty");
2606 Function *Variant = M.getFunction(S);
2607 if (Variant) {
2608 Attribute Family = Attrs.getFnAttr("alloc-family");
2609 Attribute VariantFamily = Variant->getFnAttribute("alloc-family");
2610 if (Family.isValid())
2611 Check(VariantFamily.isValid() &&
2612 VariantFamily.getValueAsString() == Family.getValueAsString(),
2613 "'alloc-variant-zeroed' must name a function belonging to the "
2614 "same 'alloc-family'");
2615
2616 Check(Variant->hasFnAttribute(Attribute::AllocKind) &&
2617 (Variant->getFnAttribute(Attribute::AllocKind).getAllocKind() &
2618 AllocFnKind::Zeroed) != AllocFnKind::Unknown,
2619 "'alloc-variant-zeroed' must name a function with "
2620 "'allockind(\"zeroed\")'");
2621
2622 Check(FT == Variant->getFunctionType(),
2623 "'alloc-variant-zeroed' must name a function with the same "
2624 "signature");
2625
2626 if (const Function *F = dyn_cast<Function>(V))
2627 Check(F->getCallingConv() == Variant->getCallingConv(),
2628 "'alloc-variant-zeroed' must name a function with the same "
2629 "calling convention");
2630 }
2631 }
2632
2633 if (Attrs.hasFnAttr(Attribute::VScaleRange)) {
2634 unsigned VScaleMin = Attrs.getFnAttrs().getVScaleRangeMin();
2635 if (VScaleMin == 0)
2636 CheckFailed("'vscale_range' minimum must be greater than 0", V);
2637 else if (!isPowerOf2_32(VScaleMin))
2638 CheckFailed("'vscale_range' minimum must be power-of-two value", V);
2639 std::optional<unsigned> VScaleMax = Attrs.getFnAttrs().getVScaleRangeMax();
2640 if (VScaleMax && VScaleMin > VScaleMax)
2641 CheckFailed("'vscale_range' minimum cannot be greater than maximum", V);
2642 else if (VScaleMax && !isPowerOf2_32(*VScaleMax))
2643 CheckFailed("'vscale_range' maximum must be power-of-two value", V);
2644 }
2645
2646 if (Attribute FPAttr = Attrs.getFnAttr("frame-pointer"); FPAttr.isValid()) {
2647 StringRef FP = FPAttr.getValueAsString();
2648 if (FP != "all" && FP != "non-leaf" && FP != "none" && FP != "reserved" &&
2649 FP != "non-leaf-no-reserve")
2650 CheckFailed("invalid value for 'frame-pointer' attribute: " + FP, V);
2651 }
2652
2653 checkUnsignedBaseTenFuncAttr(Attrs, "patchable-function-prefix", V);
2654 checkUnsignedBaseTenFuncAttr(Attrs, "patchable-function-entry", V);
2655 if (Attrs.hasFnAttr("patchable-function-entry-section"))
2656 Check(!Attrs.getFnAttr("patchable-function-entry-section")
2657 .getValueAsString()
2658 .empty(),
2659 "\"patchable-function-entry-section\" must not be empty");
2660 checkUnsignedBaseTenFuncAttr(Attrs, "warn-stack-size", V);
2661
2662 if (auto A = Attrs.getFnAttr("sign-return-address"); A.isValid()) {
2663 StringRef S = A.getValueAsString();
2664 if (S != "none" && S != "all" && S != "non-leaf")
2665 CheckFailed("invalid value for 'sign-return-address' attribute: " + S, V);
2666 }
2667
2668 if (auto A = Attrs.getFnAttr("sign-return-address-key"); A.isValid()) {
2669 StringRef S = A.getValueAsString();
2670 if (S != "a_key" && S != "b_key")
2671 CheckFailed("invalid value for 'sign-return-address-key' attribute: " + S,
2672 V);
2673 if (auto AA = Attrs.getFnAttr("sign-return-address"); !AA.isValid()) {
2674 CheckFailed(
2675 "'sign-return-address-key' present without `sign-return-address`");
2676 }
2677 }
2678
2679 if (auto A = Attrs.getFnAttr("branch-target-enforcement"); A.isValid()) {
2680 StringRef S = A.getValueAsString();
2681 if (S != "" && S != "true" && S != "false")
2682 CheckFailed(
2683 "invalid value for 'branch-target-enforcement' attribute: " + S, V);
2684 }
2685
2686 if (auto A = Attrs.getFnAttr("branch-protection-pauth-lr"); A.isValid()) {
2687 StringRef S = A.getValueAsString();
2688 if (S != "" && S != "true" && S != "false")
2689 CheckFailed(
2690 "invalid value for 'branch-protection-pauth-lr' attribute: " + S, V);
2691 }
2692
2693 if (auto A = Attrs.getFnAttr("guarded-control-stack"); A.isValid()) {
2694 StringRef S = A.getValueAsString();
2695 if (S != "" && S != "true" && S != "false")
2696 CheckFailed("invalid value for 'guarded-control-stack' attribute: " + S,
2697 V);
2698 }
2699
2700 if (auto A = Attrs.getFnAttr("vector-function-abi-variant"); A.isValid()) {
2701 StringRef S = A.getValueAsString();
2702 const std::optional<VFInfo> Info = VFABI::tryDemangleForVFABI(S, FT);
2703 if (!Info)
2704 CheckFailed("invalid name for a VFABI variant: " + S, V);
2705 }
2706
2707 if (auto A = Attrs.getFnAttr("modular-format"); A.isValid()) {
2708 StringRef S = A.getValueAsString();
2710 S.split(Args, ',');
2711 Check(Args.size() >= 5,
2712 "modular-format attribute requires at least 5 arguments", V);
2713 unsigned FirstArgIdx;
2714 Check(!Args[2].getAsInteger(10, FirstArgIdx),
2715 "modular-format attribute first arg index is not an integer", V);
2716 unsigned UpperBound = FT->getNumParams() + (FT->isVarArg() ? 1 : 0);
2717 Check(FirstArgIdx > 0 && FirstArgIdx <= UpperBound,
2718 "modular-format attribute first arg index is out of bounds", V);
2719 }
2720
2721 if (auto A = Attrs.getFnAttr("target-features"); A.isValid()) {
2722 StringRef S = A.getValueAsString();
2723 if (!S.empty()) {
2724 for (auto FeatureFlag : split(S, ',')) {
2725 if (FeatureFlag.empty())
2726 CheckFailed(
2727 "target-features attribute should not contain an empty string");
2728 else
2729 Check(FeatureFlag[0] == '+' || FeatureFlag[0] == '-',
2730 "target feature '" + FeatureFlag +
2731 "' must start with a '+' or '-'",
2732 V);
2733 }
2734 }
2735 }
2736}
// Validate an "unknown" !prof attachment: it must carry exactly one extra
// operand, a non-empty string (by convention the name of the pass that
// dropped the profile data). Note each Check returns early on failure, so
// the dereference of PassName below only runs when it is non-null.
void Verifier::verifyUnknownProfileMetadata(MDNode *MD) {
  Check(MD->getNumOperands() == 2,
        "'unknown' !prof should have a single additional operand", MD);
  auto *PassName = dyn_cast<MDString>(MD->getOperand(1));
  Check(PassName != nullptr,
        "'unknown' !prof should have an additional operand of type "
        "string");
  Check(!PassName->getString().empty(),
        "the 'unknown' !prof operand should not be an empty string");
}
2747
// Validate function-level metadata attachments. Currently checks the shape of
// !prof (entry-count style or the "unknown" marker) and !kcfi_type (a single
// 32-bit integer constant) attachments; other kinds are ignored here.
void Verifier::verifyFunctionMetadata(
    ArrayRef<std::pair<unsigned, MDNode *>> MDs) {
  for (const auto &Pair : MDs) {
    if (Pair.first == LLVMContext::MD_prof) {
      MDNode *MD = Pair.second;
      Check(MD->getNumOperands() >= 2,
            "!prof annotations should have no less than 2 operands", MD);
      // We may have functions that are synthesized by the compiler, e.g. in
      // WPD, that we can't currently determine the entry count.
      if (MD->getOperand(0).equalsStr(
        verifyUnknownProfileMetadata(MD);
        continue;
      }

      // Check first operand.
      Check(MD->getOperand(0) != nullptr, "first operand should not be null",
            MD);
            "expected string with name of the !prof annotation", MD);
      MDString *MDS = cast<MDString>(MD->getOperand(0));
      StringRef ProfName = MDS->getString();
            "first operand should be 'function_entry_count'"
            " or 'synthetic_function_entry_count'",
            MD);

      // Check second operand.
      Check(MD->getOperand(1) != nullptr, "second operand should not be null",
            MD);
            "expected integer argument to function_entry_count", MD);
    } else if (Pair.first == LLVMContext::MD_kcfi_type) {
      MDNode *MD = Pair.second;
      Check(MD->getNumOperands() == 1,
            "!kcfi_type must have exactly one operand", MD);
      Check(MD->getOperand(0) != nullptr, "!kcfi_type operand must not be null",
            MD);
            "expected a constant operand for !kcfi_type", MD);
      Constant *C = cast<ConstantAsMetadata>(MD->getOperand(0))->getValue();
      Check(isa<ConstantInt>(C) && isa<IntegerType>(C->getType()),
            "expected a constant integer operand for !kcfi_type", MD);
            "expected a 32-bit integer constant operand for !kcfi_type", MD);
    }
  }
}
2797
// Walk a constant tree iteratively (explicit worklist, no recursion) and run
// the per-node checks on every ConstantExpr / ConstantPtrAuth reachable from
// EntryC. ConstantExprVisited deduplicates across calls, so each constant in
// the module is visited at most once.
void Verifier::visitConstantExprsRecursively(const Constant *EntryC) {
  // Leaves (no operands) have nothing recursive to check.
  if (EntryC->getNumOperands() == 0)
    return;

  if (!ConstantExprVisited.insert(EntryC).second)
    return;

  Stack.push_back(EntryC);

  while (!Stack.empty()) {
    const Constant *C = Stack.pop_back_val();

    // Check this constant expression.
    if (const auto *CE = dyn_cast<ConstantExpr>(C))
      visitConstantExpr(CE);

    if (const auto *CPA = dyn_cast<ConstantPtrAuth>(C))
      visitConstantPtrAuth(CPA);

    if (const auto *GV = dyn_cast<GlobalValue>(C)) {
      // Global Values get visited separately, but we do need to make sure
      // that the global value is in the correct module
      Check(GV->getParent() == &M, "Referencing global in another module!",
            EntryC, &M, GV, GV->getParent());
      continue;
    }

    // Visit all sub-expressions.
    for (const Use &U : C->operands()) {
      const auto *OpC = dyn_cast<Constant>(U);
      if (!OpC)
        continue;
      if (!ConstantExprVisited.insert(OpC).second)
        continue;
      Stack.push_back(OpC);
    }
  }
}
2837
2838void Verifier::visitConstantExpr(const ConstantExpr *CE) {
2839 if (CE->getOpcode() == Instruction::BitCast)
2840 Check(CastInst::castIsValid(Instruction::BitCast, CE->getOperand(0),
2841 CE->getType()),
2842 "Invalid bitcast", CE);
2843 else if (CE->getOpcode() == Instruction::PtrToAddr)
2844 checkPtrToAddr(CE->getOperand(0)->getType(), CE->getType(), *CE);
2845}
2846
// Validate the structural invariants of a ptrauth constant: the signed value
// is a pointer of the same type as its base, the key is i32, and the
// discriminator is i64.
void Verifier::visitConstantPtrAuth(const ConstantPtrAuth *CPA) {
  Check(CPA->getPointer()->getType()->isPointerTy(),
        "signed ptrauth constant base pointer must have pointer type");

  Check(CPA->getType() == CPA->getPointer()->getType(),
        "signed ptrauth constant must have same type as its base pointer");

  Check(CPA->getKey()->getBitWidth() == 32,
        "signed ptrauth constant key must be i32 constant integer");

        "signed ptrauth constant address discriminator must be a pointer");

  Check(CPA->getDiscriminator()->getBitWidth() == 64,
        "signed ptrauth constant discriminator must be i64 constant integer");

        "signed ptrauth constant deactivation symbol must be a pointer");

        "signed ptrauth constant deactivation symbol must be a global value "
        "or null");
}
2871
2872bool Verifier::verifyAttributeCount(AttributeList Attrs, unsigned Params) {
2873 // There shouldn't be more attribute sets than there are parameters plus the
2874 // function and return value.
2875 return Attrs.getNumAttrSets() <= Params + 2;
2876}
2877
// Cross-check an inline-asm call site against its parsed constraint string:
// indirect constraints need pointer operands (with elementtype), direct
// constraints must not carry elementtype, and label constraints are only
// legal on callbr, where their count must match the indirect destinations.
void Verifier::verifyInlineAsmCall(const CallBase &Call) {
  const InlineAsm *IA = cast<InlineAsm>(Call.getCalledOperand());
  unsigned ArgNo = 0;
  unsigned LabelNo = 0;
  for (const InlineAsm::ConstraintInfo &CI : IA->ParseConstraints()) {
    if (CI.Type == InlineAsm::isLabel) {
      ++LabelNo;
      continue;
    }

    // Only deal with constraints that correspond to call arguments.
    if (!CI.hasArg())
      continue;

    if (CI.isIndirect) {
      const Value *Arg = Call.getArgOperand(ArgNo);
      Check(Arg->getType()->isPointerTy(),
            "Operand for indirect constraint must have pointer type", &Call);

            "Operand for indirect constraint must have elementtype attribute",
            &Call);
    } else {
      Check(!Call.paramHasAttr(ArgNo, Attribute::ElementType),
            "Elementtype attribute can only be applied for indirect "
            "constraints",
            &Call);
    }

    // Only constraints with arguments consume an operand slot.
    ArgNo++;
  }

  if (auto *CallBr = dyn_cast<CallBrInst>(&Call)) {
    Check(LabelNo == CallBr->getNumIndirectDests(),
          "Number of label constraints does not match number of callbr dests",
          &Call);
  } else {
    Check(LabelNo == 0, "Label constraints can only be used with callbr",
          &Call);
  }
}
2919
2920/// Verify that statepoint intrinsic is well formed.
// Verify the fixed argument layout of a gc.statepoint call:
//   (id, num-patch-bytes, callee, num-call-args, flags, call-args...,
//    num-transition-args [must be 0], num-deopt-args [must be 0])
// and that its only value uses are gc.result / gc.relocate tied back to it.
void Verifier::verifyStatepoint(const CallBase &Call) {
  assert(Call.getIntrinsicID() == Intrinsic::experimental_gc_statepoint);

        "gc.statepoint must read and write all memory to preserve "
        "reordering restrictions required by safepoint semantics",
        Call);

  // Operand 1: number of bytes to reserve for patching; must be non-negative.
  const int64_t NumPatchBytes =
      cast<ConstantInt>(Call.getArgOperand(1))->getSExtValue();
  assert(isInt<32>(NumPatchBytes) && "NumPatchBytesV is an i32!");
  Check(NumPatchBytes >= 0,
        "gc.statepoint number of patchable bytes must be "
        "positive",
        Call);

  // Operand 2: the wrapped callee; its elementtype attribute carries the
  // callee's function type since pointers are opaque.
  Type *TargetElemType = Call.getParamElementType(2);
  Check(TargetElemType,
        "gc.statepoint callee argument must have elementtype attribute", Call);
  FunctionType *TargetFuncType = dyn_cast<FunctionType>(TargetElemType);
  Check(TargetFuncType,
        "gc.statepoint callee elementtype must be function type", Call);

  const int NumCallArgs = cast<ConstantInt>(Call.getArgOperand(3))->getZExtValue();
  Check(NumCallArgs >= 0,
        "gc.statepoint number of arguments to underlying call "
        "must be positive",
        Call);
  const int NumParams = (int)TargetFuncType->getNumParams();
  if (TargetFuncType->isVarArg()) {
    Check(NumCallArgs >= NumParams,
          "gc.statepoint mismatch in number of vararg call args", Call);

    // TODO: Remove this limitation
    Check(TargetFuncType->getReturnType()->isVoidTy(),
          "gc.statepoint doesn't support wrapping non-void "
          "vararg functions yet",
          Call);
  } else
    Check(NumCallArgs == NumParams,
          "gc.statepoint mismatch in number of call args", Call);

  const uint64_t Flags
    = cast<ConstantInt>(Call.getArgOperand(4))->getZExtValue();
  Check((Flags & ~(uint64_t)StatepointFlags::MaskAll) == 0,
        "unknown flag used in gc.statepoint flags argument", Call);

  // Verify that the types of the call parameter arguments match
  // the type of the wrapped callee.
  AttributeList Attrs = Call.getAttributes();
  for (int i = 0; i < NumParams; i++) {
    Type *ParamType = TargetFuncType->getParamType(i);
    Type *ArgType = Call.getArgOperand(5 + i)->getType();
    Check(ArgType == ParamType,
          "gc.statepoint call argument does not match wrapped "
          "function type",
          Call);

    if (TargetFuncType->isVarArg()) {
      AttributeSet ArgAttrs = Attrs.getParamAttrs(5 + i);
      Check(!ArgAttrs.hasAttribute(Attribute::StructRet),
            "Attribute 'sret' cannot be used for vararg call arguments!", Call);
    }
  }

  // Index of the last call argument; the transition- and deopt-count
  // operands follow immediately after.
  const int EndCallArgsInx = 4 + NumCallArgs;

  const Value *NumTransitionArgsV = Call.getArgOperand(EndCallArgsInx + 1);
  Check(isa<ConstantInt>(NumTransitionArgsV),
        "gc.statepoint number of transition arguments "
        "must be constant integer",
        Call);
  const int NumTransitionArgs =
      cast<ConstantInt>(NumTransitionArgsV)->getZExtValue();
  Check(NumTransitionArgs == 0,
        "gc.statepoint w/inline transition bundle is deprecated", Call);
  const int EndTransitionArgsInx = EndCallArgsInx + 1 + NumTransitionArgs;

  const Value *NumDeoptArgsV = Call.getArgOperand(EndTransitionArgsInx + 1);
  Check(isa<ConstantInt>(NumDeoptArgsV),
        "gc.statepoint number of deoptimization arguments "
        "must be constant integer",
        Call);
  const int NumDeoptArgs = cast<ConstantInt>(NumDeoptArgsV)->getZExtValue();
  Check(NumDeoptArgs == 0,
        "gc.statepoint w/inline deopt operands is deprecated", Call);

  // 5 fixed operands + call args + the two (zero-valued) count operands.
  const int ExpectedNumArgs = 7 + NumCallArgs;
  Check(ExpectedNumArgs == (int)Call.arg_size(),
        "gc.statepoint too many arguments", Call);

  // Check that the only uses of this gc.statepoint are gc.result or
  // gc.relocate calls which are tied to this statepoint and thus part
  // of the same statepoint sequence
  for (const User *U : Call.users()) {
    const CallInst *UserCall = dyn_cast<const CallInst>(U);
    Check(UserCall, "illegal use of statepoint token", Call, U);
    if (!UserCall)
      continue;
    Check(isa<GCRelocateInst>(UserCall) || isa<GCResultInst>(UserCall),
          "gc.result or gc.relocate are the only value uses "
          "of a gc.statepoint",
          Call, U);
    if (isa<GCResultInst>(UserCall)) {
      Check(UserCall->getArgOperand(0) == &Call,
            "gc.result connected to wrong gc.statepoint", Call, UserCall);
    } else if (isa<GCRelocateInst>(Call)) {
      // NOTE(review): this condition tests `Call` (the statepoint itself),
      // so this branch looks unreachable; presumably `UserCall` was
      // intended — confirm against upstream before changing, since the
      // relocate/statepoint link is also checked in visitGCRelocateInst.
      Check(UserCall->getArgOperand(0) == &Call,
            "gc.relocate connected to wrong gc.statepoint", Call, UserCall);
    }
  }

  // Note: It is legal for a single derived pointer to be listed multiple
  // times.  It's non-optimal, but it is legal.  It can also happen after
  // insertion if we strip a bitcast away.
  // Note: It is really tempting to check that each base is relocated and
  // that a derived pointer is never reused as a base pointer.  This turns
  // out to be problematic since optimizations run after safepoint insertion
  // can recognize equality properties that the insertion logic doesn't know
  // about.  See example statepoint.ll in the verifier subdirectory
}
3043
3044void Verifier::verifyFrameRecoverIndices() {
3045 for (auto &Counts : FrameEscapeInfo) {
3046 Function *F = Counts.first;
3047 unsigned EscapedObjectCount = Counts.second.first;
3048 unsigned MaxRecoveredIndex = Counts.second.second;
3049 Check(MaxRecoveredIndex <= EscapedObjectCount,
3050 "all indices passed to llvm.localrecover must be less than the "
3051 "number of arguments passed to llvm.localescape in the parent "
3052 "function",
3053 F);
3054 }
3055}
3056
3057static Instruction *getSuccPad(Instruction *Terminator) {
3058 BasicBlock *UnwindDest;
3059 if (auto *II = dyn_cast<InvokeInst>(Terminator))
3060 UnwindDest = II->getUnwindDest();
3061 else if (auto *CSI = dyn_cast<CatchSwitchInst>(Terminator))
3062 UnwindDest = CSI->getUnwindDest();
3063 else
3064 UnwindDest = cast<CleanupReturnInst>(Terminator)->getUnwindDest();
3065 return &*UnwindDest->getFirstNonPHIIt();
3066}
3067
// Detect cycles among sibling EH funclets: an EH pad must not (transitively)
// unwind to a pad that eventually unwinds back to it. SiblingFuncletInfo maps
// each pad to the terminator whose unwind edge leaves it; since each pad has
// at most one such successor, the walk below follows simple chains, using
// `Active` for the current chain and `Visited` to avoid re-walking pads.
void Verifier::verifySiblingFuncletUnwinds() {
  llvm::TimeTraceScope timeScope("Verifier verify sibling funclet unwinds");
  SmallPtrSet<Instruction *, 8> Visited;
  SmallPtrSet<Instruction *, 8> Active;
  for (const auto &Pair : SiblingFuncletInfo) {
    Instruction *PredPad = Pair.first;
    if (Visited.count(PredPad))
      continue;
    Active.insert(PredPad);
    Instruction *Terminator = Pair.second;
    do {
      Instruction *SuccPad = getSuccPad(Terminator);
      if (Active.count(SuccPad)) {
        // Found a cycle; report error
        Instruction *CyclePad = SuccPad;
        SmallVector<Instruction *, 8> CycleNodes;
        do {
          // Collect the pads (and their terminators, when distinct) that form
          // the cycle so the diagnostic can list them all.
          CycleNodes.push_back(CyclePad);
          Instruction *CycleTerminator = SiblingFuncletInfo[CyclePad];
          if (CycleTerminator != CyclePad)
            CycleNodes.push_back(CycleTerminator);
          CyclePad = getSuccPad(CycleTerminator);
        } while (CyclePad != SuccPad);
        Check(false, "EH pads can't handle each other's exceptions",
              ArrayRef<Instruction *>(CycleNodes));
      }
      // Don't re-walk a node we've already checked
      if (!Visited.insert(SuccPad).second)
        break;
      // Walk to this successor if it has a map entry.
      PredPad = SuccPad;
      auto TermI = SiblingFuncletInfo.find(PredPad);
      if (TermI == SiblingFuncletInfo.end())
        break;
      Terminator = TermI->second;
      Active.insert(PredPad);
    } while (true);
    // Each node only has one successor, so we've walked all the active
    // nodes' successors.
    Active.clear();
  }
}
3110
3111// visitFunction - Verify that a function is ok.
3112//
void Verifier::visitFunction(const Function &F) {
  // Run the checks shared with all global values (linkage, etc.) first.
  visitGlobalValue(F);

  // Check function arguments.
  FunctionType *FT = F.getFunctionType();
  unsigned NumArgs = F.arg_size();

  Check(&Context == &F.getContext(),
        "Function context does not match Module context!", &F);

  Check(!F.hasCommonLinkage(), "Functions may not have common linkage", &F);
  Check(FT->getNumParams() == NumArgs,
        "# formal arguments must match # of arguments for function type!", &F,
        FT);
  Check(F.getReturnType()->isFirstClassType() ||
            F.getReturnType()->isVoidTy() || F.getReturnType()->isStructTy(),
        "Functions cannot return aggregate values!", &F);

  Check(!F.hasStructRetAttr() || F.getReturnType()->isVoidTy(),
        "Invalid struct return type!", &F);

  if (MaybeAlign A = F.getAlign()) {
    Check(A->value() <= Value::MaximumAlignment,
          "huge alignment values are unsupported", &F);
  }

  AttributeList Attrs = F.getAttributes();

  Check(verifyAttributeCount(Attrs, FT->getNumParams()),
        "Attribute after last parameter!", &F);

  bool IsIntrinsic = F.isIntrinsic();

  // Check function attributes.
  verifyFunctionAttrs(FT, Attrs, &F, IsIntrinsic, /* IsInlineAsm */ false);

  // On function declarations/definitions, we do not support the builtin
  // attribute. We do not check this in VerifyFunctionAttrs since that is
  // checking for Attributes that can/can not ever be on functions.
  Check(!Attrs.hasFnAttr(Attribute::Builtin),
        "Attribute 'builtin' can only be applied to a callsite.", &F);

  Check(!Attrs.hasAttrSomewhere(Attribute::ElementType),
        "Attribute 'elementtype' can only be applied to a callsite.", &F);

  // Naked functions have no usable stack frame, so their arguments cannot be
  // referenced from IR.
  if (Attrs.hasFnAttr(Attribute::Naked))
    for (const Argument &Arg : F.args())
      Check(Arg.use_empty(), "cannot use argument of naked function", &Arg);

  // Check that this function meets the restrictions on this calling convention.
  // Sometimes varargs is used for perfectly forwarding thunks, so some of these
  // restrictions can be lifted.
  switch (F.getCallingConv()) {
  default:
  case CallingConv::C:
    break;
  case CallingConv::X86_INTR: {
    Check(F.arg_empty() || Attrs.hasParamAttr(0, Attribute::ByVal),
          "Calling convention parameter requires byval", &F);
    break;
  }
  case CallingConv::AMDGPU_KERNEL:
  case CallingConv::SPIR_KERNEL:
  case CallingConv::AMDGPU_CS_Chain:
  case CallingConv::AMDGPU_CS_ChainPreserve:
    Check(F.getReturnType()->isVoidTy(),
          "Calling convention requires void return type", &F);
    [[fallthrough]];
  case CallingConv::AMDGPU_VS:
  case CallingConv::AMDGPU_HS:
  case CallingConv::AMDGPU_GS:
  case CallingConv::AMDGPU_PS:
  case CallingConv::AMDGPU_CS:
    Check(!F.hasStructRetAttr(), "Calling convention does not allow sret", &F);
    if (F.getCallingConv() != CallingConv::SPIR_KERNEL) {
      const unsigned StackAS = DL.getAllocaAddrSpace();
      unsigned i = 0;
      for (const Argument &Arg : F.args()) {
        Check(!Attrs.hasParamAttr(i, Attribute::ByVal),
              "Calling convention disallows byval", &F);
        Check(!Attrs.hasParamAttr(i, Attribute::Preallocated),
              "Calling convention disallows preallocated", &F);
        Check(!Attrs.hasParamAttr(i, Attribute::InAlloca),
              "Calling convention disallows inalloca", &F);

        if (Attrs.hasParamAttr(i, Attribute::ByRef)) {
          // FIXME: Should also disallow LDS and GDS, but we don't have the enum
          // value here.
          Check(Arg.getType()->getPointerAddressSpace() != StackAS,
                "Calling convention disallows stack byref", &F);
        }

        ++i;
      }
    }

    [[fallthrough]];
  case CallingConv::Fast:
  case CallingConv::Cold:
  case CallingConv::Intel_OCL_BI:
  case CallingConv::PTX_Kernel:
  case CallingConv::PTX_Device:
    Check(!F.isVarArg(),
          "Calling convention does not support varargs or "
          "perfect forwarding!",
          &F);
    break;
  case CallingConv::AMDGPU_Gfx_WholeWave:
    Check(!F.arg_empty() && F.arg_begin()->getType()->isIntegerTy(1),
          "Calling convention requires first argument to be i1", &F);
    Check(!F.arg_begin()->hasInRegAttr(),
          "Calling convention requires first argument to not be inreg", &F);
    Check(!F.isVarArg(),
          "Calling convention does not support varargs or "
          "perfect forwarding!",
          &F);
    break;
  }

  // Check that the argument values match the function type for this function...
  unsigned i = 0;
  for (const Argument &Arg : F.args()) {
    Check(Arg.getType() == FT->getParamType(i),
          "Argument value does not match function argument type!", &Arg,
          FT->getParamType(i));
    Check(Arg.getType()->isFirstClassType(),
          "Function arguments must have first-class types!", &Arg);
    if (!IsIntrinsic) {
      // Metadata, token, and x86_amx parameter types are reserved for
      // intrinsics.
      Check(!Arg.getType()->isMetadataTy(),
            "Function takes metadata but isn't an intrinsic", &Arg, &F);
      Check(!Arg.getType()->isTokenLikeTy(),
            "Function takes token but isn't an intrinsic", &Arg, &F);
      Check(!Arg.getType()->isX86_AMXTy(),
            "Function takes x86_amx but isn't an intrinsic", &Arg, &F);
    }

    // Check that swifterror argument is only used by loads and stores.
    if (Attrs.hasParamAttr(i, Attribute::SwiftError)) {
      verifySwiftErrorValue(&Arg);
    }
    ++i;
  }

  if (!IsIntrinsic) {
    Check(!F.getReturnType()->isTokenLikeTy(),
          "Function returns a token but isn't an intrinsic", &F);
    Check(!F.getReturnType()->isX86_AMXTy(),
          "Function returns a x86_amx but isn't an intrinsic", &F);
  }

  // Get the function metadata attachments.
  F.getAllMetadata(MDs);
  assert(F.hasMetadata() != MDs.empty() && "Bit out-of-sync");
  verifyFunctionMetadata(MDs);

  // Check validity of the personality function
  if (F.hasPersonalityFn()) {
    auto *Per = dyn_cast<Function>(F.getPersonalityFn()->stripPointerCasts());
    if (Per)
      Check(Per->getParent() == F.getParent(),
            "Referencing personality function in another module!", &F,
            F.getParent(), Per, Per->getParent());
  }

  // EH funclet coloring can be expensive, recompute on-demand
  BlockEHFuncletColors.clear();

  if (F.isMaterializable()) {
    // Function has a body somewhere we can't see.
    Check(MDs.empty(), "unmaterialized function cannot have metadata", &F,
          MDs.empty() ? nullptr : MDs.front().second);
  } else if (F.isDeclaration()) {
    for (const auto &I : MDs) {
      // This is used for call site debug information.
      CheckDI(I.first != LLVMContext::MD_dbg ||
                  !cast<DISubprogram>(I.second)->isDistinct(),
              "function declaration may only have a unique !dbg attachment",
              &F);
      Check(I.first != LLVMContext::MD_prof,
            "function declaration may not have a !prof attachment", &F);

      // Verify the metadata itself.
      visitMDNode(*I.second, AreDebugLocsAllowed::Yes);
    }
    Check(!F.hasPersonalityFn(),
          "Function declaration shouldn't have a personality routine", &F);
  } else {
    // Verify that this function (which has a body) is not named "llvm.*". It
    // is not legal to define intrinsics.
    Check(!IsIntrinsic, "llvm intrinsics cannot be defined!", &F);

    // Check the entry node
    const BasicBlock *Entry = &F.getEntryBlock();
    Check(pred_empty(Entry),
          "Entry block to function must not have predecessors!", Entry);

    // The address of the entry block cannot be taken, unless it is dead.
    if (Entry->hasAddressTaken()) {
      Check(!BlockAddress::lookup(Entry)->isConstantUsed(),
            "blockaddress may not be used with the entry block!", Entry);
    }

    unsigned NumDebugAttachments = 0, NumProfAttachments = 0,
             NumKCFIAttachments = 0;
    // Visit metadata attachments.
    for (const auto &I : MDs) {
      // Verify that the attachment is legal.
      auto AllowLocs = AreDebugLocsAllowed::No;
      switch (I.first) {
      default:
        break;
      case LLVMContext::MD_dbg: {
        ++NumDebugAttachments;
        CheckDI(NumDebugAttachments == 1,
                "function must have a single !dbg attachment", &F, I.second);
        CheckDI(isa<DISubprogram>(I.second),
                "function !dbg attachment must be a subprogram", &F, I.second);
        CheckDI(cast<DISubprogram>(I.second)->isDistinct(),
                "function definition may only have a distinct !dbg attachment",
                &F);

        // A DISubprogram may describe at most one function definition.
        auto *SP = cast<DISubprogram>(I.second);
        const Function *&AttachedTo = DISubprogramAttachments[SP];
        CheckDI(!AttachedTo || AttachedTo == &F,
                "DISubprogram attached to more than one function", SP, &F);
        AttachedTo = &F;
        AllowLocs = AreDebugLocsAllowed::Yes;
        break;
      }
      case LLVMContext::MD_prof:
        ++NumProfAttachments;
        Check(NumProfAttachments == 1,
              "function must have a single !prof attachment", &F, I.second);
        break;
      case LLVMContext::MD_kcfi_type:
        ++NumKCFIAttachments;
        Check(NumKCFIAttachments == 1,
              "function must have a single !kcfi_type attachment", &F,
              I.second);
        break;
      }

      // Verify the metadata itself.
      visitMDNode(*I.second, AllowLocs);
    }
  }

  // If this function is actually an intrinsic, verify that it is only used in
  // direct call/invokes, never having its "address taken".
  // Only do this if the module is materialized, otherwise we don't have all the
  // uses.
  if (F.isIntrinsic() && F.getParent()->isMaterialized()) {
    const User *U;
    if (F.hasAddressTaken(&U, false, true, false,
                          /*IgnoreARCAttachedCall=*/true))
      Check(false, "Invalid user of intrinsic instruction!", U);
  }

  // Check intrinsics' signatures.
  switch (F.getIntrinsicID()) {
  case Intrinsic::experimental_gc_get_pointer_base: {
    FunctionType *FT = F.getFunctionType();
    Check(FT->getNumParams() == 1, "wrong number of parameters", F);
    Check(isa<PointerType>(F.getReturnType()),
          "gc.get.pointer.base must return a pointer", F);
    Check(FT->getParamType(0) == F.getReturnType(),
          "gc.get.pointer.base operand and result must be of the same type", F);
    break;
  }
  case Intrinsic::experimental_gc_get_pointer_offset: {
    FunctionType *FT = F.getFunctionType();
    Check(FT->getNumParams() == 1, "wrong number of parameters", F);
    Check(isa<PointerType>(FT->getParamType(0)),
          "gc.get.pointer.offset operand must be a pointer", F);
    Check(F.getReturnType()->isIntegerTy(),
          "gc.get.pointer.offset must return integer", F);
    break;
  }
  }

  auto *N = F.getSubprogram();
  HasDebugInfo = (N != nullptr);
  if (!HasDebugInfo)
    return;

  // Check that all !dbg attachments lead to back to N.
  //
  // FIXME: Check this incrementally while visiting !dbg attachments.
  // FIXME: Only check when N is the canonical subprogram for F.
  SmallPtrSet<const MDNode *, 32> Seen;
  auto VisitDebugLoc = [&](const Instruction &I, const MDNode *Node) {
    // Be careful about using DILocation here since we might be dealing with
    // broken code (this is the Verifier after all).
    const DILocation *DL = dyn_cast_or_null<DILocation>(Node);
    if (!DL)
      return;
    if (!Seen.insert(DL).second)
      return;

    Metadata *Parent = DL->getRawScope();
    CheckDI(Parent && isa<DILocalScope>(Parent),
            "DILocation's scope must be a DILocalScope", N, &F, &I, DL, Parent);

    DILocalScope *Scope = DL->getInlinedAtScope();
    Check(Scope, "Failed to find DILocalScope", DL);

    if (!Seen.insert(Scope).second)
      return;

    DISubprogram *SP = Scope->getSubprogram();

    // Scope and SP could be the same MDNode and we don't want to skip
    // validation in that case
    if ((Scope != SP) && !Seen.insert(SP).second)
      return;

    CheckDI(SP->describes(&F),
            "!dbg attachment points at wrong subprogram for function", N, &F,
            &I, DL, Scope, SP);
  };
  for (auto &BB : F)
    for (auto &I : BB) {
      VisitDebugLoc(I, I.getDebugLoc().getAsMDNode());
      // The llvm.loop annotations also contain two DILocations.
      if (auto MD = I.getMetadata(LLVMContext::MD_loop))
        for (unsigned i = 1; i < MD->getNumOperands(); ++i)
          VisitDebugLoc(I, dyn_cast_or_null<MDNode>(MD->getOperand(i)));
      // Bail out on the first broken debug-info diagnosis; the rest would be
      // noise.
      if (BrokenDebugInfo)
        return;
    }
}
3445
// visitBasicBlock - Verify that a basic block is well formed...
3447//
// Check block-level invariants: the block has a terminator, each PHI has
// exactly one (consistent) entry per predecessor, every instruction's parent
// pointer is correct, and no DbgRecords trail the block.
void Verifier::visitBasicBlock(BasicBlock &BB) {
  InstsInThisBlock.clear();
  ConvergenceVerifyHelper.visit(BB);

  // Ensure that basic blocks have terminators!
  Check(BB.getTerminator(), "Basic Block does not have terminator!", &BB);

  // Check constraints that this basic block imposes on all of the PHI nodes in
  // it.
  if (isa<PHINode>(BB.front())) {
    SmallVector<BasicBlock *, 8> Preds(predecessors(&BB));
    // Sorting predecessors and PHI entries lets us compare them pairwise.
    llvm::sort(Preds);
    for (const PHINode &PN : BB.phis()) {
      Check(PN.getNumIncomingValues() == Preds.size(),
            "PHINode should have one entry for each predecessor of its "
            "parent basic block!",
            &PN);

      // Get and sort all incoming values in the PHI node...
      Values.clear();
      Values.reserve(PN.getNumIncomingValues());
      for (unsigned i = 0, e = PN.getNumIncomingValues(); i != e; ++i)
        Values.push_back(
            std::make_pair(PN.getIncomingBlock(i), PN.getIncomingValue(i)));
      llvm::sort(Values);

      for (unsigned i = 0, e = Values.size(); i != e; ++i) {
        // Check to make sure that if there is more than one entry for a
        // particular basic block in this PHI node, that the incoming values are
        // all identical.
        //
        Check(i == 0 || Values[i].first != Values[i - 1].first ||
                  Values[i].second == Values[i - 1].second,
              "PHI node has multiple entries for the same basic block with "
              "different incoming values!",
              &PN, Values[i].first, Values[i].second, Values[i - 1].second);

        // Check to make sure that the predecessors and PHI node entries are
        // matched up.
        Check(Values[i].first == Preds[i],
              "PHI node entries do not match predecessors!", &PN,
              Values[i].first, Preds[i]);
      }
    }
  }

  // Check that all instructions have their parent pointers set up correctly.
  for (auto &I : BB)
  {
    Check(I.getParent() == &BB, "Instruction has bogus parent pointer!");
  }

  // Confirm that no issues arise from the debug program.
  CheckDI(!BB.getTrailingDbgRecords(), "Basic Block has trailing DbgRecords!",
          &BB);
}
3505
3506void Verifier::visitTerminator(Instruction &I) {
3507 // Ensure that terminators only exist at the end of the basic block.
3508 Check(&I == I.getParent()->getTerminator(),
3509 "Terminator found in the middle of a basic block!", I.getParent());
3510 visitInstruction(I);
3511}
3512
// Verify a conditional branch: per the message below, the branch condition
// must be of type 'i1'.
void Verifier::visitCondBrInst(CondBrInst &BI) {
        "Branch condition is not 'i1' type!", &BI, BI.getCondition());
  visitTerminator(BI);
}
3518
3519void Verifier::visitReturnInst(ReturnInst &RI) {
3520 Function *F = RI.getParent()->getParent();
3521 unsigned N = RI.getNumOperands();
3522 if (F->getReturnType()->isVoidTy())
3523 Check(N == 0,
3524 "Found return instr that returns non-void in Function of void "
3525 "return type!",
3526 &RI, F->getReturnType());
3527 else
3528 Check(N == 1 && F->getReturnType() == RI.getOperand(0)->getType(),
3529 "Function return type does not match operand "
3530 "type of return inst!",
3531 &RI, F->getReturnType());
3532
3533 // Check to make sure that the return value has necessary properties for
3534 // terminators...
3535 visitTerminator(RI);
3536}
3537
3538void Verifier::visitSwitchInst(SwitchInst &SI) {
3539 Check(SI.getType()->isVoidTy(), "Switch must have void result type!", &SI);
3540 // Check to make sure that all of the constants in the switch instruction
3541 // have the same type as the switched-on value.
3542 Type *SwitchTy = SI.getCondition()->getType();
3543 SmallPtrSet<ConstantInt*, 32> Constants;
3544 for (auto &Case : SI.cases()) {
3545 Check(isa<ConstantInt>(Case.getCaseValue()),
3546 "Case value is not a constant integer.", &SI);
3547 Check(Case.getCaseValue()->getType() == SwitchTy,
3548 "Switch constants must all be same type as switch value!", &SI);
3549 Check(Constants.insert(Case.getCaseValue()).second,
3550 "Duplicate integer as switch case", &SI, Case.getCaseValue());
3551 }
3552
3553 visitTerminator(SI);
3554}
3555
// Verify an indirectbr: per the messages below, its address operand and each
// listed destination must be pointer-typed.
void Verifier::visitIndirectBrInst(IndirectBrInst &BI) {
        "Indirectbr operand must have pointer type!", &BI);
  for (unsigned i = 0, e = BI.getNumDestinations(); i != e; ++i)
        "Indirectbr destinations must all have pointer type!", &BI);

  visitTerminator(BI);
}
3565
// Verify a callbr terminator. Two forms are accepted: inline asm (asm-goto),
// or a direct call to a small allowlist of intrinsics; each form has its own
// structural requirements below.
void Verifier::visitCallBrInst(CallBrInst &CBI) {
  if (!CBI.isInlineAsm()) {
        "Callbr: indirect function / invalid signature");
    Check(!CBI.hasOperandBundles(),
          "Callbr for intrinsics currently doesn't support operand bundles");

    // Per-intrinsic structural requirements.
    switch (CBI.getIntrinsicID()) {
    case Intrinsic::amdgcn_kill: {
      Check(CBI.getNumIndirectDests() == 1,
            "Callbr amdgcn_kill only supports one indirect dest");
      // The indirect destination must start with either an unreachable
      // instruction or a call to llvm.amdgcn.unreachable.
      bool Unreachable = isa<UnreachableInst>(CBI.getIndirectDest(0)->begin());
      CallInst *Call = dyn_cast<CallInst>(CBI.getIndirectDest(0)->begin());
      Check(Unreachable || (Call && Call->getIntrinsicID() ==
                                        Intrinsic::amdgcn_unreachable),
            "Callbr amdgcn_kill indirect dest needs to be unreachable");
      break;
    }
    default:
      CheckFailed(
          "Callbr currently only supports asm-goto and selected intrinsics");
    }
    visitIntrinsicCall(CBI.getIntrinsicID(), CBI);
  } else {
    // Inline-asm form: unwinding out of a callbr is not permitted.
    const InlineAsm *IA = cast<InlineAsm>(CBI.getCalledOperand());
    Check(!IA->canThrow(), "Unwinding from Callbr is not allowed");

    verifyInlineAsmCall(CBI);
  }
  visitTerminator(CBI);
}
3597
3598void Verifier::visitSelectInst(SelectInst &SI) {
3599 Check(!SelectInst::areInvalidOperands(SI.getOperand(0), SI.getOperand(1),
3600 SI.getOperand(2)),
3601 "Invalid operands for select instruction!", &SI);
3602
3603 Check(SI.getTrueValue()->getType() == SI.getType(),
3604 "Select values must have same type as select instruction!", &SI);
3605 visitInstruction(SI);
3606}
3607
/// visitUserOp1 - User defined operators shouldn't live beyond the lifetime of
/// a pass, if any exist, it's an error.
///
void Verifier::visitUserOp1(Instruction &I) {
  // Unconditional failure: any surviving user-op instruction is malformed IR.
  Check(false, "User-defined operators should not live outside of a pass!", &I);
}
3614
3615void Verifier::visitTruncInst(TruncInst &I) {
3616 // Get the source and destination types
3617 Type *SrcTy = I.getOperand(0)->getType();
3618 Type *DestTy = I.getType();
3619
3620 // Get the size of the types in bits, we'll need this later
3621 unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
3622 unsigned DestBitSize = DestTy->getScalarSizeInBits();
3623
3624 Check(SrcTy->isIntOrIntVectorTy(), "Trunc only operates on integer", &I);
3625 Check(DestTy->isIntOrIntVectorTy(), "Trunc only produces integer", &I);
3626 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(),
3627 "trunc source and destination must both be a vector or neither", &I);
3628 Check(SrcBitSize > DestBitSize, "DestTy too big for Trunc", &I);
3629
3630 visitInstruction(I);
3631}
3632
3633void Verifier::visitZExtInst(ZExtInst &I) {
3634 // Get the source and destination types
3635 Type *SrcTy = I.getOperand(0)->getType();
3636 Type *DestTy = I.getType();
3637
3638 // Get the size of the types in bits, we'll need this later
3639 Check(SrcTy->isIntOrIntVectorTy(), "ZExt only operates on integer", &I);
3640 Check(DestTy->isIntOrIntVectorTy(), "ZExt only produces an integer", &I);
3641 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(),
3642 "zext source and destination must both be a vector or neither", &I);
3643 unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
3644 unsigned DestBitSize = DestTy->getScalarSizeInBits();
3645
3646 Check(SrcBitSize < DestBitSize, "Type too small for ZExt", &I);
3647
3648 visitInstruction(I);
3649}
3650
3651void Verifier::visitSExtInst(SExtInst &I) {
3652 // Get the source and destination types
3653 Type *SrcTy = I.getOperand(0)->getType();
3654 Type *DestTy = I.getType();
3655
3656 // Get the size of the types in bits, we'll need this later
3657 unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
3658 unsigned DestBitSize = DestTy->getScalarSizeInBits();
3659
3660 Check(SrcTy->isIntOrIntVectorTy(), "SExt only operates on integer", &I);
3661 Check(DestTy->isIntOrIntVectorTy(), "SExt only produces an integer", &I);
3662 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(),
3663 "sext source and destination must both be a vector or neither", &I);
3664 Check(SrcBitSize < DestBitSize, "Type too small for SExt", &I);
3665
3666 visitInstruction(I);
3667}
3668
3669void Verifier::visitFPTruncInst(FPTruncInst &I) {
3670 // Get the source and destination types
3671 Type *SrcTy = I.getOperand(0)->getType();
3672 Type *DestTy = I.getType();
3673 // Get the size of the types in bits, we'll need this later
3674 unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
3675 unsigned DestBitSize = DestTy->getScalarSizeInBits();
3676
3677 Check(SrcTy->isFPOrFPVectorTy(), "FPTrunc only operates on FP", &I);
3678 Check(DestTy->isFPOrFPVectorTy(), "FPTrunc only produces an FP", &I);
3679 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(),
3680 "fptrunc source and destination must both be a vector or neither", &I);
3681 Check(SrcBitSize > DestBitSize, "DestTy too big for FPTrunc", &I);
3682
3683 visitInstruction(I);
3684}
3685
3686void Verifier::visitFPExtInst(FPExtInst &I) {
3687 // Get the source and destination types
3688 Type *SrcTy = I.getOperand(0)->getType();
3689 Type *DestTy = I.getType();
3690
3691 // Get the size of the types in bits, we'll need this later
3692 unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
3693 unsigned DestBitSize = DestTy->getScalarSizeInBits();
3694
3695 Check(SrcTy->isFPOrFPVectorTy(), "FPExt only operates on FP", &I);
3696 Check(DestTy->isFPOrFPVectorTy(), "FPExt only produces an FP", &I);
3697 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(),
3698 "fpext source and destination must both be a vector or neither", &I);
3699 Check(SrcBitSize < DestBitSize, "DestTy too small for FPExt", &I);
3700
3701 visitInstruction(I);
3702}
3703
3704void Verifier::visitUIToFPInst(UIToFPInst &I) {
3705 // Get the source and destination types
3706 Type *SrcTy = I.getOperand(0)->getType();
3707 Type *DestTy = I.getType();
3708
3709 bool SrcVec = SrcTy->isVectorTy();
3710 bool DstVec = DestTy->isVectorTy();
3711
3712 Check(SrcVec == DstVec,
3713 "UIToFP source and dest must both be vector or scalar", &I);
3714 Check(SrcTy->isIntOrIntVectorTy(),
3715 "UIToFP source must be integer or integer vector", &I);
3716 Check(DestTy->isFPOrFPVectorTy(), "UIToFP result must be FP or FP vector",
3717 &I);
3718
3719 if (SrcVec && DstVec)
3720 Check(cast<VectorType>(SrcTy)->getElementCount() ==
3721 cast<VectorType>(DestTy)->getElementCount(),
3722 "UIToFP source and dest vector length mismatch", &I);
3723
3724 visitInstruction(I);
3725}
3726
3727void Verifier::visitSIToFPInst(SIToFPInst &I) {
3728 // Get the source and destination types
3729 Type *SrcTy = I.getOperand(0)->getType();
3730 Type *DestTy = I.getType();
3731
3732 bool SrcVec = SrcTy->isVectorTy();
3733 bool DstVec = DestTy->isVectorTy();
3734
3735 Check(SrcVec == DstVec,
3736 "SIToFP source and dest must both be vector or scalar", &I);
3737 Check(SrcTy->isIntOrIntVectorTy(),
3738 "SIToFP source must be integer or integer vector", &I);
3739 Check(DestTy->isFPOrFPVectorTy(), "SIToFP result must be FP or FP vector",
3740 &I);
3741
3742 if (SrcVec && DstVec)
3743 Check(cast<VectorType>(SrcTy)->getElementCount() ==
3744 cast<VectorType>(DestTy)->getElementCount(),
3745 "SIToFP source and dest vector length mismatch", &I);
3746
3747 visitInstruction(I);
3748}
3749
3750void Verifier::visitFPToUIInst(FPToUIInst &I) {
3751 // Get the source and destination types
3752 Type *SrcTy = I.getOperand(0)->getType();
3753 Type *DestTy = I.getType();
3754
3755 bool SrcVec = SrcTy->isVectorTy();
3756 bool DstVec = DestTy->isVectorTy();
3757
3758 Check(SrcVec == DstVec,
3759 "FPToUI source and dest must both be vector or scalar", &I);
3760 Check(SrcTy->isFPOrFPVectorTy(), "FPToUI source must be FP or FP vector", &I);
3761 Check(DestTy->isIntOrIntVectorTy(),
3762 "FPToUI result must be integer or integer vector", &I);
3763
3764 if (SrcVec && DstVec)
3765 Check(cast<VectorType>(SrcTy)->getElementCount() ==
3766 cast<VectorType>(DestTy)->getElementCount(),
3767 "FPToUI source and dest vector length mismatch", &I);
3768
3769 visitInstruction(I);
3770}
3771
3772void Verifier::visitFPToSIInst(FPToSIInst &I) {
3773 // Get the source and destination types
3774 Type *SrcTy = I.getOperand(0)->getType();
3775 Type *DestTy = I.getType();
3776
3777 bool SrcVec = SrcTy->isVectorTy();
3778 bool DstVec = DestTy->isVectorTy();
3779
3780 Check(SrcVec == DstVec,
3781 "FPToSI source and dest must both be vector or scalar", &I);
3782 Check(SrcTy->isFPOrFPVectorTy(), "FPToSI source must be FP or FP vector", &I);
3783 Check(DestTy->isIntOrIntVectorTy(),
3784 "FPToSI result must be integer or integer vector", &I);
3785
3786 if (SrcVec && DstVec)
3787 Check(cast<VectorType>(SrcTy)->getElementCount() ==
3788 cast<VectorType>(DestTy)->getElementCount(),
3789 "FPToSI source and dest vector length mismatch", &I);
3790
3791 visitInstruction(I);
3792}
3793
3794void Verifier::checkPtrToAddr(Type *SrcTy, Type *DestTy, const Value &V) {
3795 Check(SrcTy->isPtrOrPtrVectorTy(), "PtrToAddr source must be pointer", V);
3796 Check(DestTy->isIntOrIntVectorTy(), "PtrToAddr result must be integral", V);
3797 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(), "PtrToAddr type mismatch",
3798 V);
3799
3800 if (SrcTy->isVectorTy()) {
3801 auto *VSrc = cast<VectorType>(SrcTy);
3802 auto *VDest = cast<VectorType>(DestTy);
3803 Check(VSrc->getElementCount() == VDest->getElementCount(),
3804 "PtrToAddr vector length mismatch", V);
3805 }
3806
3807 Type *AddrTy = DL.getAddressType(SrcTy);
3808 Check(AddrTy == DestTy, "PtrToAddr result must be address width", V);
3809}
3810
3811void Verifier::visitPtrToAddrInst(PtrToAddrInst &I) {
3812 checkPtrToAddr(I.getOperand(0)->getType(), I.getType(), I);
3813 visitInstruction(I);
3814}
3815
3816void Verifier::visitPtrToIntInst(PtrToIntInst &I) {
3817 // Get the source and destination types
3818 Type *SrcTy = I.getOperand(0)->getType();
3819 Type *DestTy = I.getType();
3820
3821 Check(SrcTy->isPtrOrPtrVectorTy(), "PtrToInt source must be pointer", &I);
3822
3823 Check(DestTy->isIntOrIntVectorTy(), "PtrToInt result must be integral", &I);
3824 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(), "PtrToInt type mismatch",
3825 &I);
3826
3827 if (SrcTy->isVectorTy()) {
3828 auto *VSrc = cast<VectorType>(SrcTy);
3829 auto *VDest = cast<VectorType>(DestTy);
3830 Check(VSrc->getElementCount() == VDest->getElementCount(),
3831 "PtrToInt Vector length mismatch", &I);
3832 }
3833
3834 visitInstruction(I);
3835}
3836
3837void Verifier::visitIntToPtrInst(IntToPtrInst &I) {
3838 // Get the source and destination types
3839 Type *SrcTy = I.getOperand(0)->getType();
3840 Type *DestTy = I.getType();
3841
3842 Check(SrcTy->isIntOrIntVectorTy(), "IntToPtr source must be an integral", &I);
3843 Check(DestTy->isPtrOrPtrVectorTy(), "IntToPtr result must be a pointer", &I);
3844
3845 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(), "IntToPtr type mismatch",
3846 &I);
3847 if (SrcTy->isVectorTy()) {
3848 auto *VSrc = cast<VectorType>(SrcTy);
3849 auto *VDest = cast<VectorType>(DestTy);
3850 Check(VSrc->getElementCount() == VDest->getElementCount(),
3851 "IntToPtr Vector length mismatch", &I);
3852 }
3853 visitInstruction(I);
3854}
3855
3856void Verifier::visitBitCastInst(BitCastInst &I) {
3857 Check(
3858 CastInst::castIsValid(Instruction::BitCast, I.getOperand(0), I.getType()),
3859 "Invalid bitcast", &I);
3860 visitInstruction(I);
3861}
3862
// Verify an addrspacecast: both ends must be pointers (or pointer vectors)
// and, per the message below, in different address spaces; vector operands
// must agree in element count.
void Verifier::visitAddrSpaceCastInst(AddrSpaceCastInst &I) {
  Type *SrcTy = I.getOperand(0)->getType();
  Type *DestTy = I.getType();

  Check(SrcTy->isPtrOrPtrVectorTy(), "AddrSpaceCast source must be a pointer",
        &I);
  Check(DestTy->isPtrOrPtrVectorTy(), "AddrSpaceCast result must be a pointer",
        &I);
        "AddrSpaceCast must be between different address spaces", &I);
  if (auto *SrcVTy = dyn_cast<VectorType>(SrcTy))
    Check(SrcVTy->getElementCount() ==
              cast<VectorType>(DestTy)->getElementCount(),
          "AddrSpaceCast vector pointer number of elements mismatch", &I);
  visitInstruction(I);
}
3879
/// visitPHINode - Ensure that a PHI node is well formed.
///
/// Checks grouping at the top of the block, the result type (no tokens), and
/// that every incoming value's type matches the PHI's own type.
void Verifier::visitPHINode(PHINode &PN) {
  // Ensure that the PHI nodes are all grouped together at the top of the block.
  // This can be tested by checking whether the instruction before this is
  // either nonexistent (because this is begin()) or is a PHI node. If not,
  // then there is some other instruction before a PHI.
  Check(&PN == &PN.getParent()->front() ||
        "PHI nodes not grouped at top of basic block!", &PN, PN.getParent());

  // Check that a PHI doesn't yield a Token.
  Check(!PN.getType()->isTokenLikeTy(), "PHI nodes cannot have token type!");

  // Check that all of the values of the PHI node have the same type as the
  // result.
  for (Value *IncValue : PN.incoming_values()) {
    Check(PN.getType() == IncValue->getType(),
          "PHI node operands are not the same type as the result!", &PN);
  }

  // All other PHI node constraints are checked in the visitBasicBlock method.

  visitInstruction(PN);
}
3905
// Shared verification for all call-like instructions (call, invoke, callbr):
// callee/argument/signature agreement, call-site attributes, special argument
// kinds (inalloca, swifterror, immarg, preallocated), vararg attribute rules,
// and operand-bundle well-formedness.
void Verifier::visitCallBase(CallBase &Call) {
      "Called function must be a pointer!", Call);
  FunctionType *FTy = Call.getFunctionType();

  // Verify that the correct number of arguments are being passed
  if (FTy->isVarArg())
    Check(Call.arg_size() >= FTy->getNumParams(),
          "Called function requires more parameters than were provided!", Call);
  else
    Check(Call.arg_size() == FTy->getNumParams(),
          "Incorrect number of arguments passed to called function!", Call);

  // Verify that all arguments to the call match the function type.
  for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i)
    Check(Call.getArgOperand(i)->getType() == FTy->getParamType(i),
          "Call parameter type does not match function signature!",
          Call.getArgOperand(i), FTy->getParamType(i), Call);

  AttributeList Attrs = Call.getAttributes();

  // The attribute list may not describe more parameters than the call has.
  Check(verifyAttributeCount(Attrs, Call.arg_size()),
        "Attribute after last parameter!", Call);

  Function *Callee =
  bool IsIntrinsic = Callee && Callee->isIntrinsic();
  if (IsIntrinsic)
    Check(Callee->getFunctionType() == FTy,
          "Intrinsic called with incompatible signature", Call);

  // Verify if the calling convention of the callee is callable.
        "calling convention does not permit calls", Call);

  // Disallow passing/returning values with alignment higher than we can
  // represent.
  // FIXME: Consider making DataLayout cap the alignment, so this isn't
  // necessary.
  auto VerifyTypeAlign = [&](Type *Ty, const Twine &Message) {
    if (!Ty->isSized())
      return;
    Align ABIAlign = DL.getABITypeAlign(Ty);
    Check(ABIAlign.value() <= Value::MaximumAlignment,
          "Incorrect alignment of " + Message + " to called function!", Call);
  };

  if (!IsIntrinsic) {
    VerifyTypeAlign(FTy->getReturnType(), "return type");
    for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i) {
      Type *Ty = FTy->getParamType(i);
      VerifyTypeAlign(Ty, "argument passed");
    }
  }

  if (Attrs.hasFnAttr(Attribute::Speculatable)) {
    // Don't allow speculatable on call sites, unless the underlying function
    // declaration is also speculatable.
    Check(Callee && Callee->isSpeculatable(),
          "speculatable attribute may not apply to call sites", Call);
  }

  if (Attrs.hasFnAttr(Attribute::Preallocated)) {
    Check(Call.getIntrinsicID() == Intrinsic::call_preallocated_arg,
          "preallocated as a call site attribute can only be on "
          "llvm.call.preallocated.arg");
  }

  Check(!Attrs.hasFnAttr(Attribute::DenormalFPEnv),
        "denormal_fpenv attribute may not apply to call sites", Call);

  // Verify call attributes.
  verifyFunctionAttrs(FTy, Attrs, &Call, IsIntrinsic, Call.isInlineAsm());

  // Conservatively check the inalloca argument.
  // We have a bug if we can find that there is an underlying alloca without
  // inalloca.
  if (Call.hasInAllocaArgument()) {
    Value *InAllocaArg = Call.getArgOperand(FTy->getNumParams() - 1);
    if (auto AI = dyn_cast<AllocaInst>(InAllocaArg->stripInBoundsOffsets()))
      Check(AI->isUsedWithInAlloca(),
            "inalloca argument for call has mismatched alloca", AI, Call);
  }

  // For each argument of the callsite, if it has the swifterror argument,
  // make sure the underlying alloca/parameter it comes from has a swifterror as
  // well.
  for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i) {
    if (Call.paramHasAttr(i, Attribute::SwiftError)) {
      Value *SwiftErrorArg = Call.getArgOperand(i);
      if (auto AI = dyn_cast<AllocaInst>(SwiftErrorArg->stripInBoundsOffsets())) {
        Check(AI->isSwiftError(),
              "swifterror argument for call has mismatched alloca", AI, Call);
        continue;
      }
      auto ArgI = dyn_cast<Argument>(SwiftErrorArg);
      Check(ArgI, "swifterror argument should come from an alloca or parameter",
            SwiftErrorArg, Call);
      Check(ArgI->hasSwiftErrorAttr(),
            "swifterror argument for call has mismatched parameter", ArgI,
            Call);
    }

    if (Attrs.hasParamAttr(i, Attribute::ImmArg)) {
      // Don't allow immarg on call sites, unless the underlying declaration
      // also has the matching immarg.
      Check(Callee && Callee->hasParamAttribute(i, Attribute::ImmArg),
            "immarg may not apply only to call sites", Call.getArgOperand(i),
            Call);
    }

    if (Call.paramHasAttr(i, Attribute::ImmArg)) {
      // immarg operands must be immediate constants (int or FP).
      Value *ArgVal = Call.getArgOperand(i);
      Check(isa<ConstantInt>(ArgVal) || isa<ConstantFP>(ArgVal),
            "immarg operand has non-immediate parameter", ArgVal, Call);

      // If the imm-arg is an integer and also has a range attached,
      // check if the given value is within the range.
      if (Call.paramHasAttr(i, Attribute::Range)) {
        if (auto *CI = dyn_cast<ConstantInt>(ArgVal)) {
          const ConstantRange &CR =
              Call.getParamAttr(i, Attribute::Range).getValueAsConstantRange();
          Check(CR.contains(CI->getValue()),
                "immarg value " + Twine(CI->getValue().getSExtValue()) +
                    " out of range [" + Twine(CR.getLower().getSExtValue()) +
                    ", " + Twine(CR.getUpper().getSExtValue()) + ")",
                Call);
        }
      }
    }

    if (Call.paramHasAttr(i, Attribute::Preallocated)) {
      // A preallocated argument needs exactly one of: a preallocated bundle,
      // or a musttail call.
      Value *ArgVal = Call.getArgOperand(i);
      bool hasOB =
      bool isMustTail = Call.isMustTailCall();
      Check(hasOB != isMustTail,
            "preallocated operand either requires a preallocated bundle or "
            "the call to be musttail (but not both)",
            ArgVal, Call);
    }
  }

  if (FTy->isVarArg()) {
    // FIXME? is 'nest' even legal here?
    bool SawNest = false;
    bool SawReturned = false;

    // Record nest/returned on the fixed parameters so duplicates in the
    // variadic tail can be rejected below.
    for (unsigned Idx = 0; Idx < FTy->getNumParams(); ++Idx) {
      if (Attrs.hasParamAttr(Idx, Attribute::Nest))
        SawNest = true;
      if (Attrs.hasParamAttr(Idx, Attribute::Returned))
        SawReturned = true;
    }

    // Check attributes on the varargs part.
    for (unsigned Idx = FTy->getNumParams(); Idx < Call.arg_size(); ++Idx) {
      Type *Ty = Call.getArgOperand(Idx)->getType();
      AttributeSet ArgAttrs = Attrs.getParamAttrs(Idx);
      verifyParameterAttrs(ArgAttrs, Ty, &Call);

      if (ArgAttrs.hasAttribute(Attribute::Nest)) {
        Check(!SawNest, "More than one parameter has attribute nest!", Call);
        SawNest = true;
      }

      if (ArgAttrs.hasAttribute(Attribute::Returned)) {
        Check(!SawReturned, "More than one parameter has attribute returned!",
              Call);
        Check(Ty->canLosslesslyBitCastTo(FTy->getReturnType()),
              "Incompatible argument and return types for 'returned' "
              "attribute",
              Call);
        SawReturned = true;
      }

      // Statepoint intrinsic is vararg but the wrapped function may be not.
      // Allow sret here and check the wrapped function in verifyStatepoint.
      if (Call.getIntrinsicID() != Intrinsic::experimental_gc_statepoint)
        Check(!ArgAttrs.hasAttribute(Attribute::StructRet),
              "Attribute 'sret' cannot be used for vararg call arguments!",
              Call);

      if (ArgAttrs.hasAttribute(Attribute::InAlloca))
        Check(Idx == Call.arg_size() - 1,
              "inalloca isn't on the last argument!", Call);
    }
  }

  // Verify that there's no metadata unless it's a direct call to an intrinsic.
  if (!IsIntrinsic) {
    for (Type *ParamTy : FTy->params()) {
      Check(!ParamTy->isMetadataTy(),
            "Function has metadata parameter but isn't an intrinsic", Call);
      Check(!ParamTy->isTokenLikeTy(),
            "Function has token parameter but isn't an intrinsic", Call);
    }
  }

  // Verify that indirect calls don't return tokens.
  if (!Call.getCalledFunction()) {
    Check(!FTy->getReturnType()->isTokenLikeTy(),
          "Return type cannot be token for indirect call!");
    Check(!FTy->getReturnType()->isX86_AMXTy(),
          "Return type cannot be x86_amx for indirect call!");
  }

    visitIntrinsicCall(ID, Call);

  // Verify that a callsite has at most one "deopt", at most one "funclet", at
  // most one "gc-transition", at most one "cfguardtarget", at most one
  // "preallocated" operand bundle, and at most one "ptrauth" operand bundle.
  bool FoundDeoptBundle = false, FoundFuncletBundle = false,
       FoundGCTransitionBundle = false, FoundCFGuardTargetBundle = false,
       FoundPreallocatedBundle = false, FoundGCLiveBundle = false,
       FoundPtrauthBundle = false, FoundKCFIBundle = false,
       FoundAttachedCallBundle = false;
  for (unsigned i = 0, e = Call.getNumOperandBundles(); i < e; ++i) {
    OperandBundleUse BU = Call.getOperandBundleAt(i);
    uint32_t Tag = BU.getTagID();
    if (Tag == LLVMContext::OB_deopt) {
      Check(!FoundDeoptBundle, "Multiple deopt operand bundles", Call);
      FoundDeoptBundle = true;
    } else if (Tag == LLVMContext::OB_gc_transition) {
      Check(!FoundGCTransitionBundle, "Multiple gc-transition operand bundles",
            Call);
      FoundGCTransitionBundle = true;
    } else if (Tag == LLVMContext::OB_funclet) {
      Check(!FoundFuncletBundle, "Multiple funclet operand bundles", Call);
      FoundFuncletBundle = true;
      Check(BU.Inputs.size() == 1,
            "Expected exactly one funclet bundle operand", Call);
      Check(isa<FuncletPadInst>(BU.Inputs.front()),
            "Funclet bundle operands should correspond to a FuncletPadInst",
            Call);
    } else if (Tag == LLVMContext::OB_cfguardtarget) {
      Check(!FoundCFGuardTargetBundle, "Multiple CFGuardTarget operand bundles",
            Call);
      FoundCFGuardTargetBundle = true;
      Check(BU.Inputs.size() == 1,
            "Expected exactly one cfguardtarget bundle operand", Call);
    } else if (Tag == LLVMContext::OB_ptrauth) {
      Check(!FoundPtrauthBundle, "Multiple ptrauth operand bundles", Call);
      FoundPtrauthBundle = true;
      Check(BU.Inputs.size() == 2,
            "Expected exactly two ptrauth bundle operands", Call);
      Check(isa<ConstantInt>(BU.Inputs[0]) &&
                BU.Inputs[0]->getType()->isIntegerTy(32),
            "Ptrauth bundle key operand must be an i32 constant", Call);
      Check(BU.Inputs[1]->getType()->isIntegerTy(64),
            "Ptrauth bundle discriminator operand must be an i64", Call);
    } else if (Tag == LLVMContext::OB_kcfi) {
      Check(!FoundKCFIBundle, "Multiple kcfi operand bundles", Call);
      FoundKCFIBundle = true;
      Check(BU.Inputs.size() == 1, "Expected exactly one kcfi bundle operand",
            Call);
      Check(isa<ConstantInt>(BU.Inputs[0]) &&
                BU.Inputs[0]->getType()->isIntegerTy(32),
            "Kcfi bundle operand must be an i32 constant", Call);
    } else if (Tag == LLVMContext::OB_preallocated) {
      Check(!FoundPreallocatedBundle, "Multiple preallocated operand bundles",
            Call);
      FoundPreallocatedBundle = true;
      Check(BU.Inputs.size() == 1,
            "Expected exactly one preallocated bundle operand", Call);
      auto Input = dyn_cast<IntrinsicInst>(BU.Inputs.front());
      Check(Input &&
                Input->getIntrinsicID() == Intrinsic::call_preallocated_setup,
            "\"preallocated\" argument must be a token from "
            "llvm.call.preallocated.setup",
            Call);
    } else if (Tag == LLVMContext::OB_gc_live) {
      Check(!FoundGCLiveBundle, "Multiple gc-live operand bundles", Call);
      FoundGCLiveBundle = true;
      Check(!FoundAttachedCallBundle,
            "Multiple \"clang.arc.attachedcall\" operand bundles", Call);
      FoundAttachedCallBundle = true;
      verifyAttachedCallBundle(Call, BU);
    }
  }

  // Verify that callee and callsite agree on whether to use pointer auth.
  Check(!(Call.getCalledFunction() && FoundPtrauthBundle),
        "Direct call cannot have a ptrauth bundle", Call);

  // Verify that each inlinable callsite of a debug-info-bearing function in a
  // debug-info-bearing function has a debug location attached to it. Failure to
  // do so causes assertion failures when the inliner sets up inline scope info
  // (Interposable functions are not inlinable, neither are functions without
  // definitions.)
        "inlinable function call in a function with "
        "debug info must have a !dbg location",
        Call);

  if (Call.isInlineAsm())
    verifyInlineAsmCall(Call);

  ConvergenceVerifyHelper.visit(Call);

  visitInstruction(Call);
}
4214
4215void Verifier::verifyTailCCMustTailAttrs(const AttrBuilder &Attrs,
4216 StringRef Context) {
4217 Check(!Attrs.contains(Attribute::InAlloca),
4218 Twine("inalloca attribute not allowed in ") + Context);
4219 Check(!Attrs.contains(Attribute::InReg),
4220 Twine("inreg attribute not allowed in ") + Context);
4221 Check(!Attrs.contains(Attribute::SwiftError),
4222 Twine("swifterror attribute not allowed in ") + Context);
4223 Check(!Attrs.contains(Attribute::Preallocated),
4224 Twine("preallocated attribute not allowed in ") + Context);
4225 Check(!Attrs.contains(Attribute::ByRef),
4226 Twine("byref attribute not allowed in ") + Context);
4227}
4228
/// Two types are "congruent" if they are identical, or if they are both pointer
/// types with different pointee types and the same address space.
static bool isTypeCongruent(Type *L, Type *R) {
  if (L == R)
    return true;
  // NOTE(review): PL/PR come from pointer-type casts of L and R declared on
  // lines not visible in this chunk — presumably dyn_cast<PointerType>.
  if (!PL || !PR)
    return false;
  return PL->getAddressSpace() == PR->getAddressSpace();
}
4240
4241static AttrBuilder getParameterABIAttributes(LLVMContext& C, unsigned I, AttributeList Attrs) {
4242 static const Attribute::AttrKind ABIAttrs[] = {
4243 Attribute::StructRet, Attribute::ByVal, Attribute::InAlloca,
4244 Attribute::InReg, Attribute::StackAlignment, Attribute::SwiftSelf,
4245 Attribute::SwiftAsync, Attribute::SwiftError, Attribute::Preallocated,
4246 Attribute::ByRef};
4247 AttrBuilder Copy(C);
4248 for (auto AK : ABIAttrs) {
4249 Attribute Attr = Attrs.getParamAttrs(I).getAttribute(AK);
4250 if (Attr.isValid())
4251 Copy.addAttribute(Attr);
4252 }
4253
4254 // `align` is ABI-affecting only in combination with `byval` or `byref`.
4255 if (Attrs.hasParamAttr(I, Attribute::Alignment) &&
4256 (Attrs.hasParamAttr(I, Attribute::ByVal) ||
4257 Attrs.hasParamAttr(I, Attribute::ByRef)))
4258 Copy.addAlignmentAttr(Attrs.getParamAlignment(I));
4259 return Copy;
4260}
4261
// Verify the structural rules for a 'musttail' call: matching prototypes and
// calling conventions with the caller, a ret (optionally via bitcast)
// immediately after the call, and matching ABI-impacting attributes.
void Verifier::verifyMustTailCall(CallInst &CI) {
  Check(!CI.isInlineAsm(), "cannot use musttail call with inline asm", &CI);

  // Caller/callee must agree on varargs-ness and have congruent return types.
  Function *F = CI.getParent()->getParent();
  FunctionType *CallerTy = F->getFunctionType();
  FunctionType *CalleeTy = CI.getFunctionType();
  Check(CallerTy->isVarArg() == CalleeTy->isVarArg(),
        "cannot guarantee tail call due to mismatched varargs", &CI);
  Check(isTypeCongruent(CallerTy->getReturnType(), CalleeTy->getReturnType()),
        "cannot guarantee tail call due to mismatched return types", &CI);

  // - The calling conventions of the caller and callee must match.
  Check(F->getCallingConv() == CI.getCallingConv(),
        "cannot guarantee tail call due to mismatched calling conv", &CI);

  // - The call must immediately precede a :ref:`ret <i_ret>` instruction,
  //   or a pointer bitcast followed by a ret instruction.
  // - The ret instruction must return the (possibly bitcasted) value
  //   produced by the call or void.
  Value *RetVal = &CI;
  // NOTE(review): the declaration of `Next` (presumably the instruction
  // following CI, e.g. `Instruction *Next = CI.getNextNode();`) is missing
  // from this excerpt -- confirm against upstream before building.

  // Handle the optional bitcast.
  if (BitCastInst *BI = dyn_cast_or_null<BitCastInst>(Next)) {
    Check(BI->getOperand(0) == RetVal,
          "bitcast following musttail call must use the call", BI);
    RetVal = BI;
    Next = BI->getNextNode();
  }

  // Check the return.
  ReturnInst *Ret = dyn_cast_or_null<ReturnInst>(Next);
  Check(Ret, "musttail call must precede a ret with an optional bitcast", &CI);
  // NOTE(review): one operand of the disjunction below was lost in this
  // excerpt (the first line ends with `||`) -- confirm against upstream.
  Check(!Ret->getReturnValue() || Ret->getReturnValue() == RetVal ||
        "musttail call result must be returned", Ret);

  AttributeList CallerAttrs = F->getAttributes();
  AttributeList CalleeAttrs = CI.getAttributes();
  // tailcc/swifttailcc relax the prototype-match rule but restrict which
  // ABI-impacting parameter attributes may appear at all.
  if (CI.getCallingConv() == CallingConv::SwiftTail ||
      CI.getCallingConv() == CallingConv::Tail) {
    StringRef CCName =
        CI.getCallingConv() == CallingConv::Tail ? "tailcc" : "swifttailcc";

    // - Only sret, byval, swiftself, and swiftasync ABI-impacting attributes
    //   are allowed in swifttailcc call
    for (unsigned I = 0, E = CallerTy->getNumParams(); I != E; ++I) {
      AttrBuilder ABIAttrs = getParameterABIAttributes(F->getContext(), I, CallerAttrs);
      SmallString<32> Context{CCName, StringRef(" musttail caller")};
      verifyTailCCMustTailAttrs(ABIAttrs, Context);
    }
    for (unsigned I = 0, E = CalleeTy->getNumParams(); I != E; ++I) {
      AttrBuilder ABIAttrs = getParameterABIAttributes(F->getContext(), I, CalleeAttrs);
      SmallString<32> Context{CCName, StringRef(" musttail callee")};
      verifyTailCCMustTailAttrs(ABIAttrs, Context);
    }
    // - Varargs functions are not allowed
    Check(!CallerTy->isVarArg(), Twine("cannot guarantee ") + CCName +
                                     " tail call for varargs function");
    return;
  }

  // - The caller and callee prototypes must match.  Pointer types of
  //   parameters or return types may differ in pointee type, but not
  //   address space.
  if (!CI.getIntrinsicID()) {
    Check(CallerTy->getNumParams() == CalleeTy->getNumParams(),
          "cannot guarantee tail call due to mismatched parameter counts", &CI);
    for (unsigned I = 0, E = CallerTy->getNumParams(); I != E; ++I) {
      Check(
          isTypeCongruent(CallerTy->getParamType(I), CalleeTy->getParamType(I)),
          "cannot guarantee tail call due to mismatched parameter types", &CI);
    }
  }

  // - All ABI-impacting function attributes, such as sret, byval, inreg,
  //   returned, preallocated, and inalloca, must match.
  for (unsigned I = 0, E = CallerTy->getNumParams(); I != E; ++I) {
    AttrBuilder CallerABIAttrs = getParameterABIAttributes(F->getContext(), I, CallerAttrs);
    AttrBuilder CalleeABIAttrs = getParameterABIAttributes(F->getContext(), I, CalleeAttrs);
    Check(CallerABIAttrs == CalleeABIAttrs,
          "cannot guarantee tail call due to mismatched ABI impacting "
          "function attributes",
          &CI, CI.getOperand(I));
  }
}
4348
4349void Verifier::visitCallInst(CallInst &CI) {
4350 visitCallBase(CI);
4351
4352 if (CI.isMustTailCall())
4353 verifyMustTailCall(CI);
4354}
4355
4356void Verifier::visitInvokeInst(InvokeInst &II) {
4357 visitCallBase(II);
4358
4359 // Verify that the first non-PHI instruction of the unwind destination is an
4360 // exception handling instruction.
4361 Check(
4362 II.getUnwindDest()->isEHPad(),
4363 "The unwind destination does not have an exception handling instruction!",
4364 &II);
4365
4366 visitTerminator(II);
4367}
4368
4369/// visitUnaryOperator - Check the argument to the unary operator.
4370///
4371void Verifier::visitUnaryOperator(UnaryOperator &U) {
4372 Check(U.getType() == U.getOperand(0)->getType(),
4373 "Unary operators must have same type for"
4374 "operands and result!",
4375 &U);
4376
4377 switch (U.getOpcode()) {
4378 // Check that floating-point arithmetic operators are only used with
4379 // floating-point operands.
4380 case Instruction::FNeg:
4381 Check(U.getType()->isFPOrFPVectorTy(),
4382 "FNeg operator only works with float types!", &U);
4383 break;
4384 default:
4385 llvm_unreachable("Unknown UnaryOperator opcode!");
4386 }
4387
4388 visitInstruction(U);
4389}
4390
4391/// visitBinaryOperator - Check that both arguments to the binary operator are
4392/// of the same type!
4393///
4394void Verifier::visitBinaryOperator(BinaryOperator &B) {
4395 Check(B.getOperand(0)->getType() == B.getOperand(1)->getType(),
4396 "Both operands to a binary operator are not of the same type!", &B);
4397
4398 switch (B.getOpcode()) {
4399 // Check that integer arithmetic operators are only used with
4400 // integral operands.
4401 case Instruction::Add:
4402 case Instruction::Sub:
4403 case Instruction::Mul:
4404 case Instruction::SDiv:
4405 case Instruction::UDiv:
4406 case Instruction::SRem:
4407 case Instruction::URem:
4408 Check(B.getType()->isIntOrIntVectorTy(),
4409 "Integer arithmetic operators only work with integral types!", &B);
4410 Check(B.getType() == B.getOperand(0)->getType(),
4411 "Integer arithmetic operators must have same type "
4412 "for operands and result!",
4413 &B);
4414 break;
4415 // Check that floating-point arithmetic operators are only used with
4416 // floating-point operands.
4417 case Instruction::FAdd:
4418 case Instruction::FSub:
4419 case Instruction::FMul:
4420 case Instruction::FDiv:
4421 case Instruction::FRem:
4422 Check(B.getType()->isFPOrFPVectorTy(),
4423 "Floating-point arithmetic operators only work with "
4424 "floating-point types!",
4425 &B);
4426 Check(B.getType() == B.getOperand(0)->getType(),
4427 "Floating-point arithmetic operators must have same type "
4428 "for operands and result!",
4429 &B);
4430 break;
4431 // Check that logical operators are only used with integral operands.
4432 case Instruction::And:
4433 case Instruction::Or:
4434 case Instruction::Xor:
4435 Check(B.getType()->isIntOrIntVectorTy(),
4436 "Logical operators only work with integral types!", &B);
4437 Check(B.getType() == B.getOperand(0)->getType(),
4438 "Logical operators must have same type for operands and result!", &B);
4439 break;
4440 case Instruction::Shl:
4441 case Instruction::LShr:
4442 case Instruction::AShr:
4443 Check(B.getType()->isIntOrIntVectorTy(),
4444 "Shifts only work with integral types!", &B);
4445 Check(B.getType() == B.getOperand(0)->getType(),
4446 "Shift return type must be same as operands!", &B);
4447 break;
4448 default:
4449 llvm_unreachable("Unknown BinaryOperator opcode!");
4450 }
4451
4452 visitInstruction(B);
4453}
4454
4455void Verifier::visitICmpInst(ICmpInst &IC) {
4456 // Check that the operands are the same type
4457 Type *Op0Ty = IC.getOperand(0)->getType();
4458 Type *Op1Ty = IC.getOperand(1)->getType();
4459 Check(Op0Ty == Op1Ty,
4460 "Both operands to ICmp instruction are not of the same type!", &IC);
4461 // Check that the operands are the right type
4462 Check(Op0Ty->isIntOrIntVectorTy() || Op0Ty->isPtrOrPtrVectorTy(),
4463 "Invalid operand types for ICmp instruction", &IC);
4464 // Check that the predicate is valid.
4465 Check(IC.isIntPredicate(), "Invalid predicate in ICmp instruction!", &IC);
4466
4467 visitInstruction(IC);
4468}
4469
4470void Verifier::visitFCmpInst(FCmpInst &FC) {
4471 // Check that the operands are the same type
4472 Type *Op0Ty = FC.getOperand(0)->getType();
4473 Type *Op1Ty = FC.getOperand(1)->getType();
4474 Check(Op0Ty == Op1Ty,
4475 "Both operands to FCmp instruction are not of the same type!", &FC);
4476 // Check that the operands are the right type
4477 Check(Op0Ty->isFPOrFPVectorTy(), "Invalid operand types for FCmp instruction",
4478 &FC);
4479 // Check that the predicate is valid.
4480 Check(FC.isFPPredicate(), "Invalid predicate in FCmp instruction!", &FC);
4481
4482 visitInstruction(FC);
4483}
4484
4485void Verifier::visitExtractElementInst(ExtractElementInst &EI) {
4487 "Invalid extractelement operands!", &EI);
4488 visitInstruction(EI);
4489}
4490
4491void Verifier::visitInsertElementInst(InsertElementInst &IE) {
4492 Check(InsertElementInst::isValidOperands(IE.getOperand(0), IE.getOperand(1),
4493 IE.getOperand(2)),
4494 "Invalid insertelement operands!", &IE);
4495 visitInstruction(IE);
4496}
4497
4498void Verifier::visitShuffleVectorInst(ShuffleVectorInst &SV) {
4500 SV.getShuffleMask()),
4501 "Invalid shufflevector operands!", &SV);
4502 visitInstruction(SV);
4503}
4504
4505void Verifier::visitGetElementPtrInst(GetElementPtrInst &GEP) {
4507 GEP.getModule()->getModuleFlag("require-logical-pointer")))
4508 Check(!MD->getZExtValue(),
4509 "Non-logical getelementptr disallowed for this module.");
4510
4511 Type *TargetTy = GEP.getPointerOperandType()->getScalarType();
4512
4513 Check(isa<PointerType>(TargetTy),
4514 "GEP base pointer is not a vector or a vector of pointers", &GEP);
4515 Check(GEP.getSourceElementType()->isSized(), "GEP into unsized type!", &GEP);
4516
4517 if (auto *STy = dyn_cast<StructType>(GEP.getSourceElementType())) {
4518 Check(!STy->isScalableTy(),
4519 "getelementptr cannot target structure that contains scalable vector"
4520 "type",
4521 &GEP);
4522 }
4523
4524 SmallVector<Value *, 16> Idxs(GEP.indices());
4525 Check(
4526 all_of(Idxs, [](Value *V) { return V->getType()->isIntOrIntVectorTy(); }),
4527 "GEP indexes must be integers", &GEP);
4528 Type *ElTy =
4529 GetElementPtrInst::getIndexedType(GEP.getSourceElementType(), Idxs);
4530 Check(ElTy, "Invalid indices for GEP pointer type!", &GEP);
4531
4532 PointerType *PtrTy = dyn_cast<PointerType>(GEP.getType()->getScalarType());
4533
4534 Check(PtrTy && GEP.getResultElementType() == ElTy,
4535 "GEP is not of right type for indices!", &GEP, ElTy);
4536
4537 if (auto *GEPVTy = dyn_cast<VectorType>(GEP.getType())) {
4538 // Additional checks for vector GEPs.
4539 ElementCount GEPWidth = GEPVTy->getElementCount();
4540 if (GEP.getPointerOperandType()->isVectorTy())
4541 Check(
4542 GEPWidth ==
4543 cast<VectorType>(GEP.getPointerOperandType())->getElementCount(),
4544 "Vector GEP result width doesn't match operand's", &GEP);
4545 for (Value *Idx : Idxs) {
4546 Type *IndexTy = Idx->getType();
4547 if (auto *IndexVTy = dyn_cast<VectorType>(IndexTy)) {
4548 ElementCount IndexWidth = IndexVTy->getElementCount();
4549 Check(IndexWidth == GEPWidth, "Invalid GEP index vector width", &GEP);
4550 }
4551 Check(IndexTy->isIntOrIntVectorTy(),
4552 "All GEP indices should be of integer type");
4553 }
4554 }
4555
4556 // Check that GEP does not index into a vector with non-byte-addressable
4557 // elements.
4559 GTI != GTE; ++GTI) {
4560 if (GTI.isVector()) {
4561 Type *ElemTy = GTI.getIndexedType();
4562 Check(DL.typeSizeEqualsStoreSize(ElemTy),
4563 "GEP into vector with non-byte-addressable element type", &GEP);
4564 }
4565 }
4566
4567 Check(GEP.getAddressSpace() == PtrTy->getAddressSpace(),
4568 "GEP address space doesn't match type", &GEP);
4569
4570 visitInstruction(GEP);
4571}
4572
4573static bool isContiguous(const ConstantRange &A, const ConstantRange &B) {
4574 return A.getUpper() == B.getLower() || A.getLower() == B.getUpper();
4575}
4576
/// Verify !range and !absolute_symbol metadata. These have the same
/// restrictions, except !absolute_symbol allows the full set.
void Verifier::verifyRangeLikeMetadata(const Value &I, const MDNode *Range,
                                       Type *Ty, RangeLikeMetadataKind Kind) {
  // The node is a flat list of (low, high) constant pairs.
  unsigned NumOperands = Range->getNumOperands();
  Check(NumOperands % 2 == 0, "Unfinished range!", Range);
  unsigned NumRanges = NumOperands / 2;
  Check(NumRanges >= 1, "It should have at least one range!", Range);

  ConstantRange LastRange(1, true); // Dummy initial value
  for (unsigned i = 0; i < NumRanges; ++i) {
    ConstantInt *Low =
        mdconst::dyn_extract<ConstantInt>(Range->getOperand(2 * i));
    Check(Low, "The lower limit must be an integer!", Low);
    ConstantInt *High =
        mdconst::dyn_extract<ConstantInt>(Range->getOperand(2 * i + 1));
    Check(High, "The upper limit must be an integer!", High);

    Check(High->getType() == Low->getType(), "Range pair types must match!",
          &I);

    // !noalias.addrspace always uses i32 bounds; the other kinds must match
    // the (scalar) type of the value the metadata is attached to.
    if (Kind == RangeLikeMetadataKind::NoaliasAddrspace) {
      Check(High->getType()->isIntegerTy(32),
            "noalias.addrspace type must be i32!", &I);
    } else {
      Check(High->getType() == Ty->getScalarType(),
            "Range types must match instruction type!", &I);
    }

    APInt HighV = High->getValue();
    APInt LowV = Low->getValue();

    // ConstantRange asserts if the ranges are the same except for the min/max
    // value. Leave the cases it tolerates for the empty range error below.
    Check(LowV != HighV || LowV.isMaxValue() || LowV.isMinValue(),
          "The upper and lower limits cannot be the same value", &I);

    // Only !absolute_symbol may use the full-set encoding.
    ConstantRange CurRange(LowV, HighV);
    Check(!CurRange.isEmptySet() &&
              (Kind == RangeLikeMetadataKind::AbsoluteSymbol ||
               !CurRange.isFullSet()),
          "Range must not be empty!", Range);
    // Pairs must be disjoint, ordered, and non-adjacent (adjacent pairs
    // should have been merged into one).
    if (i != 0) {
      Check(CurRange.intersectWith(LastRange).isEmptySet(),
            "Intervals are overlapping", Range);
      Check(LowV.sgt(LastRange.getLower()), "Intervals are not in order",
            Range);
      Check(!isContiguous(CurRange, LastRange), "Intervals are contiguous",
            Range);
    }
    LastRange = ConstantRange(LowV, HighV);
  }
  // With 3+ pairs, the last (possibly wrapping) range must also be checked
  // against the first one; with exactly 2 the loop above already did.
  if (NumRanges > 2) {
    APInt FirstLow =
        mdconst::dyn_extract<ConstantInt>(Range->getOperand(0))->getValue();
    APInt FirstHigh =
        mdconst::dyn_extract<ConstantInt>(Range->getOperand(1))->getValue();
    ConstantRange FirstRange(FirstLow, FirstHigh);
    Check(FirstRange.intersectWith(LastRange).isEmptySet(),
          "Intervals are overlapping", Range);
    Check(!isContiguous(FirstRange, LastRange), "Intervals are contiguous",
          Range);
  }
}
4641
4642void Verifier::visitRangeMetadata(Instruction &I, MDNode *Range, Type *Ty) {
4643 assert(Range && Range == I.getMetadata(LLVMContext::MD_range) &&
4644 "precondition violation");
4645 verifyRangeLikeMetadata(I, Range, Ty, RangeLikeMetadataKind::Range);
4646}
4647
4648void Verifier::visitNoFPClassMetadata(Instruction &I, MDNode *NoFPClass,
4649 Type *Ty) {
4650 Check(AttributeFuncs::isNoFPClassCompatibleType(Ty),
4651 "nofpclass only applies to floating-point typed loads", I);
4652
4653 Check(NoFPClass->getNumOperands() == 1,
4654 "nofpclass must have exactly one entry", NoFPClass);
4655 ConstantInt *MaskVal =
4657 Check(MaskVal && MaskVal->getType()->isIntegerTy(32),
4658 "nofpclass entry must be a constant i32", NoFPClass);
4659 uint32_t Val = MaskVal->getZExtValue();
4660 Check(Val != 0, "'nofpclass' must have at least one test bit set", NoFPClass,
4661 I);
4662
4663 Check((Val & ~static_cast<unsigned>(fcAllFlags)) == 0,
4664 "Invalid value for 'nofpclass' test mask", NoFPClass, I);
4665}
4666
4667void Verifier::visitNoaliasAddrspaceMetadata(Instruction &I, MDNode *Range,
4668 Type *Ty) {
4669 assert(Range && Range == I.getMetadata(LLVMContext::MD_noalias_addrspace) &&
4670 "precondition violation");
4671 verifyRangeLikeMetadata(I, Range, Ty,
4672 RangeLikeMetadataKind::NoaliasAddrspace);
4673}
4674
4675void Verifier::checkAtomicMemAccessSize(Type *Ty, const Instruction *I) {
4676 unsigned Size = DL.getTypeSizeInBits(Ty).getFixedValue();
4677 Check(Size >= 8, "atomic memory access' size must be byte-sized", Ty, I);
4678 Check(!(Size & (Size - 1)),
4679 "atomic memory access' operand must have a power-of-two size", Ty, I);
4680}
4681
4682void Verifier::visitLoadInst(LoadInst &LI) {
4684 Check(PTy, "Load operand must be a pointer.", &LI);
4685 Type *ElTy = LI.getType();
4686 if (MaybeAlign A = LI.getAlign()) {
4687 Check(A->value() <= Value::MaximumAlignment,
4688 "huge alignment values are unsupported", &LI);
4689 }
4690 Check(ElTy->isSized(), "loading unsized types is not allowed", &LI);
4691 if (LI.isAtomic()) {
4692 Check(LI.getOrdering() != AtomicOrdering::Release &&
4693 LI.getOrdering() != AtomicOrdering::AcquireRelease,
4694 "Load cannot have Release ordering", &LI);
4695 Check(ElTy->getScalarType()->isIntOrPtrTy() ||
4696 ElTy->getScalarType()->isByteTy() ||
4698 "atomic load operand must have integer, byte, pointer, floating "
4699 "point, or vector type!",
4700 ElTy, &LI);
4701
4702 checkAtomicMemAccessSize(ElTy, &LI);
4703 } else {
4705 "Non-atomic load cannot have SynchronizationScope specified", &LI);
4706 }
4707
4708 visitInstruction(LI);
4709}
4710
4711void Verifier::visitStoreInst(StoreInst &SI) {
4712 PointerType *PTy = dyn_cast<PointerType>(SI.getOperand(1)->getType());
4713 Check(PTy, "Store operand must be a pointer.", &SI);
4714 Type *ElTy = SI.getOperand(0)->getType();
4715 if (MaybeAlign A = SI.getAlign()) {
4716 Check(A->value() <= Value::MaximumAlignment,
4717 "huge alignment values are unsupported", &SI);
4718 }
4719 Check(ElTy->isSized(), "storing unsized types is not allowed", &SI);
4720 if (SI.isAtomic()) {
4721 Check(SI.getOrdering() != AtomicOrdering::Acquire &&
4722 SI.getOrdering() != AtomicOrdering::AcquireRelease,
4723 "Store cannot have Acquire ordering", &SI);
4724 Check(ElTy->getScalarType()->isIntOrPtrTy() ||
4725 ElTy->getScalarType()->isByteTy() ||
4727 "atomic store operand must have integer, byte, pointer, floating "
4728 "point, or vector type!",
4729 ElTy, &SI);
4730 checkAtomicMemAccessSize(ElTy, &SI);
4731 } else {
4732 Check(SI.getSyncScopeID() == SyncScope::System,
4733 "Non-atomic store cannot have SynchronizationScope specified", &SI);
4734 }
4735 visitInstruction(SI);
4736}
4737
4738/// Check that SwiftErrorVal is used as a swifterror argument in CS.
4739void Verifier::verifySwiftErrorCall(CallBase &Call,
4740 const Value *SwiftErrorVal) {
4741 for (const auto &I : llvm::enumerate(Call.args())) {
4742 if (I.value() == SwiftErrorVal) {
4743 Check(Call.paramHasAttr(I.index(), Attribute::SwiftError),
4744 "swifterror value when used in a callsite should be marked "
4745 "with swifterror attribute",
4746 SwiftErrorVal, Call);
4747 }
4748 }
4749}
4750
4751void Verifier::verifySwiftErrorValue(const Value *SwiftErrorVal) {
4752 // Check that swifterror value is only used by loads, stores, or as
4753 // a swifterror argument.
4754 for (const User *U : SwiftErrorVal->users()) {
4756 isa<InvokeInst>(U),
4757 "swifterror value can only be loaded and stored from, or "
4758 "as a swifterror argument!",
4759 SwiftErrorVal, U);
4760 // If it is used by a store, check it is the second operand.
4761 if (auto StoreI = dyn_cast<StoreInst>(U))
4762 Check(StoreI->getOperand(1) == SwiftErrorVal,
4763 "swifterror value should be the second operand when used "
4764 "by stores",
4765 SwiftErrorVal, U);
4766 if (auto *Call = dyn_cast<CallBase>(U))
4767 verifySwiftErrorCall(*const_cast<CallBase *>(Call), SwiftErrorVal);
4768 }
4769}
4770
4771void Verifier::visitAllocaInst(AllocaInst &AI) {
4773 AI.getModule()->getModuleFlag("require-logical-pointer")))
4774 Check(!MD->getZExtValue(),
4775 "Non-logical alloca disallowed for this module.");
4776
4777 Type *Ty = AI.getAllocatedType();
4778 SmallPtrSet<Type*, 4> Visited;
4779 Check(Ty->isSized(&Visited), "Cannot allocate unsized type", &AI);
4780 // Check if it's a target extension type that disallows being used on the
4781 // stack.
4783 "Alloca has illegal target extension type", &AI);
4785 "Alloca array size must have integer type", &AI);
4786 if (MaybeAlign A = AI.getAlign()) {
4787 Check(A->value() <= Value::MaximumAlignment,
4788 "huge alignment values are unsupported", &AI);
4789 }
4790
4791 if (AI.isSwiftError()) {
4792 Check(Ty->isPointerTy(), "swifterror alloca must have pointer type", &AI);
4794 "swifterror alloca must not be array allocation", &AI);
4795 verifySwiftErrorValue(&AI);
4796 }
4797
4798 if (TT.isAMDGPU()) {
4800 "alloca on amdgpu must be in addrspace(5)", &AI);
4801 }
4802
4803 visitInstruction(AI);
4804}
4805
4806void Verifier::visitAtomicCmpXchgInst(AtomicCmpXchgInst &CXI) {
4807 Type *ElTy = CXI.getOperand(1)->getType();
4808 Check(ElTy->isIntOrPtrTy(),
4809 "cmpxchg operand must have integer or pointer type", ElTy, &CXI);
4810 checkAtomicMemAccessSize(ElTy, &CXI);
4811 visitInstruction(CXI);
4812}
4813
4814void Verifier::visitAtomicRMWInst(AtomicRMWInst &RMWI) {
4815 Check(RMWI.getOrdering() != AtomicOrdering::Unordered,
4816 "atomicrmw instructions cannot be unordered.", &RMWI);
4817 auto Op = RMWI.getOperation();
4818 Type *ElTy = RMWI.getOperand(1)->getType();
4819 Type *ScalarTy = ElTy;
4820 if (RMWI.isElementwise()) {
4821 auto *VecTy = dyn_cast<FixedVectorType>(ElTy);
4822 Check(VecTy, "atomicrmw elementwise operand must have fixed vector type!",
4823 &RMWI, ElTy);
4824 if (VecTy)
4825 ScalarTy = VecTy->getElementType();
4826 }
4827
4828 if (Op == AtomicRMWInst::Xchg) {
4829 Check(ScalarTy->isIntegerTy() || ScalarTy->isFloatingPointTy() ||
4830 ScalarTy->isPointerTy(),
4831 "atomicrmw " + AtomicRMWInst::getOperationName(Op) +
4832 " operand must have integer or floating point type!",
4833 &RMWI, ElTy);
4834 } else if (AtomicRMWInst::isFPOperation(Op)) {
4836 "atomicrmw " + AtomicRMWInst::getOperationName(Op) +
4837 " operand must have floating-point or fixed vector of "
4838 "floating-point "
4839 "type!",
4840 &RMWI, ElTy);
4841 } else {
4842 Check(ScalarTy->isIntegerTy(),
4843 "atomicrmw " + AtomicRMWInst::getOperationName(Op) +
4844 " operand must have integer type!",
4845 &RMWI, ElTy);
4846 }
4847 checkAtomicMemAccessSize(ElTy, &RMWI);
4849 "Invalid binary operation!", &RMWI);
4850 visitInstruction(RMWI);
4851}
4852
4853void Verifier::visitFenceInst(FenceInst &FI) {
4854 const AtomicOrdering Ordering = FI.getOrdering();
4855 Check(Ordering == AtomicOrdering::Acquire ||
4856 Ordering == AtomicOrdering::Release ||
4857 Ordering == AtomicOrdering::AcquireRelease ||
4858 Ordering == AtomicOrdering::SequentiallyConsistent,
4859 "fence instructions may only have acquire, release, acq_rel, or "
4860 "seq_cst ordering.",
4861 &FI);
4862 visitInstruction(FI);
4863}
4864
4865void Verifier::visitExtractValueInst(ExtractValueInst &EVI) {
4867 EVI.getIndices()) == EVI.getType(),
4868 "Invalid ExtractValueInst operands!", &EVI);
4869
4870 visitInstruction(EVI);
4871}
4872
4873void Verifier::visitInsertValueInst(InsertValueInst &IVI) {
4875 IVI.getIndices()) ==
4876 IVI.getOperand(1)->getType(),
4877 "Invalid InsertValueInst operands!", &IVI);
4878
4879 visitInstruction(IVI);
4880}
4881
4882static Value *getParentPad(Value *EHPad) {
4883 if (auto *FPI = dyn_cast<FuncletPadInst>(EHPad))
4884 return FPI->getParentPad();
4885
4886 return cast<CatchSwitchInst>(EHPad)->getParentPad();
4887}
4888
// Verify that every predecessor of the block containing EH pad \p I reaches
// it via a legal edge: landingpads only from invoke unwind edges, catchpads
// only from their catchswitch, and funclet pads only from unwind edges whose
// funclet nesting legally exits into this pad's parent.
void Verifier::visitEHPadPredecessors(Instruction &I) {
  assert(I.isEHPad());

  BasicBlock *BB = I.getParent();
  Function *F = BB->getParent();

  Check(BB != &F->getEntryBlock(), "EH pad cannot be in entry block.", &I);

  if (auto *LPI = dyn_cast<LandingPadInst>(&I)) {
    // The landingpad instruction defines its parent as a landing pad block. The
    // landing pad block may be branched to only by the unwind edge of an
    // invoke.
    for (BasicBlock *PredBB : predecessors(BB)) {
      const auto *II = dyn_cast<InvokeInst>(PredBB->getTerminator());
      Check(II && II->getUnwindDest() == BB && II->getNormalDest() != BB,
            "Block containing LandingPadInst must be jumped to "
            "only by the unwind edge of an invoke.",
            LPI);
    }
    return;
  }
  if (auto *CPI = dyn_cast<CatchPadInst>(&I)) {
    if (!pred_empty(BB))
      Check(BB->getUniquePredecessor() == CPI->getCatchSwitch()->getParent(),
            "Block containg CatchPadInst must be jumped to "
            "only by its catchswitch.",
            CPI);
    Check(BB != CPI->getCatchSwitch()->getUnwindDest(),
          "Catchswitch cannot unwind to one of its catchpads",
          CPI->getCatchSwitch(), CPI);
    return;
  }

  // Verify that each pred has a legal terminator with a legal to/from EH
  // pad relationship.
  Instruction *ToPad = &I;
  Value *ToPadParent = getParentPad(ToPad);
  for (BasicBlock *PredBB : predecessors(BB)) {
    Instruction *TI = PredBB->getTerminator();
    // FromPad is the funclet (or none-token) the edge originates in.
    Value *FromPad;
    if (auto *II = dyn_cast<InvokeInst>(TI)) {
      Check(II->getUnwindDest() == BB && II->getNormalDest() != BB,
            "EH pad must be jumped to via an unwind edge", ToPad, II);
      // Non-throwing intrinsic invokes are exempt from funclet bookkeeping
      // unless they may be lowered to real function calls.
      auto *CalledFn =
          dyn_cast<Function>(II->getCalledOperand()->stripPointerCasts());
      if (CalledFn && CalledFn->isIntrinsic() && II->doesNotThrow() &&
          !IntrinsicInst::mayLowerToFunctionCall(CalledFn->getIntrinsicID()))
        continue;
      if (auto Bundle = II->getOperandBundle(LLVMContext::OB_funclet))
        FromPad = Bundle->Inputs[0];
      else
        FromPad = ConstantTokenNone::get(II->getContext());
    } else if (auto *CRI = dyn_cast<CleanupReturnInst>(TI)) {
      FromPad = CRI->getOperand(0);
      Check(FromPad != ToPadParent, "A cleanupret must exit its cleanup", CRI);
    } else if (auto *CSI = dyn_cast<CatchSwitchInst>(TI)) {
      FromPad = CSI;
    } else {
      Check(false, "EH pad must be jumped to via an unwind edge", ToPad, TI);
    }

    // The edge may exit from zero or more nested pads.
    // Walk up FromPad's parent chain; it must reach ToPad's parent before
    // hitting the none token, ToPad itself, or a cycle.
    SmallPtrSet<Value *, 8> Seen;
    for (;; FromPad = getParentPad(FromPad)) {
      Check(FromPad != ToPad,
            "EH pad cannot handle exceptions raised within it", FromPad, TI);
      if (FromPad == ToPadParent) {
        // This is a legal unwind edge.
        break;
      }
      Check(!isa<ConstantTokenNone>(FromPad),
            "A single unwind edge may only enter one EH pad", TI);
      Check(Seen.insert(FromPad).second, "EH pad jumps through a cycle of pads",
            FromPad);

      // This will be diagnosed on the corresponding instruction already. We
      // need the extra check here to make sure getParentPad() works.
      Check(isa<FuncletPadInst>(FromPad) || isa<CatchSwitchInst>(FromPad),
            "Parent pad must be catchpad/cleanuppad/catchswitch", TI);
    }
  }
}
4971
4972void Verifier::visitLandingPadInst(LandingPadInst &LPI) {
4973 // The landingpad instruction is ill-formed if it doesn't have any clauses and
4974 // isn't a cleanup.
4975 Check(LPI.getNumClauses() > 0 || LPI.isCleanup(),
4976 "LandingPadInst needs at least one clause or to be a cleanup.", &LPI);
4977
4978 visitEHPadPredecessors(LPI);
4979
4980 if (!LandingPadResultTy)
4981 LandingPadResultTy = LPI.getType();
4982 else
4983 Check(LandingPadResultTy == LPI.getType(),
4984 "The landingpad instruction should have a consistent result type "
4985 "inside a function.",
4986 &LPI);
4987
4988 Function *F = LPI.getParent()->getParent();
4989 Check(F->hasPersonalityFn(),
4990 "LandingPadInst needs to be in a function with a personality.", &LPI);
4991
4992 // The landingpad instruction must be the first non-PHI instruction in the
4993 // block.
4994 Check(LPI.getParent()->getLandingPadInst() == &LPI,
4995 "LandingPadInst not the first non-PHI instruction in the block.", &LPI);
4996
4997 for (unsigned i = 0, e = LPI.getNumClauses(); i < e; ++i) {
4998 Constant *Clause = LPI.getClause(i);
4999 if (LPI.isCatch(i)) {
5000 Check(isa<PointerType>(Clause->getType()),
5001 "Catch operand does not have pointer type!", &LPI);
5002 } else {
5003 Check(LPI.isFilter(i), "Clause is neither catch nor filter!", &LPI);
5005 "Filter operand is not an array of constants!", &LPI);
5006 }
5007 }
5008
5009 visitInstruction(LPI);
5010}
5011
5012void Verifier::visitResumeInst(ResumeInst &RI) {
5014 "ResumeInst needs to be in a function with a personality.", &RI);
5015
5016 if (!LandingPadResultTy)
5017 LandingPadResultTy = RI.getValue()->getType();
5018 else
5019 Check(LandingPadResultTy == RI.getValue()->getType(),
5020 "The resume instruction should have a consistent result type "
5021 "inside a function.",
5022 &RI);
5023
5024 visitTerminator(RI);
5025}
5026
5027void Verifier::visitCatchPadInst(CatchPadInst &CPI) {
5028 BasicBlock *BB = CPI.getParent();
5029
5030 Function *F = BB->getParent();
5031 Check(F->hasPersonalityFn(),
5032 "CatchPadInst needs to be in a function with a personality.", &CPI);
5033
5035 "CatchPadInst needs to be directly nested in a CatchSwitchInst.",
5036 CPI.getParentPad());
5037
5038 // The catchpad instruction must be the first non-PHI instruction in the
5039 // block.
5040 Check(&*BB->getFirstNonPHIIt() == &CPI,
5041 "CatchPadInst not the first non-PHI instruction in the block.", &CPI);
5042
5044 [](Use &U) {
5045 auto *V = U.get();
5046 return isa<Constant>(V) || isa<AllocaInst>(V);
5047 }),
5048 "Argument operand must be alloca or constant.", &CPI);
5049
5050 visitEHPadPredecessors(CPI);
5051 visitFuncletPadInst(CPI);
5052}
5053
5054void Verifier::visitCatchReturnInst(CatchReturnInst &CatchReturn) {
5055 Check(isa<CatchPadInst>(CatchReturn.getOperand(0)),
5056 "CatchReturnInst needs to be provided a CatchPad", &CatchReturn,
5057 CatchReturn.getOperand(0));
5058
5059 visitTerminator(CatchReturn);
5060}
5061
5062void Verifier::visitCleanupPadInst(CleanupPadInst &CPI) {
5063 BasicBlock *BB = CPI.getParent();
5064
5065 Function *F = BB->getParent();
5066 Check(F->hasPersonalityFn(),
5067 "CleanupPadInst needs to be in a function with a personality.", &CPI);
5068
5069 // The cleanuppad instruction must be the first non-PHI instruction in the
5070 // block.
5071 Check(&*BB->getFirstNonPHIIt() == &CPI,
5072 "CleanupPadInst not the first non-PHI instruction in the block.", &CPI);
5073
5074 auto *ParentPad = CPI.getParentPad();
5075 Check(isa<ConstantTokenNone>(ParentPad) || isa<FuncletPadInst>(ParentPad),
5076 "CleanupPadInst has an invalid parent.", &CPI);
5077
5078 visitEHPadPredecessors(CPI);
5079 visitFuncletPadInst(CPI);
5080}
5081
5082void Verifier::visitFuncletPadInst(FuncletPadInst &FPI) {
5083 User *FirstUser = nullptr;
5084 Value *FirstUnwindPad = nullptr;
5085 SmallVector<FuncletPadInst *, 8> Worklist({&FPI});
5086 SmallPtrSet<FuncletPadInst *, 8> Seen;
5087
5088 while (!Worklist.empty()) {
5089 FuncletPadInst *CurrentPad = Worklist.pop_back_val();
5090 Check(Seen.insert(CurrentPad).second,
5091 "FuncletPadInst must not be nested within itself", CurrentPad);
5092 Value *UnresolvedAncestorPad = nullptr;
5093 for (User *U : CurrentPad->users()) {
5094 BasicBlock *UnwindDest;
5095 if (auto *CRI = dyn_cast<CleanupReturnInst>(U)) {
5096 UnwindDest = CRI->getUnwindDest();
5097 } else if (auto *CSI = dyn_cast<CatchSwitchInst>(U)) {
5098 // We allow catchswitch unwind to caller to nest
5099 // within an outer pad that unwinds somewhere else,
5100 // because catchswitch doesn't have a nounwind variant.
5101 // See e.g. SimplifyCFGOpt::SimplifyUnreachable.
5102 if (CSI->unwindsToCaller())
5103 continue;
5104 UnwindDest = CSI->getUnwindDest();
5105 } else if (auto *II = dyn_cast<InvokeInst>(U)) {
5106 UnwindDest = II->getUnwindDest();
5107 } else if (isa<CallInst>(U)) {
5108 // Calls which don't unwind may be found inside funclet
5109 // pads that unwind somewhere else. We don't *require*
5110 // such calls to be annotated nounwind.
5111 continue;
5112 } else if (auto *CPI = dyn_cast<CleanupPadInst>(U)) {
5113 // The unwind dest for a cleanup can only be found by
5114 // recursive search. Add it to the worklist, and we'll
5115 // search for its first use that determines where it unwinds.
5116 Worklist.push_back(CPI);
5117 continue;
5118 } else {
5119 Check(isa<CatchReturnInst>(U), "Bogus funclet pad use", U);
5120 continue;
5121 }
5122
5123 Value *UnwindPad;
5124 bool ExitsFPI;
5125 if (UnwindDest) {
5126 UnwindPad = &*UnwindDest->getFirstNonPHIIt();
5127 if (!cast<Instruction>(UnwindPad)->isEHPad())
5128 continue;
5129 Value *UnwindParent = getParentPad(UnwindPad);
5130 // Ignore unwind edges that don't exit CurrentPad.
5131 if (UnwindParent == CurrentPad)
5132 continue;
5133 // Determine whether the original funclet pad is exited,
5134 // and if we are scanning nested pads determine how many
5135 // of them are exited so we can stop searching their
5136 // children.
5137 Value *ExitedPad = CurrentPad;
5138 ExitsFPI = false;
5139 do {
5140 if (ExitedPad == &FPI) {
5141 ExitsFPI = true;
5142 // Now we can resolve any ancestors of CurrentPad up to
5143 // FPI, but not including FPI since we need to make sure
5144 // to check all direct users of FPI for consistency.
5145 UnresolvedAncestorPad = &FPI;
5146 break;
5147 }
5148 Value *ExitedParent = getParentPad(ExitedPad);
5149 if (ExitedParent == UnwindParent) {
5150 // ExitedPad is the ancestor-most pad which this unwind
5151 // edge exits, so we can resolve up to it, meaning that
5152 // ExitedParent is the first ancestor still unresolved.
5153 UnresolvedAncestorPad = ExitedParent;
5154 break;
5155 }
5156 ExitedPad = ExitedParent;
5157 } while (!isa<ConstantTokenNone>(ExitedPad));
5158 } else {
5159 // Unwinding to caller exits all pads.
5160 UnwindPad = ConstantTokenNone::get(FPI.getContext());
5161 ExitsFPI = true;
5162 UnresolvedAncestorPad = &FPI;
5163 }
5164
5165 if (ExitsFPI) {
5166 // This unwind edge exits FPI. Make sure it agrees with other
5167 // such edges.
5168 if (FirstUser) {
5169 Check(UnwindPad == FirstUnwindPad,
5170 "Unwind edges out of a funclet "
5171 "pad must have the same unwind "
5172 "dest",
5173 &FPI, U, FirstUser);
5174 } else {
5175 FirstUser = U;
5176 FirstUnwindPad = UnwindPad;
5177 // Record cleanup sibling unwinds for verifySiblingFuncletUnwinds
5178 if (isa<CleanupPadInst>(&FPI) && !isa<ConstantTokenNone>(UnwindPad) &&
5179 getParentPad(UnwindPad) == getParentPad(&FPI))
5180 SiblingFuncletInfo[&FPI] = cast<Instruction>(U);
5181 }
5182 }
5183 // Make sure we visit all uses of FPI, but for nested pads stop as
5184 // soon as we know where they unwind to.
5185 if (CurrentPad != &FPI)
5186 break;
5187 }
5188 if (UnresolvedAncestorPad) {
5189 if (CurrentPad == UnresolvedAncestorPad) {
5190 // When CurrentPad is FPI itself, we don't mark it as resolved even if
5191 // we've found an unwind edge that exits it, because we need to verify
5192 // all direct uses of FPI.
5193 assert(CurrentPad == &FPI);
5194 continue;
5195 }
5196 // Pop off the worklist any nested pads that we've found an unwind
5197 // destination for. The pads on the worklist are the uncles,
5198 // great-uncles, etc. of CurrentPad. We've found an unwind destination
5199 // for all ancestors of CurrentPad up to but not including
5200 // UnresolvedAncestorPad.
5201 Value *ResolvedPad = CurrentPad;
5202 while (!Worklist.empty()) {
5203 Value *UnclePad = Worklist.back();
5204 Value *AncestorPad = getParentPad(UnclePad);
5205 // Walk ResolvedPad up the ancestor list until we either find the
5206 // uncle's parent or the last resolved ancestor.
5207 while (ResolvedPad != AncestorPad) {
5208 Value *ResolvedParent = getParentPad(ResolvedPad);
5209 if (ResolvedParent == UnresolvedAncestorPad) {
5210 break;
5211 }
5212 ResolvedPad = ResolvedParent;
5213 }
5214 // If the resolved ancestor search didn't find the uncle's parent,
5215 // then the uncle is not yet resolved.
5216 if (ResolvedPad != AncestorPad)
5217 break;
5218 // This uncle is resolved, so pop it from the worklist.
5219 Worklist.pop_back();
5220 }
5221 }
5222 }
5223
5224 if (FirstUnwindPad) {
5225 if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(FPI.getParentPad())) {
5226 BasicBlock *SwitchUnwindDest = CatchSwitch->getUnwindDest();
5227 Value *SwitchUnwindPad;
5228 if (SwitchUnwindDest)
5229 SwitchUnwindPad = &*SwitchUnwindDest->getFirstNonPHIIt();
5230 else
5231 SwitchUnwindPad = ConstantTokenNone::get(FPI.getContext());
5232 Check(SwitchUnwindPad == FirstUnwindPad,
5233 "Unwind edges out of a catch must have the same unwind dest as "
5234 "the parent catchswitch",
5235 &FPI, FirstUser, CatchSwitch);
5236 }
5237 }
5238
5239 visitInstruction(FPI);
5240}
5241
5242void Verifier::visitCatchSwitchInst(CatchSwitchInst &CatchSwitch) {
5243 BasicBlock *BB = CatchSwitch.getParent();
5244
5245 Function *F = BB->getParent();
5246 Check(F->hasPersonalityFn(),
5247 "CatchSwitchInst needs to be in a function with a personality.",
5248 &CatchSwitch);
5249
5250 // The catchswitch instruction must be the first non-PHI instruction in the
5251 // block.
5252 Check(&*BB->getFirstNonPHIIt() == &CatchSwitch,
5253 "CatchSwitchInst not the first non-PHI instruction in the block.",
5254 &CatchSwitch);
5255
5256 auto *ParentPad = CatchSwitch.getParentPad();
5257 Check(isa<ConstantTokenNone>(ParentPad) || isa<FuncletPadInst>(ParentPad),
5258 "CatchSwitchInst has an invalid parent.", ParentPad);
5259
5260 if (BasicBlock *UnwindDest = CatchSwitch.getUnwindDest()) {
5261 BasicBlock::iterator I = UnwindDest->getFirstNonPHIIt();
5262 Check(I->isEHPad() && !isa<LandingPadInst>(I),
5263 "CatchSwitchInst must unwind to an EH block which is not a "
5264 "landingpad.",
5265 &CatchSwitch);
5266
5267 // Record catchswitch sibling unwinds for verifySiblingFuncletUnwinds
5268 if (getParentPad(&*I) == ParentPad)
5269 SiblingFuncletInfo[&CatchSwitch] = &CatchSwitch;
5270 }
5271
5272 Check(CatchSwitch.getNumHandlers() != 0,
5273 "CatchSwitchInst cannot have empty handler list", &CatchSwitch);
5274
5275 for (BasicBlock *Handler : CatchSwitch.handlers()) {
5276 Check(isa<CatchPadInst>(Handler->getFirstNonPHIIt()),
5277 "CatchSwitchInst handlers must be catchpads", &CatchSwitch, Handler);
5278 }
5279
5280 visitEHPadPredecessors(CatchSwitch);
5281 visitTerminator(CatchSwitch);
5282}
5283
5284void Verifier::visitCleanupReturnInst(CleanupReturnInst &CRI) {
5286 "CleanupReturnInst needs to be provided a CleanupPad", &CRI,
5287 CRI.getOperand(0));
5288
5289 if (BasicBlock *UnwindDest = CRI.getUnwindDest()) {
5290 BasicBlock::iterator I = UnwindDest->getFirstNonPHIIt();
5291 Check(I->isEHPad() && !isa<LandingPadInst>(I),
5292 "CleanupReturnInst must unwind to an EH block which is not a "
5293 "landingpad.",
5294 &CRI);
5295 }
5296
5297 visitTerminator(CRI);
5298}
5299
5300void Verifier::verifyDominatesUse(Instruction &I, unsigned i) {
5301 Instruction *Op = cast<Instruction>(I.getOperand(i));
5302 // If the we have an invalid invoke, don't try to compute the dominance.
5303 // We already reject it in the invoke specific checks and the dominance
5304 // computation doesn't handle multiple edges.
5305 if (InvokeInst *II = dyn_cast<InvokeInst>(Op)) {
5306 if (II->getNormalDest() == II->getUnwindDest())
5307 return;
5308 }
5309
5310 // Quick check whether the def has already been encountered in the same block.
5311 // PHI nodes are not checked to prevent accepting preceding PHIs, because PHI
5312 // uses are defined to happen on the incoming edge, not at the instruction.
5313 //
5314 // FIXME: If this operand is a MetadataAsValue (wrapping a LocalAsMetadata)
5315 // wrapping an SSA value, assert that we've already encountered it. See
5316 // related FIXME in Mapper::mapLocalAsMetadata in ValueMapper.cpp.
5317 if (!isa<PHINode>(I) && InstsInThisBlock.count(Op))
5318 return;
5319
5320 const Use &U = I.getOperandUse(i);
5321 Check(DT.dominates(Op, U), "Instruction does not dominate all uses!", Op, &I);
5322}
5323
5324void Verifier::visitDereferenceableMetadata(Instruction& I, MDNode* MD) {
5325 Check(I.getType()->isPointerTy(),
5326 "dereferenceable, dereferenceable_or_null "
5327 "apply only to pointer types",
5328 &I);
5330 "dereferenceable, dereferenceable_or_null apply only to load"
5331 " and inttoptr instructions, use attributes for calls or invokes",
5332 &I);
5333 Check(MD->getNumOperands() == 1,
5334 "dereferenceable, dereferenceable_or_null "
5335 "take one operand!",
5336 &I);
5337 ConstantInt *CI = mdconst::dyn_extract<ConstantInt>(MD->getOperand(0));
5338 Check(CI && CI->getType()->isIntegerTy(64),
5339 "dereferenceable, "
5340 "dereferenceable_or_null metadata value must be an i64!",
5341 &I);
5342}
5343
5344void Verifier::visitNofreeMetadata(Instruction &I, MDNode *MD) {
5345 Check(I.getType()->isPointerTy(), "nofree applies only to pointer types", &I);
5346 Check((isa<IntToPtrInst>(I)), "nofree applies only to inttoptr instruction",
5347 &I);
5348 Check(MD->getNumOperands() == 0, "nofree metadata must be empty", &I);
5349}
5350
5351void Verifier::visitProfMetadata(Instruction &I, MDNode *MD) {
5352 auto GetBranchingTerminatorNumOperands = [&]() {
5353 unsigned ExpectedNumOperands = 0;
5354 if (CondBrInst *BI = dyn_cast<CondBrInst>(&I))
5355 ExpectedNumOperands = BI->getNumSuccessors();
5356 else if (SwitchInst *SI = dyn_cast<SwitchInst>(&I))
5357 ExpectedNumOperands = SI->getNumSuccessors();
5358 else if (isa<CallInst>(&I))
5359 ExpectedNumOperands = 1;
5360 else if (IndirectBrInst *IBI = dyn_cast<IndirectBrInst>(&I))
5361 ExpectedNumOperands = IBI->getNumDestinations();
5362 else if (isa<SelectInst>(&I))
5363 ExpectedNumOperands = 2;
5364 else if (CallBrInst *CI = dyn_cast<CallBrInst>(&I))
5365 ExpectedNumOperands = CI->getNumSuccessors();
5366 return ExpectedNumOperands;
5367 };
5368 Check(MD->getNumOperands() >= 1,
5369 "!prof annotations should have at least 1 operand", MD);
5370 // Check first operand.
5371 Check(MD->getOperand(0) != nullptr, "first operand should not be null", MD);
5373 "expected string with name of the !prof annotation", MD);
5374 MDString *MDS = cast<MDString>(MD->getOperand(0));
5375 StringRef ProfName = MDS->getString();
5376
5378 Check(GetBranchingTerminatorNumOperands() != 0 || isa<InvokeInst>(I),
5379 "'unknown' !prof should only appear on instructions on which "
5380 "'branch_weights' would",
5381 MD);
5382 verifyUnknownProfileMetadata(MD);
5383 return;
5384 }
5385
5386 Check(MD->getNumOperands() >= 2,
5387 "!prof annotations should have no less than 2 operands", MD);
5388
5389 // Check consistency of !prof branch_weights metadata.
5390 if (ProfName == MDProfLabels::BranchWeights) {
5391 unsigned NumBranchWeights = getNumBranchWeights(*MD);
5392 if (isa<InvokeInst>(&I)) {
5393 Check(NumBranchWeights == 1 || NumBranchWeights == 2,
5394 "Wrong number of InvokeInst branch_weights operands", MD);
5395 } else {
5396 const unsigned ExpectedNumOperands = GetBranchingTerminatorNumOperands();
5397 if (ExpectedNumOperands == 0)
5398 CheckFailed("!prof branch_weights are not allowed for this instruction",
5399 MD);
5400
5401 Check(NumBranchWeights == ExpectedNumOperands, "Wrong number of operands",
5402 MD);
5403 }
5404 for (unsigned i = getBranchWeightOffset(MD); i < MD->getNumOperands();
5405 ++i) {
5406 auto &MDO = MD->getOperand(i);
5407 Check(MDO, "second operand should not be null", MD);
5409 "!prof brunch_weights operand is not a const int");
5410 }
5411 } else if (ProfName == MDProfLabels::ValueProfile) {
5412 Check(isValueProfileMD(MD), "invalid value profiling metadata", MD);
5413 ConstantInt *KindInt = mdconst::dyn_extract<ConstantInt>(MD->getOperand(1));
5414 Check(KindInt, "VP !prof missing kind argument", MD);
5415
5416 auto Kind = KindInt->getZExtValue();
5417 Check(Kind >= InstrProfValueKind::IPVK_First &&
5418 Kind <= InstrProfValueKind::IPVK_Last,
5419 "Invalid VP !prof kind", MD);
5420 Check(MD->getNumOperands() % 2 == 1,
5421 "VP !prof should have an even number "
5422 "of arguments after 'VP'",
5423 MD);
5424 if (Kind == InstrProfValueKind::IPVK_IndirectCallTarget ||
5425 Kind == InstrProfValueKind::IPVK_MemOPSize)
5427 "VP !prof indirect call or memop size expected to be applied to "
5428 "CallBase instructions only",
5429 MD);
5430 } else {
5431 CheckFailed("expected either branch_weights or VP profile name", MD);
5432 }
5433}
5434
5435void Verifier::visitDIAssignIDMetadata(Instruction &I, MDNode *MD) {
5436 assert(I.hasMetadata(LLVMContext::MD_DIAssignID));
5437 // DIAssignID metadata must be attached to either an alloca or some form of
5438 // store/memory-writing instruction.
5439 // FIXME: We allow all intrinsic insts here to avoid trying to enumerate all
5440 // possible store intrinsics.
5441 bool ExpectedInstTy =
5443 CheckDI(ExpectedInstTy, "!DIAssignID attached to unexpected instruction kind",
5444 I, MD);
5445 // Iterate over the MetadataAsValue uses of the DIAssignID - these should
5446 // only be found as DbgAssignIntrinsic operands.
5447 if (auto *AsValue = MetadataAsValue::getIfExists(Context, MD)) {
5448 for (auto *User : AsValue->users()) {
5450 "!DIAssignID should only be used by llvm.dbg.assign intrinsics",
5451 MD, User);
5452 // All of the dbg.assign intrinsics should be in the same function as I.
5453 if (auto *DAI = dyn_cast<DbgAssignIntrinsic>(User))
5454 CheckDI(DAI->getFunction() == I.getFunction(),
5455 "dbg.assign not in same function as inst", DAI, &I);
5456 }
5457 }
5458 for (DbgVariableRecord *DVR :
5459 cast<DIAssignID>(MD)->getAllDbgVariableRecordUsers()) {
5460 CheckDI(DVR->isDbgAssign(),
5461 "!DIAssignID should only be used by Assign DVRs.", MD, DVR);
5462 CheckDI(DVR->getFunction() == I.getFunction(),
5463 "DVRAssign not in same function as inst", DVR, &I);
5464 }
5465}
5466
5467void Verifier::visitMMRAMetadata(Instruction &I, MDNode *MD) {
5469 "!mmra metadata attached to unexpected instruction kind", I, MD);
5470
5471 // MMRA Metadata should either be a tag, e.g. !{!"foo", !"bar"}, or a
5472 // list of tags such as !2 in the following example:
5473 // !0 = !{!"a", !"b"}
5474 // !1 = !{!"c", !"d"}
5475 // !2 = !{!0, !1}
5476 if (MMRAMetadata::isTagMD(MD))
5477 return;
5478
5479 Check(isa<MDTuple>(MD), "!mmra expected to be a metadata tuple", I, MD);
5480 for (const MDOperand &MDOp : MD->operands())
5481 Check(MMRAMetadata::isTagMD(MDOp.get()),
5482 "!mmra metadata tuple operand is not an MMRA tag", I, MDOp.get());
5483}
5484
5485void Verifier::visitCallStackMetadata(MDNode *MD) {
5486 // Call stack metadata should consist of a list of at least 1 constant int
5487 // (representing a hash of the location).
5488 Check(MD->getNumOperands() >= 1,
5489 "call stack metadata should have at least 1 operand", MD);
5490
5491 for (const auto &Op : MD->operands())
5493 "call stack metadata operand should be constant integer", Op);
5494}
5495
5496void Verifier::visitMemProfMetadata(Instruction &I, MDNode *MD) {
5497 Check(isa<CallBase>(I), "!memprof metadata should only exist on calls", &I);
5498 Check(MD->getNumOperands() >= 1,
5499 "!memprof annotations should have at least 1 metadata operand "
5500 "(MemInfoBlock)",
5501 MD);
5502
5503 // Check each MIB
5504 for (auto &MIBOp : MD->operands()) {
5505 MDNode *MIB = dyn_cast<MDNode>(MIBOp);
5506 // The first operand of an MIB should be the call stack metadata.
5507 // There rest of the operands should be MDString tags, and there should be
5508 // at least one.
5509 Check(MIB->getNumOperands() >= 2,
5510 "Each !memprof MemInfoBlock should have at least 2 operands", MIB);
5511
5512 // Check call stack metadata (first operand).
5513 Check(MIB->getOperand(0) != nullptr,
5514 "!memprof MemInfoBlock first operand should not be null", MIB);
5515 Check(isa<MDNode>(MIB->getOperand(0)),
5516 "!memprof MemInfoBlock first operand should be an MDNode", MIB);
5517 MDNode *StackMD = dyn_cast<MDNode>(MIB->getOperand(0));
5518 visitCallStackMetadata(StackMD);
5519
5520 // The second MIB operand should be MDString.
5522 "!memprof MemInfoBlock second operand should be an MDString", MIB);
5523
5524 // Any remaining should be MDNode that are pairs of integers
5525 for (unsigned I = 2; I < MIB->getNumOperands(); ++I) {
5526 MDNode *OpNode = dyn_cast<MDNode>(MIB->getOperand(I));
5527 Check(OpNode, "Not all !memprof MemInfoBlock operands 2 to N are MDNode",
5528 MIB);
5529 Check(OpNode->getNumOperands() == 2,
5530 "Not all !memprof MemInfoBlock operands 2 to N are MDNode with 2 "
5531 "operands",
5532 MIB);
5533 // Check that all of Op's operands are ConstantInt.
5534 Check(llvm::all_of(OpNode->operands(),
5535 [](const MDOperand &Op) {
5536 return mdconst::hasa<ConstantInt>(Op);
5537 }),
5538 "Not all !memprof MemInfoBlock operands 2 to N are MDNode with "
5539 "ConstantInt operands",
5540 MIB);
5541 }
5542 }
5543}
5544
5545void Verifier::visitCallsiteMetadata(Instruction &I, MDNode *MD) {
5546 Check(isa<CallBase>(I), "!callsite metadata should only exist on calls", &I);
5547 // Verify the partial callstack annotated from memprof profiles. This callsite
5548 // is a part of a profiled allocation callstack.
5549 visitCallStackMetadata(MD);
5550}
5551
5552static inline bool isConstantIntMetadataOperand(const Metadata *MD) {
5553 if (auto *VAL = dyn_cast<ValueAsMetadata>(MD))
5554 return isa<ConstantInt>(VAL->getValue());
5555 return false;
5556}
5557
5558void Verifier::visitCalleeTypeMetadata(Instruction &I, MDNode *MD) {
5559 Check(isa<CallBase>(I), "!callee_type metadata should only exist on calls",
5560 &I);
5561 for (Metadata *Op : MD->operands()) {
5563 "The callee_type metadata must be a list of type metadata nodes", Op);
5564 auto *TypeMD = cast<MDNode>(Op);
5565 Check(TypeMD->getNumOperands() == 2,
5566 "Well-formed generalized type metadata must contain exactly two "
5567 "operands",
5568 Op);
5569 Check(isConstantIntMetadataOperand(TypeMD->getOperand(0)) &&
5570 mdconst::extract<ConstantInt>(TypeMD->getOperand(0))->isZero(),
5571 "The first operand of type metadata for functions must be zero", Op);
5572 Check(TypeMD->hasGeneralizedMDString(),
5573 "Only generalized type metadata can be part of the callee_type "
5574 "metadata list",
5575 Op);
5576 }
5577}
5578
5579void Verifier::visitAnnotationMetadata(MDNode *Annotation) {
5580 Check(isa<MDTuple>(Annotation), "annotation must be a tuple");
5581 Check(Annotation->getNumOperands() >= 1,
5582 "annotation must have at least one operand");
5583 for (const MDOperand &Op : Annotation->operands()) {
5584 bool TupleOfStrings =
5585 isa<MDTuple>(Op.get()) &&
5586 all_of(cast<MDTuple>(Op)->operands(), [](auto &Annotation) {
5587 return isa<MDString>(Annotation.get());
5588 });
5589 Check(isa<MDString>(Op.get()) || TupleOfStrings,
5590 "operands must be a string or a tuple of strings");
5591 }
5592}
5593
5594void Verifier::visitAliasScopeMetadata(const MDNode *MD) {
5595 unsigned NumOps = MD->getNumOperands();
5596 Check(NumOps >= 2 && NumOps <= 3, "scope must have two or three operands",
5597 MD);
5598 Check(MD->getOperand(0).get() == MD || isa<MDString>(MD->getOperand(0)),
5599 "first scope operand must be self-referential or string", MD);
5600 if (NumOps == 3)
5602 "third scope operand must be string (if used)", MD);
5603
5604 MDNode *Domain = dyn_cast<MDNode>(MD->getOperand(1));
5605 Check(Domain != nullptr, "second scope operand must be MDNode", MD);
5606
5607 unsigned NumDomainOps = Domain->getNumOperands();
5608 Check(NumDomainOps >= 1 && NumDomainOps <= 2,
5609 "domain must have one or two operands", Domain);
5610 Check(Domain->getOperand(0).get() == Domain ||
5611 isa<MDString>(Domain->getOperand(0)),
5612 "first domain operand must be self-referential or string", Domain);
5613 if (NumDomainOps == 2)
5614 Check(isa<MDString>(Domain->getOperand(1)),
5615 "second domain operand must be string (if used)", Domain);
5616}
5617
5618void Verifier::visitAliasScopeListMetadata(const MDNode *MD) {
5619 for (const MDOperand &Op : MD->operands()) {
5620 const MDNode *OpMD = dyn_cast<MDNode>(Op);
5621 Check(OpMD != nullptr, "scope list must consist of MDNodes", MD);
5622 visitAliasScopeMetadata(OpMD);
5623 }
5624}
5625
5626void Verifier::visitAccessGroupMetadata(const MDNode *MD) {
5627 auto IsValidAccessScope = [](const MDNode *MD) {
5628 return MD->getNumOperands() == 0 && MD->isDistinct();
5629 };
5630
5631 // It must be either an access scope itself...
5632 if (IsValidAccessScope(MD))
5633 return;
5634
5635 // ...or a list of access scopes.
5636 for (const MDOperand &Op : MD->operands()) {
5637 const MDNode *OpMD = dyn_cast<MDNode>(Op);
5638 Check(OpMD != nullptr, "Access scope list must consist of MDNodes", MD);
5639 Check(IsValidAccessScope(OpMD),
5640 "Access scope list contains invalid access scope", MD);
5641 }
5642}
5643
5644void Verifier::visitCapturesMetadata(Instruction &I, const MDNode *Captures) {
5645 static const char *ValidArgs[] = {"address_is_null", "address",
5646 "read_provenance", "provenance"};
5647
5648 auto *SI = dyn_cast<StoreInst>(&I);
5649 Check(SI, "!captures metadata can only be applied to store instructions", &I);
5650 Check(SI->getValueOperand()->getType()->isPointerTy(),
5651 "!captures metadata can only be applied to store with value operand of "
5652 "pointer type",
5653 &I);
5654 Check(Captures->getNumOperands() != 0, "!captures metadata cannot be empty",
5655 &I);
5656
5657 for (Metadata *Op : Captures->operands()) {
5658 auto *Str = dyn_cast<MDString>(Op);
5659 Check(Str, "!captures metadata must be a list of strings", &I);
5660 Check(is_contained(ValidArgs, Str->getString()),
5661 "invalid entry in !captures metadata", &I, Str);
5662 }
5663}
5664
5665void Verifier::visitAllocTokenMetadata(Instruction &I, MDNode *MD) {
5666 Check(isa<CallBase>(I), "!alloc_token should only exist on calls", &I);
5667 Check(MD->getNumOperands() == 2, "!alloc_token must have 2 operands", MD);
5668 Check(isa<MDString>(MD->getOperand(0)), "expected string", MD);
5670 "expected integer constant", MD);
5671}
5672
5673void Verifier::visitInlineHistoryMetadata(Instruction &I, MDNode *MD) {
5674 Check(isa<CallBase>(I), "!inline_history should only exist on calls", &I);
5675 for (Metadata *Op : MD->operands()) {
5676 // Can be null when a function is erased.
5677 if (!Op)
5678 continue;
5681 ->getValue()
5682 ->stripPointerCastsAndAliases()),
5683 "!inline_history operands must be functions or null", MD);
5684 }
5685}
5686
5687/// verifyInstruction - Verify that an instruction is well formed.
5688///
5689void Verifier::visitInstruction(Instruction &I) {
5690 BasicBlock *BB = I.getParent();
5691 Check(BB, "Instruction not embedded in basic block!", &I);
5692
5693 if (!isa<PHINode>(I)) { // Check that non-phi nodes are not self referential
5694 for (User *U : I.users()) {
5695 Check(U != (User *)&I || !DT.isReachableFromEntry(BB),
5696 "Only PHI nodes may reference their own value!", &I);
5697 }
5698 }
5699
5700 // Check that void typed values don't have names
5701 Check(!I.getType()->isVoidTy() || !I.hasName(),
5702 "Instruction has a name, but provides a void value!", &I);
5703
5704 // Check that the return value of the instruction is either void or a legal
5705 // value type.
5706 Check(I.getType()->isVoidTy() || I.getType()->isFirstClassType(),
5707 "Instruction returns a non-scalar type!", &I);
5708
5709 // Check that the instruction doesn't produce metadata. Calls are already
5710 // checked against the callee type.
5711 Check(!I.getType()->isMetadataTy() || isa<CallInst>(I) || isa<InvokeInst>(I),
5712 "Invalid use of metadata!", &I);
5713
5714 // Check that all uses of the instruction, if they are instructions
5715 // themselves, actually have parent basic blocks. If the use is not an
5716 // instruction, it is an error!
5717 for (Use &U : I.uses()) {
5718 if (Instruction *Used = dyn_cast<Instruction>(U.getUser()))
5719 Check(Used->getParent() != nullptr,
5720 "Instruction referencing"
5721 " instruction not embedded in a basic block!",
5722 &I, Used);
5723 else {
5724 CheckFailed("Use of instruction is not an instruction!", U);
5725 return;
5726 }
5727 }
5728
5729 // Get a pointer to the call base of the instruction if it is some form of
5730 // call.
5731 const CallBase *CBI = dyn_cast<CallBase>(&I);
5732
5733 for (unsigned i = 0, e = I.getNumOperands(); i != e; ++i) {
5734 Check(I.getOperand(i) != nullptr, "Instruction has null operand!", &I);
5735
5736 // Check to make sure that only first-class-values are operands to
5737 // instructions.
5738 if (!I.getOperand(i)->getType()->isFirstClassType()) {
5739 Check(false, "Instruction operands must be first-class values!", &I);
5740 }
5741
5742 if (Function *F = dyn_cast<Function>(I.getOperand(i))) {
5743 // This code checks whether the function is used as the operand of a
5744 // clang_arc_attachedcall operand bundle.
5745 auto IsAttachedCallOperand = [](Function *F, const CallBase *CBI,
5746 int Idx) {
5747 return CBI && CBI->isOperandBundleOfType(
5749 };
5750
5751 // Check to make sure that the "address of" an intrinsic function is never
5752 // taken. Ignore cases where the address of the intrinsic function is used
5753 // as the argument of operand bundle "clang.arc.attachedcall" as those
5754 // cases are handled in verifyAttachedCallBundle.
5755 Check((!F->isIntrinsic() ||
5756 (CBI && &CBI->getCalledOperandUse() == &I.getOperandUse(i)) ||
5757 IsAttachedCallOperand(F, CBI, i)),
5758 "Cannot take the address of an intrinsic!", &I);
5759 Check(!F->isIntrinsic() || isa<CallInst>(I) || isa<CallBrInst>(I) ||
5760 F->getIntrinsicID() == Intrinsic::donothing ||
5761 F->getIntrinsicID() == Intrinsic::seh_try_begin ||
5762 F->getIntrinsicID() == Intrinsic::seh_try_end ||
5763 F->getIntrinsicID() == Intrinsic::seh_scope_begin ||
5764 F->getIntrinsicID() == Intrinsic::seh_scope_end ||
5765 F->getIntrinsicID() == Intrinsic::coro_resume ||
5766 F->getIntrinsicID() == Intrinsic::coro_destroy ||
5767 F->getIntrinsicID() == Intrinsic::coro_await_suspend_void ||
5768 F->getIntrinsicID() == Intrinsic::coro_await_suspend_bool ||
5769 F->getIntrinsicID() == Intrinsic::coro_await_suspend_handle ||
5770 F->getIntrinsicID() ==
5771 Intrinsic::experimental_patchpoint_void ||
5772 F->getIntrinsicID() == Intrinsic::experimental_patchpoint ||
5773 F->getIntrinsicID() == Intrinsic::fake_use ||
5774 F->getIntrinsicID() == Intrinsic::experimental_gc_statepoint ||
5775 F->getIntrinsicID() == Intrinsic::wasm_throw ||
5776 F->getIntrinsicID() == Intrinsic::wasm_rethrow ||
5777 IsAttachedCallOperand(F, CBI, i),
5778 "Cannot invoke an intrinsic other than donothing, patchpoint, "
5779 "statepoint, coro_resume, coro_destroy, clang.arc.attachedcall or "
5780 "wasm.(re)throw",
5781 &I);
5782 Check(F->getParent() == &M, "Referencing function in another module!", &I,
5783 &M, F, F->getParent());
5784 } else if (BasicBlock *OpBB = dyn_cast<BasicBlock>(I.getOperand(i))) {
5785 Check(OpBB->getParent() == BB->getParent(),
5786 "Referring to a basic block in another function!", &I);
5787 } else if (Argument *OpArg = dyn_cast<Argument>(I.getOperand(i))) {
5788 Check(OpArg->getParent() == BB->getParent(),
5789 "Referring to an argument in another function!", &I);
5790 } else if (GlobalValue *GV = dyn_cast<GlobalValue>(I.getOperand(i))) {
5791 Check(GV->getParent() == &M, "Referencing global in another module!", &I,
5792 &M, GV, GV->getParent());
5793 } else if (Instruction *OpInst = dyn_cast<Instruction>(I.getOperand(i))) {
5794 Check(OpInst->getFunction() == BB->getParent(),
5795 "Referring to an instruction in another function!", &I);
5796 verifyDominatesUse(I, i);
5797 } else if (isa<InlineAsm>(I.getOperand(i))) {
5798 Check(CBI && &CBI->getCalledOperandUse() == &I.getOperandUse(i),
5799 "Cannot take the address of an inline asm!", &I);
5800 } else if (auto *C = dyn_cast<Constant>(I.getOperand(i))) {
5801 visitConstantExprsRecursively(C);
5802 }
5803 }
5804
5805 if (MDNode *MD = I.getMetadata(LLVMContext::MD_fpmath)) {
5807 "fpmath requires a floating point result!", &I);
5808 Check(MD->getNumOperands() == 1, "fpmath takes one operand!", &I);
5809 if (ConstantFP *CFP0 =
5811 const APFloat &Accuracy = CFP0->getValueAPF();
5812 Check(&Accuracy.getSemantics() == &APFloat::IEEEsingle(),
5813 "fpmath accuracy must have float type", &I);
5814 Check(Accuracy.isFiniteNonZero() && !Accuracy.isNegative(),
5815 "fpmath accuracy not a positive number!", &I);
5816 } else {
5817 Check(false, "invalid fpmath accuracy!", &I);
5818 }
5819 }
5820
5821 if (MDNode *Range = I.getMetadata(LLVMContext::MD_range)) {
5823 "Ranges are only for loads, calls and invokes!", &I);
5824 visitRangeMetadata(I, Range, I.getType());
5825 }
5826
5827 if (MDNode *MD = I.getMetadata(LLVMContext::MD_nofpclass)) {
5828 Check(isa<LoadInst>(I), "nofpclass is only for loads", &I);
5829 visitNoFPClassMetadata(I, MD, I.getType());
5830 }
5831
5832 if (MDNode *Range = I.getMetadata(LLVMContext::MD_noalias_addrspace)) {
5835 "noalias.addrspace are only for memory operations!", &I);
5836 visitNoaliasAddrspaceMetadata(I, Range, I.getType());
5837 }
5838
5839 if (I.hasMetadata(LLVMContext::MD_invariant_group)) {
5841 "invariant.group metadata is only for loads and stores", &I);
5842 }
5843
5844 if (MDNode *MD = I.getMetadata(LLVMContext::MD_nonnull)) {
5845 Check(I.getType()->isPointerTy(), "nonnull applies only to pointer types",
5846 &I);
5848 "nonnull applies only to load instructions, use attributes"
5849 " for calls or invokes",
5850 &I);
5851 Check(MD->getNumOperands() == 0, "nonnull metadata must be empty", &I);
5852 }
5853
5854 if (MDNode *MD = I.getMetadata(LLVMContext::MD_dereferenceable))
5855 visitDereferenceableMetadata(I, MD);
5856
5857 if (MDNode *MD = I.getMetadata(LLVMContext::MD_dereferenceable_or_null))
5858 visitDereferenceableMetadata(I, MD);
5859
5860 if (MDNode *MD = I.getMetadata(LLVMContext::MD_nofree))
5861 visitNofreeMetadata(I, MD);
5862
5863 if (MDNode *TBAA = I.getMetadata(LLVMContext::MD_tbaa))
5864 TBAAVerifyHelper.visitTBAAMetadata(&I, TBAA);
5865
5866 if (MDNode *MD = I.getMetadata(LLVMContext::MD_noalias))
5867 visitAliasScopeListMetadata(MD);
5868 if (MDNode *MD = I.getMetadata(LLVMContext::MD_alias_scope))
5869 visitAliasScopeListMetadata(MD);
5870
5871 if (MDNode *MD = I.getMetadata(LLVMContext::MD_access_group))
5872 visitAccessGroupMetadata(MD);
5873
5874 if (MDNode *AlignMD = I.getMetadata(LLVMContext::MD_align)) {
5875 Check(I.getType()->isPointerTy(), "align applies only to pointer types",
5876 &I);
5878 "align applies only to load instructions, "
5879 "use attributes for calls or invokes",
5880 &I);
5881 Check(AlignMD->getNumOperands() == 1, "align takes one operand!", &I);
5882 ConstantInt *CI = mdconst::dyn_extract<ConstantInt>(AlignMD->getOperand(0));
5883 Check(CI && CI->getType()->isIntegerTy(64),
5884 "align metadata value must be an i64!", &I);
5885 uint64_t Align = CI->getZExtValue();
5886 Check(isPowerOf2_64(Align), "align metadata value must be a power of 2!",
5887 &I);
5888 Check(Align <= Value::MaximumAlignment,
5889 "alignment is larger that implementation defined limit", &I);
5890 }
5891
5892 if (MDNode *MD = I.getMetadata(LLVMContext::MD_prof))
5893 visitProfMetadata(I, MD);
5894
5895 if (MDNode *MD = I.getMetadata(LLVMContext::MD_memprof))
5896 visitMemProfMetadata(I, MD);
5897
5898 if (MDNode *MD = I.getMetadata(LLVMContext::MD_callsite))
5899 visitCallsiteMetadata(I, MD);
5900
5901 if (MDNode *MD = I.getMetadata(LLVMContext::MD_callee_type))
5902 visitCalleeTypeMetadata(I, MD);
5903
5904 if (MDNode *MD = I.getMetadata(LLVMContext::MD_DIAssignID))
5905 visitDIAssignIDMetadata(I, MD);
5906
5907 if (MDNode *MMRA = I.getMetadata(LLVMContext::MD_mmra))
5908 visitMMRAMetadata(I, MMRA);
5909
5910 if (MDNode *Annotation = I.getMetadata(LLVMContext::MD_annotation))
5911 visitAnnotationMetadata(Annotation);
5912
5913 if (MDNode *Captures = I.getMetadata(LLVMContext::MD_captures))
5914 visitCapturesMetadata(I, Captures);
5915
5916 if (MDNode *MD = I.getMetadata(LLVMContext::MD_alloc_token))
5917 visitAllocTokenMetadata(I, MD);
5918
5919 if (MDNode *MD = I.getMetadata(LLVMContext::MD_inline_history))
5920 visitInlineHistoryMetadata(I, MD);
5921
5922 if (MDNode *N = I.getDebugLoc().getAsMDNode()) {
5923 CheckDI(isa<DILocation>(N), "invalid !dbg metadata attachment", &I, N);
5924 visitMDNode(*N, AreDebugLocsAllowed::Yes);
5925
5926 if (auto *DL = dyn_cast<DILocation>(N)) {
5927 if (DL->getAtomGroup()) {
5928 CheckDI(DL->getScope()->getSubprogram()->getKeyInstructionsEnabled(),
5929 "DbgLoc uses atomGroup but DISubprogram doesn't have Key "
5930 "Instructions enabled",
5931 DL, DL->getScope()->getSubprogram());
5932 }
5933 }
5934 }
5935
5937 I.getAllMetadata(MDs);
5938 for (auto Attachment : MDs) {
5939 unsigned Kind = Attachment.first;
5940 auto AllowLocs =
5941 (Kind == LLVMContext::MD_dbg || Kind == LLVMContext::MD_loop)
5942 ? AreDebugLocsAllowed::Yes
5943 : AreDebugLocsAllowed::No;
5944 visitMDNode(*Attachment.second, AllowLocs);
5945 }
5946
5947 InstsInThisBlock.insert(&I);
5948}
5949
5950/// Allow intrinsics to be verified in different ways.
5951void Verifier::visitIntrinsicCall(Intrinsic::ID ID, CallBase &Call) {
5953 Check(IF->isDeclaration(), "Intrinsic functions should never be defined!",
5954 IF);
5955
5956 // Verify that the intrinsic prototype lines up with what the .td files
5957 // describe.
5958 FunctionType *IFTy = IF->getFunctionType();
5959
5960 // Walk the descriptors to extract overloaded types.
5961 std::string ErrMsg;
5962 raw_string_ostream ErrOS(ErrMsg);
5963 SmallVector<Type *, 4> OverloadTys;
5964 bool IsValid = Intrinsic::isSignatureValid(ID, IFTy, OverloadTys, ErrOS);
5965 Check(IsValid, ErrMsg, IF);
5966
5967 // Now that we have the intrinsic ID and the actual argument types (and we
5968 // know they are legal for the intrinsic!) get the intrinsic name through the
5969 // usual means. This allows us to verify the mangling of argument types into
5970 // the name.
5971 const std::string ExpectedName =
5972 Intrinsic::getName(ID, OverloadTys, IF->getParent(), IFTy);
5973 Check(ExpectedName == IF->getName(),
5974 "Intrinsic name not mangled correctly for type arguments! "
5975 "Should be: " +
5976 ExpectedName,
5977 IF);
5978
5979 // If the intrinsic takes MDNode arguments, verify that they are either global
5980 // or are local to *this* function.
5981 for (Value *V : Call.args()) {
5982 if (auto *MD = dyn_cast<MetadataAsValue>(V))
5983 visitMetadataAsValue(*MD, Call.getCaller());
5984 if (auto *Const = dyn_cast<Constant>(V))
5985 Check(!Const->getType()->isX86_AMXTy(),
5986 "const x86_amx is not allowed in argument!");
5987 }
5988
5989 switch (ID) {
5990 default:
5991 break;
5992 case Intrinsic::assume: {
5993 if (Call.hasOperandBundles()) {
5995 Check(Cond && Cond->isOne(),
5996 "assume with operand bundles must have i1 true condition", Call);
5997 }
5998 for (auto &Elem : Call.bundle_op_infos()) {
5999 unsigned ArgCount = Elem.End - Elem.Begin;
6000 // Separate storage assumptions are special insofar as they're the only
6001 // operand bundles allowed on assumes that aren't parameter attributes.
6002 if (Elem.Tag->getKey() == "separate_storage") {
6003 Check(ArgCount == 2,
6004 "separate_storage assumptions should have 2 arguments", Call);
6005 Check(Call.getOperand(Elem.Begin)->getType()->isPointerTy() &&
6006 Call.getOperand(Elem.Begin + 1)->getType()->isPointerTy(),
6007 "arguments to separate_storage assumptions should be pointers",
6008 Call);
6009 continue;
6010 }
6011 Check(Elem.Tag->getKey() == "ignore" ||
6012 Attribute::isExistingAttribute(Elem.Tag->getKey()),
6013 "tags must be valid attribute names", Call);
6014 Attribute::AttrKind Kind =
6015 Attribute::getAttrKindFromName(Elem.Tag->getKey());
6016 if (Kind == Attribute::Alignment) {
6017 Check(ArgCount <= 3 && ArgCount >= 2,
6018 "alignment assumptions should have 2 or 3 arguments", Call);
6019 Check(Call.getOperand(Elem.Begin)->getType()->isPointerTy(),
6020 "first argument should be a pointer", Call);
6021 Check(Call.getOperand(Elem.Begin + 1)->getType()->isIntegerTy(),
6022 "second argument should be an integer", Call);
6023 if (ArgCount == 3)
6024 Check(Call.getOperand(Elem.Begin + 2)->getType()->isIntegerTy(),
6025 "third argument should be an integer if present", Call);
6026 continue;
6027 }
6028 if (Kind == Attribute::Dereferenceable) {
6029 Check(ArgCount == 2,
6030 "dereferenceable assumptions should have 2 arguments", Call);
6031 Check(Call.getOperand(Elem.Begin)->getType()->isPointerTy(),
6032 "first argument should be a pointer", Call);
6033 Check(Call.getOperand(Elem.Begin + 1)->getType()->isIntegerTy(),
6034 "second argument should be an integer", Call);
6035 continue;
6036 }
6037 Check(ArgCount <= 2, "too many arguments", Call);
6038 if (Kind == Attribute::None)
6039 break;
6040 if (Attribute::isIntAttrKind(Kind)) {
6041 Check(ArgCount == 2, "this attribute should have 2 arguments", Call);
6042 Check(isa<ConstantInt>(Call.getOperand(Elem.Begin + 1)),
6043 "the second argument should be a constant integral value", Call);
6044 } else if (Attribute::canUseAsParamAttr(Kind)) {
6045 Check((ArgCount) == 1, "this attribute should have one argument", Call);
6046 } else if (Attribute::canUseAsFnAttr(Kind)) {
6047 Check((ArgCount) == 0, "this attribute has no argument", Call);
6048 }
6049 }
6050 break;
6051 }
6052 case Intrinsic::ucmp:
6053 case Intrinsic::scmp: {
6054 Type *SrcTy = Call.getOperand(0)->getType();
6055 Type *DestTy = Call.getType();
6056
6057 Check(DestTy->getScalarSizeInBits() >= 2,
6058 "result type must be at least 2 bits wide", Call);
6059
6060 bool IsDestTypeVector = DestTy->isVectorTy();
6061 Check(SrcTy->isVectorTy() == IsDestTypeVector,
6062 "ucmp/scmp argument and result types must both be either vector or "
6063 "scalar types",
6064 Call);
6065 if (IsDestTypeVector) {
6066 auto SrcVecLen = cast<VectorType>(SrcTy)->getElementCount();
6067 auto DestVecLen = cast<VectorType>(DestTy)->getElementCount();
6068 Check(SrcVecLen == DestVecLen,
6069 "return type and arguments must have the same number of "
6070 "elements",
6071 Call);
6072 }
6073 break;
6074 }
6075 case Intrinsic::coro_begin:
6076 case Intrinsic::coro_begin_custom_abi:
6078 "id argument of llvm.coro.begin must refer to coro.id");
6079 break;
6080 case Intrinsic::coro_id: {
6082 "align argument only accepts constants");
6083 auto *Promise = Call.getArgOperand(1);
6084 Check(isa<ConstantPointerNull>(Promise) || isa<AllocaInst>(Promise),
6085 "promise argument must refer to an alloca");
6086
6087 auto *CoroAddr = Call.getArgOperand(2)->stripPointerCasts();
6088 bool BeforeCoroEarly = isa<ConstantPointerNull>(CoroAddr);
6089 Check(BeforeCoroEarly || isa<Function>(CoroAddr),
6090 "coro argument must refer to a function");
6091
6092 auto *InfoArg = Call.getArgOperand(3);
6093 bool BeforeCoroSplit = isa<ConstantPointerNull>(InfoArg);
6094 if (BeforeCoroSplit)
6095 break;
6096
6097 Check(!BeforeCoroEarly, "cannot run CoroSplit before CoroEarly");
6098 auto *GV = dyn_cast<GlobalVariable>(InfoArg);
6099 Check(GV && GV->isConstant() && GV->hasDefinitiveInitializer(),
6100 "info argument of llvm.coro.id must refer to an initialized "
6101 "constant");
6102 Constant *Init = GV->getInitializer();
6104 "info argument of llvm.coro.id must refer to either a struct or "
6105 "an array");
6106 break;
6107 }
6108 case Intrinsic::is_fpclass: {
6109 const ConstantInt *TestMask = cast<ConstantInt>(Call.getOperand(1));
6110 Check((TestMask->getZExtValue() & ~static_cast<unsigned>(fcAllFlags)) == 0,
6111 "unsupported bits for llvm.is.fpclass test mask");
6112 break;
6113 }
6114 case Intrinsic::fptrunc_round: {
6115 // Check the rounding mode
6116 Metadata *MD = nullptr;
6118 if (MAV)
6119 MD = MAV->getMetadata();
6120
6121 Check(MD != nullptr, "missing rounding mode argument", Call);
6122
6123 Check(isa<MDString>(MD),
6124 ("invalid value for llvm.fptrunc.round metadata operand"
6125 " (the operand should be a string)"),
6126 MD);
6127
6128 std::optional<RoundingMode> RoundMode =
6129 convertStrToRoundingMode(cast<MDString>(MD)->getString());
6130 Check(RoundMode && *RoundMode != RoundingMode::Dynamic,
6131 "unsupported rounding mode argument", Call);
6132 break;
6133 }
6134 case Intrinsic::convert_to_arbitrary_fp: {
6135 // Check that vector element counts are consistent.
6136 Type *ValueTy = Call.getArgOperand(0)->getType();
6137 Type *IntTy = Call.getType();
6138
6139 if (auto *ValueVecTy = dyn_cast<VectorType>(ValueTy)) {
6140 auto *IntVecTy = dyn_cast<VectorType>(IntTy);
6141 Check(IntVecTy,
6142 "if floating-point operand is a vector, integer operand must also "
6143 "be a vector",
6144 Call);
6145 Check(ValueVecTy->getElementCount() == IntVecTy->getElementCount(),
6146 "floating-point and integer vector operands must have the same "
6147 "element count",
6148 Call);
6149 }
6150
6151 // Check interpretation metadata (argoperand 1).
6152 auto *InterpMAV = dyn_cast<MetadataAsValue>(Call.getArgOperand(1));
6153 Check(InterpMAV, "missing interpretation metadata operand", Call);
6154 auto *InterpStr = dyn_cast<MDString>(InterpMAV->getMetadata());
6155 Check(InterpStr, "interpretation metadata operand must be a string", Call);
6156 StringRef Interp = InterpStr->getString();
6157
6158 Check(!Interp.empty(), "interpretation metadata string must not be empty",
6159 Call);
6160
6161 // Valid interpretation strings: mini-float format names.
6163 "unsupported interpretation metadata string", Call);
6164
6165 // Check rounding mode metadata (argoperand 2).
6166 auto *RoundingMAV = dyn_cast<MetadataAsValue>(Call.getArgOperand(2));
6167 Check(RoundingMAV, "missing rounding mode metadata operand", Call);
6168 auto *RoundingStr = dyn_cast<MDString>(RoundingMAV->getMetadata());
6169 Check(RoundingStr, "rounding mode metadata operand must be a string", Call);
6170
6171 std::optional<RoundingMode> RM =
6172 convertStrToRoundingMode(RoundingStr->getString());
6173 Check(RM && *RM != RoundingMode::Dynamic,
6174 "unsupported rounding mode argument", Call);
6175 break;
6176 }
6177 case Intrinsic::convert_from_arbitrary_fp: {
6178 // Check that vector element counts are consistent.
6179 Type *IntTy = Call.getArgOperand(0)->getType();
6180 Type *ValueTy = Call.getType();
6181
6182 if (auto *ValueVecTy = dyn_cast<VectorType>(ValueTy)) {
6183 auto *IntVecTy = dyn_cast<VectorType>(IntTy);
6184 Check(IntVecTy,
6185 "if floating-point operand is a vector, integer operand must also "
6186 "be a vector",
6187 Call);
6188 Check(ValueVecTy->getElementCount() == IntVecTy->getElementCount(),
6189 "floating-point and integer vector operands must have the same "
6190 "element count",
6191 Call);
6192 }
6193
6194 // Check interpretation metadata (argoperand 1).
6195 auto *InterpMAV = dyn_cast<MetadataAsValue>(Call.getArgOperand(1));
6196 Check(InterpMAV, "missing interpretation metadata operand", Call);
6197 auto *InterpStr = dyn_cast<MDString>(InterpMAV->getMetadata());
6198 Check(InterpStr, "interpretation metadata operand must be a string", Call);
6199 StringRef Interp = InterpStr->getString();
6200
6201 Check(!Interp.empty(), "interpretation metadata string must not be empty",
6202 Call);
6203
6204 // Valid interpretation strings: mini-float format names.
6206 "unsupported interpretation metadata string", Call);
6207 break;
6208 }
6209#define BEGIN_REGISTER_VP_INTRINSIC(VPID, ...) case Intrinsic::VPID:
6210#include "llvm/IR/VPIntrinsics.def"
6211#undef BEGIN_REGISTER_VP_INTRINSIC
6212 visitVPIntrinsic(cast<VPIntrinsic>(Call));
6213 break;
6214#define INSTRUCTION(NAME, NARGS, ROUND_MODE, INTRINSIC) \
6215 case Intrinsic::INTRINSIC:
6216#include "llvm/IR/ConstrainedOps.def"
6217#undef INSTRUCTION
6218 visitConstrainedFPIntrinsic(cast<ConstrainedFPIntrinsic>(Call));
6219 break;
6220 case Intrinsic::dbg_declare: // llvm.dbg.declare
6221 case Intrinsic::dbg_value: // llvm.dbg.value
6222 case Intrinsic::dbg_assign: // llvm.dbg.assign
6223 case Intrinsic::dbg_label: // llvm.dbg.label
6224 // We no longer interpret debug intrinsics (the old variable-location
6225 // design). They're meaningless as far as LLVM is concerned; we could make
6226 // it an error for them to appear, but it's possible we'll have users
6227 // converting back to intrinsics for the foreseeable future (such as DXIL),
6228 // so tolerate their existence.
6229 break;
6230 case Intrinsic::memcpy:
6231 case Intrinsic::memcpy_inline:
6232 case Intrinsic::memmove:
6233 case Intrinsic::memset:
6234 case Intrinsic::memset_inline:
6235 break;
6236 case Intrinsic::experimental_memset_pattern: {
6237 const auto Memset = cast<MemSetPatternInst>(&Call);
6238 Check(Memset->getValue()->getType()->isSized(),
6239 "unsized types cannot be used as memset patterns", Call);
6240 break;
6241 }
6242 case Intrinsic::memcpy_element_unordered_atomic:
6243 case Intrinsic::memmove_element_unordered_atomic:
6244 case Intrinsic::memset_element_unordered_atomic: {
6245 const auto *AMI = cast<AnyMemIntrinsic>(&Call);
6246
6247 ConstantInt *ElementSizeCI =
6248 cast<ConstantInt>(AMI->getRawElementSizeInBytes());
6249 const APInt &ElementSizeVal = ElementSizeCI->getValue();
6250 Check(ElementSizeVal.isPowerOf2(),
6251 "element size of the element-wise atomic memory intrinsic "
6252 "must be a power of 2",
6253 Call);
6254
6255 auto IsValidAlignment = [&](MaybeAlign Alignment) {
6256 return Alignment && ElementSizeVal.ule(Alignment->value());
6257 };
6258 Check(IsValidAlignment(AMI->getDestAlign()),
6259 "incorrect alignment of the destination argument", Call);
6260 if (const auto *AMT = dyn_cast<AnyMemTransferInst>(AMI)) {
6261 Check(IsValidAlignment(AMT->getSourceAlign()),
6262 "incorrect alignment of the source argument", Call);
6263 }
6264 break;
6265 }
6266 case Intrinsic::call_preallocated_setup: {
6267 auto *NumArgs = cast<ConstantInt>(Call.getArgOperand(0));
6268 bool FoundCall = false;
6269 for (User *U : Call.users()) {
6270 auto *UseCall = dyn_cast<CallBase>(U);
6271 Check(UseCall != nullptr,
6272 "Uses of llvm.call.preallocated.setup must be calls");
6273 Intrinsic::ID IID = UseCall->getIntrinsicID();
6274 if (IID == Intrinsic::call_preallocated_arg) {
6275 auto *AllocArgIndex = dyn_cast<ConstantInt>(UseCall->getArgOperand(1));
6276 Check(AllocArgIndex != nullptr,
6277 "llvm.call.preallocated.alloc arg index must be a constant");
6278 auto AllocArgIndexInt = AllocArgIndex->getValue();
6279 Check(AllocArgIndexInt.sge(0) &&
6280 AllocArgIndexInt.slt(NumArgs->getValue()),
6281 "llvm.call.preallocated.alloc arg index must be between 0 and "
6282 "corresponding "
6283 "llvm.call.preallocated.setup's argument count");
6284 } else if (IID == Intrinsic::call_preallocated_teardown) {
6285 // nothing to do
6286 } else {
6287 Check(!FoundCall, "Can have at most one call corresponding to a "
6288 "llvm.call.preallocated.setup");
6289 FoundCall = true;
6290 size_t NumPreallocatedArgs = 0;
6291 for (unsigned i = 0; i < UseCall->arg_size(); i++) {
6292 if (UseCall->paramHasAttr(i, Attribute::Preallocated)) {
6293 ++NumPreallocatedArgs;
6294 }
6295 }
6296 Check(NumPreallocatedArgs != 0,
6297 "cannot use preallocated intrinsics on a call without "
6298 "preallocated arguments");
6299 Check(NumArgs->equalsInt(NumPreallocatedArgs),
6300 "llvm.call.preallocated.setup arg size must be equal to number "
6301 "of preallocated arguments "
6302 "at call site",
6303 Call, *UseCall);
6304 // getOperandBundle() cannot be called if more than one of the operand
6305 // bundle exists. There is already a check elsewhere for this, so skip
6306 // here if we see more than one.
6307 if (UseCall->countOperandBundlesOfType(LLVMContext::OB_preallocated) >
6308 1) {
6309 return;
6310 }
6311 auto PreallocatedBundle =
6312 UseCall->getOperandBundle(LLVMContext::OB_preallocated);
6313 Check(PreallocatedBundle,
6314 "Use of llvm.call.preallocated.setup outside intrinsics "
6315 "must be in \"preallocated\" operand bundle");
6316 Check(PreallocatedBundle->Inputs.front().get() == &Call,
6317 "preallocated bundle must have token from corresponding "
6318 "llvm.call.preallocated.setup");
6319 }
6320 }
6321 break;
6322 }
6323 case Intrinsic::call_preallocated_arg: {
6324 auto *Token = dyn_cast<CallBase>(Call.getArgOperand(0));
6325 Check(Token &&
6326 Token->getIntrinsicID() == Intrinsic::call_preallocated_setup,
6327 "llvm.call.preallocated.arg token argument must be a "
6328 "llvm.call.preallocated.setup");
6329 Check(Call.hasFnAttr(Attribute::Preallocated),
6330 "llvm.call.preallocated.arg must be called with a \"preallocated\" "
6331 "call site attribute");
6332 break;
6333 }
6334 case Intrinsic::call_preallocated_teardown: {
6335 auto *Token = dyn_cast<CallBase>(Call.getArgOperand(0));
6336 Check(Token &&
6337 Token->getIntrinsicID() == Intrinsic::call_preallocated_setup,
6338 "llvm.call.preallocated.teardown token argument must be a "
6339 "llvm.call.preallocated.setup");
6340 break;
6341 }
6342 case Intrinsic::gcroot:
6343 case Intrinsic::gcwrite:
6344 case Intrinsic::gcread:
6345 if (ID == Intrinsic::gcroot) {
6346 AllocaInst *AI =
6348 Check(AI, "llvm.gcroot parameter #1 must be an alloca.", Call);
6350 "llvm.gcroot parameter #2 must be a constant.", Call);
6351 if (!AI->getAllocatedType()->isPointerTy()) {
6353 "llvm.gcroot parameter #1 must either be a pointer alloca, "
6354 "or argument #2 must be a non-null constant.",
6355 Call);
6356 }
6357 }
6358
6359 Check(Call.getParent()->getParent()->hasGC(),
6360 "Enclosing function does not use GC.", Call);
6361 break;
6362 case Intrinsic::init_trampoline:
6364 "llvm.init_trampoline parameter #2 must resolve to a function.",
6365 Call);
6366 break;
6367 case Intrinsic::prefetch:
6368 Check(cast<ConstantInt>(Call.getArgOperand(1))->getZExtValue() < 2,
6369 "rw argument to llvm.prefetch must be 0-1", Call);
6370 Check(cast<ConstantInt>(Call.getArgOperand(2))->getZExtValue() < 4,
6371 "locality argument to llvm.prefetch must be 0-3", Call);
6372 Check(cast<ConstantInt>(Call.getArgOperand(3))->getZExtValue() < 2,
6373 "cache type argument to llvm.prefetch must be 0-1", Call);
6374 break;
6375 case Intrinsic::reloc_none: {
6377 cast<MetadataAsValue>(Call.getArgOperand(0))->getMetadata()),
6378 "llvm.reloc.none argument must be a metadata string", &Call);
6379 break;
6380 }
6381 case Intrinsic::stackprotector:
6383 "llvm.stackprotector parameter #2 must resolve to an alloca.", Call);
6384 break;
6385 case Intrinsic::localescape: {
6386 BasicBlock *BB = Call.getParent();
6387 Check(BB->isEntryBlock(), "llvm.localescape used outside of entry block",
6388 Call);
6389 Check(!SawFrameEscape, "multiple calls to llvm.localescape in one function",
6390 Call);
6391 for (Value *Arg : Call.args()) {
6392 if (isa<ConstantPointerNull>(Arg))
6393 continue; // Null values are allowed as placeholders.
6394 auto *AI = dyn_cast<AllocaInst>(Arg->stripPointerCasts());
6395 Check(AI && AI->isStaticAlloca(),
6396 "llvm.localescape only accepts static allocas", Call);
6397 }
6398 FrameEscapeInfo[BB->getParent()].first = Call.arg_size();
6399 SawFrameEscape = true;
6400 break;
6401 }
6402 case Intrinsic::localrecover: {
6404 Function *Fn = dyn_cast<Function>(FnArg);
6405 Check(Fn && !Fn->isDeclaration(),
6406 "llvm.localrecover first "
6407 "argument must be function defined in this module",
6408 Call);
6409 auto *IdxArg = cast<ConstantInt>(Call.getArgOperand(2));
6410 auto &Entry = FrameEscapeInfo[Fn];
6411 Entry.second = unsigned(
6412 std::max(uint64_t(Entry.second), IdxArg->getLimitedValue(~0U) + 1));
6413 break;
6414 }
6415
6416 case Intrinsic::experimental_gc_statepoint:
6417 if (auto *CI = dyn_cast<CallInst>(&Call))
6418 Check(!CI->isInlineAsm(),
6419 "gc.statepoint support for inline assembly unimplemented", CI);
6420 Check(Call.getParent()->getParent()->hasGC(),
6421 "Enclosing function does not use GC.", Call);
6422
6423 verifyStatepoint(Call);
6424 break;
6425 case Intrinsic::experimental_gc_result: {
6426 Check(Call.getParent()->getParent()->hasGC(),
6427 "Enclosing function does not use GC.", Call);
6428
6429 auto *Statepoint = Call.getArgOperand(0);
6430 if (isa<UndefValue>(Statepoint))
6431 break;
6432
6433 // Are we tied to a statepoint properly?
6434 const auto *StatepointCall = dyn_cast<CallBase>(Statepoint);
6435 Check(StatepointCall && StatepointCall->getIntrinsicID() ==
6436 Intrinsic::experimental_gc_statepoint,
6437 "gc.result operand #1 must be from a statepoint", Call,
6438 Call.getArgOperand(0));
6439
6440 // Check that result type matches wrapped callee.
6441 auto *TargetFuncType =
6442 cast<FunctionType>(StatepointCall->getParamElementType(2));
6443 Check(Call.getType() == TargetFuncType->getReturnType(),
6444 "gc.result result type does not match wrapped callee", Call);
6445 break;
6446 }
6447 case Intrinsic::experimental_gc_relocate: {
6448 Check(Call.arg_size() == 3, "wrong number of arguments", Call);
6449
6451 "gc.relocate must return a pointer or a vector of pointers", Call);
6452
6453 // Check that this relocate is correctly tied to the statepoint
6454
6455 // This is case for relocate on the unwinding path of an invoke statepoint
6456 if (LandingPadInst *LandingPad =
6458
6459 const BasicBlock *InvokeBB =
6460 LandingPad->getParent()->getUniquePredecessor();
6461
6462 // Landingpad relocates should have only one predecessor with invoke
6463 // statepoint terminator
6464 Check(InvokeBB, "safepoints should have unique landingpads",
6465 LandingPad->getParent());
6466 Check(InvokeBB->getTerminator(), "safepoint block should be well formed",
6467 InvokeBB);
6469 "gc relocate should be linked to a statepoint", InvokeBB);
6470 } else {
6471 // In all other cases relocate should be tied to the statepoint directly.
6472 // This covers relocates on a normal return path of invoke statepoint and
6473 // relocates of a call statepoint.
6474 auto *Token = Call.getArgOperand(0);
6476 "gc relocate is incorrectly tied to the statepoint", Call, Token);
6477 }
6478
6479 // Verify rest of the relocate arguments.
6480 const Value &StatepointCall = *cast<GCRelocateInst>(Call).getStatepoint();
6481
6482 // Both the base and derived must be piped through the safepoint.
6485 "gc.relocate operand #2 must be integer offset", Call);
6486
6487 Value *Derived = Call.getArgOperand(2);
6488 Check(isa<ConstantInt>(Derived),
6489 "gc.relocate operand #3 must be integer offset", Call);
6490
6491 const uint64_t BaseIndex = cast<ConstantInt>(Base)->getZExtValue();
6492 const uint64_t DerivedIndex = cast<ConstantInt>(Derived)->getZExtValue();
6493
6494 // Check the bounds
6495 if (isa<UndefValue>(StatepointCall))
6496 break;
6497 if (auto Opt = cast<GCStatepointInst>(StatepointCall)
6498 .getOperandBundle(LLVMContext::OB_gc_live)) {
6499 Check(BaseIndex < Opt->Inputs.size(),
6500 "gc.relocate: statepoint base index out of bounds", Call);
6501 Check(DerivedIndex < Opt->Inputs.size(),
6502 "gc.relocate: statepoint derived index out of bounds", Call);
6503 }
6504
6505 // Relocated value must be either a pointer type or vector-of-pointer type,
6506 // but gc_relocate does not need to return the same pointer type as the
6507 // relocated pointer. It can be casted to the correct type later if it's
6508 // desired. However, they must have the same address space and 'vectorness'
6509 GCRelocateInst &Relocate = cast<GCRelocateInst>(Call);
6510 auto *ResultType = Call.getType();
6511 auto *DerivedType = Relocate.getDerivedPtr()->getType();
6512 auto *BaseType = Relocate.getBasePtr()->getType();
6513
6514 Check(BaseType->isPtrOrPtrVectorTy(),
6515 "gc.relocate: relocated value must be a pointer", Call);
6516 Check(DerivedType->isPtrOrPtrVectorTy(),
6517 "gc.relocate: relocated value must be a pointer", Call);
6518
6519 Check(ResultType->isVectorTy() == DerivedType->isVectorTy(),
6520 "gc.relocate: vector relocates to vector and pointer to pointer",
6521 Call);
6522 Check(
6523 ResultType->getPointerAddressSpace() ==
6524 DerivedType->getPointerAddressSpace(),
6525 "gc.relocate: relocating a pointer shouldn't change its address space",
6526 Call);
6527
6528 auto GC = llvm::getGCStrategy(Relocate.getFunction()->getGC());
6529 Check(GC, "gc.relocate: calling function must have GCStrategy",
6530 Call.getFunction());
6531 if (GC) {
6532 auto isGCPtr = [&GC](Type *PTy) {
6533 return GC->isGCManagedPointer(PTy->getScalarType()).value_or(true);
6534 };
6535 Check(isGCPtr(ResultType), "gc.relocate: must return gc pointer", Call);
6536 Check(isGCPtr(BaseType),
6537 "gc.relocate: relocated value must be a gc pointer", Call);
6538 Check(isGCPtr(DerivedType),
6539 "gc.relocate: relocated value must be a gc pointer", Call);
6540 }
6541 break;
6542 }
6543 case Intrinsic::experimental_patchpoint: {
6544 if (Call.getCallingConv() == CallingConv::AnyReg) {
6546 "patchpoint: invalid return type used with anyregcc", Call);
6547 }
6548 break;
6549 }
6550 case Intrinsic::eh_exceptioncode:
6551 case Intrinsic::eh_exceptionpointer: {
6553 "eh.exceptionpointer argument must be a catchpad", Call);
6554 break;
6555 }
6556 case Intrinsic::get_active_lane_mask: {
6558 "get_active_lane_mask: must return a "
6559 "vector",
6560 Call);
6561 auto *ElemTy = Call.getType()->getScalarType();
6562 Check(ElemTy->isIntegerTy(1),
6563 "get_active_lane_mask: element type is not "
6564 "i1",
6565 Call);
6566 break;
6567 }
6568 case Intrinsic::experimental_get_vector_length: {
6569 ConstantInt *VF = cast<ConstantInt>(Call.getArgOperand(1));
6570 Check(!VF->isNegative() && !VF->isZero(),
6571 "get_vector_length: VF must be positive", Call);
6572 break;
6573 }
6574 case Intrinsic::masked_load: {
6575 Check(Call.getType()->isVectorTy(), "masked_load: must return a vector",
6576 Call);
6577
6579 Value *PassThru = Call.getArgOperand(2);
6580 Check(Mask->getType()->isVectorTy(), "masked_load: mask must be vector",
6581 Call);
6582 Check(PassThru->getType() == Call.getType(),
6583 "masked_load: pass through and return type must match", Call);
6584 Check(cast<VectorType>(Mask->getType())->getElementCount() ==
6585 cast<VectorType>(Call.getType())->getElementCount(),
6586 "masked_load: vector mask must be same length as return", Call);
6587 break;
6588 }
6589 case Intrinsic::masked_store: {
6590 Value *Val = Call.getArgOperand(0);
6592 Check(Mask->getType()->isVectorTy(), "masked_store: mask must be vector",
6593 Call);
6594 Check(cast<VectorType>(Mask->getType())->getElementCount() ==
6595 cast<VectorType>(Val->getType())->getElementCount(),
6596 "masked_store: vector mask must be same length as value", Call);
6597 break;
6598 }
6599 case Intrinsic::experimental_guard: {
6600 Check(isa<CallInst>(Call), "experimental_guard cannot be invoked", Call);
6602 "experimental_guard must have exactly one "
6603 "\"deopt\" operand bundle");
6604 break;
6605 }
6606
6607 case Intrinsic::experimental_deoptimize: {
6608 Check(isa<CallInst>(Call), "experimental_deoptimize cannot be invoked",
6609 Call);
6611 "experimental_deoptimize must have exactly one "
6612 "\"deopt\" operand bundle");
6614 "experimental_deoptimize return type must match caller return type");
6615
6616 if (isa<CallInst>(Call)) {
6618 Check(RI,
6619 "calls to experimental_deoptimize must be followed by a return");
6620
6621 if (!Call.getType()->isVoidTy() && RI)
6622 Check(RI->getReturnValue() == &Call,
6623 "calls to experimental_deoptimize must be followed by a return "
6624 "of the value computed by experimental_deoptimize");
6625 }
6626
6627 break;
6628 }
6629 case Intrinsic::vastart: {
6631 "va_start called in a non-varargs function");
6632 break;
6633 }
6634 case Intrinsic::get_dynamic_area_offset: {
6635 auto *IntTy = dyn_cast<IntegerType>(Call.getType());
6636 Check(IntTy && DL.getPointerSizeInBits(DL.getAllocaAddrSpace()) ==
6637 IntTy->getBitWidth(),
6638 "get_dynamic_area_offset result type must be scalar integer matching "
6639 "alloca address space width",
6640 Call);
6641 break;
6642 }
6643 case Intrinsic::masked_udiv:
6644 case Intrinsic::masked_sdiv:
6645 case Intrinsic::masked_urem:
6646 case Intrinsic::masked_srem:
6647 case Intrinsic::vector_reduce_and:
6648 case Intrinsic::vector_reduce_or:
6649 case Intrinsic::vector_reduce_xor:
6650 case Intrinsic::vector_reduce_add:
6651 case Intrinsic::vector_reduce_mul:
6652 case Intrinsic::vector_reduce_smax:
6653 case Intrinsic::vector_reduce_smin:
6654 case Intrinsic::vector_reduce_umax:
6655 case Intrinsic::vector_reduce_umin: {
6656 Type *ArgTy = Call.getArgOperand(0)->getType();
6657 Check(ArgTy->isIntOrIntVectorTy() && ArgTy->isVectorTy(),
6658 "intrinsic has incorrect argument type!");
6659 break;
6660 }
6661 case Intrinsic::vector_reduce_fmax:
6662 case Intrinsic::vector_reduce_fmin: {
6663 Type *ArgTy = Call.getArgOperand(0)->getType();
6664 Check(ArgTy->isFPOrFPVectorTy() && ArgTy->isVectorTy(),
6665 "intrinsic has incorrect argument type!");
6666 break;
6667 }
6668 case Intrinsic::vector_reduce_fadd:
6669 case Intrinsic::vector_reduce_fmul: {
6670 // Unlike the other reductions, the first argument is a start value. The
6671 // second argument is the vector to be reduced.
6672 Type *ArgTy = Call.getArgOperand(1)->getType();
6673 Check(ArgTy->isFPOrFPVectorTy() && ArgTy->isVectorTy(),
6674 "intrinsic has incorrect argument type!");
6675 break;
6676 }
6677 case Intrinsic::smul_fix:
6678 case Intrinsic::smul_fix_sat:
6679 case Intrinsic::umul_fix:
6680 case Intrinsic::umul_fix_sat:
6681 case Intrinsic::sdiv_fix:
6682 case Intrinsic::sdiv_fix_sat:
6683 case Intrinsic::udiv_fix:
6684 case Intrinsic::udiv_fix_sat: {
6685 Value *Op1 = Call.getArgOperand(0);
6686 Value *Op2 = Call.getArgOperand(1);
6688 "first operand of [us][mul|div]_fix[_sat] must be an int type or "
6689 "vector of ints");
6691 "second operand of [us][mul|div]_fix[_sat] must be an int type or "
6692 "vector of ints");
6693
6694 auto *Op3 = cast<ConstantInt>(Call.getArgOperand(2));
6695 Check(Op3->getType()->isIntegerTy(),
6696 "third operand of [us][mul|div]_fix[_sat] must be an int type");
6697 Check(Op3->getBitWidth() <= 32,
6698 "third operand of [us][mul|div]_fix[_sat] must fit within 32 bits");
6699
6700 if (ID == Intrinsic::smul_fix || ID == Intrinsic::smul_fix_sat ||
6701 ID == Intrinsic::sdiv_fix || ID == Intrinsic::sdiv_fix_sat) {
6702 Check(Op3->getZExtValue() < Op1->getType()->getScalarSizeInBits(),
6703 "the scale of s[mul|div]_fix[_sat] must be less than the width of "
6704 "the operands");
6705 } else {
6706 Check(Op3->getZExtValue() <= Op1->getType()->getScalarSizeInBits(),
6707 "the scale of u[mul|div]_fix[_sat] must be less than or equal "
6708 "to the width of the operands");
6709 }
6710 break;
6711 }
6712 case Intrinsic::lrint:
6713 case Intrinsic::llrint:
6714 case Intrinsic::lround:
6715 case Intrinsic::llround: {
6716 Type *ValTy = Call.getArgOperand(0)->getType();
6717 Type *ResultTy = Call.getType();
6718 auto *VTy = dyn_cast<VectorType>(ValTy);
6719 auto *RTy = dyn_cast<VectorType>(ResultTy);
6720 Check(ValTy->isFPOrFPVectorTy() && ResultTy->isIntOrIntVectorTy(),
6721 ExpectedName + ": argument must be floating-point or vector "
6722 "of floating-points, and result must be integer or "
6723 "vector of integers",
6724 &Call);
6725 Check(ValTy->isVectorTy() == ResultTy->isVectorTy(),
6726 ExpectedName + ": argument and result disagree on vector use", &Call);
6727 if (VTy) {
6728 Check(VTy->getElementCount() == RTy->getElementCount(),
6729 ExpectedName + ": argument must be same length as result", &Call);
6730 }
6731 break;
6732 }
6733 case Intrinsic::bswap: {
6734 Type *Ty = Call.getType();
6735 unsigned Size = Ty->getScalarSizeInBits();
6736 Check(Size % 16 == 0, "bswap must be an even number of bytes", &Call);
6737 break;
6738 }
6739 case Intrinsic::invariant_start: {
6740 ConstantInt *InvariantSize = dyn_cast<ConstantInt>(Call.getArgOperand(0));
6741 Check(InvariantSize &&
6742 (!InvariantSize->isNegative() || InvariantSize->isMinusOne()),
6743 "invariant_start parameter must be -1, 0 or a positive number",
6744 &Call);
6745 break;
6746 }
6747 case Intrinsic::matrix_multiply:
6748 case Intrinsic::matrix_transpose:
6749 case Intrinsic::matrix_column_major_load:
6750 case Intrinsic::matrix_column_major_store: {
6752 ConstantInt *Stride = nullptr;
6753 ConstantInt *NumRows;
6754 ConstantInt *NumColumns;
6755 VectorType *ResultTy;
6756 Type *Op0ElemTy = nullptr;
6757 Type *Op1ElemTy = nullptr;
6758 switch (ID) {
6759 case Intrinsic::matrix_multiply: {
6760 NumRows = cast<ConstantInt>(Call.getArgOperand(2));
6761 ConstantInt *N = cast<ConstantInt>(Call.getArgOperand(3));
6762 NumColumns = cast<ConstantInt>(Call.getArgOperand(4));
6764 ->getNumElements() ==
6765 NumRows->getZExtValue() * N->getZExtValue(),
6766 "First argument of a matrix operation does not match specified "
6767 "shape!");
6769 ->getNumElements() ==
6770 N->getZExtValue() * NumColumns->getZExtValue(),
6771 "Second argument of a matrix operation does not match specified "
6772 "shape!");
6773
6774 ResultTy = cast<VectorType>(Call.getType());
6775 Op0ElemTy =
6776 cast<VectorType>(Call.getArgOperand(0)->getType())->getElementType();
6777 Op1ElemTy =
6778 cast<VectorType>(Call.getArgOperand(1)->getType())->getElementType();
6779 break;
6780 }
6781 case Intrinsic::matrix_transpose:
6782 NumRows = cast<ConstantInt>(Call.getArgOperand(1));
6783 NumColumns = cast<ConstantInt>(Call.getArgOperand(2));
6784 ResultTy = cast<VectorType>(Call.getType());
6785 Op0ElemTy =
6786 cast<VectorType>(Call.getArgOperand(0)->getType())->getElementType();
6787 break;
6788 case Intrinsic::matrix_column_major_load: {
6790 NumRows = cast<ConstantInt>(Call.getArgOperand(3));
6791 NumColumns = cast<ConstantInt>(Call.getArgOperand(4));
6792 ResultTy = cast<VectorType>(Call.getType());
6793 break;
6794 }
6795 case Intrinsic::matrix_column_major_store: {
6797 NumRows = cast<ConstantInt>(Call.getArgOperand(4));
6798 NumColumns = cast<ConstantInt>(Call.getArgOperand(5));
6799 ResultTy = cast<VectorType>(Call.getArgOperand(0)->getType());
6800 Op0ElemTy =
6801 cast<VectorType>(Call.getArgOperand(0)->getType())->getElementType();
6802 break;
6803 }
6804 default:
6805 llvm_unreachable("unexpected intrinsic");
6806 }
6807
6808 Check(ResultTy->getElementType()->isIntegerTy() ||
6809 ResultTy->getElementType()->isFloatingPointTy(),
6810 "Result type must be an integer or floating-point type!", IF);
6811
6812 if (Op0ElemTy)
6813 Check(ResultTy->getElementType() == Op0ElemTy,
6814 "Vector element type mismatch of the result and first operand "
6815 "vector!",
6816 IF);
6817
6818 if (Op1ElemTy)
6819 Check(ResultTy->getElementType() == Op1ElemTy,
6820 "Vector element type mismatch of the result and second operand "
6821 "vector!",
6822 IF);
6823
6825 NumRows->getZExtValue() * NumColumns->getZExtValue(),
6826 "Result of a matrix operation does not fit in the returned vector!");
6827
6828 if (Stride) {
6829 Check(Stride->getBitWidth() <= 64, "Stride bitwidth cannot exceed 64!",
6830 IF);
6831 Check(Stride->getZExtValue() >= NumRows->getZExtValue(),
6832 "Stride must be greater or equal than the number of rows!", IF);
6833 }
6834
6835 break;
6836 }
6837 case Intrinsic::stepvector: {
6839 Check(VecTy && VecTy->getScalarType()->isIntegerTy() &&
6840 VecTy->getScalarSizeInBits() >= 8,
6841 "stepvector only supported for vectors of integers "
6842 "with a bitwidth of at least 8.",
6843 &Call);
6844 break;
6845 }
6846 case Intrinsic::experimental_vector_match: {
6847 Value *Op1 = Call.getArgOperand(0);
6848 Value *Op2 = Call.getArgOperand(1);
6850
6851 VectorType *Op1Ty = dyn_cast<VectorType>(Op1->getType());
6852 VectorType *Op2Ty = dyn_cast<VectorType>(Op2->getType());
6853 VectorType *MaskTy = dyn_cast<VectorType>(Mask->getType());
6854
6855 Check(Op1Ty && Op2Ty && MaskTy, "Operands must be vectors.", &Call);
6857 "Second operand must be a fixed length vector.", &Call);
6858 Check(Op1Ty->getElementType()->isIntegerTy(),
6859 "First operand must be a vector of integers.", &Call);
6860 Check(Op1Ty->getElementType() == Op2Ty->getElementType(),
6861 "First two operands must have the same element type.", &Call);
6862 Check(Op1Ty->getElementCount() == MaskTy->getElementCount(),
6863 "First operand and mask must have the same number of elements.",
6864 &Call);
6865 Check(MaskTy->getElementType()->isIntegerTy(1),
6866 "Mask must be a vector of i1's.", &Call);
6867 Check(Call.getType() == MaskTy, "Return type must match the mask type.",
6868 &Call);
6869 break;
6870 }
6871 case Intrinsic::vector_insert: {
6872 Value *Vec = Call.getArgOperand(0);
6873 Value *SubVec = Call.getArgOperand(1);
6874 Value *Idx = Call.getArgOperand(2);
6875 unsigned IdxN = cast<ConstantInt>(Idx)->getZExtValue();
6876
6877 VectorType *VecTy = cast<VectorType>(Vec->getType());
6878 VectorType *SubVecTy = cast<VectorType>(SubVec->getType());
6879
6880 ElementCount VecEC = VecTy->getElementCount();
6881 ElementCount SubVecEC = SubVecTy->getElementCount();
6882 Check(VecTy->getElementType() == SubVecTy->getElementType(),
6883 "vector_insert parameters must have the same element "
6884 "type.",
6885 &Call);
6886 Check(IdxN % SubVecEC.getKnownMinValue() == 0,
6887 "vector_insert index must be a constant multiple of "
6888 "the subvector's known minimum vector length.");
6889
6890 // If this insertion is not the 'mixed' case where a fixed vector is
6891 // inserted into a scalable vector, ensure that the insertion of the
6892 // subvector does not overrun the parent vector.
6893 if (VecEC.isScalable() == SubVecEC.isScalable()) {
6894 Check(IdxN < VecEC.getKnownMinValue() &&
6895 IdxN + SubVecEC.getKnownMinValue() <= VecEC.getKnownMinValue(),
6896 "subvector operand of vector_insert would overrun the "
6897 "vector being inserted into.");
6898 }
6899 break;
6900 }
6901 case Intrinsic::vector_extract: {
6902 Value *Vec = Call.getArgOperand(0);
6903 Value *Idx = Call.getArgOperand(1);
6904 unsigned IdxN = cast<ConstantInt>(Idx)->getZExtValue();
6905
6906 VectorType *ResultTy = cast<VectorType>(Call.getType());
6907 VectorType *VecTy = cast<VectorType>(Vec->getType());
6908
6909 ElementCount VecEC = VecTy->getElementCount();
6910 ElementCount ResultEC = ResultTy->getElementCount();
6911
6912 Check(ResultTy->getElementType() == VecTy->getElementType(),
6913 "vector_extract result must have the same element "
6914 "type as the input vector.",
6915 &Call);
6916 Check(IdxN % ResultEC.getKnownMinValue() == 0,
6917 "vector_extract index must be a constant multiple of "
6918 "the result type's known minimum vector length.");
6919
6920 // If this extraction is not the 'mixed' case where a fixed vector is
6921 // extracted from a scalable vector, ensure that the extraction does not
6922 // overrun the parent vector.
6923 if (VecEC.isScalable() == ResultEC.isScalable()) {
6924 Check(IdxN < VecEC.getKnownMinValue() &&
6925 IdxN + ResultEC.getKnownMinValue() <= VecEC.getKnownMinValue(),
6926 "vector_extract would overrun.");
6927 }
6928 break;
6929 }
6930 case Intrinsic::vector_partial_reduce_fadd:
6931 case Intrinsic::vector_partial_reduce_add: {
6934
6935 unsigned VecWidth = VecTy->getElementCount().getKnownMinValue();
6936 unsigned AccWidth = AccTy->getElementCount().getKnownMinValue();
6937
6938 Check((VecWidth % AccWidth) == 0,
6939 "Invalid vector widths for partial "
6940 "reduction. The width of the input vector "
6941 "must be a positive integer multiple of "
6942 "the width of the accumulator vector.");
6943 break;
6944 }
6945 case Intrinsic::experimental_noalias_scope_decl: {
6946 NoAliasScopeDecls.push_back(cast<IntrinsicInst>(&Call));
6947 break;
6948 }
6949 case Intrinsic::preserve_array_access_index:
6950 case Intrinsic::preserve_struct_access_index:
6951 case Intrinsic::aarch64_ldaxr:
6952 case Intrinsic::aarch64_ldxr:
6953 case Intrinsic::arm_ldaex:
6954 case Intrinsic::arm_ldrex: {
6955 Type *ElemTy = Call.getParamElementType(0);
6956 Check(ElemTy, "Intrinsic requires elementtype attribute on first argument.",
6957 &Call);
6958 break;
6959 }
6960 case Intrinsic::aarch64_stlxr:
6961 case Intrinsic::aarch64_stxr:
6962 case Intrinsic::arm_stlex:
6963 case Intrinsic::arm_strex: {
6964 Type *ElemTy = Call.getAttributes().getParamElementType(1);
6965 Check(ElemTy,
6966 "Intrinsic requires elementtype attribute on second argument.",
6967 &Call);
6968 break;
6969 }
6970 case Intrinsic::aarch64_prefetch: {
6971 Check(cast<ConstantInt>(Call.getArgOperand(1))->getZExtValue() < 2,
6972 "write argument to llvm.aarch64.prefetch must be 0 or 1", Call);
6973 Check(cast<ConstantInt>(Call.getArgOperand(2))->getZExtValue() < 4,
6974 "target argument to llvm.aarch64.prefetch must be 0-3", Call);
6975 Check(cast<ConstantInt>(Call.getArgOperand(3))->getZExtValue() < 2,
6976 "stream argument to llvm.aarch64.prefetch must be 0 or 1", Call);
6977 Check(cast<ConstantInt>(Call.getArgOperand(4))->getZExtValue() < 2,
6978 "isdata argument to llvm.aarch64.prefetch must be 0 or 1", Call);
6979 break;
6980 }
6981 case Intrinsic::aarch64_range_prefetch: {
6982 Check(cast<ConstantInt>(Call.getArgOperand(1))->getZExtValue() < 2,
6983 "write argument to llvm.aarch64.range.prefetch must be 0 or 1", Call);
6984 Check(cast<ConstantInt>(Call.getArgOperand(2))->getZExtValue() < 2,
6985 "stream argument to llvm.aarch64.range.prefetch must be 0 or 1",
6986 Call);
6987 break;
6988 }
6989 case Intrinsic::callbr_landingpad: {
6990 const auto *CBR = dyn_cast<CallBrInst>(Call.getOperand(0));
6991 Check(CBR, "intrinstic requires callbr operand", &Call);
6992 if (!CBR)
6993 break;
6994
6995 const BasicBlock *LandingPadBB = Call.getParent();
6996 const BasicBlock *PredBB = LandingPadBB->getUniquePredecessor();
6997 if (!PredBB) {
6998 CheckFailed("Intrinsic in block must have 1 unique predecessor", &Call);
6999 break;
7000 }
7001 if (!isa<CallBrInst>(PredBB->getTerminator())) {
7002 CheckFailed("Intrinsic must have corresponding callbr in predecessor",
7003 &Call);
7004 break;
7005 }
7006 Check(llvm::is_contained(CBR->getIndirectDests(), LandingPadBB),
7007 "Intrinsic's corresponding callbr must have intrinsic's parent basic "
7008 "block in indirect destination list",
7009 &Call);
7010 const Instruction &First = *LandingPadBB->begin();
7011 Check(&First == &Call, "No other instructions may proceed intrinsic",
7012 &Call);
7013 break;
7014 }
7015 case Intrinsic::structured_gep: {
7016 // Parser should refuse those 2 cases.
7017 assert(Call.arg_size() >= 1);
7019
7020 Check(Call.paramHasAttr(0, Attribute::ElementType),
7021 "Intrinsic first parameter is missing an ElementType attribute",
7022 &Call);
7023
7024 Type *T = Call.getParamAttr(0, Attribute::ElementType).getValueAsType();
7025 for (unsigned I = 1; I < Call.arg_size(); ++I) {
7027 ConstantInt *CI = dyn_cast<ConstantInt>(Index);
7028 Check(Index->getType()->isIntegerTy(),
7029 "Index operand type must be an integer", &Call);
7030
7031 if (ArrayType *AT = dyn_cast<ArrayType>(T)) {
7032 T = AT->getElementType();
7033 } else if (StructType *ST = dyn_cast<StructType>(T)) {
7034 Check(CI, "Indexing into a struct requires a constant int", &Call);
7035 Check(CI->getZExtValue() < ST->getNumElements(),
7036 "Indexing in a struct should be inbounds", &Call);
7037 T = ST->getElementType(CI->getZExtValue());
7038 } else if (VectorType *VT = dyn_cast<VectorType>(T)) {
7039 T = VT->getElementType();
7040 } else {
7041 CheckFailed("Reached a non-composite type with more indices to process",
7042 &Call);
7043 }
7044 }
7045 break;
7046 }
7047 case Intrinsic::structured_alloca:
7048 Check(Call.hasRetAttr(Attribute::ElementType),
7049 "@llvm.structured.alloca calls require elementtype attribute.",
7050 &Call);
7051 break;
7052 case Intrinsic::amdgcn_cs_chain: {
7053 auto CallerCC = Call.getCaller()->getCallingConv();
7054 switch (CallerCC) {
7055 case CallingConv::AMDGPU_CS:
7056 case CallingConv::AMDGPU_CS_Chain:
7057 case CallingConv::AMDGPU_CS_ChainPreserve:
7058 case CallingConv::AMDGPU_ES:
7059 case CallingConv::AMDGPU_GS:
7060 case CallingConv::AMDGPU_HS:
7061 case CallingConv::AMDGPU_LS:
7062 case CallingConv::AMDGPU_VS:
7063 break;
7064 default:
7065 CheckFailed("Intrinsic cannot be called from functions with this "
7066 "calling convention",
7067 &Call);
7068 break;
7069 }
7070
7071 Check(Call.paramHasAttr(2, Attribute::InReg),
7072 "SGPR arguments must have the `inreg` attribute", &Call);
7073 Check(!Call.paramHasAttr(3, Attribute::InReg),
7074 "VGPR arguments must not have the `inreg` attribute", &Call);
7075
7076 auto *Next = Call.getNextNode();
7077 bool IsAMDUnreachable = Next && isa<IntrinsicInst>(Next) &&
7078 cast<IntrinsicInst>(Next)->getIntrinsicID() ==
7079 Intrinsic::amdgcn_unreachable;
7080 Check(Next && (isa<UnreachableInst>(Next) || IsAMDUnreachable),
7081 "llvm.amdgcn.cs.chain must be followed by unreachable", &Call);
7082 break;
7083 }
7084 case Intrinsic::amdgcn_init_exec_from_input: {
7085 const Argument *Arg = dyn_cast<Argument>(Call.getOperand(0));
7086 Check(Arg && Arg->hasInRegAttr(),
7087 "only inreg arguments to the parent function are valid as inputs to "
7088 "this intrinsic",
7089 &Call);
7090 break;
7091 }
7092 case Intrinsic::amdgcn_set_inactive_chain_arg: {
7093 auto CallerCC = Call.getCaller()->getCallingConv();
7094 switch (CallerCC) {
7095 case CallingConv::AMDGPU_CS_Chain:
7096 case CallingConv::AMDGPU_CS_ChainPreserve:
7097 break;
7098 default:
7099 CheckFailed("Intrinsic can only be used from functions with the "
7100 "amdgpu_cs_chain or amdgpu_cs_chain_preserve "
7101 "calling conventions",
7102 &Call);
7103 break;
7104 }
7105
7106 unsigned InactiveIdx = 1;
7107 Check(!Call.paramHasAttr(InactiveIdx, Attribute::InReg),
7108 "Value for inactive lanes must not have the `inreg` attribute",
7109 &Call);
7110 Check(isa<Argument>(Call.getArgOperand(InactiveIdx)),
7111 "Value for inactive lanes must be a function argument", &Call);
7112 Check(!cast<Argument>(Call.getArgOperand(InactiveIdx))->hasInRegAttr(),
7113 "Value for inactive lanes must be a VGPR function argument", &Call);
7114 break;
7115 }
7116 case Intrinsic::amdgcn_call_whole_wave: {
7118 Check(F, "Indirect whole wave calls are not allowed", &Call);
7119
7120 CallingConv::ID CC = F->getCallingConv();
7121 Check(CC == CallingConv::AMDGPU_Gfx_WholeWave,
7122 "Callee must have the amdgpu_gfx_whole_wave calling convention",
7123 &Call);
7124
7125 Check(!F->isVarArg(), "Variadic whole wave calls are not allowed", &Call);
7126
7127 Check(Call.arg_size() == F->arg_size(),
7128 "Call argument count must match callee argument count", &Call);
7129
7130 // The first argument of the call is the callee, and the first argument of
7131 // the callee is the active mask. The rest of the arguments must match.
7132 Check(F->arg_begin()->getType()->isIntegerTy(1),
7133 "Callee must have i1 as its first argument", &Call);
7134 for (auto [CallArg, FuncArg] :
7135 drop_begin(zip_equal(Call.args(), F->args()))) {
7136 Check(CallArg->getType() == FuncArg.getType(),
7137 "Argument types must match", &Call);
7138
7139 // Check that inreg attributes match between call site and function
7140 Check(Call.paramHasAttr(FuncArg.getArgNo(), Attribute::InReg) ==
7141 FuncArg.hasInRegAttr(),
7142 "Argument inreg attributes must match", &Call);
7143 }
7144 break;
7145 }
7146 case Intrinsic::amdgcn_s_prefetch_data: {
7147 Check(
7150 "llvm.amdgcn.s.prefetch.data only supports global or constant memory");
7151 break;
7152 }
7153 case Intrinsic::amdgcn_mfma_scale_f32_16x16x128_f8f6f4:
7154 case Intrinsic::amdgcn_mfma_scale_f32_32x32x64_f8f6f4: {
7155 Value *Src0 = Call.getArgOperand(0);
7156 Value *Src1 = Call.getArgOperand(1);
7157
7158 uint64_t CBSZ = cast<ConstantInt>(Call.getArgOperand(3))->getZExtValue();
7159 uint64_t BLGP = cast<ConstantInt>(Call.getArgOperand(4))->getZExtValue();
7160 Check(CBSZ <= 4, "invalid value for cbsz format", Call,
7161 Call.getArgOperand(3));
7162 Check(BLGP <= 4, "invalid value for blgp format", Call,
7163 Call.getArgOperand(4));
7164
7165 // AMDGPU::MFMAScaleFormats values
7166 auto getFormatNumRegs = [](unsigned FormatVal) {
7167 switch (FormatVal) {
7168 case 0:
7169 case 1:
7170 return 8u;
7171 case 2:
7172 case 3:
7173 return 6u;
7174 case 4:
7175 return 4u;
7176 default:
7177 llvm_unreachable("invalid format value");
7178 }
7179 };
7180
7181 auto isValidSrcASrcBVector = [](FixedVectorType *Ty) {
7182 if (!Ty || !Ty->getElementType()->isIntegerTy(32))
7183 return false;
7184 unsigned NumElts = Ty->getNumElements();
7185 return NumElts == 4 || NumElts == 6 || NumElts == 8;
7186 };
7187
7188 auto *Src0Ty = dyn_cast<FixedVectorType>(Src0->getType());
7189 auto *Src1Ty = dyn_cast<FixedVectorType>(Src1->getType());
7190 Check(isValidSrcASrcBVector(Src0Ty),
7191 "operand 0 must be 4, 6 or 8 element i32 vector", &Call, Src0);
7192 Check(isValidSrcASrcBVector(Src1Ty),
7193 "operand 1 must be 4, 6 or 8 element i32 vector", &Call, Src1);
7194
7195 // Permit excess registers for the format.
7196 Check(Src0Ty->getNumElements() >= getFormatNumRegs(CBSZ),
7197 "invalid vector type for format", &Call, Src0, Call.getArgOperand(3));
7198 Check(Src1Ty->getNumElements() >= getFormatNumRegs(BLGP),
7199 "invalid vector type for format", &Call, Src1, Call.getArgOperand(5));
7200 break;
7201 }
7202 case Intrinsic::amdgcn_wmma_f32_16x16x128_f8f6f4:
7203 case Intrinsic::amdgcn_wmma_scale_f32_16x16x128_f8f6f4:
7204 case Intrinsic::amdgcn_wmma_scale16_f32_16x16x128_f8f6f4: {
7205 Value *Src0 = Call.getArgOperand(1);
7206 Value *Src1 = Call.getArgOperand(3);
7207
7208 unsigned FmtA = cast<ConstantInt>(Call.getArgOperand(0))->getZExtValue();
7209 unsigned FmtB = cast<ConstantInt>(Call.getArgOperand(2))->getZExtValue();
7210 Check(FmtA <= 4, "invalid value for matrix format", Call,
7211 Call.getArgOperand(0));
7212 Check(FmtB <= 4, "invalid value for matrix format", Call,
7213 Call.getArgOperand(2));
7214
7215 // AMDGPU::MatrixFMT values
7216 auto getFormatNumRegs = [](unsigned FormatVal) {
7217 switch (FormatVal) {
7218 case 0:
7219 case 1:
7220 return 16u;
7221 case 2:
7222 case 3:
7223 return 12u;
7224 case 4:
7225 return 8u;
7226 default:
7227 llvm_unreachable("invalid format value");
7228 }
7229 };
7230
7231 auto isValidSrcASrcBVector = [](FixedVectorType *Ty) {
7232 if (!Ty || !Ty->getElementType()->isIntegerTy(32))
7233 return false;
7234 unsigned NumElts = Ty->getNumElements();
7235 return NumElts == 16 || NumElts == 12 || NumElts == 8;
7236 };
7237
7238 auto *Src0Ty = dyn_cast<FixedVectorType>(Src0->getType());
7239 auto *Src1Ty = dyn_cast<FixedVectorType>(Src1->getType());
7240 Check(isValidSrcASrcBVector(Src0Ty),
7241 "operand 1 must be 8, 12 or 16 element i32 vector", &Call, Src0);
7242 Check(isValidSrcASrcBVector(Src1Ty),
7243 "operand 3 must be 8, 12 or 16 element i32 vector", &Call, Src1);
7244
7245 // Permit excess registers for the format.
7246 Check(Src0Ty->getNumElements() >= getFormatNumRegs(FmtA),
7247 "invalid vector type for format", &Call, Src0, Call.getArgOperand(0));
7248 Check(Src1Ty->getNumElements() >= getFormatNumRegs(FmtB),
7249 "invalid vector type for format", &Call, Src1, Call.getArgOperand(2));
7250 break;
7251 }
7252 case Intrinsic::amdgcn_cooperative_atomic_load_32x4B:
7253 case Intrinsic::amdgcn_cooperative_atomic_load_16x8B:
7254 case Intrinsic::amdgcn_cooperative_atomic_load_8x16B:
7255 case Intrinsic::amdgcn_cooperative_atomic_store_32x4B:
7256 case Intrinsic::amdgcn_cooperative_atomic_store_16x8B:
7257 case Intrinsic::amdgcn_cooperative_atomic_store_8x16B: {
7258 // Check we only use this intrinsic on the FLAT or GLOBAL address spaces.
7259 Value *PtrArg = Call.getArgOperand(0);
7260 const unsigned AS = PtrArg->getType()->getPointerAddressSpace();
7262 "cooperative atomic intrinsics require a generic or global pointer",
7263 &Call, PtrArg);
7264
7265 // Last argument must be a MD string
7267 MDNode *MD = cast<MDNode>(Op->getMetadata());
7268 Check((MD->getNumOperands() == 1) && isa<MDString>(MD->getOperand(0)),
7269 "cooperative atomic intrinsics require that the last argument is a "
7270 "metadata string",
7271 &Call, Op);
7272 break;
7273 }
7274 case Intrinsic::nvvm_setmaxnreg_inc_sync_aligned_u32:
7275 case Intrinsic::nvvm_setmaxnreg_dec_sync_aligned_u32: {
7276 Value *V = Call.getArgOperand(0);
7277 unsigned RegCount = cast<ConstantInt>(V)->getZExtValue();
7278 Check(RegCount % 8 == 0,
7279 "reg_count argument to nvvm.setmaxnreg must be in multiples of 8");
7280 break;
7281 }
7282 case Intrinsic::experimental_convergence_entry:
7283 case Intrinsic::experimental_convergence_anchor:
7284 break;
7285 case Intrinsic::experimental_convergence_loop:
7286 break;
7287 case Intrinsic::ptrmask: {
7288 Type *Ty0 = Call.getArgOperand(0)->getType();
7289 Type *Ty1 = Call.getArgOperand(1)->getType();
7291 "llvm.ptrmask intrinsic first argument must be pointer or vector "
7292 "of pointers",
7293 &Call);
7294 Check(
7295 Ty0->isVectorTy() == Ty1->isVectorTy(),
7296 "llvm.ptrmask intrinsic arguments must be both scalars or both vectors",
7297 &Call);
7298 if (Ty0->isVectorTy())
7299 Check(cast<VectorType>(Ty0)->getElementCount() ==
7300 cast<VectorType>(Ty1)->getElementCount(),
7301 "llvm.ptrmask intrinsic arguments must have the same number of "
7302 "elements",
7303 &Call);
7304 Check(DL.getIndexTypeSizeInBits(Ty0) == Ty1->getScalarSizeInBits(),
7305 "llvm.ptrmask intrinsic second argument bitwidth must match "
7306 "pointer index type size of first argument",
7307 &Call);
7308 break;
7309 }
7310 case Intrinsic::thread_pointer: {
7312 DL.getDefaultGlobalsAddressSpace(),
7313 "llvm.thread.pointer intrinsic return type must be for the globals "
7314 "address space",
7315 &Call);
7316 break;
7317 }
7318 case Intrinsic::threadlocal_address: {
7319 const Value &Arg0 = *Call.getArgOperand(0);
7320 Check(isa<GlobalValue>(Arg0),
7321 "llvm.threadlocal.address first argument must be a GlobalValue");
7322 Check(cast<GlobalValue>(Arg0).isThreadLocal(),
7323 "llvm.threadlocal.address operand isThreadLocal() must be true");
7324 break;
7325 }
7326 case Intrinsic::lifetime_start:
7327 case Intrinsic::lifetime_end: {
7328 Value *Ptr = Call.getArgOperand(0);
7329 IntrinsicInst *II = dyn_cast<IntrinsicInst>(Ptr);
7330 Check(isa<AllocaInst>(Ptr) || isa<PoisonValue>(Ptr) ||
7331 (II && II->getIntrinsicID() == Intrinsic::structured_alloca),
7332 "llvm.lifetime.start/end can only be used on alloca or poison",
7333 &Call);
7334 break;
7335 }
7336 case Intrinsic::sponentry: {
7337 const unsigned StackAS = DL.getAllocaAddrSpace();
7338 const Type *RetTy = Call.getFunctionType()->getReturnType();
7339 Check(RetTy->getPointerAddressSpace() == StackAS,
7340 "llvm.sponentry must return a pointer to the stack", &Call);
7341 break;
7342 }
7343 };
7344
7345 // Verify that there aren't any unmediated control transfers between funclets.
7347 Function *F = Call.getParent()->getParent();
7348 if (F->hasPersonalityFn() &&
7349 isScopedEHPersonality(classifyEHPersonality(F->getPersonalityFn()))) {
7350 // Run EH funclet coloring on-demand and cache results for other intrinsic
7351 // calls in this function
7352 if (BlockEHFuncletColors.empty())
7353 BlockEHFuncletColors = colorEHFunclets(*F);
7354
7355 // Check for catch-/cleanup-pad in first funclet block
7356 bool InEHFunclet = false;
7357 BasicBlock *CallBB = Call.getParent();
7358 const ColorVector &CV = BlockEHFuncletColors.find(CallBB)->second;
7359 assert(CV.size() > 0 && "Uncolored block");
7360 for (BasicBlock *ColorFirstBB : CV)
7361 if (auto It = ColorFirstBB->getFirstNonPHIIt();
7362 It != ColorFirstBB->end())
7364 InEHFunclet = true;
7365
7366 // Check for funclet operand bundle
7367 bool HasToken = false;
7368 for (unsigned I = 0, E = Call.getNumOperandBundles(); I != E; ++I)
7370 HasToken = true;
7371
7372 // This would cause silent code truncation in WinEHPrepare
7373 if (InEHFunclet)
7374 Check(HasToken, "Missing funclet token on intrinsic call", &Call);
7375 }
7376 }
7377}
7378
/// Carefully grab the subprogram from a local scope.
///
/// This carefully grabs the subprogram from a local scope, avoiding the
/// built-in assertions that would typically fire.
  // A null scope reference has no subprogram to report; missing/broken scope
  // operands are diagnosed by other verifier checks, not here.
  if (!LocalScope)
    return nullptr;

  // The scope may directly be the subprogram itself.
  if (auto *SP = dyn_cast<DISubprogram>(LocalScope))
    return SP;

  // Lexical blocks chain up to an enclosing scope; recurse through the raw
  // (unverified) scope operand so a malformed chain does not trip the
  // accessor assertions this helper exists to avoid.
  if (auto *LB = dyn_cast<DILexicalBlockBase>(LocalScope))
    return getSubprogram(LB->getRawScope());

  // Just return null; broken scope chains are checked elsewhere.
  assert(!isa<DILocalScope>(LocalScope) && "Unknown type of local scope");
  return nullptr;
}
7397
/// Verify a single #dbg_label debug record: its label operand must be a
/// DILabel and its !dbg attachment must resolve to the same subprogram as
/// the label's scope.
void Verifier::visit(DbgLabelRecord &DLR) {
          "invalid #dbg_label intrinsic variable", &DLR, DLR.getRawLabel());

  // Ignore broken !dbg attachments; they're checked elsewhere.
  if (MDNode *N = DLR.getDebugLoc().getAsMDNode())
    if (!isa<DILocation>(N))
      return;

  // BB/F are passed into the diagnostics below purely for error context;
  // F may be null if the record is not attached to a function yet.
  BasicBlock *BB = DLR.getParent();
  Function *F = BB ? BB->getParent() : nullptr;

  // The scopes for variables and !dbg attachments must agree.
  DILabel *Label = DLR.getLabel();
  DILocation *Loc = DLR.getDebugLoc();
  CheckDI(Loc, "#dbg_label record requires a !dbg attachment", &DLR, BB, F);

  // Resolve both scope chains to their subprograms; bail out quietly when
  // either chain is broken, since that is reported by other checks.
  DISubprogram *LabelSP = getSubprogram(Label->getRawScope());
  DISubprogram *LocSP = getSubprogram(Loc->getRawScope());
  if (!LabelSP || !LocSP)
    return;

  CheckDI(LabelSP == LocSP,
          "mismatched subprogram between #dbg_label label and !dbg attachment",
          &DLR, BB, F, Label, Label->getScope()->getSubprogram(), Loc,
          Loc->getScope()->getSubprogram());
}
7425
/// Verify a single #dbg_value/#dbg_declare/#dbg_assign variable record:
/// record kind, location/address metadata shape, variable and expression
/// operands, and agreement between the variable's scope and the attached
/// DILocation's subprogram.
void Verifier::visit(DbgVariableRecord &DVR) {
  BasicBlock *BB = DVR.getParent();
  Function *F = BB->getParent();

  // Only the four known record kinds are legal.
  CheckDI(DVR.getType() == DbgVariableRecord::LocationType::Value ||
              DVR.getType() == DbgVariableRecord::LocationType::Declare ||
              DVR.getType() == DbgVariableRecord::LocationType::DeclareValue ||
              DVR.getType() == DbgVariableRecord::LocationType::Assign,
          "invalid #dbg record type", &DVR, DVR.getType(), BB, F);

  // The location for a DbgVariableRecord must be either a ValueAsMetadata,
  // DIArgList, or an empty MDNode (which is a legacy representation for an
  // "undef" location).
  auto *MD = DVR.getRawLocation();
  CheckDI(MD && (isa<ValueAsMetadata>(MD) || isa<DIArgList>(MD) ||
                 (isa<MDNode>(MD) && !cast<MDNode>(MD)->getNumOperands())),
          "invalid #dbg record address/value", &DVR, MD, BB, F);
  if (auto *VAM = dyn_cast<ValueAsMetadata>(MD)) {
    visitValueAsMetadata(*VAM, F);
    if (DVR.isDbgDeclare()) {
      // Allow integers here to support inttoptr salvage.
      Type *Ty = VAM->getValue()->getType();
      CheckDI(Ty->isPointerTy() || Ty->isIntegerTy(),
              "location of #dbg_declare must be a pointer or int", &DVR, MD, BB,
              F);
    }
  } else if (auto *AL = dyn_cast<DIArgList>(MD)) {
    visitDIArgList(*AL, F);
  }

  // Validate the variable operand, then walk its MDNode graph (debug
  // locations are not allowed inside it).
          "invalid #dbg record variable", &DVR, DVR.getRawVariable(), BB, F);
  visitMDNode(*DVR.getRawVariable(), AreDebugLocsAllowed::No);

  // Same for the expression operand.
          "invalid #dbg record expression", &DVR, DVR.getRawExpression(), BB,
          F);
  visitMDNode(*DVR.getExpression(), AreDebugLocsAllowed::No);

  // #dbg_assign records carry three extra operands: an ID linking them to
  // store instructions, an address, and an address expression.
  if (DVR.isDbgAssign()) {
            "invalid #dbg_assign DIAssignID", &DVR, DVR.getRawAssignID(), BB,
            F);
    visitMDNode(*cast<DIAssignID>(DVR.getRawAssignID()),
                AreDebugLocsAllowed::No);

    const auto *RawAddr = DVR.getRawAddress();
    // Similarly to the location above, the address for an assign
    // DbgVariableRecord must be a ValueAsMetadata or an empty MDNode, which
    // represents an undef address.
    CheckDI(
        isa<ValueAsMetadata>(RawAddr) ||
            (isa<MDNode>(RawAddr) && !cast<MDNode>(RawAddr)->getNumOperands()),
        "invalid #dbg_assign address", &DVR, DVR.getRawAddress(), BB, F);
    if (auto *VAM = dyn_cast<ValueAsMetadata>(RawAddr))
      visitValueAsMetadata(*VAM, F);

            "invalid #dbg_assign address expression", &DVR,
            DVR.getRawAddressExpression(), BB, F);
    visitMDNode(*DVR.getAddressExpression(), AreDebugLocsAllowed::No);

    // All of the linked instructions should be in the same function as DVR.
    for (Instruction *I : at::getAssignmentInsts(&DVR))
      CheckDI(DVR.getFunction() == I->getFunction(),
              "inst not in same function as #dbg_assign", I, &DVR, BB, F);
  }

  // This check is redundant with one in visitLocalVariable().
  DILocalVariable *Var = DVR.getVariable();
  CheckDI(isType(Var->getRawType()), "invalid type ref", Var, Var->getRawType(),
          BB, F);

  // The record must carry a proper DILocation debug-loc attachment.
  auto *DLNode = DVR.getDebugLoc().getAsMDNode();
  CheckDI(isa_and_nonnull<DILocation>(DLNode), "invalid #dbg record DILocation",
          &DVR, DLNode, BB, F);
  DILocation *Loc = DVR.getDebugLoc();

  // The scopes for variables and !dbg attachments must agree.
  DISubprogram *VarSP = getSubprogram(Var->getRawScope());
  DISubprogram *LocSP = getSubprogram(Loc->getRawScope());
  if (!VarSP || !LocSP)
    return; // Broken scope chains are checked elsewhere.

  CheckDI(VarSP == LocSP,
          "mismatched subprogram between #dbg record variable and DILocation",
          &DVR, BB, F, Var, Var->getScope()->getSubprogram(), Loc,
          Loc->getScope()->getSubprogram(), BB, F);

  // Additionally verify that parameter records appear in a sane order.
  verifyFnArgs(DVR);
}
7517
/// Verify a vector-predicated (llvm.vp.*) intrinsic call: cast intrinsics
/// must relate their operand/result element types and widths correctly, and
/// a few specific intrinsics (fcmp/icmp/is.fpclass/splice) carry extra
/// operand constraints.
void Verifier::visitVPIntrinsic(VPIntrinsic &VPI) {
  // All VP casts are lane-wise: operand and result must have the same
  // element count, and element types must be compatible with the cast kind.
  if (auto *VPCast = dyn_cast<VPCastIntrinsic>(&VPI)) {
    auto *RetTy = cast<VectorType>(VPCast->getType());
    auto *ValTy = cast<VectorType>(VPCast->getOperand(0)->getType());
    Check(RetTy->getElementCount() == ValTy->getElementCount(),
          "VP cast intrinsic first argument and result vector lengths must be "
          "equal",
          *VPCast);

    switch (VPCast->getIntrinsicID()) {
    default:
      llvm_unreachable("Unknown VP cast intrinsic");
    case Intrinsic::vp_trunc:
      Check(RetTy->isIntOrIntVectorTy() && ValTy->isIntOrIntVectorTy(),
            "llvm.vp.trunc intrinsic first argument and result element type "
            "must be integer",
            *VPCast);
      // trunc must strictly narrow.
      Check(RetTy->getScalarSizeInBits() < ValTy->getScalarSizeInBits(),
            "llvm.vp.trunc intrinsic the bit size of first argument must be "
            "larger than the bit size of the return type",
            *VPCast);
      break;
    case Intrinsic::vp_zext:
    case Intrinsic::vp_sext:
      Check(RetTy->isIntOrIntVectorTy() && ValTy->isIntOrIntVectorTy(),
            "llvm.vp.zext or llvm.vp.sext intrinsic first argument and result "
            "element type must be integer",
            *VPCast);
      // zext/sext must strictly widen.
      Check(RetTy->getScalarSizeInBits() > ValTy->getScalarSizeInBits(),
            "llvm.vp.zext or llvm.vp.sext intrinsic the bit size of first "
            "argument must be smaller than the bit size of the return type",
            *VPCast);
      break;
    case Intrinsic::vp_fptoui:
    case Intrinsic::vp_fptosi:
    case Intrinsic::vp_lrint:
    case Intrinsic::vp_llrint:
      // FIXME(review): the two adjacent string literals below concatenate
      // without a separating space, so the diagnostic reads
      // "...llvm.vp.llrintintrinsic first argument...". Message text only;
      // behavior of the check itself is correct.
      Check(
          RetTy->isIntOrIntVectorTy() && ValTy->isFPOrFPVectorTy(),
          "llvm.vp.fptoui, llvm.vp.fptosi, llvm.vp.lrint or llvm.vp.llrint" "intrinsic first argument element "
          "type must be floating-point and result element type must be integer",
          *VPCast);
      break;
    case Intrinsic::vp_uitofp:
    case Intrinsic::vp_sitofp:
      Check(
          RetTy->isFPOrFPVectorTy() && ValTy->isIntOrIntVectorTy(),
          "llvm.vp.uitofp or llvm.vp.sitofp intrinsic first argument element "
          "type must be integer and result element type must be floating-point",
          *VPCast);
      break;
    case Intrinsic::vp_fptrunc:
      Check(RetTy->isFPOrFPVectorTy() && ValTy->isFPOrFPVectorTy(),
            "llvm.vp.fptrunc intrinsic first argument and result element type "
            "must be floating-point",
            *VPCast);
      // fptrunc must strictly narrow.
      Check(RetTy->getScalarSizeInBits() < ValTy->getScalarSizeInBits(),
            "llvm.vp.fptrunc intrinsic the bit size of first argument must be "
            "larger than the bit size of the return type",
            *VPCast);
      break;
    case Intrinsic::vp_fpext:
      Check(RetTy->isFPOrFPVectorTy() && ValTy->isFPOrFPVectorTy(),
            "llvm.vp.fpext intrinsic first argument and result element type "
            "must be floating-point",
            *VPCast);
      // fpext must strictly widen.
      Check(RetTy->getScalarSizeInBits() > ValTy->getScalarSizeInBits(),
            "llvm.vp.fpext intrinsic the bit size of first argument must be "
            "smaller than the bit size of the return type",
            *VPCast);
      break;
    case Intrinsic::vp_ptrtoint:
      Check(RetTy->isIntOrIntVectorTy() && ValTy->isPtrOrPtrVectorTy(),
            "llvm.vp.ptrtoint intrinsic first argument element type must be "
            "pointer and result element type must be integer",
            *VPCast);
      break;
    case Intrinsic::vp_inttoptr:
      Check(RetTy->isPtrOrPtrVectorTy() && ValTy->isIntOrIntVectorTy(),
            "llvm.vp.inttoptr intrinsic first argument element type must be "
            "integer and result element type must be pointer",
            *VPCast);
      break;
    }
  }

  // Intrinsic-specific operand checks.
  switch (VPI.getIntrinsicID()) {
  case Intrinsic::vp_fcmp: {
    // The condition-code operand must be a floating-point predicate.
    auto Pred = cast<VPCmpIntrinsic>(&VPI)->getPredicate();
          "invalid predicate for VP FP comparison intrinsic", &VPI);
    break;
  }
  case Intrinsic::vp_icmp: {
    // The condition-code operand must be an integer predicate.
    auto Pred = cast<VPCmpIntrinsic>(&VPI)->getPredicate();
          "invalid predicate for VP integer comparison intrinsic", &VPI);
    break;
  }
  case Intrinsic::vp_is_fpclass: {
    // Test mask may only use bits covered by fcAllFlags.
    auto TestMask = cast<ConstantInt>(VPI.getOperand(1));
    Check((TestMask->getZExtValue() & ~static_cast<unsigned>(fcAllFlags)) == 0,
          "unsupported bits for llvm.vp.is.fpclass test mask");
    break;
  }
  case Intrinsic::experimental_vp_splice: {
    VectorType *VecTy = cast<VectorType>(VPI.getType());
    int64_t Idx = cast<ConstantInt>(VPI.getArgOperand(2))->getSExtValue();
    int64_t KnownMinNumElements = VecTy->getElementCount().getKnownMinValue();
    // For scalable vectors, a vscale_range attribute on the enclosing
    // function raises the known minimum element count.
    if (VPI.getParent() && VPI.getParent()->getParent()) {
      AttributeList Attrs = VPI.getParent()->getParent()->getAttributes();
      if (Attrs.hasFnAttr(Attribute::VScaleRange))
        KnownMinNumElements *= Attrs.getFnAttrs().getVScaleRangeMin();
    }
    // Negative indices count back from the end; either way the splice index
    // must stay within [-VL, VL-1].
    Check((Idx < 0 && std::abs(Idx) <= KnownMinNumElements) ||
              (Idx >= 0 && Idx < KnownMinNumElements),
          "The splice index exceeds the range [-VL, VL-1] where VL is the "
          "known minimum number of elements in the vector. For scalable "
          "vectors the minimum number of elements is determined from "
          "vscale_range.",
          &VPI);
    break;
  }
  }
}
7643
// Checks a constrained floating-point intrinsic call: the argument count
// must account for the trailing metadata operands (exception behavior and,
// when present, rounding mode), and each intrinsic family below imposes
// extra type constraints on its first argument and result.
7644 void Verifier::visitConstrainedFPIntrinsic(ConstrainedFPIntrinsic &FPI) {
7645 unsigned NumOperands = FPI.getNonMetadataArgCount();
7646 bool HasRoundingMD =
7648 
7649 // Add the expected number of metadata operands.
7650 NumOperands += (1 + HasRoundingMD);
7651 
7652 // Compare intrinsics carry an extra predicate metadata operand.
7654 NumOperands += 1;
7655 Check((FPI.arg_size() == NumOperands),
7656 "invalid arguments for constrained FP intrinsic", &FPI);
7657 
7658 switch (FPI.getIntrinsicID()) {
// lrint/llrint and lround/llround are scalar-only conversions.
7659 case Intrinsic::experimental_constrained_lrint:
7660 case Intrinsic::experimental_constrained_llrint: {
7661 Type *ValTy = FPI.getArgOperand(0)->getType();
7662 Type *ResultTy = FPI.getType();
7663 Check(!ValTy->isVectorTy() && !ResultTy->isVectorTy(),
7664 "Intrinsic does not support vectors", &FPI);
7665 break;
7666 }
7667 
7668 case Intrinsic::experimental_constrained_lround:
7669 case Intrinsic::experimental_constrained_llround: {
7670 Type *ValTy = FPI.getArgOperand(0)->getType();
7671 Type *ResultTy = FPI.getType();
7672 Check(!ValTy->isVectorTy() && !ResultTy->isVectorTy(),
7673 "Intrinsic does not support vectors", &FPI);
7674 break;
7675 }
7676 
7677 case Intrinsic::experimental_constrained_fcmp:
7678 case Intrinsic::experimental_constrained_fcmps: {
7679 auto Pred = cast<ConstrainedFPCmpIntrinsic>(&FPI)->getPredicate();
7681 "invalid predicate for constrained FP comparison intrinsic", &FPI);
7682 break;
7683 }
7684 
// fp-to-int conversions: source must be FP (or an FP vector) and the result
// integer, with matching vector-ness and element counts.
7685 case Intrinsic::experimental_constrained_fptosi:
7686 case Intrinsic::experimental_constrained_fptoui: {
7687 Value *Operand = FPI.getArgOperand(0);
7688 ElementCount SrcEC;
7689 Check(Operand->getType()->isFPOrFPVectorTy(),
7690 "Intrinsic first argument must be floating point", &FPI);
7691 if (auto *OperandT = dyn_cast<VectorType>(Operand->getType())) {
7692 SrcEC = cast<VectorType>(OperandT)->getElementCount();
7693 }
7694 
7695 Operand = &FPI;
7696 Check(SrcEC.isNonZero() == Operand->getType()->isVectorTy(),
7697 "Intrinsic first argument and result disagree on vector use", &FPI);
7698 Check(Operand->getType()->isIntOrIntVectorTy(),
7699 "Intrinsic result must be an integer", &FPI);
7700 if (auto *OperandT = dyn_cast<VectorType>(Operand->getType())) {
7701 Check(SrcEC == cast<VectorType>(OperandT)->getElementCount(),
7702 "Intrinsic first argument and result vector lengths must be equal",
7703 &FPI);
7704 }
7705 break;
7706 }
7707 
// int-to-fp conversions: mirror image of the fptosi/fptoui rules above.
7708 case Intrinsic::experimental_constrained_sitofp:
7709 case Intrinsic::experimental_constrained_uitofp: {
7710 Value *Operand = FPI.getArgOperand(0);
7711 ElementCount SrcEC;
7712 Check(Operand->getType()->isIntOrIntVectorTy(),
7713 "Intrinsic first argument must be integer", &FPI);
7714 if (auto *OperandT = dyn_cast<VectorType>(Operand->getType())) {
7715 SrcEC = cast<VectorType>(OperandT)->getElementCount();
7716 }
7717 
7718 Operand = &FPI;
7719 Check(SrcEC.isNonZero() == Operand->getType()->isVectorTy(),
7720 "Intrinsic first argument and result disagree on vector use", &FPI);
7721 Check(Operand->getType()->isFPOrFPVectorTy(),
7722 "Intrinsic result must be a floating point", &FPI);
7723 if (auto *OperandT = dyn_cast<VectorType>(Operand->getType())) {
7724 Check(SrcEC == cast<VectorType>(OperandT)->getElementCount(),
7725 "Intrinsic first argument and result vector lengths must be equal",
7726 &FPI);
7727 }
7728 break;
7729 }
7730 
// fptrunc must strictly narrow and fpext must strictly widen the scalar
// width; both require the operand/result vector shapes to agree.
7731 case Intrinsic::experimental_constrained_fptrunc:
7732 case Intrinsic::experimental_constrained_fpext: {
7733 Value *Operand = FPI.getArgOperand(0);
7734 Type *OperandTy = Operand->getType();
7735 Value *Result = &FPI;
7736 Type *ResultTy = Result->getType();
7737 Check(OperandTy->isFPOrFPVectorTy(),
7738 "Intrinsic first argument must be FP or FP vector", &FPI);
7739 Check(ResultTy->isFPOrFPVectorTy(),
7740 "Intrinsic result must be FP or FP vector", &FPI);
7741 Check(OperandTy->isVectorTy() == ResultTy->isVectorTy(),
7742 "Intrinsic first argument and result disagree on vector use", &FPI);
7743 if (OperandTy->isVectorTy()) {
7744 Check(cast<VectorType>(OperandTy)->getElementCount() ==
7745 cast<VectorType>(ResultTy)->getElementCount(),
7746 "Intrinsic first argument and result vector lengths must be equal",
7747 &FPI);
7748 }
7749 if (FPI.getIntrinsicID() == Intrinsic::experimental_constrained_fptrunc) {
7750 Check(OperandTy->getScalarSizeInBits() > ResultTy->getScalarSizeInBits(),
7751 "Intrinsic first argument's type must be larger than result type",
7752 &FPI);
7753 } else {
7754 Check(OperandTy->getScalarSizeInBits() < ResultTy->getScalarSizeInBits(),
7755 "Intrinsic first argument's type must be smaller than result type",
7756 &FPI);
7757 }
7758 break;
7759 }
7760 
7761 default:
7762 break;
7763 }
7764 
7765 // If a non-metadata argument is passed in a metadata slot then the
7766 // error will be caught earlier when the incorrect argument doesn't
7767 // match the specification in the intrinsic call table. Thus, no
7768 // argument type check is needed here.
7769 
// Every constrained intrinsic carries an exception-behavior operand; the
// rounding-mode operand is validated only when the intrinsic has one.
7770 Check(FPI.getExceptionBehavior().has_value(),
7771 "invalid exception behavior argument", &FPI);
7772 if (HasRoundingMD) {
7773 Check(FPI.getRoundingMode().has_value(), "invalid rounding mode argument",
7774 &FPI);
7775 }
7776 }
7777
7778void Verifier::verifyFragmentExpression(const DbgVariableRecord &DVR) {
7779 DILocalVariable *V = dyn_cast_or_null<DILocalVariable>(DVR.getRawVariable());
7780 DIExpression *E = dyn_cast_or_null<DIExpression>(DVR.getRawExpression());
7781
7782 // We don't know whether this intrinsic verified correctly.
7783 if (!V || !E || !E->isValid())
7784 return;
7785
7786 // Nothing to do if this isn't a DW_OP_LLVM_fragment expression.
7787 auto Fragment = E->getFragmentInfo();
7788 if (!Fragment)
7789 return;
7790
7791 // The frontend helps out GDB by emitting the members of local anonymous
7792 // unions as artificial local variables with shared storage. When SROA splits
7793 // the storage for artificial local variables that are smaller than the entire
7794 // union, the overhang piece will be outside of the allotted space for the
7795 // variable and this check fails.
7796 // FIXME: Remove this check as soon as clang stops doing this; it hides bugs.
7797 if (V->isArtificial())
7798 return;
7799
7800 verifyFragmentExpression(*V, *Fragment, &DVR);
7801}
7802
// Shared checker for fragment bounds: a fragment (offset/size in bits) must
// fit inside variable \p V without covering it entirely. \p Desc is the
// value or metadata attached to the diagnostic on failure.
7803 template <typename ValueOrMetadata>
7804 void Verifier::verifyFragmentExpression(const DIVariable &V,
7806 ValueOrMetadata *Desc) {
7807 // If there's no size, the type is broken, but that should be checked
7808 // elsewhere.
7809 auto VarSize = V.getSizeInBits();
7810 if (!VarSize)
7811 return;
7812 
7813 unsigned FragSize = Fragment.SizeInBits;
7814 unsigned FragOffset = Fragment.OffsetInBits;
// The fragment must end at or before the variable's last bit, and a
// fragment equal to the whole variable is redundant and rejected.
7815 CheckDI(FragSize + FragOffset <= *VarSize,
7816 "fragment is larger than or outside of variable", Desc, &V);
7817 CheckDI(FragSize != *VarSize, "fragment covers entire variable", Desc, &V);
7818 }
7819
7820void Verifier::verifyFnArgs(const DbgVariableRecord &DVR) {
7821 // This function does not take the scope of noninlined function arguments into
7822 // account. Don't run it if current function is nodebug, because it may
7823 // contain inlined debug intrinsics.
7824 if (!HasDebugInfo)
7825 return;
7826
7827 // For performance reasons only check non-inlined ones.
7828 if (DVR.getDebugLoc()->getInlinedAt())
7829 return;
7830
7831 DILocalVariable *Var = DVR.getVariable();
7832 CheckDI(Var, "#dbg record without variable");
7833
7834 unsigned ArgNo = Var->getArg();
7835 if (!ArgNo)
7836 return;
7837
7838 // Verify there are no duplicate function argument debug info entries.
7839 // These will cause hard-to-debug assertions in the DWARF backend.
7840 if (DebugFnArgs.size() < ArgNo)
7841 DebugFnArgs.resize(ArgNo, nullptr);
7842
7843 auto *Prev = DebugFnArgs[ArgNo - 1];
7844 DebugFnArgs[ArgNo - 1] = Var;
7845 CheckDI(!Prev || (Prev == Var), "conflicting debug info for argument", &DVR,
7846 Prev, Var);
7847}
7848
// Reject DIExpressions that use DW_OP_LLVM_entry_value: entry values are
// only legal in MIR, with the single exception of swiftasync arguments
// (which have an ABI guarantee to land in a specific register).
7849 void Verifier::verifyNotEntryValue(const DbgVariableRecord &DVR) {
7850 DIExpression *E = dyn_cast_or_null<DIExpression>(DVR.getRawExpression());
7851 
7852 // We don't know whether this intrinsic verified correctly.
7853 if (!E || !E->isValid())
7854 return;
7855 
7857 Value *VarValue = DVR.getVariableLocationOp(0);
// Undef/poison locations carry no information; nothing to check.
7858 if (isa<UndefValue>(VarValue) || isa<PoisonValue>(VarValue))
7859 return;
7860 // We allow EntryValues for swift async arguments, as they have an
7861 // ABI-guarantee to be turned into a specific register.
7862 if (auto *ArgLoc = dyn_cast_or_null<Argument>(VarValue);
7863 ArgLoc && ArgLoc->hasAttribute(Attribute::SwiftAsync))
7864 return;
7865 }
7866 
7867 CheckDI(!E->isEntryValue(),
7868 "Entry values are only allowed in MIR unless they target a "
7869 "swiftasync Argument",
7870 &DVR);
7871 }
7872
7873void Verifier::verifyCompileUnits() {
7874 // When more than one Module is imported into the same context, such as during
7875 // an LTO build before linking the modules, ODR type uniquing may cause types
7876 // to point to a different CU. This check does not make sense in this case.
7877 if (M.getContext().isODRUniquingDebugTypes())
7878 return;
7879 auto *CUs = M.getNamedMetadata("llvm.dbg.cu");
7880 SmallPtrSet<const Metadata *, 2> Listed;
7881 if (CUs)
7882 Listed.insert_range(CUs->operands());
7883 for (const auto *CU : CUVisited)
7884 CheckDI(Listed.count(CU), "DICompileUnit not listed in llvm.dbg.cu", CU);
7885 CUVisited.clear();
7886}
7887
7888void Verifier::verifyDeoptimizeCallingConvs() {
7889 if (DeoptimizeDeclarations.empty())
7890 return;
7891
7892 const Function *First = DeoptimizeDeclarations[0];
7893 for (const auto *F : ArrayRef(DeoptimizeDeclarations).slice(1)) {
7894 Check(First->getCallingConv() == F->getCallingConv(),
7895 "All llvm.experimental.deoptimize declarations must have the same "
7896 "calling convention",
7897 First, F);
7898 }
7899}
7900
7901void Verifier::verifyAttachedCallBundle(const CallBase &Call,
7902 const OperandBundleUse &BU) {
7903 FunctionType *FTy = Call.getFunctionType();
7904
7905 Check((FTy->getReturnType()->isPointerTy() ||
7906 (Call.doesNotReturn() && FTy->getReturnType()->isVoidTy())),
7907 "a call with operand bundle \"clang.arc.attachedcall\" must call a "
7908 "function returning a pointer or a non-returning function that has a "
7909 "void return type",
7910 Call);
7911
7912 Check(BU.Inputs.size() == 1 && isa<Function>(BU.Inputs.front()),
7913 "operand bundle \"clang.arc.attachedcall\" requires one function as "
7914 "an argument",
7915 Call);
7916
7917 auto *Fn = cast<Function>(BU.Inputs.front());
7918 Intrinsic::ID IID = Fn->getIntrinsicID();
7919
7920 if (IID) {
7921 Check((IID == Intrinsic::objc_retainAutoreleasedReturnValue ||
7922 IID == Intrinsic::objc_claimAutoreleasedReturnValue ||
7923 IID == Intrinsic::objc_unsafeClaimAutoreleasedReturnValue),
7924 "invalid function argument", Call);
7925 } else {
7926 StringRef FnName = Fn->getName();
7927 Check((FnName == "objc_retainAutoreleasedReturnValue" ||
7928 FnName == "objc_claimAutoreleasedReturnValue" ||
7929 FnName == "objc_unsafeClaimAutoreleasedReturnValue"),
7930 "invalid function argument", Call);
7931 }
7932}
7933
// Checks every llvm.experimental.noalias.scope.decl in the function: each
// declaration must name exactly one scope, and (optionally) no two
// declarations of the same scope may dominate one another.
7934 void Verifier::verifyNoAliasScopeDecl() {
7935 if (NoAliasScopeDecls.empty())
7936 return;
7937 
7938 // only a single scope must be declared at a time.
7939 for (auto *II : NoAliasScopeDecls) {
7940 assert(II->getIntrinsicID() == Intrinsic::experimental_noalias_scope_decl &&
7941 "Not a llvm.experimental.noalias.scope.decl ?");
7942 const auto *ScopeListMV = dyn_cast<MetadataAsValue>(
7944 Check(ScopeListMV != nullptr,
7945 "llvm.experimental.noalias.scope.decl must have a MetadataAsValue "
7946 "argument",
7947 II);
7948 
7949 const auto *ScopeListMD = dyn_cast<MDNode>(ScopeListMV->getMetadata());
7950 Check(ScopeListMD != nullptr, "!id.scope.list must point to an MDNode", II);
7951 Check(ScopeListMD->getNumOperands() == 1,
7952 "!id.scope.list must point to a list with a single scope", II);
7953 visitAliasScopeListMetadata(ScopeListMD);
7954 }
7955 
7956 // Only check the domination rule when requested. Once all passes have been
7957 // adapted this option can go away.
7959 return;
7960 
7961 // Now sort the intrinsics based on the scope MDNode so that declarations of
7962 // the same scopes are next to each other.
7963 auto GetScope = [](IntrinsicInst *II) {
7964 const auto *ScopeListMV = cast<MetadataAsValue>(
7966 return &cast<MDNode>(ScopeListMV->getMetadata())->getOperand(0);
7967 };
7968 
7969 // We are sorting on MDNode pointers here. For valid input IR this is ok.
7970 // TODO: Sort on Metadata ID to avoid non-deterministic error messages.
7971 auto Compare = [GetScope](IntrinsicInst *Lhs, IntrinsicInst *Rhs) {
7972 return GetScope(Lhs) < GetScope(Rhs);
7973 };
7974 
7975 llvm::sort(NoAliasScopeDecls, Compare);
7976 
7977 // Go over the intrinsics and check that for the same scope, they are not
7978 // dominating each other.
7979 auto ItCurrent = NoAliasScopeDecls.begin();
7980 while (ItCurrent != NoAliasScopeDecls.end()) {
// Advance ItNext past the run of declarations sharing CurScope.
7981 auto CurScope = GetScope(*ItCurrent);
7982 auto ItNext = ItCurrent;
7983 do {
7984 ++ItNext;
7985 } while (ItNext != NoAliasScopeDecls.end() &&
7986 GetScope(*ItNext) == CurScope);
7987 
7988 // [ItCurrent, ItNext) represents the declarations for the same scope.
7989 // Ensure they are not dominating each other.. but only if it is not too
7990 // expensive.
// The pairwise domination check is quadratic, so it is capped at runs of
// fewer than 32 declarations per scope.
7991 if (ItNext - ItCurrent < 32)
7992 for (auto *I : llvm::make_range(ItCurrent, ItNext))
7993 for (auto *J : llvm::make_range(ItCurrent, ItNext))
7994 if (I != J)
7995 Check(!DT.dominates(I, J),
7996 "llvm.experimental.noalias.scope.decl dominates another one "
7997 "with the same scope",
7998 I);
7999 ItCurrent = ItNext;
8000 }
8001 }
8002
8003//===----------------------------------------------------------------------===//
8004// Implement the public interfaces to this file...
8005//===----------------------------------------------------------------------===//
8006
8008 Function &F = const_cast<Function &>(f);
8009 
8010 // Don't use a raw_null_ostream. Printing IR is expensive.
8011 Verifier V(OS, /*ShouldTreatBrokenDebugInfoAsError=*/true, *f.getParent());
8012 
8013 // Note that this function's return value is inverted from what you would
8014 // expect of a function called "verify".
// Returns true when the function is broken.
8015 return !V.verify(F);
8016 }
8017
8019 bool *BrokenDebugInfo) {
8020 // Don't use a raw_null_ostream. Printing IR is expensive.
// If the caller wants the debug-info status reported separately, broken
// debug info is not treated as a hard error.
8021 Verifier V(OS, /*ShouldTreatBrokenDebugInfoAsError=*/!BrokenDebugInfo, M);
8022 
// Verify every function first, then the module-level invariants.
8023 bool Broken = false;
8024 for (const Function &F : M)
8025 Broken |= !V.verify(F);
8026 
8027 Broken |= !V.verify();
8028 if (BrokenDebugInfo)
8029 *BrokenDebugInfo = V.hasBrokenDebugInfo();
8030 // Note that this function's return value is inverted from what you would
8031 // expect of a function called "verify".
8032 return Broken;
8033 }
8034
8035namespace {
8036
8037struct VerifierLegacyPass : public FunctionPass {
8038 static char ID;
8039
8040 std::unique_ptr<Verifier> V;
8041 bool FatalErrors = true;
8042
8043 VerifierLegacyPass() : FunctionPass(ID) {}
8044 explicit VerifierLegacyPass(bool FatalErrors)
8045 : FunctionPass(ID), FatalErrors(FatalErrors) {}
8046
8047 bool doInitialization(Module &M) override {
8048 V = std::make_unique<Verifier>(
8049 &dbgs(), /*ShouldTreatBrokenDebugInfoAsError=*/false, M);
8050 return false;
8051 }
8052
8053 bool runOnFunction(Function &F) override {
8054 if (!V->verify(F) && FatalErrors) {
8055 errs() << "in function " << F.getName() << '\n';
8056 report_fatal_error("Broken function found, compilation aborted!");
8057 }
8058 return false;
8059 }
8060
8061 bool doFinalization(Module &M) override {
8062 bool HasErrors = false;
8063 for (Function &F : M)
8064 if (F.isDeclaration())
8065 HasErrors |= !V->verify(F);
8066
8067 HasErrors |= !V->verify();
8068 if (FatalErrors && (HasErrors || V->hasBrokenDebugInfo()))
8069 report_fatal_error("Broken module found, compilation aborted!");
8070 return false;
8071 }
8072
8073 void getAnalysisUsage(AnalysisUsage &AU) const override {
8074 AU.setPreservesAll();
8075 }
8076};
8077
8078} // end anonymous namespace
8079
8080/// Helper to issue failure from the TBAA verification
8081template <typename... Tys> void TBAAVerifier::CheckFailed(Tys &&... Args) {
8082 if (Diagnostic)
8083 return Diagnostic->CheckFailed(Args...);
8084}
8085
8086#define CheckTBAA(C, ...) \
8087 do { \
8088 if (!(C)) { \
8089 CheckFailed(__VA_ARGS__); \
8090 return false; \
8091 } \
8092 } while (false)
8093
8094/// Verify that \p BaseNode can be used as the "base type" in the struct-path
8095/// TBAA scheme. This means \p BaseNode is either a scalar node, or a
8096/// struct-type node describing an aggregate data structure (like a struct).
8097TBAAVerifier::TBAABaseNodeSummary
8098TBAAVerifier::verifyTBAABaseNode(const Instruction *I, const MDNode *BaseNode,
8099 bool IsNewFormat) {
8100 if (BaseNode->getNumOperands() < 2) {
8101 CheckFailed("Base nodes must have at least two operands", I, BaseNode);
8102 return {true, ~0u};
8103 }
8104
8105 auto Itr = TBAABaseNodes.find(BaseNode);
8106 if (Itr != TBAABaseNodes.end())
8107 return Itr->second;
8108
8109 auto Result = verifyTBAABaseNodeImpl(I, BaseNode, IsNewFormat);
8110 auto InsertResult = TBAABaseNodes.insert({BaseNode, Result});
8111 (void)InsertResult;
8112 assert(InsertResult.second && "We just checked!");
8113 return Result;
8114}
8115
// Uncached worker for verifyTBAABaseNode: validates the operand layout of a
// scalar or struct-type TBAA node and reports the offset bit-width seen.
8116 TBAAVerifier::TBAABaseNodeSummary
8117 TBAAVerifier::verifyTBAABaseNodeImpl(const Instruction *I,
8118 const MDNode *BaseNode, bool IsNewFormat) {
8119 const TBAAVerifier::TBAABaseNodeSummary InvalidNode = {true, ~0u};
8120 
8121 if (BaseNode->getNumOperands() == 2) {
8122 // Scalar nodes can only be accessed at offset 0.
8123 return isValidScalarTBAANode(BaseNode)
8124 ? TBAAVerifier::TBAABaseNodeSummary({false, 0})
8125 : InvalidNode;
8126 }
8127 
// New-format fields come in (type, offset, size) triples; old-format
// fields in (type, offset) pairs after the leading name operand.
8128 if (IsNewFormat) {
8129 if (BaseNode->getNumOperands() % 3 != 0) {
8130 CheckFailed("Access tag nodes must have the number of operands that is a "
8131 "multiple of 3!", BaseNode);
8132 return InvalidNode;
8133 }
8134 } else {
8135 if (BaseNode->getNumOperands() % 2 != 1) {
8136 CheckFailed("Struct tag nodes must have an odd number of operands!",
8137 BaseNode);
8138 return InvalidNode;
8139 }
8140 }
8141 
8142 // Check the type size field.
8143 if (IsNewFormat) {
8144 auto *TypeSizeNode = mdconst::dyn_extract_or_null<ConstantInt>(
8145 BaseNode->getOperand(1));
8146 if (!TypeSizeNode) {
8147 CheckFailed("Type size nodes must be constants!", I, BaseNode);
8148 return InvalidNode;
8149 }
8150 }
8151 
8152 // Check the type name field. In the new format it can be anything.
8153 if (!IsNewFormat && !isa<MDString>(BaseNode->getOperand(0))) {
8154 CheckFailed("Struct tag nodes have a string as their first operand",
8155 BaseNode);
8156 return InvalidNode;
8157 }
8158 
8159 bool Failed = false;
8160 
8161 std::optional<APInt> PrevOffset;
8162 unsigned BitWidth = ~0u;
8163 
8164 // We've already checked that BaseNode is not a degenerate root node with one
8165 // operand in \c verifyTBAABaseNode, so this loop should run at least once.
8166 unsigned FirstFieldOpNo = IsNewFormat ? 3 : 1;
8167 unsigned NumOpsPerField = IsNewFormat ? 3 : 2;
8168 for (unsigned Idx = FirstFieldOpNo; Idx < BaseNode->getNumOperands();
8169 Idx += NumOpsPerField) {
8170 const MDOperand &FieldTy = BaseNode->getOperand(Idx);
8171 const MDOperand &FieldOffset = BaseNode->getOperand(Idx + 1);
8172 if (!isa<MDNode>(FieldTy)) {
8173 CheckFailed("Incorrect field entry in struct type node!", I, BaseNode);
8174 Failed = true;
8175 continue;
8176 }
8177 
8178 auto *OffsetEntryCI =
8180 if (!OffsetEntryCI) {
8181 CheckFailed("Offset entries must be constants!", I, BaseNode);
8182 Failed = true;
8183 continue;
8184 }
8185 
// The first offset fixes the bit-width every later offset must match.
8186 if (BitWidth == ~0u)
8187 BitWidth = OffsetEntryCI->getBitWidth();
8188 
8189 if (OffsetEntryCI->getBitWidth() != BitWidth) {
8190 CheckFailed(
8191 "Bitwidth between the offsets and struct type entries must match", I,
8192 BaseNode);
8193 Failed = true;
8194 continue;
8195 }
8196 
8197 // NB! As far as I can tell, we generate a non-strictly increasing offset
8198 // sequence only from structs that have zero size bit fields. When
8199 // recursing into a contained struct in \c getFieldNodeFromTBAABaseNode we
8200 // pick the field lexically the latest in struct type metadata node. This
8201 // mirrors the actual behavior of the alias analysis implementation.
8202 bool IsAscending =
8203 !PrevOffset || PrevOffset->ule(OffsetEntryCI->getValue());
8204 
8205 if (!IsAscending) {
8206 CheckFailed("Offsets must be increasing!", I, BaseNode);
8207 Failed = true;
8208 }
8209 
8210 PrevOffset = OffsetEntryCI->getValue();
8211 
8212 if (IsNewFormat) {
8213 auto *MemberSizeNode = mdconst::dyn_extract_or_null<ConstantInt>(
8214 BaseNode->getOperand(Idx + 2));
8215 if (!MemberSizeNode) {
8216 CheckFailed("Member size entries must be constants!", I, BaseNode);
8217 Failed = true;
8218 continue;
8219 }
8220 }
8221 }
8222 
8223 return Failed ? InvalidNode
8224 : TBAAVerifier::TBAABaseNodeSummary(false, BitWidth);
8225 }
8226
8227static bool IsRootTBAANode(const MDNode *MD) {
8228 return MD->getNumOperands() < 2;
8229}
8230
// Recursive worker for isValidScalarTBAANode: a scalar node has 2 or 3
// operands, a string name as operand 0, and a parent chain (operand 1) that
// terminates in a root node. \p Visited breaks cycles in malformed input.
8231 static bool IsScalarTBAANodeImpl(const MDNode *MD,
8233 if (MD->getNumOperands() != 2 && MD->getNumOperands() != 3)
8234 return false;
8235 
8236 if (!isa<MDString>(MD->getOperand(0)))
8237 return false;
8238 
8239 if (MD->getNumOperands() == 3) {
8241 if (!(Offset && Offset->isZero() && isa<MDString>(MD->getOperand(0))))
8242 return false;
8243 }
8244 
// Recurse towards the root; a node already in Visited indicates a cycle.
8245 auto *Parent = dyn_cast_or_null<MDNode>(MD->getOperand(1));
8246 return Parent && Visited.insert(Parent).second &&
8247 (IsRootTBAANode(Parent) || IsScalarTBAANodeImpl(Parent, Visited));
8248 }
8249
8250bool TBAAVerifier::isValidScalarTBAANode(const MDNode *MD) {
8251 auto ResultIt = TBAAScalarNodes.find(MD);
8252 if (ResultIt != TBAAScalarNodes.end())
8253 return ResultIt->second;
8254
8255 SmallPtrSet<const MDNode *, 4> Visited;
8256 bool Result = IsScalarTBAANodeImpl(MD, Visited);
8257 auto InsertResult = TBAAScalarNodes.insert({MD, Result});
8258 (void)InsertResult;
8259 assert(InsertResult.second && "Just checked!");
8260
8261 return Result;
8262}
8263
8264/// Returns the field node at the offset \p Offset in \p BaseNode. Update \p
8265/// Offset in place to be the offset within the field node returned.
8266///
8267/// We assume we've okayed \p BaseNode via \c verifyTBAABaseNode.
8268MDNode *TBAAVerifier::getFieldNodeFromTBAABaseNode(const Instruction *I,
8269 const MDNode *BaseNode,
8270 APInt &Offset,
8271 bool IsNewFormat) {
8272 assert(BaseNode->getNumOperands() >= 2 && "Invalid base node!");
8273
8274 // Scalar nodes have only one possible "field" -- their parent in the access
8275 // hierarchy. Offset must be zero at this point, but our caller is supposed
8276 // to check that.
8277 if (BaseNode->getNumOperands() == 2)
8278 return cast<MDNode>(BaseNode->getOperand(1));
8279
8280 unsigned FirstFieldOpNo = IsNewFormat ? 3 : 1;
8281 unsigned NumOpsPerField = IsNewFormat ? 3 : 2;
8282 for (unsigned Idx = FirstFieldOpNo; Idx < BaseNode->getNumOperands();
8283 Idx += NumOpsPerField) {
8284 auto *OffsetEntryCI =
8285 mdconst::extract<ConstantInt>(BaseNode->getOperand(Idx + 1));
8286 if (OffsetEntryCI->getValue().ugt(Offset)) {
8287 if (Idx == FirstFieldOpNo) {
8288 CheckFailed("Could not find TBAA parent in struct type node", I,
8289 BaseNode, &Offset);
8290 return nullptr;
8291 }
8292
8293 unsigned PrevIdx = Idx - NumOpsPerField;
8294 auto *PrevOffsetEntryCI =
8295 mdconst::extract<ConstantInt>(BaseNode->getOperand(PrevIdx + 1));
8296 Offset -= PrevOffsetEntryCI->getValue();
8297 return cast<MDNode>(BaseNode->getOperand(PrevIdx));
8298 }
8299 }
8300
8301 unsigned LastIdx = BaseNode->getNumOperands() - NumOpsPerField;
8302 auto *LastOffsetEntryCI = mdconst::extract<ConstantInt>(
8303 BaseNode->getOperand(LastIdx + 1));
8304 Offset -= LastOffsetEntryCI->getValue();
8305 return cast<MDNode>(BaseNode->getOperand(LastIdx));
8306}
8307
// A node with fewer than three operands (or null) cannot be a new-format
// TBAA type node.
8309 if (!Type || Type->getNumOperands() < 3)
8310 return false;
8311 
8312 // In the new format type nodes shall have a reference to the parent type as
8313 // its first operand.
8314 return isa_and_nonnull<MDNode>(Type->getOperand(0));
8315 }
8316
// Validates a !tbaa access tag: shape of the tag itself, then a walk of the
// struct path from the base node down to the access type, checking offsets
// along the way. Returns false as soon as the tag is known broken.
8318 CheckTBAA(MD->getNumOperands() > 0, "TBAA metadata cannot have 0 operands", I,
8319 MD);
8320 
8321 if (I)
8325 "This instruction shall not have a TBAA access tag!", I);
8326 
8327 bool IsStructPathTBAA =
8328 isa<MDNode>(MD->getOperand(0)) && MD->getNumOperands() >= 3;
8329 
8330 CheckTBAA(IsStructPathTBAA,
8331 "Old-style TBAA is no longer allowed, use struct-path TBAA instead",
8332 I);
8333 
8334 MDNode *BaseNode = dyn_cast_or_null<MDNode>(MD->getOperand(0));
8335 MDNode *AccessType = dyn_cast_or_null<MDNode>(MD->getOperand(1));
8336 
8337 bool IsNewFormat = isNewFormatTBAATypeNode(AccessType);
8338 
8339 if (IsNewFormat) {
8340 CheckTBAA(MD->getNumOperands() == 4 || MD->getNumOperands() == 5,
8341 "Access tag metadata must have either 4 or 5 operands", I, MD);
8342 } else {
8343 CheckTBAA(MD->getNumOperands() < 5,
8344 "Struct tag metadata must have either 3 or 4 operands", I, MD);
8345 }
8346 
8347 // Check the access size field.
8348 if (IsNewFormat) {
8349 auto *AccessSizeNode = mdconst::dyn_extract_or_null<ConstantInt>(
8350 MD->getOperand(3));
8351 CheckTBAA(AccessSizeNode, "Access size field must be a constant", I, MD);
8352 }
8353 
8354 // Check the immutability flag.
// The flag is optional; when present it must be a constant 0 or 1.
8355 unsigned ImmutabilityFlagOpNo = IsNewFormat ? 4 : 3;
8356 if (MD->getNumOperands() == ImmutabilityFlagOpNo + 1) {
8357 auto *IsImmutableCI = mdconst::dyn_extract_or_null<ConstantInt>(
8358 MD->getOperand(ImmutabilityFlagOpNo));
8359 CheckTBAA(IsImmutableCI,
8360 "Immutability tag on struct tag metadata must be a constant", I,
8361 MD);
8362 CheckTBAA(
8363 IsImmutableCI->isZero() || IsImmutableCI->isOne(),
8364 "Immutability part of the struct tag metadata must be either 0 or 1", I,
8365 MD);
8366 }
8367 
8368 CheckTBAA(BaseNode && AccessType,
8369 "Malformed struct tag metadata: base and access-type "
8370 "should be non-null and point to Metadata nodes",
8371 I, MD, BaseNode, AccessType);
8372 
8373 if (!IsNewFormat) {
8374 CheckTBAA(isValidScalarTBAANode(AccessType),
8375 "Access type node must be a valid scalar type", I, MD,
8376 AccessType);
8377 }
8378 
8380 CheckTBAA(OffsetCI, "Offset must be constant integer", I, MD);
8381 
// Walk the struct path from BaseNode towards the root, rebasing Offset at
// each step via getFieldNodeFromTBAABaseNode. StructPath detects cycles.
8382 APInt Offset = OffsetCI->getValue();
8383 bool SeenAccessTypeInPath = false;
8384 
8385 SmallPtrSet<MDNode *, 4> StructPath;
8386 
8387 for (/* empty */; BaseNode && !IsRootTBAANode(BaseNode);
8388 BaseNode =
8389 getFieldNodeFromTBAABaseNode(I, BaseNode, Offset, IsNewFormat)) {
8390 if (!StructPath.insert(BaseNode).second) {
8391 CheckFailed("Cycle detected in struct path", I, MD);
8392 return false;
8393 }
8394 
8395 bool Invalid;
8396 unsigned BaseNodeBitWidth;
8397 std::tie(Invalid, BaseNodeBitWidth) =
8398 verifyTBAABaseNode(I, BaseNode, IsNewFormat);
8399 
8400 // If the base node is invalid in itself, then we've already printed all the
8401 // errors we wanted to print.
8402 if (Invalid)
8403 return false;
8404 
8405 SeenAccessTypeInPath |= BaseNode == AccessType;
8406 
8407 if (isValidScalarTBAANode(BaseNode) || BaseNode == AccessType)
8408 CheckTBAA(Offset == 0, "Offset not zero at the point of scalar access", I,
8409 MD, &Offset);
8410 
8411 CheckTBAA(BaseNodeBitWidth == Offset.getBitWidth() ||
8412 (BaseNodeBitWidth == 0 && Offset == 0) ||
8413 (IsNewFormat && BaseNodeBitWidth == ~0u),
8414 "Access bit-width not the same as description bit-width", I, MD,
8415 BaseNodeBitWidth, Offset.getBitWidth());
8416 
8417 if (IsNewFormat && SeenAccessTypeInPath)
8418 break;
8419 }
8420 
8421 CheckTBAA(SeenAccessTypeInPath, "Did not see access type in access path!", I,
8422 MD);
8423 return true;
8424 }
8425
// Registration of the legacy verifier pass with the legacy pass manager.
8426 char VerifierLegacyPass::ID = 0;
8427 INITIALIZE_PASS(VerifierLegacyPass, "verify", "Module Verifier", false, false)
8428 
// Factory for clients of the legacy pass manager.
8430 return new VerifierLegacyPass(FatalErrors);
8431 }
8432
// New pass-manager entry points for the verifier analysis and pass.
8433 AnalysisKey VerifierAnalysis::Key;
8440 
8445 
// Module pass: run the analysis and escalate to a fatal error when
// FatalErrors is set and either the IR or the debug info is broken.
8447 auto Res = AM.getResult<VerifierAnalysis>(M);
8448 if (FatalErrors && (Res.IRBroken || Res.DebugInfoBroken))
8449 report_fatal_error("Broken module found, compilation aborted!");
8450 
8451 return PreservedAnalyses::all();
8452 }
8453 
// Function pass: only broken IR (not debug info) is fatal here.
8455 auto res = AM.getResult<VerifierAnalysis>(F);
8456 if (res.IRBroken && FatalErrors)
8457 report_fatal_error("Broken function found, compilation aborted!");
8458 
8459 return PreservedAnalyses::all();
8460 }
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
AMDGPU address space definition.
This file declares a class to represent arbitrary precision floating point values and provide a varie...
This file implements a class to represent arbitrary precision integral constant values and operations...
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Atomic ordering constants.
@ RetAttr
@ FnAttr
This file contains the simple types necessary to represent the attributes associated with functions a...
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
This file contains the declarations for the subclasses of Constant, which represent the different fla...
This file declares the LLVM IR specialization of the GenericConvergenceVerifier template.
static DISubprogram * getSubprogram(bool IsDistinct, Ts &&...Args)
dxil translate DXIL Translate Metadata
This file defines the DenseMap class.
This file contains constants used for implementing Dwarf debug support.
static bool runOnFunction(Function &F, bool PostInlining)
This file contains the declarations of entities that describe floating point environment and related ...
#define Check(C,...)
Hexagon Common GEP
This file provides various utilities for inspecting and working with the control flow graph in LLVM I...
Module.h This file contains the declarations for the Module class.
This header defines various interfaces for pass management in LLVM.
This defines the Use class.
const size_t AbstractManglingParser< Derived, Alloc >::NumOps
#define F(x, y, z)
Definition MD5.cpp:54
#define I(x, y, z)
Definition MD5.cpp:57
Machine Check Debug Module
This file implements a map that provides insertion order iteration.
This file provides utility for Memory Model Relaxation Annotations (MMRAs).
static bool isContiguous(const ConstantRange &A, const ConstantRange &B)
This file contains the declarations for metadata subclasses.
#define T
#define T1
ConstantRange Range(APInt(BitWidth, Low), APInt(BitWidth, High))
uint64_t High
uint64_t IntrinsicInst * II
#define P(N)
ppc ctr loops verify
#define INITIALIZE_PASS(passName, arg, name, cfg, analysis)
Definition PassSupport.h:56
This file contains the declarations for profiling metadata utility functions.
const SmallVectorImpl< MachineOperand > & Cond
static unsigned getNumElements(Type *Ty)
static void visit(BasicBlock &Start, std::function< bool(BasicBlock *)> op)
This file contains some templates that are useful if you are working with the STL at all.
verify safepoint Safepoint IR Verifier
BaseType
A given derived pointer can have multiple base pointers through phi/selects.
This file defines the SmallPtrSet class.
This file defines the SmallVector class.
This file contains some functions that are useful when dealing with strings.
static unsigned getBitWidth(Type *Ty, const DataLayout &DL)
Returns the bitwidth of the given scalar or pointer type.
static bool IsScalarTBAANodeImpl(const MDNode *MD, SmallPtrSetImpl< const MDNode * > &Visited)
static bool isType(const Metadata *MD)
static Instruction * getSuccPad(Instruction *Terminator)
static bool isMDTuple(const Metadata *MD)
static bool isNewFormatTBAATypeNode(llvm::MDNode *Type)
#define CheckDI(C,...)
We know that a debug info condition should be true, if not print an error message.
Definition Verifier.cpp:691
static void forEachUser(const Value *User, SmallPtrSet< const Value *, 32 > &Visited, llvm::function_ref< bool(const Value *)> Callback)
Definition Verifier.cpp:732
static bool isDINode(const Metadata *MD)
static bool isScope(const Metadata *MD)
static cl::opt< bool > VerifyNoAliasScopeDomination("verify-noalias-scope-decl-dom", cl::Hidden, cl::init(false), cl::desc("Ensure that llvm.experimental.noalias.scope.decl for identical " "scopes are not dominating"))
static bool isTypeCongruent(Type *L, Type *R)
Two types are "congruent" if they are identical, or if they are both pointer types with different poi...
#define CheckTBAA(C,...)
static bool isConstantIntMetadataOperand(const Metadata *MD)
static bool IsRootTBAANode(const MDNode *MD)
static Value * getParentPad(Value *EHPad)
static bool hasConflictingReferenceFlags(unsigned Flags)
Detect mutually exclusive flags.
static AttrBuilder getParameterABIAttributes(LLVMContext &C, unsigned I, AttributeList Attrs)
static const char PassName[]
static LLVM_ABI bool isValidArbitraryFPFormat(StringRef Format)
Returns true if the given string is a valid arbitrary floating-point format interpretation for llvm....
Definition APFloat.cpp:5999
bool isFiniteNonZero() const
Definition APFloat.h:1548
bool isNegative() const
Definition APFloat.h:1538
const fltSemantics & getSemantics() const
Definition APFloat.h:1546
Class for arbitrary precision integers.
Definition APInt.h:78
bool sgt(const APInt &RHS) const
Signed greater than comparison.
Definition APInt.h:1208
bool isMinValue() const
Determine if this is the smallest unsigned value.
Definition APInt.h:418
bool ule(const APInt &RHS) const
Unsigned less or equal comparison.
Definition APInt.h:1157
bool isPowerOf2() const
Check if this APInt's value is a power of two greater than zero.
Definition APInt.h:441
int64_t getSExtValue() const
Get sign extended value.
Definition APInt.h:1585
bool isMaxValue() const
Determine if this is the largest unsigned value.
Definition APInt.h:400
This class represents a conversion between pointers from one address space to another.
bool isSwiftError() const
Return true if this alloca is used as a swifterror argument to a call.
LLVM_ABI bool isStaticAlloca() const
Return true if this alloca is in the entry block of the function and is a constant size.
Align getAlign() const
Return the alignment of the memory that is being allocated by the instruction.
Type * getAllocatedType() const
Return the type that is being allocated by the instruction.
unsigned getAddressSpace() const
Return the address space for the allocation.
LLVM_ABI bool isArrayAllocation() const
Return true if there is an allocation size parameter to the allocation instruction that is not 1.
const Value * getArraySize() const
Get the number of elements allocated.
PassT::Result & getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs)
Get the result of an analysis pass for a given IR unit.
void setPreservesAll()
Set by analyses that do not transform their input at all.
LLVM_ABI bool hasInRegAttr() const
Return true if this argument has the inreg attribute.
Definition Function.cpp:292
bool isElementwise() const
Return true if this RMW has elementwise vector semantics.
static bool isFPOperation(BinOp Op)
BinOp getOperation() const
static LLVM_ABI StringRef getOperationName(BinOp Op)
AtomicOrdering getOrdering() const
Returns the ordering constraint of this rmw instruction.
bool contains(Attribute::AttrKind A) const
Return true if the builder has the specified attribute.
LLVM_ABI bool hasAttribute(Attribute::AttrKind Kind) const
Return true if the attribute exists in this set.
LLVM_ABI std::string getAsString(bool InAttrGrp=false) const
Functions, function parameters, and return types can have attributes to indicate how they should be t...
Definition Attributes.h:105
LLVM_ABI const ConstantRange & getValueAsConstantRange() const
Return the attribute's value as a ConstantRange.
LLVM_ABI StringRef getValueAsString() const
Return the attribute's value as a string.
AttrKind
This enumeration lists the attributes that can be associated with parameters, function results,...
Definition Attributes.h:124
bool isValid() const
Return true if the attribute is any kind of attribute.
Definition Attributes.h:261
LLVM_ABI Type * getValueAsType() const
Return the attribute's value as a Type.
LLVM Basic Block Representation.
Definition BasicBlock.h:62
iterator begin()
Instruction iterator methods.
Definition BasicBlock.h:461
iterator_range< const_phi_iterator > phis() const
Returns a range that iterates over the phis in the basic block.
Definition BasicBlock.h:530
const Function * getParent() const
Return the enclosing method, or null if none.
Definition BasicBlock.h:213
LLVM_ABI InstListType::const_iterator getFirstNonPHIIt() const
Returns an iterator to the first instruction in this block that is not a PHINode instruction.
LLVM_ABI bool isEntryBlock() const
Return true if this is the entry block of the containing function.
const Instruction & front() const
Definition BasicBlock.h:484
LLVM_ABI const BasicBlock * getUniquePredecessor() const
Return the predecessor of this block if it has a unique predecessor block.
InstListType::iterator iterator
Instruction iterators...
Definition BasicBlock.h:170
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction; assumes that the block is well-formed.
Definition BasicBlock.h:237
This class represents a no-op cast from one type to another.
static LLVM_ABI BlockAddress * lookup(const BasicBlock *BB)
Lookup an existing BlockAddress constant for the given BasicBlock.
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
bool isInlineAsm() const
Check if this call is an inline asm statement.
bool hasInAllocaArgument() const
Determine if there is an inalloca argument.
OperandBundleUse getOperandBundleAt(unsigned Index) const
Return the operand bundle at a specific index.
Function * getCalledFunction() const
Returns the function called, or null if this is an indirect function invocation or the function signa...
bool doesNotAccessMemory(unsigned OpNo) const
bool hasFnAttr(Attribute::AttrKind Kind) const
Determine whether this call has the given attribute.
bool hasRetAttr(Attribute::AttrKind Kind) const
Determine whether the return value has the given attribute.
unsigned getNumOperandBundles() const
Return the number of operand bundles associated with this User.
CallingConv::ID getCallingConv() const
LLVM_ABI bool paramHasAttr(unsigned ArgNo, Attribute::AttrKind Kind) const
Determine whether the argument or parameter has the given attribute.
Attribute getParamAttr(unsigned ArgNo, Attribute::AttrKind Kind) const
Get the attribute of a given kind from a given arg.
iterator_range< bundle_op_iterator > bundle_op_infos()
Return the range [bundle_op_info_begin, bundle_op_info_end).
unsigned countOperandBundlesOfType(StringRef Name) const
Return the number of operand bundles with the tag Name attached to this instruction.
bool onlyReadsMemory(unsigned OpNo) const
Value * getCalledOperand() const
Type * getParamElementType(unsigned ArgNo) const
Extract the elementtype type for a parameter.
Value * getArgOperand(unsigned i) const
FunctionType * getFunctionType() const
LLVM_ABI Intrinsic::ID getIntrinsicID() const
Returns the intrinsic ID of the intrinsic called or Intrinsic::not_intrinsic if the called function i...
iterator_range< User::op_iterator > args()
Iteration adapter for range-for loops.
bool doesNotReturn() const
Determine if the call cannot return.
LLVM_ABI bool onlyAccessesArgMemory() const
Determine if the call can access memory only using pointers based on its arguments.
unsigned arg_size() const
AttributeList getAttributes() const
Return the attributes for this call.
bool hasOperandBundles() const
Return true if this User has any operand bundles.
LLVM_ABI Function * getCaller()
Helper to get the caller (the parent function).
BasicBlock * getIndirectDest(unsigned i) const
unsigned getNumIndirectDests() const
Return the number of callbr indirect dest labels.
bool isMustTailCall() const
static LLVM_ABI bool castIsValid(Instruction::CastOps op, Type *SrcTy, Type *DstTy)
This method can be used to determine if a cast from SrcTy to DstTy using Opcode op is valid or not.
unsigned getNumHandlers() const
return the number of 'handlers' in this catchswitch instruction, except the default handler
Value * getParentPad() const
BasicBlock * getUnwindDest() const
handler_range handlers()
iteration adapter for range-for loops.
BasicBlock * getUnwindDest() const
bool isFPPredicate() const
Definition InstrTypes.h:782
bool isIntPredicate() const
Definition InstrTypes.h:783
static bool isIntPredicate(Predicate P)
Definition InstrTypes.h:776
Value * getCondition() const
bool isMinusOne() const
This function will return true iff every bit in this constant is set to true.
Definition Constants.h:231
bool isNegative() const
Definition Constants.h:214
bool isZero() const
This is just a convenience method to make client code smaller for a common code.
Definition Constants.h:219
unsigned getBitWidth() const
getBitWidth - Return the scalar bitwidth of this constant.
Definition Constants.h:162
uint64_t getZExtValue() const
Return the constant as a 64-bit unsigned integer value after it has been zero extended as appropriate...
Definition Constants.h:168
const APInt & getValue() const
Return the constant as an APInt value reference.
Definition Constants.h:159
Constant * getAddrDiscriminator() const
The address discriminator if any, or the null constant.
Definition Constants.h:1245
Constant * getPointer() const
The pointer that is signed in this ptrauth signed pointer.
Definition Constants.h:1232
ConstantInt * getKey() const
The Key ID, an i32 constant.
Definition Constants.h:1235
Constant * getDeactivationSymbol() const
Definition Constants.h:1254
ConstantInt * getDiscriminator() const
The integer discriminator, an i64 constant, or 0.
Definition Constants.h:1238
static LLVM_ABI bool isOrderedRanges(ArrayRef< ConstantRange > RangesRef)
This class represents a range of values.
const APInt & getLower() const
Return the lower value for this range.
const APInt & getUpper() const
Return the upper value for this range.
LLVM_ABI bool contains(const APInt &Val) const
Return true if the specified value is in the set.
uint32_t getBitWidth() const
Get the bit width of this ConstantRange.
static LLVM_ABI ConstantTokenNone * get(LLVMContext &Context)
Return the ConstantTokenNone.
LLVM_ABI bool isNullValue() const
Return true if this is the value that would be returned by getNullValue.
Definition Constants.cpp:90
LLVM_ABI std::optional< fp::ExceptionBehavior > getExceptionBehavior() const
LLVM_ABI std::optional< RoundingMode > getRoundingMode() const
LLVM_ABI unsigned getNonMetadataArgCount() const
DbgVariableFragmentInfo FragmentInfo
@ FixedPointBinary
Scale factor 2^Factor.
@ FixedPointDecimal
Scale factor 10^Factor.
@ FixedPointRational
Arbitrary rational scale factor.
DIGlobalVariable * getVariable() const
LLVM_ABI DISubprogram * getSubprogram() const
Get the subprogram for this scope.
DILocalScope * getScope() const
Get the local scope for this variable.
Metadata * getRawScope() const
Base class for scope-like contexts.
Subprogram description. Uses SubclassData1.
static const DIScope * getRawRetainedNodeScope(const MDNode *N)
Base class for template parameters.
Base class for types.
Base class for variables.
Metadata * getRawType() const
Metadata * getRawScope() const
A parsed version of the target data layout string in and methods for querying it.
Definition DataLayout.h:64
Records a position in IR for a source label (DILabel).
Base class for non-instruction debug metadata records that have positions within IR.
LLVM_ABI void print(raw_ostream &O, bool IsForDebug=false) const
DebugLoc getDebugLoc() const
LLVM_ABI const BasicBlock * getParent() const
LLVM_ABI Function * getFunction()
Record of a variable value-assignment, aka a non instruction representation of the dbg....
LLVM_ABI Value * getVariableLocationOp(unsigned OpIdx) const
DIExpression * getExpression() const
DILocalVariable * getVariable() const
Metadata * getRawLocation() const
Returns the metadata operand for the first location description.
@ End
Marks the end of the concrete types.
@ Any
To indicate all LocationTypes in searches.
DIExpression * getAddressExpression() const
MDNode * getAsMDNode() const
Return this as a bare MDNode.
Definition DebugLoc.h:290
ValueT lookup(const_arg_type_t< KeyT > Val) const
Return the entry for the specified key, or a default constructed value if no such entry exists.
Definition DenseMap.h:205
iterator find(const_arg_type_t< KeyT > Val)
Definition DenseMap.h:178
bool empty() const
Definition DenseMap.h:109
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Definition DenseMap.h:239
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
Definition Dominators.h:159
This instruction extracts a single (scalar) element from a VectorType value.
static LLVM_ABI bool isValidOperands(const Value *Vec, const Value *Idx)
Return true if an extractelement instruction can be formed with the specified operands.
ArrayRef< unsigned > getIndices() const
static LLVM_ABI Type * getIndexedType(Type *Agg, ArrayRef< unsigned > Idxs)
Returns the type of the element that would be extracted with an extractvalue instruction with the spe...
This instruction compares its operands according to the predicate given to the constructor.
This class represents an extension of floating point types.
static bool isSupportedFloatingPointType(Type *Ty)
Returns true if Ty is a supported floating-point type for phi, select, or call FPMathOperators.
Definition Operator.h:344
This class represents a cast from floating point to signed integer.
This class represents a cast from floating point to unsigned integer.
This class represents a truncation of floating point types.
AtomicOrdering getOrdering() const
Returns the ordering constraint of this fence instruction.
op_range arg_operands()
arg_operands - iteration adapter for range-for loops.
Value * getParentPad() const
Convenience accessors.
FunctionPass class - This class is used to implement most global optimizations.
Definition Pass.h:314
Type * getReturnType() const
FunctionType * getFunctionType() const
Returns the FunctionType for me.
Definition Function.h:211
Intrinsic::ID getIntrinsicID() const LLVM_READONLY
getIntrinsicID - This method returns the ID number of the specified function, or Intrinsic::not_intri...
Definition Function.h:246
DISubprogram * getSubprogram() const
Get the attached subprogram.
CallingConv::ID getCallingConv() const
getCallingConv()/setCallingConv(CC) - These method get and set the calling convention of this functio...
Definition Function.h:272
bool hasPersonalityFn() const
Check whether this function has a personality function.
Definition Function.h:905
const Function & getFunction() const
Definition Function.h:166
const std::string & getGC() const
Definition Function.cpp:818
Type * getReturnType() const
Returns the type of the ret val.
Definition Function.h:216
bool isVarArg() const
isVarArg - Return true if this function takes a variable number of arguments.
Definition Function.h:229
LLVM_ABI Value * getBasePtr() const
LLVM_ABI Value * getDerivedPtr() const
static LLVM_ABI Type * getIndexedType(Type *Ty, ArrayRef< Value * > IdxList)
Returns the result type of a getelementptr with the given source element type and indexes.
static bool isValidLinkage(LinkageTypes L)
Definition GlobalAlias.h:98
const Constant * getAliasee() const
Definition GlobalAlias.h:87
LLVM_ABI const Function * getResolverFunction() const
Definition Globals.cpp:688
static bool isValidLinkage(LinkageTypes L)
Definition GlobalIFunc.h:86
const Constant * getResolver() const
Definition GlobalIFunc.h:73
LLVM_ABI void getAllMetadata(SmallVectorImpl< std::pair< unsigned, MDNode * > > &MDs) const
Appends all metadata attached to this value to MDs, sorting by KindID.
bool hasComdat() const
MDNode * getMetadata(unsigned KindID) const
Get the metadata of given kind attached to this GlobalObject.
bool hasExternalLinkage() const
bool isDSOLocal() const
bool isImplicitDSOLocal() const
LLVM_ABI bool isDeclaration() const
Return true if the primary definition of this global value is outside of the current translation unit...
Definition Globals.cpp:337
bool hasValidDeclarationLinkage() const
LinkageTypes getLinkage() const
bool hasDefaultVisibility() const
bool hasPrivateLinkage() const
bool hasHiddenVisibility() const
bool hasExternalWeakLinkage() const
bool hasDLLImportStorageClass() const
bool hasDLLExportStorageClass() const
bool isDeclarationForLinker() const
unsigned getAddressSpace() const
Module * getParent()
Get the module that this global value is contained inside of...
PointerType * getType() const
Global values are always pointers.
LLVM_ABI bool isInterposable() const
Return true if this global's definition can be substituted with an arbitrary definition at link time ...
Definition Globals.cpp:116
bool hasComdat() const
bool hasCommonLinkage() const
bool hasGlobalUnnamedAddr() const
bool hasAppendingLinkage() const
bool hasAvailableExternallyLinkage() const
Type * getValueType() const
const Constant * getInitializer() const
getInitializer - Return the initializer for this global variable.
bool hasInitializer() const
Definitions have initializers, declarations don't.
MaybeAlign getAlign() const
Returns the alignment of the given variable.
LLVM_ABI uint64_t getGlobalSize(const DataLayout &DL) const
Get the size of this global variable in bytes.
Definition Globals.cpp:569
bool isConstant() const
If the value is a global constant, its value is immutable throughout the runtime execution of the pro...
bool hasDefinitiveInitializer() const
hasDefinitiveInitializer - Whether the global variable has an initializer, and any other instances of...
This instruction compares its operands according to the predicate given to the constructor.
BasicBlock * getDestination(unsigned i)
Return the specified destination.
unsigned getNumDestinations() const
return the number of possible destinations in this indirectbr instruction.
unsigned getNumSuccessors() const
This instruction inserts a single (scalar) element into a VectorType value.
static LLVM_ABI bool isValidOperands(const Value *Vec, const Value *NewElt, const Value *Idx)
Return true if an insertelement instruction can be formed with the specified operands.
ArrayRef< unsigned > getIndices() const
Base class for instruction visitors.
Definition InstVisitor.h:78
void visit(Iterator Start, Iterator End)
Definition InstVisitor.h:87
LLVM_ABI unsigned getNumSuccessors() const LLVM_READONLY
Return the number of successors that this instruction has.
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
LLVM_ABI const Module * getModule() const
Return the module owning the function this instruction belongs to or nullptr if the function does not...
LLVM_ABI bool isAtomic() const LLVM_READONLY
Return true if this instruction has an AtomicOrdering of unordered or higher.
LLVM_ABI const Function * getFunction() const
Return the function this instruction belongs to.
This class represents a cast from an integer to a pointer.
static LLVM_ABI bool mayLowerToFunctionCall(Intrinsic::ID IID)
Check if the intrinsic might lower into a regular function call in the course of IR transformations.
Intrinsic::ID getIntrinsicID() const
Return the intrinsic ID of this intrinsic.
This is an important class for using LLVM in a threaded context.
Definition LLVMContext.h:68
bool isCleanup() const
Return 'true' if this landingpad instruction is a cleanup.
unsigned getNumClauses() const
Get the number of clauses for this landing pad.
bool isCatch(unsigned Idx) const
Return 'true' if the clause and index Idx is a catch clause.
bool isFilter(unsigned Idx) const
Return 'true' if the clause and index Idx is a filter clause.
Constant * getClause(unsigned Idx) const
Get the value of the clause at index Idx.
AtomicOrdering getOrdering() const
Returns the ordering constraint of this load instruction.
SyncScope::ID getSyncScopeID() const
Returns the synchronization scope ID of this load instruction.
Align getAlign() const
Return the alignment of the access that is being performed.
Metadata node.
Definition Metadata.h:1080
const MDOperand & getOperand(unsigned I) const
Definition Metadata.h:1444
bool isTemporary() const
Definition Metadata.h:1264
ArrayRef< MDOperand > operands() const
Definition Metadata.h:1442
unsigned getNumOperands() const
Return number of MDNode operands.
Definition Metadata.h:1450
bool isDistinct() const
Definition Metadata.h:1263
bool isResolved() const
Check if node is fully resolved.
Definition Metadata.h:1260
LLVMContext & getContext() const
Definition Metadata.h:1244
bool equalsStr(StringRef Str) const
Definition Metadata.h:924
Metadata * get() const
Definition Metadata.h:931
LLVM_ABI StringRef getString() const
Definition Metadata.cpp:632
static LLVM_ABI bool isTagMD(const Metadata *MD)
This class implements a map that also provides access to all stored values in a deterministic order.
Definition MapVector.h:38
static LLVM_ABI MetadataAsValue * getIfExists(LLVMContext &Context, Metadata *MD)
Definition Metadata.cpp:118
Metadata * getMetadata() const
Definition Metadata.h:202
Root of the metadata hierarchy.
Definition Metadata.h:64
LLVM_ABI void print(raw_ostream &OS, const Module *M=nullptr, bool IsForDebug=false) const
Print.
unsigned getMetadataID() const
Definition Metadata.h:104
Manage lifetime of a slot tracker for printing IR.
A Module instance is used to store all the information related to an LLVM module.
Definition Module.h:67
Metadata * getModuleFlag(StringRef Key) const
Return the corresponding value if Key appears in module flags, otherwise return null.
Definition Module.cpp:358
LLVM_ABI StringRef getName() const
LLVM_ABI void print(raw_ostream &ROS, bool IsForDebug=false) const
LLVM_ABI unsigned getNumOperands() const
iterator_range< op_iterator > operands()
Definition Metadata.h:1856
op_range incoming_values()
A set of analyses that are preserved following a run of a transformation pass.
Definition Analysis.h:112
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
Definition Analysis.h:118
This class represents a cast from a pointer to an address (non-capturing ptrtoint).
This class represents a cast from a pointer to an integer.
Value * getValue() const
Convenience accessor.
Value * getReturnValue() const
Convenience accessor. Returns null if there is no return value.
This class represents a sign extension of integer types.
This class represents a cast from signed integer to floating point.
static LLVM_ABI const char * areInvalidOperands(Value *Cond, Value *True, Value *False)
Return a string if the specified operands are invalid for a select operation, otherwise return null.
This instruction constructs a fixed permutation of two input vectors.
static LLVM_ABI bool isValidOperands(const Value *V1, const Value *V2, const Value *Mask)
Return true if a shufflevector instruction can be formed with the specified operands.
static LLVM_ABI void getShuffleMask(const Constant *Mask, SmallVectorImpl< int > &Result)
Convert the input shuffle mask operand to a vector of integers.
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
void insert_range(Range &&R)
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
void reserve(size_type N)
iterator insert(iterator I, T &&Elt)
void resize(size_type N)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
std::pair< StringRef, StringRef > split(char Separator) const
Split into two substrings around the first occurrence of a separator character.
Definition StringRef.h:730
static constexpr size_t npos
Definition StringRef.h:58
bool getAsInteger(unsigned Radix, T &Result) const
Parse the current string as an integer of the specified radix.
Definition StringRef.h:490
bool starts_with(StringRef Prefix) const
Check if this string starts with the given Prefix.
Definition StringRef.h:258
constexpr bool empty() const
Check if the string is empty.
Definition StringRef.h:141
unsigned getNumElements() const
Random access to the elements.
LLVM_ABI Type * getTypeAtIndex(const Value *V) const
Given an index value into the type, return the type of the element.
Definition Type.cpp:788
LLVM_ABI bool isScalableTy(SmallPtrSetImpl< const Type * > &Visited) const
Returns true if this struct contains a scalable vector.
Definition Type.cpp:510
Verify that the TBAA Metadatas are valid.
Definition Verifier.h:40
LLVM_ABI bool visitTBAAMetadata(const Instruction *I, const MDNode *MD)
Visit an instruction, or a TBAA node itself as part of a metadata, and return true if it is valid,...
unsigned size() const
Triple - Helper class for working with autoconf configuration names.
Definition Triple.h:47
This class represents a truncation of integer types.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition Twine.h:82
The instances of the Type class are immutable: once they are created, they are never changed.
Definition Type.h:46
bool isByteTy() const
True if this is an instance of ByteType.
Definition Type.h:242
bool isVectorTy() const
True if this is an instance of VectorType.
Definition Type.h:290
LLVM_ABI bool containsNonGlobalTargetExtType(SmallPtrSetImpl< const Type * > &Visited) const
Return true if this type is or contains a target extension type that disallows being used as a global...
Definition Type.cpp:78
bool isArrayTy() const
True if this is an instance of ArrayType.
Definition Type.h:281
LLVM_ABI bool containsNonLocalTargetExtType(SmallPtrSetImpl< const Type * > &Visited) const
Return true if this type is or contains a target extension type that disallows being used as a local.
Definition Type.cpp:94
LLVM_ABI bool isScalableTy(SmallPtrSetImpl< const Type * > &Visited) const
Return true if this is a type whose size is a known multiple of vscale.
Definition Type.cpp:65
bool isLabelTy() const
Return true if this is 'label'.
Definition Type.h:230
bool isIntOrIntVectorTy() const
Return true if this is an integer type or a vector of integer types.
Definition Type.h:263
bool isPointerTy() const
True if this is an instance of PointerType.
Definition Type.h:284
LLVM_ABI bool isTokenLikeTy() const
Returns true if this is 'token' or a token-like target type.
Definition Type.cpp:1136
LLVM_ABI unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
bool isSingleValueType() const
Return true if the type is a valid type for a register in codegen.
Definition Type.h:313
LLVM_ABI bool canLosslesslyBitCastTo(Type *Ty) const
Return true if this type could be converted with a lossless BitCast to type 'Ty'.
Definition Type.cpp:157
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
Definition Type.h:370
bool isSized(SmallPtrSetImpl< Type * > *Visited=nullptr) const
Return true if it makes sense to take the size of this type.
Definition Type.h:328
LLVM_ABI unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
Definition Type.cpp:236
bool isFloatingPointTy() const
Return true if this is one of the floating-point types.
Definition Type.h:186
bool isPtrOrPtrVectorTy() const
Return true if this is a pointer type or a vector of pointer types.
Definition Type.h:287
bool isIntOrPtrTy() const
Return true if this is an integer type or a pointer type.
Definition Type.h:272
bool isIntegerTy() const
True if this is an instance of IntegerType.
Definition Type.h:257
bool isFPOrFPVectorTy() const
Return true if this is a FP type or a vector of FP.
Definition Type.h:227
bool isVoidTy() const
Return true if this is 'void'.
Definition Type.h:141
bool isMetadataTy() const
Return true if this is 'metadata'.
Definition Type.h:233
This class represents a cast unsigned integer to floating point.
op_range operands()
Definition User.h:267
Value * getOperand(unsigned i) const
Definition User.h:207
unsigned getNumOperands() const
Definition User.h:229
This class represents the va_arg llvm instruction, which returns an argument of the specified type gi...
Value * getValue() const
Definition Metadata.h:499
LLVM Value Representation.
Definition Value.h:75
iterator_range< user_iterator > materialized_users()
Definition Value.h:420
Type * getType() const
All values are typed, get the type of this value.
Definition Value.h:255
LLVM_ABI const Value * stripPointerCastsAndAliases() const
Strip off pointer casts, all-zero GEPs, address space casts, and aliases.
Definition Value.cpp:713
LLVMContext & getContext() const
All values hold a context through their type.
Definition Value.h:258
LLVM_ABI const Value * stripInBoundsOffsets(function_ref< void(const Value *)> Func=[](const Value *) {}) const
Strip off pointer casts and inbounds GEPs.
Definition Value.cpp:820
iterator_range< user_iterator > users()
Definition Value.h:426
bool materialized_use_empty() const
Definition Value.h:351
LLVM_ABI const Value * stripPointerCasts() const
Strip off pointer casts, all-zero GEPs and address space casts.
Definition Value.cpp:709
bool hasName() const
Definition Value.h:261
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
Definition Value.cpp:318
Check a module for errors, and report separate error states for IR and debug info errors.
Definition Verifier.h:109
LLVM_ABI Result run(Module &M, ModuleAnalysisManager &)
LLVM_ABI PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM)
This class represents zero extension of integer types.
constexpr bool isNonZero() const
Definition TypeSize.h:155
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
Definition TypeSize.h:168
constexpr ScalarTy getKnownMinValue() const
Returns the minimum value this quantity can represent.
Definition TypeSize.h:165
An efficient, type-erasing, non-owning reference to a callable.
const ParentTy * getParent() const
Definition ilist_node.h:34
NodeTy * getNextNode()
Get the next node, or nullptr for the list tail.
Definition ilist_node.h:348
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition raw_ostream.h:53
CallInst * Call
This file contains the declaration of the Comdat class, which represents a single COMDAT in LLVM.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
@ FLAT_ADDRESS
Address space for flat memory.
@ GLOBAL_ADDRESS
Address space for global memory (RAT0, VTX0).
@ PRIVATE_ADDRESS
Address space for private memory.
constexpr char Align[]
Key for Kernel::Arg::Metadata::mAlign.
constexpr char Args[]
Key for Kernel::Metadata::mArgs.
constexpr char Attrs[]
Key for Kernel::Metadata::mAttrs.
bool isFlatGlobalAddrSpace(unsigned AS)
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
@ Entry
Definition COFF.h:862
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
Definition CallingConv.h:24
@ C
The default llvm calling convention, compatible with C.
Definition CallingConv.h:34
@ BasicBlock
Various leaf nodes.
Definition ISDOpcodes.h:81
LLVM_ABI bool hasConstrainedFPRoundingModeOperand(ID QID)
Returns true if the intrinsic ID is for one of the "ConstrainedFloating-Point Intrinsics" that take r...
LLVM_ABI StringRef getName(ID id)
Return the LLVM name for an intrinsic, such as "llvm.ppc.altivec.lvx".
static const int NoAliasScopeDeclScopeArg
Definition Intrinsics.h:42
LLVM_ABI bool isSignatureValid(Intrinsic::ID ID, FunctionType *FT, SmallVectorImpl< Type * > &OverloadTys, raw_ostream &OS=nulls())
Returns true if FT is a valid function type for intrinsic ID.
std::variant< std::monostate, Loc::Single, Loc::Multi, Loc::MMI, Loc::EntryValue > Variant
Alias for the std::variant specialization base class of DbgVariable.
Definition DwarfDebug.h:190
Flag
These should be considered private to the implementation of the MCInstrDesc class.
@ System
Synchronized with respect to all concurrently executing threads.
Definition LLVMContext.h:58
LLVM_ABI std::optional< VFInfo > tryDemangleForVFABI(StringRef MangledName, const FunctionType *FTy)
Function to construct a VFInfo out of a mangled names in the following format:
@ CE
Windows NT (Windows on ARM)
Definition MCAsmInfo.h:50
LLVM_ABI AssignmentInstRange getAssignmentInsts(DIAssignID *ID)
Return a range of instructions (typically just one) that have ID as an attachment.
initializer< Ty > init(const Ty &Val)
@ DW_MACINFO_undef
Definition Dwarf.h:818
@ DW_MACINFO_start_file
Definition Dwarf.h:819
@ DW_MACINFO_define
Definition Dwarf.h:817
std::enable_if_t< detail::IsValidPointer< X, Y >::value, X * > dyn_extract_or_null(Y &&MD)
Extract a Value from Metadata, if any, allowing null.
Definition Metadata.h:709
std::enable_if_t< detail::IsValidPointer< X, Y >::value, X * > extract_or_null(Y &&MD)
Extract a Value from Metadata, allowing null.
Definition Metadata.h:683
std::enable_if_t< detail::IsValidPointer< X, Y >::value, X * > dyn_extract(Y &&MD)
Extract a Value from Metadata, if any.
Definition Metadata.h:696
std::enable_if_t< detail::IsValidPointer< X, Y >::value, X * > extract(Y &&MD)
Extract a Value from Metadata.
Definition Metadata.h:668
@ User
could "use" a pointer
NodeAddr< UseNode * > Use
Definition RDFGraph.h:385
NodeAddr< NodeBase * > Node
Definition RDFGraph.h:381
friend class Instruction
Iterator for Instructions in a `BasicBlock.
Definition BasicBlock.h:73
This is an optimization pass for GlobalISel generic memory operations.
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
Definition STLExtras.h:315
@ Low
Lower the current thread's priority such that it does not affect foreground tasks significantly.
Definition Threading.h:280
@ Offset
Definition DWP.cpp:557
FunctionAddr VTableAddr Value
Definition InstrProf.h:137
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1738
LLVM_ABI bool canInstructionHaveMMRAs(const Instruction &I)
detail::zippy< detail::zip_first, T, U, Args... > zip_equal(T &&t, U &&u, Args &&...args)
zip iterator that assumes that all iteratees have the same length.
Definition STLExtras.h:840
LLVM_ABI unsigned getBranchWeightOffset(const MDNode *ProfileData)
Return the offset to the first branch weight data.
constexpr bool isInt(int64_t x)
Checks if an integer fits into the given bit width.
Definition MathExtras.h:165
auto enumerate(FirstRange &&First, RestRanges &&...Rest)
Given two or more input ranges, returns a new range whose values are tuples (A, B,...
Definition STLExtras.h:2553
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:643
LLVM_ABI bool verifyFunction(const Function &F, raw_ostream *OS=nullptr)
Check a function for errors, useful for use when debugging a pass.
AllocFnKind
Definition Attributes.h:53
testing::Matcher< const detail::ErrorHolder & > Failed()
Definition Error.h:198
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
Definition STLExtras.h:2207
LLVM_ABI DenseMap< BasicBlock *, ColorVector > colorEHFunclets(Function &F)
If an EH funclet personality is in use (see isFuncletEHPersonality), this will recompute which blocks...
constexpr bool isUIntN(unsigned N, uint64_t x)
Checks if an unsigned integer fits into the given (dynamic) bit width.
Definition MathExtras.h:243
constexpr bool isPowerOf2_64(uint64_t Value)
Return true if the argument is a power of two > 0 (64 bit edition.)
Definition MathExtras.h:284
gep_type_iterator gep_type_end(const User *GEP)
bool isa_and_nonnull(const Y &Val)
Definition Casting.h:676
Op::Description Desc
bool isScopedEHPersonality(EHPersonality Pers)
Returns true if this personality uses scope-style EH IR instructions: catchswitch,...
auto dyn_cast_or_null(const Y &Val)
Definition Casting.h:753
GenericConvergenceVerifier< SSAContext > ConvergenceVerifier
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
Definition MathExtras.h:279
bool isModSet(const ModRefInfo MRI)
Definition ModRef.h:49
void sort(IteratorTy Start, IteratorTy End)
Definition STLExtras.h:1635
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition Debug.cpp:209
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
Definition Error.cpp:163
generic_gep_type_iterator<> gep_type_iterator
FunctionAddr VTableAddr Count
Definition InstrProf.h:139
LLVM_ABI EHPersonality classifyEHPersonality(const Value *Pers)
See if the given exception handling personality function is one that we understand.
iterator_range< SplittingIterator > split(StringRef Str, StringRef Separator)
Split the specified string over a separator and return a range-compatible iterable over its partition...
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
Definition Casting.h:547
LLVM_ABI bool isValueProfileMD(const MDNode *ProfileData)
Checks if an MDNode contains value profiling Metadata.
LLVM_ABI raw_fd_ostream & errs()
This returns a reference to a raw_ostream for standard error.
LLVM_ABI unsigned getNumBranchWeights(const MDNode &ProfileData)
AtomicOrdering
Atomic ordering for LLVM's memory model.
@ First
Helpers to iterate all locations in the MemoryEffectsBase class.
Definition ModRef.h:74
LLVM_ABI FunctionPass * createVerifierPass(bool FatalErrors=true)
FunctionAddr VTableAddr Next
Definition InstrProf.h:141
DWARFExpression::Operation Op
ArrayRef(const T &OneElt) -> ArrayRef< T >
constexpr unsigned BitWidth
TinyPtrVector< BasicBlock * > ColorVector
LLVM_ABI const char * LLVMLoopEstimatedTripCount
Profile-based loop metadata that should be accessed only by using llvm::getLoopEstimatedTripCount and...
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:559
LLVM_ABI std::optional< RoundingMode > convertStrToRoundingMode(StringRef)
Returns a valid RoundingMode enumerator when given a string that is valid as input in constrained int...
Definition FPEnv.cpp:25
gep_type_iterator gep_type_begin(const User *GEP)
LLVM_ABI std::unique_ptr< GCStrategy > getGCStrategy(const StringRef Name)
Lookup the GCStrategy object associated with the given gc name.
auto predecessors(const MachineBasicBlock *BB)
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Definition STLExtras.h:1946
bool pred_empty(const BasicBlock *BB)
Definition CFG.h:119
bool isHexDigit(char C)
Checks if character C is a hexadecimal numeric character.
AnalysisManager< Function > FunctionAnalysisManager
Convenience typedef for the Function analysis manager.
constexpr bool isCallableCC(CallingConv::ID CC)
LLVM_ABI bool verifyModule(const Module &M, raw_ostream *OS=nullptr, bool *BrokenDebugInfo=nullptr)
Check a module for errors.
AnalysisManager< Module > ModuleAnalysisManager
Convenience typedef for the Module analysis manager.
Definition MIRParser.h:39
#define N
constexpr uint64_t value() const
This is a hole in the type system and should not be abused.
Definition Alignment.h:77
A special type used by analysis passes to provide an address that identifies that particular analysis...
Definition Analysis.h:29
static LLVM_ABI const char * SyntheticFunctionEntryCount
static LLVM_ABI const char * UnknownBranchWeightsMarker
static LLVM_ABI const char * ValueProfile
static LLVM_ABI const char * FunctionEntryCount
static LLVM_ABI const char * BranchWeights
uint32_t getTagID() const
Return the tag of this operand bundle as an integer.
ArrayRef< Use > Inputs
void DebugInfoCheckFailed(const Twine &Message)
A debug info check failed.
Definition Verifier.cpp:309
VerifierSupport(raw_ostream *OS, const Module &M)
Definition Verifier.cpp:158
bool Broken
Track the brokenness of the module while recursively visiting.
Definition Verifier.cpp:152
void CheckFailed(const Twine &Message, const T1 &V1, const Ts &... Vs)
A check failed (with values to print).
Definition Verifier.cpp:302
bool BrokenDebugInfo
Broken debug info can be "recovered" from by stripping the debug info.
Definition Verifier.cpp:154
LLVMContext & Context
Definition Verifier.cpp:149
bool TreatBrokenDebugInfoAsError
Whether to treat broken debug info as an error.
Definition Verifier.cpp:156
void CheckFailed(const Twine &Message)
A check failed, so printout out the condition and the message.
Definition Verifier.cpp:291
const Module & M
Definition Verifier.cpp:145
const DataLayout & DL
Definition Verifier.cpp:148
void DebugInfoCheckFailed(const Twine &Message, const T1 &V1, const Ts &... Vs)
A debug info check failed (with values to print).
Definition Verifier.cpp:318
const Triple & TT
Definition Verifier.cpp:147
ModuleSlotTracker MST
Definition Verifier.cpp:146