1//===-- Verifier.cpp - Implement the Module Verifier ----------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file defines the function verifier interface, which can be used for some
10// basic correctness checking of input to the system.
11//
12// Note that this does not provide full 'Java-style' security and verification;
13// instead it just tries to ensure that code is well-formed.
14//
15// * Both of a binary operator's parameters are of the same type
16// * Verify that the indices of mem access instructions match other operands
17// * Verify that arithmetic and other operations are only performed on
18//   first-class types, e.g. that shifts & logical ops only happen on integrals
19// * All of the constants in a switch statement are of the correct type
20// * The code is in valid SSA form
21// * It should be illegal to put a label into any other type (like a structure)
22// or to return one. [except constant arrays!]
23// * Only phi nodes can be self-referential: 'add i32 %0, %0 ; <int>:0' is bad
24// * PHI nodes must have an entry for each predecessor, with no extras.
25// * PHI nodes must be the first thing in a basic block, all grouped together
26// * All basic blocks should only end with terminator insts, not contain them
27// * The entry node to a function must not have predecessors
28// * All Instructions must be embedded into a basic block
29// * Functions cannot take a void-typed parameter
30// * Verify that a function's argument list agrees with its declared type.
31// * It is illegal to specify a name for a void value.
32// * It is illegal to have an internal global value with no initializer
33// * It is illegal to have a ret instruction that returns a value that does not
34// agree with the function return value type.
35// * Function call argument types match the function prototype
36// * A landing pad is defined by a landingpad instruction, and can be jumped to
37// only by the unwind edge of an invoke instruction.
38// * A landingpad instruction must be the first non-PHI instruction in the
39// block.
40// * Landingpad instructions must be in a function with a personality function.
41// * Convergence control intrinsics are introduced in ConvergentOperations.rst.
42// The applied restrictions are too numerous to list here.
43// * The convergence entry intrinsic and the loop heart must be the first
44// non-PHI instruction in their respective block. This does not conflict with
45// the landing pads, since these two kinds cannot occur in the same block.
46// * All other things that are tested by asserts spread about the code...
47//
48//===----------------------------------------------------------------------===//
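//
// For orientation only (not part of the verifier itself): client code usually
// reaches these checks through the entry points declared in
// llvm/IR/Verifier.h. A minimal sketch, assuming an existing Module M and
// Function F:
//
//   #include "llvm/IR/Verifier.h"
//   #include "llvm/Support/raw_ostream.h"
//
//   if (llvm::verifyModule(M, &llvm::errs()))
//     ; // module is broken; diagnostics were written to errs()
//   if (llvm::verifyFunction(F, &llvm::errs()))
//     ; // this one function is broken
//
// Both helpers return true when verification fails.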
49
50#include "llvm/IR/Verifier.h"
51#include "llvm/ADT/APFloat.h"
52#include "llvm/ADT/APInt.h"
53#include "llvm/ADT/ArrayRef.h"
54#include "llvm/ADT/DenseMap.h"
55#include "llvm/ADT/MapVector.h"
56#include "llvm/ADT/STLExtras.h"
60#include "llvm/ADT/StringRef.h"
61#include "llvm/ADT/Twine.h"
63#include "llvm/IR/Argument.h"
65#include "llvm/IR/Attributes.h"
66#include "llvm/IR/BasicBlock.h"
67#include "llvm/IR/CFG.h"
68#include "llvm/IR/CallingConv.h"
69#include "llvm/IR/Comdat.h"
70#include "llvm/IR/Constant.h"
73#include "llvm/IR/Constants.h"
75#include "llvm/IR/DataLayout.h"
76#include "llvm/IR/DebugInfo.h"
78#include "llvm/IR/DebugLoc.h"
80#include "llvm/IR/Dominators.h"
82#include "llvm/IR/Function.h"
83#include "llvm/IR/GCStrategy.h"
84#include "llvm/IR/GlobalAlias.h"
85#include "llvm/IR/GlobalValue.h"
87#include "llvm/IR/InlineAsm.h"
88#include "llvm/IR/InstVisitor.h"
89#include "llvm/IR/InstrTypes.h"
90#include "llvm/IR/Instruction.h"
93#include "llvm/IR/Intrinsics.h"
94#include "llvm/IR/IntrinsicsAArch64.h"
95#include "llvm/IR/IntrinsicsAMDGPU.h"
96#include "llvm/IR/IntrinsicsARM.h"
97#include "llvm/IR/IntrinsicsNVPTX.h"
98#include "llvm/IR/IntrinsicsWebAssembly.h"
99#include "llvm/IR/LLVMContext.h"
101#include "llvm/IR/Metadata.h"
102#include "llvm/IR/Module.h"
104#include "llvm/IR/PassManager.h"
106#include "llvm/IR/Statepoint.h"
107#include "llvm/IR/Type.h"
108#include "llvm/IR/Use.h"
109#include "llvm/IR/User.h"
111#include "llvm/IR/Value.h"
113#include "llvm/Pass.h"
117#include "llvm/Support/Casting.h"
121#include "llvm/Support/ModRef.h"
124#include <algorithm>
125#include <cassert>
126#include <cstdint>
127#include <memory>
128#include <optional>
129#include <string>
130#include <utility>
131
132using namespace llvm;
133
135 "verify-noalias-scope-decl-dom", cl::Hidden, cl::init(false),
136 cl::desc("Ensure that llvm.experimental.noalias.scope.decl for identical "
137 "scopes are not dominating"));
138
141 const Module &M;
143 const Triple &TT;
146
147 /// Track the brokenness of the module while recursively visiting.
148 bool Broken = false;
149 /// Broken debug info can be "recovered" from by stripping the debug info.
150 bool BrokenDebugInfo = false;
151 /// Whether to treat broken debug info as an error.
153
155 : OS(OS), M(M), MST(&M), TT(M.getTargetTriple()), DL(M.getDataLayout()),
156 Context(M.getContext()) {}
157
158private:
159 void Write(const Module *M) {
160 *OS << "; ModuleID = '" << M->getModuleIdentifier() << "'\n";
161 }
162
163 void Write(const Value *V) {
164 if (V)
165 Write(*V);
166 }
167
168 void Write(const Value &V) {
169 if (isa<Instruction>(V)) {
170 V.print(*OS, MST);
171 *OS << '\n';
172 } else {
173 V.printAsOperand(*OS, true, MST);
174 *OS << '\n';
175 }
176 }
177
178 void Write(const DbgRecord *DR) {
179 if (DR) {
180 DR->print(*OS, MST, false);
181 *OS << '\n';
182 }
183 }
184
186 switch (Type) {
188 *OS << "value";
189 break;
191 *OS << "declare";
192 break;
194 *OS << "declare_value";
195 break;
197 *OS << "assign";
198 break;
200 *OS << "end";
201 break;
203 *OS << "any";
204 break;
205 };
206 }
207
208 void Write(const Metadata *MD) {
209 if (!MD)
210 return;
211 MD->print(*OS, MST, &M);
212 *OS << '\n';
213 }
214
215 template <class T> void Write(const MDTupleTypedArrayWrapper<T> &MD) {
216 Write(MD.get());
217 }
218
219 void Write(const NamedMDNode *NMD) {
220 if (!NMD)
221 return;
222 NMD->print(*OS, MST);
223 *OS << '\n';
224 }
225
226 void Write(Type *T) {
227 if (!T)
228 return;
229 *OS << ' ' << *T;
230 }
231
232 void Write(const Comdat *C) {
233 if (!C)
234 return;
235 *OS << *C;
236 }
237
238 void Write(const APInt *AI) {
239 if (!AI)
240 return;
241 *OS << *AI << '\n';
242 }
243
244 void Write(const unsigned i) { *OS << i << '\n'; }
245
246 // NOLINTNEXTLINE(readability-identifier-naming)
247 void Write(const Attribute *A) {
248 if (!A)
249 return;
250 *OS << A->getAsString() << '\n';
251 }
252
253 // NOLINTNEXTLINE(readability-identifier-naming)
254 void Write(const AttributeSet *AS) {
255 if (!AS)
256 return;
257 *OS << AS->getAsString() << '\n';
258 }
259
260 // NOLINTNEXTLINE(readability-identifier-naming)
261 void Write(const AttributeList *AL) {
262 if (!AL)
263 return;
264 AL->print(*OS);
265 }
266
267 void Write(Printable P) { *OS << P << '\n'; }
268
269 template <typename T> void Write(ArrayRef<T> Vs) {
270 for (const T &V : Vs)
271 Write(V);
272 }
273
274 template <typename T1, typename... Ts>
275 void WriteTs(const T1 &V1, const Ts &... Vs) {
276 Write(V1);
277 WriteTs(Vs...);
278 }
279
280 template <typename... Ts> void WriteTs() {}
281
282public:
284 /// A check failed, so print out the condition and the message.
284 ///
285 /// This provides a nice place to put a breakpoint if you want to see why
286 /// something is not correct.
287 void CheckFailed(const Twine &Message) {
288 if (OS)
289 *OS << Message << '\n';
290 Broken = true;
291 }
292
293 /// A check failed (with values to print).
294 ///
295 /// This calls the Message-only version so that the above is easier to set a
296 /// breakpoint on.
297 template <typename T1, typename... Ts>
298 void CheckFailed(const Twine &Message, const T1 &V1, const Ts &... Vs) {
299 CheckFailed(Message);
300 if (OS)
301 WriteTs(V1, Vs...);
302 }
303
304 /// A debug info check failed.
305 void DebugInfoCheckFailed(const Twine &Message) {
306 if (OS)
307 *OS << Message << '\n';
309 BrokenDebugInfo = true;
310 }
311
312 /// A debug info check failed (with values to print).
313 template <typename T1, typename... Ts>
314 void DebugInfoCheckFailed(const Twine &Message, const T1 &V1,
315 const Ts &... Vs) {
316 DebugInfoCheckFailed(Message);
317 if (OS)
318 WriteTs(V1, Vs...);
319 }
320};
321
322namespace {
323
324class Verifier : public InstVisitor<Verifier>, VerifierSupport {
325 friend class InstVisitor<Verifier>;
326 DominatorTree DT;
327
328 /// When verifying a basic block, keep track of all of the
329 /// instructions we have seen so far.
330 ///
331 /// This allows us to do efficient dominance checks for the case when an
332 /// instruction has an operand that is an instruction in the same block.
333 SmallPtrSet<Instruction *, 16> InstsInThisBlock;
334
335 /// Keep track of the metadata nodes that have been checked already.
337
338 /// Keep track of which DISubprogram is attached to which function.
340
341 /// Track all DICompileUnits visited.
343
344 /// The result type for a landingpad.
345 Type *LandingPadResultTy;
346
347 /// Whether we've seen a call to @llvm.localescape in this function
348 /// already.
349 bool SawFrameEscape;
350
351 /// Whether the current function has a DISubprogram attached to it.
352 bool HasDebugInfo = false;
353
354 /// Stores the count of how many objects were passed to llvm.localescape for a
355 /// given function and the largest index passed to llvm.localrecover.
357
358 // Maps catchswitches and cleanuppads that unwind to siblings to the
359 // terminators that indicate the unwind, used to detect cycles therein.
361
362 /// Cache which blocks are in which funclet, if an EH funclet personality is
363 /// in use. Otherwise empty.
364 DenseMap<BasicBlock *, ColorVector> BlockEHFuncletColors;
365
366 /// Cache of constants visited in search of ConstantExprs.
367 SmallPtrSet<const Constant *, 32> ConstantExprVisited;
368
369 /// Cache of declarations of the llvm.experimental.deoptimize.<ty> intrinsic.
370 SmallVector<const Function *, 4> DeoptimizeDeclarations;
371
372 /// Cache of attribute lists verified.
373 SmallPtrSet<const void *, 32> AttributeListsVisited;
374
375 // Verify that this GlobalValue is only used in this module.
376 // This map is used to avoid visiting uses twice. We can arrive at a user
377 // twice, if they have multiple operands. In particular for very large
378 // constant expressions, we can arrive at a particular user many times.
379 SmallPtrSet<const Value *, 32> GlobalValueVisited;
380
381 // Keeps track of duplicate function argument debug info.
383
384 TBAAVerifier TBAAVerifyHelper;
385 ConvergenceVerifier ConvergenceVerifyHelper;
386
387 SmallVector<IntrinsicInst *, 4> NoAliasScopeDecls;
388
389 void checkAtomicMemAccessSize(Type *Ty, const Instruction *I);
390
391public:
392 explicit Verifier(raw_ostream *OS, bool ShouldTreatBrokenDebugInfoAsError,
393 const Module &M)
394 : VerifierSupport(OS, M), LandingPadResultTy(nullptr),
395 SawFrameEscape(false), TBAAVerifyHelper(this) {
396 TreatBrokenDebugInfoAsError = ShouldTreatBrokenDebugInfoAsError;
397 }
398
399 bool hasBrokenDebugInfo() const { return BrokenDebugInfo; }
400
401 bool verify(const Function &F) {
402 llvm::TimeTraceScope timeScope("Verifier");
403 assert(F.getParent() == &M &&
404 "An instance of this class only works with a specific module!");
405
406 // First ensure the function is well-enough formed to compute dominance
407 // information, and directly compute a dominance tree. We don't rely on the
408 // pass manager to provide this as it isolates us from a potentially
409 // out-of-date dominator tree and makes it significantly more complex to run
410 // this code outside of a pass manager.
411 // FIXME: It's really gross that we have to cast away constness here.
412 if (!F.empty())
413 DT.recalculate(const_cast<Function &>(F));
414
415 for (const BasicBlock &BB : F) {
416 if (!BB.empty() && BB.back().isTerminator())
417 continue;
418
419 if (OS) {
420 *OS << "Basic Block in function '" << F.getName()
421 << "' does not have terminator!\n";
422 BB.printAsOperand(*OS, true, MST);
423 *OS << "\n";
424 }
425 return false;
426 }
427
428 auto FailureCB = [this](const Twine &Message) {
429 this->CheckFailed(Message);
430 };
431 ConvergenceVerifyHelper.initialize(OS, FailureCB, F);
432
433 Broken = false;
434 // FIXME: We strip const here because the inst visitor strips const.
435 visit(const_cast<Function &>(F));
436 verifySiblingFuncletUnwinds();
437
438 if (ConvergenceVerifyHelper.sawTokens())
439 ConvergenceVerifyHelper.verify(DT);
440
441 InstsInThisBlock.clear();
442 DebugFnArgs.clear();
443 LandingPadResultTy = nullptr;
444 SawFrameEscape = false;
445 SiblingFuncletInfo.clear();
446 verifyNoAliasScopeDecl();
447 NoAliasScopeDecls.clear();
448
449 return !Broken;
450 }
451
452 /// Verify the module that this instance of \c Verifier was initialized with.
453 bool verify() {
454 Broken = false;
455
456 // Collect all declarations of the llvm.experimental.deoptimize intrinsic.
457 for (const Function &F : M)
458 if (F.getIntrinsicID() == Intrinsic::experimental_deoptimize)
459 DeoptimizeDeclarations.push_back(&F);
460
461 // Now that we've visited every function, verify that we never asked to
462 // recover a frame index that wasn't escaped.
463 verifyFrameRecoverIndices();
464 for (const GlobalVariable &GV : M.globals())
465 visitGlobalVariable(GV);
466
467 for (const GlobalAlias &GA : M.aliases())
468 visitGlobalAlias(GA);
469
470 for (const GlobalIFunc &GI : M.ifuncs())
471 visitGlobalIFunc(GI);
472
473 for (const NamedMDNode &NMD : M.named_metadata())
474 visitNamedMDNode(NMD);
475
476 for (const StringMapEntry<Comdat> &SMEC : M.getComdatSymbolTable())
477 visitComdat(SMEC.getValue());
478
479 visitModuleFlags();
480 visitModuleIdents();
481 visitModuleCommandLines();
482 visitModuleErrnoTBAA();
483
484 verifyCompileUnits();
485
486 verifyDeoptimizeCallingConvs();
487 DISubprogramAttachments.clear();
488 return !Broken;
489 }
490
491private:
492 /// Whether a metadata node is allowed to be, or contain, a DILocation.
493 enum class AreDebugLocsAllowed { No, Yes };
494
495 /// Metadata that should be treated as a range, with slightly different
496 /// requirements.
497 enum class RangeLikeMetadataKind {
498 Range, // MD_range
499 AbsoluteSymbol, // MD_absolute_symbol
500 NoaliasAddrspace // MD_noalias_addrspace
501 };
502
503 // Verification methods...
504 void visitGlobalValue(const GlobalValue &GV);
505 void visitGlobalVariable(const GlobalVariable &GV);
506 void visitGlobalAlias(const GlobalAlias &GA);
507 void visitGlobalIFunc(const GlobalIFunc &GI);
508 void visitAliaseeSubExpr(const GlobalAlias &A, const Constant &C);
509 void visitAliaseeSubExpr(SmallPtrSetImpl<const GlobalAlias *> &Visited,
510 const GlobalAlias &A, const Constant &C);
511 void visitNamedMDNode(const NamedMDNode &NMD);
512 void visitMDNode(const MDNode &MD, AreDebugLocsAllowed AllowLocs);
513 void visitMetadataAsValue(const MetadataAsValue &MD, Function *F);
514 void visitValueAsMetadata(const ValueAsMetadata &MD, Function *F);
515 void visitDIArgList(const DIArgList &AL, Function *F);
516 void visitComdat(const Comdat &C);
517 void visitModuleIdents();
518 void visitModuleCommandLines();
519 void visitModuleErrnoTBAA();
520 void visitModuleFlags();
521 void visitModuleFlag(const MDNode *Op,
522 DenseMap<const MDString *, const MDNode *> &SeenIDs,
523 SmallVectorImpl<const MDNode *> &Requirements);
524 void visitModuleFlagCGProfileEntry(const MDOperand &MDO);
525 void visitFunction(const Function &F);
526 void visitBasicBlock(BasicBlock &BB);
527 void verifyRangeLikeMetadata(const Value &V, const MDNode *Range, Type *Ty,
528 RangeLikeMetadataKind Kind);
529 void visitRangeMetadata(Instruction &I, MDNode *Range, Type *Ty);
530 void visitNoaliasAddrspaceMetadata(Instruction &I, MDNode *Range, Type *Ty);
531 void visitDereferenceableMetadata(Instruction &I, MDNode *MD);
532 void visitNofreeMetadata(Instruction &I, MDNode *MD);
533 void visitProfMetadata(Instruction &I, MDNode *MD);
534 void visitCallStackMetadata(MDNode *MD);
535 void visitMemProfMetadata(Instruction &I, MDNode *MD);
536 void visitCallsiteMetadata(Instruction &I, MDNode *MD);
537 void visitCalleeTypeMetadata(Instruction &I, MDNode *MD);
538 void visitDIAssignIDMetadata(Instruction &I, MDNode *MD);
539 void visitMMRAMetadata(Instruction &I, MDNode *MD);
540 void visitAnnotationMetadata(MDNode *Annotation);
541 void visitAliasScopeMetadata(const MDNode *MD);
542 void visitAliasScopeListMetadata(const MDNode *MD);
543 void visitAccessGroupMetadata(const MDNode *MD);
544 void visitCapturesMetadata(Instruction &I, const MDNode *Captures);
545 void visitAllocTokenMetadata(Instruction &I, MDNode *MD);
546
547 template <class Ty> bool isValidMetadataArray(const MDTuple &N);
548#define HANDLE_SPECIALIZED_MDNODE_LEAF(CLASS) void visit##CLASS(const CLASS &N);
549#include "llvm/IR/Metadata.def"
550 void visitDIScope(const DIScope &N);
551 void visitDIVariable(const DIVariable &N);
552 void visitDILexicalBlockBase(const DILexicalBlockBase &N);
553 void visitDITemplateParameter(const DITemplateParameter &N);
554
555 void visitTemplateParams(const MDNode &N, const Metadata &RawParams);
556
557 void visit(DbgLabelRecord &DLR);
558 void visit(DbgVariableRecord &DVR);
559 // InstVisitor overrides...
560 using InstVisitor<Verifier>::visit;
561 void visitDbgRecords(Instruction &I);
562 void visit(Instruction &I);
563
564 void visitTruncInst(TruncInst &I);
565 void visitZExtInst(ZExtInst &I);
566 void visitSExtInst(SExtInst &I);
567 void visitFPTruncInst(FPTruncInst &I);
568 void visitFPExtInst(FPExtInst &I);
569 void visitFPToUIInst(FPToUIInst &I);
570 void visitFPToSIInst(FPToSIInst &I);
571 void visitUIToFPInst(UIToFPInst &I);
572 void visitSIToFPInst(SIToFPInst &I);
573 void visitIntToPtrInst(IntToPtrInst &I);
574 void checkPtrToAddr(Type *SrcTy, Type *DestTy, const Value &V);
575 void visitPtrToAddrInst(PtrToAddrInst &I);
576 void visitPtrToIntInst(PtrToIntInst &I);
577 void visitBitCastInst(BitCastInst &I);
578 void visitAddrSpaceCastInst(AddrSpaceCastInst &I);
579 void visitPHINode(PHINode &PN);
580 void visitCallBase(CallBase &Call);
581 void visitUnaryOperator(UnaryOperator &U);
582 void visitBinaryOperator(BinaryOperator &B);
583 void visitICmpInst(ICmpInst &IC);
584 void visitFCmpInst(FCmpInst &FC);
585 void visitExtractElementInst(ExtractElementInst &EI);
586 void visitInsertElementInst(InsertElementInst &EI);
587 void visitShuffleVectorInst(ShuffleVectorInst &EI);
588 void visitVAArgInst(VAArgInst &VAA) { visitInstruction(VAA); }
589 void visitCallInst(CallInst &CI);
590 void visitInvokeInst(InvokeInst &II);
591 void visitGetElementPtrInst(GetElementPtrInst &GEP);
592 void visitLoadInst(LoadInst &LI);
593 void visitStoreInst(StoreInst &SI);
594 void verifyDominatesUse(Instruction &I, unsigned i);
595 void visitInstruction(Instruction &I);
596 void visitTerminator(Instruction &I);
597 void visitBranchInst(BranchInst &BI);
598 void visitReturnInst(ReturnInst &RI);
599 void visitSwitchInst(SwitchInst &SI);
600 void visitIndirectBrInst(IndirectBrInst &BI);
601 void visitCallBrInst(CallBrInst &CBI);
602 void visitSelectInst(SelectInst &SI);
603 void visitUserOp1(Instruction &I);
604 void visitUserOp2(Instruction &I) { visitUserOp1(I); }
605 void visitIntrinsicCall(Intrinsic::ID ID, CallBase &Call);
606 void visitConstrainedFPIntrinsic(ConstrainedFPIntrinsic &FPI);
607 void visitVPIntrinsic(VPIntrinsic &VPI);
608 void visitDbgLabelIntrinsic(StringRef Kind, DbgLabelInst &DLI);
609 void visitAtomicCmpXchgInst(AtomicCmpXchgInst &CXI);
610 void visitAtomicRMWInst(AtomicRMWInst &RMWI);
611 void visitFenceInst(FenceInst &FI);
612 void visitAllocaInst(AllocaInst &AI);
613 void visitExtractValueInst(ExtractValueInst &EVI);
614 void visitInsertValueInst(InsertValueInst &IVI);
615 void visitEHPadPredecessors(Instruction &I);
616 void visitLandingPadInst(LandingPadInst &LPI);
617 void visitResumeInst(ResumeInst &RI);
618 void visitCatchPadInst(CatchPadInst &CPI);
619 void visitCatchReturnInst(CatchReturnInst &CatchReturn);
620 void visitCleanupPadInst(CleanupPadInst &CPI);
621 void visitFuncletPadInst(FuncletPadInst &FPI);
622 void visitCatchSwitchInst(CatchSwitchInst &CatchSwitch);
623 void visitCleanupReturnInst(CleanupReturnInst &CRI);
624
625 void verifySwiftErrorCall(CallBase &Call, const Value *SwiftErrorVal);
626 void verifySwiftErrorValue(const Value *SwiftErrorVal);
627 void verifyTailCCMustTailAttrs(const AttrBuilder &Attrs, StringRef Context);
628 void verifyMustTailCall(CallInst &CI);
629 bool verifyAttributeCount(AttributeList Attrs, unsigned Params);
630 void verifyAttributeTypes(AttributeSet Attrs, const Value *V);
631 void verifyParameterAttrs(AttributeSet Attrs, Type *Ty, const Value *V);
632 void checkUnsignedBaseTenFuncAttr(AttributeList Attrs, StringRef Attr,
633 const Value *V);
634 void verifyFunctionAttrs(FunctionType *FT, AttributeList Attrs,
635 const Value *V, bool IsIntrinsic, bool IsInlineAsm);
636 void verifyFunctionMetadata(ArrayRef<std::pair<unsigned, MDNode *>> MDs);
637 void verifyUnknownProfileMetadata(MDNode *MD);
638 void visitConstantExprsRecursively(const Constant *EntryC);
639 void visitConstantExpr(const ConstantExpr *CE);
640 void visitConstantPtrAuth(const ConstantPtrAuth *CPA);
641 void verifyInlineAsmCall(const CallBase &Call);
642 void verifyStatepoint(const CallBase &Call);
643 void verifyFrameRecoverIndices();
644 void verifySiblingFuncletUnwinds();
645
646 void verifyFragmentExpression(const DbgVariableRecord &I);
647 template <typename ValueOrMetadata>
648 void verifyFragmentExpression(const DIVariable &V,
650 ValueOrMetadata *Desc);
651 void verifyFnArgs(const DbgVariableRecord &DVR);
652 void verifyNotEntryValue(const DbgVariableRecord &I);
653
654 /// Module-level debug info verification...
655 void verifyCompileUnits();
656
657 /// Module-level verification that all @llvm.experimental.deoptimize
658 /// declarations share the same calling convention.
659 void verifyDeoptimizeCallingConvs();
660
661 void verifyAttachedCallBundle(const CallBase &Call,
662 const OperandBundleUse &BU);
663
664 /// Verify the llvm.experimental.noalias.scope.decl declarations
665 void verifyNoAliasScopeDecl();
666};
667
668} // end anonymous namespace
669
670/// We know that cond should be true; if not, print an error message.
671#define Check(C, ...) \
672 do { \
673 if (!(C)) { \
674 CheckFailed(__VA_ARGS__); \
675 return; \
676 } \
677 } while (false)
678
679/// We know that a debug info condition should be true; if not, print
680/// an error message.
681#define CheckDI(C, ...) \
682 do { \
683 if (!(C)) { \
684 DebugInfoCheckFailed(__VA_ARGS__); \
685 return; \
686 } \
687 } while (false)
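// Typical use of these macros in the visitors below, as a sketch (the values
// named here are illustrative, not fixed API):
//
//   Check(I.getType()->isFirstClassType(),
//         "instruction must produce a first-class value", &I);
//   CheckDI(isa<DILocalScope>(Scope), "expected a local scope", &N, Scope);
//
// The condition comes first, then a message, then any values to dump via
// WriteTs on failure. Check marks the module Broken; CheckDI marks
// BrokenDebugInfo, which can be recovered from by stripping debug info.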
688
689void Verifier::visitDbgRecords(Instruction &I) {
690 if (!I.DebugMarker)
691 return;
692 CheckDI(I.DebugMarker->MarkedInstr == &I,
693 "Instruction has invalid DebugMarker", &I);
694 CheckDI(!isa<PHINode>(&I) || !I.hasDbgRecords(),
695 "PHI Node must not have any attached DbgRecords", &I);
696 for (DbgRecord &DR : I.getDbgRecordRange()) {
697 CheckDI(DR.getMarker() == I.DebugMarker,
698 "DbgRecord had invalid DebugMarker", &I, &DR);
699 if (auto *Loc =
701 visitMDNode(*Loc, AreDebugLocsAllowed::Yes);
702 if (auto *DVR = dyn_cast<DbgVariableRecord>(&DR)) {
703 visit(*DVR);
704 // These have to appear after `visit` for consistency with existing
705 // intrinsic behaviour.
706 verifyFragmentExpression(*DVR);
707 verifyNotEntryValue(*DVR);
708 } else if (auto *DLR = dyn_cast<DbgLabelRecord>(&DR)) {
709 visit(*DLR);
710 }
711 }
712}
713
714void Verifier::visit(Instruction &I) {
715 visitDbgRecords(I);
716 for (unsigned i = 0, e = I.getNumOperands(); i != e; ++i)
717 Check(I.getOperand(i) != nullptr, "Operand is null", &I);
719}
720
721// Helper to iterate over indirect users. By returning false, the callback can ask to stop traversing further.
722static void forEachUser(const Value *User,
724 llvm::function_ref<bool(const Value *)> Callback) {
725 if (!Visited.insert(User).second)
726 return;
727
729 while (!WorkList.empty()) {
730 const Value *Cur = WorkList.pop_back_val();
731 if (!Visited.insert(Cur).second)
732 continue;
733 if (Callback(Cur))
734 append_range(WorkList, Cur->materialized_users());
735 }
736}
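// A sketch of how this helper is driven (the real call site is in
// visitGlobalValue below): returning true from the callback keeps walking
// into the users of the current value, returning false stops at it.
//
//   SmallPtrSet<const Value *, 32> Visited;
//   forEachUser(&GV, Visited, [](const Value *U) -> bool {
//     return !isa<Instruction>(U); // don't walk past instructions
//   });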
737
738void Verifier::visitGlobalValue(const GlobalValue &GV) {
740 "Global is external, but doesn't have external or weak linkage!", &GV);
741
742 if (const GlobalObject *GO = dyn_cast<GlobalObject>(&GV)) {
743 if (const MDNode *Associated =
744 GO->getMetadata(LLVMContext::MD_associated)) {
745 Check(Associated->getNumOperands() == 1,
746 "associated metadata must have one operand", &GV, Associated);
747 const Metadata *Op = Associated->getOperand(0).get();
748 Check(Op, "associated metadata must have a global value", GO, Associated);
749
750 const auto *VM = dyn_cast_or_null<ValueAsMetadata>(Op);
751 Check(VM, "associated metadata must be ValueAsMetadata", GO, Associated);
752 if (VM) {
753 Check(isa<PointerType>(VM->getValue()->getType()),
754 "associated value must be pointer typed", GV, Associated);
755
756 const Value *Stripped = VM->getValue()->stripPointerCastsAndAliases();
757 Check(isa<GlobalObject>(Stripped) || isa<Constant>(Stripped),
758 "associated metadata must point to a GlobalObject", GO, Stripped);
759 Check(Stripped != GO,
760 "global values should not associate to themselves", GO,
761 Associated);
762 }
763 }
764
765 // FIXME: Why is getMetadata on GlobalValue protected?
766 if (const MDNode *AbsoluteSymbol =
767 GO->getMetadata(LLVMContext::MD_absolute_symbol)) {
768 verifyRangeLikeMetadata(*GO, AbsoluteSymbol,
769 DL.getIntPtrType(GO->getType()),
770 RangeLikeMetadataKind::AbsoluteSymbol);
771 }
772
773 if (GO->hasMetadata(LLVMContext::MD_implicit_ref)) {
774 Check(!GO->isDeclaration(),
775 "ref metadata must not be placed on a declaration", GO);
776
778 GO->getMetadata(LLVMContext::MD_implicit_ref, MDs);
779 for (const MDNode *MD : MDs) {
780 Check(MD->getNumOperands() == 1, "ref metadata must have one operand",
781 &GV, MD);
782 const Metadata *Op = MD->getOperand(0).get();
783 const auto *VM = dyn_cast_or_null<ValueAsMetadata>(Op);
784 Check(VM, "ref metadata must be ValueAsMetadata", GO, MD);
785 if (VM) {
786 Check(isa<PointerType>(VM->getValue()->getType()),
787 "ref value must be pointer typed", GV, MD);
788
789 const Value *Stripped = VM->getValue()->stripPointerCastsAndAliases();
790 Check(isa<GlobalObject>(Stripped) || isa<Constant>(Stripped),
791 "ref metadata must point to a GlobalObject", GO, Stripped);
792 Check(Stripped != GO, "values should not reference themselves", GO,
793 MD);
794 }
795 }
796 }
797 }
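  // For illustration, the well-formed shape of !associated metadata checked
  // above looks like this in textual IR (a sketch):
  //
  //   @keep = global i32 0
  //   @gv = global i32 1, !associated !0
  //   !0 = !{ptr @keep}
  //
  // i.e. a single operand holding a pointer-typed value that, after stripping
  // casts and aliases, is a GlobalObject other than @gv itself.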
798
800 "Only global variables can have appending linkage!", &GV);
801
802 if (GV.hasAppendingLinkage()) {
803 const GlobalVariable *GVar = dyn_cast<GlobalVariable>(&GV);
804 Check(GVar && GVar->getValueType()->isArrayTy(),
805 "Only global arrays can have appending linkage!", GVar);
806 }
807
808 if (GV.isDeclarationForLinker())
809 Check(!GV.hasComdat(), "Declaration may not be in a Comdat!", &GV);
810
811 if (GV.hasDLLExportStorageClass()) {
813 "dllexport GlobalValue must have default or protected visibility",
814 &GV);
815 }
816 if (GV.hasDLLImportStorageClass()) {
818 "dllimport GlobalValue must have default visibility", &GV);
819 Check(!GV.isDSOLocal(), "GlobalValue with DLLImport Storage is dso_local!",
820 &GV);
821
822 Check((GV.isDeclaration() &&
825 "Global is marked as dllimport, but not external", &GV);
826 }
827
828 if (GV.isImplicitDSOLocal())
829 Check(GV.isDSOLocal(),
830 "GlobalValue with local linkage or non-default "
831 "visibility must be dso_local!",
832 &GV);
833
834 forEachUser(&GV, GlobalValueVisited, [&](const Value *V) -> bool {
835 if (const Instruction *I = dyn_cast<Instruction>(V)) {
836 if (!I->getParent() || !I->getParent()->getParent())
837 CheckFailed("Global is referenced by parentless instruction!", &GV, &M,
838 I);
839 else if (I->getParent()->getParent()->getParent() != &M)
840 CheckFailed("Global is referenced in a different module!", &GV, &M, I,
841 I->getParent()->getParent(),
842 I->getParent()->getParent()->getParent());
843 return false;
844 } else if (const Function *F = dyn_cast<Function>(V)) {
845 if (F->getParent() != &M)
846 CheckFailed("Global is used by function in a different module", &GV, &M,
847 F, F->getParent());
848 return false;
849 }
850 return true;
851 });
852}
853
854void Verifier::visitGlobalVariable(const GlobalVariable &GV) {
855 Type *GVType = GV.getValueType();
856
857 if (MaybeAlign A = GV.getAlign()) {
858 Check(A->value() <= Value::MaximumAlignment,
859 "huge alignment values are unsupported", &GV);
860 }
861
862 if (GV.hasInitializer()) {
863 Check(GV.getInitializer()->getType() == GVType,
864 "Global variable initializer type does not match global "
865 "variable type!",
866 &GV);
868 "Global variable initializer must be sized", &GV);
869 visitConstantExprsRecursively(GV.getInitializer());
870 // If the global has common linkage, it must have a zero initializer and
871 // cannot be constant.
872 if (GV.hasCommonLinkage()) {
874 "'common' global must have a zero initializer!", &GV);
875 Check(!GV.isConstant(), "'common' global may not be marked constant!",
876 &GV);
877 Check(!GV.hasComdat(), "'common' global may not be in a Comdat!", &GV);
878 }
879 }
880
881 if (GV.hasName() && (GV.getName() == "llvm.global_ctors" ||
882 GV.getName() == "llvm.global_dtors")) {
884 "invalid linkage for intrinsic global variable", &GV);
886 "invalid uses of intrinsic global variable", &GV);
887
888 // Don't worry about emitting an error for it not being an array,
889 // visitGlobalValue will complain on appending non-array.
890 if (ArrayType *ATy = dyn_cast<ArrayType>(GVType)) {
891 StructType *STy = dyn_cast<StructType>(ATy->getElementType());
892 PointerType *FuncPtrTy =
893 PointerType::get(Context, DL.getProgramAddressSpace());
894 Check(STy && (STy->getNumElements() == 2 || STy->getNumElements() == 3) &&
895 STy->getTypeAtIndex(0u)->isIntegerTy(32) &&
896 STy->getTypeAtIndex(1) == FuncPtrTy,
897 "wrong type for intrinsic global variable", &GV);
898 Check(STy->getNumElements() == 3,
899 "the third field of the element type is mandatory, "
900 "specify ptr null to migrate from the obsoleted 2-field form");
901 Type *ETy = STy->getTypeAtIndex(2);
902 Check(ETy->isPointerTy(), "wrong type for intrinsic global variable",
903 &GV);
904 }
905 }
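  // A well-formed llvm.global_ctors, per the checks above (sketch):
  //
  //   @llvm.global_ctors = appending global [1 x { i32, ptr, ptr }]
  //       [{ i32, ptr, ptr } { i32 65535, ptr @init, ptr null }]
  //
  // Each element is { priority, function, associated data or null }; the old
  // two-field form is rejected, so migrate by adding a trailing ptr null.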
906
907 if (GV.hasName() && (GV.getName() == "llvm.used" ||
908 GV.getName() == "llvm.compiler.used")) {
910 "invalid linkage for intrinsic global variable", &GV);
912 "invalid uses of intrinsic global variable", &GV);
913
914 if (ArrayType *ATy = dyn_cast<ArrayType>(GVType)) {
915 PointerType *PTy = dyn_cast<PointerType>(ATy->getElementType());
916 Check(PTy, "wrong type for intrinsic global variable", &GV);
917 if (GV.hasInitializer()) {
918 const Constant *Init = GV.getInitializer();
919 const ConstantArray *InitArray = dyn_cast<ConstantArray>(Init);
920 Check(InitArray, "wrong initializer for intrinsic global variable",
921 Init);
922 for (Value *Op : InitArray->operands()) {
923 Value *V = Op->stripPointerCasts();
926 Twine("invalid ") + GV.getName() + " member", V);
927 Check(V->hasName(),
928 Twine("members of ") + GV.getName() + " must be named", V);
929 }
930 }
931 }
932 }
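  // Likewise, a well-formed llvm.used (sketch): an appending array whose
  // elements are pointers to named globals.
  //
  //   @llvm.used = appending global [1 x ptr] [ptr @f], section "llvm.metadata"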
933
934 // Visit any debug info attachments.
936 GV.getMetadata(LLVMContext::MD_dbg, MDs);
937 for (auto *MD : MDs) {
938 if (auto *GVE = dyn_cast<DIGlobalVariableExpression>(MD))
939 visitDIGlobalVariableExpression(*GVE);
940 else
941 CheckDI(false, "!dbg attachment of global variable must be a "
942 "DIGlobalVariableExpression");
943 }
944
945 // Scalable vectors cannot be global variables, since we don't know
946 // the runtime size.
947 Check(!GVType->isScalableTy(), "Globals cannot contain scalable types", &GV);
948
949 // Check if it is or contains a target extension type that disallows being
950 // used as a global.
952 "Global @" + GV.getName() + " has illegal target extension type",
953 GVType);
954
955 if (!GV.hasInitializer()) {
956 visitGlobalValue(GV);
957 return;
958 }
959
960 // Walk any aggregate initializers looking for bitcasts between address spaces
961 visitConstantExprsRecursively(GV.getInitializer());
962
963 visitGlobalValue(GV);
964}
965
966void Verifier::visitAliaseeSubExpr(const GlobalAlias &GA, const Constant &C) {
967 SmallPtrSet<const GlobalAlias*, 4> Visited;
968 Visited.insert(&GA);
969 visitAliaseeSubExpr(Visited, GA, C);
970}
971
972void Verifier::visitAliaseeSubExpr(SmallPtrSetImpl<const GlobalAlias*> &Visited,
973 const GlobalAlias &GA, const Constant &C) {
976 cast<GlobalValue>(C).hasAvailableExternallyLinkage(),
977 "available_externally alias must point to available_externally "
978 "global value",
979 &GA);
980 }
981 if (const auto *GV = dyn_cast<GlobalValue>(&C)) {
983 Check(!GV->isDeclarationForLinker(), "Alias must point to a definition",
984 &GA);
985 }
986
987 if (const auto *GA2 = dyn_cast<GlobalAlias>(GV)) {
988 Check(Visited.insert(GA2).second, "Aliases cannot form a cycle", &GA);
989
990 Check(!GA2->isInterposable(),
991 "Alias cannot point to an interposable alias", &GA);
992 } else {
993 // Only continue verifying subexpressions of GlobalAliases.
994 // Do not recurse into global initializers.
995 return;
996 }
997 }
998
999 if (const auto *CE = dyn_cast<ConstantExpr>(&C))
1000 visitConstantExprsRecursively(CE);
1001
1002 for (const Use &U : C.operands()) {
1003 Value *V = &*U;
1004 if (const auto *GA2 = dyn_cast<GlobalAlias>(V))
1005 visitAliaseeSubExpr(Visited, GA, *GA2->getAliasee());
1006 else if (const auto *C2 = dyn_cast<Constant>(V))
1007 visitAliaseeSubExpr(Visited, GA, *C2);
1008 }
1009}
1010
1011void Verifier::visitGlobalAlias(const GlobalAlias &GA) {
1013 "Alias should have private, internal, linkonce, weak, linkonce_odr, "
1014 "weak_odr, external, or available_externally linkage!",
1015 &GA);
1016 const Constant *Aliasee = GA.getAliasee();
1017 Check(Aliasee, "Aliasee cannot be NULL!", &GA);
1018 Check(GA.getType() == Aliasee->getType(),
1019 "Alias and aliasee types should match!", &GA);
1020
1021 Check(isa<GlobalValue>(Aliasee) || isa<ConstantExpr>(Aliasee),
1022 "Aliasee should be either GlobalValue or ConstantExpr", &GA);
1023
1024 visitAliaseeSubExpr(GA, *Aliasee);
1025
1026 visitGlobalValue(GA);
1027}
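// An alias that satisfies the rules above (sketch): the aliasee is a
// definition in this module and the alias and aliasee types match.
//
//   define i32 @impl(i32 %x) { ret i32 %x }
//   @impl_alias = alias i32 (i32), ptr @impl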
1028
1029void Verifier::visitGlobalIFunc(const GlobalIFunc &GI) {
1030 visitGlobalValue(GI);
1031
1033 GI.getAllMetadata(MDs);
1034 for (const auto &I : MDs) {
1035 CheckDI(I.first != LLVMContext::MD_dbg,
1036 "an ifunc may not have a !dbg attachment", &GI);
1037 Check(I.first != LLVMContext::MD_prof,
1038 "an ifunc may not have a !prof attachment", &GI);
1039 visitMDNode(*I.second, AreDebugLocsAllowed::No);
1040 }
1041
1043 "IFunc should have private, internal, linkonce, weak, linkonce_odr, "
1044 "weak_odr, or external linkage!",
1045 &GI);
1046 // Pierce through ConstantExprs and GlobalAliases and check that the resolver
1047 // is a Function definition.
1048 const Function *Resolver = GI.getResolverFunction();
1049 Check(Resolver, "IFunc must have a Function resolver", &GI);
1050 Check(!Resolver->isDeclarationForLinker(),
1051 "IFunc resolver must be a definition", &GI);
1052
1053 // Check that the immediate resolver operand (prior to any bitcasts) has the
1054 // correct type.
1055 const Type *ResolverTy = GI.getResolver()->getType();
1056
1058 "IFunc resolver must return a pointer", &GI);
1059
1060 Check(ResolverTy == PointerType::get(Context, GI.getAddressSpace()),
1061 "IFunc resolver has incorrect type", &GI);
1062}
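// An ifunc that satisfies the rules above (sketch): the resolver is a
// function definition in this module that returns a pointer.
//
//   define i32 @foo_impl(i32 %x) { ret i32 %x }
//   define ptr @foo_resolver() { ret ptr @foo_impl }
//   @foo = ifunc i32 (i32), ptr @foo_resolver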
1063
1064void Verifier::visitNamedMDNode(const NamedMDNode &NMD) {
1065 // There used to be various other llvm.dbg.* nodes, but we don't support
1066 // upgrading them and we want to reserve the namespace for future uses.
1067 if (NMD.getName().starts_with("llvm.dbg."))
1068 CheckDI(NMD.getName() == "llvm.dbg.cu",
1069 "unrecognized named metadata node in the llvm.dbg namespace", &NMD);
1070 for (const MDNode *MD : NMD.operands()) {
1071 if (NMD.getName() == "llvm.dbg.cu")
1072 CheckDI(MD && isa<DICompileUnit>(MD), "invalid compile unit", &NMD, MD);
1073
1074 if (!MD)
1075 continue;
1076
1077 visitMDNode(*MD, AreDebugLocsAllowed::Yes);
1078 }
1079}
1080
1081void Verifier::visitMDNode(const MDNode &MD, AreDebugLocsAllowed AllowLocs) {
1082 // Only visit each node once. Metadata can be mutually recursive, so this
1083 // avoids infinite recursion here, as well as being an optimization.
1084 if (!MDNodes.insert(&MD).second)
1085 return;
1086
1087 Check(&MD.getContext() == &Context,
1088 "MDNode context does not match Module context!", &MD);
1089
1090 switch (MD.getMetadataID()) {
1091 default:
1092 llvm_unreachable("Invalid MDNode subclass");
1093 case Metadata::MDTupleKind:
1094 break;
1095#define HANDLE_SPECIALIZED_MDNODE_LEAF(CLASS) \
1096 case Metadata::CLASS##Kind: \
1097 visit##CLASS(cast<CLASS>(MD)); \
1098 break;
1099#include "llvm/IR/Metadata.def"
1100 }
1101
1102 for (const Metadata *Op : MD.operands()) {
1103 if (!Op)
1104 continue;
1105 Check(!isa<LocalAsMetadata>(Op), "Invalid operand for global metadata!",
1106 &MD, Op);
1107 CheckDI(!isa<DILocation>(Op) || AllowLocs == AreDebugLocsAllowed::Yes,
1108 "DILocation not allowed within this metadata node", &MD, Op);
1109 if (auto *N = dyn_cast<MDNode>(Op)) {
1110 visitMDNode(*N, AllowLocs);
1111 continue;
1112 }
1113 if (auto *V = dyn_cast<ValueAsMetadata>(Op)) {
1114 visitValueAsMetadata(*V, nullptr);
1115 continue;
1116 }
1117 }
1118
1119 // Check llvm.loop.estimated_trip_count.
1120 if (MD.getNumOperands() > 0 &&
1122 Check(MD.getNumOperands() == 2, "Expected two operands", &MD);
1124 Check(Count && Count->getType()->isIntegerTy() &&
1125 cast<IntegerType>(Count->getType())->getBitWidth() <= 32,
1126 "Expected second operand to be an integer constant of type i32 or "
1127 "smaller",
1128 &MD);
1129 }
1130
1131 // Check these last, so we diagnose problems in operands first.
1132 Check(!MD.isTemporary(), "Expected no forward declarations!", &MD);
1133 Check(MD.isResolved(), "All nodes should be resolved!", &MD);
1134}
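// The llvm.loop.estimated_trip_count form accepted above is a two-operand
// tuple whose second operand is an integer constant of at most 32 bits, e.g.
// (a sketch):
//
//   !0 = !{!"llvm.loop.estimated_trip_count", i32 100}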
1135
1136void Verifier::visitValueAsMetadata(const ValueAsMetadata &MD, Function *F) {
1137 Check(MD.getValue(), "Expected valid value", &MD);
1138 Check(!MD.getValue()->getType()->isMetadataTy(),
1139 "Unexpected metadata round-trip through values", &MD, MD.getValue());
1140
1141 auto *L = dyn_cast<LocalAsMetadata>(&MD);
1142 if (!L)
1143 return;
1144
1145 Check(F, "function-local metadata used outside a function", L);
1146
1147 // If this was an instruction, bb, or argument, verify that it is in the
1148 // function that we expect.
1149 Function *ActualF = nullptr;
1150 if (Instruction *I = dyn_cast<Instruction>(L->getValue())) {
1151 Check(I->getParent(), "function-local metadata not in basic block", L, I);
1152 ActualF = I->getParent()->getParent();
1153 } else if (BasicBlock *BB = dyn_cast<BasicBlock>(L->getValue()))
1154 ActualF = BB->getParent();
1155 else if (Argument *A = dyn_cast<Argument>(L->getValue()))
1156 ActualF = A->getParent();
1157 assert(ActualF && "Unimplemented function local metadata case!");
1158
1159 Check(ActualF == F, "function-local metadata used in wrong function", L);
1160}
1161
1162void Verifier::visitDIArgList(const DIArgList &AL, Function *F) {
1163 for (const ValueAsMetadata *VAM : AL.getArgs())
1164 visitValueAsMetadata(*VAM, F);
1165}
1166
1167void Verifier::visitMetadataAsValue(const MetadataAsValue &MDV, Function *F) {
1168 Metadata *MD = MDV.getMetadata();
1169 if (auto *N = dyn_cast<MDNode>(MD)) {
1170 visitMDNode(*N, AreDebugLocsAllowed::No);
1171 return;
1172 }
1173
1174 // Only visit each node once. Metadata can be mutually recursive, so this
1175 // avoids infinite recursion here, as well as being an optimization.
1176 if (!MDNodes.insert(MD).second)
1177 return;
1178
1179 if (auto *V = dyn_cast<ValueAsMetadata>(MD))
1180 visitValueAsMetadata(*V, F);
1181
1182 if (auto *AL = dyn_cast<DIArgList>(MD))
1183 visitDIArgList(*AL, F);
1184}
1185
1186static bool isType(const Metadata *MD) { return !MD || isa<DIType>(MD); }
1187static bool isScope(const Metadata *MD) { return !MD || isa<DIScope>(MD); }
1188static bool isDINode(const Metadata *MD) { return !MD || isa<DINode>(MD); }
1189static bool isMDTuple(const Metadata *MD) { return !MD || isa<MDTuple>(MD); }
1190
1191void Verifier::visitDILocation(const DILocation &N) {
1192 CheckDI(N.getRawScope() && isa<DILocalScope>(N.getRawScope()),
1193 "location requires a valid scope", &N, N.getRawScope());
1194 if (auto *IA = N.getRawInlinedAt())
1195 CheckDI(isa<DILocation>(IA), "inlined-at should be a location", &N, IA);
1196 if (auto *SP = dyn_cast<DISubprogram>(N.getRawScope()))
1197 CheckDI(SP->isDefinition(), "scope points into the type hierarchy", &N);
1198}
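// A DILocation that passes the checks above (sketch): the scope is a
// DILocalScope (here a subprogram definition) and inlinedAt, when present,
// is itself a DILocation.
//
//   !4 = distinct !DISubprogram(name: "f", file: !1, unit: !0,
//                               spFlags: DISPFlagDefinition)
//   !5 = !DILocation(line: 7, column: 3, scope: !4)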
1199
1200void Verifier::visitGenericDINode(const GenericDINode &N) {
1201 CheckDI(N.getTag(), "invalid tag", &N);
1202}
1203
1204void Verifier::visitDIScope(const DIScope &N) {
1205 if (auto *F = N.getRawFile())
1206 CheckDI(isa<DIFile>(F), "invalid file", &N, F);
1207}
1208
1209void Verifier::visitDISubrangeType(const DISubrangeType &N) {
1210 CheckDI(N.getTag() == dwarf::DW_TAG_subrange_type, "invalid tag", &N);
1211 auto *BaseType = N.getRawBaseType();
1212 CheckDI(!BaseType || isType(BaseType), "BaseType must be a type");
1213 auto *LBound = N.getRawLowerBound();
1214 CheckDI(!LBound || isa<ConstantAsMetadata>(LBound) ||
1215 isa<DIVariable>(LBound) || isa<DIExpression>(LBound) ||
1216 isa<DIDerivedType>(LBound),
1217 "LowerBound must be signed constant or DIVariable or DIExpression or "
1218 "DIDerivedType",
1219 &N);
1220 auto *UBound = N.getRawUpperBound();
1221 CheckDI(!UBound || isa<ConstantAsMetadata>(UBound) ||
1222 isa<DIVariable>(UBound) || isa<DIExpression>(UBound) ||
1223 isa<DIDerivedType>(UBound),
1224 "UpperBound must be signed constant or DIVariable or DIExpression or "
1225 "DIDerivedType",
1226 &N);
1227 auto *Stride = N.getRawStride();
1228 CheckDI(!Stride || isa<ConstantAsMetadata>(Stride) ||
1229 isa<DIVariable>(Stride) || isa<DIExpression>(Stride),
1230 "Stride must be signed constant or DIVariable or DIExpression", &N);
1231 auto *Bias = N.getRawBias();
1232 CheckDI(!Bias || isa<ConstantAsMetadata>(Bias) || isa<DIVariable>(Bias) ||
1233 isa<DIExpression>(Bias),
1234 "Bias must be signed constant or DIVariable or DIExpression", &N);
1235 // Subrange types currently only support constant size.
1236 auto *Size = N.getRawSizeInBits();
1238 "SizeInBits must be a constant");
1239}
1240
1241void Verifier::visitDISubrange(const DISubrange &N) {
1242 CheckDI(N.getTag() == dwarf::DW_TAG_subrange_type, "invalid tag", &N);
1243 CheckDI(!N.getRawCountNode() || !N.getRawUpperBound(),
1244 "Subrange can have any one of count or upperBound", &N);
1245 auto *CBound = N.getRawCountNode();
1246 CheckDI(!CBound || isa<ConstantAsMetadata>(CBound) ||
1247 isa<DIVariable>(CBound) || isa<DIExpression>(CBound),
1248 "Count must be signed constant or DIVariable or DIExpression", &N);
1249 auto Count = N.getCount();
1251 cast<ConstantInt *>(Count)->getSExtValue() >= -1,
1252 "invalid subrange count", &N);
1253 auto *LBound = N.getRawLowerBound();
1254 CheckDI(!LBound || isa<ConstantAsMetadata>(LBound) ||
1255 isa<DIVariable>(LBound) || isa<DIExpression>(LBound),
1256 "LowerBound must be signed constant or DIVariable or DIExpression",
1257 &N);
1258 auto *UBound = N.getRawUpperBound();
1259 CheckDI(!UBound || isa<ConstantAsMetadata>(UBound) ||
1260 isa<DIVariable>(UBound) || isa<DIExpression>(UBound),
1261 "UpperBound must be signed constant or DIVariable or DIExpression",
1262 &N);
1263 auto *Stride = N.getRawStride();
1264 CheckDI(!Stride || isa<ConstantAsMetadata>(Stride) ||
1265 isa<DIVariable>(Stride) || isa<DIExpression>(Stride),
1266 "Stride must be signed constant or DIVariable or DIExpression", &N);
1267}
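// DISubrange nodes accepted by the checks above (sketch); note that count
// and upperBound are mutually exclusive:
//
//   !5 = !DISubrange(count: 16, lowerBound: 0)
//   !6 = !DISubrange(lowerBound: 1, upperBound: 10)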
1268
1269void Verifier::visitDIGenericSubrange(const DIGenericSubrange &N) {
1270 CheckDI(N.getTag() == dwarf::DW_TAG_generic_subrange, "invalid tag", &N);
1271 CheckDI(!N.getRawCountNode() || !N.getRawUpperBound(),
1272 "GenericSubrange can have any one of count or upperBound", &N);
1273 auto *CBound = N.getRawCountNode();
1274 CheckDI(!CBound || isa<DIVariable>(CBound) || isa<DIExpression>(CBound),
1275 "Count must be signed constant or DIVariable or DIExpression", &N);
1276 auto *LBound = N.getRawLowerBound();
1277 CheckDI(LBound, "GenericSubrange must contain lowerBound", &N);
1278 CheckDI(isa<DIVariable>(LBound) || isa<DIExpression>(LBound),
1279 "LowerBound must be signed constant or DIVariable or DIExpression",
1280 &N);
1281 auto *UBound = N.getRawUpperBound();
1282 CheckDI(!UBound || isa<DIVariable>(UBound) || isa<DIExpression>(UBound),
1283 "UpperBound must be signed constant or DIVariable or DIExpression",
1284 &N);
1285 auto *Stride = N.getRawStride();
1286 CheckDI(Stride, "GenericSubrange must contain stride", &N);
1287 CheckDI(isa<DIVariable>(Stride) || isa<DIExpression>(Stride),
1288 "Stride must be signed constant or DIVariable or DIExpression", &N);
1289}
1290
1291void Verifier::visitDIEnumerator(const DIEnumerator &N) {
1292 CheckDI(N.getTag() == dwarf::DW_TAG_enumerator, "invalid tag", &N);
1293}
1294
1295void Verifier::visitDIBasicType(const DIBasicType &N) {
1296 CheckDI(N.getTag() == dwarf::DW_TAG_base_type ||
1297 N.getTag() == dwarf::DW_TAG_unspecified_type ||
1298 N.getTag() == dwarf::DW_TAG_string_type,
1299 "invalid tag", &N);
1300 // Basic types currently only support constant size.
1301 auto *Size = N.getRawSizeInBits();
1303 "SizeInBits must be a constant");
1304}
1305
1306void Verifier::visitDIFixedPointType(const DIFixedPointType &N) {
1307 visitDIBasicType(N);
1308
1309 CheckDI(N.getTag() == dwarf::DW_TAG_base_type, "invalid tag", &N);
1310 CheckDI(N.getEncoding() == dwarf::DW_ATE_signed_fixed ||
1311 N.getEncoding() == dwarf::DW_ATE_unsigned_fixed,
1312 "invalid encoding", &N);
1316 "invalid kind", &N);
1318 N.getFactorRaw() == 0,
1319 "factor should be 0 for rationals", &N);
1321 (N.getNumeratorRaw() == 0 && N.getDenominatorRaw() == 0),
1322 "numerator and denominator should be 0 for non-rationals", &N);
1323}
1324
1325void Verifier::visitDIStringType(const DIStringType &N) {
1326 CheckDI(N.getTag() == dwarf::DW_TAG_string_type, "invalid tag", &N);
1327 CheckDI(!(N.isBigEndian() && N.isLittleEndian()), "has conflicting flags",
1328 &N);
1329}
1330
1331void Verifier::visitDIDerivedType(const DIDerivedType &N) {
1332 // Common scope checks.
1333 visitDIScope(N);
1334
1335 CheckDI(N.getTag() == dwarf::DW_TAG_typedef ||
1336 N.getTag() == dwarf::DW_TAG_pointer_type ||
1337 N.getTag() == dwarf::DW_TAG_ptr_to_member_type ||
1338 N.getTag() == dwarf::DW_TAG_reference_type ||
1339 N.getTag() == dwarf::DW_TAG_rvalue_reference_type ||
1340 N.getTag() == dwarf::DW_TAG_const_type ||
1341 N.getTag() == dwarf::DW_TAG_immutable_type ||
1342 N.getTag() == dwarf::DW_TAG_volatile_type ||
1343 N.getTag() == dwarf::DW_TAG_restrict_type ||
1344 N.getTag() == dwarf::DW_TAG_atomic_type ||
1345 N.getTag() == dwarf::DW_TAG_LLVM_ptrauth_type ||
1346 N.getTag() == dwarf::DW_TAG_member ||
1347 (N.getTag() == dwarf::DW_TAG_variable && N.isStaticMember()) ||
1348 N.getTag() == dwarf::DW_TAG_inheritance ||
1349 N.getTag() == dwarf::DW_TAG_friend ||
1350 N.getTag() == dwarf::DW_TAG_set_type ||
1351 N.getTag() == dwarf::DW_TAG_template_alias,
1352 "invalid tag", &N);
1353 if (N.getTag() == dwarf::DW_TAG_ptr_to_member_type) {
1354 CheckDI(isType(N.getRawExtraData()), "invalid pointer to member type", &N,
1355 N.getRawExtraData());
1356 } else if (N.getTag() == dwarf::DW_TAG_template_alias) {
1357 CheckDI(isMDTuple(N.getRawExtraData()), "invalid template parameters", &N,
1358 N.getRawExtraData());
1359 } else if (N.getTag() == dwarf::DW_TAG_inheritance ||
1360 N.getTag() == dwarf::DW_TAG_member ||
1361 N.getTag() == dwarf::DW_TAG_variable) {
1362 auto *ExtraData = N.getRawExtraData();
1363 auto IsValidExtraData = [&]() {
1364 if (ExtraData == nullptr)
1365 return true;
1366 if (isa<ConstantAsMetadata>(ExtraData) || isa<MDString>(ExtraData) ||
1367 isa<DIObjCProperty>(ExtraData))
1368 return true;
1369 if (auto *Tuple = dyn_cast<MDTuple>(ExtraData)) {
1370 if (Tuple->getNumOperands() != 1)
1371 return false;
1372 return isa_and_nonnull<ConstantAsMetadata>(Tuple->getOperand(0).get());
1373 }
1374 return false;
1375 };
1376 CheckDI(IsValidExtraData(),
1377 "extraData must be ConstantAsMetadata, MDString, DIObjCProperty, "
1378 "or MDTuple with single ConstantAsMetadata operand",
1379 &N, ExtraData);
1380 }
1381
1382 if (N.getTag() == dwarf::DW_TAG_set_type) {
1383 if (auto *T = N.getRawBaseType()) {
1387 CheckDI(
1388 (Enum && Enum->getTag() == dwarf::DW_TAG_enumeration_type) ||
1389 (Subrange && Subrange->getTag() == dwarf::DW_TAG_subrange_type) ||
1390 (Basic && (Basic->getEncoding() == dwarf::DW_ATE_unsigned ||
1391 Basic->getEncoding() == dwarf::DW_ATE_signed ||
1392 Basic->getEncoding() == dwarf::DW_ATE_unsigned_char ||
1393 Basic->getEncoding() == dwarf::DW_ATE_signed_char ||
1394 Basic->getEncoding() == dwarf::DW_ATE_boolean)),
1395 "invalid set base type", &N, T);
1396 }
1397 }
1398
1399 CheckDI(isScope(N.getRawScope()), "invalid scope", &N, N.getRawScope());
1400 CheckDI(isType(N.getRawBaseType()), "invalid base type", &N,
1401 N.getRawBaseType());
1402
1403 if (N.getDWARFAddressSpace()) {
1404 CheckDI(N.getTag() == dwarf::DW_TAG_pointer_type ||
1405 N.getTag() == dwarf::DW_TAG_reference_type ||
1406 N.getTag() == dwarf::DW_TAG_rvalue_reference_type,
1407 "DWARF address space only applies to pointer or reference types",
1408 &N);
1409 }
1410
1411 auto *Size = N.getRawSizeInBits();
1414 "SizeInBits must be a constant or DIVariable or DIExpression");
1415}
1416
1417/// Detect mutually exclusive flags.
1418static bool hasConflictingReferenceFlags(unsigned Flags) {
1419 return ((Flags & DINode::FlagLValueReference) &&
1420 (Flags & DINode::FlagRValueReference)) ||
1421 ((Flags & DINode::FlagTypePassByValue) &&
1422 (Flags & DINode::FlagTypePassByReference));
1423}
1424
1425void Verifier::visitTemplateParams(const MDNode &N, const Metadata &RawParams) {
1426 auto *Params = dyn_cast<MDTuple>(&RawParams);
1427 CheckDI(Params, "invalid template params", &N, &RawParams);
1428 for (Metadata *Op : Params->operands()) {
1429 CheckDI(Op && isa<DITemplateParameter>(Op), "invalid template parameter",
1430 &N, Params, Op);
1431 }
1432}
1433
1434void Verifier::visitDICompositeType(const DICompositeType &N) {
1435 // Common scope checks.
1436 visitDIScope(N);
1437
1438 CheckDI(N.getTag() == dwarf::DW_TAG_array_type ||
1439 N.getTag() == dwarf::DW_TAG_structure_type ||
1440 N.getTag() == dwarf::DW_TAG_union_type ||
1441 N.getTag() == dwarf::DW_TAG_enumeration_type ||
1442 N.getTag() == dwarf::DW_TAG_class_type ||
1443 N.getTag() == dwarf::DW_TAG_variant_part ||
1444 N.getTag() == dwarf::DW_TAG_variant ||
1445 N.getTag() == dwarf::DW_TAG_namelist,
1446 "invalid tag", &N);
1447
1448 CheckDI(isScope(N.getRawScope()), "invalid scope", &N, N.getRawScope());
1449 CheckDI(isType(N.getRawBaseType()), "invalid base type", &N,
1450 N.getRawBaseType());
1451
1452 CheckDI(!N.getRawElements() || isa<MDTuple>(N.getRawElements()),
1453 "invalid composite elements", &N, N.getRawElements());
1454 CheckDI(isType(N.getRawVTableHolder()), "invalid vtable holder", &N,
1455 N.getRawVTableHolder());
1457 "invalid reference flags", &N);
1458 unsigned DIBlockByRefStruct = 1 << 4;
1459 CheckDI((N.getFlags() & DIBlockByRefStruct) == 0,
1460 "DIBlockByRefStruct on DICompositeType is no longer supported", &N);
1461 CheckDI(llvm::all_of(N.getElements(), [](const DINode *N) { return N; }),
1462 "DISubprogram contains null entry in `elements` field", &N);
1463
1464 if (N.isVector()) {
1465 const DINodeArray Elements = N.getElements();
1466 CheckDI(Elements.size() == 1 &&
1467 Elements[0]->getTag() == dwarf::DW_TAG_subrange_type,
1468 "invalid vector, expected one element of type subrange", &N);
1469 }
1470
1471 if (auto *Params = N.getRawTemplateParams())
1472 visitTemplateParams(N, *Params);
1473
1474 if (auto *D = N.getRawDiscriminator()) {
1475 CheckDI(isa<DIDerivedType>(D) && N.getTag() == dwarf::DW_TAG_variant_part,
1476 "discriminator can only appear on variant part");
1477 }
1478
1479 if (N.getRawDataLocation()) {
1480 CheckDI(N.getTag() == dwarf::DW_TAG_array_type,
1481 "dataLocation can only appear in array type");
1482 }
1483
1484 if (N.getRawAssociated()) {
1485 CheckDI(N.getTag() == dwarf::DW_TAG_array_type,
1486 "associated can only appear in array type");
1487 }
1488
1489 if (N.getRawAllocated()) {
1490 CheckDI(N.getTag() == dwarf::DW_TAG_array_type,
1491 "allocated can only appear in array type");
1492 }
1493
1494 if (N.getRawRank()) {
1495 CheckDI(N.getTag() == dwarf::DW_TAG_array_type,
1496 "rank can only appear in array type");
1497 }
1498
1499 if (N.getTag() == dwarf::DW_TAG_array_type) {
1500 CheckDI(N.getRawBaseType(), "array types must have a base type", &N);
1501 }
1502
1503 auto *Size = N.getRawSizeInBits();
1506 "SizeInBits must be a constant or DIVariable or DIExpression");
1507}
1508
1509void Verifier::visitDISubroutineType(const DISubroutineType &N) {
1510 CheckDI(N.getTag() == dwarf::DW_TAG_subroutine_type, "invalid tag", &N);
1511 if (auto *Types = N.getRawTypeArray()) {
1512 CheckDI(isa<MDTuple>(Types), "invalid composite elements", &N, Types);
1513 for (Metadata *Ty : N.getTypeArray()->operands()) {
1514 CheckDI(isType(Ty), "invalid subroutine type ref", &N, Types, Ty);
1515 }
1516 }
1518 "invalid reference flags", &N);
1519}
1520
1521void Verifier::visitDIFile(const DIFile &N) {
1522 CheckDI(N.getTag() == dwarf::DW_TAG_file_type, "invalid tag", &N);
1523 std::optional<DIFile::ChecksumInfo<StringRef>> Checksum = N.getChecksum();
1524 if (Checksum) {
1525 CheckDI(Checksum->Kind <= DIFile::ChecksumKind::CSK_Last,
1526 "invalid checksum kind", &N);
1527 size_t Size;
1528 switch (Checksum->Kind) {
1529 case DIFile::CSK_MD5:
1530 Size = 32;
1531 break;
1532 case DIFile::CSK_SHA1:
1533 Size = 40;
1534 break;
1535 case DIFile::CSK_SHA256:
1536 Size = 64;
1537 break;
1538 }
1539 CheckDI(Checksum->Value.size() == Size, "invalid checksum length", &N);
1540 CheckDI(Checksum->Value.find_if_not(llvm::isHexDigit) == StringRef::npos,
1541 "invalid checksum", &N);
1542 }
1543}
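// A DIFile checksum that passes the length check above must use 32 hex
// digits for MD5, 40 for SHA1, or 64 for SHA256, e.g. (a sketch):
//
//   !1 = !DIFile(filename: "a.c", directory: "/tmp",
//                checksumkind: CSK_MD5,
//                checksum: "000102030405060708090a0b0c0d0e0f")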
1544
1545void Verifier::visitDICompileUnit(const DICompileUnit &N) {
1546 CheckDI(N.isDistinct(), "compile units must be distinct", &N);
1547 CheckDI(N.getTag() == dwarf::DW_TAG_compile_unit, "invalid tag", &N);
1548
1549 // Don't bother verifying the compilation directory or producer string
1550 // as those could be empty.
1551 CheckDI(N.getRawFile() && isa<DIFile>(N.getRawFile()), "invalid file", &N,
1552 N.getRawFile());
1553 CheckDI(!N.getFile()->getFilename().empty(), "invalid filename", &N,
1554 N.getFile());
1555
1556 CheckDI((N.getEmissionKind() <= DICompileUnit::LastEmissionKind),
1557 "invalid emission kind", &N);
1558
1559 if (auto *Array = N.getRawEnumTypes()) {
1560 CheckDI(isa<MDTuple>(Array), "invalid enum list", &N, Array);
1561 for (Metadata *Op : N.getEnumTypes()->operands()) {
1563 CheckDI(Enum && Enum->getTag() == dwarf::DW_TAG_enumeration_type,
1564 "invalid enum type", &N, N.getEnumTypes(), Op);
1565 }
1566 }
1567 if (auto *Array = N.getRawRetainedTypes()) {
1568 CheckDI(isa<MDTuple>(Array), "invalid retained type list", &N, Array);
1569 for (Metadata *Op : N.getRetainedTypes()->operands()) {
1570 CheckDI(
1571 Op && (isa<DIType>(Op) || (isa<DISubprogram>(Op) &&
1572 !cast<DISubprogram>(Op)->isDefinition())),
1573 "invalid retained type", &N, Op);
1574 }
1575 }
1576 if (auto *Array = N.getRawGlobalVariables()) {
1577 CheckDI(isa<MDTuple>(Array), "invalid global variable list", &N, Array);
1578 for (Metadata *Op : N.getGlobalVariables()->operands()) {
1580 "invalid global variable ref", &N, Op);
1581 }
1582 }
1583 if (auto *Array = N.getRawImportedEntities()) {
1584 CheckDI(isa<MDTuple>(Array), "invalid imported entity list", &N, Array);
1585 for (Metadata *Op : N.getImportedEntities()->operands()) {
1586 CheckDI(Op && isa<DIImportedEntity>(Op), "invalid imported entity ref",
1587 &N, Op);
1588 }
1589 }
1590 if (auto *Array = N.getRawMacros()) {
1591 CheckDI(isa<MDTuple>(Array), "invalid macro list", &N, Array);
1592 for (Metadata *Op : N.getMacros()->operands()) {
1593 CheckDI(Op && isa<DIMacroNode>(Op), "invalid macro ref", &N, Op);
1594 }
1595 }
1596 CUVisited.insert(&N);
1597}
1598
1599void Verifier::visitDISubprogram(const DISubprogram &N) {
1600 CheckDI(N.getTag() == dwarf::DW_TAG_subprogram, "invalid tag", &N);
1601 CheckDI(isScope(N.getRawScope()), "invalid scope", &N, N.getRawScope());
1602 if (auto *F = N.getRawFile())
1603 CheckDI(isa<DIFile>(F), "invalid file", &N, F);
1604 else
1605 CheckDI(N.getLine() == 0, "line specified with no file", &N, N.getLine());
1606 if (auto *T = N.getRawType())
1607 CheckDI(isa<DISubroutineType>(T), "invalid subroutine type", &N, T);
1608 CheckDI(isType(N.getRawContainingType()), "invalid containing type", &N,
1609 N.getRawContainingType());
1610 if (auto *Params = N.getRawTemplateParams())
1611 visitTemplateParams(N, *Params);
1612 if (auto *S = N.getRawDeclaration())
1613 CheckDI(isa<DISubprogram>(S) && !cast<DISubprogram>(S)->isDefinition(),
1614 "invalid subprogram declaration", &N, S);
1615 if (auto *RawNode = N.getRawRetainedNodes()) {
1616 auto *Node = dyn_cast<MDTuple>(RawNode);
1617 CheckDI(Node, "invalid retained nodes list", &N, RawNode);
1618 for (Metadata *Op : Node->operands()) {
1619 CheckDI(Op, "nullptr in retained nodes", &N, Node);
1620
1621 auto True = [](const Metadata *) { return true; };
1622 auto False = [](const Metadata *) { return false; };
1623 bool IsTypeCorrect =
1624 DISubprogram::visitRetainedNode<bool>(Op, True, True, True, False);
1625 CheckDI(IsTypeCorrect,
1626 "invalid retained nodes, expected DILocalVariable, DILabel or "
1627 "DIImportedEntity",
1628 &N, Node, Op);
1629
1630 auto *RetainedNode = cast<DINode>(Op);
1631 auto *RetainedNodeScope = dyn_cast_or_null<DILocalScope>(
1633 CheckDI(RetainedNodeScope,
1634 "invalid retained nodes, retained node is not local", &N, Node,
1635 RetainedNode);
1636 CheckDI(
1637 RetainedNodeScope->getSubprogram() == &N,
1638 "invalid retained nodes, retained node does not belong to subprogram",
1639 &N, Node, RetainedNode, RetainedNodeScope);
1640 }
1641 }
1642 CheckDI(!hasConflictingReferenceFlags(N.getFlags()),
1643 "invalid reference flags", &N);
1644
1645 auto *Unit = N.getRawUnit();
1646 if (N.isDefinition()) {
1647 // Subprogram definitions (not part of the type hierarchy).
1648 CheckDI(N.isDistinct(), "subprogram definitions must be distinct", &N);
1649 CheckDI(Unit, "subprogram definitions must have a compile unit", &N);
1650 CheckDI(isa<DICompileUnit>(Unit), "invalid unit type", &N, Unit);
1651 // There's no good way to cross the CU boundary to insert a nested
1652 // DISubprogram definition in one CU into a type defined in another CU.
1653 auto *CT = dyn_cast_or_null<DICompositeType>(N.getRawScope());
1654 if (CT && CT->getRawIdentifier() &&
1655 M.getContext().isODRUniquingDebugTypes())
1656 CheckDI(N.getDeclaration(),
1657 "definition subprograms cannot be nested within DICompositeType "
1658 "when enabling ODR",
1659 &N);
1660 } else {
1661 // Subprogram declarations (part of the type hierarchy).
1662 CheckDI(!Unit, "subprogram declarations must not have a compile unit", &N);
1663 CheckDI(!N.getRawDeclaration(),
1664 "subprogram declaration must not have a declaration field");
1665 }
1666
1667 if (auto *RawThrownTypes = N.getRawThrownTypes()) {
1668 auto *ThrownTypes = dyn_cast<MDTuple>(RawThrownTypes);
1669 CheckDI(ThrownTypes, "invalid thrown types list", &N, RawThrownTypes);
1670 for (Metadata *Op : ThrownTypes->operands())
1671 CheckDI(Op && isa<DIType>(Op), "invalid thrown type", &N, ThrownTypes,
1672 Op);
1673 }
1674
1675 if (N.areAllCallsDescribed())
1676 CheckDI(N.isDefinition(),
1677 "DIFlagAllCallsDescribed must be attached to a definition");
1678}
1679
1680void Verifier::visitDILexicalBlockBase(const DILexicalBlockBase &N) {
1681 CheckDI(N.getTag() == dwarf::DW_TAG_lexical_block, "invalid tag", &N);
1682 CheckDI(N.getRawScope() && isa<DILocalScope>(N.getRawScope()),
1683 "invalid local scope", &N, N.getRawScope());
1684 if (auto *SP = dyn_cast<DISubprogram>(N.getRawScope()))
1685 CheckDI(SP->isDefinition(), "scope points into the type hierarchy", &N);
1686}
1687
1688void Verifier::visitDILexicalBlock(const DILexicalBlock &N) {
1689 visitDILexicalBlockBase(N);
1690
1691 CheckDI(N.getLine() || !N.getColumn(),
1692 "cannot have column info without line info", &N);
1693}
1694
1695void Verifier::visitDILexicalBlockFile(const DILexicalBlockFile &N) {
1696 visitDILexicalBlockBase(N);
1697}
1698
1699void Verifier::visitDICommonBlock(const DICommonBlock &N) {
1700 CheckDI(N.getTag() == dwarf::DW_TAG_common_block, "invalid tag", &N);
1701 if (auto *S = N.getRawScope())
1702 CheckDI(isa<DIScope>(S), "invalid scope ref", &N, S);
1703 if (auto *S = N.getRawDecl())
1704 CheckDI(isa<DIGlobalVariable>(S), "invalid declaration", &N, S);
1705}
1706
1707void Verifier::visitDINamespace(const DINamespace &N) {
1708 CheckDI(N.getTag() == dwarf::DW_TAG_namespace, "invalid tag", &N);
1709 if (auto *S = N.getRawScope())
1710 CheckDI(isa<DIScope>(S), "invalid scope ref", &N, S);
1711}
1712
1713void Verifier::visitDIMacro(const DIMacro &N) {
1714 CheckDI(N.getMacinfoType() == dwarf::DW_MACINFO_define ||
1715 N.getMacinfoType() == dwarf::DW_MACINFO_undef,
1716 "invalid macinfo type", &N);
1717 CheckDI(!N.getName().empty(), "anonymous macro", &N);
1718 if (!N.getValue().empty()) {
1719 assert(N.getValue().data()[0] != ' ' && "Macro value has a space prefix");
1720 }
1721}
1722
1723void Verifier::visitDIMacroFile(const DIMacroFile &N) {
1724 CheckDI(N.getMacinfoType() == dwarf::DW_MACINFO_start_file,
1725 "invalid macinfo type", &N);
1726 if (auto *F = N.getRawFile())
1727 CheckDI(isa<DIFile>(F), "invalid file", &N, F);
1728
1729 if (auto *Array = N.getRawElements()) {
1730 CheckDI(isa<MDTuple>(Array), "invalid macro list", &N, Array);
1731 for (Metadata *Op : N.getElements()->operands()) {
1732 CheckDI(Op && isa<DIMacroNode>(Op), "invalid macro ref", &N, Op);
1733 }
1734 }
1735}
1736
1737void Verifier::visitDIModule(const DIModule &N) {
1738 CheckDI(N.getTag() == dwarf::DW_TAG_module, "invalid tag", &N);
1739 CheckDI(!N.getName().empty(), "anonymous module", &N);
1740}
1741
1742void Verifier::visitDITemplateParameter(const DITemplateParameter &N) {
1743 CheckDI(isType(N.getRawType()), "invalid type ref", &N, N.getRawType());
1744}
1745
1746void Verifier::visitDITemplateTypeParameter(const DITemplateTypeParameter &N) {
1747 visitDITemplateParameter(N);
1748
1749 CheckDI(N.getTag() == dwarf::DW_TAG_template_type_parameter, "invalid tag",
1750 &N);
1751}
1752
1753void Verifier::visitDITemplateValueParameter(
1754 const DITemplateValueParameter &N) {
1755 visitDITemplateParameter(N);
1756
1757 CheckDI(N.getTag() == dwarf::DW_TAG_template_value_parameter ||
1758 N.getTag() == dwarf::DW_TAG_GNU_template_template_param ||
1759 N.getTag() == dwarf::DW_TAG_GNU_template_parameter_pack,
1760 "invalid tag", &N);
1761}
1762
1763void Verifier::visitDIVariable(const DIVariable &N) {
1764 if (auto *S = N.getRawScope())
1765 CheckDI(isa<DIScope>(S), "invalid scope", &N, S);
1766 if (auto *F = N.getRawFile())
1767 CheckDI(isa<DIFile>(F), "invalid file", &N, F);
1768}
1769
1770void Verifier::visitDIGlobalVariable(const DIGlobalVariable &N) {
1771 // Checks common to all variables.
1772 visitDIVariable(N);
1773
1774 CheckDI(N.getTag() == dwarf::DW_TAG_variable, "invalid tag", &N);
1775 CheckDI(isType(N.getRawType()), "invalid type ref", &N, N.getRawType());
1776 // Check only if the global variable is not an extern
1777 if (N.isDefinition())
1778 CheckDI(N.getType(), "missing global variable type", &N);
1779 if (auto *Member = N.getRawStaticDataMemberDeclaration()) {
1780 CheckDI(isa<DIDerivedType>(Member),
1781 "invalid static data member declaration", &N, Member);
1782 }
1783}
1784
1785void Verifier::visitDILocalVariable(const DILocalVariable &N) {
1786 // Checks common to all variables.
1787 visitDIVariable(N);
1788
1789 CheckDI(isType(N.getRawType()), "invalid type ref", &N, N.getRawType());
1790 CheckDI(N.getTag() == dwarf::DW_TAG_variable, "invalid tag", &N);
1791 CheckDI(N.getRawScope() && isa<DILocalScope>(N.getRawScope()),
1792 "local variable requires a valid scope", &N, N.getRawScope());
1793 if (auto Ty = N.getType())
1794 CheckDI(!isa<DISubroutineType>(Ty), "invalid type", &N, N.getType());
1795}
1796
1797void Verifier::visitDIAssignID(const DIAssignID &N) {
1798 CheckDI(!N.getNumOperands(), "DIAssignID has no arguments", &N);
1799 CheckDI(N.isDistinct(), "DIAssignID must be distinct", &N);
1800}
1801
1802void Verifier::visitDILabel(const DILabel &N) {
1803 if (auto *S = N.getRawScope())
1804 CheckDI(isa<DIScope>(S), "invalid scope", &N, S);
1805 if (auto *F = N.getRawFile())
1806 CheckDI(isa<DIFile>(F), "invalid file", &N, F);
1807
1808 CheckDI(N.getTag() == dwarf::DW_TAG_label, "invalid tag", &N);
1809 CheckDI(N.getRawScope() && isa<DILocalScope>(N.getRawScope()),
1810 "label requires a valid scope", &N, N.getRawScope());
1811}
1812
1813void Verifier::visitDIExpression(const DIExpression &N) {
1814 CheckDI(N.isValid(), "invalid expression", &N);
1815}
1816
1817void Verifier::visitDIGlobalVariableExpression(
1818 const DIGlobalVariableExpression &GVE) {
1819 CheckDI(GVE.getVariable(), "missing variable");
1820 if (auto *Var = GVE.getVariable())
1821 visitDIGlobalVariable(*Var);
1822 if (auto *Expr = GVE.getExpression()) {
1823 visitDIExpression(*Expr);
1824 if (auto Fragment = Expr->getFragmentInfo())
1825 verifyFragmentExpression(*GVE.getVariable(), *Fragment, &GVE);
1826 }
1827}
1828
1829void Verifier::visitDIObjCProperty(const DIObjCProperty &N) {
1830 CheckDI(N.getTag() == dwarf::DW_TAG_APPLE_property, "invalid tag", &N);
1831 if (auto *T = N.getRawType())
1832 CheckDI(isType(T), "invalid type ref", &N, T);
1833 if (auto *F = N.getRawFile())
1834 CheckDI(isa<DIFile>(F), "invalid file", &N, F);
1835}
1836
1837void Verifier::visitDIImportedEntity(const DIImportedEntity &N) {
1838 CheckDI(N.getTag() == dwarf::DW_TAG_imported_module ||
1839 N.getTag() == dwarf::DW_TAG_imported_declaration,
1840 "invalid tag", &N);
1841 if (auto *S = N.getRawScope())
1842 CheckDI(isa<DIScope>(S), "invalid scope for imported entity", &N, S);
1843 CheckDI(isDINode(N.getRawEntity()), "invalid imported entity", &N,
1844 N.getRawEntity());
1845}
1846
1847void Verifier::visitComdat(const Comdat &C) {
1848 // In COFF the Module is invalid if the GlobalValue has private linkage.
1849 // Entities with private linkage don't have entries in the symbol table.
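// Illustrative IR that this check rejects on COFF targets (sketch; the
// symbol name is hypothetical):
//   $sym = comdat any
//   @sym = private global i32 0, comdat($sym)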
1850 if (TT.isOSBinFormatCOFF())
1851 if (const GlobalValue *GV = M.getNamedValue(C.getName()))
1852 Check(!GV->hasPrivateLinkage(), "comdat global value has private linkage",
1853 GV);
1854}
1855
1856void Verifier::visitModuleIdents() {
1857 const NamedMDNode *Idents = M.getNamedMetadata("llvm.ident");
1858 if (!Idents)
1859 return;
1860
1861 // llvm.ident takes a list of metadata entries. Each entry has only one string.
1862 // Scan each llvm.ident entry and make sure that this requirement is met.
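// A well-formed attachment looks roughly like this (illustrative; the
// producer string is hypothetical):
//   !llvm.ident = !{!0}
//   !0 = !{!"clang version 19.0.0"}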
1863 for (const MDNode *N : Idents->operands()) {
1864 Check(N->getNumOperands() == 1,
1865 "incorrect number of operands in llvm.ident metadata", N);
1866 Check(dyn_cast_or_null<MDString>(N->getOperand(0)),
1867 ("invalid value for llvm.ident metadata entry operand"
1868 "(the operand should be a string)"),
1869 N->getOperand(0));
1870 }
1871}
1872
1873void Verifier::visitModuleCommandLines() {
1874 const NamedMDNode *CommandLines = M.getNamedMetadata("llvm.commandline");
1875 if (!CommandLines)
1876 return;
1877
1878 // llvm.commandline takes a list of metadata entries. Each entry has only one
1879 // string. Scan each llvm.commandline entry and make sure that this
1880 // requirement is met.
1881 for (const MDNode *N : CommandLines->operands()) {
1882 Check(N->getNumOperands() == 1,
1883 "incorrect number of operands in llvm.commandline metadata", N);
1884 Check(dyn_cast_or_null<MDString>(N->getOperand(0)),
1885 ("invalid value for llvm.commandline metadata entry operand"
1886 "(the operand should be a string)"),
1887 N->getOperand(0));
1888 }
1889}
1890
1891void Verifier::visitModuleErrnoTBAA() {
1892 const NamedMDNode *ErrnoTBAA = M.getNamedMetadata("llvm.errno.tbaa");
1893 if (!ErrnoTBAA)
1894 return;
1895
1896 Check(ErrnoTBAA->getNumOperands() >= 1,
1897 "llvm.errno.tbaa must have at least one operand", ErrnoTBAA);
1898
1899 for (const MDNode *N : ErrnoTBAA->operands())
1900 TBAAVerifyHelper.visitTBAAMetadata(nullptr, N);
1901}
1902
1903void Verifier::visitModuleFlags() {
1904 const NamedMDNode *Flags = M.getModuleFlagsMetadata();
1905 if (!Flags) return;
1906
1907 // Scan each flag, and track the flags and requirements.
1908 DenseMap<const MDString*, const MDNode*> SeenIDs;
1909 SmallVector<const MDNode*, 16> Requirements;
1910 uint64_t PAuthABIPlatform = -1;
1911 uint64_t PAuthABIVersion = -1;
1912 for (const MDNode *MDN : Flags->operands()) {
1913 visitModuleFlag(MDN, SeenIDs, Requirements);
1914 if (MDN->getNumOperands() != 3)
1915 continue;
1916 if (const auto *FlagName = dyn_cast_or_null<MDString>(MDN->getOperand(1))) {
1917 if (FlagName->getString() == "aarch64-elf-pauthabi-platform") {
1918 if (const auto *PAP =
1919 mdconst::dyn_extract_or_null<ConstantInt>(MDN->getOperand(2)))
1920 PAuthABIPlatform = PAP->getZExtValue();
1921 } else if (FlagName->getString() == "aarch64-elf-pauthabi-version") {
1922 if (const auto *PAV =
1923 mdconst::dyn_extract_or_null<ConstantInt>(MDN->getOperand(2)))
1924 PAuthABIVersion = PAV->getZExtValue();
1925 }
1926 }
1927 }
1928
1929 if ((PAuthABIPlatform == uint64_t(-1)) != (PAuthABIVersion == uint64_t(-1)))
1930 CheckFailed("either both or no 'aarch64-elf-pauthabi-platform' and "
1931 "'aarch64-elf-pauthabi-version' module flags must be present");
1932
1933 // Validate that the requirements in the module are valid.
1934 for (const MDNode *Requirement : Requirements) {
1935 const MDString *Flag = cast<MDString>(Requirement->getOperand(0));
1936 const Metadata *ReqValue = Requirement->getOperand(1);
1937
1938 const MDNode *Op = SeenIDs.lookup(Flag);
1939 if (!Op) {
1940 CheckFailed("invalid requirement on flag, flag is not present in module",
1941 Flag);
1942 continue;
1943 }
1944
1945 if (Op->getOperand(2) != ReqValue) {
1946 CheckFailed(("invalid requirement on flag, "
1947 "flag does not have the required value"),
1948 Flag);
1949 continue;
1950 }
1951 }
1952}
1953
1954void
1955Verifier::visitModuleFlag(const MDNode *Op,
1956 DenseMap<const MDString *, const MDNode *> &SeenIDs,
1957 SmallVectorImpl<const MDNode *> &Requirements) {
1958 // Each module flag should have three arguments, the merge behavior (a
1959 // constant int), the flag ID (an MDString), and the value.
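// For example, a typical well-formed flag uses the Error (1) behavior:
//   !{i32 1, !"wchar_size", i32 4}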
1960 Check(Op->getNumOperands() == 3,
1961 "incorrect number of operands in module flag", Op);
1962 Module::ModFlagBehavior MFB;
1963 if (!Module::isValidModFlagBehavior(Op->getOperand(0), MFB)) {
1964 Check(mdconst::dyn_extract_or_null<ConstantInt>(Op->getOperand(0)),
1965 "invalid behavior operand in module flag (expected constant integer)",
1966 Op->getOperand(0));
1967 Check(false,
1968 "invalid behavior operand in module flag (unexpected constant)",
1969 Op->getOperand(0));
1970 }
1971 MDString *ID = dyn_cast_or_null<MDString>(Op->getOperand(1));
1972 Check(ID, "invalid ID operand in module flag (expected metadata string)",
1973 Op->getOperand(1));
1974
1975 // Check the values for behaviors with additional requirements.
1976 switch (MFB) {
1977 case Module::Error:
1978 case Module::Warning:
1979 case Module::Override:
1980 // These behavior types accept any value.
1981 break;
1982
1983 case Module::Min: {
1984 auto *V = mdconst::dyn_extract_or_null<ConstantInt>(Op->getOperand(2));
1985 Check(V && V->getValue().isNonNegative(),
1986 "invalid value for 'min' module flag (expected constant non-negative "
1987 "integer)",
1988 Op->getOperand(2));
1989 break;
1990 }
1991
1992 case Module::Max: {
1993 Check(mdconst::dyn_extract_or_null<ConstantInt>(Op->getOperand(2)),
1994 "invalid value for 'max' module flag (expected constant integer)",
1995 Op->getOperand(2));
1996 break;
1997 }
1998
1999 case Module::Require: {
2000 // The value should itself be an MDNode with two operands, a flag ID (an
2001 // MDString), and a value.
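// Sketch of a 'require' flag (behavior Require = 3; flag names hypothetical):
//   !{i32 3, !"some-flag", !{!"other-flag", i32 1}}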
2002 MDNode *Value = dyn_cast<MDNode>(Op->getOperand(2));
2003 Check(Value && Value->getNumOperands() == 2,
2004 "invalid value for 'require' module flag (expected metadata pair)",
2005 Op->getOperand(2));
2006 Check(isa<MDString>(Value->getOperand(0)),
2007 ("invalid value for 'require' module flag "
2008 "(first value operand should be a string)"),
2009 Value->getOperand(0));
2010
2011 // Append it to the list of requirements, to check once all module flags are
2012 // scanned.
2013 Requirements.push_back(Value);
2014 break;
2015 }
2016
2017 case Module::Append:
2018 case Module::AppendUnique: {
2019 // These behavior types require the operand be an MDNode.
2020 Check(isa<MDNode>(Op->getOperand(2)),
2021 "invalid value for 'append'-type module flag "
2022 "(expected a metadata node)",
2023 Op->getOperand(2));
2024 break;
2025 }
2026 }
2027
2028 // Unless this is a "requires" flag, check the ID is unique.
2029 if (MFB != Module::Require) {
2030 bool Inserted = SeenIDs.insert(std::make_pair(ID, Op)).second;
2031 Check(Inserted,
2032 "module flag identifiers must be unique (or of 'require' type)", ID);
2033 }
2034
2035 if (ID->getString() == "wchar_size") {
2036 ConstantInt *Value
2037 = mdconst::dyn_extract_or_null<ConstantInt>(Op->getOperand(2));
2038 Check(Value, "wchar_size metadata requires constant integer argument");
2039 }
2040
2041 if (ID->getString() == "Linker Options") {
2042 // If the llvm.linker.options named metadata exists, we assume that the
2043 // bitcode reader has upgraded the module flag. Otherwise the flag might
2044 // have been created by a client directly.
2045 Check(M.getNamedMetadata("llvm.linker.options"),
2046 "'Linker Options' named metadata no longer supported");
2047 }
2048
2049 if (ID->getString() == "SemanticInterposition") {
2050 ConstantInt *Value =
2051 mdconst::dyn_extract_or_null<ConstantInt>(Op->getOperand(2));
2052 Check(Value,
2053 "SemanticInterposition metadata requires constant integer argument");
2054 }
2055
2056 if (ID->getString() == "CG Profile") {
2057 for (const MDOperand &MDO : cast<MDNode>(Op->getOperand(2))->operands())
2058 visitModuleFlagCGProfileEntry(MDO);
2059 }
2060}
2061
2062void Verifier::visitModuleFlagCGProfileEntry(const MDOperand &MDO) {
2063 auto CheckFunction = [&](const MDOperand &FuncMDO) {
2064 if (!FuncMDO)
2065 return;
2066 auto F = dyn_cast<ValueAsMetadata>(FuncMDO);
2067 Check(F && isa<Function>(F->getValue()->stripPointerCasts()),
2068 "expected a Function or null", FuncMDO);
2069 };
2070 auto Node = dyn_cast_or_null<MDNode>(MDO);
2071 Check(Node && Node->getNumOperands() == 3, "expected a MDNode triple", MDO);
2072 CheckFunction(Node->getOperand(0));
2073 CheckFunction(Node->getOperand(1));
2074 auto Count = dyn_cast_or_null<ConstantAsMetadata>(Node->getOperand(2));
2075 Check(Count && Count->getType()->isIntegerTy(),
2076 "expected an integer constant", Node->getOperand(2));
2077}
2078
2079void Verifier::verifyAttributeTypes(AttributeSet Attrs, const Value *V) {
2080 for (Attribute A : Attrs) {
2081
2082 if (A.isStringAttribute()) {
2083#define GET_ATTR_NAMES
2084#define ATTRIBUTE_ENUM(ENUM_NAME, DISPLAY_NAME)
2085#define ATTRIBUTE_STRBOOL(ENUM_NAME, DISPLAY_NAME) \
2086 if (A.getKindAsString() == #DISPLAY_NAME) { \
2087 auto V = A.getValueAsString(); \
2088 if (!(V.empty() || V == "true" || V == "false")) \
2089 CheckFailed("invalid value for '" #DISPLAY_NAME "' attribute: " + V + \
2090 ""); \
2091 }
2092
2093#include "llvm/IR/Attributes.inc"
2094 continue;
2095 }
2096
2097 if (A.isIntAttribute() != Attribute::isIntAttrKind(A.getKindAsEnum())) {
2098 CheckFailed("Attribute '" + A.getAsString() + "' should have an Argument",
2099 V);
2100 return;
2101 }
2102 }
2103}
2104
2105// VerifyParameterAttrs - Check the given attributes for an argument or return
2106// value of the specified type. The value V is printed in error messages.
2107void Verifier::verifyParameterAttrs(AttributeSet Attrs, Type *Ty,
2108 const Value *V) {
2109 if (!Attrs.hasAttributes())
2110 return;
2111
2112 verifyAttributeTypes(Attrs, V);
2113
2114 for (Attribute Attr : Attrs)
2115 Check(Attr.isStringAttribute() ||
2116 Attribute::canUseAsParamAttr(Attr.getKindAsEnum()),
2117 "Attribute '" + Attr.getAsString() + "' does not apply to parameters",
2118 V);
2119
2120 if (Attrs.hasAttribute(Attribute::ImmArg)) {
2121 unsigned AttrCount =
2122 Attrs.getNumAttributes() - Attrs.hasAttribute(Attribute::Range);
2123 Check(AttrCount == 1,
2124 "Attribute 'immarg' is incompatible with other attributes except the "
2125 "'range' attribute",
2126 V);
2127 }
2128
2129 // Check for mutually incompatible attributes. Only inreg is compatible with
2130 // sret.
2131 unsigned AttrCount = 0;
2132 AttrCount += Attrs.hasAttribute(Attribute::ByVal);
2133 AttrCount += Attrs.hasAttribute(Attribute::InAlloca);
2134 AttrCount += Attrs.hasAttribute(Attribute::Preallocated);
2135 AttrCount += Attrs.hasAttribute(Attribute::StructRet) ||
2136 Attrs.hasAttribute(Attribute::InReg);
2137 AttrCount += Attrs.hasAttribute(Attribute::Nest);
2138 AttrCount += Attrs.hasAttribute(Attribute::ByRef);
2139 Check(AttrCount <= 1,
2140 "Attributes 'byval', 'inalloca', 'preallocated', 'inreg', 'nest', "
2141 "'byref', and 'sret' are incompatible!",
2142 V);
2143
2144 Check(!(Attrs.hasAttribute(Attribute::InAlloca) &&
2145 Attrs.hasAttribute(Attribute::ReadOnly)),
2146 "Attributes "
2147 "'inalloca and readonly' are incompatible!",
2148 V);
2149
2150 Check(!(Attrs.hasAttribute(Attribute::StructRet) &&
2151 Attrs.hasAttribute(Attribute::Returned)),
2152 "Attributes "
2153 "'sret and returned' are incompatible!",
2154 V);
2155
2156 Check(!(Attrs.hasAttribute(Attribute::ZExt) &&
2157 Attrs.hasAttribute(Attribute::SExt)),
2158 "Attributes "
2159 "'zeroext and signext' are incompatible!",
2160 V);
2161
2162 Check(!(Attrs.hasAttribute(Attribute::ReadNone) &&
2163 Attrs.hasAttribute(Attribute::ReadOnly)),
2164 "Attributes "
2165 "'readnone and readonly' are incompatible!",
2166 V);
2167
2168 Check(!(Attrs.hasAttribute(Attribute::ReadNone) &&
2169 Attrs.hasAttribute(Attribute::WriteOnly)),
2170 "Attributes "
2171 "'readnone and writeonly' are incompatible!",
2172 V);
2173
2174 Check(!(Attrs.hasAttribute(Attribute::ReadOnly) &&
2175 Attrs.hasAttribute(Attribute::WriteOnly)),
2176 "Attributes "
2177 "'readonly and writeonly' are incompatible!",
2178 V);
2179
2180 Check(!(Attrs.hasAttribute(Attribute::NoInline) &&
2181 Attrs.hasAttribute(Attribute::AlwaysInline)),
2182 "Attributes "
2183 "'noinline and alwaysinline' are incompatible!",
2184 V);
2185
2186 Check(!(Attrs.hasAttribute(Attribute::Writable) &&
2187 Attrs.hasAttribute(Attribute::ReadNone)),
2188 "Attributes writable and readnone are incompatible!", V);
2189
2190 Check(!(Attrs.hasAttribute(Attribute::Writable) &&
2191 Attrs.hasAttribute(Attribute::ReadOnly)),
2192 "Attributes writable and readonly are incompatible!", V);
2193
2194 AttributeMask IncompatibleAttrs = AttributeFuncs::typeIncompatible(Ty, Attrs);
2195 for (Attribute Attr : Attrs) {
2196 if (!Attr.isStringAttribute() &&
2197 IncompatibleAttrs.contains(Attr.getKindAsEnum())) {
2198 CheckFailed("Attribute '" + Attr.getAsString() +
2199 "' applied to incompatible type!", V);
2200 return;
2201 }
2202 }
2203
2204 if (isa<PointerType>(Ty)) {
2205 if (Attrs.hasAttribute(Attribute::Alignment)) {
2206 Align AttrAlign = Attrs.getAlignment().valueOrOne();
2207 Check(AttrAlign.value() <= Value::MaximumAlignment,
2208 "huge alignment values are unsupported", V);
2209 }
2210 if (Attrs.hasAttribute(Attribute::ByVal)) {
2211 Type *ByValTy = Attrs.getByValType();
2212 SmallPtrSet<Type *, 4> Visited;
2213 Check(ByValTy->isSized(&Visited),
2214 "Attribute 'byval' does not support unsized types!", V);
2215 // Check if it is or contains a target extension type that disallows being
2216 // used on the stack.
2217 Check(!ByValTy->containsNonLocalTargetExtType(),
2218 "'byval' argument has illegal target extension type", V);
2219 Check(DL.getTypeAllocSize(ByValTy).getKnownMinValue() < (1ULL << 32),
2220 "huge 'byval' arguments are unsupported", V);
2221 }
2222 if (Attrs.hasAttribute(Attribute::ByRef)) {
2223 SmallPtrSet<Type *, 4> Visited;
2224 Check(Attrs.getByRefType()->isSized(&Visited),
2225 "Attribute 'byref' does not support unsized types!", V);
2226 Check(DL.getTypeAllocSize(Attrs.getByRefType()).getKnownMinValue() <
2227 (1ULL << 32),
2228 "huge 'byref' arguments are unsupported", V);
2229 }
2230 if (Attrs.hasAttribute(Attribute::InAlloca)) {
2231 SmallPtrSet<Type *, 4> Visited;
2232 Check(Attrs.getInAllocaType()->isSized(&Visited),
2233 "Attribute 'inalloca' does not support unsized types!", V);
2234 Check(DL.getTypeAllocSize(Attrs.getInAllocaType()).getKnownMinValue() <
2235 (1ULL << 32),
2236 "huge 'inalloca' arguments are unsupported", V);
2237 }
2238 if (Attrs.hasAttribute(Attribute::Preallocated)) {
2239 SmallPtrSet<Type *, 4> Visited;
2240 Check(Attrs.getPreallocatedType()->isSized(&Visited),
2241 "Attribute 'preallocated' does not support unsized types!", V);
2242 Check(
2243 DL.getTypeAllocSize(Attrs.getPreallocatedType()).getKnownMinValue() <
2244 (1ULL << 32),
2245 "huge 'preallocated' arguments are unsupported", V);
2246 }
2247 }
2248
2249 if (Attrs.hasAttribute(Attribute::Initializes)) {
2250 auto Inits = Attrs.getAttribute(Attribute::Initializes).getInitializes();
2251 Check(!Inits.empty(), "Attribute 'initializes' does not support empty list",
2252 V);
2253 Check(ConstantRangeList::isOrderedRanges(Inits),
2254 "Attribute 'initializes' does not support unordered ranges", V);
2255 }
2256
2257 if (Attrs.hasAttribute(Attribute::NoFPClass)) {
2258 uint64_t Val = Attrs.getAttribute(Attribute::NoFPClass).getValueAsInt();
2259 Check(Val != 0, "Attribute 'nofpclass' must have at least one test bit set",
2260 V);
2261 Check((Val & ~static_cast<unsigned>(fcAllFlags)) == 0,
2262 "Invalid value for 'nofpclass' test mask", V);
2263 }
2264 if (Attrs.hasAttribute(Attribute::Range)) {
2265 const ConstantRange &CR =
2266 Attrs.getAttribute(Attribute::Range).getValueAsConstantRange();
2267 Check(Ty->isIntOrIntVectorTy(CR.getBitWidth()),
2268 "Range bit width must match type bit width!", V);
2269 }
2270}
2271
2272void Verifier::checkUnsignedBaseTenFuncAttr(AttributeList Attrs, StringRef Attr,
2273 const Value *V) {
2274 if (Attrs.hasFnAttr(Attr)) {
2275 StringRef S = Attrs.getFnAttr(Attr).getValueAsString();
2276 unsigned N;
2277 if (S.getAsInteger(10, N))
2278 CheckFailed("\"" + Attr + "\" takes an unsigned integer: " + S, V);
2279 }
2280}
2281
2282// Check parameter attributes against a function type.
2283// The value V is printed in error messages.
2284void Verifier::verifyFunctionAttrs(FunctionType *FT, AttributeList Attrs,
2285 const Value *V, bool IsIntrinsic,
2286 bool IsInlineAsm) {
2287 if (Attrs.isEmpty())
2288 return;
2289
2290 if (AttributeListsVisited.insert(Attrs.getRawPointer()).second) {
2291 Check(Attrs.hasParentContext(Context),
2292 "Attribute list does not match Module context!", &Attrs, V);
2293 for (const auto &AttrSet : Attrs) {
2294 Check(!AttrSet.hasAttributes() || AttrSet.hasParentContext(Context),
2295 "Attribute set does not match Module context!", &AttrSet, V);
2296 for (const auto &A : AttrSet) {
2297 Check(A.hasParentContext(Context),
2298 "Attribute does not match Module context!", &A, V);
2299 }
2300 }
2301 }
2302
2303 bool SawNest = false;
2304 bool SawReturned = false;
2305 bool SawSRet = false;
2306 bool SawSwiftSelf = false;
2307 bool SawSwiftAsync = false;
2308 bool SawSwiftError = false;
2309
2310 // Verify return value attributes.
2311 AttributeSet RetAttrs = Attrs.getRetAttrs();
2312 for (Attribute RetAttr : RetAttrs)
2313 Check(RetAttr.isStringAttribute() ||
2314 Attribute::canUseAsRetAttr(RetAttr.getKindAsEnum()),
2315 "Attribute '" + RetAttr.getAsString() +
2316 "' does not apply to function return values",
2317 V);
2318
2319 unsigned MaxParameterWidth = 0;
2320 auto GetMaxParameterWidth = [&MaxParameterWidth](Type *Ty) {
2321 if (Ty->isVectorTy()) {
2322 if (auto *VT = dyn_cast<FixedVectorType>(Ty)) {
2323 unsigned Size = VT->getPrimitiveSizeInBits().getFixedValue();
2324 if (Size > MaxParameterWidth)
2325 MaxParameterWidth = Size;
2326 }
2327 }
2328 };
2329 GetMaxParameterWidth(FT->getReturnType());
2330 verifyParameterAttrs(RetAttrs, FT->getReturnType(), V);
2331
2332 // Verify parameter attributes.
2333 for (unsigned i = 0, e = FT->getNumParams(); i != e; ++i) {
2334 Type *Ty = FT->getParamType(i);
2335 AttributeSet ArgAttrs = Attrs.getParamAttrs(i);
2336
2337 if (!IsIntrinsic) {
2338 Check(!ArgAttrs.hasAttribute(Attribute::ImmArg),
2339 "immarg attribute only applies to intrinsics", V);
2340 if (!IsInlineAsm)
2341 Check(!ArgAttrs.hasAttribute(Attribute::ElementType),
2342 "Attribute 'elementtype' can only be applied to intrinsics"
2343 " and inline asm.",
2344 V);
2345 }
2346
2347 verifyParameterAttrs(ArgAttrs, Ty, V);
2348 GetMaxParameterWidth(Ty);
2349
2350 if (ArgAttrs.hasAttribute(Attribute::Nest)) {
2351 Check(!SawNest, "More than one parameter has attribute nest!", V);
2352 SawNest = true;
2353 }
2354
2355 if (ArgAttrs.hasAttribute(Attribute::Returned)) {
2356 Check(!SawReturned, "More than one parameter has attribute returned!", V);
2357 Check(Ty->canLosslesslyBitCastTo(FT->getReturnType()),
2358 "Incompatible argument and return types for 'returned' attribute",
2359 V);
2360 SawReturned = true;
2361 }
2362
2363 if (ArgAttrs.hasAttribute(Attribute::StructRet)) {
2364 Check(!SawSRet, "Cannot have multiple 'sret' parameters!", V);
2365 Check(i == 0 || i == 1,
2366 "Attribute 'sret' is not on first or second parameter!", V);
2367 SawSRet = true;
2368 }
2369
2370 if (ArgAttrs.hasAttribute(Attribute::SwiftSelf)) {
2371 Check(!SawSwiftSelf, "Cannot have multiple 'swiftself' parameters!", V);
2372 SawSwiftSelf = true;
2373 }
2374
2375 if (ArgAttrs.hasAttribute(Attribute::SwiftAsync)) {
2376 Check(!SawSwiftAsync, "Cannot have multiple 'swiftasync' parameters!", V);
2377 SawSwiftAsync = true;
2378 }
2379
2380 if (ArgAttrs.hasAttribute(Attribute::SwiftError)) {
2381 Check(!SawSwiftError, "Cannot have multiple 'swifterror' parameters!", V);
2382 SawSwiftError = true;
2383 }
2384
2385 if (ArgAttrs.hasAttribute(Attribute::InAlloca)) {
2386 Check(i == FT->getNumParams() - 1,
2387 "inalloca isn't on the last parameter!", V);
2388 }
2389 }
2390
2391 if (!Attrs.hasFnAttrs())
2392 return;
2393
2394 verifyAttributeTypes(Attrs.getFnAttrs(), V);
2395 for (Attribute FnAttr : Attrs.getFnAttrs())
2396 Check(FnAttr.isStringAttribute() ||
2397 Attribute::canUseAsFnAttr(FnAttr.getKindAsEnum()),
2398 "Attribute '" + FnAttr.getAsString() +
2399 "' does not apply to functions!",
2400 V);
2401
2402 Check(!(Attrs.hasFnAttr(Attribute::NoInline) &&
2403 Attrs.hasFnAttr(Attribute::AlwaysInline)),
2404 "Attributes 'noinline and alwaysinline' are incompatible!", V);
2405
2406 if (Attrs.hasFnAttr(Attribute::OptimizeNone)) {
2407 Check(Attrs.hasFnAttr(Attribute::NoInline),
2408 "Attribute 'optnone' requires 'noinline'!", V);
2409
2410 Check(!Attrs.hasFnAttr(Attribute::OptimizeForSize),
2411 "Attributes 'optsize and optnone' are incompatible!", V);
2412
2413 Check(!Attrs.hasFnAttr(Attribute::MinSize),
2414 "Attributes 'minsize and optnone' are incompatible!", V);
2415
2416 Check(!Attrs.hasFnAttr(Attribute::OptimizeForDebugging),
2417 "Attributes 'optdebug and optnone' are incompatible!", V);
2418 }
2419
2420 Check(!(Attrs.hasFnAttr(Attribute::SanitizeRealtime) &&
2421 Attrs.hasFnAttr(Attribute::SanitizeRealtimeBlocking)),
2422 "Attributes "
2423 "'sanitize_realtime and sanitize_realtime_blocking' are incompatible!",
2424 V);
2425
2426 if (Attrs.hasFnAttr(Attribute::OptimizeForDebugging)) {
2427 Check(!Attrs.hasFnAttr(Attribute::OptimizeForSize),
2428 "Attributes 'optsize and optdebug' are incompatible!", V);
2429
2430 Check(!Attrs.hasFnAttr(Attribute::MinSize),
2431 "Attributes 'minsize and optdebug' are incompatible!", V);
2432 }
2433
2434 Check(!Attrs.hasAttrSomewhere(Attribute::Writable) ||
2435 isModSet(Attrs.getMemoryEffects().getModRef(IRMemLocation::ArgMem)),
2436 "Attribute writable and memory without argmem: write are incompatible!",
2437 V);
2438
2439 if (Attrs.hasFnAttr("aarch64_pstate_sm_enabled")) {
2440 Check(!Attrs.hasFnAttr("aarch64_pstate_sm_compatible"),
2441 "Attributes 'aarch64_pstate_sm_enabled and "
2442 "aarch64_pstate_sm_compatible' are incompatible!",
2443 V);
2444 }
2445
2446 Check((Attrs.hasFnAttr("aarch64_new_za") + Attrs.hasFnAttr("aarch64_in_za") +
2447 Attrs.hasFnAttr("aarch64_inout_za") +
2448 Attrs.hasFnAttr("aarch64_out_za") +
2449 Attrs.hasFnAttr("aarch64_preserves_za") +
2450 Attrs.hasFnAttr("aarch64_za_state_agnostic")) <= 1,
2451 "Attributes 'aarch64_new_za', 'aarch64_in_za', 'aarch64_out_za', "
2452 "'aarch64_inout_za', 'aarch64_preserves_za' and "
2453 "'aarch64_za_state_agnostic' are mutually exclusive",
2454 V);
2455
2456 Check((Attrs.hasFnAttr("aarch64_new_zt0") +
2457 Attrs.hasFnAttr("aarch64_in_zt0") +
2458 Attrs.hasFnAttr("aarch64_inout_zt0") +
2459 Attrs.hasFnAttr("aarch64_out_zt0") +
2460 Attrs.hasFnAttr("aarch64_preserves_zt0") +
2461 Attrs.hasFnAttr("aarch64_za_state_agnostic")) <= 1,
2462 "Attributes 'aarch64_new_zt0', 'aarch64_in_zt0', 'aarch64_out_zt0', "
2463 "'aarch64_inout_zt0', 'aarch64_preserves_zt0' and "
2464 "'aarch64_za_state_agnostic' are mutually exclusive",
2465 V);
2466
2467 if (Attrs.hasFnAttr(Attribute::JumpTable)) {
2468 const GlobalValue *GV = cast<GlobalValue>(V);
2469 Check(GV->hasGlobalUnnamedAddr(),
2470 "Attribute 'jumptable' requires 'unnamed_addr'", V);
2471 }
2472
2473 if (auto Args = Attrs.getFnAttrs().getAllocSizeArgs()) {
2474 auto CheckParam = [&](StringRef Name, unsigned ParamNo) {
2475 if (ParamNo >= FT->getNumParams()) {
2476 CheckFailed("'allocsize' " + Name + " argument is out of bounds", V);
2477 return false;
2478 }
2479
2480 if (!FT->getParamType(ParamNo)->isIntegerTy()) {
2481 CheckFailed("'allocsize' " + Name +
2482 " argument must refer to an integer parameter",
2483 V);
2484 return false;
2485 }
2486
2487 return true;
2488 };
2489
2490 if (!CheckParam("element size", Args->first))
2491 return;
2492
2493 if (Args->second && !CheckParam("number of elements", *Args->second))
2494 return;
2495 }
2496
2497 if (Attrs.hasFnAttr(Attribute::AllocKind)) {
2498 AllocFnKind K = Attrs.getAllocKind();
2499 AllocFnKind Type =
2500 K & (AllocFnKind::Alloc | AllocFnKind::Realloc | AllocFnKind::Free);
2501 if (!is_contained(
2502 {AllocFnKind::Alloc, AllocFnKind::Realloc, AllocFnKind::Free},
2503 Type))
2504 CheckFailed(
2505 "'allockind()' requires exactly one of alloc, realloc, and free");
2506 if ((Type == AllocFnKind::Free) &&
2507 ((K & (AllocFnKind::Uninitialized | AllocFnKind::Zeroed |
2508 AllocFnKind::Aligned)) != AllocFnKind::Unknown))
2509 CheckFailed("'allockind(\"free\")' doesn't allow uninitialized, zeroed, "
2510 "or aligned modifiers.");
2511 AllocFnKind ZeroedUninit = AllocFnKind::Uninitialized | AllocFnKind::Zeroed;
2512 if ((K & ZeroedUninit) == ZeroedUninit)
2513 CheckFailed("'allockind()' can't be both zeroed and uninitialized");
2514 }
2515
2516 if (Attribute A = Attrs.getFnAttr("alloc-variant-zeroed"); A.isValid()) {
2517 StringRef S = A.getValueAsString();
2518 Check(!S.empty(), "'alloc-variant-zeroed' must not be empty");
2519 Function *Variant = M.getFunction(S);
2520 if (Variant) {
2521 Attribute Family = Attrs.getFnAttr("alloc-family");
2522 Attribute VariantFamily = Variant->getFnAttribute("alloc-family");
2523 if (Family.isValid())
2524 Check(VariantFamily.isValid() &&
2525 VariantFamily.getValueAsString() == Family.getValueAsString(),
2526 "'alloc-variant-zeroed' must name a function belonging to the "
2527 "same 'alloc-family'");
2528
2529 Check(Variant->hasFnAttribute(Attribute::AllocKind) &&
2530 (Variant->getFnAttribute(Attribute::AllocKind).getAllocKind() &
2531 AllocFnKind::Zeroed) != AllocFnKind::Unknown,
2532 "'alloc-variant-zeroed' must name a function with "
2533 "'allockind(\"zeroed\")'");
2534
2535 Check(FT == Variant->getFunctionType(),
2536 "'alloc-variant-zeroed' must name a function with the same "
2537 "signature");
2538 }
2539 }
2540
2541 if (Attrs.hasFnAttr(Attribute::VScaleRange)) {
2542 unsigned VScaleMin = Attrs.getFnAttrs().getVScaleRangeMin();
2543 if (VScaleMin == 0)
2544 CheckFailed("'vscale_range' minimum must be greater than 0", V);
2545 else if (!isPowerOf2_32(VScaleMin))
2546 CheckFailed("'vscale_range' minimum must be power-of-two value", V);
2547 std::optional<unsigned> VScaleMax = Attrs.getFnAttrs().getVScaleRangeMax();
2548 if (VScaleMax && VScaleMin > VScaleMax)
2549 CheckFailed("'vscale_range' minimum cannot be greater than maximum", V);
2550 else if (VScaleMax && !isPowerOf2_32(*VScaleMax))
2551 CheckFailed("'vscale_range' maximum must be power-of-two value", V);
2552 }
2553
2554 if (Attribute FPAttr = Attrs.getFnAttr("frame-pointer"); FPAttr.isValid()) {
2555 StringRef FP = FPAttr.getValueAsString();
2556 if (FP != "all" && FP != "non-leaf" && FP != "none" && FP != "reserved" &&
2557 FP != "non-leaf-no-reserve")
2558 CheckFailed("invalid value for 'frame-pointer' attribute: " + FP, V);
2559 }
2560
2561 checkUnsignedBaseTenFuncAttr(Attrs, "patchable-function-prefix", V);
2562 checkUnsignedBaseTenFuncAttr(Attrs, "patchable-function-entry", V);
2563 if (Attrs.hasFnAttr("patchable-function-entry-section"))
2564 Check(!Attrs.getFnAttr("patchable-function-entry-section")
2565 .getValueAsString()
2566 .empty(),
2567 "\"patchable-function-entry-section\" must not be empty");
2568 checkUnsignedBaseTenFuncAttr(Attrs, "warn-stack-size", V);
2569
2570 if (auto A = Attrs.getFnAttr("sign-return-address"); A.isValid()) {
2571 StringRef S = A.getValueAsString();
2572 if (S != "none" && S != "all" && S != "non-leaf")
2573 CheckFailed("invalid value for 'sign-return-address' attribute: " + S, V);
2574 }
2575
2576 if (auto A = Attrs.getFnAttr("sign-return-address-key"); A.isValid()) {
2577 StringRef S = A.getValueAsString();
2578 if (S != "a_key" && S != "b_key")
2579 CheckFailed("invalid value for 'sign-return-address-key' attribute: " + S,
2580 V);
2581 if (auto AA = Attrs.getFnAttr("sign-return-address"); !AA.isValid()) {
2582 CheckFailed(
2583 "'sign-return-address-key' present without `sign-return-address`");
2584 }
2585 }
2586
2587 if (auto A = Attrs.getFnAttr("branch-target-enforcement"); A.isValid()) {
2588 StringRef S = A.getValueAsString();
2589 if (S != "" && S != "true" && S != "false")
2590 CheckFailed(
2591 "invalid value for 'branch-target-enforcement' attribute: " + S, V);
2592 }
2593
2594 if (auto A = Attrs.getFnAttr("branch-protection-pauth-lr"); A.isValid()) {
2595 StringRef S = A.getValueAsString();
2596 if (S != "" && S != "true" && S != "false")
2597 CheckFailed(
2598 "invalid value for 'branch-protection-pauth-lr' attribute: " + S, V);
2599 }
2600
2601 if (auto A = Attrs.getFnAttr("guarded-control-stack"); A.isValid()) {
2602 StringRef S = A.getValueAsString();
2603 if (S != "" && S != "true" && S != "false")
2604 CheckFailed("invalid value for 'guarded-control-stack' attribute: " + S,
2605 V);
2606 }
2607
2608 if (auto A = Attrs.getFnAttr("vector-function-abi-variant"); A.isValid()) {
2609 StringRef S = A.getValueAsString();
2610 const std::optional<VFInfo> Info = VFABI::tryDemangleForVFABI(S, FT);
2611 if (!Info)
2612 CheckFailed("invalid name for a VFABI variant: " + S, V);
2613 }
2614
2615 if (auto A = Attrs.getFnAttr("denormal-fp-math"); A.isValid()) {
2616 StringRef S = A.getValueAsString();
2617 if (!parseDenormalFPAttribute(S).isValid())
2618 CheckFailed("invalid value for 'denormal-fp-math' attribute: " + S, V);
2619 }
2620
2621 if (auto A = Attrs.getFnAttr("denormal-fp-math-f32"); A.isValid()) {
2622 StringRef S = A.getValueAsString();
2623 if (!parseDenormalFPAttribute(S).isValid())
2624 CheckFailed("invalid value for 'denormal-fp-math-f32' attribute: " + S,
2625 V);
2626 }
2627
2628 if (auto A = Attrs.getFnAttr("modular-format"); A.isValid()) {
2629 StringRef S = A.getValueAsString();
2630 SmallVector<StringRef> Args;
2631 S.split(Args, ',');
2632 Check(Args.size() >= 5,
2633 "modular-format attribute requires at least 5 arguments", V);
2634 unsigned FirstArgIdx;
2635 Check(!Args[2].getAsInteger(10, FirstArgIdx),
2636 "modular-format attribute first arg index is not an integer", V);
2637 unsigned UpperBound = FT->getNumParams() + (FT->isVarArg() ? 1 : 0);
2638 Check(FirstArgIdx > 0 && FirstArgIdx <= UpperBound,
2639 "modular-format attribute first arg index is out of bounds", V);
2640 }
2641
2642 if (auto A = Attrs.getFnAttr("target-features"); A.isValid()) {
2643 StringRef S = A.getValueAsString();
2644 if (!S.empty()) {
2645 for (auto FeatureFlag : split(S, ',')) {
2646 if (FeatureFlag.empty())
2647 CheckFailed(
2648 "target-features attribute should not contain an empty string");
2649 else
2650 Check(FeatureFlag[0] == '+' || FeatureFlag[0] == '-',
2651 "target feature '" + FeatureFlag +
2652 "' must start with a '+' or '-'",
2653 V);
2654 }
2655 }
2656 }
2657}
2658void Verifier::verifyUnknownProfileMetadata(MDNode *MD) {
2659 Check(MD->getNumOperands() == 2,
2660 "'unknown' !prof should have a single additional operand", MD);
2661 auto *PassName = dyn_cast<MDString>(MD->getOperand(1));
2662 Check(PassName != nullptr,
2663 "'unknown' !prof should have an additional operand of type "
2664 "string");
2665 Check(!PassName->getString().empty(),
2666 "the 'unknown' !prof operand should not be an empty string");
2667}
2668
2669void Verifier::verifyFunctionMetadata(
2670 ArrayRef<std::pair<unsigned, MDNode *>> MDs) {
2671 for (const auto &Pair : MDs) {
2672 if (Pair.first == LLVMContext::MD_prof) {
2673 MDNode *MD = Pair.second;
2674 Check(MD->getNumOperands() >= 2,
2675 "!prof annotations should have no less than 2 operands", MD);
2676 // We may have functions that are synthesized by the compiler, e.g. in
2677 // WPD, for which we can't currently determine the entry count.
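// Illustrative well-formed attachments (count and pass name hypothetical):
//   !{!"function_entry_count", i64 100}
//   !{!"unknown", !"sample-profile"}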
2678 if (MD->getOperand(0).equalsStr(
2680 verifyUnknownProfileMetadata(MD);
2681 continue;
2682 }
2683
2684 // Check first operand.
2685 Check(MD->getOperand(0) != nullptr, "first operand should not be null",
2686 MD);
2687 Check(isa<MDString>(MD->getOperand(0)),
2688 "expected string with name of the !prof annotation", MD);
2689 MDString *MDS = cast<MDString>(MD->getOperand(0));
2690 StringRef ProfName = MDS->getString();
2691 Check(ProfName == "function_entry_count" ||
2692 ProfName == "synthetic_function_entry_count",
2693 "first operand should be 'function_entry_count'"
2694 " or 'synthetic_function_entry_count'",
2695 MD);
2696
2697 // Check second operand.
2698 Check(MD->getOperand(1) != nullptr, "second operand should not be null",
2699 MD);
2700 Check(isa<ConstantAsMetadata>(MD->getOperand(1)),
2701 "expected integer argument to function_entry_count", MD);
2702 } else if (Pair.first == LLVMContext::MD_kcfi_type) {
2703 MDNode *MD = Pair.second;
2704 Check(MD->getNumOperands() == 1,
2705 "!kcfi_type must have exactly one operand", MD);
2706 Check(MD->getOperand(0) != nullptr, "!kcfi_type operand must not be null",
2707 MD);
2708 Check(isa<ConstantAsMetadata>(MD->getOperand(0)),
2709 "expected a constant operand for !kcfi_type", MD);
2710 Constant *C = cast<ConstantAsMetadata>(MD->getOperand(0))->getValue();
2711 Check(isa<ConstantInt>(C) && isa<IntegerType>(C->getType()),
2712 "expected a constant integer operand for !kcfi_type", MD);
2714 "expected a 32-bit integer constant operand for !kcfi_type", MD);
2715 }
2716 }
2717}
2718
2719void Verifier::visitConstantExprsRecursively(const Constant *EntryC) {
2720 if (EntryC->getNumOperands() == 0)
2721 return;
2722
2723 if (!ConstantExprVisited.insert(EntryC).second)
2724 return;
2725
2726 SmallVector<const Constant *, 16> Stack;
2727 Stack.push_back(EntryC);
2728
2729 while (!Stack.empty()) {
2730 const Constant *C = Stack.pop_back_val();
2731
2732 // Check this constant expression.
2733 if (const auto *CE = dyn_cast<ConstantExpr>(C))
2734 visitConstantExpr(CE);
2735
2736 if (const auto *CPA = dyn_cast<ConstantPtrAuth>(C))
2737 visitConstantPtrAuth(CPA);
2738
2739 if (const auto *GV = dyn_cast<GlobalValue>(C)) {
2740 // Global Values get visited separately, but we do need to make sure
2741 // that the global value is in the correct module
2742 Check(GV->getParent() == &M, "Referencing global in another module!",
2743 EntryC, &M, GV, GV->getParent());
2744 continue;
2745 }
2746
2747 // Visit all sub-expressions.
2748 for (const Use &U : C->operands()) {
2749 const auto *OpC = dyn_cast<Constant>(U);
2750 if (!OpC)
2751 continue;
2752 if (!ConstantExprVisited.insert(OpC).second)
2753 continue;
2754 Stack.push_back(OpC);
2755 }
2756 }
2757}
2758
2759void Verifier::visitConstantExpr(const ConstantExpr *CE) {
2760 if (CE->getOpcode() == Instruction::BitCast)
2761 Check(CastInst::castIsValid(Instruction::BitCast, CE->getOperand(0),
2762 CE->getType()),
2763 "Invalid bitcast", CE);
2764 else if (CE->getOpcode() == Instruction::PtrToAddr)
2765 checkPtrToAddr(CE->getOperand(0)->getType(), CE->getType(), *CE);
2766}
2767
2768void Verifier::visitConstantPtrAuth(const ConstantPtrAuth *CPA) {
2769 Check(CPA->getPointer()->getType()->isPointerTy(),
2770 "signed ptrauth constant base pointer must have pointer type");
2771
2772 Check(CPA->getType() == CPA->getPointer()->getType(),
2773 "signed ptrauth constant must have same type as its base pointer");
2774
2775 Check(CPA->getKey()->getBitWidth() == 32,
2776 "signed ptrauth constant key must be i32 constant integer");
2777
2778 Check(CPA->getAddrDiscriminator()->getType()->isPointerTy(),
2779 "signed ptrauth constant address discriminator must be a pointer");
2780
2781 Check(CPA->getDiscriminator()->getBitWidth() == 64,
2782 "signed ptrauth constant discriminator must be i64 constant integer");
2783
2784 Check(CPA->getDeactivationSymbol()->getType()->isPointerTy(),
2785 "signed ptrauth constant deactivation symbol must be a pointer");
2786
2787 Check(isa<GlobalValue>(CPA->getDeactivationSymbol()) ||
2788 isa<ConstantPointerNull>(CPA->getDeactivationSymbol()),
2789 "signed ptrauth constant deactivation symbol must be a global value "
2790 "or null");
2791}
2792
2793bool Verifier::verifyAttributeCount(AttributeList Attrs, unsigned Params) {
2794 // There shouldn't be more attribute sets than there are parameters plus the
2795 // function and return value.
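// For example, a function with two parameters may carry at most four
// attribute sets: one per parameter plus the return-value and function sets.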
2796 return Attrs.getNumAttrSets() <= Params + 2;
2797}
2798
2799void Verifier::verifyInlineAsmCall(const CallBase &Call) {
2800 const InlineAsm *IA = cast<InlineAsm>(Call.getCalledOperand());
2801 unsigned ArgNo = 0;
2802 unsigned LabelNo = 0;
2803 for (const InlineAsm::ConstraintInfo &CI : IA->ParseConstraints()) {
2804 if (CI.Type == InlineAsm::isLabel) {
2805 ++LabelNo;
2806 continue;
2807 }
2808
2809 // Only deal with constraints that correspond to call arguments.
2810 if (!CI.hasArg())
2811 continue;
2812
2813 if (CI.isIndirect) {
2814 const Value *Arg = Call.getArgOperand(ArgNo);
2815 Check(Arg->getType()->isPointerTy(),
2816 "Operand for indirect constraint must have pointer type", &Call);
2817
2818 Check(Call.paramHasAttr(ArgNo, Attribute::ElementType),
2819 "Operand for indirect constraint must have elementtype attribute",
2820 &Call);
2821 } else {
2822 Check(!Call.paramHasAttr(ArgNo, Attribute::ElementType),
2823 "Elementtype attribute can only be applied for indirect "
2824 "constraints",
2825 &Call);
2826 }
2827
2828 ArgNo++;
2829 }
2830
2831 if (auto *CallBr = dyn_cast<CallBrInst>(&Call)) {
2832 Check(LabelNo == CallBr->getNumIndirectDests(),
2833 "Number of label constraints does not match number of callbr dests",
2834 &Call);
2835 } else {
2836 Check(LabelNo == 0, "Label constraints can only be used with callbr",
2837 &Call);
2838 }
2839}
2840
2841/// Verify that statepoint intrinsic is well formed.
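/// The operand layout checked below is, roughly (illustrative sketch, not the
/// normative definition; see docs/Statepoints.rst):
///   token @llvm.experimental.gc.statepoint(i64 <id>, i32 <num patch bytes>,
///       ptr elementtype(<fn type>) <target>, i32 <num call args>, i32 <flags>,
///       <call args>..., i32 0 /* transition args */, i32 0 /* deopt args */)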
2842void Verifier::verifyStatepoint(const CallBase &Call) {
2843 assert(Call.getIntrinsicID() == Intrinsic::experimental_gc_statepoint);
2844
2845 Check(!Call.doesNotAccessMemory() && !Call.onlyReadsMemory() &&
2846 !Call.onlyAccessesArgMemory(),
2847 "gc.statepoint must read and write all memory to preserve "
2848 "reordering restrictions required by safepoint semantics",
2849 Call);
2850
2851 const int64_t NumPatchBytes =
2852 cast<ConstantInt>(Call.getArgOperand(1))->getSExtValue();
2853 assert(isInt<32>(NumPatchBytes) && "NumPatchBytesV is an i32!");
2854 Check(NumPatchBytes >= 0,
2855 "gc.statepoint number of patchable bytes must be "
2856 "positive",
2857 Call);
2858
2859 Type *TargetElemType = Call.getParamElementType(2);
2860 Check(TargetElemType,
2861 "gc.statepoint callee argument must have elementtype attribute", Call);
2862 FunctionType *TargetFuncType = dyn_cast<FunctionType>(TargetElemType);
2863 Check(TargetFuncType,
2864 "gc.statepoint callee elementtype must be function type", Call);
2865
2866 const int NumCallArgs = cast<ConstantInt>(Call.getArgOperand(3))->getZExtValue();
2867 Check(NumCallArgs >= 0,
2868 "gc.statepoint number of arguments to underlying call "
2869 "must be positive",
2870 Call);
2871 const int NumParams = (int)TargetFuncType->getNumParams();
2872 if (TargetFuncType->isVarArg()) {
2873 Check(NumCallArgs >= NumParams,
2874 "gc.statepoint mismatch in number of vararg call args", Call);
2875
2876 // TODO: Remove this limitation
2877 Check(TargetFuncType->getReturnType()->isVoidTy(),
2878 "gc.statepoint doesn't support wrapping non-void "
2879 "vararg functions yet",
2880 Call);
2881 } else
2882 Check(NumCallArgs == NumParams,
2883 "gc.statepoint mismatch in number of call args", Call);
2884
2885 const uint64_t Flags
2886 = cast<ConstantInt>(Call.getArgOperand(4))->getZExtValue();
2887 Check((Flags & ~(uint64_t)StatepointFlags::MaskAll) == 0,
2888 "unknown flag used in gc.statepoint flags argument", Call);
2889
2890 // Verify that the types of the call parameter arguments match
2891 // the type of the wrapped callee.
2892 AttributeList Attrs = Call.getAttributes();
2893 for (int i = 0; i < NumParams; i++) {
2894 Type *ParamType = TargetFuncType->getParamType(i);
2895 Type *ArgType = Call.getArgOperand(5 + i)->getType();
2896 Check(ArgType == ParamType,
2897 "gc.statepoint call argument does not match wrapped "
2898 "function type",
2899 Call);
2900
2901 if (TargetFuncType->isVarArg()) {
2902 AttributeSet ArgAttrs = Attrs.getParamAttrs(5 + i);
2903 Check(!ArgAttrs.hasAttribute(Attribute::StructRet),
2904 "Attribute 'sret' cannot be used for vararg call arguments!", Call);
2905 }
2906 }
2907
2908 const int EndCallArgsInx = 4 + NumCallArgs;
2909
2910 const Value *NumTransitionArgsV = Call.getArgOperand(EndCallArgsInx + 1);
2911 Check(isa<ConstantInt>(NumTransitionArgsV),
2912 "gc.statepoint number of transition arguments "
2913 "must be constant integer",
2914 Call);
2915 const int NumTransitionArgs =
2916 cast<ConstantInt>(NumTransitionArgsV)->getZExtValue();
2917 Check(NumTransitionArgs == 0,
2918 "gc.statepoint w/inline transition bundle is deprecated", Call);
2919 const int EndTransitionArgsInx = EndCallArgsInx + 1 + NumTransitionArgs;
2920
2921 const Value *NumDeoptArgsV = Call.getArgOperand(EndTransitionArgsInx + 1);
2922 Check(isa<ConstantInt>(NumDeoptArgsV),
2923 "gc.statepoint number of deoptimization arguments "
2924 "must be constant integer",
2925 Call);
2926 const int NumDeoptArgs = cast<ConstantInt>(NumDeoptArgsV)->getZExtValue();
2927 Check(NumDeoptArgs == 0,
2928 "gc.statepoint w/inline deopt operands is deprecated", Call);
2929
2930 const int ExpectedNumArgs = 7 + NumCallArgs;
2931 Check(ExpectedNumArgs == (int)Call.arg_size(),
2932 "gc.statepoint too many arguments", Call);
2933
2934 // Check that the only uses of this gc.statepoint are gc.result or
2935 // gc.relocate calls which are tied to this statepoint and thus part
2936 // of the same statepoint sequence
2937 for (const User *U : Call.users()) {
2938 const CallInst *UserCall = dyn_cast<const CallInst>(U);
2939 Check(UserCall, "illegal use of statepoint token", Call, U);
2940 if (!UserCall)
2941 continue;
2942 Check(isa<GCRelocateInst>(UserCall) || isa<GCResultInst>(UserCall),
2943 "gc.result or gc.relocate are the only value uses "
2944 "of a gc.statepoint",
2945 Call, U);
2946 if (isa<GCResultInst>(UserCall)) {
2947 Check(UserCall->getArgOperand(0) == &Call,
2948 "gc.result connected to wrong gc.statepoint", Call, UserCall);
2949 } else if (isa<GCRelocateInst>(Call)) {
2950 Check(UserCall->getArgOperand(0) == &Call,
2951 "gc.relocate connected to wrong gc.statepoint", Call, UserCall);
2952 }
2953 }
2954
2955 // Note: It is legal for a single derived pointer to be listed multiple
2956 // times. It's non-optimal, but it is legal. It can also happen after
2957 // insertion if we strip a bitcast away.
2958 // Note: It is really tempting to check that each base is relocated and
2959 // that a derived pointer is never reused as a base pointer. This turns
2960 // out to be problematic since optimizations run after safepoint insertion
2961 // can recognize equality properties that the insertion logic doesn't know
2962 // about. See example statepoint.ll in the verifier subdirectory
2963}
2964
2965void Verifier::verifyFrameRecoverIndices() {
2966 for (auto &Counts : FrameEscapeInfo) {
2967 Function *F = Counts.first;
2968 unsigned EscapedObjectCount = Counts.second.first;
2969 unsigned MaxRecoveredIndex = Counts.second.second;
2970 Check(MaxRecoveredIndex <= EscapedObjectCount,
2971 "all indices passed to llvm.localrecover must be less than the "
2972 "number of arguments passed to llvm.localescape in the parent "
2973 "function",
2974 F);
2975 }
2976}
2977
2978static Instruction *getSuccPad(Instruction *Terminator) {
2979 BasicBlock *UnwindDest;
2980 if (auto *II = dyn_cast<InvokeInst>(Terminator))
2981 UnwindDest = II->getUnwindDest();
2982 else if (auto *CSI = dyn_cast<CatchSwitchInst>(Terminator))
2983 UnwindDest = CSI->getUnwindDest();
2984 else
2985 UnwindDest = cast<CleanupReturnInst>(Terminator)->getUnwindDest();
2986 return &*UnwindDest->getFirstNonPHIIt();
2987}
2988
2989void Verifier::verifySiblingFuncletUnwinds() {
2990 llvm::TimeTraceScope timeScope("Verifier verify sibling funclet unwinds");
2991 SmallPtrSet<Instruction *, 8> Visited;
2992 SmallPtrSet<Instruction *, 8> Active;
2993 for (const auto &Pair : SiblingFuncletInfo) {
2994 Instruction *PredPad = Pair.first;
2995 if (Visited.count(PredPad))
2996 continue;
2997 Active.insert(PredPad);
2998 Instruction *Terminator = Pair.second;
2999 do {
3000 Instruction *SuccPad = getSuccPad(Terminator);
3001 if (Active.count(SuccPad)) {
3002 // Found a cycle; report error
3003 Instruction *CyclePad = SuccPad;
3004 SmallVector<Instruction *, 8> CycleNodes;
3005 do {
3006 CycleNodes.push_back(CyclePad);
3007 Instruction *CycleTerminator = SiblingFuncletInfo[CyclePad];
3008 if (CycleTerminator != CyclePad)
3009 CycleNodes.push_back(CycleTerminator);
3010 CyclePad = getSuccPad(CycleTerminator);
3011 } while (CyclePad != SuccPad);
3012 Check(false, "EH pads can't handle each other's exceptions",
3013 ArrayRef<Instruction *>(CycleNodes));
3014 }
3015 // Don't re-walk a node we've already checked
3016 if (!Visited.insert(SuccPad).second)
3017 break;
3018 // Walk to this successor if it has a map entry.
3019 PredPad = SuccPad;
3020 auto TermI = SiblingFuncletInfo.find(PredPad);
3021 if (TermI == SiblingFuncletInfo.end())
3022 break;
3023 Terminator = TermI->second;
3024 Active.insert(PredPad);
3025 } while (true);
3026 // Each node only has one successor, so we've walked all the active
3027 // nodes' successors.
3028 Active.clear();
3029 }
3030}
3031
3032// visitFunction - Verify that a function is ok.
3033//
3034void Verifier::visitFunction(const Function &F) {
3035 visitGlobalValue(F);
3036
3037 // Check function arguments.
3038 FunctionType *FT = F.getFunctionType();
3039 unsigned NumArgs = F.arg_size();
3040
3041 Check(&Context == &F.getContext(),
3042 "Function context does not match Module context!", &F);
3043
3044 Check(!F.hasCommonLinkage(), "Functions may not have common linkage", &F);
3045 Check(FT->getNumParams() == NumArgs,
3046 "# formal arguments must match # of arguments for function type!", &F,
3047 FT);
3048 Check(F.getReturnType()->isFirstClassType() ||
3049 F.getReturnType()->isVoidTy() || F.getReturnType()->isStructTy(),
3050 "Functions cannot return aggregate values!", &F);
3051
3052 Check(!F.hasStructRetAttr() || F.getReturnType()->isVoidTy(),
3053 "Invalid struct return type!", &F);
3054
3055 if (MaybeAlign A = F.getAlign()) {
3056 Check(A->value() <= Value::MaximumAlignment,
3057 "huge alignment values are unsupported", &F);
3058 }
3059
3060 AttributeList Attrs = F.getAttributes();
3061
3062 Check(verifyAttributeCount(Attrs, FT->getNumParams()),
3063 "Attribute after last parameter!", &F);
3064
3065 bool IsIntrinsic = F.isIntrinsic();
3066
3067 // Check function attributes.
3068 verifyFunctionAttrs(FT, Attrs, &F, IsIntrinsic, /* IsInlineAsm */ false);
3069
3070 // On function declarations/definitions, we do not support the builtin
3071 // attribute. We do not check this in VerifyFunctionAttrs since that is
3072 // checking for Attributes that can/can not ever be on functions.
3073 Check(!Attrs.hasFnAttr(Attribute::Builtin),
3074 "Attribute 'builtin' can only be applied to a callsite.", &F);
3075
3076 Check(!Attrs.hasAttrSomewhere(Attribute::ElementType),
3077 "Attribute 'elementtype' can only be applied to a callsite.", &F);
3078
3079 Check(!Attrs.hasFnAttr("aarch64_zt0_undef"),
3080 "Attribute 'aarch64_zt0_undef' can only be applied to a callsite.");
3081
3082 if (Attrs.hasFnAttr(Attribute::Naked))
3083 for (const Argument &Arg : F.args())
3084 Check(Arg.use_empty(), "cannot use argument of naked function", &Arg);
3085
3086 // Check that this function meets the restrictions on this calling convention.
3087 // Sometimes varargs is used for perfectly forwarding thunks, so some of these
3088 // restrictions can be lifted.
3089 switch (F.getCallingConv()) {
3090 default:
3091 case CallingConv::C:
3092 break;
3093 case CallingConv::X86_INTR: {
3094 Check(F.arg_empty() || Attrs.hasParamAttr(0, Attribute::ByVal),
3095 "Calling convention parameter requires byval", &F);
3096 break;
3097 }
3098 case CallingConv::AMDGPU_KERNEL:
3099 case CallingConv::SPIR_KERNEL:
3100 case CallingConv::AMDGPU_CS_Chain:
3101 case CallingConv::AMDGPU_CS_ChainPreserve:
3102 Check(F.getReturnType()->isVoidTy(),
3103 "Calling convention requires void return type", &F);
3104 [[fallthrough]];
3105 case CallingConv::AMDGPU_VS:
3106 case CallingConv::AMDGPU_HS:
3107 case CallingConv::AMDGPU_GS:
3108 case CallingConv::AMDGPU_PS:
3109 case CallingConv::AMDGPU_CS:
3110 Check(!F.hasStructRetAttr(), "Calling convention does not allow sret", &F);
3111 if (F.getCallingConv() != CallingConv::SPIR_KERNEL) {
3112 const unsigned StackAS = DL.getAllocaAddrSpace();
3113 unsigned i = 0;
3114 for (const Argument &Arg : F.args()) {
3115 Check(!Attrs.hasParamAttr(i, Attribute::ByVal),
3116 "Calling convention disallows byval", &F);
3117 Check(!Attrs.hasParamAttr(i, Attribute::Preallocated),
3118 "Calling convention disallows preallocated", &F);
3119 Check(!Attrs.hasParamAttr(i, Attribute::InAlloca),
3120 "Calling convention disallows inalloca", &F);
3121
3122 if (Attrs.hasParamAttr(i, Attribute::ByRef)) {
3123 // FIXME: Should also disallow LDS and GDS, but we don't have the enum
3124 // value here.
3125 Check(Arg.getType()->getPointerAddressSpace() != StackAS,
3126 "Calling convention disallows stack byref", &F);
3127 }
3128
3129 ++i;
3130 }
3131 }
3132
3133 [[fallthrough]];
3134 case CallingConv::Fast:
3135 case CallingConv::Cold:
3136 case CallingConv::Intel_OCL_BI:
3137 case CallingConv::PTX_Kernel:
3138 case CallingConv::PTX_Device:
3139 Check(!F.isVarArg(),
3140 "Calling convention does not support varargs or "
3141 "perfect forwarding!",
3142 &F);
3143 break;
3144 case CallingConv::AMDGPU_Gfx_WholeWave:
3145 Check(!F.arg_empty() && F.arg_begin()->getType()->isIntegerTy(1),
3146 "Calling convention requires first argument to be i1", &F);
3147 Check(!F.arg_begin()->hasInRegAttr(),
3148 "Calling convention requires first argument to not be inreg", &F);
3149 Check(!F.isVarArg(),
3150 "Calling convention does not support varargs or "
3151 "perfect forwarding!",
3152 &F);
3153 break;
3154 }
3155
3156 // Check that the argument values match the function type for this function...
3157 unsigned i = 0;
3158 for (const Argument &Arg : F.args()) {
3159 Check(Arg.getType() == FT->getParamType(i),
3160 "Argument value does not match function argument type!", &Arg,
3161 FT->getParamType(i));
3162 Check(Arg.getType()->isFirstClassType(),
3163 "Function arguments must have first-class types!", &Arg);
3164 if (!IsIntrinsic) {
3165 Check(!Arg.getType()->isMetadataTy(),
3166 "Function takes metadata but isn't an intrinsic", &Arg, &F);
3167 Check(!Arg.getType()->isTokenLikeTy(),
3168 "Function takes token but isn't an intrinsic", &Arg, &F);
3169 Check(!Arg.getType()->isX86_AMXTy(),
3170 "Function takes x86_amx but isn't an intrinsic", &Arg, &F);
3171 }
3172
3173 // Check that swifterror argument is only used by loads and stores.
3174 if (Attrs.hasParamAttr(i, Attribute::SwiftError)) {
3175 verifySwiftErrorValue(&Arg);
3176 }
3177 ++i;
3178 }
3179
3180 if (!IsIntrinsic) {
3181 Check(!F.getReturnType()->isTokenLikeTy(),
3182 "Function returns a token but isn't an intrinsic", &F);
3183 Check(!F.getReturnType()->isX86_AMXTy(),
3184 "Function returns a x86_amx but isn't an intrinsic", &F);
3185 }
3186
3187 // Get the function metadata attachments.
3188  SmallVector<std::pair<unsigned, MDNode *>, 4> MDs;
3189  F.getAllMetadata(MDs);
3190 assert(F.hasMetadata() != MDs.empty() && "Bit out-of-sync");
3191 verifyFunctionMetadata(MDs);
3192
3193 // Check validity of the personality function
3194 if (F.hasPersonalityFn()) {
3195 auto *Per = dyn_cast<Function>(F.getPersonalityFn()->stripPointerCasts());
3196 if (Per)
3197 Check(Per->getParent() == F.getParent(),
3198 "Referencing personality function in another module!", &F,
3199 F.getParent(), Per, Per->getParent());
3200 }
3201
3202 // EH funclet coloring can be expensive, recompute on-demand
3203 BlockEHFuncletColors.clear();
3204
3205 if (F.isMaterializable()) {
3206 // Function has a body somewhere we can't see.
3207 Check(MDs.empty(), "unmaterialized function cannot have metadata", &F,
3208 MDs.empty() ? nullptr : MDs.front().second);
3209 } else if (F.isDeclaration()) {
3210 for (const auto &I : MDs) {
3211 // This is used for call site debug information.
3212 CheckDI(I.first != LLVMContext::MD_dbg ||
3213 !cast<DISubprogram>(I.second)->isDistinct(),
3214 "function declaration may only have a unique !dbg attachment",
3215 &F);
3216 Check(I.first != LLVMContext::MD_prof,
3217 "function declaration may not have a !prof attachment", &F);
3218
3219 // Verify the metadata itself.
3220 visitMDNode(*I.second, AreDebugLocsAllowed::Yes);
3221 }
3222 Check(!F.hasPersonalityFn(),
3223 "Function declaration shouldn't have a personality routine", &F);
3224 } else {
3225 // Verify that this function (which has a body) is not named "llvm.*". It
3226 // is not legal to define intrinsics.
3227 Check(!IsIntrinsic, "llvm intrinsics cannot be defined!", &F);
3228
3229 // Check the entry node
3230 const BasicBlock *Entry = &F.getEntryBlock();
3231 Check(pred_empty(Entry),
3232 "Entry block to function must not have predecessors!", Entry);
3233
3234 // The address of the entry block cannot be taken, unless it is dead.
3235 if (Entry->hasAddressTaken()) {
3236 Check(!BlockAddress::lookup(Entry)->isConstantUsed(),
3237 "blockaddress may not be used with the entry block!", Entry);
3238 }
3239
3240 unsigned NumDebugAttachments = 0, NumProfAttachments = 0,
3241 NumKCFIAttachments = 0;
3242 // Visit metadata attachments.
3243 for (const auto &I : MDs) {
3244 // Verify that the attachment is legal.
3245 auto AllowLocs = AreDebugLocsAllowed::No;
3246 switch (I.first) {
3247 default:
3248 break;
3249 case LLVMContext::MD_dbg: {
3250 ++NumDebugAttachments;
3251 CheckDI(NumDebugAttachments == 1,
3252 "function must have a single !dbg attachment", &F, I.second);
3253 CheckDI(isa<DISubprogram>(I.second),
3254 "function !dbg attachment must be a subprogram", &F, I.second);
3255 CheckDI(cast<DISubprogram>(I.second)->isDistinct(),
3256 "function definition may only have a distinct !dbg attachment",
3257 &F);
3258
3259 auto *SP = cast<DISubprogram>(I.second);
3260 const Function *&AttachedTo = DISubprogramAttachments[SP];
3261 CheckDI(!AttachedTo || AttachedTo == &F,
3262 "DISubprogram attached to more than one function", SP, &F);
3263 AttachedTo = &F;
3264 AllowLocs = AreDebugLocsAllowed::Yes;
3265 break;
3266 }
3267 case LLVMContext::MD_prof:
3268 ++NumProfAttachments;
3269 Check(NumProfAttachments == 1,
3270 "function must have a single !prof attachment", &F, I.second);
3271 break;
3272 case LLVMContext::MD_kcfi_type:
3273 ++NumKCFIAttachments;
3274 Check(NumKCFIAttachments == 1,
3275 "function must have a single !kcfi_type attachment", &F,
3276 I.second);
3277 break;
3278 }
3279
3280 // Verify the metadata itself.
3281 visitMDNode(*I.second, AllowLocs);
3282 }
3283 }
3284
3285 // If this function is actually an intrinsic, verify that it is only used in
3286 // direct call/invokes, never having its "address taken".
3287 // Only do this if the module is materialized, otherwise we don't have all the
3288 // uses.
3289 if (F.isIntrinsic() && F.getParent()->isMaterialized()) {
3290 const User *U;
3291 if (F.hasAddressTaken(&U, false, true, false,
3292 /*IgnoreARCAttachedCall=*/true))
3293 Check(false, "Invalid user of intrinsic instruction!", U);
3294 }
3295
3296 // Check intrinsics' signatures.
3297 switch (F.getIntrinsicID()) {
3298 case Intrinsic::experimental_gc_get_pointer_base: {
3299 FunctionType *FT = F.getFunctionType();
3300 Check(FT->getNumParams() == 1, "wrong number of parameters", F);
3301 Check(isa<PointerType>(F.getReturnType()),
3302 "gc.get.pointer.base must return a pointer", F);
3303 Check(FT->getParamType(0) == F.getReturnType(),
3304 "gc.get.pointer.base operand and result must be of the same type", F);
3305 break;
3306 }
3307 case Intrinsic::experimental_gc_get_pointer_offset: {
3308 FunctionType *FT = F.getFunctionType();
3309 Check(FT->getNumParams() == 1, "wrong number of parameters", F);
3310 Check(isa<PointerType>(FT->getParamType(0)),
3311 "gc.get.pointer.offset operand must be a pointer", F);
3312 Check(F.getReturnType()->isIntegerTy(),
3313 "gc.get.pointer.offset must return integer", F);
3314 break;
3315 }
3316 }
3317
3318 auto *N = F.getSubprogram();
3319 HasDebugInfo = (N != nullptr);
3320 if (!HasDebugInfo)
3321 return;
3322
3323  // Check that all !dbg attachments lead back to N.
3324 //
3325 // FIXME: Check this incrementally while visiting !dbg attachments.
3326 // FIXME: Only check when N is the canonical subprogram for F.
3327 SmallPtrSet<const MDNode *, 32> Seen;
3328 auto VisitDebugLoc = [&](const Instruction &I, const MDNode *Node) {
3329 // Be careful about using DILocation here since we might be dealing with
3330 // broken code (this is the Verifier after all).
3331 const DILocation *DL = dyn_cast_or_null<DILocation>(Node);
3332 if (!DL)
3333 return;
3334 if (!Seen.insert(DL).second)
3335 return;
3336
3337 Metadata *Parent = DL->getRawScope();
3338 CheckDI(Parent && isa<DILocalScope>(Parent),
3339 "DILocation's scope must be a DILocalScope", N, &F, &I, DL, Parent);
3340
3341 DILocalScope *Scope = DL->getInlinedAtScope();
3342 Check(Scope, "Failed to find DILocalScope", DL);
3343
3344 if (!Seen.insert(Scope).second)
3345 return;
3346
3347 DISubprogram *SP = Scope->getSubprogram();
3348
3349 // Scope and SP could be the same MDNode and we don't want to skip
3350 // validation in that case
3351 if ((Scope != SP) && !Seen.insert(SP).second)
3352 return;
3353
3354 CheckDI(SP->describes(&F),
3355 "!dbg attachment points at wrong subprogram for function", N, &F,
3356 &I, DL, Scope, SP);
3357 };
3358 for (auto &BB : F)
3359 for (auto &I : BB) {
3360 VisitDebugLoc(I, I.getDebugLoc().getAsMDNode());
3361 // The llvm.loop annotations also contain two DILocations.
3362 if (auto MD = I.getMetadata(LLVMContext::MD_loop))
3363 for (unsigned i = 1; i < MD->getNumOperands(); ++i)
3364 VisitDebugLoc(I, dyn_cast_or_null<MDNode>(MD->getOperand(i)));
3365 if (BrokenDebugInfo)
3366 return;
3367 }
3368}
3369
3370// visitBasicBlock - Verify that a basic block is well formed...
3371//
3372void Verifier::visitBasicBlock(BasicBlock &BB) {
3373 InstsInThisBlock.clear();
3374 ConvergenceVerifyHelper.visit(BB);
3375
3376 // Ensure that basic blocks have terminators!
3377 Check(BB.getTerminator(), "Basic Block does not have terminator!", &BB);
3378
3379 // Check constraints that this basic block imposes on all of the PHI nodes in
3380 // it.
3381 if (isa<PHINode>(BB.front())) {
3382 SmallVector<BasicBlock *, 8> Preds(predecessors(&BB));
3383    SmallVector<std::pair<BasicBlock *, Value *>, 8> Values;
3384    llvm::sort(Preds);
3385 for (const PHINode &PN : BB.phis()) {
3386 Check(PN.getNumIncomingValues() == Preds.size(),
3387 "PHINode should have one entry for each predecessor of its "
3388 "parent basic block!",
3389 &PN);
3390
3391 // Get and sort all incoming values in the PHI node...
3392 Values.clear();
3393 Values.reserve(PN.getNumIncomingValues());
3394 for (unsigned i = 0, e = PN.getNumIncomingValues(); i != e; ++i)
3395 Values.push_back(
3396 std::make_pair(PN.getIncomingBlock(i), PN.getIncomingValue(i)));
3397 llvm::sort(Values);
3398
3399 for (unsigned i = 0, e = Values.size(); i != e; ++i) {
3400 // Check to make sure that if there is more than one entry for a
3401 // particular basic block in this PHI node, that the incoming values are
3402 // all identical.
3403 //
3404 Check(i == 0 || Values[i].first != Values[i - 1].first ||
3405 Values[i].second == Values[i - 1].second,
3406 "PHI node has multiple entries for the same basic block with "
3407 "different incoming values!",
3408 &PN, Values[i].first, Values[i].second, Values[i - 1].second);
3409
3410 // Check to make sure that the predecessors and PHI node entries are
3411 // matched up.
3412 Check(Values[i].first == Preds[i],
3413 "PHI node entries do not match predecessors!", &PN,
3414 Values[i].first, Preds[i]);
3415 }
3416 }
3417 }
3418
3419 // Check that all instructions have their parent pointers set up correctly.
3420 for (auto &I : BB)
3421 {
3422 Check(I.getParent() == &BB, "Instruction has bogus parent pointer!");
3423 }
3424
3425 // Confirm that no issues arise from the debug program.
3426 CheckDI(!BB.getTrailingDbgRecords(), "Basic Block has trailing DbgRecords!",
3427 &BB);
3428}
3429
3430void Verifier::visitTerminator(Instruction &I) {
3431 // Ensure that terminators only exist at the end of the basic block.
3432 Check(&I == I.getParent()->getTerminator(),
3433 "Terminator found in the middle of a basic block!", I.getParent());
3434 visitInstruction(I);
3435}
3436
3437void Verifier::visitBranchInst(BranchInst &BI) {
3438 if (BI.isConditional()) {
3440 "Branch condition is not 'i1' type!", &BI, BI.getCondition());
3441 }
3442 visitTerminator(BI);
3443}
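// Illustrative example (editorial note, not part of Verifier.cpp): IR that the
// i1-condition check above accepts vs. rejects; %x and the labels are placeholders.
//   %c = icmp eq i32 %x, 0
//   br i1 %c, label %then, label %else    ; accepted: condition is i1
//   br i32 %x, label %then, label %else   ; rejected: "Branch condition is not 'i1' type!"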
3444
3445void Verifier::visitReturnInst(ReturnInst &RI) {
3446 Function *F = RI.getParent()->getParent();
3447 unsigned N = RI.getNumOperands();
3448 if (F->getReturnType()->isVoidTy())
3449 Check(N == 0,
3450 "Found return instr that returns non-void in Function of void "
3451 "return type!",
3452 &RI, F->getReturnType());
3453 else
3454 Check(N == 1 && F->getReturnType() == RI.getOperand(0)->getType(),
3455 "Function return type does not match operand "
3456 "type of return inst!",
3457 &RI, F->getReturnType());
3458
3459 // Check to make sure that the return value has necessary properties for
3460 // terminators...
3461 visitTerminator(RI);
3462}
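// Illustrative example (editorial note, not part of Verifier.cpp): the return-type
// checks above in IR terms; function names are placeholders.
//   define i32 @f() { ret i32 0 }    ; accepted: operand type matches the return type
//   define i32 @g() { ret void }     ; rejected: non-void function returning nothing
//   define void @h() { ret i32 0 }   ; rejected: void function returning a value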
3463
3464void Verifier::visitSwitchInst(SwitchInst &SI) {
3465 Check(SI.getType()->isVoidTy(), "Switch must have void result type!", &SI);
3466 // Check to make sure that all of the constants in the switch instruction
3467 // have the same type as the switched-on value.
3468 Type *SwitchTy = SI.getCondition()->getType();
3469 SmallPtrSet<ConstantInt*, 32> Constants;
3470 for (auto &Case : SI.cases()) {
3471 Check(isa<ConstantInt>(Case.getCaseValue()),
3472 "Case value is not a constant integer.", &SI);
3473 Check(Case.getCaseValue()->getType() == SwitchTy,
3474 "Switch constants must all be same type as switch value!", &SI);
3475 Check(Constants.insert(Case.getCaseValue()).second,
3476 "Duplicate integer as switch case", &SI, Case.getCaseValue());
3477 }
3478
3479 visitTerminator(SI);
3480}
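// Illustrative example (editorial note, not part of Verifier.cpp): the case-value
// checks above; %x and the labels are placeholders.
//   switch i32 %x, label %default [ i32 0, label %a
//                                   i32 1, label %b ]   ; accepted
//   switch i32 %x, label %default [ i32 0, label %a
//                                   i64 1, label %b ]   ; rejected: case type is not i32
//   switch i32 %x, label %default [ i32 0, label %a
//                                   i32 0, label %b ]   ; rejected: duplicate case value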
3481
3482void Verifier::visitIndirectBrInst(IndirectBrInst &BI) {
3484 "Indirectbr operand must have pointer type!", &BI);
3485 for (unsigned i = 0, e = BI.getNumDestinations(); i != e; ++i)
3487 "Indirectbr destinations must all have pointer type!", &BI);
3488
3489 visitTerminator(BI);
3490}
3491
3492void Verifier::visitCallBrInst(CallBrInst &CBI) {
3493 if (!CBI.isInlineAsm()) {
3495 "Callbr: indirect function / invalid signature");
3496 Check(!CBI.hasOperandBundles(),
3497 "Callbr for intrinsics currently doesn't support operand bundles");
3498
3499 switch (CBI.getIntrinsicID()) {
3500 case Intrinsic::amdgcn_kill: {
3501 Check(CBI.getNumIndirectDests() == 1,
3502 "Callbr amdgcn_kill only supports one indirect dest");
3503 bool Unreachable = isa<UnreachableInst>(CBI.getIndirectDest(0)->begin());
3504 CallInst *Call = dyn_cast<CallInst>(CBI.getIndirectDest(0)->begin());
3505 Check(Unreachable || (Call && Call->getIntrinsicID() ==
3506 Intrinsic::amdgcn_unreachable),
3507 "Callbr amdgcn_kill indirect dest needs to be unreachable");
3508 break;
3509 }
3510 default:
3511 CheckFailed(
3512 "Callbr currently only supports asm-goto and selected intrinsics");
3513 }
3514 visitIntrinsicCall(CBI.getIntrinsicID(), CBI);
3515 } else {
3516 const InlineAsm *IA = cast<InlineAsm>(CBI.getCalledOperand());
3517 Check(!IA->canThrow(), "Unwinding from Callbr is not allowed");
3518
3519 verifyInlineAsmCall(CBI);
3520 }
3521 visitTerminator(CBI);
3522}
3523
3524void Verifier::visitSelectInst(SelectInst &SI) {
3525 Check(!SelectInst::areInvalidOperands(SI.getOperand(0), SI.getOperand(1),
3526 SI.getOperand(2)),
3527 "Invalid operands for select instruction!", &SI);
3528
3529 Check(SI.getTrueValue()->getType() == SI.getType(),
3530 "Select values must have same type as select instruction!", &SI);
3531 visitInstruction(SI);
3532}
3533
3534 /// visitUserOp1 - User-defined operators shouldn't live beyond the lifetime of
3535 /// a pass; if any exist, it's an error.
3536///
3537void Verifier::visitUserOp1(Instruction &I) {
3538 Check(false, "User-defined operators should not live outside of a pass!", &I);
3539}
3540
3541void Verifier::visitTruncInst(TruncInst &I) {
3542 // Get the source and destination types
3543 Type *SrcTy = I.getOperand(0)->getType();
3544 Type *DestTy = I.getType();
3545
3546 // Get the size of the types in bits, we'll need this later
3547 unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
3548 unsigned DestBitSize = DestTy->getScalarSizeInBits();
3549
3550 Check(SrcTy->isIntOrIntVectorTy(), "Trunc only operates on integer", &I);
3551 Check(DestTy->isIntOrIntVectorTy(), "Trunc only produces integer", &I);
3552 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(),
3553 "trunc source and destination must both be a vector or neither", &I);
3554 Check(SrcBitSize > DestBitSize, "DestTy too big for Trunc", &I);
3555
3556 visitInstruction(I);
3557}
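// Illustrative example (editorial note, not part of Verifier.cpp): trunc must go to a
// strictly narrower integer type with matching scalar/vector shape; %x and %v are placeholders.
//   %a = trunc i64 %x to i32          ; accepted
//   %b = trunc i32 %x to i64          ; rejected: destination is wider than the source
//   %c = trunc <4 x i32> %v to i16    ; rejected: vector source, scalar destination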
3558
3559void Verifier::visitZExtInst(ZExtInst &I) {
3560 // Get the source and destination types
3561 Type *SrcTy = I.getOperand(0)->getType();
3562 Type *DestTy = I.getType();
3563
3564 // Get the size of the types in bits, we'll need this later
3565 Check(SrcTy->isIntOrIntVectorTy(), "ZExt only operates on integer", &I);
3566 Check(DestTy->isIntOrIntVectorTy(), "ZExt only produces an integer", &I);
3567 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(),
3568 "zext source and destination must both be a vector or neither", &I);
3569 unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
3570 unsigned DestBitSize = DestTy->getScalarSizeInBits();
3571
3572 Check(SrcBitSize < DestBitSize, "Type too small for ZExt", &I);
3573
3574 visitInstruction(I);
3575}
3576
3577void Verifier::visitSExtInst(SExtInst &I) {
3578 // Get the source and destination types
3579 Type *SrcTy = I.getOperand(0)->getType();
3580 Type *DestTy = I.getType();
3581
3582 // Get the size of the types in bits, we'll need this later
3583 unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
3584 unsigned DestBitSize = DestTy->getScalarSizeInBits();
3585
3586 Check(SrcTy->isIntOrIntVectorTy(), "SExt only operates on integer", &I);
3587 Check(DestTy->isIntOrIntVectorTy(), "SExt only produces an integer", &I);
3588 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(),
3589 "sext source and destination must both be a vector or neither", &I);
3590 Check(SrcBitSize < DestBitSize, "Type too small for SExt", &I);
3591
3592 visitInstruction(I);
3593}
3594
3595void Verifier::visitFPTruncInst(FPTruncInst &I) {
3596 // Get the source and destination types
3597 Type *SrcTy = I.getOperand(0)->getType();
3598 Type *DestTy = I.getType();
3599 // Get the size of the types in bits, we'll need this later
3600 unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
3601 unsigned DestBitSize = DestTy->getScalarSizeInBits();
3602
3603 Check(SrcTy->isFPOrFPVectorTy(), "FPTrunc only operates on FP", &I);
3604 Check(DestTy->isFPOrFPVectorTy(), "FPTrunc only produces an FP", &I);
3605 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(),
3606 "fptrunc source and destination must both be a vector or neither", &I);
3607 Check(SrcBitSize > DestBitSize, "DestTy too big for FPTrunc", &I);
3608
3609 visitInstruction(I);
3610}
3611
3612void Verifier::visitFPExtInst(FPExtInst &I) {
3613 // Get the source and destination types
3614 Type *SrcTy = I.getOperand(0)->getType();
3615 Type *DestTy = I.getType();
3616
3617 // Get the size of the types in bits, we'll need this later
3618 unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
3619 unsigned DestBitSize = DestTy->getScalarSizeInBits();
3620
3621 Check(SrcTy->isFPOrFPVectorTy(), "FPExt only operates on FP", &I);
3622 Check(DestTy->isFPOrFPVectorTy(), "FPExt only produces an FP", &I);
3623 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(),
3624 "fpext source and destination must both be a vector or neither", &I);
3625 Check(SrcBitSize < DestBitSize, "DestTy too small for FPExt", &I);
3626
3627 visitInstruction(I);
3628}
3629
3630void Verifier::visitUIToFPInst(UIToFPInst &I) {
3631 // Get the source and destination types
3632 Type *SrcTy = I.getOperand(0)->getType();
3633 Type *DestTy = I.getType();
3634
3635 bool SrcVec = SrcTy->isVectorTy();
3636 bool DstVec = DestTy->isVectorTy();
3637
3638 Check(SrcVec == DstVec,
3639 "UIToFP source and dest must both be vector or scalar", &I);
3640 Check(SrcTy->isIntOrIntVectorTy(),
3641 "UIToFP source must be integer or integer vector", &I);
3642 Check(DestTy->isFPOrFPVectorTy(), "UIToFP result must be FP or FP vector",
3643 &I);
3644
3645 if (SrcVec && DstVec)
3646 Check(cast<VectorType>(SrcTy)->getElementCount() ==
3647 cast<VectorType>(DestTy)->getElementCount(),
3648 "UIToFP source and dest vector length mismatch", &I);
3649
3650 visitInstruction(I);
3651}
3652
3653void Verifier::visitSIToFPInst(SIToFPInst &I) {
3654 // Get the source and destination types
3655 Type *SrcTy = I.getOperand(0)->getType();
3656 Type *DestTy = I.getType();
3657
3658 bool SrcVec = SrcTy->isVectorTy();
3659 bool DstVec = DestTy->isVectorTy();
3660
3661 Check(SrcVec == DstVec,
3662 "SIToFP source and dest must both be vector or scalar", &I);
3663 Check(SrcTy->isIntOrIntVectorTy(),
3664 "SIToFP source must be integer or integer vector", &I);
3665 Check(DestTy->isFPOrFPVectorTy(), "SIToFP result must be FP or FP vector",
3666 &I);
3667
3668 if (SrcVec && DstVec)
3669 Check(cast<VectorType>(SrcTy)->getElementCount() ==
3670 cast<VectorType>(DestTy)->getElementCount(),
3671 "SIToFP source and dest vector length mismatch", &I);
3672
3673 visitInstruction(I);
3674}
3675
3676void Verifier::visitFPToUIInst(FPToUIInst &I) {
3677 // Get the source and destination types
3678 Type *SrcTy = I.getOperand(0)->getType();
3679 Type *DestTy = I.getType();
3680
3681 bool SrcVec = SrcTy->isVectorTy();
3682 bool DstVec = DestTy->isVectorTy();
3683
3684 Check(SrcVec == DstVec,
3685 "FPToUI source and dest must both be vector or scalar", &I);
3686 Check(SrcTy->isFPOrFPVectorTy(), "FPToUI source must be FP or FP vector", &I);
3687 Check(DestTy->isIntOrIntVectorTy(),
3688 "FPToUI result must be integer or integer vector", &I);
3689
3690 if (SrcVec && DstVec)
3691 Check(cast<VectorType>(SrcTy)->getElementCount() ==
3692 cast<VectorType>(DestTy)->getElementCount(),
3693 "FPToUI source and dest vector length mismatch", &I);
3694
3695 visitInstruction(I);
3696}
3697
3698void Verifier::visitFPToSIInst(FPToSIInst &I) {
3699 // Get the source and destination types
3700 Type *SrcTy = I.getOperand(0)->getType();
3701 Type *DestTy = I.getType();
3702
3703 bool SrcVec = SrcTy->isVectorTy();
3704 bool DstVec = DestTy->isVectorTy();
3705
3706 Check(SrcVec == DstVec,
3707 "FPToSI source and dest must both be vector or scalar", &I);
3708 Check(SrcTy->isFPOrFPVectorTy(), "FPToSI source must be FP or FP vector", &I);
3709 Check(DestTy->isIntOrIntVectorTy(),
3710 "FPToSI result must be integer or integer vector", &I);
3711
3712 if (SrcVec && DstVec)
3713 Check(cast<VectorType>(SrcTy)->getElementCount() ==
3714 cast<VectorType>(DestTy)->getElementCount(),
3715 "FPToSI source and dest vector length mismatch", &I);
3716
3717 visitInstruction(I);
3718}
3719
3720void Verifier::checkPtrToAddr(Type *SrcTy, Type *DestTy, const Value &V) {
3721 Check(SrcTy->isPtrOrPtrVectorTy(), "PtrToAddr source must be pointer", V);
3722 Check(DestTy->isIntOrIntVectorTy(), "PtrToAddr result must be integral", V);
3723 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(), "PtrToAddr type mismatch",
3724 V);
3725
3726 if (SrcTy->isVectorTy()) {
3727 auto *VSrc = cast<VectorType>(SrcTy);
3728 auto *VDest = cast<VectorType>(DestTy);
3729 Check(VSrc->getElementCount() == VDest->getElementCount(),
3730 "PtrToAddr vector length mismatch", V);
3731 }
3732
3733 Type *AddrTy = DL.getAddressType(SrcTy);
3734 Check(AddrTy == DestTy, "PtrToAddr result must be address width", V);
3735}
3736
3737void Verifier::visitPtrToAddrInst(PtrToAddrInst &I) {
3738 checkPtrToAddr(I.getOperand(0)->getType(), I.getType(), I);
3739 visitInstruction(I);
3740}
3741
3742void Verifier::visitPtrToIntInst(PtrToIntInst &I) {
3743 // Get the source and destination types
3744 Type *SrcTy = I.getOperand(0)->getType();
3745 Type *DestTy = I.getType();
3746
3747 Check(SrcTy->isPtrOrPtrVectorTy(), "PtrToInt source must be pointer", &I);
3748
3749 Check(DestTy->isIntOrIntVectorTy(), "PtrToInt result must be integral", &I);
3750 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(), "PtrToInt type mismatch",
3751 &I);
3752
3753 if (SrcTy->isVectorTy()) {
3754 auto *VSrc = cast<VectorType>(SrcTy);
3755 auto *VDest = cast<VectorType>(DestTy);
3756 Check(VSrc->getElementCount() == VDest->getElementCount(),
3757 "PtrToInt Vector length mismatch", &I);
3758 }
3759
3760 visitInstruction(I);
3761}
3762
3763void Verifier::visitIntToPtrInst(IntToPtrInst &I) {
3764 // Get the source and destination types
3765 Type *SrcTy = I.getOperand(0)->getType();
3766 Type *DestTy = I.getType();
3767
3768 Check(SrcTy->isIntOrIntVectorTy(), "IntToPtr source must be an integral", &I);
3769 Check(DestTy->isPtrOrPtrVectorTy(), "IntToPtr result must be a pointer", &I);
3770
3771 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(), "IntToPtr type mismatch",
3772 &I);
3773 if (SrcTy->isVectorTy()) {
3774 auto *VSrc = cast<VectorType>(SrcTy);
3775 auto *VDest = cast<VectorType>(DestTy);
3776 Check(VSrc->getElementCount() == VDest->getElementCount(),
3777 "IntToPtr Vector length mismatch", &I);
3778 }
3779 visitInstruction(I);
3780}
3781
3782void Verifier::visitBitCastInst(BitCastInst &I) {
3783 Check(
3784 CastInst::castIsValid(Instruction::BitCast, I.getOperand(0), I.getType()),
3785 "Invalid bitcast", &I);
3786 visitInstruction(I);
3787}
3788
3789void Verifier::visitAddrSpaceCastInst(AddrSpaceCastInst &I) {
3790 Type *SrcTy = I.getOperand(0)->getType();
3791 Type *DestTy = I.getType();
3792
3793 Check(SrcTy->isPtrOrPtrVectorTy(), "AddrSpaceCast source must be a pointer",
3794 &I);
3795 Check(DestTy->isPtrOrPtrVectorTy(), "AddrSpaceCast result must be a pointer",
3796 &I);
3798 "AddrSpaceCast must be between different address spaces", &I);
3799 if (auto *SrcVTy = dyn_cast<VectorType>(SrcTy))
3800 Check(SrcVTy->getElementCount() ==
3801 cast<VectorType>(DestTy)->getElementCount(),
3802 "AddrSpaceCast vector pointer number of elements mismatch", &I);
3803 visitInstruction(I);
3804}
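// Illustrative example (editorial note, not part of Verifier.cpp): addrspacecast source
// and result must be pointers in different address spaces; %p is a placeholder.
//   %a = addrspacecast ptr addrspace(1) %p to ptr   ; accepted (address space 1 -> 0)
//   %b = addrspacecast ptr %p to ptr                ; rejected: same address space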
3805
3806/// visitPHINode - Ensure that a PHI node is well formed.
3807///
3808void Verifier::visitPHINode(PHINode &PN) {
3809 // Ensure that the PHI nodes are all grouped together at the top of the block.
3810 // This can be tested by checking whether the instruction before this is
3811 // either nonexistent (because this is begin()) or is a PHI node. If not,
3812 // then there is some other instruction before a PHI.
3813 Check(&PN == &PN.getParent()->front() ||
3815 "PHI nodes not grouped at top of basic block!", &PN, PN.getParent());
3816
3817 // Check that a PHI doesn't yield a Token.
3818 Check(!PN.getType()->isTokenLikeTy(), "PHI nodes cannot have token type!");
3819
3820 // Check that all of the values of the PHI node have the same type as the
3821 // result.
3822 for (Value *IncValue : PN.incoming_values()) {
3823 Check(PN.getType() == IncValue->getType(),
3824 "PHI node operands are not the same type as the result!", &PN);
3825 }
3826
3827 // All other PHI node constraints are checked in the visitBasicBlock method.
3828
3829 visitInstruction(PN);
3830}
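// Illustrative example (editorial note, not part of Verifier.cpp): PHIs must be grouped
// at the top of their block and typed like their incoming values; names are placeholders.
//   bb:
//     %p = phi i32 [ 0, %pred1 ], [ %v, %pred2 ]   ; accepted: PHI at the top of the block
//     %q = add i32 %p, 1
//     %r = phi i32 [ 1, %pred1 ], [ 2, %pred2 ]    ; rejected: PHI after a non-PHI instruction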
3831
3832void Verifier::visitCallBase(CallBase &Call) {
3834 "Called function must be a pointer!", Call);
3835 FunctionType *FTy = Call.getFunctionType();
3836
3837 // Verify that the correct number of arguments are being passed
3838 if (FTy->isVarArg())
3839 Check(Call.arg_size() >= FTy->getNumParams(),
3840 "Called function requires more parameters than were provided!", Call);
3841 else
3842 Check(Call.arg_size() == FTy->getNumParams(),
3843 "Incorrect number of arguments passed to called function!", Call);
3844
3845 // Verify that all arguments to the call match the function type.
3846 for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i)
3847 Check(Call.getArgOperand(i)->getType() == FTy->getParamType(i),
3848 "Call parameter type does not match function signature!",
3849 Call.getArgOperand(i), FTy->getParamType(i), Call);
3850
3851 AttributeList Attrs = Call.getAttributes();
3852
3853 Check(verifyAttributeCount(Attrs, Call.arg_size()),
3854 "Attribute after last parameter!", Call);
3855
3856 Function *Callee =
3857      dyn_cast<Function>(Call.getCalledOperand()->stripPointerCasts());
3858  bool IsIntrinsic = Callee && Callee->isIntrinsic();
3859 if (IsIntrinsic)
3860 Check(Callee->getValueType() == FTy,
3861 "Intrinsic called with incompatible signature", Call);
3862
3863 // Verify if the calling convention of the callee is callable.
3865 "calling convention does not permit calls", Call);
3866
3867 // Disallow passing/returning values with alignment higher than we can
3868 // represent.
3869 // FIXME: Consider making DataLayout cap the alignment, so this isn't
3870 // necessary.
3871 auto VerifyTypeAlign = [&](Type *Ty, const Twine &Message) {
3872 if (!Ty->isSized())
3873 return;
3874 Align ABIAlign = DL.getABITypeAlign(Ty);
3875 Check(ABIAlign.value() <= Value::MaximumAlignment,
3876 "Incorrect alignment of " + Message + " to called function!", Call);
3877 };
3878
3879 if (!IsIntrinsic) {
3880 VerifyTypeAlign(FTy->getReturnType(), "return type");
3881 for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i) {
3882 Type *Ty = FTy->getParamType(i);
3883 VerifyTypeAlign(Ty, "argument passed");
3884 }
3885 }
3886
3887 if (Attrs.hasFnAttr(Attribute::Speculatable)) {
3888 // Don't allow speculatable on call sites, unless the underlying function
3889 // declaration is also speculatable.
3890 Check(Callee && Callee->isSpeculatable(),
3891 "speculatable attribute may not apply to call sites", Call);
3892 }
3893
3894 if (Attrs.hasFnAttr(Attribute::Preallocated)) {
3895 Check(Call.getIntrinsicID() == Intrinsic::call_preallocated_arg,
3896 "preallocated as a call site attribute can only be on "
3897 "llvm.call.preallocated.arg");
3898 }
3899
3900 // Verify call attributes.
3901 verifyFunctionAttrs(FTy, Attrs, &Call, IsIntrinsic, Call.isInlineAsm());
3902
3903 // Conservatively check the inalloca argument.
3904 // We have a bug if we can find that there is an underlying alloca without
3905 // inalloca.
3906 if (Call.hasInAllocaArgument()) {
3907 Value *InAllocaArg = Call.getArgOperand(FTy->getNumParams() - 1);
3908 if (auto AI = dyn_cast<AllocaInst>(InAllocaArg->stripInBoundsOffsets()))
3909 Check(AI->isUsedWithInAlloca(),
3910 "inalloca argument for call has mismatched alloca", AI, Call);
3911 }
3912
3913  // For each argument of the callsite, if it has the swifterror attribute,
3914 // make sure the underlying alloca/parameter it comes from has a swifterror as
3915 // well.
3916 for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i) {
3917 if (Call.paramHasAttr(i, Attribute::SwiftError)) {
3918 Value *SwiftErrorArg = Call.getArgOperand(i);
3919 if (auto AI = dyn_cast<AllocaInst>(SwiftErrorArg->stripInBoundsOffsets())) {
3920 Check(AI->isSwiftError(),
3921 "swifterror argument for call has mismatched alloca", AI, Call);
3922 continue;
3923 }
3924 auto ArgI = dyn_cast<Argument>(SwiftErrorArg);
3925 Check(ArgI, "swifterror argument should come from an alloca or parameter",
3926 SwiftErrorArg, Call);
3927 Check(ArgI->hasSwiftErrorAttr(),
3928 "swifterror argument for call has mismatched parameter", ArgI,
3929 Call);
3930 }
3931
3932 if (Attrs.hasParamAttr(i, Attribute::ImmArg)) {
3933 // Don't allow immarg on call sites, unless the underlying declaration
3934 // also has the matching immarg.
3935 Check(Callee && Callee->hasParamAttribute(i, Attribute::ImmArg),
3936 "immarg may not apply only to call sites", Call.getArgOperand(i),
3937 Call);
3938 }
3939
3940 if (Call.paramHasAttr(i, Attribute::ImmArg)) {
3941 Value *ArgVal = Call.getArgOperand(i);
3942 Check(isa<ConstantInt>(ArgVal) || isa<ConstantFP>(ArgVal),
3943 "immarg operand has non-immediate parameter", ArgVal, Call);
3944
3945 // If the imm-arg is an integer and also has a range attached,
3946 // check if the given value is within the range.
3947 if (Call.paramHasAttr(i, Attribute::Range)) {
3948 if (auto *CI = dyn_cast<ConstantInt>(ArgVal)) {
3949 const ConstantRange &CR =
3950 Call.getParamAttr(i, Attribute::Range).getValueAsConstantRange();
3951 Check(CR.contains(CI->getValue()),
3952 "immarg value " + Twine(CI->getValue().getSExtValue()) +
3953 " out of range [" + Twine(CR.getLower().getSExtValue()) +
3954 ", " + Twine(CR.getUpper().getSExtValue()) + ")",
3955 Call);
3956 }
3957 }
3958 }
3959
3960 if (Call.paramHasAttr(i, Attribute::Preallocated)) {
3961 Value *ArgVal = Call.getArgOperand(i);
3962 bool hasOB =
3963          Call.countOperandBundlesOfType(LLVMContext::OB_preallocated) != 0;
3964      bool isMustTail = Call.isMustTailCall();
3965 Check(hasOB != isMustTail,
3966 "preallocated operand either requires a preallocated bundle or "
3967 "the call to be musttail (but not both)",
3968 ArgVal, Call);
3969 }
3970 }
3971
3972 if (FTy->isVarArg()) {
3973 // FIXME? is 'nest' even legal here?
3974 bool SawNest = false;
3975 bool SawReturned = false;
3976
3977 for (unsigned Idx = 0; Idx < FTy->getNumParams(); ++Idx) {
3978 if (Attrs.hasParamAttr(Idx, Attribute::Nest))
3979 SawNest = true;
3980 if (Attrs.hasParamAttr(Idx, Attribute::Returned))
3981 SawReturned = true;
3982 }
3983
3984 // Check attributes on the varargs part.
3985 for (unsigned Idx = FTy->getNumParams(); Idx < Call.arg_size(); ++Idx) {
3986 Type *Ty = Call.getArgOperand(Idx)->getType();
3987 AttributeSet ArgAttrs = Attrs.getParamAttrs(Idx);
3988 verifyParameterAttrs(ArgAttrs, Ty, &Call);
3989
3990 if (ArgAttrs.hasAttribute(Attribute::Nest)) {
3991 Check(!SawNest, "More than one parameter has attribute nest!", Call);
3992 SawNest = true;
3993 }
3994
3995 if (ArgAttrs.hasAttribute(Attribute::Returned)) {
3996 Check(!SawReturned, "More than one parameter has attribute returned!",
3997 Call);
3998 Check(Ty->canLosslesslyBitCastTo(FTy->getReturnType()),
3999 "Incompatible argument and return types for 'returned' "
4000 "attribute",
4001 Call);
4002 SawReturned = true;
4003 }
4004
4005 // Statepoint intrinsic is vararg but the wrapped function may be not.
4006 // Allow sret here and check the wrapped function in verifyStatepoint.
4007 if (Call.getIntrinsicID() != Intrinsic::experimental_gc_statepoint)
4008 Check(!ArgAttrs.hasAttribute(Attribute::StructRet),
4009 "Attribute 'sret' cannot be used for vararg call arguments!",
4010 Call);
4011
4012 if (ArgAttrs.hasAttribute(Attribute::InAlloca))
4013 Check(Idx == Call.arg_size() - 1,
4014 "inalloca isn't on the last argument!", Call);
4015 }
4016 }
4017
4018 // Verify that there's no metadata unless it's a direct call to an intrinsic.
4019 if (!IsIntrinsic) {
4020 for (Type *ParamTy : FTy->params()) {
4021 Check(!ParamTy->isMetadataTy(),
4022 "Function has metadata parameter but isn't an intrinsic", Call);
4023 Check(!ParamTy->isTokenLikeTy(),
4024 "Function has token parameter but isn't an intrinsic", Call);
4025 }
4026 }
4027
4028 // Verify that indirect calls don't return tokens.
4029 if (!Call.getCalledFunction()) {
4030 Check(!FTy->getReturnType()->isTokenLikeTy(),
4031 "Return type cannot be token for indirect call!");
4032 Check(!FTy->getReturnType()->isX86_AMXTy(),
4033 "Return type cannot be x86_amx for indirect call!");
4034 }
4035
4035  if (Intrinsic::ID ID = Call.getIntrinsicID())
4036    visitIntrinsicCall(ID, Call);
4038
4039 // Verify that a callsite has at most one "deopt", at most one "funclet", at
4040 // most one "gc-transition", at most one "cfguardtarget", at most one
4041 // "preallocated" operand bundle, and at most one "ptrauth" operand bundle.
4042 bool FoundDeoptBundle = false, FoundFuncletBundle = false,
4043 FoundGCTransitionBundle = false, FoundCFGuardTargetBundle = false,
4044 FoundPreallocatedBundle = false, FoundGCLiveBundle = false,
4045 FoundPtrauthBundle = false, FoundKCFIBundle = false,
4046 FoundAttachedCallBundle = false;
4047 for (unsigned i = 0, e = Call.getNumOperandBundles(); i < e; ++i) {
4048 OperandBundleUse BU = Call.getOperandBundleAt(i);
4049 uint32_t Tag = BU.getTagID();
4050 if (Tag == LLVMContext::OB_deopt) {
4051 Check(!FoundDeoptBundle, "Multiple deopt operand bundles", Call);
4052 FoundDeoptBundle = true;
4053 } else if (Tag == LLVMContext::OB_gc_transition) {
4054 Check(!FoundGCTransitionBundle, "Multiple gc-transition operand bundles",
4055 Call);
4056 FoundGCTransitionBundle = true;
4057 } else if (Tag == LLVMContext::OB_funclet) {
4058 Check(!FoundFuncletBundle, "Multiple funclet operand bundles", Call);
4059 FoundFuncletBundle = true;
4060 Check(BU.Inputs.size() == 1,
4061 "Expected exactly one funclet bundle operand", Call);
4062 Check(isa<FuncletPadInst>(BU.Inputs.front()),
4063 "Funclet bundle operands should correspond to a FuncletPadInst",
4064 Call);
4065 } else if (Tag == LLVMContext::OB_cfguardtarget) {
4066 Check(!FoundCFGuardTargetBundle, "Multiple CFGuardTarget operand bundles",
4067 Call);
4068 FoundCFGuardTargetBundle = true;
4069 Check(BU.Inputs.size() == 1,
4070 "Expected exactly one cfguardtarget bundle operand", Call);
4071 } else if (Tag == LLVMContext::OB_ptrauth) {
4072 Check(!FoundPtrauthBundle, "Multiple ptrauth operand bundles", Call);
4073 FoundPtrauthBundle = true;
4074 Check(BU.Inputs.size() == 2,
4075 "Expected exactly two ptrauth bundle operands", Call);
4076 Check(isa<ConstantInt>(BU.Inputs[0]) &&
4077 BU.Inputs[0]->getType()->isIntegerTy(32),
4078 "Ptrauth bundle key operand must be an i32 constant", Call);
4079 Check(BU.Inputs[1]->getType()->isIntegerTy(64),
4080 "Ptrauth bundle discriminator operand must be an i64", Call);
4081 } else if (Tag == LLVMContext::OB_kcfi) {
4082 Check(!FoundKCFIBundle, "Multiple kcfi operand bundles", Call);
4083 FoundKCFIBundle = true;
4084 Check(BU.Inputs.size() == 1, "Expected exactly one kcfi bundle operand",
4085 Call);
4086 Check(isa<ConstantInt>(BU.Inputs[0]) &&
4087 BU.Inputs[0]->getType()->isIntegerTy(32),
4088 "Kcfi bundle operand must be an i32 constant", Call);
4089 } else if (Tag == LLVMContext::OB_preallocated) {
4090 Check(!FoundPreallocatedBundle, "Multiple preallocated operand bundles",
4091 Call);
4092 FoundPreallocatedBundle = true;
4093 Check(BU.Inputs.size() == 1,
4094 "Expected exactly one preallocated bundle operand", Call);
4095 auto Input = dyn_cast<IntrinsicInst>(BU.Inputs.front());
4096 Check(Input &&
4097 Input->getIntrinsicID() == Intrinsic::call_preallocated_setup,
4098 "\"preallocated\" argument must be a token from "
4099 "llvm.call.preallocated.setup",
4100 Call);
4101 } else if (Tag == LLVMContext::OB_gc_live) {
4102 Check(!FoundGCLiveBundle, "Multiple gc-live operand bundles", Call);
4103 FoundGCLiveBundle = true;
4104    } else if (Tag == LLVMContext::OB_clang_arc_attachedcall) {
4105      Check(!FoundAttachedCallBundle,
4106 "Multiple \"clang.arc.attachedcall\" operand bundles", Call);
4107 FoundAttachedCallBundle = true;
4108 verifyAttachedCallBundle(Call, BU);
4109 }
4110 }
4111
4112 // Verify that callee and callsite agree on whether to use pointer auth.
4113 Check(!(Call.getCalledFunction() && FoundPtrauthBundle),
4114 "Direct call cannot have a ptrauth bundle", Call);
4115
4116 // Verify that each inlinable callsite of a debug-info-bearing function in a
4117 // debug-info-bearing function has a debug location attached to it. Failure to
4118 // do so causes assertion failures when the inliner sets up inline scope info
4119 // (Interposable functions are not inlinable, neither are functions without
4120 // definitions.)
4126 "inlinable function call in a function with "
4127 "debug info must have a !dbg location",
4128 Call);
4129
4130 if (Call.isInlineAsm())
4131 verifyInlineAsmCall(Call);
4132
4133 ConvergenceVerifyHelper.visit(Call);
4134
4135 visitInstruction(Call);
4136}
4137
4138void Verifier::verifyTailCCMustTailAttrs(const AttrBuilder &Attrs,
4139 StringRef Context) {
4140 Check(!Attrs.contains(Attribute::InAlloca),
4141 Twine("inalloca attribute not allowed in ") + Context);
4142 Check(!Attrs.contains(Attribute::InReg),
4143 Twine("inreg attribute not allowed in ") + Context);
4144 Check(!Attrs.contains(Attribute::SwiftError),
4145 Twine("swifterror attribute not allowed in ") + Context);
4146 Check(!Attrs.contains(Attribute::Preallocated),
4147 Twine("preallocated attribute not allowed in ") + Context);
4148 Check(!Attrs.contains(Attribute::ByRef),
4149 Twine("byref attribute not allowed in ") + Context);
4150}
4151
4152/// Two types are "congruent" if they are identical, or if they are both pointer
4153/// types with different pointee types and the same address space.
4154static bool isTypeCongruent(Type *L, Type *R) {
4155 if (L == R)
4156 return true;
4157  PointerType *PL = dyn_cast<PointerType>(L);
4158  PointerType *PR = dyn_cast<PointerType>(R);
4159  if (!PL || !PR)
4160 return false;
4161 return PL->getAddressSpace() == PR->getAddressSpace();
4162}
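// Illustrative example (editorial note, not part of Verifier.cpp): under this definition,
// two pointer types are congruent exactly when their address spaces match.
//   ptr              vs. ptr               -> congruent
//   ptr addrspace(1) vs. ptr addrspace(1)  -> congruent
//   ptr              vs. ptr addrspace(1)  -> not congruent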
4163
4164static AttrBuilder getParameterABIAttributes(LLVMContext& C, unsigned I, AttributeList Attrs) {
4165 static const Attribute::AttrKind ABIAttrs[] = {
4166 Attribute::StructRet, Attribute::ByVal, Attribute::InAlloca,
4167 Attribute::InReg, Attribute::StackAlignment, Attribute::SwiftSelf,
4168 Attribute::SwiftAsync, Attribute::SwiftError, Attribute::Preallocated,
4169 Attribute::ByRef};
4170 AttrBuilder Copy(C);
4171 for (auto AK : ABIAttrs) {
4172 Attribute Attr = Attrs.getParamAttrs(I).getAttribute(AK);
4173 if (Attr.isValid())
4174 Copy.addAttribute(Attr);
4175 }
4176
4177 // `align` is ABI-affecting only in combination with `byval` or `byref`.
4178 if (Attrs.hasParamAttr(I, Attribute::Alignment) &&
4179 (Attrs.hasParamAttr(I, Attribute::ByVal) ||
4180 Attrs.hasParamAttr(I, Attribute::ByRef)))
4181 Copy.addAlignmentAttr(Attrs.getParamAlignment(I));
4182 return Copy;
4183}
4184
4185void Verifier::verifyMustTailCall(CallInst &CI) {
4186 Check(!CI.isInlineAsm(), "cannot use musttail call with inline asm", &CI);
4187
4188 Function *F = CI.getParent()->getParent();
4189 FunctionType *CallerTy = F->getFunctionType();
4190 FunctionType *CalleeTy = CI.getFunctionType();
4191 Check(CallerTy->isVarArg() == CalleeTy->isVarArg(),
4192 "cannot guarantee tail call due to mismatched varargs", &CI);
4193 Check(isTypeCongruent(CallerTy->getReturnType(), CalleeTy->getReturnType()),
4194 "cannot guarantee tail call due to mismatched return types", &CI);
4195
4196 // - The calling conventions of the caller and callee must match.
4197 Check(F->getCallingConv() == CI.getCallingConv(),
4198 "cannot guarantee tail call due to mismatched calling conv", &CI);
4199
4200 // - The call must immediately precede a :ref:`ret <i_ret>` instruction,
4201 // or a pointer bitcast followed by a ret instruction.
4202 // - The ret instruction must return the (possibly bitcasted) value
4203 // produced by the call or void.
4204 Value *RetVal = &CI;
4205  Instruction *Next = CI.getNextNode();
4206
4207 // Handle the optional bitcast.
4208 if (BitCastInst *BI = dyn_cast_or_null<BitCastInst>(Next)) {
4209 Check(BI->getOperand(0) == RetVal,
4210 "bitcast following musttail call must use the call", BI);
4211 RetVal = BI;
4212 Next = BI->getNextNode();
4213 }
4214
4215 // Check the return.
4216 ReturnInst *Ret = dyn_cast_or_null<ReturnInst>(Next);
4217 Check(Ret, "musttail call must precede a ret with an optional bitcast", &CI);
4218 Check(!Ret->getReturnValue() || Ret->getReturnValue() == RetVal ||
4220 "musttail call result must be returned", Ret);
4221
4222 AttributeList CallerAttrs = F->getAttributes();
4223 AttributeList CalleeAttrs = CI.getAttributes();
4224 if (CI.getCallingConv() == CallingConv::SwiftTail ||
4225 CI.getCallingConv() == CallingConv::Tail) {
4226 StringRef CCName =
4227 CI.getCallingConv() == CallingConv::Tail ? "tailcc" : "swifttailcc";
4228
4229 // - Only sret, byval, swiftself, and swiftasync ABI-impacting attributes
4230 // are allowed in swifttailcc call
4231 for (unsigned I = 0, E = CallerTy->getNumParams(); I != E; ++I) {
4232 AttrBuilder ABIAttrs = getParameterABIAttributes(F->getContext(), I, CallerAttrs);
4233 SmallString<32> Context{CCName, StringRef(" musttail caller")};
4234 verifyTailCCMustTailAttrs(ABIAttrs, Context);
4235 }
4236 for (unsigned I = 0, E = CalleeTy->getNumParams(); I != E; ++I) {
4237 AttrBuilder ABIAttrs = getParameterABIAttributes(F->getContext(), I, CalleeAttrs);
4238 SmallString<32> Context{CCName, StringRef(" musttail callee")};
4239 verifyTailCCMustTailAttrs(ABIAttrs, Context);
4240 }
4241 // - Varargs functions are not allowed
4242 Check(!CallerTy->isVarArg(), Twine("cannot guarantee ") + CCName +
4243 " tail call for varargs function");
4244 return;
4245 }
4246
4247 // - The caller and callee prototypes must match. Pointer types of
4248 // parameters or return types may differ in pointee type, but not
4249 // address space.
4250 if (!CI.getIntrinsicID()) {
4251 Check(CallerTy->getNumParams() == CalleeTy->getNumParams(),
4252 "cannot guarantee tail call due to mismatched parameter counts", &CI);
4253 for (unsigned I = 0, E = CallerTy->getNumParams(); I != E; ++I) {
4254 Check(
4255 isTypeCongruent(CallerTy->getParamType(I), CalleeTy->getParamType(I)),
4256 "cannot guarantee tail call due to mismatched parameter types", &CI);
4257 }
4258 }
4259
4260 // - All ABI-impacting function attributes, such as sret, byval, inreg,
4261 // returned, preallocated, and inalloca, must match.
4262 for (unsigned I = 0, E = CallerTy->getNumParams(); I != E; ++I) {
4263 AttrBuilder CallerABIAttrs = getParameterABIAttributes(F->getContext(), I, CallerAttrs);
4264 AttrBuilder CalleeABIAttrs = getParameterABIAttributes(F->getContext(), I, CalleeAttrs);
4265 Check(CallerABIAttrs == CalleeABIAttrs,
4266 "cannot guarantee tail call due to mismatched ABI impacting "
4267 "function attributes",
4268 &CI, CI.getOperand(I));
4269 }
4270}
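// Illustrative example (editorial note, not part of Verifier.cpp): the shape this
// routine expects; @caller and @callee are placeholders with matching prototypes and
// calling conventions.
//   define i32 @caller(i32 %x) {
//     %r = musttail call i32 @callee(i32 %x)
//     ret i32 %r        ; accepted: the musttail call immediately precedes the ret
//   }
// Inserting other instructions between the call and the ret, or mismatching varargs,
// calling conventions, or ABI-impacting attributes, is rejected by the checks above.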
4271
4272void Verifier::visitCallInst(CallInst &CI) {
4273 visitCallBase(CI);
4274
4275 if (CI.isMustTailCall())
4276 verifyMustTailCall(CI);
4277}
4278
4279void Verifier::visitInvokeInst(InvokeInst &II) {
4280 visitCallBase(II);
4281
4282 // Verify that the first non-PHI instruction of the unwind destination is an
4283 // exception handling instruction.
4284 Check(
4285 II.getUnwindDest()->isEHPad(),
4286 "The unwind destination does not have an exception handling instruction!",
4287 &II);
4288
4289 visitTerminator(II);
4290}
4291
4292/// visitUnaryOperator - Check the argument to the unary operator.
4293///
4294void Verifier::visitUnaryOperator(UnaryOperator &U) {
4295 Check(U.getType() == U.getOperand(0)->getType(),
4296 "Unary operators must have same type for"
4297 "operands and result!",
4298 &U);
4299
4300 switch (U.getOpcode()) {
4301 // Check that floating-point arithmetic operators are only used with
4302 // floating-point operands.
4303 case Instruction::FNeg:
4304 Check(U.getType()->isFPOrFPVectorTy(),
4305 "FNeg operator only works with float types!", &U);
4306 break;
4307 default:
4308 llvm_unreachable("Unknown UnaryOperator opcode!");
4309 }
4310
4311 visitInstruction(U);
4312}
4313
4314/// visitBinaryOperator - Check that both arguments to the binary operator are
4315/// of the same type!
4316///
4317void Verifier::visitBinaryOperator(BinaryOperator &B) {
4318 Check(B.getOperand(0)->getType() == B.getOperand(1)->getType(),
4319 "Both operands to a binary operator are not of the same type!", &B);
4320
4321 switch (B.getOpcode()) {
4322 // Check that integer arithmetic operators are only used with
4323 // integral operands.
4324 case Instruction::Add:
4325 case Instruction::Sub:
4326 case Instruction::Mul:
4327 case Instruction::SDiv:
4328 case Instruction::UDiv:
4329 case Instruction::SRem:
4330 case Instruction::URem:
4331 Check(B.getType()->isIntOrIntVectorTy(),
4332 "Integer arithmetic operators only work with integral types!", &B);
4333 Check(B.getType() == B.getOperand(0)->getType(),
4334 "Integer arithmetic operators must have same type "
4335 "for operands and result!",
4336 &B);
4337 break;
4338 // Check that floating-point arithmetic operators are only used with
4339 // floating-point operands.
4340 case Instruction::FAdd:
4341 case Instruction::FSub:
4342 case Instruction::FMul:
4343 case Instruction::FDiv:
4344 case Instruction::FRem:
4345 Check(B.getType()->isFPOrFPVectorTy(),
4346 "Floating-point arithmetic operators only work with "
4347 "floating-point types!",
4348 &B);
4349 Check(B.getType() == B.getOperand(0)->getType(),
4350 "Floating-point arithmetic operators must have same type "
4351 "for operands and result!",
4352 &B);
4353 break;
4354 // Check that logical operators are only used with integral operands.
4355 case Instruction::And:
4356 case Instruction::Or:
4357 case Instruction::Xor:
4358 Check(B.getType()->isIntOrIntVectorTy(),
4359 "Logical operators only work with integral types!", &B);
4360 Check(B.getType() == B.getOperand(0)->getType(),
4361 "Logical operators must have same type for operands and result!", &B);
4362 break;
4363 case Instruction::Shl:
4364 case Instruction::LShr:
4365 case Instruction::AShr:
4366 Check(B.getType()->isIntOrIntVectorTy(),
4367 "Shifts only work with integral types!", &B);
4368 Check(B.getType() == B.getOperand(0)->getType(),
4369 "Shift return type must be same as operands!", &B);
4370 break;
4371 default:
4372 llvm_unreachable("Unknown BinaryOperator opcode!");
4373 }
4374
4375 visitInstruction(B);
4376}
4377
4378void Verifier::visitICmpInst(ICmpInst &IC) {
4379 // Check that the operands are the same type
4380 Type *Op0Ty = IC.getOperand(0)->getType();
4381 Type *Op1Ty = IC.getOperand(1)->getType();
4382 Check(Op0Ty == Op1Ty,
4383 "Both operands to ICmp instruction are not of the same type!", &IC);
4384 // Check that the operands are the right type
4385 Check(Op0Ty->isIntOrIntVectorTy() || Op0Ty->isPtrOrPtrVectorTy(),
4386 "Invalid operand types for ICmp instruction", &IC);
4387 // Check that the predicate is valid.
4388 Check(IC.isIntPredicate(), "Invalid predicate in ICmp instruction!", &IC);
4389
4390 visitInstruction(IC);
4391}
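// Illustrative example (editorial note, not part of Verifier.cpp): icmp operands must be
// integers or pointers of the same type, compared with an integer predicate.
//   %a = icmp eq i32 %x, %y       ; accepted
//   %b = icmp slt float %f, %g    ; rejected: floating-point operands belong in fcmp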
4392
4393void Verifier::visitFCmpInst(FCmpInst &FC) {
4394 // Check that the operands are the same type
4395 Type *Op0Ty = FC.getOperand(0)->getType();
4396 Type *Op1Ty = FC.getOperand(1)->getType();
4397 Check(Op0Ty == Op1Ty,
4398 "Both operands to FCmp instruction are not of the same type!", &FC);
4399 // Check that the operands are the right type
4400 Check(Op0Ty->isFPOrFPVectorTy(), "Invalid operand types for FCmp instruction",
4401 &FC);
4402 // Check that the predicate is valid.
4403 Check(FC.isFPPredicate(), "Invalid predicate in FCmp instruction!", &FC);
4404
4405 visitInstruction(FC);
4406}
4407
4408void Verifier::visitExtractElementInst(ExtractElementInst &EI) {
4410 "Invalid extractelement operands!", &EI);
4411 visitInstruction(EI);
4412}
4413
4414void Verifier::visitInsertElementInst(InsertElementInst &IE) {
4415 Check(InsertElementInst::isValidOperands(IE.getOperand(0), IE.getOperand(1),
4416 IE.getOperand(2)),
4417 "Invalid insertelement operands!", &IE);
4418 visitInstruction(IE);
4419}
4420
4421void Verifier::visitShuffleVectorInst(ShuffleVectorInst &SV) {
4422  Check(ShuffleVectorInst::isValidOperands(SV.getOperand(0), SV.getOperand(1),
4423                                           SV.getShuffleMask()),
4424 "Invalid shufflevector operands!", &SV);
4425 visitInstruction(SV);
4426}
4427
4428void Verifier::visitGetElementPtrInst(GetElementPtrInst &GEP) {
4429 Type *TargetTy = GEP.getPointerOperandType()->getScalarType();
4430
4431 Check(isa<PointerType>(TargetTy),
4432 "GEP base pointer is not a vector or a vector of pointers", &GEP);
4433 Check(GEP.getSourceElementType()->isSized(), "GEP into unsized type!", &GEP);
4434
4435 if (auto *STy = dyn_cast<StructType>(GEP.getSourceElementType())) {
4436 Check(!STy->isScalableTy(),
4437 "getelementptr cannot target structure that contains scalable vector"
4438 "type",
4439 &GEP);
4440 }
4441
4442 SmallVector<Value *, 16> Idxs(GEP.indices());
4443 Check(
4444 all_of(Idxs, [](Value *V) { return V->getType()->isIntOrIntVectorTy(); }),
4445 "GEP indexes must be integers", &GEP);
4446 Type *ElTy =
4447 GetElementPtrInst::getIndexedType(GEP.getSourceElementType(), Idxs);
4448 Check(ElTy, "Invalid indices for GEP pointer type!", &GEP);
4449
4450 PointerType *PtrTy = dyn_cast<PointerType>(GEP.getType()->getScalarType());
4451
4452 Check(PtrTy && GEP.getResultElementType() == ElTy,
4453 "GEP is not of right type for indices!", &GEP, ElTy);
4454
4455 if (auto *GEPVTy = dyn_cast<VectorType>(GEP.getType())) {
4456 // Additional checks for vector GEPs.
4457 ElementCount GEPWidth = GEPVTy->getElementCount();
4458 if (GEP.getPointerOperandType()->isVectorTy())
4459 Check(
4460 GEPWidth ==
4461 cast<VectorType>(GEP.getPointerOperandType())->getElementCount(),
4462 "Vector GEP result width doesn't match operand's", &GEP);
4463 for (Value *Idx : Idxs) {
4464 Type *IndexTy = Idx->getType();
4465 if (auto *IndexVTy = dyn_cast<VectorType>(IndexTy)) {
4466 ElementCount IndexWidth = IndexVTy->getElementCount();
4467 Check(IndexWidth == GEPWidth, "Invalid GEP index vector width", &GEP);
4468 }
4469 Check(IndexTy->isIntOrIntVectorTy(),
4470 "All GEP indices should be of integer type");
4471 }
4472 }
4473
4474 Check(GEP.getAddressSpace() == PtrTy->getAddressSpace(),
4475 "GEP address space doesn't match type", &GEP);
4476
4477 visitInstruction(GEP);
4478}
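// Illustrative example (editorial note, not part of Verifier.cpp): GEP indices must be
// integers and the source element type must be sized; %base is a placeholder.
//   %p = getelementptr i32, ptr %base, i64 4         ; accepted
//   %q = getelementptr i32, ptr %base, float 1.0     ; rejected: non-integer index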
4479
4480static bool isContiguous(const ConstantRange &A, const ConstantRange &B) {
4481 return A.getUpper() == B.getLower() || A.getLower() == B.getUpper();
4482}
4483
4484/// Verify !range and !absolute_symbol metadata. These have the same
4485/// restrictions, except !absolute_symbol allows the full set.
4486void Verifier::verifyRangeLikeMetadata(const Value &I, const MDNode *Range,
4487 Type *Ty, RangeLikeMetadataKind Kind) {
4488 unsigned NumOperands = Range->getNumOperands();
4489 Check(NumOperands % 2 == 0, "Unfinished range!", Range);
4490 unsigned NumRanges = NumOperands / 2;
4491 Check(NumRanges >= 1, "It should have at least one range!", Range);
4492
4493 ConstantRange LastRange(1, true); // Dummy initial value
4494 for (unsigned i = 0; i < NumRanges; ++i) {
4495 ConstantInt *Low =
4496 mdconst::dyn_extract<ConstantInt>(Range->getOperand(2 * i));
4497 Check(Low, "The lower limit must be an integer!", Low);
4498 ConstantInt *High =
4499 mdconst::dyn_extract<ConstantInt>(Range->getOperand(2 * i + 1));
4500 Check(High, "The upper limit must be an integer!", High);
4501
4502 Check(High->getType() == Low->getType(), "Range pair types must match!",
4503 &I);
4504
4505 if (Kind == RangeLikeMetadataKind::NoaliasAddrspace) {
4506 Check(High->getType()->isIntegerTy(32),
4507 "noalias.addrspace type must be i32!", &I);
4508 } else {
4509 Check(High->getType() == Ty->getScalarType(),
4510 "Range types must match instruction type!", &I);
4511 }
4512
4513 APInt HighV = High->getValue();
4514 APInt LowV = Low->getValue();
4515
4516 // ConstantRange asserts if the ranges are the same except for the min/max
4517 // value. Leave the cases it tolerates for the empty range error below.
4518 Check(LowV != HighV || LowV.isMaxValue() || LowV.isMinValue(),
4519 "The upper and lower limits cannot be the same value", &I);
4520
4521 ConstantRange CurRange(LowV, HighV);
4522 Check(!CurRange.isEmptySet() &&
4523 (Kind == RangeLikeMetadataKind::AbsoluteSymbol ||
4524 !CurRange.isFullSet()),
4525 "Range must not be empty!", Range);
4526 if (i != 0) {
4527 Check(CurRange.intersectWith(LastRange).isEmptySet(),
4528 "Intervals are overlapping", Range);
4529 Check(LowV.sgt(LastRange.getLower()), "Intervals are not in order",
4530 Range);
4531 Check(!isContiguous(CurRange, LastRange), "Intervals are contiguous",
4532 Range);
4533 }
4534 LastRange = ConstantRange(LowV, HighV);
4535 }
4536 if (NumRanges > 2) {
4537 APInt FirstLow =
4538 mdconst::dyn_extract<ConstantInt>(Range->getOperand(0))->getValue();
4539 APInt FirstHigh =
4540 mdconst::dyn_extract<ConstantInt>(Range->getOperand(1))->getValue();
4541 ConstantRange FirstRange(FirstLow, FirstHigh);
4542 Check(FirstRange.intersectWith(LastRange).isEmptySet(),
4543 "Intervals are overlapping", Range);
4544 Check(!isContiguous(FirstRange, LastRange), "Intervals are contiguous",
4545 Range);
4546 }
4547}
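// Illustrative example (editorial note, not part of Verifier.cpp): a !range attachment as
// checked above uses pairs of same-typed limits forming non-empty, ordered, non-overlapping,
// non-contiguous intervals; %p is a placeholder.
//   %v = load i8, ptr %p, !range !0
//   !0 = !{i8 0, i8 2, i8 4, i8 6}    ; accepted: intervals [0,2) and [4,6)
//   !1 = !{i8 0, i8 2, i8 2, i8 4}    ; rejected: intervals are contiguous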
4548
4549void Verifier::visitRangeMetadata(Instruction &I, MDNode *Range, Type *Ty) {
4550 assert(Range && Range == I.getMetadata(LLVMContext::MD_range) &&
4551 "precondition violation");
4552 verifyRangeLikeMetadata(I, Range, Ty, RangeLikeMetadataKind::Range);
4553}
4554
4555void Verifier::visitNoaliasAddrspaceMetadata(Instruction &I, MDNode *Range,
4556 Type *Ty) {
4557 assert(Range && Range == I.getMetadata(LLVMContext::MD_noalias_addrspace) &&
4558 "precondition violation");
4559 verifyRangeLikeMetadata(I, Range, Ty,
4560 RangeLikeMetadataKind::NoaliasAddrspace);
4561}
4562
4563void Verifier::checkAtomicMemAccessSize(Type *Ty, const Instruction *I) {
4564 unsigned Size = DL.getTypeSizeInBits(Ty).getFixedValue();
4565 Check(Size >= 8, "atomic memory access' size must be byte-sized", Ty, I);
4566 Check(!(Size & (Size - 1)),
4567 "atomic memory access' operand must have a power-of-two size", Ty, I);
4568}
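// Illustrative example (editorial note, not part of Verifier.cpp): atomic accesses need a
// power-of-two size of at least one byte; %p is a placeholder.
//   %v = load atomic i32, ptr %p seq_cst, align 4   ; accepted: 32 bits
//   %w = load atomic i17, ptr %p seq_cst, align 4   ; rejected: 17 bits is not a power of two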
4569
4570void Verifier::visitLoadInst(LoadInst &LI) {
4572 Check(PTy, "Load operand must be a pointer.", &LI);
4573 Type *ElTy = LI.getType();
4574 if (MaybeAlign A = LI.getAlign()) {
4575 Check(A->value() <= Value::MaximumAlignment,
4576 "huge alignment values are unsupported", &LI);
4577 }
4578 Check(ElTy->isSized(), "loading unsized types is not allowed", &LI);
4579 if (LI.isAtomic()) {
4580 Check(LI.getOrdering() != AtomicOrdering::Release &&
4581 LI.getOrdering() != AtomicOrdering::AcquireRelease,
4582 "Load cannot have Release ordering", &LI);
4583 Check(ElTy->getScalarType()->isIntOrPtrTy() ||
4584 ElTy->getScalarType()->isFloatingPointTy(),
4585 "atomic load operand must have integer, pointer, floating point, "
4586 "or vector type!",
4587 ElTy, &LI);
4588
4589 checkAtomicMemAccessSize(ElTy, &LI);
4590 } else {
4591 Check(LI.getSyncScopeID() == SyncScope::System,
4592 "Non-atomic load cannot have SynchronizationScope specified", &LI);
4593 }
4594
4595 visitInstruction(LI);
4596}
4597
4598void Verifier::visitStoreInst(StoreInst &SI) {
4599 PointerType *PTy = dyn_cast<PointerType>(SI.getOperand(1)->getType());
4600 Check(PTy, "Store operand must be a pointer.", &SI);
4601 Type *ElTy = SI.getOperand(0)->getType();
4602 if (MaybeAlign A = SI.getAlign()) {
4603 Check(A->value() <= Value::MaximumAlignment,
4604 "huge alignment values are unsupported", &SI);
4605 }
4606 Check(ElTy->isSized(), "storing unsized types is not allowed", &SI);
4607 if (SI.isAtomic()) {
4608 Check(SI.getOrdering() != AtomicOrdering::Acquire &&
4609 SI.getOrdering() != AtomicOrdering::AcquireRelease,
4610 "Store cannot have Acquire ordering", &SI);
4611 Check(ElTy->getScalarType()->isIntOrPtrTy() ||
4612 ElTy->getScalarType()->isFloatingPointTy(),
4613 "atomic store operand must have integer, pointer, floating point, "
4614 "or vector type!",
4615 ElTy, &SI);
4616 checkAtomicMemAccessSize(ElTy, &SI);
4617 } else {
4618 Check(SI.getSyncScopeID() == SyncScope::System,
4619 "Non-atomic store cannot have SynchronizationScope specified", &SI);
4620 }
4621 visitInstruction(SI);
4622}
4623
4624/// Check that SwiftErrorVal is used as a swifterror argument in CS.
4625void Verifier::verifySwiftErrorCall(CallBase &Call,
4626 const Value *SwiftErrorVal) {
4627 for (const auto &I : llvm::enumerate(Call.args())) {
4628 if (I.value() == SwiftErrorVal) {
4629 Check(Call.paramHasAttr(I.index(), Attribute::SwiftError),
4630 "swifterror value when used in a callsite should be marked "
4631 "with swifterror attribute",
4632 SwiftErrorVal, Call);
4633 }
4634 }
4635}
4636
4637void Verifier::verifySwiftErrorValue(const Value *SwiftErrorVal) {
4638 // Check that swifterror value is only used by loads, stores, or as
4639 // a swifterror argument.
4640 for (const User *U : SwiftErrorVal->users()) {
4641 Check(isa<LoadInst>(U) || isa<StoreInst>(U) || isa<CallInst>(U) ||
4642 isa<InvokeInst>(U),
4643 "swifterror value can only be loaded and stored from, or "
4644 "as a swifterror argument!",
4645 SwiftErrorVal, U);
4646 // If it is used by a store, check it is the second operand.
4647 if (auto StoreI = dyn_cast<StoreInst>(U))
4648 Check(StoreI->getOperand(1) == SwiftErrorVal,
4649 "swifterror value should be the second operand when used "
4650 "by stores",
4651 SwiftErrorVal, U);
4652 if (auto *Call = dyn_cast<CallBase>(U))
4653 verifySwiftErrorCall(*const_cast<CallBase *>(Call), SwiftErrorVal);
4654 }
4655}
4656
4657void Verifier::visitAllocaInst(AllocaInst &AI) {
4658 Type *Ty = AI.getAllocatedType();
4659 SmallPtrSet<Type*, 4> Visited;
4660 Check(Ty->isSized(&Visited), "Cannot allocate unsized type", &AI);
4661 // Check if it's a target extension type that disallows being used on the
4662 // stack.
4664 "Alloca has illegal target extension type", &AI);
4666 "Alloca array size must have integer type", &AI);
4667 if (MaybeAlign A = AI.getAlign()) {
4668 Check(A->value() <= Value::MaximumAlignment,
4669 "huge alignment values are unsupported", &AI);
4670 }
4671
4672 if (AI.isSwiftError()) {
4673 Check(Ty->isPointerTy(), "swifterror alloca must have pointer type", &AI);
4674 Check(!AI.isArrayAllocation(),
4675 "swifterror alloca must not be array allocation", &AI);
4676 verifySwiftErrorValue(&AI);
4677 }
4678
4679 if (TT.isAMDGPU()) {
4680 Check(AI.getAddressSpace() == DL.getAllocaAddrSpace(),
4681 "alloca on amdgpu must be in addrspace(5)", &AI);
4682 }
4683
4684 visitInstruction(AI);
4685}
4686
4687void Verifier::visitAtomicCmpXchgInst(AtomicCmpXchgInst &CXI) {
4688 Type *ElTy = CXI.getOperand(1)->getType();
4689 Check(ElTy->isIntOrPtrTy(),
4690 "cmpxchg operand must have integer or pointer type", ElTy, &CXI);
4691 checkAtomicMemAccessSize(ElTy, &CXI);
4692 visitInstruction(CXI);
4693}
4694
4695void Verifier::visitAtomicRMWInst(AtomicRMWInst &RMWI) {
4696 Check(RMWI.getOrdering() != AtomicOrdering::Unordered,
4697 "atomicrmw instructions cannot be unordered.", &RMWI);
4698 auto Op = RMWI.getOperation();
4699 Type *ElTy = RMWI.getOperand(1)->getType();
4700 if (Op == AtomicRMWInst::Xchg) {
4701 Check(ElTy->isIntegerTy() || ElTy->isFloatingPointTy() ||
4702 ElTy->isPointerTy(),
4703 "atomicrmw " + AtomicRMWInst::getOperationName(Op) +
4704 " operand must have integer or floating point type!",
4705 &RMWI, ElTy);
4706 } else if (AtomicRMWInst::isFPOperation(Op)) {
4707 Check(ElTy->isFPOrFPVectorTy() && !isa<ScalableVectorType>(ElTy),
4708 "atomicrmw " + AtomicRMWInst::getOperationName(Op) +
4709 " operand must have floating-point or fixed vector of floating-point "
4710 "type!",
4711 &RMWI, ElTy);
4712 } else {
4713 Check(ElTy->isIntegerTy(),
4714 "atomicrmw " + AtomicRMWInst::getOperationName(Op) +
4715 " operand must have integer type!",
4716 &RMWI, ElTy);
4717 }
4718 checkAtomicMemAccessSize(ElTy, &RMWI);
4719 Check(AtomicRMWInst::FIRST_BINOP <= Op && Op <= AtomicRMWInst::LAST_BINOP,
4720 "Invalid binary operation!", &RMWI);
4721 visitInstruction(RMWI);
4722}
4723
4724void Verifier::visitFenceInst(FenceInst &FI) {
4725 const AtomicOrdering Ordering = FI.getOrdering();
4726 Check(Ordering == AtomicOrdering::Acquire ||
4727 Ordering == AtomicOrdering::Release ||
4728 Ordering == AtomicOrdering::AcquireRelease ||
4729 Ordering == AtomicOrdering::SequentiallyConsistent,
4730 "fence instructions may only have acquire, release, acq_rel, or "
4731 "seq_cst ordering.",
4732 &FI);
4733 visitInstruction(FI);
4734}
4735
4736void Verifier::visitExtractValueInst(ExtractValueInst &EVI) {
4737 Check(ExtractValueInst::getIndexedType(EVI.getAggregateOperand()->getType(),
4738 EVI.getIndices()) == EVI.getType(),
4739 "Invalid ExtractValueInst operands!", &EVI);
4740
4741 visitInstruction(EVI);
4742}
4743
4744void Verifier::visitInsertValueInst(InsertValueInst &IVI) {
4745 Check(ExtractValueInst::getIndexedType(IVI.getAggregateOperand()->getType(),
4746 IVI.getIndices()) ==
4747 IVI.getOperand(1)->getType(),
4748 "Invalid InsertValueInst operands!", &IVI);
4749
4750 visitInstruction(IVI);
4751}
4752
4753static Value *getParentPad(Value *EHPad) {
4754 if (auto *FPI = dyn_cast<FuncletPadInst>(EHPad))
4755 return FPI->getParentPad();
4756
4757 return cast<CatchSwitchInst>(EHPad)->getParentPad();
4758}
4759
4760void Verifier::visitEHPadPredecessors(Instruction &I) {
4761 assert(I.isEHPad());
4762
4763 BasicBlock *BB = I.getParent();
4764 Function *F = BB->getParent();
4765
4766 Check(BB != &F->getEntryBlock(), "EH pad cannot be in entry block.", &I);
4767
4768 if (auto *LPI = dyn_cast<LandingPadInst>(&I)) {
4769 // The landingpad instruction defines its parent as a landing pad block. The
4770 // landing pad block may be branched to only by the unwind edge of an
4771 // invoke.
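 // For example, the edge from
 //   invoke void @f() to label %cont unwind label %lpad
 // to %lpad is the only way control may enter the landing pad block.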
4772 for (BasicBlock *PredBB : predecessors(BB)) {
4773 const auto *II = dyn_cast<InvokeInst>(PredBB->getTerminator());
4774 Check(II && II->getUnwindDest() == BB && II->getNormalDest() != BB,
4775 "Block containing LandingPadInst must be jumped to "
4776 "only by the unwind edge of an invoke.",
4777 LPI);
4778 }
4779 return;
4780 }
4781 if (auto *CPI = dyn_cast<CatchPadInst>(&I)) {
4782 if (!pred_empty(BB))
4783 Check(BB->getUniquePredecessor() == CPI->getCatchSwitch()->getParent(),
4784 "Block containg CatchPadInst must be jumped to "
4785 "only by its catchswitch.",
4786 CPI);
4787 Check(BB != CPI->getCatchSwitch()->getUnwindDest(),
4788 "Catchswitch cannot unwind to one of its catchpads",
4789 CPI->getCatchSwitch(), CPI);
4790 return;
4791 }
4792
4793 // Verify that each pred has a legal terminator with a legal to/from EH
4794 // pad relationship.
4795 Instruction *ToPad = &I;
4796 Value *ToPadParent = getParentPad(ToPad);
4797 for (BasicBlock *PredBB : predecessors(BB)) {
4798 Instruction *TI = PredBB->getTerminator();
4799 Value *FromPad;
4800 if (auto *II = dyn_cast<InvokeInst>(TI)) {
4801 Check(II->getUnwindDest() == BB && II->getNormalDest() != BB,
4802 "EH pad must be jumped to via an unwind edge", ToPad, II);
4803 auto *CalledFn =
4804 dyn_cast<Function>(II->getCalledOperand()->stripPointerCasts());
4805 if (CalledFn && CalledFn->isIntrinsic() && II->doesNotThrow() &&
4806 !IntrinsicInst::mayLowerToFunctionCall(CalledFn->getIntrinsicID()))
4807 continue;
4808 if (auto Bundle = II->getOperandBundle(LLVMContext::OB_funclet))
4809 FromPad = Bundle->Inputs[0];
4810 else
4811 FromPad = ConstantTokenNone::get(II->getContext());
4812 } else if (auto *CRI = dyn_cast<CleanupReturnInst>(TI)) {
4813 FromPad = CRI->getOperand(0);
4814 Check(FromPad != ToPadParent, "A cleanupret must exit its cleanup", CRI);
4815 } else if (auto *CSI = dyn_cast<CatchSwitchInst>(TI)) {
4816 FromPad = CSI;
4817 } else {
4818 Check(false, "EH pad must be jumped to via an unwind edge", ToPad, TI);
4819 }
4820
4821 // The edge may exit from zero or more nested pads.
4822 SmallPtrSet<Value *, 8> Seen;
4823 for (;; FromPad = getParentPad(FromPad)) {
4824 Check(FromPad != ToPad,
4825 "EH pad cannot handle exceptions raised within it", FromPad, TI);
4826 if (FromPad == ToPadParent) {
4827 // This is a legal unwind edge.
4828 break;
4829 }
4830 Check(!isa<ConstantTokenNone>(FromPad),
4831 "A single unwind edge may only enter one EH pad", TI);
4832 Check(Seen.insert(FromPad).second, "EH pad jumps through a cycle of pads",
4833 FromPad);
4834
4835 // This will be diagnosed on the corresponding instruction already. We
4836 // need the extra check here to make sure getParentPad() works.
4837 Check(isa<FuncletPadInst>(FromPad) || isa<CatchSwitchInst>(FromPad),
4838 "Parent pad must be catchpad/cleanuppad/catchswitch", TI);
4839 }
4840 }
4841}
4842
4843void Verifier::visitLandingPadInst(LandingPadInst &LPI) {
4844 // The landingpad instruction is ill-formed if it doesn't have any clauses and
4845 // isn't a cleanup.
4846 Check(LPI.getNumClauses() > 0 || LPI.isCleanup(),
4847 "LandingPadInst needs at least one clause or to be a cleanup.", &LPI);
4848
4849 visitEHPadPredecessors(LPI);
4850
4851 if (!LandingPadResultTy)
4852 LandingPadResultTy = LPI.getType();
4853 else
4854 Check(LandingPadResultTy == LPI.getType(),
4855 "The landingpad instruction should have a consistent result type "
4856 "inside a function.",
4857 &LPI);
4858
4859 Function *F = LPI.getParent()->getParent();
4860 Check(F->hasPersonalityFn(),
4861 "LandingPadInst needs to be in a function with a personality.", &LPI);
4862
4863 // The landingpad instruction must be the first non-PHI instruction in the
4864 // block.
4865 Check(LPI.getParent()->getLandingPadInst() == &LPI,
4866 "LandingPadInst not the first non-PHI instruction in the block.", &LPI);
4867
4868 for (unsigned i = 0, e = LPI.getNumClauses(); i < e; ++i) {
4869 Constant *Clause = LPI.getClause(i);
4870 if (LPI.isCatch(i)) {
4871 Check(isa<PointerType>(Clause->getType()),
4872 "Catch operand does not have pointer type!", &LPI);
4873 } else {
4874 Check(LPI.isFilter(i), "Clause is neither catch nor filter!", &LPI);
4875 Check(isa<ConstantArray>(Clause) || isa<ConstantAggregateZero>(Clause),
4876 "Filter operand is not an array of constants!", &LPI);
4877 }
4878 }
4879
4880 visitInstruction(LPI);
4881}
4882
4883void Verifier::visitResumeInst(ResumeInst &RI) {
4884 Check(RI.getFunction()->hasPersonalityFn(),
4885 "ResumeInst needs to be in a function with a personality.", &RI);
4886
4887 if (!LandingPadResultTy)
4888 LandingPadResultTy = RI.getValue()->getType();
4889 else
4890 Check(LandingPadResultTy == RI.getValue()->getType(),
4891 "The resume instruction should have a consistent result type "
4892 "inside a function.",
4893 &RI);
4894
4895 visitTerminator(RI);
4896}
4897
4898void Verifier::visitCatchPadInst(CatchPadInst &CPI) {
4899 BasicBlock *BB = CPI.getParent();
4900
4901 Function *F = BB->getParent();
4902 Check(F->hasPersonalityFn(),
4903 "CatchPadInst needs to be in a function with a personality.", &CPI);
4904
4906 "CatchPadInst needs to be directly nested in a CatchSwitchInst.",
4907 CPI.getParentPad());
4908
4909 // The catchpad instruction must be the first non-PHI instruction in the
4910 // block.
4911 Check(&*BB->getFirstNonPHIIt() == &CPI,
4912 "CatchPadInst not the first non-PHI instruction in the block.", &CPI);
4913
4914 visitEHPadPredecessors(CPI);
4915 visitFuncletPadInst(CPI);
4916}
4917
4918void Verifier::visitCatchReturnInst(CatchReturnInst &CatchReturn) {
4919 Check(isa<CatchPadInst>(CatchReturn.getOperand(0)),
4920 "CatchReturnInst needs to be provided a CatchPad", &CatchReturn,
4921 CatchReturn.getOperand(0));
4922
4923 visitTerminator(CatchReturn);
4924}
4925
4926void Verifier::visitCleanupPadInst(CleanupPadInst &CPI) {
4927 BasicBlock *BB = CPI.getParent();
4928
4929 Function *F = BB->getParent();
4930 Check(F->hasPersonalityFn(),
4931 "CleanupPadInst needs to be in a function with a personality.", &CPI);
4932
4933 // The cleanuppad instruction must be the first non-PHI instruction in the
4934 // block.
4935 Check(&*BB->getFirstNonPHIIt() == &CPI,
4936 "CleanupPadInst not the first non-PHI instruction in the block.", &CPI);
4937
4938 auto *ParentPad = CPI.getParentPad();
4939 Check(isa<ConstantTokenNone>(ParentPad) || isa<FuncletPadInst>(ParentPad),
4940 "CleanupPadInst has an invalid parent.", &CPI);
4941
4942 visitEHPadPredecessors(CPI);
4943 visitFuncletPadInst(CPI);
4944}
4945
4946void Verifier::visitFuncletPadInst(FuncletPadInst &FPI) {
4947 User *FirstUser = nullptr;
4948 Value *FirstUnwindPad = nullptr;
4949 SmallVector<FuncletPadInst *, 8> Worklist({&FPI});
4950 SmallPtrSet<FuncletPadInst *, 8> Seen;
4951
4952 while (!Worklist.empty()) {
4953 FuncletPadInst *CurrentPad = Worklist.pop_back_val();
4954 Check(Seen.insert(CurrentPad).second,
4955 "FuncletPadInst must not be nested within itself", CurrentPad);
4956 Value *UnresolvedAncestorPad = nullptr;
4957 for (User *U : CurrentPad->users()) {
4958 BasicBlock *UnwindDest;
4959 if (auto *CRI = dyn_cast<CleanupReturnInst>(U)) {
4960 UnwindDest = CRI->getUnwindDest();
4961 } else if (auto *CSI = dyn_cast<CatchSwitchInst>(U)) {
4962 // We allow catchswitch unwind to caller to nest
4963 // within an outer pad that unwinds somewhere else,
4964 // because catchswitch doesn't have a nounwind variant.
4965 // See e.g. SimplifyCFGOpt::SimplifyUnreachable.
4966 if (CSI->unwindsToCaller())
4967 continue;
4968 UnwindDest = CSI->getUnwindDest();
4969 } else if (auto *II = dyn_cast<InvokeInst>(U)) {
4970 UnwindDest = II->getUnwindDest();
4971 } else if (isa<CallInst>(U)) {
4972 // Calls which don't unwind may be found inside funclet
4973 // pads that unwind somewhere else. We don't *require*
4974 // such calls to be annotated nounwind.
4975 continue;
4976 } else if (auto *CPI = dyn_cast<CleanupPadInst>(U)) {
4977 // The unwind dest for a cleanup can only be found by
4978 // recursive search. Add it to the worklist, and we'll
4979 // search for its first use that determines where it unwinds.
4980 Worklist.push_back(CPI);
4981 continue;
4982 } else {
4983 Check(isa<CatchReturnInst>(U), "Bogus funclet pad use", U);
4984 continue;
4985 }
4986
4987 Value *UnwindPad;
4988 bool ExitsFPI;
4989 if (UnwindDest) {
4990 UnwindPad = &*UnwindDest->getFirstNonPHIIt();
4991 if (!cast<Instruction>(UnwindPad)->isEHPad())
4992 continue;
4993 Value *UnwindParent = getParentPad(UnwindPad);
4994 // Ignore unwind edges that don't exit CurrentPad.
4995 if (UnwindParent == CurrentPad)
4996 continue;
4997 // Determine whether the original funclet pad is exited,
4998 // and if we are scanning nested pads determine how many
4999 // of them are exited so we can stop searching their
5000 // children.
5001 Value *ExitedPad = CurrentPad;
5002 ExitsFPI = false;
5003 do {
5004 if (ExitedPad == &FPI) {
5005 ExitsFPI = true;
5006 // Now we can resolve any ancestors of CurrentPad up to
5007 // FPI, but not including FPI since we need to make sure
5008 // to check all direct users of FPI for consistency.
5009 UnresolvedAncestorPad = &FPI;
5010 break;
5011 }
5012 Value *ExitedParent = getParentPad(ExitedPad);
5013 if (ExitedParent == UnwindParent) {
5014 // ExitedPad is the ancestor-most pad which this unwind
5015 // edge exits, so we can resolve up to it, meaning that
5016 // ExitedParent is the first ancestor still unresolved.
5017 UnresolvedAncestorPad = ExitedParent;
5018 break;
5019 }
5020 ExitedPad = ExitedParent;
5021 } while (!isa<ConstantTokenNone>(ExitedPad));
5022 } else {
5023 // Unwinding to caller exits all pads.
5024 UnwindPad = ConstantTokenNone::get(FPI.getContext());
5025 ExitsFPI = true;
5026 UnresolvedAncestorPad = &FPI;
5027 }
5028
5029 if (ExitsFPI) {
5030 // This unwind edge exits FPI. Make sure it agrees with other
5031 // such edges.
5032 if (FirstUser) {
5033 Check(UnwindPad == FirstUnwindPad,
5034 "Unwind edges out of a funclet "
5035 "pad must have the same unwind "
5036 "dest",
5037 &FPI, U, FirstUser);
5038 } else {
5039 FirstUser = U;
5040 FirstUnwindPad = UnwindPad;
5041 // Record cleanup sibling unwinds for verifySiblingFuncletUnwinds
5042 if (isa<CleanupPadInst>(&FPI) && !isa<ConstantTokenNone>(UnwindPad) &&
5043 getParentPad(UnwindPad) == getParentPad(&FPI))
5044 SiblingFuncletInfo[&FPI] = cast<Instruction>(U);
5045 }
5046 }
5047 // Make sure we visit all uses of FPI, but for nested pads stop as
5048 // soon as we know where they unwind to.
5049 if (CurrentPad != &FPI)
5050 break;
5051 }
5052 if (UnresolvedAncestorPad) {
5053 if (CurrentPad == UnresolvedAncestorPad) {
5054 // When CurrentPad is FPI itself, we don't mark it as resolved even if
5055 // we've found an unwind edge that exits it, because we need to verify
5056 // all direct uses of FPI.
5057 assert(CurrentPad == &FPI);
5058 continue;
5059 }
5060 // Pop off the worklist any nested pads that we've found an unwind
5061 // destination for. The pads on the worklist are the uncles,
5062 // great-uncles, etc. of CurrentPad. We've found an unwind destination
5063 // for all ancestors of CurrentPad up to but not including
5064 // UnresolvedAncestorPad.
5065 Value *ResolvedPad = CurrentPad;
5066 while (!Worklist.empty()) {
5067 Value *UnclePad = Worklist.back();
5068 Value *AncestorPad = getParentPad(UnclePad);
5069 // Walk ResolvedPad up the ancestor list until we either find the
5070 // uncle's parent or the last resolved ancestor.
5071 while (ResolvedPad != AncestorPad) {
5072 Value *ResolvedParent = getParentPad(ResolvedPad);
5073 if (ResolvedParent == UnresolvedAncestorPad) {
5074 break;
5075 }
5076 ResolvedPad = ResolvedParent;
5077 }
5078 // If the resolved ancestor search didn't find the uncle's parent,
5079 // then the uncle is not yet resolved.
5080 if (ResolvedPad != AncestorPad)
5081 break;
5082 // This uncle is resolved, so pop it from the worklist.
5083 Worklist.pop_back();
5084 }
5085 }
5086 }
5087
5088 if (FirstUnwindPad) {
5089 if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(FPI.getParentPad())) {
5090 BasicBlock *SwitchUnwindDest = CatchSwitch->getUnwindDest();
5091 Value *SwitchUnwindPad;
5092 if (SwitchUnwindDest)
5093 SwitchUnwindPad = &*SwitchUnwindDest->getFirstNonPHIIt();
5094 else
5095 SwitchUnwindPad = ConstantTokenNone::get(FPI.getContext());
5096 Check(SwitchUnwindPad == FirstUnwindPad,
5097 "Unwind edges out of a catch must have the same unwind dest as "
5098 "the parent catchswitch",
5099 &FPI, FirstUser, CatchSwitch);
5100 }
5101 }
5102
5103 visitInstruction(FPI);
5104}
5105
5106void Verifier::visitCatchSwitchInst(CatchSwitchInst &CatchSwitch) {
5107 BasicBlock *BB = CatchSwitch.getParent();
5108
5109 Function *F = BB->getParent();
5110 Check(F->hasPersonalityFn(),
5111 "CatchSwitchInst needs to be in a function with a personality.",
5112 &CatchSwitch);
5113
5114 // The catchswitch instruction must be the first non-PHI instruction in the
5115 // block.
5116 Check(&*BB->getFirstNonPHIIt() == &CatchSwitch,
5117 "CatchSwitchInst not the first non-PHI instruction in the block.",
5118 &CatchSwitch);
5119
5120 auto *ParentPad = CatchSwitch.getParentPad();
5121 Check(isa<ConstantTokenNone>(ParentPad) || isa<FuncletPadInst>(ParentPad),
5122 "CatchSwitchInst has an invalid parent.", ParentPad);
5123
5124 if (BasicBlock *UnwindDest = CatchSwitch.getUnwindDest()) {
5125 BasicBlock::iterator I = UnwindDest->getFirstNonPHIIt();
5126 Check(I->isEHPad() && !isa<LandingPadInst>(I),
5127 "CatchSwitchInst must unwind to an EH block which is not a "
5128 "landingpad.",
5129 &CatchSwitch);
5130
5131 // Record catchswitch sibling unwinds for verifySiblingFuncletUnwinds
5132 if (getParentPad(&*I) == ParentPad)
5133 SiblingFuncletInfo[&CatchSwitch] = &CatchSwitch;
5134 }
5135
5136 Check(CatchSwitch.getNumHandlers() != 0,
5137 "CatchSwitchInst cannot have empty handler list", &CatchSwitch);
5138
5139 for (BasicBlock *Handler : CatchSwitch.handlers()) {
5140 Check(isa<CatchPadInst>(Handler->getFirstNonPHIIt()),
5141 "CatchSwitchInst handlers must be catchpads", &CatchSwitch, Handler);
5142 }
5143
5144 visitEHPadPredecessors(CatchSwitch);
5145 visitTerminator(CatchSwitch);
5146}
5147
5148void Verifier::visitCleanupReturnInst(CleanupReturnInst &CRI) {
5149 Check(isa<CleanupPadInst>(CRI.getOperand(0)),
5150 "CleanupReturnInst needs to be provided a CleanupPad", &CRI,
5151 CRI.getOperand(0));
5152
5153 if (BasicBlock *UnwindDest = CRI.getUnwindDest()) {
5154 BasicBlock::iterator I = UnwindDest->getFirstNonPHIIt();
5155 Check(I->isEHPad() && !isa<LandingPadInst>(I),
5156 "CleanupReturnInst must unwind to an EH block which is not a "
5157 "landingpad.",
5158 &CRI);
5159 }
5160
5161 visitTerminator(CRI);
5162}
5163
5164void Verifier::verifyDominatesUse(Instruction &I, unsigned i) {
5165 Instruction *Op = cast<Instruction>(I.getOperand(i));
5166 // If we have an invalid invoke, don't try to compute the dominance.
5167 // We already reject it in the invoke specific checks and the dominance
5168 // computation doesn't handle multiple edges.
5169 if (InvokeInst *II = dyn_cast<InvokeInst>(Op)) {
5170 if (II->getNormalDest() == II->getUnwindDest())
5171 return;
5172 }
5173
5174 // Quick check whether the def has already been encountered in the same block.
5175 // PHI nodes are not checked to prevent accepting preceding PHIs, because PHI
5176 // uses are defined to happen on the incoming edge, not at the instruction.
5177 //
5178 // FIXME: If this operand is a MetadataAsValue (wrapping a LocalAsMetadata)
5179 // wrapping an SSA value, assert that we've already encountered it. See
5180 // related FIXME in Mapper::mapLocalAsMetadata in ValueMapper.cpp.
5181 if (!isa<PHINode>(I) && InstsInThisBlock.count(Op))
5182 return;
5183
5184 const Use &U = I.getOperandUse(i);
5185 Check(DT.dominates(Op, U), "Instruction does not dominate all uses!", Op, &I);
5186}
5187
5188void Verifier::visitDereferenceableMetadata(Instruction& I, MDNode* MD) {
5189 Check(I.getType()->isPointerTy(),
5190 "dereferenceable, dereferenceable_or_null "
5191 "apply only to pointer types",
5192 &I);
5193 Check(isa<LoadInst>(I) || isa<IntToPtrInst>(I),
5194 "dereferenceable, dereferenceable_or_null apply only to load"
5195 " and inttoptr instructions, use attributes for calls or invokes",
5196 &I);
5197 Check(MD->getNumOperands() == 1,
5198 "dereferenceable, dereferenceable_or_null "
5199 "take one operand!",
5200 &I);
5201 ConstantInt *CI = mdconst::dyn_extract<ConstantInt>(MD->getOperand(0));
5202 Check(CI && CI->getType()->isIntegerTy(64),
5203 "dereferenceable, "
5204 "dereferenceable_or_null metadata value must be an i64!",
5205 &I);
5206}
5207
5208void Verifier::visitNofreeMetadata(Instruction &I, MDNode *MD) {
5209 Check(I.getType()->isPointerTy(), "nofree applies only to pointer types", &I);
5210 Check((isa<IntToPtrInst>(I)), "nofree applies only to inttoptr instruction",
5211 &I);
5212 Check(MD->getNumOperands() == 0, "nofree metadata must be empty", &I);
5213}
5214
5215void Verifier::visitProfMetadata(Instruction &I, MDNode *MD) {
5216 auto GetBranchingTerminatorNumOperands = [&]() {
5217 unsigned ExpectedNumOperands = 0;
5218 if (BranchInst *BI = dyn_cast<BranchInst>(&I))
5219 ExpectedNumOperands = BI->getNumSuccessors();
5220 else if (SwitchInst *SI = dyn_cast<SwitchInst>(&I))
5221 ExpectedNumOperands = SI->getNumSuccessors();
5222 else if (isa<CallInst>(&I))
5223 ExpectedNumOperands = 1;
5224 else if (IndirectBrInst *IBI = dyn_cast<IndirectBrInst>(&I))
5225 ExpectedNumOperands = IBI->getNumDestinations();
5226 else if (isa<SelectInst>(&I))
5227 ExpectedNumOperands = 2;
5228 else if (CallBrInst *CI = dyn_cast<CallBrInst>(&I))
5229 ExpectedNumOperands = CI->getNumSuccessors();
5230 return ExpectedNumOperands;
5231 };
5232 Check(MD->getNumOperands() >= 1,
5233 "!prof annotations should have at least 1 operand", MD);
5234 // Check first operand.
5235 Check(MD->getOperand(0) != nullptr, "first operand should not be null", MD);
5236 Check(isa<MDString>(MD->getOperand(0)),
5237 "expected string with name of the !prof annotation", MD);
5238 MDString *MDS = cast<MDString>(MD->getOperand(0));
5239 StringRef ProfName = MDS->getString();
5240
5241 if (ProfName == MDProfLabels::UnknownBranchWeightsMarker) {
5242 Check(GetBranchingTerminatorNumOperands() != 0 || isa<InvokeInst>(I),
5243 "'unknown' !prof should only appear on instructions on which "
5244 "'branch_weights' would",
5245 MD);
5246 verifyUnknownProfileMetadata(MD);
5247 return;
5248 }
5249
5250 Check(MD->getNumOperands() >= 2,
5251 "!prof annotations should have no less than 2 operands", MD);
5252
5253 // Check consistency of !prof branch_weights metadata.
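 // For example, a two-successor conditional branch would carry
 // !prof !{!"branch_weights", i32 20, i32 1}, one weight per successor.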
5254 if (ProfName == MDProfLabels::BranchWeights) {
5255 unsigned NumBranchWeights = getNumBranchWeights(*MD);
5256 if (isa<InvokeInst>(&I)) {
5257 Check(NumBranchWeights == 1 || NumBranchWeights == 2,
5258 "Wrong number of InvokeInst branch_weights operands", MD);
5259 } else {
5260 const unsigned ExpectedNumOperands = GetBranchingTerminatorNumOperands();
5261 if (ExpectedNumOperands == 0)
5262 CheckFailed("!prof branch_weights are not allowed for this instruction",
5263 MD);
5264
5265 Check(NumBranchWeights == ExpectedNumOperands, "Wrong number of operands",
5266 MD);
5267 }
5268 for (unsigned i = getBranchWeightOffset(MD); i < MD->getNumOperands();
5269 ++i) {
5270 auto &MDO = MD->getOperand(i);
5271 Check(MDO, "second operand should not be null", MD);
5272 Check(mdconst::dyn_extract<ConstantInt>(MDO),
5273 "!prof branch_weights operand is not a const int");
5274 }
5275 } else if (ProfName == MDProfLabels::ValueProfile) {
5276 Check(isValueProfileMD(MD), "invalid value profiling metadata", MD);
5277 ConstantInt *KindInt = mdconst::dyn_extract<ConstantInt>(MD->getOperand(1));
5278 Check(KindInt, "VP !prof missing kind argument", MD);
5279
5280 auto Kind = KindInt->getZExtValue();
5281 Check(Kind >= InstrProfValueKind::IPVK_First &&
5282 Kind <= InstrProfValueKind::IPVK_Last,
5283 "Invalid VP !prof kind", MD);
5284 Check(MD->getNumOperands() % 2 == 1,
5285 "VP !prof should have an even number "
5286 "of arguments after 'VP'",
5287 MD);
5288 if (Kind == InstrProfValueKind::IPVK_IndirectCallTarget ||
5289 Kind == InstrProfValueKind::IPVK_MemOPSize)
5290 Check(isa<CallBase>(I),
5291 "VP !prof indirect call or memop size expected to be applied to "
5292 "CallBase instructions only",
5293 MD);
5294 } else {
5295 CheckFailed("expected either branch_weights or VP profile name", MD);
5296 }
5297}
5298
5299void Verifier::visitDIAssignIDMetadata(Instruction &I, MDNode *MD) {
5300 assert(I.hasMetadata(LLVMContext::MD_DIAssignID));
5301 // DIAssignID metadata must be attached to either an alloca or some form of
5302 // store/memory-writing instruction.
5303 // FIXME: We allow all intrinsic insts here to avoid trying to enumerate all
5304 // possible store intrinsics.
5305 bool ExpectedInstTy =
5306 isa<AllocaInst>(I) || isa<StoreInst>(I) || isa<IntrinsicInst>(I);
5307 CheckDI(ExpectedInstTy, "!DIAssignID attached to unexpected instruction kind",
5308 I, MD);
5309 // Iterate over the MetadataAsValue uses of the DIAssignID - these should
5310 // only be found as DbgAssignIntrinsic operands.
5311 if (auto *AsValue = MetadataAsValue::getIfExists(Context, MD)) {
5312 for (auto *User : AsValue->users()) {
5313 CheckDI(isa<DbgAssignIntrinsic>(User),
5314 "!DIAssignID should only be used by llvm.dbg.assign intrinsics",
5315 MD, User);
5316 // All of the dbg.assign intrinsics should be in the same function as I.
5317 if (auto *DAI = dyn_cast<DbgAssignIntrinsic>(User))
5318 CheckDI(DAI->getFunction() == I.getFunction(),
5319 "dbg.assign not in same function as inst", DAI, &I);
5320 }
5321 }
5322 for (DbgVariableRecord *DVR :
5323 cast<DIAssignID>(MD)->getAllDbgVariableRecordUsers()) {
5324 CheckDI(DVR->isDbgAssign(),
5325 "!DIAssignID should only be used by Assign DVRs.", MD, DVR);
5326 CheckDI(DVR->getFunction() == I.getFunction(),
5327 "DVRAssign not in same function as inst", DVR, &I);
5328 }
5329}
5330
5331void Verifier::visitMMRAMetadata(Instruction &I, MDNode *MD) {
5332 Check(canInstructionHaveMMRAs(I),
5333 "!mmra metadata attached to unexpected instruction kind", I, MD);
5334
5335 // MMRA Metadata should either be a tag, e.g. !{!"foo", !"bar"}, or a
5336 // list of tags such as !2 in the following example:
5337 // !0 = !{!"a", !"b"}
5338 // !1 = !{!"c", !"d"}
5339 // !2 = !{!0, !1}
5340 if (MMRAMetadata::isTagMD(MD))
5341 return;
5342
5343 Check(isa<MDTuple>(MD), "!mmra expected to be a metadata tuple", I, MD);
5344 for (const MDOperand &MDOp : MD->operands())
5345 Check(MMRAMetadata::isTagMD(MDOp.get()),
5346 "!mmra metadata tuple operand is not an MMRA tag", I, MDOp.get());
5347}
5348
5349void Verifier::visitCallStackMetadata(MDNode *MD) {
5350 // Call stack metadata should consist of a list of at least 1 constant int
5351 // (representing a hash of the location).
5352 Check(MD->getNumOperands() >= 1,
5353 "call stack metadata should have at least 1 operand", MD);
5354
5355 for (const auto &Op : MD->operands())
5356 Check(mdconst::dyn_extract_or_null<ConstantInt>(Op),
5357 "call stack metadata operand should be constant integer", Op);
5358}
5359
5360void Verifier::visitMemProfMetadata(Instruction &I, MDNode *MD) {
5361 Check(isa<CallBase>(I), "!memprof metadata should only exist on calls", &I);
5362 Check(MD->getNumOperands() >= 1,
5363 "!memprof annotations should have at least 1 metadata operand "
5364 "(MemInfoBlock)",
5365 MD);
5366
5367 // Check each MIB
5368 for (auto &MIBOp : MD->operands()) {
5369 MDNode *MIB = dyn_cast<MDNode>(MIBOp);
5370 // The first operand of an MIB should be the call stack metadata.
5371 // The rest of the operands should be MDString tags, and there should be
5372 // at least one.
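 // A typical MemInfoBlock looks like !{!2, !"notcold"}, where !2 is call
 // stack metadata such as !2 = !{i64 123, i64 456}.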
5373 Check(MIB->getNumOperands() >= 2,
5374 "Each !memprof MemInfoBlock should have at least 2 operands", MIB);
5375
5376 // Check call stack metadata (first operand).
5377 Check(MIB->getOperand(0) != nullptr,
5378 "!memprof MemInfoBlock first operand should not be null", MIB);
5379 Check(isa<MDNode>(MIB->getOperand(0)),
5380 "!memprof MemInfoBlock first operand should be an MDNode", MIB);
5381 MDNode *StackMD = dyn_cast<MDNode>(MIB->getOperand(0));
5382 visitCallStackMetadata(StackMD);
5383
5384 // The second MIB operand should be MDString.
5385 Check(isa<MDString>(MIB->getOperand(1)),
5386 "!memprof MemInfoBlock second operand should be an MDString", MIB);
5387
5388 // Any remaining should be MDNode that are pairs of integers
5389 for (unsigned I = 2; I < MIB->getNumOperands(); ++I) {
5390 MDNode *OpNode = dyn_cast<MDNode>(MIB->getOperand(I));
5391 Check(OpNode, "Not all !memprof MemInfoBlock operands 2 to N are MDNode",
5392 MIB);
5393 Check(OpNode->getNumOperands() == 2,
5394 "Not all !memprof MemInfoBlock operands 2 to N are MDNode with 2 "
5395 "operands",
5396 MIB);
5397 // Check that all of Op's operands are ConstantInt.
5398 Check(llvm::all_of(OpNode->operands(),
5399 [](const MDOperand &Op) {
5400 return mdconst::hasa<ConstantInt>(Op);
5401 }),
5402 "Not all !memprof MemInfoBlock operands 2 to N are MDNode with "
5403 "ConstantInt operands",
5404 MIB);
5405 }
5406 }
5407}
5408
5409void Verifier::visitCallsiteMetadata(Instruction &I, MDNode *MD) {
5410 Check(isa<CallBase>(I), "!callsite metadata should only exist on calls", &I);
5411 // Verify the partial callstack annotated from memprof profiles. This callsite
5412 // is a part of a profiled allocation callstack.
5413 visitCallStackMetadata(MD);
5414}
5415
5416static inline bool isConstantIntMetadataOperand(const Metadata *MD) {
5417 if (auto *VAL = dyn_cast<ValueAsMetadata>(MD))
5418 return isa<ConstantInt>(VAL->getValue());
5419 return false;
5420}
5421
5422void Verifier::visitCalleeTypeMetadata(Instruction &I, MDNode *MD) {
5423 Check(isa<CallBase>(I), "!callee_type metadata should only exist on calls",
5424 &I);
5425 for (Metadata *Op : MD->operands()) {
5426 Check(isa<MDNode>(Op),
5427 "The callee_type metadata must be a list of type metadata nodes", Op);
5428 auto *TypeMD = cast<MDNode>(Op);
5429 Check(TypeMD->getNumOperands() == 2,
5430 "Well-formed generalized type metadata must contain exactly two "
5431 "operands",
5432 Op);
5433 Check(isConstantIntMetadataOperand(TypeMD->getOperand(0)) &&
5434 mdconst::extract<ConstantInt>(TypeMD->getOperand(0))->isZero(),
5435 "The first operand of type metadata for functions must be zero", Op);
5436 Check(TypeMD->hasGeneralizedMDString(),
5437 "Only generalized type metadata can be part of the callee_type "
5438 "metadata list",
5439 Op);
5440 }
5441}
5442
5443void Verifier::visitAnnotationMetadata(MDNode *Annotation) {
5444 Check(isa<MDTuple>(Annotation), "annotation must be a tuple");
5445 Check(Annotation->getNumOperands() >= 1,
5446 "annotation must have at least one operand");
5447 for (const MDOperand &Op : Annotation->operands()) {
5448 bool TupleOfStrings =
5449 isa<MDTuple>(Op.get()) &&
5450 all_of(cast<MDTuple>(Op)->operands(), [](auto &Annotation) {
5451 return isa<MDString>(Annotation.get());
5452 });
5453 Check(isa<MDString>(Op.get()) || TupleOfStrings,
5454 "operands must be a string or a tuple of strings");
5455 }
5456}
5457
5458void Verifier::visitAliasScopeMetadata(const MDNode *MD) {
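 // A well-formed scope looks like !5 = !{!5, !6, !"scope name"} with a
 // domain such as !6 = !{!6, !"domain name"}; the string name operands
 // are optional.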
5459 unsigned NumOps = MD->getNumOperands();
5460 Check(NumOps >= 2 && NumOps <= 3, "scope must have two or three operands",
5461 MD);
5462 Check(MD->getOperand(0).get() == MD || isa<MDString>(MD->getOperand(0)),
5463 "first scope operand must be self-referential or string", MD);
5464 if (NumOps == 3)
5465 Check(isa<MDString>(MD->getOperand(2)),
5466 "third scope operand must be string (if used)", MD);
5467
5468 MDNode *Domain = dyn_cast<MDNode>(MD->getOperand(1));
5469 Check(Domain != nullptr, "second scope operand must be MDNode", MD);
5470
5471 unsigned NumDomainOps = Domain->getNumOperands();
5472 Check(NumDomainOps >= 1 && NumDomainOps <= 2,
5473 "domain must have one or two operands", Domain);
5474 Check(Domain->getOperand(0).get() == Domain ||
5475 isa<MDString>(Domain->getOperand(0)),
5476 "first domain operand must be self-referential or string", Domain);
5477 if (NumDomainOps == 2)
5478 Check(isa<MDString>(Domain->getOperand(1)),
5479 "second domain operand must be string (if used)", Domain);
5480}
5481
5482void Verifier::visitAliasScopeListMetadata(const MDNode *MD) {
5483 for (const MDOperand &Op : MD->operands()) {
5484 const MDNode *OpMD = dyn_cast<MDNode>(Op);
5485 Check(OpMD != nullptr, "scope list must consist of MDNodes", MD);
5486 visitAliasScopeMetadata(OpMD);
5487 }
5488}
5489
5490void Verifier::visitAccessGroupMetadata(const MDNode *MD) {
5491 auto IsValidAccessScope = [](const MDNode *MD) {
5492 return MD->getNumOperands() == 0 && MD->isDistinct();
5493 };
5494
5495 // It must be either an access scope itself...
5496 if (IsValidAccessScope(MD))
5497 return;
5498
5499 // ...or a list of access scopes.
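 // e.g. !7 = distinct !{} is a single access group, and !8 = !{!7, !9}
 // (with !9 also distinct !{}) is a valid list of access groups.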
5500 for (const MDOperand &Op : MD->operands()) {
5501 const MDNode *OpMD = dyn_cast<MDNode>(Op);
5502 Check(OpMD != nullptr, "Access scope list must consist of MDNodes", MD);
5503 Check(IsValidAccessScope(OpMD),
5504 "Access scope list contains invalid access scope", MD);
5505 }
5506}
5507
5508void Verifier::visitCapturesMetadata(Instruction &I, const MDNode *Captures) {
5509 static const char *ValidArgs[] = {"address_is_null", "address",
5510 "read_provenance", "provenance"};
5511
5512 auto *SI = dyn_cast<StoreInst>(&I);
5513 Check(SI, "!captures metadata can only be applied to store instructions", &I);
5514 Check(SI->getValueOperand()->getType()->isPointerTy(),
5515 "!captures metadata can only be applied to store with value operand of "
5516 "pointer type",
5517 &I);
5518 Check(Captures->getNumOperands() != 0, "!captures metadata cannot be empty",
5519 &I);
5520
5521 for (Metadata *Op : Captures->operands()) {
5522 auto *Str = dyn_cast<MDString>(Op);
5523 Check(Str, "!captures metadata must be a list of strings", &I);
5524 Check(is_contained(ValidArgs, Str->getString()),
5525 "invalid entry in !captures metadata", &I, Str);
5526 }
5527}
5528
5529void Verifier::visitAllocTokenMetadata(Instruction &I, MDNode *MD) {
5530 Check(isa<CallBase>(I), "!alloc_token should only exist on calls", &I);
5531 Check(MD->getNumOperands() == 2, "!alloc_token must have 2 operands", MD);
5532 Check(isa<MDString>(MD->getOperand(0)), "expected string", MD);
5533 Check(mdconst::dyn_extract_or_null<ConstantInt>(MD->getOperand(1)),
5534 "expected integer constant", MD);
5535}
5536
5537/// verifyInstruction - Verify that an instruction is well formed.
5538///
5539void Verifier::visitInstruction(Instruction &I) {
5540 BasicBlock *BB = I.getParent();
5541 Check(BB, "Instruction not embedded in basic block!", &I);
5542
5543 if (!isa<PHINode>(I)) { // Check that non-phi nodes are not self referential
5544 for (User *U : I.users()) {
5545 Check(U != (User *)&I || !DT.isReachableFromEntry(BB),
5546 "Only PHI nodes may reference their own value!", &I);
5547 }
5548 }
5549
5550 // Check that void typed values don't have names
5551 Check(!I.getType()->isVoidTy() || !I.hasName(),
5552 "Instruction has a name, but provides a void value!", &I);
5553
5554 // Check that the return value of the instruction is either void or a legal
5555 // value type.
5556 Check(I.getType()->isVoidTy() || I.getType()->isFirstClassType(),
5557 "Instruction returns a non-scalar type!", &I);
5558
5559 // Check that the instruction doesn't produce metadata. Calls are already
5560 // checked against the callee type.
5561 Check(!I.getType()->isMetadataTy() || isa<CallInst>(I) || isa<InvokeInst>(I),
5562 "Invalid use of metadata!", &I);
5563
5564 // Check that all uses of the instruction, if they are instructions
5565 // themselves, actually have parent basic blocks. If the use is not an
5566 // instruction, it is an error!
5567 for (Use &U : I.uses()) {
5568 if (Instruction *Used = dyn_cast<Instruction>(U.getUser()))
5569 Check(Used->getParent() != nullptr,
5570 "Instruction referencing"
5571 " instruction not embedded in a basic block!",
5572 &I, Used);
5573 else {
5574 CheckFailed("Use of instruction is not an instruction!", U);
5575 return;
5576 }
5577 }
5578
5579 // Get a pointer to the call base of the instruction if it is some form of
5580 // call.
5581 const CallBase *CBI = dyn_cast<CallBase>(&I);
5582
5583 for (unsigned i = 0, e = I.getNumOperands(); i != e; ++i) {
5584 Check(I.getOperand(i) != nullptr, "Instruction has null operand!", &I);
5585
5586 // Check to make sure that only first-class-values are operands to
5587 // instructions.
5588 if (!I.getOperand(i)->getType()->isFirstClassType()) {
5589 Check(false, "Instruction operands must be first-class values!", &I);
5590 }
5591
5592 if (Function *F = dyn_cast<Function>(I.getOperand(i))) {
5593 // This code checks whether the function is used as the operand of a
5594 // clang_arc_attachedcall operand bundle.
5595 auto IsAttachedCallOperand = [](Function *F, const CallBase *CBI,
5596 int Idx) {
5597 return CBI && CBI->isOperandBundleOfType(
5598 LLVMContext::OB_clang_arc_attachedcall, CBI->getOperandUse(Idx));
5599 };
5600
5601 // Check to make sure that the "address of" an intrinsic function is never
5602 // taken. Ignore cases where the address of the intrinsic function is used
5603 // as the argument of operand bundle "clang.arc.attachedcall" as those
5604 // cases are handled in verifyAttachedCallBundle.
5605 Check((!F->isIntrinsic() ||
5606 (CBI && &CBI->getCalledOperandUse() == &I.getOperandUse(i)) ||
5607 IsAttachedCallOperand(F, CBI, i)),
5608 "Cannot take the address of an intrinsic!", &I);
5609 Check(!F->isIntrinsic() || isa<CallInst>(I) || isa<CallBrInst>(I) ||
5610 F->getIntrinsicID() == Intrinsic::donothing ||
5611 F->getIntrinsicID() == Intrinsic::seh_try_begin ||
5612 F->getIntrinsicID() == Intrinsic::seh_try_end ||
5613 F->getIntrinsicID() == Intrinsic::seh_scope_begin ||
5614 F->getIntrinsicID() == Intrinsic::seh_scope_end ||
5615 F->getIntrinsicID() == Intrinsic::coro_resume ||
5616 F->getIntrinsicID() == Intrinsic::coro_destroy ||
5617 F->getIntrinsicID() == Intrinsic::coro_await_suspend_void ||
5618 F->getIntrinsicID() == Intrinsic::coro_await_suspend_bool ||
5619 F->getIntrinsicID() == Intrinsic::coro_await_suspend_handle ||
5620 F->getIntrinsicID() ==
5621 Intrinsic::experimental_patchpoint_void ||
5622 F->getIntrinsicID() == Intrinsic::experimental_patchpoint ||
5623 F->getIntrinsicID() == Intrinsic::fake_use ||
5624 F->getIntrinsicID() == Intrinsic::experimental_gc_statepoint ||
5625 F->getIntrinsicID() == Intrinsic::wasm_throw ||
5626 F->getIntrinsicID() == Intrinsic::wasm_rethrow ||
5627 IsAttachedCallOperand(F, CBI, i),
5628 "Cannot invoke an intrinsic other than donothing, patchpoint, "
5629 "statepoint, coro_resume, coro_destroy, clang.arc.attachedcall or "
5630 "wasm.(re)throw",
5631 &I);
5632 Check(F->getParent() == &M, "Referencing function in another module!", &I,
5633 &M, F, F->getParent());
5634 } else if (BasicBlock *OpBB = dyn_cast<BasicBlock>(I.getOperand(i))) {
5635 Check(OpBB->getParent() == BB->getParent(),
5636 "Referring to a basic block in another function!", &I);
5637 } else if (Argument *OpArg = dyn_cast<Argument>(I.getOperand(i))) {
5638 Check(OpArg->getParent() == BB->getParent(),
5639 "Referring to an argument in another function!", &I);
5640 } else if (GlobalValue *GV = dyn_cast<GlobalValue>(I.getOperand(i))) {
5641 Check(GV->getParent() == &M, "Referencing global in another module!", &I,
5642 &M, GV, GV->getParent());
5643 } else if (Instruction *OpInst = dyn_cast<Instruction>(I.getOperand(i))) {
5644 Check(OpInst->getFunction() == BB->getParent(),
5645 "Referring to an instruction in another function!", &I);
5646 verifyDominatesUse(I, i);
5647 } else if (isa<InlineAsm>(I.getOperand(i))) {
5648 Check(CBI && &CBI->getCalledOperandUse() == &I.getOperandUse(i),
5649 "Cannot take the address of an inline asm!", &I);
5650 } else if (auto *C = dyn_cast<Constant>(I.getOperand(i))) {
5651 visitConstantExprsRecursively(C);
5652 }
5653 }
5654
5655 if (MDNode *MD = I.getMetadata(LLVMContext::MD_fpmath)) {
5656 Check(I.getType()->isFPOrFPVectorTy(),
5657 "fpmath requires a floating point result!", &I);
5658 Check(MD->getNumOperands() == 1, "fpmath takes one operand!", &I);
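 // e.g. %r = fdiv float %a, %b, !fpmath !{float 2.5} permits a result
 // within 2.5 ULPs of the infinitely precise value.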
5659 if (ConstantFP *CFP0 =
5660 mdconst::dyn_extract_or_null<ConstantFP>(MD->getOperand(0))) {
5661 const APFloat &Accuracy = CFP0->getValueAPF();
5662 Check(&Accuracy.getSemantics() == &APFloat::IEEEsingle(),
5663 "fpmath accuracy must have float type", &I);
5664 Check(Accuracy.isFiniteNonZero() && !Accuracy.isNegative(),
5665 "fpmath accuracy not a positive number!", &I);
5666 } else {
5667 Check(false, "invalid fpmath accuracy!", &I);
5668 }
5669 }
5670
5671 if (MDNode *Range = I.getMetadata(LLVMContext::MD_range)) {
5672 Check(isa<LoadInst>(I) || isa<CallInst>(I) || isa<InvokeInst>(I),
5673 "Ranges are only for loads, calls and invokes!", &I);
5674 visitRangeMetadata(I, Range, I.getType());
5675 }
5676
5677 if (MDNode *Range = I.getMetadata(LLVMContext::MD_noalias_addrspace)) {
5678 Check(isa<LoadInst>(I) || isa<StoreInst>(I) || isa<AtomicRMWInst>(I) ||
5679 isa<AtomicCmpXchgInst>(I) || isa<CallInst>(I),
5680 "noalias.addrspace are only for memory operations!", &I);
5681 visitNoaliasAddrspaceMetadata(I, Range, I.getType());
5682 }
5683
5684 if (I.hasMetadata(LLVMContext::MD_invariant_group)) {
5685 Check(isa<LoadInst>(I) || isa<StoreInst>(I),
5686 "invariant.group metadata is only for loads and stores", &I);
5687 }
5688
5689 if (MDNode *MD = I.getMetadata(LLVMContext::MD_nonnull)) {
5690 Check(I.getType()->isPointerTy(), "nonnull applies only to pointer types",
5691 &I);
5692 Check(isa<LoadInst>(I),
5693 "nonnull applies only to load instructions, use attributes"
5694 " for calls or invokes",
5695 &I);
5696 Check(MD->getNumOperands() == 0, "nonnull metadata must be empty", &I);
5697 }
5698
5699 if (MDNode *MD = I.getMetadata(LLVMContext::MD_dereferenceable))
5700 visitDereferenceableMetadata(I, MD);
5701
5702 if (MDNode *MD = I.getMetadata(LLVMContext::MD_dereferenceable_or_null))
5703 visitDereferenceableMetadata(I, MD);
5704
5705 if (MDNode *MD = I.getMetadata(LLVMContext::MD_nofree))
5706 visitNofreeMetadata(I, MD);
5707
5708 if (MDNode *TBAA = I.getMetadata(LLVMContext::MD_tbaa))
5709 TBAAVerifyHelper.visitTBAAMetadata(&I, TBAA);
5710
5711 if (MDNode *MD = I.getMetadata(LLVMContext::MD_noalias))
5712 visitAliasScopeListMetadata(MD);
5713 if (MDNode *MD = I.getMetadata(LLVMContext::MD_alias_scope))
5714 visitAliasScopeListMetadata(MD);
5715
5716 if (MDNode *MD = I.getMetadata(LLVMContext::MD_access_group))
5717 visitAccessGroupMetadata(MD);
5718
5719 if (MDNode *AlignMD = I.getMetadata(LLVMContext::MD_align)) {
5720 Check(I.getType()->isPointerTy(), "align applies only to pointer types",
5721 &I);
5722 Check(isa<LoadInst>(I),
5723 "align applies only to load instructions, "
5724 "use attributes for calls or invokes",
5725 &I);
5726 Check(AlignMD->getNumOperands() == 1, "align takes one operand!", &I);
5727 ConstantInt *CI = mdconst::dyn_extract<ConstantInt>(AlignMD->getOperand(0));
5728 Check(CI && CI->getType()->isIntegerTy(64),
5729 "align metadata value must be an i64!", &I);
5730 uint64_t Align = CI->getZExtValue();
5731 Check(isPowerOf2_64(Align), "align metadata value must be a power of 2!",
5732 &I);
5733 Check(Align <= Value::MaximumAlignment,
5734 "alignment is larger that implementation defined limit", &I);
5735 }
5736
5737 if (MDNode *MD = I.getMetadata(LLVMContext::MD_prof))
5738 visitProfMetadata(I, MD);
5739
5740 if (MDNode *MD = I.getMetadata(LLVMContext::MD_memprof))
5741 visitMemProfMetadata(I, MD);
5742
5743 if (MDNode *MD = I.getMetadata(LLVMContext::MD_callsite))
5744 visitCallsiteMetadata(I, MD);
5745
5746 if (MDNode *MD = I.getMetadata(LLVMContext::MD_callee_type))
5747 visitCalleeTypeMetadata(I, MD);
5748
5749 if (MDNode *MD = I.getMetadata(LLVMContext::MD_DIAssignID))
5750 visitDIAssignIDMetadata(I, MD);
5751
5752 if (MDNode *MMRA = I.getMetadata(LLVMContext::MD_mmra))
5753 visitMMRAMetadata(I, MMRA);
5754
5755 if (MDNode *Annotation = I.getMetadata(LLVMContext::MD_annotation))
5756 visitAnnotationMetadata(Annotation);
5757
5758 if (MDNode *Captures = I.getMetadata(LLVMContext::MD_captures))
5759 visitCapturesMetadata(I, Captures);
5760
5761 if (MDNode *MD = I.getMetadata(LLVMContext::MD_alloc_token))
5762 visitAllocTokenMetadata(I, MD);
5763
5764 if (MDNode *N = I.getDebugLoc().getAsMDNode()) {
5765 CheckDI(isa<DILocation>(N), "invalid !dbg metadata attachment", &I, N);
5766 visitMDNode(*N, AreDebugLocsAllowed::Yes);
5767
5768 if (auto *DL = dyn_cast<DILocation>(N)) {
5769 if (DL->getAtomGroup()) {
5770 CheckDI(DL->getScope()->getSubprogram()->getKeyInstructionsEnabled(),
5771 "DbgLoc uses atomGroup but DISubprogram doesn't have Key "
5772 "Instructions enabled",
5773 DL, DL->getScope()->getSubprogram());
5774 }
5775 }
5776 }
5777
5778 SmallVector<std::pair<unsigned, MDNode *>, 4> MDs;
5779 I.getAllMetadata(MDs);
5780 for (auto Attachment : MDs) {
5781 unsigned Kind = Attachment.first;
5782 auto AllowLocs =
5783 (Kind == LLVMContext::MD_dbg || Kind == LLVMContext::MD_loop)
5784 ? AreDebugLocsAllowed::Yes
5785 : AreDebugLocsAllowed::No;
5786 visitMDNode(*Attachment.second, AllowLocs);
5787 }
5788
5789 InstsInThisBlock.insert(&I);
5790}
5791
5792/// Allow intrinsics to be verified in different ways.
5793void Verifier::visitIntrinsicCall(Intrinsic::ID ID, CallBase &Call) {
5794 Function *IF = Call.getCalledFunction();
5795 Check(IF->isDeclaration(), "Intrinsic functions should never be defined!",
5796 IF);
5797
5798 // Verify that the intrinsic prototype lines up with what the .td files
5799 // describe.
5800 FunctionType *IFTy = IF->getFunctionType();
5801 bool IsVarArg = IFTy->isVarArg();
5802
5803 SmallVector<Intrinsic::IITDescriptor, 8> Table;
5804 Intrinsic::getIntrinsicInfoTableEntries(ID, Table);
5805 ArrayRef<Intrinsic::IITDescriptor> TableRef = Table;
5806
5807 // Walk the descriptors to extract overloaded types.
5808 SmallVector<Type *, 4> ArgTys;
5809 Intrinsic::MatchIntrinsicTypesResult Res =
5810 Intrinsic::matchIntrinsicSignature(IFTy, TableRef, ArgTys);
5811 Check(Res != Intrinsic::MatchIntrinsicTypes_NoMatchRet,
5812 "Intrinsic has incorrect return type!", IF);
5813 Check(Res != Intrinsic::MatchIntrinsicTypes_NoMatchArg,
5814 "Intrinsic has incorrect argument type!", IF);
5815
5816 // Verify if the intrinsic call matches the vararg property.
5817 if (IsVarArg)
5818 Check(!Intrinsic::matchIntrinsicVarArg(IsVarArg, TableRef),
5819 "Intrinsic was not defined with variable arguments!", IF);
5820 else
5821 Check(!Intrinsic::matchIntrinsicVarArg(IsVarArg, TableRef),
5822 "Callsite was not defined with variable arguments!", IF);
5823
5824 // All descriptors should be absorbed by now.
5825 Check(TableRef.empty(), "Intrinsic has too few arguments!", IF);
5826
5827 // Now that we have the intrinsic ID and the actual argument types (and we
5828 // know they are legal for the intrinsic!) get the intrinsic name through the
5829 // usual means. This allows us to verify the mangling of argument types into
5830 // the name.
5831 const std::string ExpectedName =
5832 Intrinsic::getName(ID, ArgTys, IF->getParent(), IFTy);
5833 Check(ExpectedName == IF->getName(),
5834 "Intrinsic name not mangled correctly for type arguments! "
5835 "Should be: " +
5836 ExpectedName,
5837 IF);
5838
5839 // If the intrinsic takes MDNode arguments, verify that they are either global
5840 // or are local to *this* function.
5841 for (Value *V : Call.args()) {
5842 if (auto *MD = dyn_cast<MetadataAsValue>(V))
5843 visitMetadataAsValue(*MD, Call.getCaller());
5844 if (auto *Const = dyn_cast<Constant>(V))
5845 Check(!Const->getType()->isX86_AMXTy(),
5846 "const x86_amx is not allowed in argument!");
5847 }
5848
5849 switch (ID) {
5850 default:
5851 break;
5852 case Intrinsic::assume: {
5853 if (Call.hasOperandBundles()) {
5854 auto *Cond = dyn_cast<ConstantInt>(Call.getArgOperand(0));
5855 Check(Cond && Cond->isOne(),
5856 "assume with operand bundles must have i1 true condition", Call);
5857 }
5858 for (auto &Elem : Call.bundle_op_infos()) {
5859 unsigned ArgCount = Elem.End - Elem.Begin;
5860 // Separate storage assumptions are special insofar as they're the only
5861 // operand bundles allowed on assumes that aren't parameter attributes.
5862 if (Elem.Tag->getKey() == "separate_storage") {
5863 Check(ArgCount == 2,
5864 "separate_storage assumptions should have 2 arguments", Call);
5865 Check(Call.getOperand(Elem.Begin)->getType()->isPointerTy() &&
5866 Call.getOperand(Elem.Begin + 1)->getType()->isPointerTy(),
5867 "arguments to separate_storage assumptions should be pointers",
5868 Call);
5869 continue;
5870 }
5871 Check(Elem.Tag->getKey() == "ignore" ||
5872 Attribute::isExistingAttribute(Elem.Tag->getKey()),
5873 "tags must be valid attribute names", Call);
5874 Attribute::AttrKind Kind =
5875 Attribute::getAttrKindFromName(Elem.Tag->getKey());
5876 if (Kind == Attribute::Alignment) {
5877 Check(ArgCount <= 3 && ArgCount >= 2,
5878 "alignment assumptions should have 2 or 3 arguments", Call);
5879 Check(Call.getOperand(Elem.Begin)->getType()->isPointerTy(),
5880 "first argument should be a pointer", Call);
5881 Check(Call.getOperand(Elem.Begin + 1)->getType()->isIntegerTy(),
5882 "second argument should be an integer", Call);
5883 if (ArgCount == 3)
5884 Check(Call.getOperand(Elem.Begin + 2)->getType()->isIntegerTy(),
5885 "third argument should be an integer if present", Call);
5886 continue;
5887 }
5888 if (Kind == Attribute::Dereferenceable) {
5889 Check(ArgCount == 2,
5890 "dereferenceable assumptions should have 2 arguments", Call);
5891 Check(Call.getOperand(Elem.Begin)->getType()->isPointerTy(),
5892 "first argument should be a pointer", Call);
5893 Check(Call.getOperand(Elem.Begin + 1)->getType()->isIntegerTy(),
5894 "second argument should be an integer", Call);
5895 continue;
5896 }
5897 Check(ArgCount <= 2, "too many arguments", Call);
5898 if (Kind == Attribute::None)
5899 break;
5900 if (Attribute::isIntAttrKind(Kind)) {
5901 Check(ArgCount == 2, "this attribute should have 2 arguments", Call);
5902 Check(isa<ConstantInt>(Call.getOperand(Elem.Begin + 1)),
5903 "the second argument should be a constant integral value", Call);
5904 } else if (Attribute::canUseAsParamAttr(Kind)) {
5905 Check((ArgCount) == 1, "this attribute should have one argument", Call);
5906 } else if (Attribute::canUseAsFnAttr(Kind)) {
5907 Check((ArgCount) == 0, "this attribute has no argument", Call);
5908 }
5909 }
5910 break;
5911 }
5912 case Intrinsic::ucmp:
5913 case Intrinsic::scmp: {
5914 Type *SrcTy = Call.getOperand(0)->getType();
5915 Type *DestTy = Call.getType();
5916
5917 Check(DestTy->getScalarSizeInBits() >= 2,
5918 "result type must be at least 2 bits wide", Call);
5919
5920 bool IsDestTypeVector = DestTy->isVectorTy();
5921 Check(SrcTy->isVectorTy() == IsDestTypeVector,
5922 "ucmp/scmp argument and result types must both be either vector or "
5923 "scalar types",
5924 Call);
5925 if (IsDestTypeVector) {
5926 auto SrcVecLen = cast<VectorType>(SrcTy)->getElementCount();
5927 auto DestVecLen = cast<VectorType>(DestTy)->getElementCount();
5928 Check(SrcVecLen == DestVecLen,
5929 "return type and arguments must have the same number of "
5930 "elements",
5931 Call);
5932 }
5933 break;
5934 }
5935 case Intrinsic::coro_id: {
5936 auto *InfoArg = Call.getArgOperand(3)->stripPointerCasts();
5937 if (isa<ConstantPointerNull>(InfoArg))
5938 break;
5939 auto *GV = dyn_cast<GlobalVariable>(InfoArg);
5940 Check(GV && GV->isConstant() && GV->hasDefinitiveInitializer(),
5941 "info argument of llvm.coro.id must refer to an initialized "
5942 "constant");
5943 Constant *Init = GV->getInitializer();
5944 Check(isa<ConstantStruct>(Init) || isa<ConstantArray>(Init),
5945 "info argument of llvm.coro.id must refer to either a struct or "
5946 "an array");
5947 break;
5948 }
5949 case Intrinsic::is_fpclass: {
5950 const ConstantInt *TestMask = cast<ConstantInt>(Call.getOperand(1));
5951 Check((TestMask->getZExtValue() & ~static_cast<unsigned>(fcAllFlags)) == 0,
5952 "unsupported bits for llvm.is.fpclass test mask");
5953 break;
5954 }
5955 case Intrinsic::fptrunc_round: {
5956 // Check the rounding mode
5957 Metadata *MD = nullptr;
5958 auto *MAV = dyn_cast<MetadataAsValue>(Call.getOperand(1));
5959 if (MAV)
5960 MD = MAV->getMetadata();
5961
5962 Check(MD != nullptr, "missing rounding mode argument", Call);
5963
5964 Check(isa<MDString>(MD),
5965 ("invalid value for llvm.fptrunc.round metadata operand"
5966 " (the operand should be a string)"),
5967 MD);
5968
5969 std::optional<RoundingMode> RoundMode =
5970 convertStrToRoundingMode(cast<MDString>(MD)->getString());
5971 Check(RoundMode && *RoundMode != RoundingMode::Dynamic,
5972 "unsupported rounding mode argument", Call);
5973 break;
5974 }
5975#define BEGIN_REGISTER_VP_INTRINSIC(VPID, ...) case Intrinsic::VPID:
5976#include "llvm/IR/VPIntrinsics.def"
5977#undef BEGIN_REGISTER_VP_INTRINSIC
5978 visitVPIntrinsic(cast<VPIntrinsic>(Call));
5979 break;
5980#define INSTRUCTION(NAME, NARGS, ROUND_MODE, INTRINSIC) \
5981 case Intrinsic::INTRINSIC:
5982#include "llvm/IR/ConstrainedOps.def"
5983#undef INSTRUCTION
5984 visitConstrainedFPIntrinsic(cast<ConstrainedFPIntrinsic>(Call));
5985 break;
5986 case Intrinsic::dbg_declare: // llvm.dbg.declare
5987 case Intrinsic::dbg_value: // llvm.dbg.value
5988 case Intrinsic::dbg_assign: // llvm.dbg.assign
5989 case Intrinsic::dbg_label: // llvm.dbg.label
5990 // We no longer interpret debug intrinsics (the old variable-location
5991 // design). They're meaningless as far as LLVM is concerned; we could make
5992 // it an error for them to appear, but it's possible we'll have users
5993 // converting back to intrinsics for the foreseeable future (such as DXIL),
5994 // so tolerate their existence.
5995 break;
5996 case Intrinsic::memcpy:
5997 case Intrinsic::memcpy_inline:
5998 case Intrinsic::memmove:
5999 case Intrinsic::memset:
6000 case Intrinsic::memset_inline:
6001 break;
6002 case Intrinsic::experimental_memset_pattern: {
6003 const auto Memset = cast<MemSetPatternInst>(&Call);
6004 Check(Memset->getValue()->getType()->isSized(),
6005 "unsized types cannot be used as memset patterns", Call);
6006 break;
6007 }
6008 case Intrinsic::memcpy_element_unordered_atomic:
6009 case Intrinsic::memmove_element_unordered_atomic:
6010 case Intrinsic::memset_element_unordered_atomic: {
6011 const auto *AMI = cast<AnyMemIntrinsic>(&Call);
6012
6013 ConstantInt *ElementSizeCI =
6014 cast<ConstantInt>(AMI->getRawElementSizeInBytes());
6015 const APInt &ElementSizeVal = ElementSizeCI->getValue();
6016 Check(ElementSizeVal.isPowerOf2(),
6017 "element size of the element-wise atomic memory intrinsic "
6018 "must be a power of 2",
6019 Call);
6020
6021 auto IsValidAlignment = [&](MaybeAlign Alignment) {
6022 return Alignment && ElementSizeVal.ule(Alignment->value());
6023 };
6024 Check(IsValidAlignment(AMI->getDestAlign()),
6025 "incorrect alignment of the destination argument", Call);
6026 if (const auto *AMT = dyn_cast<AnyMemTransferInst>(AMI)) {
6027 Check(IsValidAlignment(AMT->getSourceAlign()),
6028 "incorrect alignment of the source argument", Call);
6029 }
6030 break;
6031 }
6032 case Intrinsic::call_preallocated_setup: {
6033 auto *NumArgs = cast<ConstantInt>(Call.getArgOperand(0));
6034 bool FoundCall = false;
6035 for (User *U : Call.users()) {
6036 auto *UseCall = dyn_cast<CallBase>(U);
6037 Check(UseCall != nullptr,
6038 "Uses of llvm.call.preallocated.setup must be calls");
6039 Intrinsic::ID IID = UseCall->getIntrinsicID();
6040 if (IID == Intrinsic::call_preallocated_arg) {
6041 auto *AllocArgIndex = dyn_cast<ConstantInt>(UseCall->getArgOperand(1));
6042 Check(AllocArgIndex != nullptr,
6043 "llvm.call.preallocated.alloc arg index must be a constant");
6044 auto AllocArgIndexInt = AllocArgIndex->getValue();
6045 Check(AllocArgIndexInt.sge(0) &&
6046 AllocArgIndexInt.slt(NumArgs->getValue()),
6047 "llvm.call.preallocated.alloc arg index must be between 0 and "
6048 "corresponding "
6049 "llvm.call.preallocated.setup's argument count");
6050 } else if (IID == Intrinsic::call_preallocated_teardown) {
6051 // nothing to do
6052 } else {
6053 Check(!FoundCall, "Can have at most one call corresponding to a "
6054 "llvm.call.preallocated.setup");
6055 FoundCall = true;
6056 size_t NumPreallocatedArgs = 0;
6057 for (unsigned i = 0; i < UseCall->arg_size(); i++) {
6058 if (UseCall->paramHasAttr(i, Attribute::Preallocated)) {
6059 ++NumPreallocatedArgs;
6060 }
6061 }
6062 Check(NumPreallocatedArgs != 0,
6063 "cannot use preallocated intrinsics on a call without "
6064 "preallocated arguments");
6065 Check(NumArgs->equalsInt(NumPreallocatedArgs),
6066 "llvm.call.preallocated.setup arg size must be equal to number "
6067 "of preallocated arguments "
6068 "at call site",
6069 Call, *UseCall);
6070 // getOperandBundle() cannot be called if more than one operand bundle of
6071 // this kind exists. There is already a check elsewhere for this, so skip
6072 // here if we see more than one.
6073 if (UseCall->countOperandBundlesOfType(LLVMContext::OB_preallocated) >
6074 1) {
6075 return;
6076 }
6077 auto PreallocatedBundle =
6078 UseCall->getOperandBundle(LLVMContext::OB_preallocated);
6079 Check(PreallocatedBundle,
6080 "Use of llvm.call.preallocated.setup outside intrinsics "
6081 "must be in \"preallocated\" operand bundle");
6082 Check(PreallocatedBundle->Inputs.front().get() == &Call,
6083 "preallocated bundle must have token from corresponding "
6084 "llvm.call.preallocated.setup");
6085 }
6086 }
6087 break;
6088 }
6089 case Intrinsic::call_preallocated_arg: {
6090 auto *Token = dyn_cast<CallBase>(Call.getArgOperand(0));
6091 Check(Token &&
6092 Token->getIntrinsicID() == Intrinsic::call_preallocated_setup,
6093 "llvm.call.preallocated.arg token argument must be a "
6094 "llvm.call.preallocated.setup");
6095 Check(Call.hasFnAttr(Attribute::Preallocated),
6096 "llvm.call.preallocated.arg must be called with a \"preallocated\" "
6097 "call site attribute");
6098 break;
6099 }
6100 case Intrinsic::call_preallocated_teardown: {
6101 auto *Token = dyn_cast<CallBase>(Call.getArgOperand(0));
6102 Check(Token &&
6103 Token->getIntrinsicID() == Intrinsic::call_preallocated_setup,
6104 "llvm.call.preallocated.teardown token argument must be a "
6105 "llvm.call.preallocated.setup");
6106 break;
6107 }
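  // For illustration, a sketch of the preallocated sequence these three cases
  // validate (callee and value names are placeholders):
  //   %t = call token @llvm.call.preallocated.setup(i32 1)
  //   %a = call ptr @llvm.call.preallocated.arg(token %t, i32 0) preallocated(i32)
  //   call void @foo(ptr preallocated(i32) %a) [ "preallocated"(token %t) ]
  // The setup's argument count matches the single preallocated argument at the
  // call site, and the call carries the setup token in its operand bundle.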
6108 case Intrinsic::gcroot:
6109 case Intrinsic::gcwrite:
6110 case Intrinsic::gcread:
6111 if (ID == Intrinsic::gcroot) {
6112 AllocaInst *AI =
6114 Check(AI, "llvm.gcroot parameter #1 must be an alloca.", Call);
6116 "llvm.gcroot parameter #2 must be a constant.", Call);
6117 if (!AI->getAllocatedType()->isPointerTy()) {
6119 "llvm.gcroot parameter #1 must either be a pointer alloca, "
6120 "or argument #2 must be a non-null constant.",
6121 Call);
6122 }
6123 }
6124
6125 Check(Call.getParent()->getParent()->hasGC(),
6126 "Enclosing function does not use GC.", Call);
6127 break;
6128 case Intrinsic::init_trampoline:
6130 "llvm.init_trampoline parameter #2 must resolve to a function.",
6131 Call);
6132 break;
6133 case Intrinsic::prefetch:
6134 Check(cast<ConstantInt>(Call.getArgOperand(1))->getZExtValue() < 2,
6135 "rw argument to llvm.prefetch must be 0-1", Call);
6136 Check(cast<ConstantInt>(Call.getArgOperand(2))->getZExtValue() < 4,
6137 "locality argument to llvm.prefetch must be 0-3", Call);
6138 Check(cast<ConstantInt>(Call.getArgOperand(3))->getZExtValue() < 2,
6139 "cache type argument to llvm.prefetch must be 0-1", Call);
6140 break;
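  // For illustration, a call that satisfies the range checks above (the
  // pointer name is a placeholder):
  //   call void @llvm.prefetch.p0(ptr %p, i32 0, i32 3, i32 1)
  // rw = 0 (read), locality = 3 (maximum), cache type = 1 (data cache).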
6141 case Intrinsic::reloc_none: {
6143 cast<MetadataAsValue>(Call.getArgOperand(0))->getMetadata()),
6144 "llvm.reloc.none argument must be a metadata string", &Call);
6145 break;
6146 }
6147 case Intrinsic::stackprotector:
6149 "llvm.stackprotector parameter #2 must resolve to an alloca.", Call);
6150 break;
6151 case Intrinsic::localescape: {
6152 BasicBlock *BB = Call.getParent();
6153 Check(BB->isEntryBlock(), "llvm.localescape used outside of entry block",
6154 Call);
6155 Check(!SawFrameEscape, "multiple calls to llvm.localescape in one function",
6156 Call);
6157 for (Value *Arg : Call.args()) {
6158 if (isa<ConstantPointerNull>(Arg))
6159 continue; // Null values are allowed as placeholders.
6160 auto *AI = dyn_cast<AllocaInst>(Arg->stripPointerCasts());
6161 Check(AI && AI->isStaticAlloca(),
6162 "llvm.localescape only accepts static allocas", Call);
6163 }
6164 FrameEscapeInfo[BB->getParent()].first = Call.arg_size();
6165 SawFrameEscape = true;
6166 break;
6167 }
6168 case Intrinsic::localrecover: {
6170 Function *Fn = dyn_cast<Function>(FnArg);
6171 Check(Fn && !Fn->isDeclaration(),
6172 "llvm.localrecover first "
6173 "argument must be function defined in this module",
6174 Call);
6175 auto *IdxArg = cast<ConstantInt>(Call.getArgOperand(2));
6176 auto &Entry = FrameEscapeInfo[Fn];
6177 Entry.second = unsigned(
6178 std::max(uint64_t(Entry.second), IdxArg->getLimitedValue(~0U) + 1));
6179 break;
6180 }
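  // For illustration, a sketch of the escape/recover pairing being recorded in
  // FrameEscapeInfo (function and value names are placeholders):
  //   ; in @parent's entry block:
  //   call void (...) @llvm.localescape(ptr %a, ptr %b)
  //   ; in another function:
  //   %p = call ptr @llvm.localrecover(ptr @parent, ptr %fp, i32 1)
  // The recover index (1) selects the second escaped static alloca, so the
  // recorded maximum index for @parent must be at least 2.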
6181
6182 case Intrinsic::experimental_gc_statepoint:
6183 if (auto *CI = dyn_cast<CallInst>(&Call))
6184 Check(!CI->isInlineAsm(),
6185 "gc.statepoint support for inline assembly unimplemented", CI);
6186 Check(Call.getParent()->getParent()->hasGC(),
6187 "Enclosing function does not use GC.", Call);
6188
6189 verifyStatepoint(Call);
6190 break;
6191 case Intrinsic::experimental_gc_result: {
6192 Check(Call.getParent()->getParent()->hasGC(),
6193 "Enclosing function does not use GC.", Call);
6194
6195 auto *Statepoint = Call.getArgOperand(0);
6196 if (isa<UndefValue>(Statepoint))
6197 break;
6198
6199 // Are we tied to a statepoint properly?
6200 const auto *StatepointCall = dyn_cast<CallBase>(Statepoint);
6201 Check(StatepointCall && StatepointCall->getIntrinsicID() ==
6202 Intrinsic::experimental_gc_statepoint,
6203 "gc.result operand #1 must be from a statepoint", Call,
6204 Call.getArgOperand(0));
6205
6206 // Check that result type matches wrapped callee.
6207 auto *TargetFuncType =
6208 cast<FunctionType>(StatepointCall->getParamElementType(2));
6209 Check(Call.getType() == TargetFuncType->getReturnType(),
6210 "gc.result result type does not match wrapped callee", Call);
6211 break;
6212 }
6213 case Intrinsic::experimental_gc_relocate: {
6214 Check(Call.arg_size() == 3, "wrong number of arguments", Call);
6215
6217 "gc.relocate must return a pointer or a vector of pointers", Call);
6218
6219 // Check that this relocate is correctly tied to the statepoint
6220
6221 // This is the case for a relocate on the unwinding path of an invoke statepoint.
6222 if (LandingPadInst *LandingPad =
6224
6225 const BasicBlock *InvokeBB =
6226 LandingPad->getParent()->getUniquePredecessor();
6227
6228 // Landingpad relocates should have only one predecessor, whose terminator
6229 // is an invoke statepoint.
6230 Check(InvokeBB, "safepoints should have unique landingpads",
6231 LandingPad->getParent());
6232 Check(InvokeBB->getTerminator(), "safepoint block should be well formed",
6233 InvokeBB);
6235 "gc relocate should be linked to a statepoint", InvokeBB);
6236 } else {
6237 // In all other cases the relocate should be tied to the statepoint directly.
6238 // This covers relocates on the normal return path of an invoke statepoint
6239 // and relocates of a call statepoint.
6240 auto *Token = Call.getArgOperand(0);
6242 "gc relocate is incorrectly tied to the statepoint", Call, Token);
6243 }
6244
6245 // Verify rest of the relocate arguments.
6246 const Value &StatepointCall = *cast<GCRelocateInst>(Call).getStatepoint();
6247
6248 // Both the base and derived must be piped through the safepoint.
6251 "gc.relocate operand #2 must be integer offset", Call);
6252
6253 Value *Derived = Call.getArgOperand(2);
6254 Check(isa<ConstantInt>(Derived),
6255 "gc.relocate operand #3 must be integer offset", Call);
6256
6257 const uint64_t BaseIndex = cast<ConstantInt>(Base)->getZExtValue();
6258 const uint64_t DerivedIndex = cast<ConstantInt>(Derived)->getZExtValue();
6259
6260 // Check the bounds
6261 if (isa<UndefValue>(StatepointCall))
6262 break;
6263 if (auto Opt = cast<GCStatepointInst>(StatepointCall)
6264 .getOperandBundle(LLVMContext::OB_gc_live)) {
6265 Check(BaseIndex < Opt->Inputs.size(),
6266 "gc.relocate: statepoint base index out of bounds", Call);
6267 Check(DerivedIndex < Opt->Inputs.size(),
6268 "gc.relocate: statepoint derived index out of bounds", Call);
6269 }
6270
6271 // Relocated value must be either a pointer type or vector-of-pointer type,
6272 // but gc_relocate does not need to return the same pointer type as the
6273 // relocated pointer. It can be cast to the correct type later if desired.
6274 // However, they must have the same address space and 'vectorness'.
6275 GCRelocateInst &Relocate = cast<GCRelocateInst>(Call);
6276 auto *ResultType = Call.getType();
6277 auto *DerivedType = Relocate.getDerivedPtr()->getType();
6278 auto *BaseType = Relocate.getBasePtr()->getType();
6279
6280 Check(BaseType->isPtrOrPtrVectorTy(),
6281 "gc.relocate: relocated value must be a pointer", Call);
6282 Check(DerivedType->isPtrOrPtrVectorTy(),
6283 "gc.relocate: relocated value must be a pointer", Call);
6284
6285 Check(ResultType->isVectorTy() == DerivedType->isVectorTy(),
6286 "gc.relocate: vector relocates to vector and pointer to pointer",
6287 Call);
6288 Check(
6289 ResultType->getPointerAddressSpace() ==
6290 DerivedType->getPointerAddressSpace(),
6291 "gc.relocate: relocating a pointer shouldn't change its address space",
6292 Call);
6293
6294 auto GC = llvm::getGCStrategy(Relocate.getFunction()->getGC());
6295 Check(GC, "gc.relocate: calling function must have GCStrategy",
6296 Call.getFunction());
6297 if (GC) {
6298 auto isGCPtr = [&GC](Type *PTy) {
6299 return GC->isGCManagedPointer(PTy->getScalarType()).value_or(true);
6300 };
6301 Check(isGCPtr(ResultType), "gc.relocate: must return gc pointer", Call);
6302 Check(isGCPtr(BaseType),
6303 "gc.relocate: relocated value must be a gc pointer", Call);
6304 Check(isGCPtr(DerivedType),
6305 "gc.relocate: relocated value must be a gc pointer", Call);
6306 }
6307 break;
6308 }
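  // For illustration, a simplified shape of the statepoint/relocate pairing
  // verified above (operands trimmed; names are placeholders and the exact
  // overload suffixes may differ):
  //   %tok = call token @llvm.experimental.gc.statepoint.p0(
  //              i64 0, i32 0, ptr elementtype(void ()) @f,
  //              i32 0, i32 0, i32 0, i32 0) [ "gc-live"(ptr addrspace(1) %obj) ]
  //   %new = call ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token %tok, i32 0, i32 0)
  // The relocate's base and derived operands (both 0 here) index into the
  // "gc-live" bundle, and the relocated value keeps its address space.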
6309 case Intrinsic::experimental_patchpoint: {
6310 if (Call.getCallingConv() == CallingConv::AnyReg) {
6312 "patchpoint: invalid return type used with anyregcc", Call);
6313 }
6314 break;
6315 }
6316 case Intrinsic::eh_exceptioncode:
6317 case Intrinsic::eh_exceptionpointer: {
6319 "eh.exceptionpointer argument must be a catchpad", Call);
6320 break;
6321 }
6322 case Intrinsic::get_active_lane_mask: {
6324 "get_active_lane_mask: must return a "
6325 "vector",
6326 Call);
6327 auto *ElemTy = Call.getType()->getScalarType();
6328 Check(ElemTy->isIntegerTy(1),
6329 "get_active_lane_mask: element type is not "
6330 "i1",
6331 Call);
6332 break;
6333 }
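  // For illustration (operand names are placeholders):
  //   %m = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i64(i64 %base, i64 %n)
  // The result is a vector with i1 elements, as the checks above require.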
6334 case Intrinsic::experimental_get_vector_length: {
6335 ConstantInt *VF = cast<ConstantInt>(Call.getArgOperand(1));
6336 Check(!VF->isNegative() && !VF->isZero(),
6337 "get_vector_length: VF must be positive", Call);
6338 break;
6339 }
6340 case Intrinsic::masked_load: {
6341 Check(Call.getType()->isVectorTy(), "masked_load: must return a vector",
6342 Call);
6343
6345 Value *PassThru = Call.getArgOperand(2);
6346 Check(Mask->getType()->isVectorTy(), "masked_load: mask must be vector",
6347 Call);
6348 Check(PassThru->getType() == Call.getType(),
6349 "masked_load: pass through and return type must match", Call);
6350 Check(cast<VectorType>(Mask->getType())->getElementCount() ==
6351 cast<VectorType>(Call.getType())->getElementCount(),
6352 "masked_load: vector mask must be same length as return", Call);
6353 break;
6354 }
6355 case Intrinsic::masked_store: {
6356 Value *Val = Call.getArgOperand(0);
6358 Check(Mask->getType()->isVectorTy(), "masked_store: mask must be vector",
6359 Call);
6360 Check(cast<VectorType>(Mask->getType())->getElementCount() ==
6361 cast<VectorType>(Val->getType())->getElementCount(),
6362 "masked_store: vector mask must be same length as value", Call);
6363 break;
6364 }
6365
6366 case Intrinsic::experimental_guard: {
6367 Check(isa<CallInst>(Call), "experimental_guard cannot be invoked", Call);
6369 "experimental_guard must have exactly one "
6370 "\"deopt\" operand bundle");
6371 break;
6372 }
6373
6374 case Intrinsic::experimental_deoptimize: {
6375 Check(isa<CallInst>(Call), "experimental_deoptimize cannot be invoked",
6376 Call);
6378 "experimental_deoptimize must have exactly one "
6379 "\"deopt\" operand bundle");
6381 "experimental_deoptimize return type must match caller return type");
6382
6383 if (isa<CallInst>(Call)) {
6385 Check(RI,
6386 "calls to experimental_deoptimize must be followed by a return");
6387
6388 if (!Call.getType()->isVoidTy() && RI)
6389 Check(RI->getReturnValue() == &Call,
6390 "calls to experimental_deoptimize must be followed by a return "
6391 "of the value computed by experimental_deoptimize");
6392 }
6393
6394 break;
6395 }
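  // For illustration, the pattern required by the checks above (types and
  // deopt state are placeholders):
  //   %v = call i32 (...) @llvm.experimental.deoptimize.i32(i32 1) [ "deopt"(i32 42) ]
  //   ret i32 %v
  // Exactly one "deopt" bundle, a return type matching the caller, and an
  // immediately following return of the produced value.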
6396 case Intrinsic::vastart: {
6398 "va_start called in a non-varargs function");
6399 break;
6400 }
6401 case Intrinsic::get_dynamic_area_offset: {
6402 auto *IntTy = dyn_cast<IntegerType>(Call.getType());
6403 Check(IntTy && DL.getPointerSizeInBits(DL.getAllocaAddrSpace()) ==
6404 IntTy->getBitWidth(),
6405 "get_dynamic_area_offset result type must be scalar integer matching "
6406 "alloca address space width",
6407 Call);
6408 break;
6409 }
6410 case Intrinsic::vector_reduce_and:
6411 case Intrinsic::vector_reduce_or:
6412 case Intrinsic::vector_reduce_xor:
6413 case Intrinsic::vector_reduce_add:
6414 case Intrinsic::vector_reduce_mul:
6415 case Intrinsic::vector_reduce_smax:
6416 case Intrinsic::vector_reduce_smin:
6417 case Intrinsic::vector_reduce_umax:
6418 case Intrinsic::vector_reduce_umin: {
6419 Type *ArgTy = Call.getArgOperand(0)->getType();
6420 Check(ArgTy->isIntOrIntVectorTy() && ArgTy->isVectorTy(),
6421 "Intrinsic has incorrect argument type!");
6422 break;
6423 }
6424 case Intrinsic::vector_reduce_fmax:
6425 case Intrinsic::vector_reduce_fmin: {
6426 Type *ArgTy = Call.getArgOperand(0)->getType();
6427 Check(ArgTy->isFPOrFPVectorTy() && ArgTy->isVectorTy(),
6428 "Intrinsic has incorrect argument type!");
6429 break;
6430 }
6431 case Intrinsic::vector_reduce_fadd:
6432 case Intrinsic::vector_reduce_fmul: {
6433 // Unlike the other reductions, the first argument is a start value. The
6434 // second argument is the vector to be reduced.
6435 Type *ArgTy = Call.getArgOperand(1)->getType();
6436 Check(ArgTy->isFPOrFPVectorTy() && ArgTy->isVectorTy(),
6437 "Intrinsic has incorrect argument type!");
6438 break;
6439 }
6440 case Intrinsic::smul_fix:
6441 case Intrinsic::smul_fix_sat:
6442 case Intrinsic::umul_fix:
6443 case Intrinsic::umul_fix_sat:
6444 case Intrinsic::sdiv_fix:
6445 case Intrinsic::sdiv_fix_sat:
6446 case Intrinsic::udiv_fix:
6447 case Intrinsic::udiv_fix_sat: {
6448 Value *Op1 = Call.getArgOperand(0);
6449 Value *Op2 = Call.getArgOperand(1);
6451 "first operand of [us][mul|div]_fix[_sat] must be an int type or "
6452 "vector of ints");
6454 "second operand of [us][mul|div]_fix[_sat] must be an int type or "
6455 "vector of ints");
6456
6457 auto *Op3 = cast<ConstantInt>(Call.getArgOperand(2));
6458 Check(Op3->getType()->isIntegerTy(),
6459 "third operand of [us][mul|div]_fix[_sat] must be an int type");
6460 Check(Op3->getBitWidth() <= 32,
6461 "third operand of [us][mul|div]_fix[_sat] must fit within 32 bits");
6462
6463 if (ID == Intrinsic::smul_fix || ID == Intrinsic::smul_fix_sat ||
6464 ID == Intrinsic::sdiv_fix || ID == Intrinsic::sdiv_fix_sat) {
6465 Check(Op3->getZExtValue() < Op1->getType()->getScalarSizeInBits(),
6466 "the scale of s[mul|div]_fix[_sat] must be less than the width of "
6467 "the operands");
6468 } else {
6469 Check(Op3->getZExtValue() <= Op1->getType()->getScalarSizeInBits(),
6470 "the scale of u[mul|div]_fix[_sat] must be less than or equal "
6471 "to the width of the operands");
6472 }
6473 break;
6474 }
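  // For illustration, a scale that passes the signed-case bound above (operand
  // names are placeholders):
  //   %r = call i32 @llvm.smul.fix.i32(i32 %a, i32 %b, i32 15)
  // The scale (15) is strictly less than the 32-bit operand width; for the
  // unsigned variants a scale equal to the width would also be accepted.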
6475 case Intrinsic::lrint:
6476 case Intrinsic::llrint:
6477 case Intrinsic::lround:
6478 case Intrinsic::llround: {
6479 Type *ValTy = Call.getArgOperand(0)->getType();
6480 Type *ResultTy = Call.getType();
6481 auto *VTy = dyn_cast<VectorType>(ValTy);
6482 auto *RTy = dyn_cast<VectorType>(ResultTy);
6483 Check(ValTy->isFPOrFPVectorTy() && ResultTy->isIntOrIntVectorTy(),
6484 ExpectedName + ": argument must be floating-point or vector "
6485 "of floating-points, and result must be integer or "
6486 "vector of integers",
6487 &Call);
6488 Check(ValTy->isVectorTy() == ResultTy->isVectorTy(),
6489 ExpectedName + ": argument and result disagree on vector use", &Call);
6490 if (VTy) {
6491 Check(VTy->getElementCount() == RTy->getElementCount(),
6492 ExpectedName + ": argument must be same length as result", &Call);
6493 }
6494 break;
6495 }
6496 case Intrinsic::bswap: {
6497 Type *Ty = Call.getType();
6498 unsigned Size = Ty->getScalarSizeInBits();
6499 Check(Size % 16 == 0, "bswap must be an even number of bytes", &Call);
6500 break;
6501 }
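  // For illustration (operand name is a placeholder):
  //   %r = call i48 @llvm.bswap.i48(i48 %x)   ; 48 bits = 6 bytes, accepted
  // An i8 or i24 operand would be rejected by the Size % 16 check above.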
6502 case Intrinsic::invariant_start: {
6503 ConstantInt *InvariantSize = dyn_cast<ConstantInt>(Call.getArgOperand(0));
6504 Check(InvariantSize &&
6505 (!InvariantSize->isNegative() || InvariantSize->isMinusOne()),
6506 "invariant_start parameter must be -1, 0 or a positive number",
6507 &Call);
6508 break;
6509 }
6510 case Intrinsic::matrix_multiply:
6511 case Intrinsic::matrix_transpose:
6512 case Intrinsic::matrix_column_major_load:
6513 case Intrinsic::matrix_column_major_store: {
6515 ConstantInt *Stride = nullptr;
6516 ConstantInt *NumRows;
6517 ConstantInt *NumColumns;
6518 VectorType *ResultTy;
6519 Type *Op0ElemTy = nullptr;
6520 Type *Op1ElemTy = nullptr;
6521 switch (ID) {
6522 case Intrinsic::matrix_multiply: {
6523 NumRows = cast<ConstantInt>(Call.getArgOperand(2));
6524 ConstantInt *N = cast<ConstantInt>(Call.getArgOperand(3));
6525 NumColumns = cast<ConstantInt>(Call.getArgOperand(4));
6527 ->getNumElements() ==
6528 NumRows->getZExtValue() * N->getZExtValue(),
6529 "First argument of a matrix operation does not match specified "
6530 "shape!");
6532 ->getNumElements() ==
6533 N->getZExtValue() * NumColumns->getZExtValue(),
6534 "Second argument of a matrix operation does not match specified "
6535 "shape!");
6536
6537 ResultTy = cast<VectorType>(Call.getType());
6538 Op0ElemTy =
6539 cast<VectorType>(Call.getArgOperand(0)->getType())->getElementType();
6540 Op1ElemTy =
6541 cast<VectorType>(Call.getArgOperand(1)->getType())->getElementType();
6542 break;
6543 }
6544 case Intrinsic::matrix_transpose:
6545 NumRows = cast<ConstantInt>(Call.getArgOperand(1));
6546 NumColumns = cast<ConstantInt>(Call.getArgOperand(2));
6547 ResultTy = cast<VectorType>(Call.getType());
6548 Op0ElemTy =
6549 cast<VectorType>(Call.getArgOperand(0)->getType())->getElementType();
6550 break;
6551 case Intrinsic::matrix_column_major_load: {
6553 NumRows = cast<ConstantInt>(Call.getArgOperand(3));
6554 NumColumns = cast<ConstantInt>(Call.getArgOperand(4));
6555 ResultTy = cast<VectorType>(Call.getType());
6556 break;
6557 }
6558 case Intrinsic::matrix_column_major_store: {
6560 NumRows = cast<ConstantInt>(Call.getArgOperand(4));
6561 NumColumns = cast<ConstantInt>(Call.getArgOperand(5));
6562 ResultTy = cast<VectorType>(Call.getArgOperand(0)->getType());
6563 Op0ElemTy =
6564 cast<VectorType>(Call.getArgOperand(0)->getType())->getElementType();
6565 break;
6566 }
6567 default:
6568 llvm_unreachable("unexpected intrinsic");
6569 }
6570
6571 Check(ResultTy->getElementType()->isIntegerTy() ||
6572 ResultTy->getElementType()->isFloatingPointTy(),
6573 "Result type must be an integer or floating-point type!", IF);
6574
6575 if (Op0ElemTy)
6576 Check(ResultTy->getElementType() == Op0ElemTy,
6577 "Vector element type mismatch of the result and first operand "
6578 "vector!",
6579 IF);
6580
6581 if (Op1ElemTy)
6582 Check(ResultTy->getElementType() == Op1ElemTy,
6583 "Vector element type mismatch of the result and second operand "
6584 "vector!",
6585 IF);
6586
6588 NumRows->getZExtValue() * NumColumns->getZExtValue(),
6589 "Result of a matrix operation does not fit in the returned vector!");
6590
6591 if (Stride) {
6592 Check(Stride->getBitWidth() <= 64, "Stride bitwidth cannot exceed 64!",
6593 IF);
6594 Check(Stride->getZExtValue() >= NumRows->getZExtValue(),
6595 "Stride must be greater or equal than the number of rows!", IF);
6596 }
6597
6598 break;
6599 }
6600 case Intrinsic::vector_splice_left:
6601 case Intrinsic::vector_splice_right: {
6603 uint64_t Idx = cast<ConstantInt>(Call.getArgOperand(2))->getZExtValue();
6604 uint64_t KnownMinNumElements = VecTy->getElementCount().getKnownMinValue();
6605 if (VecTy->isScalableTy() && Call.getParent() &&
6606 Call.getParent()->getParent()) {
6607 AttributeList Attrs = Call.getParent()->getParent()->getAttributes();
6608 if (Attrs.hasFnAttr(Attribute::VScaleRange))
6609 KnownMinNumElements *= Attrs.getFnAttrs().getVScaleRangeMin();
6610 }
6611 if (ID == Intrinsic::vector_splice_left)
6612 Check(Idx < KnownMinNumElements,
6613 "The splice index exceeds the range [0, VL-1] where VL is the "
6614 "known minimum number of elements in the vector. For scalable "
6615 "vectors the minimum number of elements is determined from "
6616 "vscale_range.",
6617 &Call);
6618 else
6619 Check(Idx <= KnownMinNumElements,
6620 "The splice index exceeds the range [0, VL] where VL is the "
6621 "known minimum number of elements in the vector. For scalable "
6622 "vectors the minimum number of elements is determined from "
6623 "vscale_range.",
6624 &Call);
6625 break;
6626 }
6627 case Intrinsic::stepvector: {
6629 Check(VecTy && VecTy->getScalarType()->isIntegerTy() &&
6630 VecTy->getScalarSizeInBits() >= 8,
6631 "stepvector only supported for vectors of integers "
6632 "with a bitwidth of at least 8.",
6633 &Call);
6634 break;
6635 }
6636 case Intrinsic::experimental_vector_match: {
6637 Value *Op1 = Call.getArgOperand(0);
6638 Value *Op2 = Call.getArgOperand(1);
6640
6641 VectorType *Op1Ty = dyn_cast<VectorType>(Op1->getType());
6642 VectorType *Op2Ty = dyn_cast<VectorType>(Op2->getType());
6643 VectorType *MaskTy = dyn_cast<VectorType>(Mask->getType());
6644
6645 Check(Op1Ty && Op2Ty && MaskTy, "Operands must be vectors.", &Call);
6647 "Second operand must be a fixed length vector.", &Call);
6648 Check(Op1Ty->getElementType()->isIntegerTy(),
6649 "First operand must be a vector of integers.", &Call);
6650 Check(Op1Ty->getElementType() == Op2Ty->getElementType(),
6651 "First two operands must have the same element type.", &Call);
6652 Check(Op1Ty->getElementCount() == MaskTy->getElementCount(),
6653 "First operand and mask must have the same number of elements.",
6654 &Call);
6655 Check(MaskTy->getElementType()->isIntegerTy(1),
6656 "Mask must be a vector of i1's.", &Call);
6657 Check(Call.getType() == MaskTy, "Return type must match the mask type.",
6658 &Call);
6659 break;
6660 }
6661 case Intrinsic::vector_insert: {
6662 Value *Vec = Call.getArgOperand(0);
6663 Value *SubVec = Call.getArgOperand(1);
6664 Value *Idx = Call.getArgOperand(2);
6665 unsigned IdxN = cast<ConstantInt>(Idx)->getZExtValue();
6666
6667 VectorType *VecTy = cast<VectorType>(Vec->getType());
6668 VectorType *SubVecTy = cast<VectorType>(SubVec->getType());
6669
6670 ElementCount VecEC = VecTy->getElementCount();
6671 ElementCount SubVecEC = SubVecTy->getElementCount();
6672 Check(VecTy->getElementType() == SubVecTy->getElementType(),
6673 "vector_insert parameters must have the same element "
6674 "type.",
6675 &Call);
6676 Check(IdxN % SubVecEC.getKnownMinValue() == 0,
6677 "vector_insert index must be a constant multiple of "
6678 "the subvector's known minimum vector length.");
6679
6680 // If this insertion is not the 'mixed' case where a fixed vector is
6681 // inserted into a scalable vector, ensure that the insertion of the
6682 // subvector does not overrun the parent vector.
6683 if (VecEC.isScalable() == SubVecEC.isScalable()) {
6684 Check(IdxN < VecEC.getKnownMinValue() &&
6685 IdxN + SubVecEC.getKnownMinValue() <= VecEC.getKnownMinValue(),
6686 "subvector operand of vector_insert would overrun the "
6687 "vector being inserted into.");
6688 }
6689 break;
6690 }
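  // For illustration, an in-bounds fixed-width insertion that satisfies the
  // index and overrun checks above (value names are placeholders):
  //   %v = call <8 x i32> @llvm.vector.insert.v8i32.v4i32(
  //            <8 x i32> %vec, <4 x i32> %sub, i64 4)
  // The index (4) is a multiple of the subvector length and 4 + 4 <= 8.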
6691 case Intrinsic::vector_extract: {
6692 Value *Vec = Call.getArgOperand(0);
6693 Value *Idx = Call.getArgOperand(1);
6694 unsigned IdxN = cast<ConstantInt>(Idx)->getZExtValue();
6695
6696 VectorType *ResultTy = cast<VectorType>(Call.getType());
6697 VectorType *VecTy = cast<VectorType>(Vec->getType());
6698
6699 ElementCount VecEC = VecTy->getElementCount();
6700 ElementCount ResultEC = ResultTy->getElementCount();
6701
6702 Check(ResultTy->getElementType() == VecTy->getElementType(),
6703 "vector_extract result must have the same element "
6704 "type as the input vector.",
6705 &Call);
6706 Check(IdxN % ResultEC.getKnownMinValue() == 0,
6707 "vector_extract index must be a constant multiple of "
6708 "the result type's known minimum vector length.");
6709
6710 // If this extraction is not the 'mixed' case where a fixed vector is
6711 // extracted from a scalable vector, ensure that the extraction does not
6712 // overrun the parent vector.
6713 if (VecEC.isScalable() == ResultEC.isScalable()) {
6714 Check(IdxN < VecEC.getKnownMinValue() &&
6715 IdxN + ResultEC.getKnownMinValue() <= VecEC.getKnownMinValue(),
6716 "vector_extract would overrun.");
6717 }
6718 break;
6719 }
6720 case Intrinsic::vector_partial_reduce_fadd:
6721 case Intrinsic::vector_partial_reduce_add: {
6724
6725 unsigned VecWidth = VecTy->getElementCount().getKnownMinValue();
6726 unsigned AccWidth = AccTy->getElementCount().getKnownMinValue();
6727
6728 Check((VecWidth % AccWidth) == 0,
6729 "Invalid vector widths for partial "
6730 "reduction. The width of the input vector "
6731 "must be a positive integer multiple of "
6732 "the width of the accumulator vector.");
6733 break;
6734 }
6735 case Intrinsic::experimental_noalias_scope_decl: {
6736 NoAliasScopeDecls.push_back(cast<IntrinsicInst>(&Call));
6737 break;
6738 }
6739 case Intrinsic::preserve_array_access_index:
6740 case Intrinsic::preserve_struct_access_index:
6741 case Intrinsic::aarch64_ldaxr:
6742 case Intrinsic::aarch64_ldxr:
6743 case Intrinsic::arm_ldaex:
6744 case Intrinsic::arm_ldrex: {
6745 Type *ElemTy = Call.getParamElementType(0);
6746 Check(ElemTy, "Intrinsic requires elementtype attribute on first argument.",
6747 &Call);
6748 break;
6749 }
6750 case Intrinsic::aarch64_stlxr:
6751 case Intrinsic::aarch64_stxr:
6752 case Intrinsic::arm_stlex:
6753 case Intrinsic::arm_strex: {
6754 Type *ElemTy = Call.getAttributes().getParamElementType(1);
6755 Check(ElemTy,
6756 "Intrinsic requires elementtype attribute on second argument.",
6757 &Call);
6758 break;
6759 }
6760 case Intrinsic::aarch64_prefetch: {
6761 Check(cast<ConstantInt>(Call.getArgOperand(1))->getZExtValue() < 2,
6762 "write argument to llvm.aarch64.prefetch must be 0 or 1", Call);
6763 Check(cast<ConstantInt>(Call.getArgOperand(2))->getZExtValue() < 4,
6764 "target argument to llvm.aarch64.prefetch must be 0-3", Call);
6765 Check(cast<ConstantInt>(Call.getArgOperand(3))->getZExtValue() < 2,
6766 "stream argument to llvm.aarch64.prefetch must be 0 or 1", Call);
6767 Check(cast<ConstantInt>(Call.getArgOperand(4))->getZExtValue() < 2,
6768 "isdata argument to llvm.aarch64.prefetch must be 0 or 1", Call);
6769 break;
6770 }
6771 case Intrinsic::callbr_landingpad: {
6772 const auto *CBR = dyn_cast<CallBrInst>(Call.getOperand(0));
6773 Check(CBR, "intrinsic requires callbr operand", &Call);
6774 if (!CBR)
6775 break;
6776
6777 const BasicBlock *LandingPadBB = Call.getParent();
6778 const BasicBlock *PredBB = LandingPadBB->getUniquePredecessor();
6779 if (!PredBB) {
6780 CheckFailed("Intrinsic in block must have 1 unique predecessor", &Call);
6781 break;
6782 }
6783 if (!isa<CallBrInst>(PredBB->getTerminator())) {
6784 CheckFailed("Intrinsic must have corresponding callbr in predecessor",
6785 &Call);
6786 break;
6787 }
6788 Check(llvm::is_contained(CBR->getIndirectDests(), LandingPadBB),
6789 "Intrinsic's corresponding callbr must have intrinsic's parent basic "
6790 "block in indirect destination list",
6791 &Call);
6792 const Instruction &First = *LandingPadBB->begin();
6793 Check(&First == &Call, "No other instructions may proceed intrinsic",
6794 &Call);
6795 break;
6796 }
6797 case Intrinsic::amdgcn_cs_chain: {
6798 auto CallerCC = Call.getCaller()->getCallingConv();
6799 switch (CallerCC) {
6800 case CallingConv::AMDGPU_CS:
6801 case CallingConv::AMDGPU_CS_Chain:
6802 case CallingConv::AMDGPU_CS_ChainPreserve:
6803 case CallingConv::AMDGPU_ES:
6804 case CallingConv::AMDGPU_GS:
6805 case CallingConv::AMDGPU_HS:
6806 case CallingConv::AMDGPU_LS:
6807 case CallingConv::AMDGPU_VS:
6808 break;
6809 default:
6810 CheckFailed("Intrinsic cannot be called from functions with this "
6811 "calling convention",
6812 &Call);
6813 break;
6814 }
6815
6816 Check(Call.paramHasAttr(2, Attribute::InReg),
6817 "SGPR arguments must have the `inreg` attribute", &Call);
6818 Check(!Call.paramHasAttr(3, Attribute::InReg),
6819 "VGPR arguments must not have the `inreg` attribute", &Call);
6820
6821 auto *Next = Call.getNextNode();
6822 bool IsAMDUnreachable = Next && isa<IntrinsicInst>(Next) &&
6823 cast<IntrinsicInst>(Next)->getIntrinsicID() ==
6824 Intrinsic::amdgcn_unreachable;
6825 Check(Next && (isa<UnreachableInst>(Next) || IsAMDUnreachable),
6826 "llvm.amdgcn.cs.chain must be followed by unreachable", &Call);
6827 break;
6828 }
6829 case Intrinsic::amdgcn_init_exec_from_input: {
6830 const Argument *Arg = dyn_cast<Argument>(Call.getOperand(0));
6831 Check(Arg && Arg->hasInRegAttr(),
6832 "only inreg arguments to the parent function are valid as inputs to "
6833 "this intrinsic",
6834 &Call);
6835 break;
6836 }
6837 case Intrinsic::amdgcn_set_inactive_chain_arg: {
6838 auto CallerCC = Call.getCaller()->getCallingConv();
6839 switch (CallerCC) {
6840 case CallingConv::AMDGPU_CS_Chain:
6841 case CallingConv::AMDGPU_CS_ChainPreserve:
6842 break;
6843 default:
6844 CheckFailed("Intrinsic can only be used from functions with the "
6845 "amdgpu_cs_chain or amdgpu_cs_chain_preserve "
6846 "calling conventions",
6847 &Call);
6848 break;
6849 }
6850
6851 unsigned InactiveIdx = 1;
6852 Check(!Call.paramHasAttr(InactiveIdx, Attribute::InReg),
6853 "Value for inactive lanes must not have the `inreg` attribute",
6854 &Call);
6855 Check(isa<Argument>(Call.getArgOperand(InactiveIdx)),
6856 "Value for inactive lanes must be a function argument", &Call);
6857 Check(!cast<Argument>(Call.getArgOperand(InactiveIdx))->hasInRegAttr(),
6858 "Value for inactive lanes must be a VGPR function argument", &Call);
6859 break;
6860 }
6861 case Intrinsic::amdgcn_call_whole_wave: {
6863 Check(F, "Indirect whole wave calls are not allowed", &Call);
6864
6865 CallingConv::ID CC = F->getCallingConv();
6866 Check(CC == CallingConv::AMDGPU_Gfx_WholeWave,
6867 "Callee must have the amdgpu_gfx_whole_wave calling convention",
6868 &Call);
6869
6870 Check(!F->isVarArg(), "Variadic whole wave calls are not allowed", &Call);
6871
6872 Check(Call.arg_size() == F->arg_size(),
6873 "Call argument count must match callee argument count", &Call);
6874
6875 // The first argument of the call is the callee, and the first argument of
6876 // the callee is the active mask. The rest of the arguments must match.
6877 Check(F->arg_begin()->getType()->isIntegerTy(1),
6878 "Callee must have i1 as its first argument", &Call);
6879 for (auto [CallArg, FuncArg] :
6880 drop_begin(zip_equal(Call.args(), F->args()))) {
6881 Check(CallArg->getType() == FuncArg.getType(),
6882 "Argument types must match", &Call);
6883
6884 // Check that inreg attributes match between call site and function
6885 Check(Call.paramHasAttr(FuncArg.getArgNo(), Attribute::InReg) ==
6886 FuncArg.hasInRegAttr(),
6887 "Argument inreg attributes must match", &Call);
6888 }
6889 break;
6890 }
6891 case Intrinsic::amdgcn_s_prefetch_data: {
6892 Check(
6895 "llvm.amdgcn.s.prefetch.data only supports global or constant memory");
6896 break;
6897 }
6898 case Intrinsic::amdgcn_mfma_scale_f32_16x16x128_f8f6f4:
6899 case Intrinsic::amdgcn_mfma_scale_f32_32x32x64_f8f6f4: {
6900 Value *Src0 = Call.getArgOperand(0);
6901 Value *Src1 = Call.getArgOperand(1);
6902
6903 uint64_t CBSZ = cast<ConstantInt>(Call.getArgOperand(3))->getZExtValue();
6904 uint64_t BLGP = cast<ConstantInt>(Call.getArgOperand(4))->getZExtValue();
6905 Check(CBSZ <= 4, "invalid value for cbsz format", Call,
6906 Call.getArgOperand(3));
6907 Check(BLGP <= 4, "invalid value for blgp format", Call,
6908 Call.getArgOperand(4));
6909
6910 // AMDGPU::MFMAScaleFormats values
6911 auto getFormatNumRegs = [](unsigned FormatVal) {
6912 switch (FormatVal) {
6913 case 0:
6914 case 1:
6915 return 8u;
6916 case 2:
6917 case 3:
6918 return 6u;
6919 case 4:
6920 return 4u;
6921 default:
6922 llvm_unreachable("invalid format value");
6923 }
6924 };
6925
6926 auto isValidSrcASrcBVector = [](FixedVectorType *Ty) {
6927 if (!Ty || !Ty->getElementType()->isIntegerTy(32))
6928 return false;
6929 unsigned NumElts = Ty->getNumElements();
6930 return NumElts == 4 || NumElts == 6 || NumElts == 8;
6931 };
6932
6933 auto *Src0Ty = dyn_cast<FixedVectorType>(Src0->getType());
6934 auto *Src1Ty = dyn_cast<FixedVectorType>(Src1->getType());
6935 Check(isValidSrcASrcBVector(Src0Ty),
6936 "operand 0 must be 4, 6 or 8 element i32 vector", &Call, Src0);
6937 Check(isValidSrcASrcBVector(Src1Ty),
6938 "operand 1 must be 4, 6 or 8 element i32 vector", &Call, Src1);
6939
6940 // Permit excess registers for the format.
6941 Check(Src0Ty->getNumElements() >= getFormatNumRegs(CBSZ),
6942 "invalid vector type for format", &Call, Src0, Call.getArgOperand(3));
6943 Check(Src1Ty->getNumElements() >= getFormatNumRegs(BLGP),
6944 "invalid vector type for format", &Call, Src1, Call.getArgOperand(5));
6945 break;
6946 }
6947 case Intrinsic::amdgcn_wmma_f32_16x16x128_f8f6f4:
6948 case Intrinsic::amdgcn_wmma_scale_f32_16x16x128_f8f6f4:
6949 case Intrinsic::amdgcn_wmma_scale16_f32_16x16x128_f8f6f4: {
6950 Value *Src0 = Call.getArgOperand(1);
6951 Value *Src1 = Call.getArgOperand(3);
6952
6953 unsigned FmtA = cast<ConstantInt>(Call.getArgOperand(0))->getZExtValue();
6954 unsigned FmtB = cast<ConstantInt>(Call.getArgOperand(2))->getZExtValue();
6955 Check(FmtA <= 4, "invalid value for matrix format", Call,
6956 Call.getArgOperand(0));
6957 Check(FmtB <= 4, "invalid value for matrix format", Call,
6958 Call.getArgOperand(2));
6959
6960 // AMDGPU::MatrixFMT values
6961 auto getFormatNumRegs = [](unsigned FormatVal) {
6962 switch (FormatVal) {
6963 case 0:
6964 case 1:
6965 return 16u;
6966 case 2:
6967 case 3:
6968 return 12u;
6969 case 4:
6970 return 8u;
6971 default:
6972 llvm_unreachable("invalid format value");
6973 }
6974 };
6975
6976 auto isValidSrcASrcBVector = [](FixedVectorType *Ty) {
6977 if (!Ty || !Ty->getElementType()->isIntegerTy(32))
6978 return false;
6979 unsigned NumElts = Ty->getNumElements();
6980 return NumElts == 16 || NumElts == 12 || NumElts == 8;
6981 };
6982
6983 auto *Src0Ty = dyn_cast<FixedVectorType>(Src0->getType());
6984 auto *Src1Ty = dyn_cast<FixedVectorType>(Src1->getType());
6985 Check(isValidSrcASrcBVector(Src0Ty),
6986 "operand 1 must be 8, 12 or 16 element i32 vector", &Call, Src0);
6987 Check(isValidSrcASrcBVector(Src1Ty),
6988 "operand 3 must be 8, 12 or 16 element i32 vector", &Call, Src1);
6989
6990 // Permit excess registers for the format.
6991 Check(Src0Ty->getNumElements() >= getFormatNumRegs(FmtA),
6992 "invalid vector type for format", &Call, Src0, Call.getArgOperand(0));
6993 Check(Src1Ty->getNumElements() >= getFormatNumRegs(FmtB),
6994 "invalid vector type for format", &Call, Src1, Call.getArgOperand(2));
6995 break;
6996 }
6997 case Intrinsic::amdgcn_cooperative_atomic_load_32x4B:
6998 case Intrinsic::amdgcn_cooperative_atomic_load_16x8B:
6999 case Intrinsic::amdgcn_cooperative_atomic_load_8x16B:
7000 case Intrinsic::amdgcn_cooperative_atomic_store_32x4B:
7001 case Intrinsic::amdgcn_cooperative_atomic_store_16x8B:
7002 case Intrinsic::amdgcn_cooperative_atomic_store_8x16B: {
7003 // Check we only use this intrinsic on the FLAT or GLOBAL address spaces.
7004 Value *PtrArg = Call.getArgOperand(0);
7005 const unsigned AS = PtrArg->getType()->getPointerAddressSpace();
7007 "cooperative atomic intrinsics require a generic or global pointer",
7008 &Call, PtrArg);
7009
7010 // The last argument must be an MDString.
7012 MDNode *MD = cast<MDNode>(Op->getMetadata());
7013 Check((MD->getNumOperands() == 1) && isa<MDString>(MD->getOperand(0)),
7014 "cooperative atomic intrinsics require that the last argument is a "
7015 "metadata string",
7016 &Call, Op);
7017 break;
7018 }
7019 case Intrinsic::nvvm_setmaxnreg_inc_sync_aligned_u32:
7020 case Intrinsic::nvvm_setmaxnreg_dec_sync_aligned_u32: {
7021 Value *V = Call.getArgOperand(0);
7022 unsigned RegCount = cast<ConstantInt>(V)->getZExtValue();
7023 Check(RegCount % 8 == 0,
7024 "reg_count argument to nvvm.setmaxnreg must be in multiples of 8");
7025 break;
7026 }
7027 case Intrinsic::experimental_convergence_entry:
7028 case Intrinsic::experimental_convergence_anchor:
7029 break;
7030 case Intrinsic::experimental_convergence_loop:
7031 break;
7032 case Intrinsic::ptrmask: {
7033 Type *Ty0 = Call.getArgOperand(0)->getType();
7034 Type *Ty1 = Call.getArgOperand(1)->getType();
7036 "llvm.ptrmask intrinsic first argument must be pointer or vector "
7037 "of pointers",
7038 &Call);
7039 Check(
7040 Ty0->isVectorTy() == Ty1->isVectorTy(),
7041 "llvm.ptrmask intrinsic arguments must be both scalars or both vectors",
7042 &Call);
7043 if (Ty0->isVectorTy())
7044 Check(cast<VectorType>(Ty0)->getElementCount() ==
7045 cast<VectorType>(Ty1)->getElementCount(),
7046 "llvm.ptrmask intrinsic arguments must have the same number of "
7047 "elements",
7048 &Call);
7049 Check(DL.getIndexTypeSizeInBits(Ty0) == Ty1->getScalarSizeInBits(),
7050 "llvm.ptrmask intrinsic second argument bitwidth must match "
7051 "pointer index type size of first argument",
7052 &Call);
7053 break;
7054 }
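  // For illustration, assuming a target whose pointer index type is 64 bits
  // wide (names are placeholders):
  //   %aligned = call ptr @llvm.ptrmask.p0.i64(ptr %p, i64 -16)
  // The i64 mask width matches the index type size of the pointer argument,
  // and here it clears the low four address bits.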
7055 case Intrinsic::thread_pointer: {
7057 DL.getDefaultGlobalsAddressSpace(),
7058 "llvm.thread.pointer intrinsic return type must be for the globals "
7059 "address space",
7060 &Call);
7061 break;
7062 }
7063 case Intrinsic::threadlocal_address: {
7064 const Value &Arg0 = *Call.getArgOperand(0);
7065 Check(isa<GlobalValue>(Arg0),
7066 "llvm.threadlocal.address first argument must be a GlobalValue");
7067 Check(cast<GlobalValue>(Arg0).isThreadLocal(),
7068 "llvm.threadlocal.address operand isThreadLocal() must be true");
7069 break;
7070 }
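  // For illustration (the global name is a placeholder):
  //   @tls = thread_local global i32 0
  //   %p = call ptr @llvm.threadlocal.address.p0(ptr @tls)
  // The operand is a GlobalValue with isThreadLocal() set, as required above.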
7071 case Intrinsic::lifetime_start:
7072 case Intrinsic::lifetime_end: {
7073 Value *Ptr = Call.getArgOperand(0);
7075 "llvm.lifetime.start/end can only be used on alloca or poison",
7076 &Call);
7077 break;
7078 }
7079 };
7080
7081 // Verify that there aren't any unmediated control transfers between funclets.
7083 Function *F = Call.getParent()->getParent();
7084 if (F->hasPersonalityFn() &&
7085 isScopedEHPersonality(classifyEHPersonality(F->getPersonalityFn()))) {
7086 // Run EH funclet coloring on-demand and cache results for other intrinsic
7087 // calls in this function
7088 if (BlockEHFuncletColors.empty())
7089 BlockEHFuncletColors = colorEHFunclets(*F);
7090
7091 // Check for catch-/cleanup-pad in first funclet block
7092 bool InEHFunclet = false;
7093 BasicBlock *CallBB = Call.getParent();
7094 const ColorVector &CV = BlockEHFuncletColors.find(CallBB)->second;
7095 assert(CV.size() > 0 && "Uncolored block");
7096 for (BasicBlock *ColorFirstBB : CV)
7097 if (auto It = ColorFirstBB->getFirstNonPHIIt();
7098 It != ColorFirstBB->end())
7100 InEHFunclet = true;
7101
7102 // Check for funclet operand bundle
7103 bool HasToken = false;
7104 for (unsigned I = 0, E = Call.getNumOperandBundles(); I != E; ++I)
7106 HasToken = true;
7107
7108 // This would cause silent code truncation in WinEHPrepare
7109 if (InEHFunclet)
7110 Check(HasToken, "Missing funclet token on intrinsic call", &Call);
7111 }
7112 }
7113}
7114
7115/// Carefully grab the subprogram from a local scope.
7116///
7117/// This carefully grabs the subprogram from a local scope, avoiding the
7118/// built-in assertions that would typically fire.
7120 if (!LocalScope)
7121 return nullptr;
7122
7123 if (auto *SP = dyn_cast<DISubprogram>(LocalScope))
7124 return SP;
7125
7126 if (auto *LB = dyn_cast<DILexicalBlockBase>(LocalScope))
7127 return getSubprogram(LB->getRawScope());
7128
7129 // Just return null; broken scope chains are checked elsewhere.
7130 assert(!isa<DILocalScope>(LocalScope) && "Unknown type of local scope");
7131 return nullptr;
7132}
7133
7134void Verifier::visit(DbgLabelRecord &DLR) {
7136 "invalid #dbg_label intrinsic variable", &DLR, DLR.getRawLabel());
7137
7138 // Ignore broken !dbg attachments; they're checked elsewhere.
7139 if (MDNode *N = DLR.getDebugLoc().getAsMDNode())
7140 if (!isa<DILocation>(N))
7141 return;
7142
7143 BasicBlock *BB = DLR.getParent();
7144 Function *F = BB ? BB->getParent() : nullptr;
7145
7146 // The scopes for variables and !dbg attachments must agree.
7147 DILabel *Label = DLR.getLabel();
7148 DILocation *Loc = DLR.getDebugLoc();
7149 CheckDI(Loc, "#dbg_label record requires a !dbg attachment", &DLR, BB, F);
7150
7151 DISubprogram *LabelSP = getSubprogram(Label->getRawScope());
7152 DISubprogram *LocSP = getSubprogram(Loc->getRawScope());
7153 if (!LabelSP || !LocSP)
7154 return;
7155
7156 CheckDI(LabelSP == LocSP,
7157 "mismatched subprogram between #dbg_label label and !dbg attachment",
7158 &DLR, BB, F, Label, Label->getScope()->getSubprogram(), Loc,
7159 Loc->getScope()->getSubprogram());
7160}
7161
7162void Verifier::visit(DbgVariableRecord &DVR) {
7163 BasicBlock *BB = DVR.getParent();
7164 Function *F = BB->getParent();
7165
7166 CheckDI(DVR.getType() == DbgVariableRecord::LocationType::Value ||
7167 DVR.getType() == DbgVariableRecord::LocationType::Declare ||
7168 DVR.getType() == DbgVariableRecord::LocationType::DeclareValue ||
7169 DVR.getType() == DbgVariableRecord::LocationType::Assign,
7170 "invalid #dbg record type", &DVR, DVR.getType(), BB, F);
7171
7172 // The location for a DbgVariableRecord must be either a ValueAsMetadata,
7173 // DIArgList, or an empty MDNode (which is a legacy representation for an
7174 // "undef" location).
7175 auto *MD = DVR.getRawLocation();
7176 CheckDI(MD && (isa<ValueAsMetadata>(MD) || isa<DIArgList>(MD) ||
7177 (isa<MDNode>(MD) && !cast<MDNode>(MD)->getNumOperands())),
7178 "invalid #dbg record address/value", &DVR, MD, BB, F);
7179 if (auto *VAM = dyn_cast<ValueAsMetadata>(MD)) {
7180 visitValueAsMetadata(*VAM, F);
7181 if (DVR.isDbgDeclare()) {
7182 // Allow integers here to support inttoptr salvage.
7183 Type *Ty = VAM->getValue()->getType();
7184 CheckDI(Ty->isPointerTy() || Ty->isIntegerTy(),
7185 "location of #dbg_declare must be a pointer or int", &DVR, MD, BB,
7186 F);
7187 }
7188 } else if (auto *AL = dyn_cast<DIArgList>(MD)) {
7189 visitDIArgList(*AL, F);
7190 }
7191
7193 "invalid #dbg record variable", &DVR, DVR.getRawVariable(), BB, F);
7194 visitMDNode(*DVR.getRawVariable(), AreDebugLocsAllowed::No);
7195
7197 "invalid #dbg record expression", &DVR, DVR.getRawExpression(), BB,
7198 F);
7199 visitMDNode(*DVR.getExpression(), AreDebugLocsAllowed::No);
7200
7201 if (DVR.isDbgAssign()) {
7203 "invalid #dbg_assign DIAssignID", &DVR, DVR.getRawAssignID(), BB,
7204 F);
7205 visitMDNode(*cast<DIAssignID>(DVR.getRawAssignID()),
7206 AreDebugLocsAllowed::No);
7207
7208 const auto *RawAddr = DVR.getRawAddress();
7209 // Similarly to the location above, the address for an assign
7210 // DbgVariableRecord must be a ValueAsMetadata or an empty MDNode, which
7211 // represents an undef address.
7212 CheckDI(
7213 isa<ValueAsMetadata>(RawAddr) ||
7214 (isa<MDNode>(RawAddr) && !cast<MDNode>(RawAddr)->getNumOperands()),
7215 "invalid #dbg_assign address", &DVR, DVR.getRawAddress(), BB, F);
7216 if (auto *VAM = dyn_cast<ValueAsMetadata>(RawAddr))
7217 visitValueAsMetadata(*VAM, F);
7218
7220 "invalid #dbg_assign address expression", &DVR,
7221 DVR.getRawAddressExpression(), BB, F);
7222 visitMDNode(*DVR.getAddressExpression(), AreDebugLocsAllowed::No);
7223
7224 // All of the linked instructions should be in the same function as DVR.
7225 for (Instruction *I : at::getAssignmentInsts(&DVR))
7226 CheckDI(DVR.getFunction() == I->getFunction(),
7227 "inst not in same function as #dbg_assign", I, &DVR, BB, F);
7228 }
7229
7230 // This check is redundant with one in visitLocalVariable().
7231 DILocalVariable *Var = DVR.getVariable();
7232 CheckDI(isType(Var->getRawType()), "invalid type ref", Var, Var->getRawType(),
7233 BB, F);
7234
7235 auto *DLNode = DVR.getDebugLoc().getAsMDNode();
7236 CheckDI(isa_and_nonnull<DILocation>(DLNode), "invalid #dbg record DILocation",
7237 &DVR, DLNode, BB, F);
7238 DILocation *Loc = DVR.getDebugLoc();
7239
7240 // The scopes for variables and !dbg attachments must agree.
7241 DISubprogram *VarSP = getSubprogram(Var->getRawScope());
7242 DISubprogram *LocSP = getSubprogram(Loc->getRawScope());
7243 if (!VarSP || !LocSP)
7244 return; // Broken scope chains are checked elsewhere.
7245
7246 CheckDI(VarSP == LocSP,
7247 "mismatched subprogram between #dbg record variable and DILocation",
7248 &DVR, BB, F, Var, Var->getScope()->getSubprogram(), Loc,
7249 Loc->getScope()->getSubprogram(), BB, F);
7250
7251 verifyFnArgs(DVR);
7252}
7253
7254void Verifier::visitVPIntrinsic(VPIntrinsic &VPI) {
7255 if (auto *VPCast = dyn_cast<VPCastIntrinsic>(&VPI)) {
7256 auto *RetTy = cast<VectorType>(VPCast->getType());
7257 auto *ValTy = cast<VectorType>(VPCast->getOperand(0)->getType());
7258 Check(RetTy->getElementCount() == ValTy->getElementCount(),
7259 "VP cast intrinsic first argument and result vector lengths must be "
7260 "equal",
7261 *VPCast);
7262
7263 switch (VPCast->getIntrinsicID()) {
7264 default:
7265 llvm_unreachable("Unknown VP cast intrinsic");
7266 case Intrinsic::vp_trunc:
7267 Check(RetTy->isIntOrIntVectorTy() && ValTy->isIntOrIntVectorTy(),
7268 "llvm.vp.trunc intrinsic first argument and result element type "
7269 "must be integer",
7270 *VPCast);
7271 Check(RetTy->getScalarSizeInBits() < ValTy->getScalarSizeInBits(),
7272 "llvm.vp.trunc intrinsic the bit size of first argument must be "
7273 "larger than the bit size of the return type",
7274 *VPCast);
7275 break;
7276 case Intrinsic::vp_zext:
7277 case Intrinsic::vp_sext:
7278 Check(RetTy->isIntOrIntVectorTy() && ValTy->isIntOrIntVectorTy(),
7279 "llvm.vp.zext or llvm.vp.sext intrinsic first argument and result "
7280 "element type must be integer",
7281 *VPCast);
7282 Check(RetTy->getScalarSizeInBits() > ValTy->getScalarSizeInBits(),
7283 "llvm.vp.zext or llvm.vp.sext intrinsic the bit size of first "
7284 "argument must be smaller than the bit size of the return type",
7285 *VPCast);
7286 break;
7287 case Intrinsic::vp_fptoui:
7288 case Intrinsic::vp_fptosi:
7289 case Intrinsic::vp_lrint:
7290 case Intrinsic::vp_llrint:
7291 Check(
7292 RetTy->isIntOrIntVectorTy() && ValTy->isFPOrFPVectorTy(),
7293 "llvm.vp.fptoui, llvm.vp.fptosi, llvm.vp.lrint or llvm.vp.llrint" "intrinsic first argument element "
7294 "type must be floating-point and result element type must be integer",
7295 *VPCast);
7296 break;
7297 case Intrinsic::vp_uitofp:
7298 case Intrinsic::vp_sitofp:
7299 Check(
7300 RetTy->isFPOrFPVectorTy() && ValTy->isIntOrIntVectorTy(),
7301 "llvm.vp.uitofp or llvm.vp.sitofp intrinsic first argument element "
7302 "type must be integer and result element type must be floating-point",
7303 *VPCast);
7304 break;
7305 case Intrinsic::vp_fptrunc:
7306 Check(RetTy->isFPOrFPVectorTy() && ValTy->isFPOrFPVectorTy(),
7307 "llvm.vp.fptrunc intrinsic first argument and result element type "
7308 "must be floating-point",
7309 *VPCast);
7310 Check(RetTy->getScalarSizeInBits() < ValTy->getScalarSizeInBits(),
7311 "llvm.vp.fptrunc intrinsic the bit size of first argument must be "
7312 "larger than the bit size of the return type",
7313 *VPCast);
7314 break;
7315 case Intrinsic::vp_fpext:
7316 Check(RetTy->isFPOrFPVectorTy() && ValTy->isFPOrFPVectorTy(),
7317 "llvm.vp.fpext intrinsic first argument and result element type "
7318 "must be floating-point",
7319 *VPCast);
7320 Check(RetTy->getScalarSizeInBits() > ValTy->getScalarSizeInBits(),
7321 "llvm.vp.fpext intrinsic the bit size of first argument must be "
7322 "smaller than the bit size of the return type",
7323 *VPCast);
7324 break;
7325 case Intrinsic::vp_ptrtoint:
7326 Check(RetTy->isIntOrIntVectorTy() && ValTy->isPtrOrPtrVectorTy(),
7327 "llvm.vp.ptrtoint intrinsic first argument element type must be "
7328 "pointer and result element type must be integer",
7329 *VPCast);
7330 break;
7331 case Intrinsic::vp_inttoptr:
7332 Check(RetTy->isPtrOrPtrVectorTy() && ValTy->isIntOrIntVectorTy(),
7333 "llvm.vp.inttoptr intrinsic first argument element type must be "
7334 "integer and result element type must be pointer",
7335 *VPCast);
7336 break;
7337 }
7338 }
7339
7340 switch (VPI.getIntrinsicID()) {
7341 case Intrinsic::vp_fcmp: {
7342 auto Pred = cast<VPCmpIntrinsic>(&VPI)->getPredicate();
7344 "invalid predicate for VP FP comparison intrinsic", &VPI);
7345 break;
7346 }
7347 case Intrinsic::vp_icmp: {
7348 auto Pred = cast<VPCmpIntrinsic>(&VPI)->getPredicate();
7350 "invalid predicate for VP integer comparison intrinsic", &VPI);
7351 break;
7352 }
7353 case Intrinsic::vp_is_fpclass: {
7354 auto TestMask = cast<ConstantInt>(VPI.getOperand(1));
7355 Check((TestMask->getZExtValue() & ~static_cast<unsigned>(fcAllFlags)) == 0,
7356 "unsupported bits for llvm.vp.is.fpclass test mask");
7357 break;
7358 }
7359 case Intrinsic::experimental_vp_splice: {
7360 VectorType *VecTy = cast<VectorType>(VPI.getType());
7361 int64_t Idx = cast<ConstantInt>(VPI.getArgOperand(2))->getSExtValue();
7362 int64_t KnownMinNumElements = VecTy->getElementCount().getKnownMinValue();
7363 if (VPI.getParent() && VPI.getParent()->getParent()) {
7364 AttributeList Attrs = VPI.getParent()->getParent()->getAttributes();
7365 if (Attrs.hasFnAttr(Attribute::VScaleRange))
7366 KnownMinNumElements *= Attrs.getFnAttrs().getVScaleRangeMin();
7367 }
7368 Check((Idx < 0 && std::abs(Idx) <= KnownMinNumElements) ||
7369 (Idx >= 0 && Idx < KnownMinNumElements),
7370 "The splice index exceeds the range [-VL, VL-1] where VL is the "
7371 "known minimum number of elements in the vector. For scalable "
7372 "vectors the minimum number of elements is determined from "
7373 "vscale_range.",
7374 &VPI);
7375 break;
7376 }
7377 }
7378}
7379
7380void Verifier::visitConstrainedFPIntrinsic(ConstrainedFPIntrinsic &FPI) {
7381 unsigned NumOperands = FPI.getNonMetadataArgCount();
7382 bool HasRoundingMD =
7384
7385 // Add the expected number of metadata operands.
7386 NumOperands += (1 + HasRoundingMD);
7387
7388 // Compare intrinsics carry an extra predicate metadata operand.
7390 NumOperands += 1;
7391 Check((FPI.arg_size() == NumOperands),
7392 "invalid arguments for constrained FP intrinsic", &FPI);
7393
7394 switch (FPI.getIntrinsicID()) {
7395 case Intrinsic::experimental_constrained_lrint:
7396 case Intrinsic::experimental_constrained_llrint: {
7397 Type *ValTy = FPI.getArgOperand(0)->getType();
7398 Type *ResultTy = FPI.getType();
7399 Check(!ValTy->isVectorTy() && !ResultTy->isVectorTy(),
7400 "Intrinsic does not support vectors", &FPI);
7401 break;
7402 }
7403
7404 case Intrinsic::experimental_constrained_lround:
7405 case Intrinsic::experimental_constrained_llround: {
7406 Type *ValTy = FPI.getArgOperand(0)->getType();
7407 Type *ResultTy = FPI.getType();
7408 Check(!ValTy->isVectorTy() && !ResultTy->isVectorTy(),
7409 "Intrinsic does not support vectors", &FPI);
7410 break;
7411 }
7412
7413 case Intrinsic::experimental_constrained_fcmp:
7414 case Intrinsic::experimental_constrained_fcmps: {
7415 auto Pred = cast<ConstrainedFPCmpIntrinsic>(&FPI)->getPredicate();
7417 "invalid predicate for constrained FP comparison intrinsic", &FPI);
7418 break;
7419 }
7420
7421 case Intrinsic::experimental_constrained_fptosi:
7422 case Intrinsic::experimental_constrained_fptoui: {
7423 Value *Operand = FPI.getArgOperand(0);
7424 ElementCount SrcEC;
7425 Check(Operand->getType()->isFPOrFPVectorTy(),
7426 "Intrinsic first argument must be floating point", &FPI);
7427 if (auto *OperandT = dyn_cast<VectorType>(Operand->getType())) {
7428 SrcEC = cast<VectorType>(OperandT)->getElementCount();
7429 }
7430
7431 Operand = &FPI;
7432 Check(SrcEC.isNonZero() == Operand->getType()->isVectorTy(),
7433 "Intrinsic first argument and result disagree on vector use", &FPI);
7434 Check(Operand->getType()->isIntOrIntVectorTy(),
7435 "Intrinsic result must be an integer", &FPI);
7436 if (auto *OperandT = dyn_cast<VectorType>(Operand->getType())) {
7437 Check(SrcEC == cast<VectorType>(OperandT)->getElementCount(),
7438 "Intrinsic first argument and result vector lengths must be equal",
7439 &FPI);
7440 }
7441 break;
7442 }
7443
7444 case Intrinsic::experimental_constrained_sitofp:
7445 case Intrinsic::experimental_constrained_uitofp: {
7446 Value *Operand = FPI.getArgOperand(0);
7447 ElementCount SrcEC;
7448 Check(Operand->getType()->isIntOrIntVectorTy(),
7449 "Intrinsic first argument must be integer", &FPI);
7450 if (auto *OperandT = dyn_cast<VectorType>(Operand->getType())) {
7451 SrcEC = cast<VectorType>(OperandT)->getElementCount();
7452 }
7453
7454 Operand = &FPI;
7455 Check(SrcEC.isNonZero() == Operand->getType()->isVectorTy(),
7456 "Intrinsic first argument and result disagree on vector use", &FPI);
7457 Check(Operand->getType()->isFPOrFPVectorTy(),
7458 "Intrinsic result must be a floating point", &FPI);
7459 if (auto *OperandT = dyn_cast<VectorType>(Operand->getType())) {
7460 Check(SrcEC == cast<VectorType>(OperandT)->getElementCount(),
7461 "Intrinsic first argument and result vector lengths must be equal",
7462 &FPI);
7463 }
7464 break;
7465 }
7466
7467 case Intrinsic::experimental_constrained_fptrunc:
7468 case Intrinsic::experimental_constrained_fpext: {
7469 Value *Operand = FPI.getArgOperand(0);
7470 Type *OperandTy = Operand->getType();
7471 Value *Result = &FPI;
7472 Type *ResultTy = Result->getType();
7473 Check(OperandTy->isFPOrFPVectorTy(),
7474 "Intrinsic first argument must be FP or FP vector", &FPI);
7475 Check(ResultTy->isFPOrFPVectorTy(),
7476 "Intrinsic result must be FP or FP vector", &FPI);
7477 Check(OperandTy->isVectorTy() == ResultTy->isVectorTy(),
7478 "Intrinsic first argument and result disagree on vector use", &FPI);
7479 if (OperandTy->isVectorTy()) {
7480 Check(cast<VectorType>(OperandTy)->getElementCount() ==
7481 cast<VectorType>(ResultTy)->getElementCount(),
7482 "Intrinsic first argument and result vector lengths must be equal",
7483 &FPI);
7484 }
7485 if (FPI.getIntrinsicID() == Intrinsic::experimental_constrained_fptrunc) {
7486 Check(OperandTy->getScalarSizeInBits() > ResultTy->getScalarSizeInBits(),
7487 "Intrinsic first argument's type must be larger than result type",
7488 &FPI);
7489 } else {
7490 Check(OperandTy->getScalarSizeInBits() < ResultTy->getScalarSizeInBits(),
7491 "Intrinsic first argument's type must be smaller than result type",
7492 &FPI);
7493 }
7494 break;
7495 }
7496
7497 default:
7498 break;
7499 }
7500
7501 // If a non-metadata argument is passed in a metadata slot then the
7502 // error will be caught earlier when the incorrect argument doesn't
7503 // match the specification in the intrinsic call table. Thus, no
7504 // argument type check is needed here.
7505
7506 Check(FPI.getExceptionBehavior().has_value(),
7507 "invalid exception behavior argument", &FPI);
7508 if (HasRoundingMD) {
7509 Check(FPI.getRoundingMode().has_value(), "invalid rounding mode argument",
7510 &FPI);
7511 }
7512}
7513
7514void Verifier::verifyFragmentExpression(const DbgVariableRecord &DVR) {
7515 DILocalVariable *V = dyn_cast_or_null<DILocalVariable>(DVR.getRawVariable());
7516 DIExpression *E = dyn_cast_or_null<DIExpression>(DVR.getRawExpression());
7517
7518 // We don't know whether this intrinsic verified correctly.
7519 if (!V || !E || !E->isValid())
7520 return;
7521
7522 // Nothing to do if this isn't a DW_OP_LLVM_fragment expression.
7523 auto Fragment = E->getFragmentInfo();
7524 if (!Fragment)
7525 return;
7526
7527 // The frontend helps out GDB by emitting the members of local anonymous
7528 // unions as artificial local variables with shared storage. When SROA splits
7529 // the storage for artificial local variables that are smaller than the entire
7530 // union, the overhang piece will be outside of the allotted space for the
7531 // variable and this check fails.
7532 // FIXME: Remove this check as soon as clang stops doing this; it hides bugs.
7533 if (V->isArtificial())
7534 return;
7535
7536 verifyFragmentExpression(*V, *Fragment, &DVR);
7537}
7538
7539template <typename ValueOrMetadata>
7540void Verifier::verifyFragmentExpression(const DIVariable &V,
7542 ValueOrMetadata *Desc) {
7543 // If there's no size, the type is broken, but that should be checked
7544 // elsewhere.
7545 auto VarSize = V.getSizeInBits();
7546 if (!VarSize)
7547 return;
7548
7549 unsigned FragSize = Fragment.SizeInBits;
7550 unsigned FragOffset = Fragment.OffsetInBits;
7551 CheckDI(FragSize + FragOffset <= *VarSize,
7552 "fragment is larger than or outside of variable", Desc, &V);
7553 CheckDI(FragSize != *VarSize, "fragment covers entire variable", Desc, &V);
7554}
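// Editor's note (illustrative only): for a variable whose type is 32 bits
// wide, !DIExpression(DW_OP_LLVM_fragment, 16, 32) fails the first CheckDI
// above (offset 16 + size 32 spills past the 32-bit variable), while
// !DIExpression(DW_OP_LLVM_fragment, 0, 32) fails the second one because a
// fragment covering the entire variable is redundant.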
7555
7556void Verifier::verifyFnArgs(const DbgVariableRecord &DVR) {
7557 // This function does not take the scope of noninlined function arguments into
7558 // account. Don't run it if current function is nodebug, because it may
7559 // contain inlined debug intrinsics.
7560 if (!HasDebugInfo)
7561 return;
7562
7563 // For performance reasons only check non-inlined ones.
7564 if (DVR.getDebugLoc()->getInlinedAt())
7565 return;
7566
7567 DILocalVariable *Var = DVR.getVariable();
7568 CheckDI(Var, "#dbg record without variable");
7569
7570 unsigned ArgNo = Var->getArg();
7571 if (!ArgNo)
7572 return;
7573
7574 // Verify there are no duplicate function argument debug info entries.
7575 // These will cause hard-to-debug assertions in the DWARF backend.
7576 if (DebugFnArgs.size() < ArgNo)
7577 DebugFnArgs.resize(ArgNo, nullptr);
7578
7579 auto *Prev = DebugFnArgs[ArgNo - 1];
7580 DebugFnArgs[ArgNo - 1] = Var;
7581 CheckDI(!Prev || (Prev == Var), "conflicting debug info for argument", &DVR,
7582 Prev, Var);
7583}
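// Editor's note (illustrative only): this catches, for example, two
// non-inlined #dbg_value records in the same function whose DILocalVariables
// are distinct nodes yet both carry arg: 1 -- the second record then reports
// "conflicting debug info for argument".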
7584
7585void Verifier::verifyNotEntryValue(const DbgVariableRecord &DVR) {
7586 DIExpression *E = dyn_cast_or_null<DIExpression>(DVR.getRawExpression());
7587
7588 // We don't know whether this intrinsic verified correctly.
7589 if (!E || !E->isValid())
7590 return;
7591
7592 if (isa<ValueAsMetadata>(DVR.getRawLocation())) {
7593 Value *VarValue = DVR.getVariableLocationOp(0);
7594 if (isa<UndefValue>(VarValue) || isa<PoisonValue>(VarValue))
7595 return;
7596 // We allow EntryValues for swift async arguments, as they have an
7597 // ABI-guarantee to be turned into a specific register.
7598 if (auto *ArgLoc = dyn_cast_or_null<Argument>(VarValue);
7599 ArgLoc && ArgLoc->hasAttribute(Attribute::SwiftAsync))
7600 return;
7601 }
7602
7603 CheckDI(!E->isEntryValue(),
7604 "Entry values are only allowed in MIR unless they target a "
7605 "swiftasync Argument",
7606 &DVR);
7607}
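// Editor's note (illustrative only): an expression such as
// !DIExpression(DW_OP_LLVM_entry_value, 1) attached to a #dbg_value whose
// location is an ordinary SSA value is rejected here; it is only tolerated
// when the location is a function Argument marked swiftasync, because that
// argument is ABI-guaranteed to live in a fixed register.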
7608
7609void Verifier::verifyCompileUnits() {
7610 // When more than one Module is imported into the same context, such as during
7611 // an LTO build before linking the modules, ODR type uniquing may cause types
7612 // to point to a different CU. This check does not make sense in this case.
7613 if (M.getContext().isODRUniquingDebugTypes())
7614 return;
7615 auto *CUs = M.getNamedMetadata("llvm.dbg.cu");
7616 SmallPtrSet<const Metadata *, 2> Listed;
7617 if (CUs)
7618 Listed.insert_range(CUs->operands());
7619 for (const auto *CU : CUVisited)
7620 CheckDI(Listed.count(CU), "DICompileUnit not listed in llvm.dbg.cu", CU);
7621 CUVisited.clear();
7622}
7623
7624void Verifier::verifyDeoptimizeCallingConvs() {
7625 if (DeoptimizeDeclarations.empty())
7626 return;
7627
7628 const Function *First = DeoptimizeDeclarations[0];
7629 for (const auto *F : ArrayRef(DeoptimizeDeclarations).slice(1)) {
7630 Check(First->getCallingConv() == F->getCallingConv(),
7631 "All llvm.experimental.deoptimize declarations must have the same "
7632 "calling convention",
7633 First, F);
7634 }
7635}
7636
7637void Verifier::verifyAttachedCallBundle(const CallBase &Call,
7638 const OperandBundleUse &BU) {
7639 FunctionType *FTy = Call.getFunctionType();
7640
7641 Check((FTy->getReturnType()->isPointerTy() ||
7642 (Call.doesNotReturn() && FTy->getReturnType()->isVoidTy())),
7643 "a call with operand bundle \"clang.arc.attachedcall\" must call a "
7644 "function returning a pointer or a non-returning function that has a "
7645 "void return type",
7646 Call);
7647
7648 Check(BU.Inputs.size() == 1 && isa<Function>(BU.Inputs.front()),
7649 "operand bundle \"clang.arc.attachedcall\" requires one function as "
7650 "an argument",
7651 Call);
7652
7653 auto *Fn = cast<Function>(BU.Inputs.front());
7654 Intrinsic::ID IID = Fn->getIntrinsicID();
7655
7656 if (IID) {
7657 Check((IID == Intrinsic::objc_retainAutoreleasedReturnValue ||
7658 IID == Intrinsic::objc_claimAutoreleasedReturnValue ||
7659 IID == Intrinsic::objc_unsafeClaimAutoreleasedReturnValue),
7660 "invalid function argument", Call);
7661 } else {
7662 StringRef FnName = Fn->getName();
7663 Check((FnName == "objc_retainAutoreleasedReturnValue" ||
7664 FnName == "objc_claimAutoreleasedReturnValue" ||
7665 FnName == "objc_unsafeClaimAutoreleasedReturnValue"),
7666 "invalid function argument", Call);
7667 }
7668}
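// Editor's note (illustrative only): a well-formed use of this bundle looks
// like
//   %obj = call ptr @make_object() [ "clang.arc.attachedcall"(
//              ptr @llvm.objc.retainAutoreleasedReturnValue) ]
// i.e. the callee returns a pointer (or never returns) and the single bundle
// operand is one of the three ObjC retain/claim runtime functions checked
// above.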
7669
7670void Verifier::verifyNoAliasScopeDecl() {
7671 if (NoAliasScopeDecls.empty())
7672 return;
7673
7674 // only a single scope must be declared at a time.
7675 for (auto *II : NoAliasScopeDecls) {
7676 assert(II->getIntrinsicID() == Intrinsic::experimental_noalias_scope_decl &&
7677 "Not a llvm.experimental.noalias.scope.decl ?");
7678 const auto *ScopeListMV = dyn_cast<MetadataAsValue>(
7679 II->getOperand(Intrinsic::NoAliasScopeDeclScopeArg));
7680 Check(ScopeListMV != nullptr,
7681 "llvm.experimental.noalias.scope.decl must have a MetadataAsValue "
7682 "argument",
7683 II);
7684
7685 const auto *ScopeListMD = dyn_cast<MDNode>(ScopeListMV->getMetadata());
7686 Check(ScopeListMD != nullptr, "!id.scope.list must point to an MDNode", II);
7687 Check(ScopeListMD->getNumOperands() == 1,
7688 "!id.scope.list must point to a list with a single scope", II);
7689 visitAliasScopeListMetadata(ScopeListMD);
7690 }
7691
7692 // Only check the domination rule when requested. Once all passes have been
7693 // adapted this option can go away.
7694 if (!VerifyNoAliasScopeDomination)
7695 return;
7696
7697 // Now sort the intrinsics based on the scope MDNode so that declarations of
7698 // the same scopes are next to each other.
7699 auto GetScope = [](IntrinsicInst *II) {
7700 const auto *ScopeListMV = cast<MetadataAsValue>(
7701 II->getOperand(Intrinsic::NoAliasScopeDeclScopeArg));
7702 return &cast<MDNode>(ScopeListMV->getMetadata())->getOperand(0);
7703 };
7704
7705 // We are sorting on MDNode pointers here. For valid input IR this is ok.
7706 // TODO: Sort on Metadata ID to avoid non-deterministic error messages.
7707 auto Compare = [GetScope](IntrinsicInst *Lhs, IntrinsicInst *Rhs) {
7708 return GetScope(Lhs) < GetScope(Rhs);
7709 };
7710
7711 llvm::sort(NoAliasScopeDecls, Compare);
7712
7713 // Go over the intrinsics and check that for the same scope, they are not
7714 // dominating each other.
7715 auto ItCurrent = NoAliasScopeDecls.begin();
7716 while (ItCurrent != NoAliasScopeDecls.end()) {
7717 auto CurScope = GetScope(*ItCurrent);
7718 auto ItNext = ItCurrent;
7719 do {
7720 ++ItNext;
7721 } while (ItNext != NoAliasScopeDecls.end() &&
7722 GetScope(*ItNext) == CurScope);
7723
7724 // [ItCurrent, ItNext) represents the declarations for the same scope.
7725 // Ensure they are not dominating each other, but only if it is not too
7726 // expensive.
7727 if (ItNext - ItCurrent < 32)
7728 for (auto *I : llvm::make_range(ItCurrent, ItNext))
7729 for (auto *J : llvm::make_range(ItCurrent, ItNext))
7730 if (I != J)
7731 Check(!DT.dominates(I, J),
7732 "llvm.experimental.noalias.scope.decl dominates another one "
7733 "with the same scope",
7734 I);
7735 ItCurrent = ItNext;
7736 }
7737}
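// Editor's note (illustrative only): a conforming declaration looks like
//   call void @llvm.experimental.noalias.scope.decl(metadata !2)
//   !2 = !{!3}                            ; exactly one scope in the list
//   !3 = distinct !{!3, !4, !"scope"}     ; the scope itself
//   !4 = distinct !{!4, !"domain"}
// The domination check below runs only under -verify-noalias-scope-decl-dom
// (the VerifyNoAliasScopeDomination option) and rejects two declarations of
// the same scope where one dominates the other.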
7738
7739//===----------------------------------------------------------------------===//
7740// Implement the public interfaces to this file...
7741//===----------------------------------------------------------------------===//
7742
7743 bool llvm::verifyFunction(const Function &f, raw_ostream *OS) {
7744 Function &F = const_cast<Function &>(f);
7745
7746 // Don't use a raw_null_ostream. Printing IR is expensive.
7747 Verifier V(OS, /*ShouldTreatBrokenDebugInfoAsError=*/true, *f.getParent());
7748
7749 // Note that this function's return value is inverted from what you would
7750 // expect of a function called "verify".
7751 return !V.verify(F);
7752}
7753
7754 bool llvm::verifyModule(const Module &M, raw_ostream *OS,
7755                         bool *BrokenDebugInfo) {
7756 // Don't use a raw_null_ostream. Printing IR is expensive.
7757 Verifier V(OS, /*ShouldTreatBrokenDebugInfoAsError=*/!BrokenDebugInfo, M);
7758
7759 bool Broken = false;
7760 for (const Function &F : M)
7761 Broken |= !V.verify(F);
7762
7763 Broken |= !V.verify();
7764 if (BrokenDebugInfo)
7765 *BrokenDebugInfo = V.hasBrokenDebugInfo();
7766 // Note that this function's return value is inverted from what you would
7767 // expect of a function called "verify".
7768 return Broken;
7769}
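// Editor's note (illustrative client usage, not part of this file): both
// entry points return true when the IR is *broken*, so a caller typically
// writes something like
//   bool BrokenDI = false;
//   if (verifyModule(M, &errs(), &BrokenDI))
//     report_fatal_error("module failed verification");
// Passing a null BrokenDebugInfo pointer makes debug-info problems count as
// fatal module errors as well.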
7770
7771namespace {
7772
7773struct VerifierLegacyPass : public FunctionPass {
7774 static char ID;
7775
7776 std::unique_ptr<Verifier> V;
7777 bool FatalErrors = true;
7778
7779 VerifierLegacyPass() : FunctionPass(ID) {
7780 initializeVerifierLegacyPassPass(*PassRegistry::getPassRegistry());
7781 }
7782 explicit VerifierLegacyPass(bool FatalErrors)
7783 : FunctionPass(ID),
7784 FatalErrors(FatalErrors) {
7785 initializeVerifierLegacyPassPass(*PassRegistry::getPassRegistry());
7786 }
7787
7788 bool doInitialization(Module &M) override {
7789 V = std::make_unique<Verifier>(
7790 &dbgs(), /*ShouldTreatBrokenDebugInfoAsError=*/false, M);
7791 return false;
7792 }
7793
7794 bool runOnFunction(Function &F) override {
7795 if (!V->verify(F) && FatalErrors) {
7796 errs() << "in function " << F.getName() << '\n';
7797 report_fatal_error("Broken function found, compilation aborted!");
7798 }
7799 return false;
7800 }
7801
7802 bool doFinalization(Module &M) override {
7803 bool HasErrors = false;
7804 for (Function &F : M)
7805 if (F.isDeclaration())
7806 HasErrors |= !V->verify(F);
7807
7808 HasErrors |= !V->verify();
7809 if (FatalErrors && (HasErrors || V->hasBrokenDebugInfo()))
7810 report_fatal_error("Broken module found, compilation aborted!");
7811 return false;
7812 }
7813
7814 void getAnalysisUsage(AnalysisUsage &AU) const override {
7815 AU.setPreservesAll();
7816 }
7817};
7818
7819} // end anonymous namespace
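// Editor's note (illustrative only, assumes the legacy pass manager): the
// wrapper above is normally scheduled through createVerifierPass(), e.g.
//   legacy::PassManager PM;
//   PM.add(createVerifierPass(/*FatalErrors=*/true));
//   PM.run(M);
// which verifies every function as it is visited and then the whole module in
// doFinalization().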
7820
7821/// Helper to issue failure from the TBAA verification
7822template <typename... Tys> void TBAAVerifier::CheckFailed(Tys &&... Args) {
7823 if (Diagnostic)
7824 return Diagnostic->CheckFailed(Args...);
7825}
7826
7827#define CheckTBAA(C, ...) \
7828 do { \
7829 if (!(C)) { \
7830 CheckFailed(__VA_ARGS__); \
7831 return false; \
7832 } \
7833 } while (false)
7834
7835/// Verify that \p BaseNode can be used as the "base type" in the struct-path
7836/// TBAA scheme. This means \p BaseNode is either a scalar node, or a
7837/// struct-type node describing an aggregate data structure (like a struct).
7838TBAAVerifier::TBAABaseNodeSummary
7839TBAAVerifier::verifyTBAABaseNode(const Instruction *I, const MDNode *BaseNode,
7840 bool IsNewFormat) {
7841 if (BaseNode->getNumOperands() < 2) {
7842 CheckFailed("Base nodes must have at least two operands", I, BaseNode);
7843 return {true, ~0u};
7844 }
7845
7846 auto Itr = TBAABaseNodes.find(BaseNode);
7847 if (Itr != TBAABaseNodes.end())
7848 return Itr->second;
7849
7850 auto Result = verifyTBAABaseNodeImpl(I, BaseNode, IsNewFormat);
7851 auto InsertResult = TBAABaseNodes.insert({BaseNode, Result});
7852 (void)InsertResult;
7853 assert(InsertResult.second && "We just checked!");
7854 return Result;
7855}
7856
7857TBAAVerifier::TBAABaseNodeSummary
7858TBAAVerifier::verifyTBAABaseNodeImpl(const Instruction *I,
7859 const MDNode *BaseNode, bool IsNewFormat) {
7860 const TBAAVerifier::TBAABaseNodeSummary InvalidNode = {true, ~0u};
7861
7862 if (BaseNode->getNumOperands() == 2) {
7863 // Scalar nodes can only be accessed at offset 0.
7864 return isValidScalarTBAANode(BaseNode)
7865 ? TBAAVerifier::TBAABaseNodeSummary({false, 0})
7866 : InvalidNode;
7867 }
7868
7869 if (IsNewFormat) {
7870 if (BaseNode->getNumOperands() % 3 != 0) {
7871 CheckFailed("Access tag nodes must have the number of operands that is a "
7872 "multiple of 3!", BaseNode);
7873 return InvalidNode;
7874 }
7875 } else {
7876 if (BaseNode->getNumOperands() % 2 != 1) {
7877 CheckFailed("Struct tag nodes must have an odd number of operands!",
7878 BaseNode);
7879 return InvalidNode;
7880 }
7881 }
7882
7883 // Check the type size field.
7884 if (IsNewFormat) {
7885 auto *TypeSizeNode = mdconst::dyn_extract_or_null<ConstantInt>(
7886 BaseNode->getOperand(1));
7887 if (!TypeSizeNode) {
7888 CheckFailed("Type size nodes must be constants!", I, BaseNode);
7889 return InvalidNode;
7890 }
7891 }
7892
7893 // Check the type name field. In the new format it can be anything.
7894 if (!IsNewFormat && !isa<MDString>(BaseNode->getOperand(0))) {
7895 CheckFailed("Struct tag nodes have a string as their first operand",
7896 BaseNode);
7897 return InvalidNode;
7898 }
7899
7900 bool Failed = false;
7901
7902 std::optional<APInt> PrevOffset;
7903 unsigned BitWidth = ~0u;
7904
7905 // We've already checked that BaseNode is not a degenerate root node with one
7906 // operand in \c verifyTBAABaseNode, so this loop should run at least once.
7907 unsigned FirstFieldOpNo = IsNewFormat ? 3 : 1;
7908 unsigned NumOpsPerField = IsNewFormat ? 3 : 2;
7909 for (unsigned Idx = FirstFieldOpNo; Idx < BaseNode->getNumOperands();
7910 Idx += NumOpsPerField) {
7911 const MDOperand &FieldTy = BaseNode->getOperand(Idx);
7912 const MDOperand &FieldOffset = BaseNode->getOperand(Idx + 1);
7913 if (!isa<MDNode>(FieldTy)) {
7914 CheckFailed("Incorrect field entry in struct type node!", I, BaseNode);
7915 Failed = true;
7916 continue;
7917 }
7918
7919 auto *OffsetEntryCI =
7920 mdconst::dyn_extract_or_null<ConstantInt>(FieldOffset);
7921 if (!OffsetEntryCI) {
7922 CheckFailed("Offset entries must be constants!", I, BaseNode);
7923 Failed = true;
7924 continue;
7925 }
7926
7927 if (BitWidth == ~0u)
7928 BitWidth = OffsetEntryCI->getBitWidth();
7929
7930 if (OffsetEntryCI->getBitWidth() != BitWidth) {
7931 CheckFailed(
7932 "Bitwidth between the offsets and struct type entries must match", I,
7933 BaseNode);
7934 Failed = true;
7935 continue;
7936 }
7937
7938 // NB! As far as I can tell, we generate a non-strictly increasing offset
7939 // sequence only from structs that have zero size bit fields. When
7940 // recursing into a contained struct in \c getFieldNodeFromTBAABaseNode we
7941 // pick the field lexically the latest in struct type metadata node. This
7942 // mirrors the actual behavior of the alias analysis implementation.
7943 bool IsAscending =
7944 !PrevOffset || PrevOffset->ule(OffsetEntryCI->getValue());
7945
7946 if (!IsAscending) {
7947 CheckFailed("Offsets must be increasing!", I, BaseNode);
7948 Failed = true;
7949 }
7950
7951 PrevOffset = OffsetEntryCI->getValue();
7952
7953 if (IsNewFormat) {
7954 auto *MemberSizeNode = mdconst::dyn_extract_or_null<ConstantInt>(
7955 BaseNode->getOperand(Idx + 2));
7956 if (!MemberSizeNode) {
7957 CheckFailed("Member size entries must be constants!", I, BaseNode);
7958 Failed = true;
7959 continue;
7960 }
7961 }
7962 }
7963
7964 return Failed ? InvalidNode
7965 : TBAAVerifier::TBAABaseNodeSummary(false, BitWidth);
7966}
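// Editor's note (illustrative only): an old-format struct-type base node is a
// name followed by (field type, offset) pairs with non-decreasing offsets,
// e.g.
//   !1 = !{!"omnipotent char", !0, i64 0}      ; scalar field type
//   !2 = !{!"S", !1, i64 0, !1, i64 4}         ; struct "S", fields at 0 and 4
// New-format nodes additionally carry a type size and a per-field size, which
// is why their operand count must be a multiple of 3.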
7967
7968static bool IsRootTBAANode(const MDNode *MD) {
7969 return MD->getNumOperands() < 2;
7970}
7971
7972static bool IsScalarTBAANodeImpl(const MDNode *MD,
7973 SmallPtrSetImpl<const MDNode *> &Visited) {
7974 if (MD->getNumOperands() != 2 && MD->getNumOperands() != 3)
7975 return false;
7976
7977 if (!isa<MDString>(MD->getOperand(0)))
7978 return false;
7979
7980 if (MD->getNumOperands() == 3) {
7981 ConstantInt *Offset = mdconst::dyn_extract<ConstantInt>(MD->getOperand(2));
7982 if (!(Offset && Offset->isZero() && isa<MDString>(MD->getOperand(0))))
7983 return false;
7984 }
7985
7986 auto *Parent = dyn_cast_or_null<MDNode>(MD->getOperand(1));
7987 return Parent && Visited.insert(Parent).second &&
7988 (IsRootTBAANode(Parent) || IsScalarTBAANodeImpl(Parent, Visited));
7989}
7990
7991bool TBAAVerifier::isValidScalarTBAANode(const MDNode *MD) {
7992 auto ResultIt = TBAAScalarNodes.find(MD);
7993 if (ResultIt != TBAAScalarNodes.end())
7994 return ResultIt->second;
7995
7996 SmallPtrSet<const MDNode *, 4> Visited;
7997 bool Result = IsScalarTBAANodeImpl(MD, Visited);
7998 auto InsertResult = TBAAScalarNodes.insert({MD, Result});
7999 (void)InsertResult;
8000 assert(InsertResult.second && "Just checked!");
8001
8002 return Result;
8003}
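// Editor's note (illustrative only): a valid scalar chain bottoms out at a
// root node with fewer than two operands, e.g.
//   !0 = !{!"Simple C/C++ TBAA"}             ; root
//   !1 = !{!"omnipotent char", !0, i64 0}    ; scalar: name, parent, 0 offset
//   !2 = !{!"int", !1, i64 0}
// A scalar node may omit the trailing offset, but if present it must be zero.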
8004
8005/// Returns the field node at the offset \p Offset in \p BaseNode. Update \p
8006/// Offset in place to be the offset within the field node returned.
8007///
8008/// We assume we've okayed \p BaseNode via \c verifyTBAABaseNode.
8009MDNode *TBAAVerifier::getFieldNodeFromTBAABaseNode(const Instruction *I,
8010 const MDNode *BaseNode,
8011 APInt &Offset,
8012 bool IsNewFormat) {
8013 assert(BaseNode->getNumOperands() >= 2 && "Invalid base node!");
8014
8015 // Scalar nodes have only one possible "field" -- their parent in the access
8016 // hierarchy. Offset must be zero at this point, but our caller is supposed
8017 // to check that.
8018 if (BaseNode->getNumOperands() == 2)
8019 return cast<MDNode>(BaseNode->getOperand(1));
8020
8021 unsigned FirstFieldOpNo = IsNewFormat ? 3 : 1;
8022 unsigned NumOpsPerField = IsNewFormat ? 3 : 2;
8023 for (unsigned Idx = FirstFieldOpNo; Idx < BaseNode->getNumOperands();
8024 Idx += NumOpsPerField) {
8025 auto *OffsetEntryCI =
8026 mdconst::extract<ConstantInt>(BaseNode->getOperand(Idx + 1));
8027 if (OffsetEntryCI->getValue().ugt(Offset)) {
8028 if (Idx == FirstFieldOpNo) {
8029 CheckFailed("Could not find TBAA parent in struct type node", I,
8030 BaseNode, &Offset);
8031 return nullptr;
8032 }
8033
8034 unsigned PrevIdx = Idx - NumOpsPerField;
8035 auto *PrevOffsetEntryCI =
8036 mdconst::extract<ConstantInt>(BaseNode->getOperand(PrevIdx + 1));
8037 Offset -= PrevOffsetEntryCI->getValue();
8038 return cast<MDNode>(BaseNode->getOperand(PrevIdx));
8039 }
8040 }
8041
8042 unsigned LastIdx = BaseNode->getNumOperands() - NumOpsPerField;
8043 auto *LastOffsetEntryCI = mdconst::extract<ConstantInt>(
8044 BaseNode->getOperand(LastIdx + 1));
8045 Offset -= LastOffsetEntryCI->getValue();
8046 return cast<MDNode>(BaseNode->getOperand(LastIdx));
8047}
8048
8049 static bool isNewFormatTBAATypeNode(llvm::MDNode *Type) {
8050 if (!Type || Type->getNumOperands() < 3)
8051 return false;
8052
8053 // In the new format type nodes shall have a reference to the parent type as
8054 // its first operand.
8055 return isa_and_nonnull<MDNode>(Type->getOperand(0));
8056}
8057
8058 bool TBAAVerifier::visitTBAAMetadata(const Instruction *I, const MDNode *MD) {
8059 CheckTBAA(MD->getNumOperands() > 0, "TBAA metadata cannot have 0 operands", I,
8060 MD);
8061
8062 if (I)
8066 "This instruction shall not have a TBAA access tag!", I);
8067
8068 bool IsStructPathTBAA =
8069 isa<MDNode>(MD->getOperand(0)) && MD->getNumOperands() >= 3;
8070
8071 CheckTBAA(IsStructPathTBAA,
8072 "Old-style TBAA is no longer allowed, use struct-path TBAA instead",
8073 I);
8074
8075 MDNode *BaseNode = dyn_cast_or_null<MDNode>(MD->getOperand(0));
8076 MDNode *AccessType = dyn_cast_or_null<MDNode>(MD->getOperand(1));
8077
8078 bool IsNewFormat = isNewFormatTBAATypeNode(AccessType);
8079
8080 if (IsNewFormat) {
8081 CheckTBAA(MD->getNumOperands() == 4 || MD->getNumOperands() == 5,
8082 "Access tag metadata must have either 4 or 5 operands", I, MD);
8083 } else {
8084 CheckTBAA(MD->getNumOperands() < 5,
8085 "Struct tag metadata must have either 3 or 4 operands", I, MD);
8086 }
8087
8088 // Check the access size field.
8089 if (IsNewFormat) {
8090 auto *AccessSizeNode = mdconst::dyn_extract_or_null<ConstantInt>(
8091 MD->getOperand(3));
8092 CheckTBAA(AccessSizeNode, "Access size field must be a constant", I, MD);
8093 }
8094
8095 // Check the immutability flag.
8096 unsigned ImmutabilityFlagOpNo = IsNewFormat ? 4 : 3;
8097 if (MD->getNumOperands() == ImmutabilityFlagOpNo + 1) {
8098 auto *IsImmutableCI = mdconst::dyn_extract_or_null<ConstantInt>(
8099 MD->getOperand(ImmutabilityFlagOpNo));
8100 CheckTBAA(IsImmutableCI,
8101 "Immutability tag on struct tag metadata must be a constant", I,
8102 MD);
8103 CheckTBAA(
8104 IsImmutableCI->isZero() || IsImmutableCI->isOne(),
8105 "Immutability part of the struct tag metadata must be either 0 or 1", I,
8106 MD);
8107 }
8108
8109 CheckTBAA(BaseNode && AccessType,
8110 "Malformed struct tag metadata: base and access-type "
8111 "should be non-null and point to Metadata nodes",
8112 I, MD, BaseNode, AccessType);
8113
8114 if (!IsNewFormat) {
8115 CheckTBAA(isValidScalarTBAANode(AccessType),
8116 "Access type node must be a valid scalar type", I, MD,
8117 AccessType);
8118 }
8119
8120 auto *OffsetCI = mdconst::dyn_extract_or_null<ConstantInt>(MD->getOperand(2));
8121 CheckTBAA(OffsetCI, "Offset must be constant integer", I, MD);
8122
8123 APInt Offset = OffsetCI->getValue();
8124 bool SeenAccessTypeInPath = false;
8125
8126 SmallPtrSet<MDNode *, 4> StructPath;
8127
8128 for (/* empty */; BaseNode && !IsRootTBAANode(BaseNode);
8129 BaseNode =
8130 getFieldNodeFromTBAABaseNode(I, BaseNode, Offset, IsNewFormat)) {
8131 if (!StructPath.insert(BaseNode).second) {
8132 CheckFailed("Cycle detected in struct path", I, MD);
8133 return false;
8134 }
8135
8136 bool Invalid;
8137 unsigned BaseNodeBitWidth;
8138 std::tie(Invalid, BaseNodeBitWidth) =
8139 verifyTBAABaseNode(I, BaseNode, IsNewFormat);
8140
8141 // If the base node is invalid in itself, then we've already printed all the
8142 // errors we wanted to print.
8143 if (Invalid)
8144 return false;
8145
8146 SeenAccessTypeInPath |= BaseNode == AccessType;
8147
8148 if (isValidScalarTBAANode(BaseNode) || BaseNode == AccessType)
8149 CheckTBAA(Offset == 0, "Offset not zero at the point of scalar access", I,
8150 MD, &Offset);
8151
8152 CheckTBAA(BaseNodeBitWidth == Offset.getBitWidth() ||
8153 (BaseNodeBitWidth == 0 && Offset == 0) ||
8154 (IsNewFormat && BaseNodeBitWidth == ~0u),
8155 "Access bit-width not the same as description bit-width", I, MD,
8156 BaseNodeBitWidth, Offset.getBitWidth());
8157
8158 if (IsNewFormat && SeenAccessTypeInPath)
8159 break;
8160 }
8161
8162 CheckTBAA(SeenAccessTypeInPath, "Did not see access type in access path!", I,
8163 MD);
8164 return true;
8165}
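// Editor's note (illustrative only): a complete old-format access tag as seen
// on a load or store is
//   store i32 0, ptr %p, !tbaa !5
//   !5 = !{!2, !2, i64 0}                    ; base type, access type, offset
// with an optional fourth i64 0/1 operand marking the access as immutable.
// The loop above walks the base type of !5 down field by field until it
// reaches the access type, checking the remaining offset at each step.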
8166
8167char VerifierLegacyPass::ID = 0;
8168INITIALIZE_PASS(VerifierLegacyPass, "verify", "Module Verifier", false, false)
8169
8170 FunctionPass *llvm::createVerifierPass(bool FatalErrors) {
8171 return new VerifierLegacyPass(FatalErrors);
8172}
8173
8174AnalysisKey VerifierAnalysis::Key;
8175 VerifierAnalysis::Result VerifierAnalysis::run(Module &M,
8176                                                ModuleAnalysisManager &) {
8177 Result Res;
8178 Res.IRBroken = llvm::verifyModule(M, &dbgs(), &Res.DebugInfoBroken);
8179 return Res;
8180 }
8181
8182 VerifierAnalysis::Result VerifierAnalysis::run(Function &F,
8183                                                FunctionAnalysisManager &) {
8184 return {llvm::verifyFunction(F, &dbgs()), false};
8185 }
8186
8187 PreservedAnalyses VerifierPass::run(Module &M, ModuleAnalysisManager &AM) {
8188 auto Res = AM.getResult<VerifierAnalysis>(M);
8189 if (FatalErrors && (Res.IRBroken || Res.DebugInfoBroken))
8190 report_fatal_error("Broken module found, compilation aborted!");
8191
8192 return PreservedAnalyses::all();
8193}
8194
8195 PreservedAnalyses VerifierPass::run(Function &F, FunctionAnalysisManager &AM) {
8196 auto res = AM.getResult<VerifierAnalysis>(F);
8197 if (res.IRBroken && FatalErrors)
8198 report_fatal_error("Broken function found, compilation aborted!");
8199
8200 return PreservedAnalyses::all();
8201}
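// Editor's note (illustrative only, assumes a ModuleAnalysisManager MAM with
// VerifierAnalysis registered): with the new pass manager the verifier is
// usually scheduled as
//   ModulePassManager MPM;
//   MPM.addPass(VerifierPass());
//   MPM.run(M, MAM);
// which reports "Broken module found" through report_fatal_error when
// FatalErrors is left at its default of true.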
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
AMDGPU address space definition.
ArrayRef< TableEntry > TableRef
This file declares a class to represent arbitrary precision floating point values and provide a varie...
This file implements a class to represent arbitrary precision integral constant values and operations...
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Atomic ordering constants.
@ RetAttr
@ FnAttr
This file contains the simple types necessary to represent the attributes associated with functions a...
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
Analysis containing CSE Info
Definition CSEInfo.cpp:27
This file contains the declarations for the subclasses of Constant, which represent the different fla...
This file declares the LLVM IR specialization of the GenericConvergenceVerifier template.
static DISubprogram * getSubprogram(bool IsDistinct, Ts &&...Args)
dxil translate DXIL Translate Metadata
This file defines the DenseMap class.
This file contains constants used for implementing Dwarf debug support.
static bool runOnFunction(Function &F, bool PostInlining)
#define Check(C,...)
Hexagon Common GEP
This file provides various utilities for inspecting and working with the control flow graph in LLVM I...
Module.h This file contains the declarations for the Module class.
This header defines various interfaces for pass management in LLVM.
This defines the Use class.
const size_t AbstractManglingParser< Derived, Alloc >::NumOps
#define F(x, y, z)
Definition MD5.cpp:54
#define I(x, y, z)
Definition MD5.cpp:57
Machine Check Debug Module
This file implements a map that provides insertion order iteration.
This file provides utility for Memory Model Relaxation Annotations (MMRAs).
static bool isContiguous(const ConstantRange &A, const ConstantRange &B)
This file contains the declarations for metadata subclasses.
#define T
#define T1
ConstantRange Range(APInt(BitWidth, Low), APInt(BitWidth, High))
uint64_t High
uint64_t IntrinsicInst * II
#define P(N)
ppc ctr loops verify
#define INITIALIZE_PASS(passName, arg, name, cfg, analysis)
Definition PassSupport.h:56
This file contains the declarations for profiling metadata utility functions.
const SmallVectorImpl< MachineOperand > & Cond
static bool isValid(const char C)
Returns true if C is a valid mangled character: <0-9a-zA-Z_>.
static unsigned getNumElements(Type *Ty)
void visit(MachineFunction &MF, MachineBasicBlock &Start, std::function< void(MachineBasicBlock *)> op)
This file contains some templates that are useful if you are working with the STL at all.
verify safepoint Safepoint IR Verifier
BaseType
A given derived pointer can have multiple base pointers through phi/selects.
This file defines the SmallPtrSet class.
This file defines the SmallVector class.
This file contains some functions that are useful when dealing with strings.
static unsigned getBitWidth(Type *Ty, const DataLayout &DL)
Returns the bitwidth of the given scalar or pointer type.
static bool IsScalarTBAANodeImpl(const MDNode *MD, SmallPtrSetImpl< const MDNode * > &Visited)
static bool isType(const Metadata *MD)
static Instruction * getSuccPad(Instruction *Terminator)
static bool isMDTuple(const Metadata *MD)
static bool isNewFormatTBAATypeNode(llvm::MDNode *Type)
#define CheckDI(C,...)
We know that a debug info condition should be true, if not print an error message.
Definition Verifier.cpp:681
static void forEachUser(const Value *User, SmallPtrSet< const Value *, 32 > &Visited, llvm::function_ref< bool(const Value *)> Callback)
Definition Verifier.cpp:722
static bool isDINode(const Metadata *MD)
static bool isScope(const Metadata *MD)
static cl::opt< bool > VerifyNoAliasScopeDomination("verify-noalias-scope-decl-dom", cl::Hidden, cl::init(false), cl::desc("Ensure that llvm.experimental.noalias.scope.decl for identical " "scopes are not dominating"))
static bool isTypeCongruent(Type *L, Type *R)
Two types are "congruent" if they are identical, or if they are both pointer types with different poi...
#define CheckTBAA(C,...)
static bool isConstantIntMetadataOperand(const Metadata *MD)
static bool IsRootTBAANode(const MDNode *MD)
static Value * getParentPad(Value *EHPad)
static bool hasConflictingReferenceFlags(unsigned Flags)
Detect mutually exclusive flags.
static AttrBuilder getParameterABIAttributes(LLVMContext &C, unsigned I, AttributeList Attrs)
static const char PassName[]
bool isFiniteNonZero() const
Definition APFloat.h:1441
bool isNegative() const
Definition APFloat.h:1431
const fltSemantics & getSemantics() const
Definition APFloat.h:1439
Class for arbitrary precision integers.
Definition APInt.h:78
bool sgt(const APInt &RHS) const
Signed greater than comparison.
Definition APInt.h:1202
bool isMinValue() const
Determine if this is the smallest unsigned value.
Definition APInt.h:418
bool ule(const APInt &RHS) const
Unsigned less or equal comparison.
Definition APInt.h:1151
bool isPowerOf2() const
Check if this APInt's value is a power of two greater than zero.
Definition APInt.h:441
int64_t getSExtValue() const
Get sign extended value.
Definition APInt.h:1563
bool isMaxValue() const
Determine if this is the largest unsigned value.
Definition APInt.h:400
This class represents a conversion between pointers from one address space to another.
bool isSwiftError() const
Return true if this alloca is used as a swifterror argument to a call.
LLVM_ABI bool isStaticAlloca() const
Return true if this alloca is in the entry block of the function and is a constant size.
Align getAlign() const
Return the alignment of the memory that is being allocated by the instruction.
Type * getAllocatedType() const
Return the type that is being allocated by the instruction.
unsigned getAddressSpace() const
Return the address space for the allocation.
LLVM_ABI bool isArrayAllocation() const
Return true if there is an allocation size parameter to the allocation instruction that is not 1.
const Value * getArraySize() const
Get the number of elements allocated.
PassT::Result & getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs)
Get the result of an analysis pass for a given IR unit.
void setPreservesAll()
Set by analyses that do not transform their input at all.
LLVM_ABI bool hasInRegAttr() const
Return true if this argument has the inreg attribute.
Definition Function.cpp:293
bool empty() const
empty - Check if the array is empty.
Definition ArrayRef.h:137
static bool isFPOperation(BinOp Op)
BinOp getOperation() const
static LLVM_ABI StringRef getOperationName(BinOp Op)
AtomicOrdering getOrdering() const
Returns the ordering constraint of this rmw instruction.
bool contains(Attribute::AttrKind A) const
Return true if the builder has the specified attribute.
LLVM_ABI bool hasAttribute(Attribute::AttrKind Kind) const
Return true if the attribute exists in this set.
LLVM_ABI std::string getAsString(bool InAttrGrp=false) const
Functions, function parameters, and return types can have attributes to indicate how they should be t...
Definition Attributes.h:69
LLVM_ABI const ConstantRange & getValueAsConstantRange() const
Return the attribute's value as a ConstantRange.
LLVM_ABI StringRef getValueAsString() const
Return the attribute's value as a string.
AttrKind
This enumeration lists the attributes that can be associated with parameters, function results,...
Definition Attributes.h:88
bool isValid() const
Return true if the attribute is any kind of attribute.
Definition Attributes.h:223
LLVM Basic Block Representation.
Definition BasicBlock.h:62
iterator begin()
Instruction iterator methods.
Definition BasicBlock.h:459
iterator_range< const_phi_iterator > phis() const
Returns a range that iterates over the phis in the basic block.
Definition BasicBlock.h:528
const Function * getParent() const
Return the enclosing method, or null if none.
Definition BasicBlock.h:213
LLVM_ABI InstListType::const_iterator getFirstNonPHIIt() const
Returns an iterator to the first instruction in this block that is not a PHINode instruction.
LLVM_ABI bool isEntryBlock() const
Return true if this is the entry block of the containing function.
const Instruction & front() const
Definition BasicBlock.h:482
LLVM_ABI const BasicBlock * getUniquePredecessor() const
Return the predecessor of this block if it has a unique predecessor block.
InstListType::iterator iterator
Instruction iterators...
Definition BasicBlock.h:170
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
Definition BasicBlock.h:233
This class represents a no-op cast from one type to another.
static LLVM_ABI BlockAddress * lookup(const BasicBlock *BB)
Lookup an existing BlockAddress constant for the given BasicBlock.
bool isConditional() const
Value * getCondition() const
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
bool isInlineAsm() const
Check if this call is an inline asm statement.
bool hasInAllocaArgument() const
Determine if there are is an inalloca argument.
OperandBundleUse getOperandBundleAt(unsigned Index) const
Return the operand bundle at a specific index.
Function * getCalledFunction() const
Returns the function called, or null if this is an indirect function invocation or the function signa...
bool doesNotAccessMemory(unsigned OpNo) const
bool hasFnAttr(Attribute::AttrKind Kind) const
Determine whether this call has the given attribute.
unsigned getNumOperandBundles() const
Return the number of operand bundles associated with this User.
CallingConv::ID getCallingConv() const
LLVM_ABI bool paramHasAttr(unsigned ArgNo, Attribute::AttrKind Kind) const
Determine whether the argument or parameter has the given attribute.
Attribute getParamAttr(unsigned ArgNo, Attribute::AttrKind Kind) const
Get the attribute of a given kind from a given arg.
iterator_range< bundle_op_iterator > bundle_op_infos()
Return the range [bundle_op_info_begin, bundle_op_info_end).
unsigned countOperandBundlesOfType(StringRef Name) const
Return the number of operand bundles with the tag Name attached to this instruction.
bool onlyReadsMemory(unsigned OpNo) const
Value * getCalledOperand() const
Type * getParamElementType(unsigned ArgNo) const
Extract the elementtype type for a parameter.
Value * getArgOperand(unsigned i) const
FunctionType * getFunctionType() const
LLVM_ABI Intrinsic::ID getIntrinsicID() const
Returns the intrinsic ID of the intrinsic called or Intrinsic::not_intrinsic if the called function i...
iterator_range< User::op_iterator > args()
Iteration adapter for range-for loops.
bool doesNotReturn() const
Determine if the call cannot return.
LLVM_ABI bool onlyAccessesArgMemory() const
Determine if the call can access memmory only using pointers based on its arguments.
unsigned arg_size() const
AttributeList getAttributes() const
Return the attributes for this call.
bool hasOperandBundles() const
Return true if this User has any operand bundles.
LLVM_ABI Function * getCaller()
Helper to get the caller (the parent function).
BasicBlock * getIndirectDest(unsigned i) const
unsigned getNumIndirectDests() const
Return the number of callbr indirect dest labels.
bool isMustTailCall() const
static LLVM_ABI bool castIsValid(Instruction::CastOps op, Type *SrcTy, Type *DstTy)
This method can be used to determine if a cast from SrcTy to DstTy using Opcode op is valid or not.
unsigned getNumHandlers() const
return the number of 'handlers' in this catchswitch instruction, except the default handler
Value * getParentPad() const
BasicBlock * getUnwindDest() const
handler_range handlers()
iteration adapter for range-for loops.
BasicBlock * getUnwindDest() const
bool isFPPredicate() const
Definition InstrTypes.h:782
bool isIntPredicate() const
Definition InstrTypes.h:783
static bool isIntPredicate(Predicate P)
Definition InstrTypes.h:776
bool isMinusOne() const
This function will return true iff every bit in this constant is set to true.
Definition Constants.h:231
bool isNegative() const
Definition Constants.h:214
bool isZero() const
This is just a convenience method to make client code smaller for a common code.
Definition Constants.h:219
unsigned getBitWidth() const
getBitWidth - Return the scalar bitwidth of this constant.
Definition Constants.h:162
uint64_t getZExtValue() const
Return the constant as a 64-bit unsigned integer value after it has been zero extended as appropriate...
Definition Constants.h:168
const APInt & getValue() const
Return the constant as an APInt value reference.
Definition Constants.h:159
Constant * getAddrDiscriminator() const
The address discriminator if any, or the null constant.
Definition Constants.h:1078
Constant * getPointer() const
The pointer that is signed in this ptrauth signed pointer.
Definition Constants.h:1065
ConstantInt * getKey() const
The Key ID, an i32 constant.
Definition Constants.h:1068
Constant * getDeactivationSymbol() const
Definition Constants.h:1087
ConstantInt * getDiscriminator() const
The integer discriminator, an i64 constant, or 0.
Definition Constants.h:1071
static LLVM_ABI bool isOrderedRanges(ArrayRef< ConstantRange > RangesRef)
This class represents a range of values.
const APInt & getLower() const
Return the lower value for this range.
const APInt & getUpper() const
Return the upper value for this range.
LLVM_ABI bool contains(const APInt &Val) const
Return true if the specified value is in the set.
uint32_t getBitWidth() const
Get the bit width of this ConstantRange.
static LLVM_ABI ConstantTokenNone * get(LLVMContext &Context)
Return the ConstantTokenNone.
LLVM_ABI bool isNullValue() const
Return true if this is the value that would be returned by getNullValue.
Definition Constants.cpp:90
LLVM_ABI std::optional< fp::ExceptionBehavior > getExceptionBehavior() const
LLVM_ABI std::optional< RoundingMode > getRoundingMode() const
LLVM_ABI unsigned getNonMetadataArgCount() const
DbgVariableFragmentInfo FragmentInfo
@ FixedPointBinary
Scale factor 2^Factor.
@ FixedPointDecimal
Scale factor 10^Factor.
@ FixedPointRational
Arbitrary rational scale factor.
DIGlobalVariable * getVariable() const
LLVM_ABI DISubprogram * getSubprogram() const
Get the subprogram for this scope.
DILocalScope * getScope() const
Get the local scope for this variable.
Metadata * getRawScope() const
Base class for scope-like contexts.
Subprogram description. Uses SubclassData1.
static const DIScope * getRawRetainedNodeScope(const MDNode *N)
Base class for template parameters.
Base class for variables.
Metadata * getRawType() const
Metadata * getRawScope() const
A parsed version of the target data layout string in and methods for querying it.
Definition DataLayout.h:64
Records a position in IR for a source label (DILabel).
Base class for non-instruction debug metadata records that have positions within IR.
LLVM_ABI void print(raw_ostream &O, bool IsForDebug=false) const
DebugLoc getDebugLoc() const
LLVM_ABI const BasicBlock * getParent() const
LLVM_ABI Function * getFunction()
Record of a variable value-assignment, aka a non instruction representation of the dbg....
LLVM_ABI Value * getVariableLocationOp(unsigned OpIdx) const
DIExpression * getExpression() const
DILocalVariable * getVariable() const
Metadata * getRawLocation() const
Returns the metadata operand for the first location description.
@ End
Marks the end of the concrete types.
@ Any
To indicate all LocationTypes in searches.
DIExpression * getAddressExpression() const
MDNode * getAsMDNode() const
Return this as a bar MDNode.
Definition DebugLoc.h:290
ValueT lookup(const_arg_type_t< KeyT > Val) const
lookup - Return the entry for the specified key, or a default constructed value if no such entry exis...
Definition DenseMap.h:205
iterator find(const_arg_type_t< KeyT > Val)
Definition DenseMap.h:178
bool empty() const
Definition DenseMap.h:109
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Definition DenseMap.h:241
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
Definition Dominators.h:164
This instruction extracts a single (scalar) element from a VectorType value.
static LLVM_ABI bool isValidOperands(const Value *Vec, const Value *Idx)
Return true if an extractelement instruction can be formed with the specified operands.
ArrayRef< unsigned > getIndices() const
static LLVM_ABI Type * getIndexedType(Type *Agg, ArrayRef< unsigned > Idxs)
Returns the type of the element that would be extracted with an extractvalue instruction with the spe...
This instruction compares its operands according to the predicate given to the constructor.
This class represents an extension of floating point types.
This class represents a cast from floating point to signed integer.
This class represents a cast from floating point to unsigned integer.
This class represents a truncation of floating point types.
AtomicOrdering getOrdering() const
Returns the ordering constraint of this fence instruction.
Value * getParentPad() const
Convenience accessors.
FunctionPass class - This class is used to implement most global optimizations.
Definition Pass.h:314
Type * getReturnType() const
FunctionType * getFunctionType() const
Returns the FunctionType for me.
Definition Function.h:209
Intrinsic::ID getIntrinsicID() const LLVM_READONLY
getIntrinsicID - This method returns the ID number of the specified function, or Intrinsic::not_intri...
Definition Function.h:244
DISubprogram * getSubprogram() const
Get the attached subprogram.
CallingConv::ID getCallingConv() const
getCallingConv()/setCallingConv(CC) - These method get and set the calling convention of this functio...
Definition Function.h:270
bool hasPersonalityFn() const
Check whether this function has a personality function.
Definition Function.h:903
const Function & getFunction() const
Definition Function.h:164
const std::string & getGC() const
Definition Function.cpp:834
Type * getReturnType() const
Returns the type of the ret val.
Definition Function.h:214
bool isVarArg() const
isVarArg - Return true if this function takes a variable number of arguments.
Definition Function.h:227
LLVM_ABI Value * getBasePtr() const
LLVM_ABI Value * getDerivedPtr() const
static LLVM_ABI Type * getIndexedType(Type *Ty, ArrayRef< Value * > IdxList)
Returns the result type of a getelementptr with the given source element type and indexes.
static bool isValidLinkage(LinkageTypes L)
Definition GlobalAlias.h:98
const Constant * getAliasee() const
Definition GlobalAlias.h:87
LLVM_ABI const Function * getResolverFunction() const
Definition Globals.cpp:665
static bool isValidLinkage(LinkageTypes L)
Definition GlobalIFunc.h:86
const Constant * getResolver() const
Definition GlobalIFunc.h:73
LLVM_ABI void getAllMetadata(SmallVectorImpl< std::pair< unsigned, MDNode * > > &MDs) const
Appends all metadata attached to this value to MDs, sorting by KindID.
bool hasComdat() const
MDNode * getMetadata(unsigned KindID) const
Get the current metadata attachments for the given kind, if any.
Definition Value.h:576
bool hasExternalLinkage() const
bool isDSOLocal() const
bool isImplicitDSOLocal() const
LLVM_ABI bool isDeclaration() const
Return true if the primary definition of this global value is outside of the current translation unit...
Definition Globals.cpp:328
bool hasValidDeclarationLinkage() const
LinkageTypes getLinkage() const
bool hasDefaultVisibility() const
bool hasPrivateLinkage() const
bool hasHiddenVisibility() const
bool hasExternalWeakLinkage() const
bool hasDLLImportStorageClass() const
bool hasDLLExportStorageClass() const
bool isDeclarationForLinker() const
unsigned getAddressSpace() const
Module * getParent()
Get the module that this global value is contained inside of...
PointerType * getType() const
Global values are always pointers.
LLVM_ABI bool isInterposable() const
Return true if this global's definition can be substituted with an arbitrary definition at link time ...
Definition Globals.cpp:107
bool hasComdat() const
bool hasCommonLinkage() const
bool hasGlobalUnnamedAddr() const
bool hasAppendingLinkage() const
bool hasAvailableExternallyLinkage() const
Type * getValueType() const
const Constant * getInitializer() const
getInitializer - Return the initializer for this global variable.
bool hasInitializer() const
Definitions have initializers, declarations don't.
MaybeAlign getAlign() const
Returns the alignment of the given variable.
bool isConstant() const
If the value is a global constant, its value is immutable throughout the runtime execution of the pro...
bool hasDefinitiveInitializer() const
hasDefinitiveInitializer - Whether the global variable has an initializer, and any other instances of...
This instruction compares its operands according to the predicate given to the constructor.
BasicBlock * getDestination(unsigned i)
Return the specified destination.
unsigned getNumDestinations() const
return the number of possible destinations in this indirectbr instruction.
unsigned getNumSuccessors() const
This instruction inserts a single (scalar) element into a VectorType value.
static LLVM_ABI bool isValidOperands(const Value *Vec, const Value *NewElt, const Value *Idx)
Return true if an insertelement instruction can be formed with the specified operands.
ArrayRef< unsigned > getIndices() const
Base class for instruction visitors.
Definition InstVisitor.h:78
void visit(Iterator Start, Iterator End)
Definition InstVisitor.h:87
LLVM_ABI unsigned getNumSuccessors() const LLVM_READONLY
Return the number of successors that this instruction has.
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
LLVM_ABI bool isAtomic() const LLVM_READONLY
Return true if this instruction has an AtomicOrdering of unordered or higher.
LLVM_ABI const Function * getFunction() const
Return the function this instruction belongs to.
This class represents a cast from an integer to a pointer.
static LLVM_ABI bool mayLowerToFunctionCall(Intrinsic::ID IID)
Check if the intrinsic might lower into a regular function call in the course of IR transformations.
Intrinsic::ID getIntrinsicID() const
Return the intrinsic ID of this intrinsic.
This is an important class for using LLVM in a threaded context.
Definition LLVMContext.h:68
bool isCleanup() const
Return 'true' if this landingpad instruction is a cleanup.
unsigned getNumClauses() const
Get the number of clauses for this landing pad.
bool isCatch(unsigned Idx) const
Return 'true' if the clause and index Idx is a catch clause.
bool isFilter(unsigned Idx) const
Return 'true' if the clause and index Idx is a filter clause.
Constant * getClause(unsigned Idx) const
Get the value of the clause at index Idx.
AtomicOrdering getOrdering() const
Returns the ordering constraint of this load instruction.
SyncScope::ID getSyncScopeID() const
Returns the synchronization scope ID of this load instruction.
Align getAlign() const
Return the alignment of the access that is being performed.
Metadata node.
Definition Metadata.h:1078
const MDOperand & getOperand(unsigned I) const
Definition Metadata.h:1442
bool isTemporary() const
Definition Metadata.h:1262
ArrayRef< MDOperand > operands() const
Definition Metadata.h:1440
unsigned getNumOperands() const
Return number of MDNode operands.
Definition Metadata.h:1448
bool isDistinct() const
Definition Metadata.h:1261
bool isResolved() const
Check if node is fully resolved.
Definition Metadata.h:1258
LLVMContext & getContext() const
Definition Metadata.h:1242
bool equalsStr(StringRef Str) const
Definition Metadata.h:922
Metadata * get() const
Definition Metadata.h:929
LLVM_ABI StringRef getString() const
Definition Metadata.cpp:618
static LLVM_ABI bool isTagMD(const Metadata *MD)
This class implements a map that also provides access to all stored values in a deterministic order.
Definition MapVector.h:36
static LLVM_ABI MetadataAsValue * getIfExists(LLVMContext &Context, Metadata *MD)
Definition Metadata.cpp:112
Metadata * getMetadata() const
Definition Metadata.h:201
Root of the metadata hierarchy.
Definition Metadata.h:64
LLVM_ABI void print(raw_ostream &OS, const Module *M=nullptr, bool IsForDebug=false) const
Print.
unsigned getMetadataID() const
Definition Metadata.h:104
Manage lifetime of a slot tracker for printing IR.
A Module instance is used to store all the information related to an LLVM module.
Definition Module.h:67
LLVM_ABI StringRef getName() const
LLVM_ABI void print(raw_ostream &ROS, bool IsForDebug=false) const
LLVM_ABI unsigned getNumOperands() const
iterator_range< op_iterator > operands()
Definition Metadata.h:1853
op_range incoming_values()
static LLVM_ABI PassRegistry * getPassRegistry()
getPassRegistry - Access the global registry object, which is automatically initialized at applicatio...
A set of analyses that are preserved following a run of a transformation pass.
Definition Analysis.h:112
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
Definition Analysis.h:118
This class represents a cast from a pointer to an address (non-capturing ptrtoint).
This class represents a cast from a pointer to an integer.
Value * getValue() const
Convenience accessor.
Value * getReturnValue() const
Convenience accessor. Returns null if there is no return value.
This class represents a sign extension of integer types.
This class represents a cast from signed integer to floating point.
static LLVM_ABI const char * areInvalidOperands(Value *Cond, Value *True, Value *False)
Return a string if the specified operands are invalid for a select operation, otherwise return null.
This instruction constructs a fixed permutation of two input vectors.
static LLVM_ABI bool isValidOperands(const Value *V1, const Value *V2, const Value *Mask)
Return true if a shufflevector instruction can be formed with the specified operands.
static LLVM_ABI void getShuffleMask(const Constant *Mask, SmallVectorImpl< int > &Result)
Convert the input shuffle mask operand to a vector of integers.
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
void insert_range(Range &&R)
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
void reserve(size_type N)
iterator insert(iterator I, T &&Elt)
void resize(size_type N)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
std::pair< StringRef, StringRef > split(char Separator) const
Split into two substrings around the first occurrence of a separator character.
Definition StringRef.h:712
static constexpr size_t npos
Definition StringRef.h:57
bool getAsInteger(unsigned Radix, T &Result) const
Parse the current string as an integer of the specified radix.
Definition StringRef.h:472
bool starts_with(StringRef Prefix) const
Check if this string starts with the given Prefix.
Definition StringRef.h:261
constexpr bool empty() const
empty - Check if the string is empty.
Definition StringRef.h:143
unsigned getNumElements() const
Random access to the elements.
LLVM_ABI Type * getTypeAtIndex(const Value *V) const
Given an index value into the type, return the type of the element.
Definition Type.cpp:718
LLVM_ABI bool isScalableTy(SmallPtrSetImpl< const Type * > &Visited) const
Returns true if this struct contains a scalable vector.
Definition Type.cpp:440
Verify that the TBAA Metadatas are valid.
Definition Verifier.h:40
LLVM_ABI bool visitTBAAMetadata(const Instruction *I, const MDNode *MD)
Visit an instruction, or a TBAA node itself as part of a metadata, and return true if it is valid,...
unsigned size() const
Triple - Helper class for working with autoconf configuration names.
Definition Triple.h:47
This class represents a truncation of integer types.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition Twine.h:82
The instances of the Type class are immutable: once they are created, they are never changed.
Definition Type.h:45
bool isVectorTy() const
True if this is an instance of VectorType.
Definition Type.h:273
LLVM_ABI bool containsNonGlobalTargetExtType(SmallPtrSetImpl< const Type * > &Visited) const
Return true if this type is or contains a target extension type that disallows being used as a global...
Definition Type.cpp:74
bool isArrayTy() const
True if this is an instance of ArrayType.
Definition Type.h:264
LLVM_ABI bool containsNonLocalTargetExtType(SmallPtrSetImpl< const Type * > &Visited) const
Return true if this type is or contains a target extension type that disallows being used as a local.
Definition Type.cpp:90
LLVM_ABI bool isScalableTy(SmallPtrSetImpl< const Type * > &Visited) const
Return true if this is a type whose size is a known multiple of vscale.
Definition Type.cpp:61
bool isLabelTy() const
Return true if this is 'label'.
Definition Type.h:228
bool isIntOrIntVectorTy() const
Return true if this is an integer type or a vector of integer types.
Definition Type.h:246
bool isPointerTy() const
True if this is an instance of PointerType.
Definition Type.h:267
bool isTokenLikeTy() const
Returns true if this is 'token' or a token-like target type.s.
Definition Type.cpp:1065
LLVM_ABI unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
bool isSingleValueType() const
Return true if the type is a valid type for a register in codegen.
Definition Type.h:296
LLVM_ABI bool canLosslesslyBitCastTo(Type *Ty) const
Return true if this type could be converted with a lossless BitCast to type 'Ty'.
Definition Type.cpp:153
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
Definition Type.h:352
bool isSized(SmallPtrSetImpl< Type * > *Visited=nullptr) const
Return true if it makes sense to take the size of this type.
Definition Type.h:311
LLVM_ABI unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
Definition Type.cpp:230
bool isFloatingPointTy() const
Return true if this is one of the floating-point types.
Definition Type.h:184
bool isPtrOrPtrVectorTy() const
Return true if this is a pointer type or a vector of pointer types.
Definition Type.h:270
bool isIntOrPtrTy() const
Return true if this is an integer type or a pointer type.
Definition Type.h:255
bool isIntegerTy() const
True if this is an instance of IntegerType.
Definition Type.h:240
bool isFPOrFPVectorTy() const
Return true if this is a FP type or a vector of FP.
Definition Type.h:225
bool isVoidTy() const
Return true if this is 'void'.
Definition Type.h:139
bool isMetadataTy() const
Return true if this is 'metadata'.
Definition Type.h:231
This class represents a cast unsigned integer to floating point.
op_range operands()
Definition User.h:293
Value * getOperand(unsigned i) const
Definition User.h:233
unsigned getNumOperands() const
Definition User.h:255
This class represents the va_arg llvm instruction, which returns an argument of the specified type gi...
Value * getValue() const
Definition Metadata.h:498
LLVM Value Representation.
Definition Value.h:75
iterator_range< user_iterator > materialized_users()
Definition Value.h:420
Type * getType() const
All values are typed, get the type of this value.
Definition Value.h:256
LLVM_ABI const Value * stripPointerCastsAndAliases() const
Strip off pointer casts, all-zero GEPs, address space casts, and aliases.
Definition Value.cpp:712
LLVM_ABI const Value * stripInBoundsOffsets(function_ref< void(const Value *)> Func=[](const Value *) {}) const
Strip off pointer casts and inbounds GEPs.
Definition Value.cpp:819
iterator_range< user_iterator > users()
Definition Value.h:426
bool materialized_use_empty() const
Definition Value.h:351
LLVM_ABI const Value * stripPointerCasts() const
Strip off pointer casts, all-zero GEPs and address space casts.
Definition Value.cpp:708
LLVM_ABI LLVMContext & getContext() const
All values hold a context through their type.
Definition Value.cpp:1106
bool hasName() const
Definition Value.h:262
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
Definition Value.cpp:322
Check a module for errors, and report separate error states for IR and debug info errors.
Definition Verifier.h:109
LLVM_ABI Result run(Module &M, ModuleAnalysisManager &)
LLVM_ABI PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM)
This class represents zero extension of integer types.
constexpr bool isNonZero() const
Definition TypeSize.h:155
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
Definition TypeSize.h:168
constexpr ScalarTy getKnownMinValue() const
Returns the minimum value this quantity can represent.
Definition TypeSize.h:165
An efficient, type-erasing, non-owning reference to a callable.
const ParentTy * getParent() const
Definition ilist_node.h:34
NodeTy * getNextNode()
Get the next node, or nullptr for the list tail.
Definition ilist_node.h:348
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition raw_ostream.h:53
CallInst * Call
This file contains the declaration of the Comdat class, which represents a single COMDAT in LLVM.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
@ FLAT_ADDRESS
Address space for flat memory.
@ GLOBAL_ADDRESS
Address space for global memory (RAT0, VTX0).
@ PRIVATE_ADDRESS
Address space for private memory.
constexpr char Align[]
Key for Kernel::Arg::Metadata::mAlign.
constexpr char Args[]
Key for Kernel::Metadata::mArgs.
constexpr char Attrs[]
Key for Kernel::Metadata::mAttrs.
bool isFlatGlobalAddrSpace(unsigned AS)
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
@ Entry
Definition COFF.h:862
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
Definition CallingConv.h:24
@ C
The default llvm calling convention, compatible with C.
Definition CallingConv.h:34
@ BasicBlock
Various leaf nodes.
Definition ISDOpcodes.h:81
LLVM_ABI MatchIntrinsicTypesResult matchIntrinsicSignature(FunctionType *FTy, ArrayRef< IITDescriptor > &Infos, SmallVectorImpl< Type * > &ArgTys)
Match the specified function type with the type constraints specified by the .td file.
LLVM_ABI void getIntrinsicInfoTableEntries(ID id, SmallVectorImpl< IITDescriptor > &T)
Populate the given array of IITDescriptors with the IIT table entries for the specified intrinsic.
@ MatchIntrinsicTypes_NoMatchRet
Definition Intrinsics.h:260
@ MatchIntrinsicTypes_NoMatchArg
Definition Intrinsics.h:261
LLVM_ABI bool hasConstrainedFPRoundingModeOperand(ID QID)
Returns true if the intrinsic ID is for one of the "Constrained Floating-Point Intrinsics" that take r...
LLVM_ABI StringRef getName(ID id)
Return the LLVM name for an intrinsic, such as "llvm.ppc.altivec.lvx".
static const int NoAliasScopeDeclScopeArg
Definition Intrinsics.h:41
LLVM_ABI bool matchIntrinsicVarArg(bool isVarArg, ArrayRef< IITDescriptor > &Infos)
Verify if the intrinsic has variable arguments.
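A condensed sketch of how these matching helpers fit together when checking an intrinsic declaration (the helper name is illustrative; diagnostic reporting is elided):
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Intrinsics.h"
static bool intrinsicSignatureLooksValid(const llvm::Function &F) {
  llvm::Intrinsic::ID ID = F.getIntrinsicID();
  if (ID == llvm::Intrinsic::not_intrinsic)
    return true; // nothing to check for ordinary functions
  llvm::SmallVector<llvm::Intrinsic::IITDescriptor, 8> Table;
  llvm::Intrinsic::getIntrinsicInfoTableEntries(ID, Table);
  llvm::ArrayRef<llvm::Intrinsic::IITDescriptor> TableRef = Table;
  llvm::SmallVector<llvm::Type *, 4> ArgTys;
  if (llvm::Intrinsic::matchIntrinsicSignature(F.getFunctionType(), TableRef, ArgTys) !=
      llvm::Intrinsic::MatchIntrinsicTypes_Match)
    return false;
  // matchIntrinsicVarArg returns true when the vararg-ness disagrees with the table.
  return !llvm::Intrinsic::matchIntrinsicVarArg(F.isVarArg(), TableRef);
}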
std::variant< std::monostate, Loc::Single, Loc::Multi, Loc::MMI, Loc::EntryValue > Variant
Alias for the std::variant specialization base class of DbgVariable.
Definition DwarfDebug.h:189
Flag
These should be considered private to the implementation of the MCInstrDesc class.
@ System
Synchronized with respect to all concurrently executing threads.
Definition LLVMContext.h:58
LLVM_ABI std::optional< VFInfo > tryDemangleForVFABI(StringRef MangledName, const FunctionType *FTy)
Function to construct a VFInfo out of a mangled name in the following format:
@ CE
Windows NT (Windows on ARM)
Definition MCAsmInfo.h:48
LLVM_ABI AssignmentInstRange getAssignmentInsts(DIAssignID *ID)
Return a range of instructions (typically just one) that have ID as an attachment.
initializer< Ty > init(const Ty &Val)
@ DW_MACINFO_undef
Definition Dwarf.h:818
@ DW_MACINFO_start_file
Definition Dwarf.h:819
@ DW_MACINFO_define
Definition Dwarf.h:817
std::enable_if_t< detail::IsValidPointer< X, Y >::value, X * > dyn_extract_or_null(Y &&MD)
Extract a Value from Metadata, if any, allowing null.
Definition Metadata.h:708
std::enable_if_t< detail::IsValidPointer< X, Y >::value, X * > dyn_extract(Y &&MD)
Extract a Value from Metadata, if any.
Definition Metadata.h:695
std::enable_if_t< detail::IsValidPointer< X, Y >::value, X * > extract(Y &&MD)
Extract a Value from Metadata.
Definition Metadata.h:667
@ User
could "use" a pointer
NodeAddr< UseNode * > Use
Definition RDFGraph.h:385
NodeAddr< NodeBase * > Node
Definition RDFGraph.h:381
friend class Instruction
Iterator for Instructions in a `BasicBlock`.
Definition BasicBlock.h:73
This is an optimization pass for GlobalISel generic memory operations.
Definition Types.h:26
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
Definition STLExtras.h:316
@ Low
Lower the current thread's priority such that it does not affect foreground tasks significantly.
Definition Threading.h:280
@ Offset
Definition DWP.cpp:532
FunctionAddr VTableAddr Value
Definition InstrProf.h:137
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1737
LLVM_ABI bool canInstructionHaveMMRAs(const Instruction &I)
detail::zippy< detail::zip_first, T, U, Args... > zip_equal(T &&t, U &&u, Args &&...args)
zip iterator that assumes that all iteratees have the same length.
Definition STLExtras.h:839
LLVM_ABI unsigned getBranchWeightOffset(const MDNode *ProfileData)
Return the offset to the first branch weight data.
constexpr bool isInt(int64_t x)
Checks if an integer fits into the given bit width.
Definition MathExtras.h:165
auto enumerate(FirstRange &&First, RestRanges &&...Rest)
Given two or more input ranges, returns a new range whose values are tuples (A, B,...
Definition STLExtras.h:2530
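A small usage sketch for the range helpers listed above (all_of, drop_begin, enumerate); the values are illustrative only:
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/raw_ostream.h"
void rangeHelperDemo() {
  llvm::SmallVector<int, 4> Vals = {1, 2, 4, 8};
  if (llvm::all_of(Vals, [](int V) { return V > 0; }))
    llvm::errs() << "all positive\n";
  // Skip the first element, then visit the rest together with their indices.
  for (auto [Idx, V] : llvm::enumerate(llvm::drop_begin(Vals)))
    llvm::errs() << Idx << ": " << V << "\n"; // prints 0: 2, 1: 4, 2: 8
}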
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:643
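The casting templates (dyn_cast here, isa and cast further down this list) follow one pattern; a minimal sketch with an illustrative helper:
#include "llvm/IR/Instructions.h"
#include "llvm/Support/Casting.h"
// dyn_cast yields nullptr when V is not a LoadInst; cast<> would assert instead.
static bool isNonVolatileLoad(const llvm::Value *V) {
  if (const auto *LI = llvm::dyn_cast<llvm::LoadInst>(V))
    return !LI->isVolatile();
  return false;
}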
LLVM_ABI bool verifyFunction(const Function &F, raw_ostream *OS=nullptr)
Check a function for errors; useful when debugging a pass.
AllocFnKind
Definition Attributes.h:51
testing::Matcher< const detail::ErrorHolder & > Failed()
Definition Error.h:198
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
Definition STLExtras.h:2184
LLVM_ABI DenseMap< BasicBlock *, ColorVector > colorEHFunclets(Function &F)
If an EH funclet personality is in use (see isFuncletEHPersonality), this will recompute which blocks...
constexpr bool isPowerOf2_64(uint64_t Value)
Return true if the argument is a power of two > 0 (64-bit edition).
Definition MathExtras.h:284
bool isa_and_nonnull(const Y &Val)
Definition Casting.h:676
Op::Description Desc
bool isScopedEHPersonality(EHPersonality Pers)
Returns true if this personality uses scope-style EH IR instructions: catchswitch,...
auto dyn_cast_or_null(const Y &Val)
Definition Casting.h:753
GenericConvergenceVerifier< SSAContext > ConvergenceVerifier
LLVM_ABI void initializeVerifierLegacyPassPass(PassRegistry &)
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
Definition MathExtras.h:279
bool isModSet(const ModRefInfo MRI)
Definition ModRef.h:49
void sort(IteratorTy Start, IteratorTy End)
Definition STLExtras.h:1634
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition Debug.cpp:207
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
Definition Error.cpp:167
FunctionAddr VTableAddr Count
Definition InstrProf.h:139
LLVM_ABI EHPersonality classifyEHPersonality(const Value *Pers)
See if the given exception handling personality function is one that we understand.
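A sketch combining the exception-handling helpers in this list (classifyEHPersonality, isScopedEHPersonality, colorEHFunclets); the function name is illustrative:
#include "llvm/ADT/DenseMap.h"
#include "llvm/IR/EHPersonalities.h"
#include "llvm/IR/Function.h"
static void inspectEHPersonality(llvm::Function &F) {
  if (!F.hasPersonalityFn())
    return;
  llvm::EHPersonality Pers = llvm::classifyEHPersonality(F.getPersonalityFn());
  // Funclet coloring only makes sense for scope-style (e.g. MSVC) personalities.
  if (llvm::isScopedEHPersonality(Pers)) {
    llvm::DenseMap<llvm::BasicBlock *, llvm::ColorVector> Colors =
        llvm::colorEHFunclets(F);
    (void)Colors;
  }
}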
iterator_range< SplittingIterator > split(StringRef Str, StringRef Separator)
Split the specified string over a separator and return a range-compatible iterable over its partition...
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
Definition Casting.h:547
LLVM_ABI bool isValueProfileMD(const MDNode *ProfileData)
Checks if an MDNode contains value profiling Metadata.
LLVM_ABI raw_fd_ostream & errs()
This returns a reference to a raw_ostream for standard error.
LLVM_ABI unsigned getNumBranchWeights(const MDNode &ProfileData)
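A sketch of the branch-weight helpers above, assuming Prof is already known to be a !"branch_weights" node (for example, taken from a terminator's !prof attachment):
#include "llvm/IR/Constants.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/ProfDataUtils.h"
#include "llvm/Support/raw_ostream.h"
static void printBranchWeights(const llvm::MDNode *Prof) {
  // The offset skips the tag string (and the optional "expected" marker)
  // before the weight operands.
  unsigned Offset = llvm::getBranchWeightOffset(Prof);
  unsigned NumWeights = llvm::getNumBranchWeights(*Prof);
  for (unsigned I = 0; I != NumWeights; ++I)
    if (auto *CI = llvm::mdconst::dyn_extract<llvm::ConstantInt>(
            Prof->getOperand(Offset + I)))
      llvm::errs() << CI->getZExtValue() << " ";
  llvm::errs() << "\n";
}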
AtomicOrdering
Atomic ordering for LLVM's memory model.
@ First
Helpers to iterate all locations in the MemoryEffectsBase class.
Definition ModRef.h:74
LLVM_ABI FunctionPass * createVerifierPass(bool FatalErrors=true)
FunctionAddr VTableAddr Next
Definition InstrProf.h:141
DWARFExpression::Operation Op
ArrayRef(const T &OneElt) -> ArrayRef< T >
constexpr unsigned BitWidth
TinyPtrVector< BasicBlock * > ColorVector
LLVM_ABI const char * LLVMLoopEstimatedTripCount
Profile-based loop metadata that should be accessed only by using llvm::getLoopEstimatedTripCount and...
DenormalMode parseDenormalFPAttribute(StringRef Str)
Returns the denormal mode to use for inputs and outputs.
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:559
LLVM_ABI std::optional< RoundingMode > convertStrToRoundingMode(StringRef)
Returns a valid RoundingMode enumerator when given a string that is valid as input in constrained int...
Definition FPEnv.cpp:25
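A small sketch of the floating-point environment helpers above; both input strings are illustrative examples of the forms these parsers accept:
#include <optional>
#include "llvm/ADT/FloatingPointMode.h"
#include "llvm/IR/FPEnv.h"
void fpEnvDemo() {
  // Constrained-intrinsic rounding-mode string -> RoundingMode enumerator.
  std::optional<llvm::RoundingMode> RM =
      llvm::convertStrToRoundingMode("round.tonearest");
  // "denormal-fp-math" attribute string -> DenormalMode (output,input).
  llvm::DenormalMode DM =
      llvm::parseDenormalFPAttribute("preserve-sign,preserve-sign");
  (void)RM;
  (void)DM;
}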
LLVM_ABI std::unique_ptr< GCStrategy > getGCStrategy(const StringRef Name)
Look up the GCStrategy object associated with the given GC name.
auto predecessors(const MachineBasicBlock *BB)
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Definition STLExtras.h:1945
bool pred_empty(const BasicBlock *BB)
Definition CFG.h:119
bool isHexDigit(char C)
Checks if character C is a hexadecimal numeric character.
AnalysisManager< Function > FunctionAnalysisManager
Convenience typedef for the Function analysis manager.
constexpr bool isCallableCC(CallingConv::ID CC)
LLVM_ABI bool verifyModule(const Module &M, raw_ostream *OS=nullptr, bool *BrokenDebugInfo=nullptr)
Check a module for errors.
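A minimal sketch of the standalone entry points (verifyFunction above and verifyModule here); both return true when the IR is broken, and the wrapper name is illustrative:
#include "llvm/IR/Module.h"
#include "llvm/IR/Verifier.h"
#include "llvm/Support/raw_ostream.h"
static bool moduleIsWellFormed(const llvm::Module &M) {
  bool BrokenDebugInfo = false;
  if (llvm::verifyModule(M, &llvm::errs(), &BrokenDebugInfo))
    return false; // malformed IR
  if (BrokenDebugInfo)
    llvm::errs() << "broken debug info (recoverable by stripping it)\n";
  return true;
}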
AnalysisManager< Module > ModuleAnalysisManager
Convenience typedef for the Module analysis manager.
Definition MIRParser.h:39
#define N
constexpr uint64_t value() const
This is a hole in the type system and should not be abused.
Definition Alignment.h:77
A special type used by analysis passes to provide an address that identifies that particular analysis...
Definition Analysis.h:29
static LLVM_ABI const char * SyntheticFunctionEntryCount
static LLVM_ABI const char * UnknownBranchWeightsMarker
static LLVM_ABI const char * ValueProfile
static LLVM_ABI const char * FunctionEntryCount
static LLVM_ABI const char * BranchWeights
uint32_t getTagID() const
Return the tag of this operand bundle as an integer.
ArrayRef< Use > Inputs
void DebugInfoCheckFailed(const Twine &Message)
A debug info check failed.
Definition Verifier.cpp:305
VerifierSupport(raw_ostream *OS, const Module &M)
Definition Verifier.cpp:154
bool Broken
Track the brokenness of the module while recursively visiting.
Definition Verifier.cpp:148
void CheckFailed(const Twine &Message, const T1 &V1, const Ts &... Vs)
A check failed (with values to print).
Definition Verifier.cpp:298
bool BrokenDebugInfo
Broken debug info can be "recovered" from by stripping the debug info.
Definition Verifier.cpp:150
LLVMContext & Context
Definition Verifier.cpp:145
bool TreatBrokenDebugInfoAsError
Whether to treat broken debug info as an error.
Definition Verifier.cpp:152
void CheckFailed(const Twine &Message)
A check failed, so print out the condition and the message.
Definition Verifier.cpp:287
const Module & M
Definition Verifier.cpp:141
const DataLayout & DL
Definition Verifier.cpp:144
void DebugInfoCheckFailed(const Twine &Message, const T1 &V1, const Ts &... Vs)
A debug info check failed (with values to print).
Definition Verifier.cpp:314
const Triple & TT
Definition Verifier.cpp:143
ModuleSlotTracker MST
Definition Verifier.cpp:142
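An illustrative stand-in (not the VerifierSupport defined in this file) showing how these members cooperate: a failed check prints through OS when one was provided and records the corresponding broken flag:
#include "llvm/ADT/Twine.h"
#include "llvm/Support/raw_ostream.h"
struct CheckStateSketch {
  llvm::raw_ostream *OS = nullptr; // may be null when the caller wants silence
  bool Broken = false;             // the IR itself is malformed
  bool BrokenDebugInfo = false;    // only the debug info is malformed
  void checkFailed(const llvm::Twine &Message) {
    if (OS)
      *OS << Message << '\n';
    Broken = true;
  }
  void debugInfoCheckFailed(const llvm::Twine &Message) {
    if (OS)
      *OS << Message << '\n';
    BrokenDebugInfo = true;
  }
};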