// Doxygen page header (not part of the original source file):
// LLVM 23.0.0git — Verifier.cpp — "Go to the documentation of this file."
1//===-- Verifier.cpp - Implement the Module Verifier -----------------------==//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file defines the function verifier interface, that can be used for some
10// basic correctness checking of input to the system.
11//
12// Note that this does not provide full `Java style' security and verifications,
13// instead it just tries to ensure that code is well-formed.
14//
15// * Both of a binary operator's parameters are of the same type
16// * Verify that the indices of mem access instructions match other operands
17// * Verify that arithmetic and other things are only performed on first-class
18// types. Verify that shifts & logicals only happen on integrals f.e.
19// * All of the constants in a switch statement are of the correct type
20// * The code is in valid SSA form
21// * It should be illegal to put a label into any other type (like a structure)
22// or to return one. [except constant arrays!]
23// * Only phi nodes can be self referential: 'add i32 %0, %0 ; <int>:0' is bad
24// * PHI nodes must have an entry for each predecessor, with no extras.
25// * PHI nodes must be the first thing in a basic block, all grouped together
26// * All basic blocks should only end with terminator insts, not contain them
27// * The entry node to a function must not have predecessors
28// * All Instructions must be embedded into a basic block
29// * Functions cannot take a void-typed parameter
30// * Verify that a function's argument list agrees with it's declared type.
31// * It is illegal to specify a name for a void value.
32// * It is illegal to have a internal global value with no initializer
33// * It is illegal to have a ret instruction that returns a value that does not
34// agree with the function return value type.
35// * Function call argument types match the function prototype
36// * A landing pad is defined by a landingpad instruction, and can be jumped to
37// only by the unwind edge of an invoke instruction.
38// * A landingpad instruction must be the first non-PHI instruction in the
39// block.
40// * Landingpad instructions must be in a function with a personality function.
41// * Convergence control intrinsics are introduced in ConvergentOperations.rst.
42// The applied restrictions are too numerous to list here.
43// * The convergence entry intrinsic and the loop heart must be the first
44// non-PHI instruction in their respective block. This does not conflict with
45// the landing pads, since these two kinds cannot occur in the same block.
46// * All other things that are tested by asserts spread about the code...
47//
48//===----------------------------------------------------------------------===//
49
50#include "llvm/IR/Verifier.h"
51#include "llvm/ADT/APFloat.h"
52#include "llvm/ADT/APInt.h"
53#include "llvm/ADT/ArrayRef.h"
54#include "llvm/ADT/DenseMap.h"
55#include "llvm/ADT/MapVector.h"
56#include "llvm/ADT/STLExtras.h"
60#include "llvm/ADT/StringRef.h"
61#include "llvm/ADT/Twine.h"
63#include "llvm/IR/Argument.h"
65#include "llvm/IR/Attributes.h"
66#include "llvm/IR/BasicBlock.h"
67#include "llvm/IR/CFG.h"
68#include "llvm/IR/CallingConv.h"
69#include "llvm/IR/Comdat.h"
70#include "llvm/IR/Constant.h"
73#include "llvm/IR/Constants.h"
75#include "llvm/IR/DataLayout.h"
76#include "llvm/IR/DebugInfo.h"
78#include "llvm/IR/DebugLoc.h"
80#include "llvm/IR/Dominators.h"
82#include "llvm/IR/FPEnv.h"
83#include "llvm/IR/Function.h"
84#include "llvm/IR/GCStrategy.h"
86#include "llvm/IR/GlobalAlias.h"
87#include "llvm/IR/GlobalValue.h"
89#include "llvm/IR/InlineAsm.h"
90#include "llvm/IR/InstVisitor.h"
91#include "llvm/IR/InstrTypes.h"
92#include "llvm/IR/Instruction.h"
95#include "llvm/IR/Intrinsics.h"
96#include "llvm/IR/IntrinsicsAArch64.h"
97#include "llvm/IR/IntrinsicsAMDGPU.h"
98#include "llvm/IR/IntrinsicsARM.h"
99#include "llvm/IR/IntrinsicsNVPTX.h"
100#include "llvm/IR/IntrinsicsWebAssembly.h"
101#include "llvm/IR/LLVMContext.h"
103#include "llvm/IR/Metadata.h"
104#include "llvm/IR/Module.h"
106#include "llvm/IR/PassManager.h"
108#include "llvm/IR/Statepoint.h"
109#include "llvm/IR/Type.h"
110#include "llvm/IR/Use.h"
111#include "llvm/IR/User.h"
113#include "llvm/IR/Value.h"
115#include "llvm/Pass.h"
119#include "llvm/Support/Casting.h"
123#include "llvm/Support/ModRef.h"
127#include <algorithm>
128#include <cassert>
129#include <cstdint>
130#include <memory>
131#include <optional>
132#include <string>
133#include <utility>
134
135using namespace llvm;
136
138 "verify-noalias-scope-decl-dom", cl::Hidden, cl::init(false),
139 cl::desc("Ensure that llvm.experimental.noalias.scope.decl for identical "
140 "scopes are not dominating"));
141
144 const Module &M;
146 const Triple &TT;
149
150 /// Track the brokenness of the module while recursively visiting.
151 bool Broken = false;
152 /// Broken debug info can be "recovered" from by stripping the debug info.
153 bool BrokenDebugInfo = false;
154 /// Whether to treat broken debug info as an error.
156
158 : OS(OS), M(M), MST(&M), TT(M.getTargetTriple()), DL(M.getDataLayout()),
159 Context(M.getContext()) {}
160
161private:
162 void Write(const Module *M) {
163 *OS << "; ModuleID = '" << M->getModuleIdentifier() << "'\n";
164 }
165
166 void Write(const Value *V) {
167 if (V)
168 Write(*V);
169 }
170
171 void Write(const Value &V) {
172 if (isa<Instruction>(V)) {
173 V.print(*OS, MST);
174 *OS << '\n';
175 } else {
176 V.printAsOperand(*OS, true, MST);
177 *OS << '\n';
178 }
179 }
180
181 void Write(const DbgRecord *DR) {
182 if (DR) {
183 DR->print(*OS, MST, false);
184 *OS << '\n';
185 }
186 }
187
189 switch (Type) {
191 *OS << "value";
192 break;
194 *OS << "declare";
195 break;
197 *OS << "declare_value";
198 break;
200 *OS << "assign";
201 break;
203 *OS << "end";
204 break;
206 *OS << "any";
207 break;
208 };
209 }
210
211 void Write(const Metadata *MD) {
212 if (!MD)
213 return;
214 MD->print(*OS, MST, &M);
215 *OS << '\n';
216 }
217
218 template <class T> void Write(const MDTupleTypedArrayWrapper<T> &MD) {
// Unwrap to the underlying tuple and reuse the Metadata printer.
219 Write(MD.get());
220 }
221
222 void Write(const NamedMDNode *NMD) {
223 if (!NMD)
224 return;
225 NMD->print(*OS, MST);
226 *OS << '\n';
227 }
228
229 void Write(Type *T) {
230 if (!T)
231 return;
232 *OS << ' ' << *T;
233 }
234
235 void Write(const Comdat *C) {
236 if (!C)
237 return;
238 *OS << *C;
239 }
240
241 void Write(const APInt *AI) {
242 if (!AI)
243 return;
244 *OS << *AI << '\n';
245 }
246
// Print a plain unsigned value followed by a newline.
247 void Write(const unsigned i) { *OS << i << '\n'; }
248
249 // NOLINTNEXTLINE(readability-identifier-naming)
250 void Write(const Attribute *A) {
251 if (!A)
252 return;
253 *OS << A->getAsString() << '\n';
254 }
255
256 // NOLINTNEXTLINE(readability-identifier-naming)
257 void Write(const AttributeSet *AS) {
258 if (!AS)
259 return;
260 *OS << AS->getAsString() << '\n';
261 }
262
263 // NOLINTNEXTLINE(readability-identifier-naming)
264 void Write(const AttributeList *AL) {
265 if (!AL)
266 return;
267 AL->print(*OS);
268 }
269
// Stream a deferred-print Printable followed by a newline.
270 void Write(Printable P) { *OS << P << '\n'; }
271
272 template <typename T> void Write(ArrayRef<T> Vs) {
273 for (const T &V : Vs)
274 Write(V);
275 }
276
277 template <typename T1, typename... Ts>
// Recursively print a heterogeneous pack of diagnostic values: the head is
// dispatched to the matching Write overload, then the tail is recursed on.
278 void WriteTs(const T1 &V1, const Ts &... Vs) {
279 Write(V1);
280 WriteTs(Vs...);
281 }
282
// Base case of the WriteTs recursion: empty pack, nothing to print.
283 template <typename... Ts> void WriteTs() {}
284
285public:
286 /// A check failed, so printout out the condition and the message.
287 ///
288 /// This provides a nice place to put a breakpoint if you want to see why
289 /// something is not correct.
290 void CheckFailed(const Twine &Message) {
291 if (OS)
292 *OS << Message << '\n';
293 Broken = true;
294 }
295
296 /// A check failed (with values to print).
297 ///
298 /// This calls the Message-only version so that the above is easier to set a
299 /// breakpoint on.
300 template <typename T1, typename... Ts>
301 void CheckFailed(const Twine &Message, const T1 &V1, const Ts &... Vs) {
// Report via the message-only overload first (convenient breakpoint target),
// then dump each offending value via its matching Write overload.
302 CheckFailed(Message);
303 if (OS)
304 WriteTs(V1, Vs...);
305 }
306
307 /// A debug info check failed.
308 void DebugInfoCheckFailed(const Twine &Message) {
309 if (OS)
310 *OS << Message << '\n';
312 BrokenDebugInfo = true;
313 }
314
315 /// A debug info check failed (with values to print).
316 template <typename T1, typename... Ts>
317 void DebugInfoCheckFailed(const Twine &Message, const T1 &V1,
318 const Ts &... Vs) {
// Delegate to the message-only overload, then dump each offending value.
319 DebugInfoCheckFailed(Message);
320 if (OS)
321 WriteTs(V1, Vs...);
322 }
323};
324
325namespace {
326
// The IR verifier proper: an InstVisitor that walks a module/function and
// records failures through the VerifierSupport machinery.
// NOTE(review): several member declarations inside this class were dropped by
// the extraction that produced this file (the fused original line numbers
// skip); each gap is marked below and must be restored from upstream.
327class Verifier : public InstVisitor<Verifier>, VerifierSupport {
328 friend class InstVisitor<Verifier>;
329 DominatorTree DT;
330
331 /// When verifying a basic block, keep track of all of the
332 /// instructions we have seen so far.
333 ///
334 /// This allows us to do efficient dominance checks for the case when an
335 /// instruction has an operand that is an instruction in the same block.
336 SmallPtrSet<Instruction *, 16> InstsInThisBlock;
337
338 /// Keep track of the metadata nodes that have been checked already.
// NOTE(review): member declaration missing here (extraction gap, line 339).
340
341 /// Keep track which DISubprogram is attached to which function.
// NOTE(review): declaration missing (extraction gap, line 342) — presumably
// the DISubprogramAttachments map cleared in verify(); confirm upstream.
343
344 /// Track all DICompileUnits visited.
// NOTE(review): declaration missing (extraction gap, line 345).
346
347 /// The result type for a landingpad.
348 Type *LandingPadResultTy;
349
350 /// Whether we've seen a call to @llvm.localescape in this function
351 /// already.
352 bool SawFrameEscape;
353
354 /// Whether the current function has a DISubprogram attached to it.
355 bool HasDebugInfo = false;
356
357 /// Stores the count of how many objects were passed to llvm.localescape for a
358 /// given function and the largest index passed to llvm.localrecover.
// NOTE(review): declaration missing (extraction gap, line 359).
360
361 // Maps catchswitches and cleanuppads that unwind to siblings to the
362 // terminators that indicate the unwind, used to detect cycles therein.
// NOTE(review): declaration of SiblingFuncletInfo missing (extraction gap,
// line 363) — it is cleared in verify(F) below; confirm upstream.
364
365 /// Cache which blocks are in which funclet, if an EH funclet personality is
366 /// in use. Otherwise empty.
367 DenseMap<BasicBlock *, ColorVector> BlockEHFuncletColors;
368
369 /// Cache of constants visited in search of ConstantExprs.
370 SmallPtrSet<const Constant *, 32> ConstantExprVisited;
371
372 /// Cache of declarations of the llvm.experimental.deoptimize.<ty> intrinsic.
373 SmallVector<const Function *, 4> DeoptimizeDeclarations;
374
375 /// Cache of attribute lists verified.
376 SmallPtrSet<const void *, 32> AttributeListsVisited;
377
378 // Verify that this GlobalValue is only used in this module.
379 // This map is used to avoid visiting uses twice. We can arrive at a user
380 // twice, if they have multiple operands. In particular for very large
381 // constant expressions, we can arrive at a particular user many times.
382 SmallPtrSet<const Value *, 32> GlobalValueVisited;
383
384 // Keeps track of duplicate function argument debug info.
// NOTE(review): declaration of DebugFnArgs missing (extraction gap, line
// 385) — it is cleared in verify(F) below; confirm upstream.
386
387 TBAAVerifier TBAAVerifyHelper;
388 ConvergenceVerifier ConvergenceVerifyHelper;
389
390 SmallVector<IntrinsicInst *, 4> NoAliasScopeDecls;
391
392 void checkAtomicMemAccessSize(Type *Ty, const Instruction *I);
393
394public:
395 explicit Verifier(raw_ostream *OS, bool ShouldTreatBrokenDebugInfoAsError,
396 const Module &M)
397 : VerifierSupport(OS, M), LandingPadResultTy(nullptr),
398 SawFrameEscape(false), TBAAVerifyHelper(this) {
399 TreatBrokenDebugInfoAsError = ShouldTreatBrokenDebugInfoAsError;
400 }
401
402 bool hasBrokenDebugInfo() const { return BrokenDebugInfo; }
403
// Verify a single function; returns true when no failure was recorded.
404 bool verify(const Function &F) {
405 llvm::TimeTraceScope timeScope("Verifier");
406 assert(F.getParent() == &M &&
407 "An instance of this class only works with a specific module!");
408
409 // First ensure the function is well-enough formed to compute dominance
410 // information, and directly compute a dominance tree. We don't rely on the
411 // pass manager to provide this as it isolates us from a potentially
412 // out-of-date dominator tree and makes it significantly more complex to run
413 // this code outside of a pass manager.
414
415 // First check that every basic block has a terminator, otherwise we can't
416 // even inspect the CFG.
417 for (const BasicBlock &BB : F) {
418 if (!BB.empty() && BB.back().isTerminator())
419 continue;
420
421 if (OS) {
422 *OS << "Basic Block in function '" << F.getName()
423 << "' does not have terminator!\n";
424 BB.printAsOperand(*OS, true, MST);
425 *OS << "\n";
426 }
427 return false;
428 }
429
430 // FIXME: It's really gross that we have to cast away constness here.
431 if (!F.empty())
432 DT.recalculate(const_cast<Function &>(F));
433
434 auto FailureCB = [this](const Twine &Message) {
435 this->CheckFailed(Message);
436 };
437 ConvergenceVerifyHelper.initialize(OS, FailureCB, F);
438
439 Broken = false;
440 // FIXME: We strip const here because the inst visitor strips const.
441 visit(const_cast<Function &>(F));
442 verifySiblingFuncletUnwinds();
443
444 if (ConvergenceVerifyHelper.sawTokens())
445 ConvergenceVerifyHelper.verify(DT);
446
// Reset per-function state so the next verify() call starts clean.
447 InstsInThisBlock.clear();
448 DebugFnArgs.clear();
449 LandingPadResultTy = nullptr;
450 SawFrameEscape = false;
451 SiblingFuncletInfo.clear();
452 verifyNoAliasScopeDecl();
453 NoAliasScopeDecls.clear();
454
455 return !Broken;
456 }
457
458 /// Verify the module that this instance of \c Verifier was initialized with.
459 bool verify() {
460 Broken = false;
461
462 // Collect all declarations of the llvm.experimental.deoptimize intrinsic.
463 for (const Function &F : M)
464 if (F.getIntrinsicID() == Intrinsic::experimental_deoptimize)
465 DeoptimizeDeclarations.push_back(&F);
466
467 // Now that we've visited every function, verify that we never asked to
468 // recover a frame index that wasn't escaped.
469 verifyFrameRecoverIndices();
470 for (const GlobalVariable &GV : M.globals())
471 visitGlobalVariable(GV);
472
473 for (const GlobalAlias &GA : M.aliases())
474 visitGlobalAlias(GA);
475
476 for (const GlobalIFunc &GI : M.ifuncs())
477 visitGlobalIFunc(GI);
478
479 for (const NamedMDNode &NMD : M.named_metadata())
480 visitNamedMDNode(NMD);
481
482 for (const StringMapEntry<Comdat> &SMEC : M.getComdatSymbolTable())
483 visitComdat(SMEC.getValue());
484
485 visitModuleFlags();
486 visitModuleIdents();
487 visitModuleCommandLines();
488 visitModuleErrnoTBAA();
489
490 verifyCompileUnits();
491
492 verifyDeoptimizeCallingConvs();
493 DISubprogramAttachments.clear();
494 return !Broken;
495 }
496
497private:
498 /// Whether a metadata node is allowed to be, or contain, a DILocation.
499 enum class AreDebugLocsAllowed { No, Yes };
500
501 /// Metadata that should be treated as a range, with slightly different
502 /// requirements.
503 enum class RangeLikeMetadataKind {
504 Range, // MD_range
505 AbsoluteSymbol, // MD_absolute_symbol
506 NoaliasAddrspace // MD_noalias_addrspace
507 };
508
509 // Verification methods...
510 void visitGlobalValue(const GlobalValue &GV);
511 void visitGlobalVariable(const GlobalVariable &GV);
512 void visitGlobalAlias(const GlobalAlias &GA);
513 void visitGlobalIFunc(const GlobalIFunc &GI);
514 void visitAliaseeSubExpr(const GlobalAlias &A, const Constant &C);
515 void visitAliaseeSubExpr(SmallPtrSetImpl<const GlobalAlias *> &Visited,
516 const GlobalAlias &A, const Constant &C);
517 void visitNamedMDNode(const NamedMDNode &NMD);
518 void visitMDNode(const MDNode &MD, AreDebugLocsAllowed AllowLocs);
519 void visitMetadataAsValue(const MetadataAsValue &MD, Function *F);
520 void visitValueAsMetadata(const ValueAsMetadata &MD, Function *F);
521 void visitDIArgList(const DIArgList &AL, Function *F);
522 void visitComdat(const Comdat &C);
523 void visitModuleIdents();
524 void visitModuleCommandLines();
525 void visitModuleErrnoTBAA();
526 void visitModuleFlags();
527 void visitModuleFlag(const MDNode *Op,
528 DenseMap<const MDString *, const MDNode *> &SeenIDs,
529 SmallVectorImpl<const MDNode *> &Requirements);
530 void visitModuleFlagCGProfileEntry(const MDOperand &MDO);
531 void visitFunction(const Function &F);
532 void visitBasicBlock(BasicBlock &BB);
533 void verifyRangeLikeMetadata(const Value &V, const MDNode *Range, Type *Ty,
534 RangeLikeMetadataKind Kind);
535 void visitRangeMetadata(Instruction &I, MDNode *Range, Type *Ty);
536 void visitNoFPClassMetadata(Instruction &I, MDNode *Range, Type *Ty);
537 void visitNoaliasAddrspaceMetadata(Instruction &I, MDNode *Range, Type *Ty);
538 void visitDereferenceableMetadata(Instruction &I, MDNode *MD);
539 void visitNofreeMetadata(Instruction &I, MDNode *MD);
540 void visitProfMetadata(Instruction &I, MDNode *MD);
541 void visitCallStackMetadata(MDNode *MD);
542 void visitMemProfMetadata(Instruction &I, MDNode *MD);
543 void visitCallsiteMetadata(Instruction &I, MDNode *MD);
544 void visitCalleeTypeMetadata(Instruction &I, MDNode *MD);
545 void visitDIAssignIDMetadata(Instruction &I, MDNode *MD);
546 void visitMMRAMetadata(Instruction &I, MDNode *MD);
547 void visitAnnotationMetadata(MDNode *Annotation);
548 void visitAliasScopeMetadata(const MDNode *MD);
549 void visitAliasScopeListMetadata(const MDNode *MD);
550 void visitAccessGroupMetadata(const MDNode *MD);
551 void visitCapturesMetadata(Instruction &I, const MDNode *Captures);
552 void visitAllocTokenMetadata(Instruction &I, MDNode *MD);
553 void visitInlineHistoryMetadata(Instruction &I, MDNode *MD);
554
555 template <class Ty> bool isValidMetadataArray(const MDTuple &N);
556#define HANDLE_SPECIALIZED_MDNODE_LEAF(CLASS) void visit##CLASS(const CLASS &N);
557#include "llvm/IR/Metadata.def"
558 void visitDIType(const DIType &N);
559 void visitDIScope(const DIScope &N);
560 void visitDIVariable(const DIVariable &N);
561 void visitDILexicalBlockBase(const DILexicalBlockBase &N);
562 void visitDITemplateParameter(const DITemplateParameter &N);
563
564 void visitTemplateParams(const MDNode &N, const Metadata &RawParams);
565
566 void visit(DbgLabelRecord &DLR);
567 void visit(DbgVariableRecord &DVR);
568 // InstVisitor overrides...
569 using InstVisitor<Verifier>::visit;
570 void visitDbgRecords(Instruction &I);
571 void visit(Instruction &I);
572
573 void visitTruncInst(TruncInst &I);
574 void visitZExtInst(ZExtInst &I);
575 void visitSExtInst(SExtInst &I);
576 void visitFPTruncInst(FPTruncInst &I);
577 void visitFPExtInst(FPExtInst &I);
578 void visitFPToUIInst(FPToUIInst &I);
579 void visitFPToSIInst(FPToSIInst &I);
580 void visitUIToFPInst(UIToFPInst &I);
581 void visitSIToFPInst(SIToFPInst &I);
582 void visitIntToPtrInst(IntToPtrInst &I);
583 void checkPtrToAddr(Type *SrcTy, Type *DestTy, const Value &V);
584 void visitPtrToAddrInst(PtrToAddrInst &I);
585 void visitPtrToIntInst(PtrToIntInst &I);
586 void visitBitCastInst(BitCastInst &I);
587 void visitAddrSpaceCastInst(AddrSpaceCastInst &I);
588 void visitPHINode(PHINode &PN);
589 void visitCallBase(CallBase &Call);
590 void visitUnaryOperator(UnaryOperator &U);
591 void visitBinaryOperator(BinaryOperator &B);
592 void visitICmpInst(ICmpInst &IC);
593 void visitFCmpInst(FCmpInst &FC);
594 void visitExtractElementInst(ExtractElementInst &EI);
595 void visitInsertElementInst(InsertElementInst &EI);
596 void visitShuffleVectorInst(ShuffleVectorInst &EI);
597 void visitVAArgInst(VAArgInst &VAA) { visitInstruction(VAA); }
598 void visitCallInst(CallInst &CI);
599 void visitInvokeInst(InvokeInst &II);
600 void visitGetElementPtrInst(GetElementPtrInst &GEP);
601 void visitLoadInst(LoadInst &LI);
602 void visitStoreInst(StoreInst &SI);
603 void verifyDominatesUse(Instruction &I, unsigned i);
604 void visitInstruction(Instruction &I);
605 void visitTerminator(Instruction &I);
606 void visitCondBrInst(CondBrInst &BI);
607 void visitReturnInst(ReturnInst &RI);
608 void visitSwitchInst(SwitchInst &SI);
609 void visitIndirectBrInst(IndirectBrInst &BI);
610 void visitCallBrInst(CallBrInst &CBI);
611 void visitSelectInst(SelectInst &SI);
612 void visitUserOp1(Instruction &I);
613 void visitUserOp2(Instruction &I) { visitUserOp1(I); }
614 void visitIntrinsicCall(Intrinsic::ID ID, CallBase &Call);
615 void visitConstrainedFPIntrinsic(ConstrainedFPIntrinsic &FPI);
616 void visitVPIntrinsic(VPIntrinsic &VPI);
617 void visitDbgLabelIntrinsic(StringRef Kind, DbgLabelInst &DLI);
618 void visitAtomicCmpXchgInst(AtomicCmpXchgInst &CXI);
619 void visitAtomicRMWInst(AtomicRMWInst &RMWI);
620 void visitFenceInst(FenceInst &FI);
621 void visitAllocaInst(AllocaInst &AI);
622 void visitExtractValueInst(ExtractValueInst &EVI);
623 void visitInsertValueInst(InsertValueInst &IVI);
624 void visitEHPadPredecessors(Instruction &I);
625 void visitLandingPadInst(LandingPadInst &LPI);
626 void visitResumeInst(ResumeInst &RI);
627 void visitCatchPadInst(CatchPadInst &CPI);
628 void visitCatchReturnInst(CatchReturnInst &CatchReturn);
629 void visitCleanupPadInst(CleanupPadInst &CPI);
630 void visitFuncletPadInst(FuncletPadInst &FPI);
631 void visitCatchSwitchInst(CatchSwitchInst &CatchSwitch);
632 void visitCleanupReturnInst(CleanupReturnInst &CRI);
633
634 void verifySwiftErrorCall(CallBase &Call, const Value *SwiftErrorVal);
635 void verifySwiftErrorValue(const Value *SwiftErrorVal);
636 void verifyTailCCMustTailAttrs(const AttrBuilder &Attrs, StringRef Context);
637 void verifyMustTailCall(CallInst &CI);
638 bool verifyAttributeCount(AttributeList Attrs, unsigned Params);
639 void verifyAttributeTypes(AttributeSet Attrs, const Value *V);
640 void verifyParameterAttrs(AttributeSet Attrs, Type *Ty, const Value *V);
641 void checkUnsignedBaseTenFuncAttr(AttributeList Attrs, StringRef Attr,
642 const Value *V);
643 void verifyFunctionAttrs(FunctionType *FT, AttributeList Attrs,
644 const Value *V, bool IsIntrinsic, bool IsInlineAsm);
645 void verifyFunctionMetadata(ArrayRef<std::pair<unsigned, MDNode *>> MDs);
646 void verifyUnknownProfileMetadata(MDNode *MD);
647 void visitConstantExprsRecursively(const Constant *EntryC);
648 void visitConstantExpr(const ConstantExpr *CE);
649 void visitConstantPtrAuth(const ConstantPtrAuth *CPA);
650 void verifyInlineAsmCall(const CallBase &Call);
651 void verifyStatepoint(const CallBase &Call);
652 void verifyFrameRecoverIndices();
653 void verifySiblingFuncletUnwinds();
654
655 void verifyFragmentExpression(const DbgVariableRecord &I);
656 template <typename ValueOrMetadata>
657 void verifyFragmentExpression(const DIVariable &V,
// NOTE(review): a parameter line is missing here (extraction gap, line 658).
659 ValueOrMetadata *Desc);
660 void verifyFnArgs(const DbgVariableRecord &DVR);
661 void verifyNotEntryValue(const DbgVariableRecord &I);
662
663 /// Module-level debug info verification...
664 void verifyCompileUnits();
665
666 /// Module-level verification that all @llvm.experimental.deoptimize
667 /// declarations share the same calling convention.
668 void verifyDeoptimizeCallingConvs();
669
670 void verifyAttachedCallBundle(const CallBase &Call,
671 const OperandBundleUse &BU);
672
673 /// Verify the llvm.experimental.noalias.scope.decl declarations
674 void verifyNoAliasScopeDecl();
675};
676
677} // end anonymous namespace
678
679/// We know that cond should be true, if not print an error message.
680#define Check(C, ...) \
681 do { \
682 if (!(C)) { \
683 CheckFailed(__VA_ARGS__); \
684 return; \
685 } \
686 } while (false)
687
688/// We know that a debug info condition should be true, if not print
689/// an error message.
690#define CheckDI(C, ...) \
691 do { \
692 if (!(C)) { \
693 DebugInfoCheckFailed(__VA_ARGS__); \
694 return; \
695 } \
696 } while (false)
697
698void Verifier::visitDbgRecords(Instruction &I) {
699 if (!I.DebugMarker)
700 return;
701 CheckDI(I.DebugMarker->MarkedInstr == &I,
702 "Instruction has invalid DebugMarker", &I);
703 CheckDI(!isa<PHINode>(&I) || !I.hasDbgRecords(),
704 "PHI Node must not have any attached DbgRecords", &I);
705 for (DbgRecord &DR : I.getDbgRecordRange()) {
706 CheckDI(DR.getMarker() == I.DebugMarker,
707 "DbgRecord had invalid DebugMarker", &I, &DR);
708 if (auto *Loc =
710 visitMDNode(*Loc, AreDebugLocsAllowed::Yes);
711 if (auto *DVR = dyn_cast<DbgVariableRecord>(&DR)) {
712 visit(*DVR);
713 // These have to appear after `visit` for consistency with existing
714 // intrinsic behaviour.
715 verifyFragmentExpression(*DVR);
716 verifyNotEntryValue(*DVR);
717 } else if (auto *DLR = dyn_cast<DbgLabelRecord>(&DR)) {
718 visit(*DLR);
719 }
720 }
721}
722
723void Verifier::visit(Instruction &I) {
724 visitDbgRecords(I);
725 for (unsigned i = 0, e = I.getNumOperands(); i != e; ++i)
726 Check(I.getOperand(i) != nullptr, "Operand is null", &I);
728}
729
730// Helper to iterate over indirect users. By returning false, the callback can ask to stop traversing further.
731static void forEachUser(const Value *User,
733 llvm::function_ref<bool(const Value *)> Callback) {
734 if (!Visited.insert(User).second)
735 return;
736
738 while (!WorkList.empty()) {
739 const Value *Cur = WorkList.pop_back_val();
740 if (!Visited.insert(Cur).second)
741 continue;
742 if (Callback(Cur))
743 append_range(WorkList, Cur->materialized_users());
744 }
745}
746
// Checks common to every GlobalValue: linkage/visibility/storage-class
// consistency, 'associated' and implicit-ref metadata wellformedness, and
// that every user of the global lives in this module.
// NOTE(review): several Check( opening lines were dropped by the extraction
// that produced this file (fused line numbers 748, 808, 821, 826, 832-833
// are missing); the surviving argument lines are kept verbatim below and the
// dropped conditions must be restored from upstream.
747void Verifier::visitGlobalValue(const GlobalValue &GV) {
749 "Global is external, but doesn't have external or weak linkage!", &GV);
750
751 if (const GlobalObject *GO = dyn_cast<GlobalObject>(&GV)) {
752 if (const MDNode *Associated =
753 GO->getMetadata(LLVMContext::MD_associated)) {
754 Check(Associated->getNumOperands() == 1,
755 "associated metadata must have one operand", &GV, Associated);
756 const Metadata *Op = Associated->getOperand(0).get();
757 Check(Op, "associated metadata must have a global value", GO, Associated);
758
759 const auto *VM = dyn_cast_or_null<ValueAsMetadata>(Op);
760 Check(VM, "associated metadata must be ValueAsMetadata", GO, Associated);
761 if (VM) {
762 Check(isa<PointerType>(VM->getValue()->getType()),
763 "associated value must be pointer typed", GV, Associated);
764
765 const Value *Stripped = VM->getValue()->stripPointerCastsAndAliases();
766 Check(isa<GlobalObject>(Stripped) || isa<Constant>(Stripped),
767 "associated metadata must point to a GlobalObject", GO, Stripped);
768 Check(Stripped != GO,
769 "global values should not associate to themselves", GO,
770 Associated);
771 }
772 }
773
774 // FIXME: Why is getMetadata on GlobalValue protected?
775 if (const MDNode *AbsoluteSymbol =
776 GO->getMetadata(LLVMContext::MD_absolute_symbol)) {
777 verifyRangeLikeMetadata(*GO, AbsoluteSymbol,
778 DL.getIntPtrType(GO->getType()),
779 RangeLikeMetadataKind::AbsoluteSymbol);
780 }
781
782 if (GO->hasMetadata(LLVMContext::MD_implicit_ref)) {
783 Check(!GO->isDeclaration(),
784 "ref metadata must not be placed on a declaration", GO);
785
// NOTE(review): the declaration of the MDs vector is missing here
// (extraction gap, fused line 786); restore from upstream.
787 GO->getMetadata(LLVMContext::MD_implicit_ref, MDs);
788 for (const MDNode *MD : MDs) {
789 Check(MD->getNumOperands() == 1, "ref metadata must have one operand",
790 &GV, MD);
791 const Metadata *Op = MD->getOperand(0).get();
792 const auto *VM = dyn_cast_or_null<ValueAsMetadata>(Op);
793 Check(VM, "ref metadata must be ValueAsMetadata", GO, MD);
794 if (VM) {
795 Check(isa<PointerType>(VM->getValue()->getType()),
796 "ref value must be pointer typed", GV, MD);
797
798 const Value *Stripped = VM->getValue()->stripPointerCastsAndAliases();
799 Check(isa<GlobalObject>(Stripped) || isa<Constant>(Stripped),
800 "ref metadata must point to a GlobalObject", GO, Stripped);
801 Check(Stripped != GO, "values should not reference themselves", GO,
802 MD);
803 }
804 }
805 }
806 }
807
809 "Only global variables can have appending linkage!", &GV);
810
811 if (GV.hasAppendingLinkage()) {
812 const GlobalVariable *GVar = dyn_cast<GlobalVariable>(&GV);
813 Check(GVar && GVar->getValueType()->isArrayTy(),
814 "Only global arrays can have appending linkage!", GVar);
815 }
816
817 if (GV.isDeclarationForLinker())
818 Check(!GV.hasComdat(), "Declaration may not be in a Comdat!", &GV);
819
820 if (GV.hasDLLExportStorageClass()) {
822 "dllexport GlobalValue must have default or protected visibility",
823 &GV);
824 }
825 if (GV.hasDLLImportStorageClass()) {
827 "dllimport GlobalValue must have default visibility", &GV);
828 Check(!GV.isDSOLocal(), "GlobalValue with DLLImport Storage is dso_local!",
829 &GV);
830
831 Check((GV.isDeclaration() &&
834 "Global is marked as dllimport, but not external", &GV);
835 }
836
837 if (GV.isImplicitDSOLocal())
838 Check(GV.isDSOLocal(),
839 "GlobalValue with local linkage or non-default "
840 "visibility must be dso_local!",
841 &GV);
842
// Walk all (transitive) users and reject any that live outside this module.
843 forEachUser(&GV, GlobalValueVisited, [&](const Value *V) -> bool {
844 if (const Instruction *I = dyn_cast<Instruction>(V)) {
845 if (!I->getParent() || !I->getParent()->getParent())
846 CheckFailed("Global is referenced by parentless instruction!", &GV, &M,
847 I);
848 else if (I->getParent()->getParent()->getParent() != &M)
849 CheckFailed("Global is referenced in a different module!", &GV, &M, I,
850 I->getParent()->getParent(),
851 I->getParent()->getParent()->getParent());
852 return false;
853 } else if (const Function *F = dyn_cast<Function>(V)) {
854 if (F->getParent() != &M)
855 CheckFailed("Global is used by function in a different module", &GV, &M,
856 F, F->getParent());
857 return false;
858 }
859 return true;
860 });
861}
862
// Checks specific to GlobalVariables: alignment, initializer consistency,
// the special llvm.global_ctors/dtors and llvm.used/compiler.used shapes,
// debug info attachments, and type/address-space constraints. Falls through
// to visitGlobalValue at the end.
// NOTE(review): several Check( opening lines were dropped by the extraction
// (fused line numbers 876, 882, 892, 894, 918, 920, 933-934, 944, 960 are
// missing); the surviving argument lines are kept verbatim and the dropped
// conditions must be restored from upstream.
863void Verifier::visitGlobalVariable(const GlobalVariable &GV) {
864 Type *GVType = GV.getValueType();
865
866 if (MaybeAlign A = GV.getAlign()) {
867 Check(A->value() <= Value::MaximumAlignment,
868 "huge alignment values are unsupported", &GV);
869 }
870
871 if (GV.hasInitializer()) {
872 Check(GV.getInitializer()->getType() == GVType,
873 "Global variable initializer type does not match global "
874 "variable type!",
875 &GV);
877 "Global variable initializer must be sized", &GV);
878 visitConstantExprsRecursively(GV.getInitializer());
879 // If the global has common linkage, it must have a zero initializer and
880 // cannot be constant.
881 if (GV.hasCommonLinkage()) {
883 "'common' global must have a zero initializer!", &GV);
884 Check(!GV.isConstant(), "'common' global may not be marked constant!",
885 &GV);
886 Check(!GV.hasComdat(), "'common' global may not be in a Comdat!", &GV);
887 }
888 }
889
890 if (GV.hasName() && (GV.getName() == "llvm.global_ctors" ||
891 GV.getName() == "llvm.global_dtors")) {
893 "invalid linkage for intrinsic global variable", &GV);
895 "invalid uses of intrinsic global variable", &GV);
896
897 // Don't worry about emitting an error for it not being an array,
898 // visitGlobalValue will complain on appending non-array.
899 if (ArrayType *ATy = dyn_cast<ArrayType>(GVType)) {
900 StructType *STy = dyn_cast<StructType>(ATy->getElementType());
901 PointerType *FuncPtrTy =
902 PointerType::get(Context, DL.getProgramAddressSpace());
903 Check(STy && (STy->getNumElements() == 2 || STy->getNumElements() == 3) &&
904 STy->getTypeAtIndex(0u)->isIntegerTy(32) &&
905 STy->getTypeAtIndex(1) == FuncPtrTy,
906 "wrong type for intrinsic global variable", &GV);
907 Check(STy->getNumElements() == 3,
908 "the third field of the element type is mandatory, "
909 "specify ptr null to migrate from the obsoleted 2-field form");
910 Type *ETy = STy->getTypeAtIndex(2);
911 Check(ETy->isPointerTy(), "wrong type for intrinsic global variable",
912 &GV);
913 }
914 }
915
916 if (GV.hasName() && (GV.getName() == "llvm.used" ||
917 GV.getName() == "llvm.compiler.used")) {
919 "invalid linkage for intrinsic global variable", &GV);
921 "invalid uses of intrinsic global variable", &GV);
922
923 if (ArrayType *ATy = dyn_cast<ArrayType>(GVType)) {
924 PointerType *PTy = dyn_cast<PointerType>(ATy->getElementType());
925 Check(PTy, "wrong type for intrinsic global variable", &GV);
926 if (GV.hasInitializer()) {
927 const Constant *Init = GV.getInitializer();
928 const ConstantArray *InitArray = dyn_cast<ConstantArray>(Init);
929 Check(InitArray, "wrong initializer for intrinsic global variable",
930 Init);
931 for (Value *Op : InitArray->operands()) {
932 Value *V = Op->stripPointerCasts();
935 Twine("invalid ") + GV.getName() + " member", V);
936 Check(V->hasName(),
937 Twine("members of ") + GV.getName() + " must be named", V);
938 }
939 }
940 }
941 }
942
943 // Visit any debug info attachments.
945 GV.getMetadata(LLVMContext::MD_dbg, MDs);
946 for (auto *MD : MDs) {
947 if (auto *GVE = dyn_cast<DIGlobalVariableExpression>(MD))
948 visitDIGlobalVariableExpression(*GVE);
949 else
950 CheckDI(false, "!dbg attachment of global variable must be a "
951 "DIGlobalVariableExpression");
952 }
953
954 // Scalable vectors cannot be global variables, since we don't know
955 // the runtime size.
956 Check(!GVType->isScalableTy(), "Globals cannot contain scalable types", &GV);
957
958 // Check if it is or contains a target extension type that disallows being
959 // used as a global.
961 "Global @" + GV.getName() + " has illegal target extension type",
962 GVType);
963
964 // Check that the address space can hold all bits of the type, recognized
965 // by an access in the address space being able to reach all bytes of the
966 // type.
967 Check(!GVType->isSized() ||
968 isUIntN(DL.getAddressSizeInBits(GV.getAddressSpace()),
969 GV.getGlobalSize(DL)),
970 "Global variable is too large to fit into the address space", &GV,
971 GVType);
972
973 if (!GV.hasInitializer()) {
974 visitGlobalValue(GV);
975 return;
976 }
977
978 // Walk any aggregate initializers looking for bitcasts between address spaces
979 visitConstantExprsRecursively(GV.getInitializer());
980
981 visitGlobalValue(GV);
982}
983
984void Verifier::visitAliaseeSubExpr(const GlobalAlias &GA, const Constant &C) {
985 SmallPtrSet<const GlobalAlias*, 4> Visited;
986 Visited.insert(&GA);
987 visitAliaseeSubExpr(Visited, GA, C);
988}
989
// Recursively verify the constant expression an alias resolves to: it must
// (transitively) reach a definition, aliases must not form cycles, and an
// alias must not point at an interposable alias. `Visited` tracks the
// aliases already seen along the current chain to detect cycles.
void Verifier::visitAliaseeSubExpr(SmallPtrSetImpl<const GlobalAlias*> &Visited,
                                   const GlobalAlias &GA, const Constant &C) {
    cast<GlobalValue>(C).hasAvailableExternallyLinkage(),
        "available_externally alias must point to available_externally "
        "global value",
        &GA);
  }
  if (const auto *GV = dyn_cast<GlobalValue>(&C)) {
      Check(!GV->isDeclarationForLinker(), "Alias must point to a definition",
            &GA);
    }

    if (const auto *GA2 = dyn_cast<GlobalAlias>(GV)) {
      // insert() returning false means GA2 was already on this chain: cycle.
      Check(Visited.insert(GA2).second, "Aliases cannot form a cycle", &GA);

      Check(!GA2->isInterposable(),
            "Alias cannot point to an interposable alias", &GA);
    } else {
      // Only continue verifying subexpressions of GlobalAliases.
      // Do not recurse into global initializers.
      return;
    }
  }

  if (const auto *CE = dyn_cast<ConstantExpr>(&C))
    visitConstantExprsRecursively(CE);

  // Recurse through the operands; for aliases, follow their aliasee.
  for (const Use &U : C.operands()) {
    Value *V = &*U;
    if (const auto *GA2 = dyn_cast<GlobalAlias>(V))
      visitAliaseeSubExpr(Visited, GA, *GA2->getAliasee());
    else if (const auto *C2 = dyn_cast<Constant>(V))
      visitAliaseeSubExpr(Visited, GA, *C2);
  }
}
1028
// Verify a GlobalAlias: its linkage, that it has a non-null aliasee whose
// type matches the alias, and that the aliasee expression is well-formed.
void Verifier::visitGlobalAlias(const GlobalAlias &GA) {
        "Alias should have private, internal, linkonce, weak, linkonce_odr, "
        "weak_odr, external, or available_externally linkage!",
        &GA);
  const Constant *Aliasee = GA.getAliasee();
  Check(Aliasee, "Aliasee cannot be NULL!", &GA);
  Check(GA.getType() == Aliasee->getType(),
        "Alias and aliasee types should match!", &GA);

  Check(isa<GlobalValue>(Aliasee) || isa<ConstantExpr>(Aliasee),
        "Aliasee should be either GlobalValue or ConstantExpr", &GA);

  // Walk the aliasee expression (cycle detection, definition checks, ...).
  visitAliaseeSubExpr(GA, *Aliasee);

  // Checks common to all global values.
  visitGlobalValue(GA);
}
1046
// Verify a GlobalIFunc: metadata restrictions (no !dbg / !prof), linkage,
// and that the resolver is a defined Function returning a pointer of the
// ifunc's address space.
void Verifier::visitGlobalIFunc(const GlobalIFunc &GI) {
  visitGlobalValue(GI);

  GI.getAllMetadata(MDs);
  for (const auto &I : MDs) {
    // I.first is the metadata kind ID, I.second the attached node.
    CheckDI(I.first != LLVMContext::MD_dbg,
            "an ifunc may not have a !dbg attachment", &GI);
    Check(I.first != LLVMContext::MD_prof,
          "an ifunc may not have a !prof attachment", &GI);
    visitMDNode(*I.second, AreDebugLocsAllowed::No);
  }

          "IFunc should have private, internal, linkonce, weak, linkonce_odr, "
          "weak_odr, or external linkage!",
          &GI);
  // Pierce through ConstantExprs and GlobalAliases and check that the resolver
  // is a Function definition.
  const Function *Resolver = GI.getResolverFunction();
  Check(Resolver, "IFunc must have a Function resolver", &GI);
  Check(!Resolver->isDeclarationForLinker(),
        "IFunc resolver must be a definition", &GI);

  // Check that the immediate resolver operand (prior to any bitcasts) has the
  // correct type.
  const Type *ResolverTy = GI.getResolver()->getType();

        "IFunc resolver must return a pointer", &GI);

  Check(ResolverTy == PointerType::get(Context, GI.getAddressSpace()),
        "IFunc resolver has incorrect type", &GI);
}
1081
1082void Verifier::visitNamedMDNode(const NamedMDNode &NMD) {
1083 // There used to be various other llvm.dbg.* nodes, but we don't support
1084 // upgrading them and we want to reserve the namespace for future uses.
1085 if (NMD.getName().starts_with("llvm.dbg."))
1086 CheckDI(NMD.getName() == "llvm.dbg.cu",
1087 "unrecognized named metadata node in the llvm.dbg namespace", &NMD);
1088 for (const MDNode *MD : NMD.operands()) {
1089 if (NMD.getName() == "llvm.dbg.cu")
1090 CheckDI(MD && isa<DICompileUnit>(MD), "invalid compile unit", &NMD, MD);
1091
1092 if (!MD)
1093 continue;
1094
1095 visitMDNode(*MD, AreDebugLocsAllowed::Yes);
1096 }
1097}
1098
// Verify a generic MDNode: context, subclass dispatch, operand legality,
// and resolution state. `AllowLocs` controls whether DILocation operands
// are legal in this position.
void Verifier::visitMDNode(const MDNode &MD, AreDebugLocsAllowed AllowLocs) {
  // Only visit each node once. Metadata can be mutually recursive, so this
  // avoids infinite recursion here, as well as being an optimization.
  if (!MDNodes.insert(&MD).second)
    return;

  Check(&MD.getContext() == &Context,
        "MDNode context does not match Module context!", &MD);

  // Dispatch to the specialized visitor for each MDNode subclass.
  switch (MD.getMetadataID()) {
  default:
    llvm_unreachable("Invalid MDNode subclass");
  case Metadata::MDTupleKind:
    break;
#define HANDLE_SPECIALIZED_MDNODE_LEAF(CLASS)                                  \
  case Metadata::CLASS##Kind:                                                  \
    visit##CLASS(cast<CLASS>(MD));                                             \
    break;
#include "llvm/IR/Metadata.def"
  }

  for (const Metadata *Op : MD.operands()) {
    if (!Op)
      continue;
    // Function-local values may not appear in global metadata.
    Check(!isa<LocalAsMetadata>(Op), "Invalid operand for global metadata!",
          &MD, Op);
    CheckDI(!isa<DILocation>(Op) || AllowLocs == AreDebugLocsAllowed::Yes,
            "DILocation not allowed within this metadata node", &MD, Op);
    if (auto *N = dyn_cast<MDNode>(Op)) {
      visitMDNode(*N, AllowLocs);
      continue;
    }
    if (auto *V = dyn_cast<ValueAsMetadata>(Op)) {
      visitValueAsMetadata(*V, nullptr);
      continue;
    }
  }

  // Check llvm.loop.estimated_trip_count.
  if (MD.getNumOperands() > 0 &&
    Check(MD.getNumOperands() == 2, "Expected two operands", &MD);
    Check(Count && Count->getType()->isIntegerTy() &&
              cast<IntegerType>(Count->getType())->getBitWidth() <= 32,
          "Expected second operand to be an integer constant of type i32 or "
          "smaller",
          &MD);
  }

  // Check these last, so we diagnose problems in operands first.
  Check(!MD.isTemporary(), "Expected no forward declarations!", &MD);
  Check(MD.isResolved(), "All nodes should be resolved!", &MD);
}
1153
1154void Verifier::visitValueAsMetadata(const ValueAsMetadata &MD, Function *F) {
1155 Check(MD.getValue(), "Expected valid value", &MD);
1156 Check(!MD.getValue()->getType()->isMetadataTy(),
1157 "Unexpected metadata round-trip through values", &MD, MD.getValue());
1158
1159 auto *L = dyn_cast<LocalAsMetadata>(&MD);
1160 if (!L)
1161 return;
1162
1163 Check(F, "function-local metadata used outside a function", L);
1164
1165 // If this was an instruction, bb, or argument, verify that it is in the
1166 // function that we expect.
1167 Function *ActualF = nullptr;
1168 if (Instruction *I = dyn_cast<Instruction>(L->getValue())) {
1169 Check(I->getParent(), "function-local metadata not in basic block", L, I);
1170 ActualF = I->getParent()->getParent();
1171 } else if (BasicBlock *BB = dyn_cast<BasicBlock>(L->getValue()))
1172 ActualF = BB->getParent();
1173 else if (Argument *A = dyn_cast<Argument>(L->getValue()))
1174 ActualF = A->getParent();
1175 assert(ActualF && "Unimplemented function local metadata case!");
1176
1177 Check(ActualF == F, "function-local metadata used in wrong function", L);
1178}
1179
1180void Verifier::visitDIArgList(const DIArgList &AL, Function *F) {
1181 for (const ValueAsMetadata *VAM : AL.getArgs())
1182 visitValueAsMetadata(*VAM, F);
1183}
1184
1185void Verifier::visitMetadataAsValue(const MetadataAsValue &MDV, Function *F) {
1186 Metadata *MD = MDV.getMetadata();
1187 if (auto *N = dyn_cast<MDNode>(MD)) {
1188 visitMDNode(*N, AreDebugLocsAllowed::No);
1189 return;
1190 }
1191
1192 // Only visit each node once. Metadata can be mutually recursive, so this
1193 // avoids infinite recursion here, as well as being an optimization.
1194 if (!MDNodes.insert(MD).second)
1195 return;
1196
1197 if (auto *V = dyn_cast<ValueAsMetadata>(MD))
1198 visitValueAsMetadata(*V, F);
1199
1200 if (auto *AL = dyn_cast<DIArgList>(MD))
1201 visitDIArgList(*AL, F);
1202}
1203
1204static bool isType(const Metadata *MD) { return !MD || isa<DIType>(MD); }
1205static bool isScope(const Metadata *MD) { return !MD || isa<DIScope>(MD); }
1206static bool isDINode(const Metadata *MD) { return !MD || isa<DINode>(MD); }
1207static bool isMDTuple(const Metadata *MD) { return !MD || isa<MDTuple>(MD); }
1208
1209void Verifier::visitDILocation(const DILocation &N) {
1210 CheckDI(N.getRawScope() && isa<DILocalScope>(N.getRawScope()),
1211 "location requires a valid scope", &N, N.getRawScope());
1212 if (auto *IA = N.getRawInlinedAt())
1213 CheckDI(isa<DILocation>(IA), "inlined-at should be a location", &N, IA);
1214 if (auto *SP = dyn_cast<DISubprogram>(N.getRawScope()))
1215 CheckDI(SP->isDefinition(), "scope points into the type hierarchy", &N);
1216}
1217
1218void Verifier::visitGenericDINode(const GenericDINode &N) {
1219 CheckDI(N.getTag(), "invalid tag", &N);
1220}
1221
1222void Verifier::visitDIScope(const DIScope &N) {
1223 if (auto *F = N.getRawFile())
1224 CheckDI(isa<DIFile>(F), "invalid file", &N, F);
1225}
1226
1227void Verifier::visitDIType(const DIType &N) {
1228 CheckDI(isScope(N.getRawScope()), "invalid scope", &N, N.getRawScope());
1229 visitDIScope(N);
1230 CheckDI(N.getRawFile() || N.getLine() == 0, "line specified with no file", &N,
1231 N.getLine());
1232}
1233
// Verify a DISubrangeType: tag, optional base type, and the bound/stride/bias
// fields, each of which may be a constant, DIVariable, or DIExpression
// (bounds additionally allow DIDerivedType).
void Verifier::visitDISubrangeType(const DISubrangeType &N) {
  visitDIType(N);

  CheckDI(N.getTag() == dwarf::DW_TAG_subrange_type, "invalid tag", &N);
  auto *BaseType = N.getRawBaseType();
  CheckDI(!BaseType || isType(BaseType), "BaseType must be a type");
  auto *LBound = N.getRawLowerBound();
  CheckDI(!LBound || isa<ConstantAsMetadata>(LBound) ||
              isa<DIVariable>(LBound) || isa<DIExpression>(LBound) ||
              isa<DIDerivedType>(LBound),
          "LowerBound must be signed constant or DIVariable or DIExpression or "
          "DIDerivedType",
          &N);
  auto *UBound = N.getRawUpperBound();
  CheckDI(!UBound || isa<ConstantAsMetadata>(UBound) ||
              isa<DIVariable>(UBound) || isa<DIExpression>(UBound) ||
              isa<DIDerivedType>(UBound),
          "UpperBound must be signed constant or DIVariable or DIExpression or "
          "DIDerivedType",
          &N);
  auto *Stride = N.getRawStride();
  CheckDI(!Stride || isa<ConstantAsMetadata>(Stride) ||
              isa<DIVariable>(Stride) || isa<DIExpression>(Stride),
          "Stride must be signed constant or DIVariable or DIExpression", &N);
  auto *Bias = N.getRawBias();
  CheckDI(!Bias || isa<ConstantAsMetadata>(Bias) || isa<DIVariable>(Bias) ||
              isa<DIExpression>(Bias),
          "Bias must be signed constant or DIVariable or DIExpression", &N);
  // Subrange types currently only support constant size.
  auto *Size = N.getRawSizeInBits();
          "SizeInBits must be a constant");
}
1267
// Verify a DISubrange: tag, mutual exclusivity of count/upperBound, and the
// legal forms of count and the bound/stride fields.
void Verifier::visitDISubrange(const DISubrange &N) {
  CheckDI(N.getTag() == dwarf::DW_TAG_subrange_type, "invalid tag", &N);
  // count and upperBound may not both be present.
  CheckDI(!N.getRawCountNode() || !N.getRawUpperBound(),
          "Subrange can have any one of count or upperBound", &N);
  auto *CBound = N.getRawCountNode();
  CheckDI(!CBound || isa<ConstantAsMetadata>(CBound) ||
              isa<DIVariable>(CBound) || isa<DIExpression>(CBound),
          "Count must be signed constant or DIVariable or DIExpression", &N);
  auto Count = N.getCount();
          cast<ConstantInt *>(Count)->getSExtValue() >= -1,
          "invalid subrange count", &N);
  auto *LBound = N.getRawLowerBound();
  CheckDI(!LBound || isa<ConstantAsMetadata>(LBound) ||
              isa<DIVariable>(LBound) || isa<DIExpression>(LBound),
          "LowerBound must be signed constant or DIVariable or DIExpression",
          &N);
  auto *UBound = N.getRawUpperBound();
  CheckDI(!UBound || isa<ConstantAsMetadata>(UBound) ||
              isa<DIVariable>(UBound) || isa<DIExpression>(UBound),
          "UpperBound must be signed constant or DIVariable or DIExpression",
          &N);
  auto *Stride = N.getRawStride();
  CheckDI(!Stride || isa<ConstantAsMetadata>(Stride) ||
              isa<DIVariable>(Stride) || isa<DIExpression>(Stride),
          "Stride must be signed constant or DIVariable or DIExpression", &N);
}
1295
1296void Verifier::visitDIGenericSubrange(const DIGenericSubrange &N) {
1297 CheckDI(N.getTag() == dwarf::DW_TAG_generic_subrange, "invalid tag", &N);
1298 CheckDI(!N.getRawCountNode() || !N.getRawUpperBound(),
1299 "GenericSubrange can have any one of count or upperBound", &N);
1300 auto *CBound = N.getRawCountNode();
1301 CheckDI(!CBound || isa<DIVariable>(CBound) || isa<DIExpression>(CBound),
1302 "Count must be signed constant or DIVariable or DIExpression", &N);
1303 auto *LBound = N.getRawLowerBound();
1304 CheckDI(LBound, "GenericSubrange must contain lowerBound", &N);
1305 CheckDI(isa<DIVariable>(LBound) || isa<DIExpression>(LBound),
1306 "LowerBound must be signed constant or DIVariable or DIExpression",
1307 &N);
1308 auto *UBound = N.getRawUpperBound();
1309 CheckDI(!UBound || isa<DIVariable>(UBound) || isa<DIExpression>(UBound),
1310 "UpperBound must be signed constant or DIVariable or DIExpression",
1311 &N);
1312 auto *Stride = N.getRawStride();
1313 CheckDI(Stride, "GenericSubrange must contain stride", &N);
1314 CheckDI(isa<DIVariable>(Stride) || isa<DIExpression>(Stride),
1315 "Stride must be signed constant or DIVariable or DIExpression", &N);
1316}
1317
1318void Verifier::visitDIEnumerator(const DIEnumerator &N) {
1319 CheckDI(N.getTag() == dwarf::DW_TAG_enumerator, "invalid tag", &N);
1320}
1321
// Verify a DIBasicType: one of the three legal tags, plus the common type
// checks; its size must be constant.
void Verifier::visitDIBasicType(const DIBasicType &N) {
  visitDIType(N);

  CheckDI(N.getTag() == dwarf::DW_TAG_base_type ||
              N.getTag() == dwarf::DW_TAG_unspecified_type ||
              N.getTag() == dwarf::DW_TAG_string_type,
          "invalid tag", &N);
  // Basic types currently only support constant size.
  auto *Size = N.getRawSizeInBits();
          "SizeInBits must be a constant");
}
1334
// Verify a DIFixedPointType: must be a signed/unsigned fixed-point base type;
// the factor field is only used for non-rational kinds and the
// numerator/denominator pair only for rationals.
void Verifier::visitDIFixedPointType(const DIFixedPointType &N) {
  visitDIBasicType(N);

  CheckDI(N.getTag() == dwarf::DW_TAG_base_type, "invalid tag", &N);
  CheckDI(N.getEncoding() == dwarf::DW_ATE_signed_fixed ||
              N.getEncoding() == dwarf::DW_ATE_unsigned_fixed,
          "invalid encoding", &N);
          "invalid kind", &N);
          N.getFactorRaw() == 0,
          "factor should be 0 for rationals", &N);
          (N.getNumeratorRaw() == 0 && N.getDenominatorRaw() == 0),
          "numerator and denominator should be 0 for non-rationals", &N);
}
1353
1354void Verifier::visitDIStringType(const DIStringType &N) {
1355 visitDIType(N);
1356
1357 CheckDI(N.getTag() == dwarf::DW_TAG_string_type, "invalid tag", &N);
1358 CheckDI(!(N.isBigEndian() && N.isLittleEndian()), "has conflicting flags",
1359 &N);
1360}
1361
// Verify a DIDerivedType: legal tag set, tag-specific extraData shapes,
// DW_TAG_set_type base-type restrictions, DWARF address-space applicability,
// and the size field.
void Verifier::visitDIDerivedType(const DIDerivedType &N) {
  // Common type checks.
  visitDIType(N);

  CheckDI(N.getTag() == dwarf::DW_TAG_typedef ||
              N.getTag() == dwarf::DW_TAG_pointer_type ||
              N.getTag() == dwarf::DW_TAG_ptr_to_member_type ||
              N.getTag() == dwarf::DW_TAG_reference_type ||
              N.getTag() == dwarf::DW_TAG_rvalue_reference_type ||
              N.getTag() == dwarf::DW_TAG_const_type ||
              N.getTag() == dwarf::DW_TAG_immutable_type ||
              N.getTag() == dwarf::DW_TAG_volatile_type ||
              N.getTag() == dwarf::DW_TAG_restrict_type ||
              N.getTag() == dwarf::DW_TAG_atomic_type ||
              N.getTag() == dwarf::DW_TAG_LLVM_ptrauth_type ||
              N.getTag() == dwarf::DW_TAG_member ||
              (N.getTag() == dwarf::DW_TAG_variable && N.isStaticMember()) ||
              N.getTag() == dwarf::DW_TAG_inheritance ||
              N.getTag() == dwarf::DW_TAG_friend ||
              N.getTag() == dwarf::DW_TAG_set_type ||
              N.getTag() == dwarf::DW_TAG_template_alias,
          "invalid tag", &N);
  // extraData has a tag-specific meaning; validate its shape per tag.
  if (N.getTag() == dwarf::DW_TAG_ptr_to_member_type) {
    CheckDI(isType(N.getRawExtraData()), "invalid pointer to member type", &N,
            N.getRawExtraData());
  } else if (N.getTag() == dwarf::DW_TAG_template_alias) {
    CheckDI(isMDTuple(N.getRawExtraData()), "invalid template parameters", &N,
            N.getRawExtraData());
  } else if (N.getTag() == dwarf::DW_TAG_inheritance ||
             N.getTag() == dwarf::DW_TAG_member ||
             N.getTag() == dwarf::DW_TAG_variable) {
    auto *ExtraData = N.getRawExtraData();
    // Accept: absent, ConstantAsMetadata, MDString, DIObjCProperty, or an
    // MDTuple wrapping a single ConstantAsMetadata.
    auto IsValidExtraData = [&]() {
      if (ExtraData == nullptr)
        return true;
      if (isa<ConstantAsMetadata>(ExtraData) || isa<MDString>(ExtraData) ||
          isa<DIObjCProperty>(ExtraData))
        return true;
      if (auto *Tuple = dyn_cast<MDTuple>(ExtraData)) {
        if (Tuple->getNumOperands() != 1)
          return false;
        return isa_and_nonnull<ConstantAsMetadata>(Tuple->getOperand(0).get());
      }
      return false;
    };
    CheckDI(IsValidExtraData(),
            "extraData must be ConstantAsMetadata, MDString, DIObjCProperty, "
            "or MDTuple with single ConstantAsMetadata operand",
            &N, ExtraData);
  }

  // A set type's base must be an enumeration, a subrange, or certain
  // integral/boolean basic-type encodings.
  if (N.getTag() == dwarf::DW_TAG_set_type) {
    if (auto *T = N.getRawBaseType()) {
      CheckDI(
          (Enum && Enum->getTag() == dwarf::DW_TAG_enumeration_type) ||
              (Subrange && Subrange->getTag() == dwarf::DW_TAG_subrange_type) ||
              (Basic && (Basic->getEncoding() == dwarf::DW_ATE_unsigned ||
                         Basic->getEncoding() == dwarf::DW_ATE_signed ||
                         Basic->getEncoding() == dwarf::DW_ATE_unsigned_char ||
                         Basic->getEncoding() == dwarf::DW_ATE_signed_char ||
                         Basic->getEncoding() == dwarf::DW_ATE_boolean)),
          "invalid set base type", &N, T);
    }
  }

  CheckDI(isType(N.getRawBaseType()), "invalid base type", &N,
          N.getRawBaseType());

  // DWARF address spaces only make sense on pointer-like tags.
  if (N.getDWARFAddressSpace()) {
    CheckDI(N.getTag() == dwarf::DW_TAG_pointer_type ||
                N.getTag() == dwarf::DW_TAG_reference_type ||
                N.getTag() == dwarf::DW_TAG_rvalue_reference_type,
            "DWARF address space only applies to pointer or reference types",
            &N);
  }

  auto *Size = N.getRawSizeInBits();
          "SizeInBits must be a constant or DIVariable or DIExpression");
}
1446
1447/// Detect mutually exclusive flags.
1448static bool hasConflictingReferenceFlags(unsigned Flags) {
1449 return ((Flags & DINode::FlagLValueReference) &&
1450 (Flags & DINode::FlagRValueReference)) ||
1451 ((Flags & DINode::FlagTypePassByValue) &&
1452 (Flags & DINode::FlagTypePassByReference));
1453}
1454
1455void Verifier::visitTemplateParams(const MDNode &N, const Metadata &RawParams) {
1456 auto *Params = dyn_cast<MDTuple>(&RawParams);
1457 CheckDI(Params, "invalid template params", &N, &RawParams);
1458 for (Metadata *Op : Params->operands()) {
1459 CheckDI(Op && isa<DITemplateParameter>(Op), "invalid template parameter",
1460 &N, Params, Op);
1461 }
1462}
1463
// Verify a DICompositeType: legal tag set, element list shape, vector
// constraints, template parameters, and that the Fortran array fields
// (dataLocation/associated/allocated/rank) only appear on array types.
void Verifier::visitDICompositeType(const DICompositeType &N) {
  // Common type checks.
  visitDIType(N);

  CheckDI(N.getTag() == dwarf::DW_TAG_array_type ||
              N.getTag() == dwarf::DW_TAG_structure_type ||
              N.getTag() == dwarf::DW_TAG_union_type ||
              N.getTag() == dwarf::DW_TAG_enumeration_type ||
              N.getTag() == dwarf::DW_TAG_class_type ||
              N.getTag() == dwarf::DW_TAG_variant_part ||
              N.getTag() == dwarf::DW_TAG_variant ||
              N.getTag() == dwarf::DW_TAG_namelist,
          "invalid tag", &N);

  CheckDI(isType(N.getRawBaseType()), "invalid base type", &N,
          N.getRawBaseType());

  CheckDI(!N.getRawElements() || isa<MDTuple>(N.getRawElements()),
          "invalid composite elements", &N, N.getRawElements());
  CheckDI(isType(N.getRawVTableHolder()), "invalid vtable holder", &N,
          N.getRawVTableHolder());
          "invalid reference flags", &N);
  // Legacy flag bit; rejected outright.
  unsigned DIBlockByRefStruct = 1 << 4;
  CheckDI((N.getFlags() & DIBlockByRefStruct) == 0,
          "DIBlockByRefStruct on DICompositeType is no longer supported", &N);
  CheckDI(llvm::all_of(N.getElements(), [](const DINode *N) { return N; }),
          "DISubprogram contains null entry in `elements` field", &N);

  // A vector is an array with exactly one subrange element.
  if (N.isVector()) {
    const DINodeArray Elements = N.getElements();
    CheckDI(Elements.size() == 1 &&
                Elements[0]->getTag() == dwarf::DW_TAG_subrange_type,
            "invalid vector, expected one element of type subrange", &N);
  }

  if (auto *Params = N.getRawTemplateParams())
    visitTemplateParams(N, *Params);

  if (auto *D = N.getRawDiscriminator()) {
    CheckDI(isa<DIDerivedType>(D) && N.getTag() == dwarf::DW_TAG_variant_part,
            "discriminator can only appear on variant part");
  }

  if (N.getRawDataLocation()) {
    CheckDI(N.getTag() == dwarf::DW_TAG_array_type,
            "dataLocation can only appear in array type");
  }

  if (N.getRawAssociated()) {
    CheckDI(N.getTag() == dwarf::DW_TAG_array_type,
            "associated can only appear in array type");
  }

  if (N.getRawAllocated()) {
    CheckDI(N.getTag() == dwarf::DW_TAG_array_type,
            "allocated can only appear in array type");
  }

  if (N.getRawRank()) {
    CheckDI(N.getTag() == dwarf::DW_TAG_array_type,
            "rank can only appear in array type");
  }

  if (N.getTag() == dwarf::DW_TAG_array_type) {
    CheckDI(N.getRawBaseType(), "array types must have a base type", &N);
  }

  auto *Size = N.getRawSizeInBits();
          "SizeInBits must be a constant or DIVariable or DIExpression");
}
1537
// Verify a DISubroutineType: tag, and that the type array (return type plus
// parameter types) is an MDTuple of type references.
void Verifier::visitDISubroutineType(const DISubroutineType &N) {
  visitDIType(N);
  CheckDI(N.getTag() == dwarf::DW_TAG_subroutine_type, "invalid tag", &N);
  if (auto *Types = N.getRawTypeArray()) {
    CheckDI(isa<MDTuple>(Types), "invalid composite elements", &N, Types);
    for (Metadata *Ty : N.getTypeArray()->operands()) {
      CheckDI(isType(Ty), "invalid subroutine type ref", &N, Types, Ty);
    }
  }
          "invalid reference flags", &N);
}
1550
1551void Verifier::visitDIFile(const DIFile &N) {
1552 CheckDI(N.getTag() == dwarf::DW_TAG_file_type, "invalid tag", &N);
1553 std::optional<DIFile::ChecksumInfo<StringRef>> Checksum = N.getChecksum();
1554 if (Checksum) {
1555 CheckDI(Checksum->Kind <= DIFile::ChecksumKind::CSK_Last,
1556 "invalid checksum kind", &N);
1557 size_t Size;
1558 switch (Checksum->Kind) {
1559 case DIFile::CSK_MD5:
1560 Size = 32;
1561 break;
1562 case DIFile::CSK_SHA1:
1563 Size = 40;
1564 break;
1565 case DIFile::CSK_SHA256:
1566 Size = 64;
1567 break;
1568 }
1569 CheckDI(Checksum->Value.size() == Size, "invalid checksum length", &N);
1570 CheckDI(Checksum->Value.find_if_not(llvm::isHexDigit) == StringRef::npos,
1571 "invalid checksum", &N);
1572 }
1573}
1574
// Verify a DICompileUnit: distinctness, file, emission kind, and the shapes
// of the enum / retained-type / global-variable / imported-entity / macro
// lists. Records the unit in CUVisited for later cross-checks.
void Verifier::visitDICompileUnit(const DICompileUnit &N) {
  CheckDI(N.isDistinct(), "compile units must be distinct", &N);
  CheckDI(N.getTag() == dwarf::DW_TAG_compile_unit, "invalid tag", &N);

  // Don't bother verifying the compilation directory or producer string
  // as those could be empty.
  CheckDI(N.getRawFile() && isa<DIFile>(N.getRawFile()), "invalid file", &N,
          N.getRawFile());
  CheckDI(!N.getFile()->getFilename().empty(), "invalid filename", &N,
          N.getFile());

  CheckDI((N.getEmissionKind() <= DICompileUnit::LastEmissionKind),
          "invalid emission kind", &N);

  if (auto *Array = N.getRawEnumTypes()) {
    CheckDI(isa<MDTuple>(Array), "invalid enum list", &N, Array);
    for (Metadata *Op : N.getEnumTypes()->operands()) {
      CheckDI(Enum && Enum->getTag() == dwarf::DW_TAG_enumeration_type,
              "invalid enum type", &N, N.getEnumTypes(), Op);
      // Function-local enums must not be listed at the unit level.
      CheckDI(!Enum->getScope() || !isa<DILocalScope>(Enum->getScope()),
              "function-local enum in a DICompileUnit's enum list", &N,
              N.getEnumTypes(), Op);
    }
  }
  if (auto *Array = N.getRawRetainedTypes()) {
    CheckDI(isa<MDTuple>(Array), "invalid retained type list", &N, Array);
    for (Metadata *Op : N.getRetainedTypes()->operands()) {
      // Retained entries are types, or subprogram declarations (never
      // definitions).
      CheckDI(
          Op && (isa<DIType>(Op) || (isa<DISubprogram>(Op) &&
                                     !cast<DISubprogram>(Op)->isDefinition())),
          "invalid retained type", &N, Op);
    }
  }
  if (auto *Array = N.getRawGlobalVariables()) {
    CheckDI(isa<MDTuple>(Array), "invalid global variable list", &N, Array);
    for (Metadata *Op : N.getGlobalVariables()->operands()) {
              "invalid global variable ref", &N, Op);
    }
  }
  if (auto *Array = N.getRawImportedEntities()) {
    CheckDI(isa<MDTuple>(Array), "invalid imported entity list", &N, Array);
    for (Metadata *Op : N.getImportedEntities()->operands()) {
      CheckDI(IE, "invalid imported entity ref", &N, Op);
              "function-local imports are not allowed in a DICompileUnit's "
              "imported entities list",
              &N, Op);
    }
  }
  if (auto *Array = N.getRawMacros()) {
    CheckDI(isa<MDTuple>(Array), "invalid macro list", &N, Array);
    for (Metadata *Op : N.getMacros()->operands()) {
      CheckDI(Op && isa<DIMacroNode>(Op), "invalid macro ref", &N, Op);
    }
  }
  CUVisited.insert(&N);
}
1635
// Verify a DISubprogram: tag, scope/file/type references, the retained-nodes
// list (locality, ownership, unique argument numbering), flags, and the
// definition-vs-declaration invariants around the compile unit.
void Verifier::visitDISubprogram(const DISubprogram &N) {
  CheckDI(N.getTag() == dwarf::DW_TAG_subprogram, "invalid tag", &N);
  CheckDI(isScope(N.getRawScope()), "invalid scope", &N, N.getRawScope());
  if (auto *F = N.getRawFile())
    CheckDI(isa<DIFile>(F), "invalid file", &N, F);
  else
    CheckDI(N.getLine() == 0, "line specified with no file", &N, N.getLine());
  if (auto *T = N.getRawType())
    CheckDI(isa<DISubroutineType>(T), "invalid subroutine type", &N, T);
  CheckDI(isType(N.getRawContainingType()), "invalid containing type", &N,
          N.getRawContainingType());
  if (auto *Params = N.getRawTemplateParams())
    visitTemplateParams(N, *Params);
  if (auto *S = N.getRawDeclaration())
    CheckDI(isa<DISubprogram>(S) && !cast<DISubprogram>(S)->isDefinition(),
            "invalid subprogram declaration", &N, S);
  if (auto *RawNode = N.getRawRetainedNodes()) {
    auto *Node = dyn_cast<MDTuple>(RawNode);
    CheckDI(Node, "invalid retained nodes list", &N, RawNode);

    // Tracks argument numbers already claimed by a local variable.
    DenseMap<unsigned, DILocalVariable *> Args;
    for (Metadata *Op : Node->operands()) {
      CheckDI(Op, "nullptr in retained nodes", &N, Node);

      // Accept exactly the retained-node kinds visitRetainedNode dispatches
      // over (the final callback rejects everything else).
      auto True = [](const Metadata *) { return true; };
      auto False = [](const Metadata *) { return false; };
      bool IsTypeCorrect = DISubprogram::visitRetainedNode<bool>(
          Op, True, True, True, True, False);
      CheckDI(IsTypeCorrect,
              "invalid retained nodes, expected DILocalVariable, DILabel, "
              "DIImportedEntity or DIType",
              &N, Node, Op);

      auto *RetainedNode = cast<DINode>(Op);
      auto *RetainedNodeScope = dyn_cast_or_null<DILocalScope>(
      CheckDI(RetainedNodeScope,
              "invalid retained nodes, retained node is not local", &N, Node,
              RetainedNode);

      // Each retained node must belong to this subprogram.
      DISubprogram *RetainedNodeSP = RetainedNodeScope->getSubprogram();
      DICompileUnit *RetainedNodeUnit =
          RetainedNodeSP ? RetainedNodeSP->getUnit() : nullptr;
      CheckDI(
          RetainedNodeSP == &N,
          "invalid retained nodes, retained node does not belong to subprogram",
          &N, Node, RetainedNode, RetainedNodeScope, RetainedNodeSP,
          RetainedNodeUnit);

      // Argument numbers on local variables must be unique.
      auto *DV = dyn_cast<DILocalVariable>(RetainedNode);
      if (!DV)
        continue;
      if (unsigned ArgNum = DV->getArg()) {
        auto [ArgI, Inserted] = Args.insert({ArgNum, DV});
        CheckDI(Inserted || DV == ArgI->second,
                "invalid retained nodes, more than one local variable with the "
                "same argument index",
                &N, N.getUnit(), Node, RetainedNode, Args[ArgNum]);
      }
    }
  }
          "invalid reference flags", &N);

  auto *Unit = N.getRawUnit();
  if (N.isDefinition()) {
    // Subprogram definitions (not part of the type hierarchy).
    CheckDI(N.isDistinct(), "subprogram definitions must be distinct", &N);
    CheckDI(Unit, "subprogram definitions must have a compile unit", &N);
    CheckDI(isa<DICompileUnit>(Unit), "invalid unit type", &N, Unit);
    // There's no good way to cross the CU boundary to insert a nested
    // DISubprogram definition in one CU into a type defined in another CU.
    auto *CT = dyn_cast_or_null<DICompositeType>(N.getRawScope());
    if (CT && CT->getRawIdentifier() &&
        M.getContext().isODRUniquingDebugTypes())
      CheckDI(N.getDeclaration(),
              "definition subprograms cannot be nested within DICompositeType "
              "when enabling ODR",
              &N);
  } else {
    // Subprogram declarations (part of the type hierarchy).
    CheckDI(!Unit, "subprogram declarations must not have a compile unit", &N);
    CheckDI(!N.getRawDeclaration(),
            "subprogram declaration must not have a declaration field");
  }

  if (auto *RawThrownTypes = N.getRawThrownTypes()) {
    auto *ThrownTypes = dyn_cast<MDTuple>(RawThrownTypes);
    CheckDI(ThrownTypes, "invalid thrown types list", &N, RawThrownTypes);
    for (Metadata *Op : ThrownTypes->operands())
      CheckDI(Op && isa<DIType>(Op), "invalid thrown type", &N, ThrownTypes,
              Op);
  }

  if (N.areAllCallsDescribed())
    CheckDI(N.isDefinition(),
            "DIFlagAllCallsDescribed must be attached to a definition");
}
1734
1735void Verifier::visitDILexicalBlockBase(const DILexicalBlockBase &N) {
1736 CheckDI(N.getTag() == dwarf::DW_TAG_lexical_block, "invalid tag", &N);
1737 CheckDI(N.getRawScope() && isa<DILocalScope>(N.getRawScope()),
1738 "invalid local scope", &N, N.getRawScope());
1739 if (auto *SP = dyn_cast<DISubprogram>(N.getRawScope()))
1740 CheckDI(SP->isDefinition(), "scope points into the type hierarchy", &N);
1741}
1742
1743void Verifier::visitDILexicalBlock(const DILexicalBlock &N) {
1744 visitDILexicalBlockBase(N);
1745
1746 CheckDI(N.getLine() || !N.getColumn(),
1747 "cannot have column info without line info", &N);
1748}
1749
1750void Verifier::visitDILexicalBlockFile(const DILexicalBlockFile &N) {
1751 visitDILexicalBlockBase(N);
1752}
1753
1754void Verifier::visitDICommonBlock(const DICommonBlock &N) {
1755 CheckDI(N.getTag() == dwarf::DW_TAG_common_block, "invalid tag", &N);
1756 if (auto *S = N.getRawScope())
1757 CheckDI(isa<DIScope>(S), "invalid scope ref", &N, S);
1758 if (auto *S = N.getRawDecl())
1759 CheckDI(isa<DIGlobalVariable>(S), "invalid declaration", &N, S);
1760}
1761
1762void Verifier::visitDINamespace(const DINamespace &N) {
1763 CheckDI(N.getTag() == dwarf::DW_TAG_namespace, "invalid tag", &N);
1764 if (auto *S = N.getRawScope())
1765 CheckDI(isa<DIScope>(S), "invalid scope ref", &N, S);
1766}
1767
1768void Verifier::visitDIMacro(const DIMacro &N) {
1769 CheckDI(N.getMacinfoType() == dwarf::DW_MACINFO_define ||
1770 N.getMacinfoType() == dwarf::DW_MACINFO_undef,
1771 "invalid macinfo type", &N);
1772 CheckDI(!N.getName().empty(), "anonymous macro", &N);
1773 if (!N.getValue().empty()) {
1774 assert(N.getValue().data()[0] != ' ' && "Macro value has a space prefix");
1775 }
1776}
1777
1778void Verifier::visitDIMacroFile(const DIMacroFile &N) {
1779 CheckDI(N.getMacinfoType() == dwarf::DW_MACINFO_start_file,
1780 "invalid macinfo type", &N);
1781 if (auto *F = N.getRawFile())
1782 CheckDI(isa<DIFile>(F), "invalid file", &N, F);
1783
1784 if (auto *Array = N.getRawElements()) {
1785 CheckDI(isa<MDTuple>(Array), "invalid macro list", &N, Array);
1786 for (Metadata *Op : N.getElements()->operands()) {
1787 CheckDI(Op && isa<DIMacroNode>(Op), "invalid macro ref", &N, Op);
1788 }
1789 }
1790}
1791
1792void Verifier::visitDIModule(const DIModule &N) {
1793 CheckDI(N.getTag() == dwarf::DW_TAG_module, "invalid tag", &N);
1794 CheckDI(!N.getName().empty(), "anonymous module", &N);
1795}
1796
// Common check for all template parameter kinds: the type operand must be a
// valid type reference (a null type is accepted by isType).
void Verifier::visitDITemplateParameter(const DITemplateParameter &N) {
  CheckDI(isType(N.getRawType()), "invalid type ref", &N, N.getRawType());
}
1800
1801void Verifier::visitDITemplateTypeParameter(const DITemplateTypeParameter &N) {
1802 visitDITemplateParameter(N);
1803
1804 CheckDI(N.getTag() == dwarf::DW_TAG_template_type_parameter, "invalid tag",
1805 &N);
1806}
1807
1808void Verifier::visitDITemplateValueParameter(
1809 const DITemplateValueParameter &N) {
1810 visitDITemplateParameter(N);
1811
1812 CheckDI(N.getTag() == dwarf::DW_TAG_template_value_parameter ||
1813 N.getTag() == dwarf::DW_TAG_GNU_template_template_param ||
1814 N.getTag() == dwarf::DW_TAG_GNU_template_parameter_pack,
1815 "invalid tag", &N);
1816}
1817
1818void Verifier::visitDIVariable(const DIVariable &N) {
1819 if (auto *S = N.getRawScope())
1820 CheckDI(isa<DIScope>(S), "invalid scope", &N, S);
1821 if (auto *F = N.getRawFile())
1822 CheckDI(isa<DIFile>(F), "invalid file", &N, F);
1823}
1824
1825void Verifier::visitDIGlobalVariable(const DIGlobalVariable &N) {
1826 // Checks common to all variables.
1827 visitDIVariable(N);
1828
1829 CheckDI(N.getTag() == dwarf::DW_TAG_variable, "invalid tag", &N);
1830 CheckDI(isType(N.getRawType()), "invalid type ref", &N, N.getRawType());
1831 // Check only if the global variable is not an extern
1832 if (N.isDefinition())
1833 CheckDI(N.getType(), "missing global variable type", &N);
1834 if (auto *Member = N.getRawStaticDataMemberDeclaration()) {
1836 "invalid static data member declaration", &N, Member);
1837 }
1838}
1839
1840void Verifier::visitDILocalVariable(const DILocalVariable &N) {
1841 // Checks common to all variables.
1842 visitDIVariable(N);
1843
1844 CheckDI(isType(N.getRawType()), "invalid type ref", &N, N.getRawType());
1845 CheckDI(N.getTag() == dwarf::DW_TAG_variable, "invalid tag", &N);
1846 CheckDI(N.getRawScope() && isa<DILocalScope>(N.getRawScope()),
1847 "local variable requires a valid scope", &N, N.getRawScope());
1848 if (auto Ty = N.getType())
1849 CheckDI(!isa<DISubroutineType>(Ty), "invalid type", &N, N.getType());
1850}
1851
1852void Verifier::visitDIAssignID(const DIAssignID &N) {
1853 CheckDI(!N.getNumOperands(), "DIAssignID has no arguments", &N);
1854 CheckDI(N.isDistinct(), "DIAssignID must be distinct", &N);
1855}
1856
1857void Verifier::visitDILabel(const DILabel &N) {
1858 if (auto *S = N.getRawScope())
1859 CheckDI(isa<DIScope>(S), "invalid scope", &N, S);
1860 if (auto *F = N.getRawFile())
1861 CheckDI(isa<DIFile>(F), "invalid file", &N, F);
1862
1863 CheckDI(N.getTag() == dwarf::DW_TAG_label, "invalid tag", &N);
1864 CheckDI(N.getRawScope() && isa<DILocalScope>(N.getRawScope()),
1865 "label requires a valid scope", &N, N.getRawScope());
1866}
1867
void Verifier::visitDIExpression(const DIExpression &N) {
  // Delegate to DIExpression's own validity check on its operand stream.
  CheckDI(N.isValid(), "invalid expression", &N);
}
1871
1872void Verifier::visitDIGlobalVariableExpression(
1873 const DIGlobalVariableExpression &GVE) {
1874 CheckDI(GVE.getVariable(), "missing variable");
1875 if (auto *Var = GVE.getVariable())
1876 visitDIGlobalVariable(*Var);
1877 if (auto *Expr = GVE.getExpression()) {
1878 visitDIExpression(*Expr);
1879 if (auto Fragment = Expr->getFragmentInfo())
1880 verifyFragmentExpression(*GVE.getVariable(), *Fragment, &GVE);
1881 }
1882}
1883
1884void Verifier::visitDIObjCProperty(const DIObjCProperty &N) {
1885 CheckDI(N.getTag() == dwarf::DW_TAG_APPLE_property, "invalid tag", &N);
1886 if (auto *T = N.getRawType())
1887 CheckDI(isType(T), "invalid type ref", &N, T);
1888 if (auto *F = N.getRawFile())
1889 CheckDI(isa<DIFile>(F), "invalid file", &N, F);
1890}
1891
1892void Verifier::visitDIImportedEntity(const DIImportedEntity &N) {
1893 CheckDI(N.getTag() == dwarf::DW_TAG_imported_module ||
1894 N.getTag() == dwarf::DW_TAG_imported_declaration,
1895 "invalid tag", &N);
1896 if (auto *S = N.getRawScope())
1897 CheckDI(isa<DIScope>(S), "invalid scope for imported entity", &N, S);
1898 CheckDI(isDINode(N.getRawEntity()), "invalid imported entity", &N,
1899 N.getRawEntity());
1900}
1901
1902void Verifier::visitComdat(const Comdat &C) {
1903 // In COFF the Module is invalid if the GlobalValue has private linkage.
1904 // Entities with private linkage don't have entries in the symbol table.
1905 if (TT.isOSBinFormatCOFF())
1906 if (const GlobalValue *GV = M.getNamedValue(C.getName()))
1907 Check(!GV->hasPrivateLinkage(), "comdat global value has private linkage",
1908 GV);
1909}
1910
1911void Verifier::visitModuleIdents() {
1912 const NamedMDNode *Idents = M.getNamedMetadata("llvm.ident");
1913 if (!Idents)
1914 return;
1915
1916 // llvm.ident takes a list of metadata entry. Each entry has only one string.
1917 // Scan each llvm.ident entry and make sure that this requirement is met.
1918 for (const MDNode *N : Idents->operands()) {
1919 Check(N->getNumOperands() == 1,
1920 "incorrect number of operands in llvm.ident metadata", N);
1921 Check(dyn_cast_or_null<MDString>(N->getOperand(0)),
1922 ("invalid value for llvm.ident metadata entry operand"
1923 "(the operand should be a string)"),
1924 N->getOperand(0));
1925 }
1926}
1927
1928void Verifier::visitModuleCommandLines() {
1929 const NamedMDNode *CommandLines = M.getNamedMetadata("llvm.commandline");
1930 if (!CommandLines)
1931 return;
1932
1933 // llvm.commandline takes a list of metadata entry. Each entry has only one
1934 // string. Scan each llvm.commandline entry and make sure that this
1935 // requirement is met.
1936 for (const MDNode *N : CommandLines->operands()) {
1937 Check(N->getNumOperands() == 1,
1938 "incorrect number of operands in llvm.commandline metadata", N);
1939 Check(dyn_cast_or_null<MDString>(N->getOperand(0)),
1940 ("invalid value for llvm.commandline metadata entry operand"
1941 "(the operand should be a string)"),
1942 N->getOperand(0));
1943 }
1944}
1945
1946void Verifier::visitModuleErrnoTBAA() {
1947 const NamedMDNode *ErrnoTBAA = M.getNamedMetadata("llvm.errno.tbaa");
1948 if (!ErrnoTBAA)
1949 return;
1950
1951 Check(ErrnoTBAA->getNumOperands() >= 1,
1952 "llvm.errno.tbaa must have at least one operand", ErrnoTBAA);
1953
1954 for (const MDNode *N : ErrnoTBAA->operands())
1955 TBAAVerifyHelper.visitTBAAMetadata(nullptr, N);
1956}
1957
1958void Verifier::visitModuleFlags() {
1959 const NamedMDNode *Flags = M.getModuleFlagsMetadata();
1960 if (!Flags) return;
1961
1962 // Scan each flag, and track the flags and requirements.
1963 DenseMap<const MDString*, const MDNode*> SeenIDs;
1964 SmallVector<const MDNode*, 16> Requirements;
1965 uint64_t PAuthABIPlatform = -1;
1966 uint64_t PAuthABIVersion = -1;
1967 for (const MDNode *MDN : Flags->operands()) {
1968 visitModuleFlag(MDN, SeenIDs, Requirements);
1969 if (MDN->getNumOperands() != 3)
1970 continue;
1971 if (const auto *FlagName = dyn_cast_or_null<MDString>(MDN->getOperand(1))) {
1972 if (FlagName->getString() == "aarch64-elf-pauthabi-platform") {
1973 if (const auto *PAP =
1975 PAuthABIPlatform = PAP->getZExtValue();
1976 } else if (FlagName->getString() == "aarch64-elf-pauthabi-version") {
1977 if (const auto *PAV =
1979 PAuthABIVersion = PAV->getZExtValue();
1980 }
1981 }
1982 }
1983
1984 if ((PAuthABIPlatform == uint64_t(-1)) != (PAuthABIVersion == uint64_t(-1)))
1985 CheckFailed("either both or no 'aarch64-elf-pauthabi-platform' and "
1986 "'aarch64-elf-pauthabi-version' module flags must be present");
1987
1988 // Validate that the requirements in the module are valid.
1989 for (const MDNode *Requirement : Requirements) {
1990 const MDString *Flag = cast<MDString>(Requirement->getOperand(0));
1991 const Metadata *ReqValue = Requirement->getOperand(1);
1992
1993 const MDNode *Op = SeenIDs.lookup(Flag);
1994 if (!Op) {
1995 CheckFailed("invalid requirement on flag, flag is not present in module",
1996 Flag);
1997 continue;
1998 }
1999
2000 if (Op->getOperand(2) != ReqValue) {
2001 CheckFailed(("invalid requirement on flag, "
2002 "flag does not have the required value"),
2003 Flag);
2004 continue;
2005 }
2006 }
2007}
2008
2009void
2010Verifier::visitModuleFlag(const MDNode *Op,
2011 DenseMap<const MDString *, const MDNode *> &SeenIDs,
2012 SmallVectorImpl<const MDNode *> &Requirements) {
2013 // Each module flag should have three arguments, the merge behavior (a
2014 // constant int), the flag ID (an MDString), and the value.
2015 Check(Op->getNumOperands() == 3,
2016 "incorrect number of operands in module flag", Op);
2017 Module::ModFlagBehavior MFB;
2018 if (!Module::isValidModFlagBehavior(Op->getOperand(0), MFB)) {
2020 "invalid behavior operand in module flag (expected constant integer)",
2021 Op->getOperand(0));
2022 Check(false,
2023 "invalid behavior operand in module flag (unexpected constant)",
2024 Op->getOperand(0));
2025 }
2026 MDString *ID = dyn_cast_or_null<MDString>(Op->getOperand(1));
2027 Check(ID, "invalid ID operand in module flag (expected metadata string)",
2028 Op->getOperand(1));
2029
2030 // Check the values for behaviors with additional requirements.
2031 switch (MFB) {
2032 case Module::Error:
2033 case Module::Warning:
2034 case Module::Override:
2035 // These behavior types accept any value.
2036 break;
2037
2038 case Module::Min: {
2039 auto *V = mdconst::dyn_extract_or_null<ConstantInt>(Op->getOperand(2));
2040 Check(V && V->getValue().isNonNegative(),
2041 "invalid value for 'min' module flag (expected constant non-negative "
2042 "integer)",
2043 Op->getOperand(2));
2044 break;
2045 }
2046
2047 case Module::Max: {
2049 "invalid value for 'max' module flag (expected constant integer)",
2050 Op->getOperand(2));
2051 break;
2052 }
2053
2054 case Module::Require: {
2055 // The value should itself be an MDNode with two operands, a flag ID (an
2056 // MDString), and a value.
2057 MDNode *Value = dyn_cast<MDNode>(Op->getOperand(2));
2058 Check(Value && Value->getNumOperands() == 2,
2059 "invalid value for 'require' module flag (expected metadata pair)",
2060 Op->getOperand(2));
2061 Check(isa<MDString>(Value->getOperand(0)),
2062 ("invalid value for 'require' module flag "
2063 "(first value operand should be a string)"),
2064 Value->getOperand(0));
2065
2066 // Append it to the list of requirements, to check once all module flags are
2067 // scanned.
2068 Requirements.push_back(Value);
2069 break;
2070 }
2071
2072 case Module::Append:
2073 case Module::AppendUnique: {
2074 // These behavior types require the operand be an MDNode.
2075 Check(isa<MDNode>(Op->getOperand(2)),
2076 "invalid value for 'append'-type module flag "
2077 "(expected a metadata node)",
2078 Op->getOperand(2));
2079 break;
2080 }
2081 }
2082
2083 // Unless this is a "requires" flag, check the ID is unique.
2084 if (MFB != Module::Require) {
2085 bool Inserted = SeenIDs.insert(std::make_pair(ID, Op)).second;
2086 Check(Inserted,
2087 "module flag identifiers must be unique (or of 'require' type)", ID);
2088 }
2089
2090 if (ID->getString() == "wchar_size") {
2091 ConstantInt *Value
2093 Check(Value, "wchar_size metadata requires constant integer argument");
2094 }
2095
2096 if (ID->getString() == "Linker Options") {
2097 // If the llvm.linker.options named metadata exists, we assume that the
2098 // bitcode reader has upgraded the module flag. Otherwise the flag might
2099 // have been created by a client directly.
2100 Check(M.getNamedMetadata("llvm.linker.options"),
2101 "'Linker Options' named metadata no longer supported");
2102 }
2103
2104 if (ID->getString() == "SemanticInterposition") {
2105 ConstantInt *Value =
2107 Check(Value,
2108 "SemanticInterposition metadata requires constant integer argument");
2109 }
2110
2111 if (ID->getString() == "CG Profile") {
2112 for (const MDOperand &MDO : cast<MDNode>(Op->getOperand(2))->operands())
2113 visitModuleFlagCGProfileEntry(MDO);
2114 }
2115}
2116
2117void Verifier::visitModuleFlagCGProfileEntry(const MDOperand &MDO) {
2118 auto CheckFunction = [&](const MDOperand &FuncMDO) {
2119 if (!FuncMDO)
2120 return;
2121 auto F = dyn_cast<ValueAsMetadata>(FuncMDO);
2122 Check(F && isa<Function>(F->getValue()->stripPointerCasts()),
2123 "expected a Function or null", FuncMDO);
2124 };
2125 auto Node = dyn_cast_or_null<MDNode>(MDO);
2126 Check(Node && Node->getNumOperands() == 3, "expected a MDNode triple", MDO);
2127 CheckFunction(Node->getOperand(0));
2128 CheckFunction(Node->getOperand(1));
2129 auto Count = dyn_cast_or_null<ConstantAsMetadata>(Node->getOperand(2));
2130 Check(Count && Count->getType()->isIntegerTy(),
2131 "expected an integer constant", Node->getOperand(2));
2132}
2133
// Verify each attribute in \p Attrs is well-formed: known string-boolean
// attributes must carry an empty/"true"/"false" value, and enum vs. int
// attribute kinds must agree on whether an argument is present. The value
// \p V is printed in error messages.
void Verifier::verifyAttributeTypes(AttributeSet Attrs, const Value *V) {
  for (Attribute A : Attrs) {

    if (A.isStringAttribute()) {
      // Expand a validity check for every known string-boolean attribute from
      // the generated attribute table.
#define GET_ATTR_NAMES
#define ATTRIBUTE_ENUM(ENUM_NAME, DISPLAY_NAME)
#define ATTRIBUTE_STRBOOL(ENUM_NAME, DISPLAY_NAME) \
  if (A.getKindAsString() == #DISPLAY_NAME) { \
    auto V = A.getValueAsString(); \
    if (!(V.empty() || V == "true" || V == "false")) \
      CheckFailed("invalid value for '" #DISPLAY_NAME "' attribute: " + V + \
                  ""); \
  }

#include "llvm/IR/Attributes.inc"
      continue;
    }

    // An int-valued attribute kind must actually carry an argument.
    if (A.isIntAttribute() != Attribute::isIntAttrKind(A.getKindAsEnum())) {
      CheckFailed("Attribute '" + A.getAsString() + "' should have an Argument",
                  V);
      return;
    }
  }
}
2159
2160// VerifyParameterAttrs - Check the given attributes for an argument or return
2161// value of the specified type. The value V is printed in error messages.
2162void Verifier::verifyParameterAttrs(AttributeSet Attrs, Type *Ty,
2163 const Value *V) {
2164 if (!Attrs.hasAttributes())
2165 return;
2166
2167 verifyAttributeTypes(Attrs, V);
2168
2169 for (Attribute Attr : Attrs)
2170 Check(Attr.isStringAttribute() ||
2171 Attribute::canUseAsParamAttr(Attr.getKindAsEnum()),
2172 "Attribute '" + Attr.getAsString() + "' does not apply to parameters",
2173 V);
2174
2175 if (Attrs.hasAttribute(Attribute::ImmArg)) {
2176 unsigned AttrCount =
2177 Attrs.getNumAttributes() - Attrs.hasAttribute(Attribute::Range);
2178 Check(AttrCount == 1,
2179 "Attribute 'immarg' is incompatible with other attributes except the "
2180 "'range' attribute",
2181 V);
2182 }
2183
2184 // Check for mutually incompatible attributes. Only inreg is compatible with
2185 // sret.
2186 unsigned AttrCount = 0;
2187 AttrCount += Attrs.hasAttribute(Attribute::ByVal);
2188 AttrCount += Attrs.hasAttribute(Attribute::InAlloca);
2189 AttrCount += Attrs.hasAttribute(Attribute::Preallocated);
2190 AttrCount += Attrs.hasAttribute(Attribute::StructRet) ||
2191 Attrs.hasAttribute(Attribute::InReg);
2192 AttrCount += Attrs.hasAttribute(Attribute::Nest);
2193 AttrCount += Attrs.hasAttribute(Attribute::ByRef);
2194 Check(AttrCount <= 1,
2195 "Attributes 'byval', 'inalloca', 'preallocated', 'inreg', 'nest', "
2196 "'byref', and 'sret' are incompatible!",
2197 V);
2198
2199 Check(!(Attrs.hasAttribute(Attribute::InAlloca) &&
2200 Attrs.hasAttribute(Attribute::ReadOnly)),
2201 "Attributes "
2202 "'inalloca and readonly' are incompatible!",
2203 V);
2204
2205 Check(!(Attrs.hasAttribute(Attribute::StructRet) &&
2206 Attrs.hasAttribute(Attribute::Returned)),
2207 "Attributes "
2208 "'sret and returned' are incompatible!",
2209 V);
2210
2211 Check(!(Attrs.hasAttribute(Attribute::ZExt) &&
2212 Attrs.hasAttribute(Attribute::SExt)),
2213 "Attributes "
2214 "'zeroext and signext' are incompatible!",
2215 V);
2216
2217 Check(!(Attrs.hasAttribute(Attribute::ReadNone) &&
2218 Attrs.hasAttribute(Attribute::ReadOnly)),
2219 "Attributes "
2220 "'readnone and readonly' are incompatible!",
2221 V);
2222
2223 Check(!(Attrs.hasAttribute(Attribute::ReadNone) &&
2224 Attrs.hasAttribute(Attribute::WriteOnly)),
2225 "Attributes "
2226 "'readnone and writeonly' are incompatible!",
2227 V);
2228
2229 Check(!(Attrs.hasAttribute(Attribute::ReadOnly) &&
2230 Attrs.hasAttribute(Attribute::WriteOnly)),
2231 "Attributes "
2232 "'readonly and writeonly' are incompatible!",
2233 V);
2234
2235 Check(!(Attrs.hasAttribute(Attribute::NoInline) &&
2236 Attrs.hasAttribute(Attribute::AlwaysInline)),
2237 "Attributes "
2238 "'noinline and alwaysinline' are incompatible!",
2239 V);
2240
2241 Check(!(Attrs.hasAttribute(Attribute::Writable) &&
2242 Attrs.hasAttribute(Attribute::ReadNone)),
2243 "Attributes writable and readnone are incompatible!", V);
2244
2245 Check(!(Attrs.hasAttribute(Attribute::Writable) &&
2246 Attrs.hasAttribute(Attribute::ReadOnly)),
2247 "Attributes writable and readonly are incompatible!", V);
2248
2249 AttributeMask IncompatibleAttrs = AttributeFuncs::typeIncompatible(Ty, Attrs);
2250 for (Attribute Attr : Attrs) {
2251 if (!Attr.isStringAttribute() &&
2252 IncompatibleAttrs.contains(Attr.getKindAsEnum())) {
2253 CheckFailed("Attribute '" + Attr.getAsString() +
2254 "' applied to incompatible type!", V);
2255 return;
2256 }
2257 }
2258
2259 if (isa<PointerType>(Ty)) {
2260 if (Attrs.hasAttribute(Attribute::Alignment)) {
2261 Align AttrAlign = Attrs.getAlignment().valueOrOne();
2262 Check(AttrAlign.value() <= Value::MaximumAlignment,
2263 "huge alignment values are unsupported", V);
2264 }
2265 if (Attrs.hasAttribute(Attribute::ByVal)) {
2266 Type *ByValTy = Attrs.getByValType();
2267 SmallPtrSet<Type *, 4> Visited;
2268 Check(ByValTy->isSized(&Visited),
2269 "Attribute 'byval' does not support unsized types!", V);
2270 // Check if it is or contains a target extension type that disallows being
2271 // used on the stack.
2273 "'byval' argument has illegal target extension type", V);
2274 Check(DL.getTypeAllocSize(ByValTy).getKnownMinValue() < (1ULL << 32),
2275 "huge 'byval' arguments are unsupported", V);
2276 }
2277 if (Attrs.hasAttribute(Attribute::ByRef)) {
2278 SmallPtrSet<Type *, 4> Visited;
2279 Check(Attrs.getByRefType()->isSized(&Visited),
2280 "Attribute 'byref' does not support unsized types!", V);
2281 Check(DL.getTypeAllocSize(Attrs.getByRefType()).getKnownMinValue() <
2282 (1ULL << 32),
2283 "huge 'byref' arguments are unsupported", V);
2284 }
2285 if (Attrs.hasAttribute(Attribute::InAlloca)) {
2286 SmallPtrSet<Type *, 4> Visited;
2287 Check(Attrs.getInAllocaType()->isSized(&Visited),
2288 "Attribute 'inalloca' does not support unsized types!", V);
2289 Check(DL.getTypeAllocSize(Attrs.getInAllocaType()).getKnownMinValue() <
2290 (1ULL << 32),
2291 "huge 'inalloca' arguments are unsupported", V);
2292 }
2293 if (Attrs.hasAttribute(Attribute::Preallocated)) {
2294 SmallPtrSet<Type *, 4> Visited;
2295 Check(Attrs.getPreallocatedType()->isSized(&Visited),
2296 "Attribute 'preallocated' does not support unsized types!", V);
2297 Check(
2298 DL.getTypeAllocSize(Attrs.getPreallocatedType()).getKnownMinValue() <
2299 (1ULL << 32),
2300 "huge 'preallocated' arguments are unsupported", V);
2301 }
2302 }
2303
2304 if (Attrs.hasAttribute(Attribute::Initializes)) {
2305 auto Inits = Attrs.getAttribute(Attribute::Initializes).getInitializes();
2306 Check(!Inits.empty(), "Attribute 'initializes' does not support empty list",
2307 V);
2309 "Attribute 'initializes' does not support unordered ranges", V);
2310 }
2311
2312 if (Attrs.hasAttribute(Attribute::NoFPClass)) {
2313 uint64_t Val = Attrs.getAttribute(Attribute::NoFPClass).getValueAsInt();
2314 Check(Val != 0, "Attribute 'nofpclass' must have at least one test bit set",
2315 V);
2316 Check((Val & ~static_cast<unsigned>(fcAllFlags)) == 0,
2317 "Invalid value for 'nofpclass' test mask", V);
2318 }
2319 if (Attrs.hasAttribute(Attribute::Range)) {
2320 const ConstantRange &CR =
2321 Attrs.getAttribute(Attribute::Range).getValueAsConstantRange();
2323 "Range bit width must match type bit width!", V);
2324 }
2325}
2326
2327void Verifier::checkUnsignedBaseTenFuncAttr(AttributeList Attrs, StringRef Attr,
2328 const Value *V) {
2329 if (Attrs.hasFnAttr(Attr)) {
2330 StringRef S = Attrs.getFnAttr(Attr).getValueAsString();
2331 unsigned N;
2332 if (S.getAsInteger(10, N))
2333 CheckFailed("\"" + Attr + "\" takes an unsigned integer: " + S, V);
2334 }
2335}
2336
2337// Check parameter attributes against a function type.
2338// The value V is printed in error messages.
2339void Verifier::verifyFunctionAttrs(FunctionType *FT, AttributeList Attrs,
2340 const Value *V, bool IsIntrinsic,
2341 bool IsInlineAsm) {
2342 if (Attrs.isEmpty())
2343 return;
2344
2345 if (AttributeListsVisited.insert(Attrs.getRawPointer()).second) {
2346 Check(Attrs.hasParentContext(Context),
2347 "Attribute list does not match Module context!", &Attrs, V);
2348 for (const auto &AttrSet : Attrs) {
2349 Check(!AttrSet.hasAttributes() || AttrSet.hasParentContext(Context),
2350 "Attribute set does not match Module context!", &AttrSet, V);
2351 for (const auto &A : AttrSet) {
2352 Check(A.hasParentContext(Context),
2353 "Attribute does not match Module context!", &A, V);
2354 }
2355 }
2356 }
2357
2358 bool SawNest = false;
2359 bool SawReturned = false;
2360 bool SawSRet = false;
2361 bool SawSwiftSelf = false;
2362 bool SawSwiftAsync = false;
2363 bool SawSwiftError = false;
2364
2365 // Verify return value attributes.
2366 AttributeSet RetAttrs = Attrs.getRetAttrs();
2367 for (Attribute RetAttr : RetAttrs)
2368 Check(RetAttr.isStringAttribute() ||
2369 Attribute::canUseAsRetAttr(RetAttr.getKindAsEnum()),
2370 "Attribute '" + RetAttr.getAsString() +
2371 "' does not apply to function return values",
2372 V);
2373
2374 unsigned MaxParameterWidth = 0;
2375 auto GetMaxParameterWidth = [&MaxParameterWidth](Type *Ty) {
2376 if (Ty->isVectorTy()) {
2377 if (auto *VT = dyn_cast<FixedVectorType>(Ty)) {
2378 unsigned Size = VT->getPrimitiveSizeInBits().getFixedValue();
2379 if (Size > MaxParameterWidth)
2380 MaxParameterWidth = Size;
2381 }
2382 }
2383 };
2384 GetMaxParameterWidth(FT->getReturnType());
2385 verifyParameterAttrs(RetAttrs, FT->getReturnType(), V);
2386
2387 // Verify parameter attributes.
2388 for (unsigned i = 0, e = FT->getNumParams(); i != e; ++i) {
2389 Type *Ty = FT->getParamType(i);
2390 AttributeSet ArgAttrs = Attrs.getParamAttrs(i);
2391
2392 if (!IsIntrinsic) {
2393 Check(!ArgAttrs.hasAttribute(Attribute::ImmArg),
2394 "immarg attribute only applies to intrinsics", V);
2395 if (!IsInlineAsm)
2396 Check(!ArgAttrs.hasAttribute(Attribute::ElementType),
2397 "Attribute 'elementtype' can only be applied to intrinsics"
2398 " and inline asm.",
2399 V);
2400 }
2401
2402 verifyParameterAttrs(ArgAttrs, Ty, V);
2403 GetMaxParameterWidth(Ty);
2404
2405 if (ArgAttrs.hasAttribute(Attribute::Nest)) {
2406 Check(!SawNest, "More than one parameter has attribute nest!", V);
2407 SawNest = true;
2408 }
2409
2410 if (ArgAttrs.hasAttribute(Attribute::Returned)) {
2411 Check(!SawReturned, "More than one parameter has attribute returned!", V);
2412 Check(Ty->canLosslesslyBitCastTo(FT->getReturnType()),
2413 "Incompatible argument and return types for 'returned' attribute",
2414 V);
2415 SawReturned = true;
2416 }
2417
2418 if (ArgAttrs.hasAttribute(Attribute::StructRet)) {
2419 Check(!SawSRet, "Cannot have multiple 'sret' parameters!", V);
2420 Check(i == 0 || i == 1,
2421 "Attribute 'sret' is not on first or second parameter!", V);
2422 SawSRet = true;
2423 }
2424
2425 if (ArgAttrs.hasAttribute(Attribute::SwiftSelf)) {
2426 Check(!SawSwiftSelf, "Cannot have multiple 'swiftself' parameters!", V);
2427 SawSwiftSelf = true;
2428 }
2429
2430 if (ArgAttrs.hasAttribute(Attribute::SwiftAsync)) {
2431 Check(!SawSwiftAsync, "Cannot have multiple 'swiftasync' parameters!", V);
2432 SawSwiftAsync = true;
2433 }
2434
2435 if (ArgAttrs.hasAttribute(Attribute::SwiftError)) {
2436 Check(!SawSwiftError, "Cannot have multiple 'swifterror' parameters!", V);
2437 SawSwiftError = true;
2438 }
2439
2440 if (ArgAttrs.hasAttribute(Attribute::InAlloca)) {
2441 Check(i == FT->getNumParams() - 1,
2442 "inalloca isn't on the last parameter!", V);
2443 }
2444 }
2445
2446 if (!Attrs.hasFnAttrs())
2447 return;
2448
2449 verifyAttributeTypes(Attrs.getFnAttrs(), V);
2450 for (Attribute FnAttr : Attrs.getFnAttrs())
2451 Check(FnAttr.isStringAttribute() ||
2452 Attribute::canUseAsFnAttr(FnAttr.getKindAsEnum()),
2453 "Attribute '" + FnAttr.getAsString() +
2454 "' does not apply to functions!",
2455 V);
2456
2457 Check(!(Attrs.hasFnAttr(Attribute::NoInline) &&
2458 Attrs.hasFnAttr(Attribute::AlwaysInline)),
2459 "Attributes 'noinline and alwaysinline' are incompatible!", V);
2460
2461 if (Attrs.hasFnAttr(Attribute::OptimizeNone)) {
2462 Check(Attrs.hasFnAttr(Attribute::NoInline),
2463 "Attribute 'optnone' requires 'noinline'!", V);
2464
2465 Check(!Attrs.hasFnAttr(Attribute::OptimizeForSize),
2466 "Attributes 'optsize and optnone' are incompatible!", V);
2467
2468 Check(!Attrs.hasFnAttr(Attribute::MinSize),
2469 "Attributes 'minsize and optnone' are incompatible!", V);
2470
2471 Check(!Attrs.hasFnAttr(Attribute::OptimizeForDebugging),
2472 "Attributes 'optdebug and optnone' are incompatible!", V);
2473 }
2474
2475 Check(!(Attrs.hasFnAttr(Attribute::SanitizeRealtime) &&
2476 Attrs.hasFnAttr(Attribute::SanitizeRealtimeBlocking)),
2477 "Attributes "
2478 "'sanitize_realtime and sanitize_realtime_blocking' are incompatible!",
2479 V);
2480
2481 if (Attrs.hasFnAttr(Attribute::OptimizeForDebugging)) {
2482 Check(!Attrs.hasFnAttr(Attribute::OptimizeForSize),
2483 "Attributes 'optsize and optdebug' are incompatible!", V);
2484
2485 Check(!Attrs.hasFnAttr(Attribute::MinSize),
2486 "Attributes 'minsize and optdebug' are incompatible!", V);
2487 }
2488
2489 Check(!Attrs.hasAttrSomewhere(Attribute::Writable) ||
2490 isModSet(Attrs.getMemoryEffects().getModRef(IRMemLocation::ArgMem)),
2491 "Attribute writable and memory without argmem: write are incompatible!",
2492 V);
2493
2494 if (Attrs.hasFnAttr("aarch64_pstate_sm_enabled")) {
2495 Check(!Attrs.hasFnAttr("aarch64_pstate_sm_compatible"),
2496 "Attributes 'aarch64_pstate_sm_enabled and "
2497 "aarch64_pstate_sm_compatible' are incompatible!",
2498 V);
2499 }
2500
2501 Check((Attrs.hasFnAttr("aarch64_new_za") + Attrs.hasFnAttr("aarch64_in_za") +
2502 Attrs.hasFnAttr("aarch64_inout_za") +
2503 Attrs.hasFnAttr("aarch64_out_za") +
2504 Attrs.hasFnAttr("aarch64_preserves_za") +
2505 Attrs.hasFnAttr("aarch64_za_state_agnostic")) <= 1,
2506 "Attributes 'aarch64_new_za', 'aarch64_in_za', 'aarch64_out_za', "
2507 "'aarch64_inout_za', 'aarch64_preserves_za' and "
2508 "'aarch64_za_state_agnostic' are mutually exclusive",
2509 V);
2510
2511 Check((Attrs.hasFnAttr("aarch64_new_zt0") +
2512 Attrs.hasFnAttr("aarch64_in_zt0") +
2513 Attrs.hasFnAttr("aarch64_inout_zt0") +
2514 Attrs.hasFnAttr("aarch64_out_zt0") +
2515 Attrs.hasFnAttr("aarch64_preserves_zt0") +
2516 Attrs.hasFnAttr("aarch64_za_state_agnostic")) <= 1,
2517 "Attributes 'aarch64_new_zt0', 'aarch64_in_zt0', 'aarch64_out_zt0', "
2518 "'aarch64_inout_zt0', 'aarch64_preserves_zt0' and "
2519 "'aarch64_za_state_agnostic' are mutually exclusive",
2520 V);
2521
2522 if (Attrs.hasFnAttr(Attribute::JumpTable)) {
2523 const GlobalValue *GV = cast<GlobalValue>(V);
2525 "Attribute 'jumptable' requires 'unnamed_addr'", V);
2526 }
2527
2528 if (auto Args = Attrs.getFnAttrs().getAllocSizeArgs()) {
2529 auto CheckParam = [&](StringRef Name, unsigned ParamNo) {
2530 if (ParamNo >= FT->getNumParams()) {
2531 CheckFailed("'allocsize' " + Name + " argument is out of bounds", V);
2532 return false;
2533 }
2534
2535 if (!FT->getParamType(ParamNo)->isIntegerTy()) {
2536 CheckFailed("'allocsize' " + Name +
2537 " argument must refer to an integer parameter",
2538 V);
2539 return false;
2540 }
2541
2542 return true;
2543 };
2544
2545 if (!CheckParam("element size", Args->first))
2546 return;
2547
2548 if (Args->second && !CheckParam("number of elements", *Args->second))
2549 return;
2550 }
2551
2552 if (Attrs.hasFnAttr(Attribute::AllocKind)) {
2553 AllocFnKind K = Attrs.getAllocKind();
2555 K & (AllocFnKind::Alloc | AllocFnKind::Realloc | AllocFnKind::Free);
2556 if (!is_contained(
2557 {AllocFnKind::Alloc, AllocFnKind::Realloc, AllocFnKind::Free},
2558 Type))
2559 CheckFailed(
2560 "'allockind()' requires exactly one of alloc, realloc, and free");
2561 if ((Type == AllocFnKind::Free) &&
2562 ((K & (AllocFnKind::Uninitialized | AllocFnKind::Zeroed |
2563 AllocFnKind::Aligned)) != AllocFnKind::Unknown))
2564 CheckFailed("'allockind(\"free\")' doesn't allow uninitialized, zeroed, "
2565 "or aligned modifiers.");
2566 AllocFnKind ZeroedUninit = AllocFnKind::Uninitialized | AllocFnKind::Zeroed;
2567 if ((K & ZeroedUninit) == ZeroedUninit)
2568 CheckFailed("'allockind()' can't be both zeroed and uninitialized");
2569 }
2570
2571 if (Attribute A = Attrs.getFnAttr("alloc-variant-zeroed"); A.isValid()) {
2572 StringRef S = A.getValueAsString();
2573 Check(!S.empty(), "'alloc-variant-zeroed' must not be empty");
2574 Function *Variant = M.getFunction(S);
2575 if (Variant) {
2576 Attribute Family = Attrs.getFnAttr("alloc-family");
2577 Attribute VariantFamily = Variant->getFnAttribute("alloc-family");
2578 if (Family.isValid())
2579 Check(VariantFamily.isValid() &&
2580 VariantFamily.getValueAsString() == Family.getValueAsString(),
2581 "'alloc-variant-zeroed' must name a function belonging to the "
2582 "same 'alloc-family'");
2583
2584 Check(Variant->hasFnAttribute(Attribute::AllocKind) &&
2585 (Variant->getFnAttribute(Attribute::AllocKind).getAllocKind() &
2586 AllocFnKind::Zeroed) != AllocFnKind::Unknown,
2587 "'alloc-variant-zeroed' must name a function with "
2588 "'allockind(\"zeroed\")'");
2589
2590 Check(FT == Variant->getFunctionType(),
2591 "'alloc-variant-zeroed' must name a function with the same "
2592 "signature");
2593
2594 if (const Function *F = dyn_cast<Function>(V))
2595 Check(F->getCallingConv() == Variant->getCallingConv(),
2596 "'alloc-variant-zeroed' must name a function with the same "
2597 "calling convention");
2598 }
2599 }
2600
2601 if (Attrs.hasFnAttr(Attribute::VScaleRange)) {
2602 unsigned VScaleMin = Attrs.getFnAttrs().getVScaleRangeMin();
2603 if (VScaleMin == 0)
2604 CheckFailed("'vscale_range' minimum must be greater than 0", V);
2605 else if (!isPowerOf2_32(VScaleMin))
2606 CheckFailed("'vscale_range' minimum must be power-of-two value", V);
2607 std::optional<unsigned> VScaleMax = Attrs.getFnAttrs().getVScaleRangeMax();
2608 if (VScaleMax && VScaleMin > VScaleMax)
2609 CheckFailed("'vscale_range' minimum cannot be greater than maximum", V);
2610 else if (VScaleMax && !isPowerOf2_32(*VScaleMax))
2611 CheckFailed("'vscale_range' maximum must be power-of-two value", V);
2612 }
2613
2614 if (Attribute FPAttr = Attrs.getFnAttr("frame-pointer"); FPAttr.isValid()) {
2615 StringRef FP = FPAttr.getValueAsString();
2616 if (FP != "all" && FP != "non-leaf" && FP != "none" && FP != "reserved" &&
2617 FP != "non-leaf-no-reserve")
2618 CheckFailed("invalid value for 'frame-pointer' attribute: " + FP, V);
2619 }
2620
2621 checkUnsignedBaseTenFuncAttr(Attrs, "patchable-function-prefix", V);
2622 checkUnsignedBaseTenFuncAttr(Attrs, "patchable-function-entry", V);
2623 if (Attrs.hasFnAttr("patchable-function-entry-section"))
2624 Check(!Attrs.getFnAttr("patchable-function-entry-section")
2625 .getValueAsString()
2626 .empty(),
2627 "\"patchable-function-entry-section\" must not be empty");
2628 checkUnsignedBaseTenFuncAttr(Attrs, "warn-stack-size", V);
2629
2630 if (auto A = Attrs.getFnAttr("sign-return-address"); A.isValid()) {
2631 StringRef S = A.getValueAsString();
2632 if (S != "none" && S != "all" && S != "non-leaf")
2633 CheckFailed("invalid value for 'sign-return-address' attribute: " + S, V);
2634 }
2635
2636 if (auto A = Attrs.getFnAttr("sign-return-address-key"); A.isValid()) {
2637 StringRef S = A.getValueAsString();
2638 if (S != "a_key" && S != "b_key")
2639 CheckFailed("invalid value for 'sign-return-address-key' attribute: " + S,
2640 V);
2641 if (auto AA = Attrs.getFnAttr("sign-return-address"); !AA.isValid()) {
2642 CheckFailed(
2643 "'sign-return-address-key' present without `sign-return-address`");
2644 }
2645 }
2646
2647 if (auto A = Attrs.getFnAttr("branch-target-enforcement"); A.isValid()) {
2648 StringRef S = A.getValueAsString();
2649 if (S != "" && S != "true" && S != "false")
2650 CheckFailed(
2651 "invalid value for 'branch-target-enforcement' attribute: " + S, V);
2652 }
2653
2654 if (auto A = Attrs.getFnAttr("branch-protection-pauth-lr"); A.isValid()) {
2655 StringRef S = A.getValueAsString();
2656 if (S != "" && S != "true" && S != "false")
2657 CheckFailed(
2658 "invalid value for 'branch-protection-pauth-lr' attribute: " + S, V);
2659 }
2660
2661 if (auto A = Attrs.getFnAttr("guarded-control-stack"); A.isValid()) {
2662 StringRef S = A.getValueAsString();
2663 if (S != "" && S != "true" && S != "false")
2664 CheckFailed("invalid value for 'guarded-control-stack' attribute: " + S,
2665 V);
2666 }
2667
2668 if (auto A = Attrs.getFnAttr("vector-function-abi-variant"); A.isValid()) {
2669 StringRef S = A.getValueAsString();
2670 const std::optional<VFInfo> Info = VFABI::tryDemangleForVFABI(S, FT);
2671 if (!Info)
2672 CheckFailed("invalid name for a VFABI variant: " + S, V);
2673 }
2674
2675 if (auto A = Attrs.getFnAttr("modular-format"); A.isValid()) {
2676 StringRef S = A.getValueAsString();
2678 S.split(Args, ',');
2679 Check(Args.size() >= 5,
2680 "modular-format attribute requires at least 5 arguments", V);
2681 unsigned FirstArgIdx;
2682 Check(!Args[2].getAsInteger(10, FirstArgIdx),
2683 "modular-format attribute first arg index is not an integer", V);
2684 unsigned UpperBound = FT->getNumParams() + (FT->isVarArg() ? 1 : 0);
2685 Check(FirstArgIdx > 0 && FirstArgIdx <= UpperBound,
2686 "modular-format attribute first arg index is out of bounds", V);
2687 }
2688
2689 if (auto A = Attrs.getFnAttr("target-features"); A.isValid()) {
2690 StringRef S = A.getValueAsString();
2691 if (!S.empty()) {
2692 for (auto FeatureFlag : split(S, ',')) {
2693 if (FeatureFlag.empty())
2694 CheckFailed(
2695 "target-features attribute should not contain an empty string");
2696 else
2697 Check(FeatureFlag[0] == '+' || FeatureFlag[0] == '-',
2698 "target feature '" + FeatureFlag +
2699 "' must start with a '+' or '-'",
2700 V);
2701 }
2702 }
2703 }
2704}
2705void Verifier::verifyUnknownProfileMetadata(MDNode *MD) {
2706 Check(MD->getNumOperands() == 2,
2707 "'unknown' !prof should have a single additional operand", MD);
2708 auto *PassName = dyn_cast<MDString>(MD->getOperand(1));
2709 Check(PassName != nullptr,
2710 "'unknown' !prof should have an additional operand of type "
2711 "string");
2712 Check(!PassName->getString().empty(),
2713 "the 'unknown' !prof operand should not be an empty string");
2714}
2715
/// Verify function-level metadata attachments. Only !prof and !kcfi_type
/// attachments are checked here; other kinds are ignored.
void Verifier::verifyFunctionMetadata(
    ArrayRef<std::pair<unsigned, MDNode *>> MDs) {
  for (const auto &Pair : MDs) {
    if (Pair.first == LLVMContext::MD_prof) {
      MDNode *MD = Pair.second;
      Check(MD->getNumOperands() >= 2,
            "!prof annotations should have no less than 2 operands", MD);
      // We may have functions that are synthesized by the compiler, e.g. in
      // WPD, that we can't currently determine the entry count.
      // "unknown" profile attachments have their own, separate layout check.
      if (MD->getOperand(0).equalsStr(
        verifyUnknownProfileMetadata(MD);
        continue;
      }

      // Check first operand.
      Check(MD->getOperand(0) != nullptr, "first operand should not be null",
            MD);
            "expected string with name of the !prof annotation", MD);
      MDString *MDS = cast<MDString>(MD->getOperand(0));
      StringRef ProfName = MDS->getString();
            "first operand should be 'function_entry_count'"
            " or 'synthetic_function_entry_count'",
            MD);

      // Check second operand.
      Check(MD->getOperand(1) != nullptr, "second operand should not be null",
            MD);
            "expected integer argument to function_entry_count", MD);
    } else if (Pair.first == LLVMContext::MD_kcfi_type) {
      // !kcfi_type: exactly one operand, a 32-bit constant integer type id.
      MDNode *MD = Pair.second;
      Check(MD->getNumOperands() == 1,
            "!kcfi_type must have exactly one operand", MD);
      Check(MD->getOperand(0) != nullptr, "!kcfi_type operand must not be null",
            MD);
            "expected a constant operand for !kcfi_type", MD);
      Constant *C = cast<ConstantAsMetadata>(MD->getOperand(0))->getValue();
      Check(isa<ConstantInt>(C) && isa<IntegerType>(C->getType()),
            "expected a constant integer operand for !kcfi_type", MD);
            "expected a 32-bit integer constant operand for !kcfi_type", MD);
    }
  }
}
2765
/// Walk a constant and all constants reachable through its operands,
/// verifying each ConstantExpr / ConstantPtrAuth found, without recursing
/// into global values (those are verified separately).
void Verifier::visitConstantExprsRecursively(const Constant *EntryC) {
  // Leaf constants (no operands) cannot contain expressions; nothing to do.
  if (EntryC->getNumOperands() == 0)
    return;

  // ConstantExprVisited de-duplicates work across calls: constants are
  // uniqued, so a constant already visited for one user needs no re-check.
  if (!ConstantExprVisited.insert(EntryC).second)
    return;

  Stack.push_back(EntryC);

  // Iterative DFS over the constant's operand graph.
  while (!Stack.empty()) {
    const Constant *C = Stack.pop_back_val();

    // Check this constant expression.
    if (const auto *CE = dyn_cast<ConstantExpr>(C))
      visitConstantExpr(CE);

    if (const auto *CPA = dyn_cast<ConstantPtrAuth>(C))
      visitConstantPtrAuth(CPA);

    if (const auto *GV = dyn_cast<GlobalValue>(C)) {
      // Global Values get visited separately, but we do need to make sure
      // that the global value is in the correct module
      Check(GV->getParent() == &M, "Referencing global in another module!",
            EntryC, &M, GV, GV->getParent());
      continue;
    }

    // Visit all sub-expressions.
    for (const Use &U : C->operands()) {
      const auto *OpC = dyn_cast<Constant>(U);
      if (!OpC)
        continue;
      if (!ConstantExprVisited.insert(OpC).second)
        continue;
      Stack.push_back(OpC);
    }
  }
}
2805
2806void Verifier::visitConstantExpr(const ConstantExpr *CE) {
2807 if (CE->getOpcode() == Instruction::BitCast)
2808 Check(CastInst::castIsValid(Instruction::BitCast, CE->getOperand(0),
2809 CE->getType()),
2810 "Invalid bitcast", CE);
2811 else if (CE->getOpcode() == Instruction::PtrToAddr)
2812 checkPtrToAddr(CE->getOperand(0)->getType(), CE->getType(), *CE);
2813}
2814
/// Verify structural invariants of a ptrauth signed-pointer constant:
/// pointer-typed base of the same type as the constant, i32 key, i64
/// discriminator, and pointer-typed address-discriminator / deactivation
/// symbol operands.
void Verifier::visitConstantPtrAuth(const ConstantPtrAuth *CPA) {
  Check(CPA->getPointer()->getType()->isPointerTy(),
        "signed ptrauth constant base pointer must have pointer type");

  Check(CPA->getType() == CPA->getPointer()->getType(),
        "signed ptrauth constant must have same type as its base pointer");

  Check(CPA->getKey()->getBitWidth() == 32,
        "signed ptrauth constant key must be i32 constant integer");

        "signed ptrauth constant address discriminator must be a pointer");

  Check(CPA->getDiscriminator()->getBitWidth() == 64,
        "signed ptrauth constant discriminator must be i64 constant integer");

        "signed ptrauth constant deactivation symbol must be a pointer");

        "signed ptrauth constant deactivation symbol must be a global value "
        "or null");
}
2839
2840bool Verifier::verifyAttributeCount(AttributeList Attrs, unsigned Params) {
2841 // There shouldn't be more attribute sets than there are parameters plus the
2842 // function and return value.
2843 return Attrs.getNumAttrSets() <= Params + 2;
2844}
2845
/// Verify an inline-asm call site against its parsed constraint string:
/// indirect constraints require pointer operands (with elementtype),
/// direct constraints must not carry elementtype, and label constraints
/// are only legal on callbr and must match its indirect destinations.
void Verifier::verifyInlineAsmCall(const CallBase &Call) {
  const InlineAsm *IA = cast<InlineAsm>(Call.getCalledOperand());
  unsigned ArgNo = 0;
  unsigned LabelNo = 0;
  for (const InlineAsm::ConstraintInfo &CI : IA->ParseConstraints()) {
    // Label constraints consume no argument operand; just count them.
    if (CI.Type == InlineAsm::isLabel) {
      ++LabelNo;
      continue;
    }

    // Only deal with constraints that correspond to call arguments.
    if (!CI.hasArg())
      continue;

    if (CI.isIndirect) {
      const Value *Arg = Call.getArgOperand(ArgNo);
      Check(Arg->getType()->isPointerTy(),
            "Operand for indirect constraint must have pointer type", &Call);

            "Operand for indirect constraint must have elementtype attribute",
            &Call);
    } else {
      Check(!Call.paramHasAttr(ArgNo, Attribute::ElementType),
            "Elementtype attribute can only be applied for indirect "
            "constraints",
            &Call);
    }

    // ArgNo advances only for constraints that consume an argument.
    ArgNo++;
  }

  if (auto *CallBr = dyn_cast<CallBrInst>(&Call)) {
    Check(LabelNo == CallBr->getNumIndirectDests(),
          "Number of label constraints does not match number of callbr dests",
          &Call);
  } else {
    Check(LabelNo == 0, "Label constraints can only be used with callbr",
          &Call);
  }
}
2887
/// Verify that statepoint intrinsic is well formed.
/// Checks the fixed operand layout (id, num-patch-bytes, callee, num-call-args,
/// flags, call args, transition/deopt counts) and that the token is only
/// consumed by gc.result / gc.relocate tied to this statepoint.
void Verifier::verifyStatepoint(const CallBase &Call) {
  assert(Call.getIntrinsicID() == Intrinsic::experimental_gc_statepoint);

        "gc.statepoint must read and write all memory to preserve "
        "reordering restrictions required by safepoint semantics",
        Call);

  // Operand 1: number of patchable bytes; must be a non-negative i32.
  const int64_t NumPatchBytes =
      cast<ConstantInt>(Call.getArgOperand(1))->getSExtValue();
  assert(isInt<32>(NumPatchBytes) && "NumPatchBytesV is an i32!");
  Check(NumPatchBytes >= 0,
        "gc.statepoint number of patchable bytes must be "
        "positive",
        Call);

  // Operand 2: the wrapped callee; its elementtype gives the wrapped
  // function's signature.
  Type *TargetElemType = Call.getParamElementType(2);
  Check(TargetElemType,
        "gc.statepoint callee argument must have elementtype attribute", Call);
  FunctionType *TargetFuncType = dyn_cast<FunctionType>(TargetElemType);
  Check(TargetFuncType,
        "gc.statepoint callee elementtype must be function type", Call);

  // Operand 3: number of arguments passed to the wrapped call.
  const int NumCallArgs = cast<ConstantInt>(Call.getArgOperand(3))->getZExtValue();
  Check(NumCallArgs >= 0,
        "gc.statepoint number of arguments to underlying call "
        "must be positive",
        Call);
  const int NumParams = (int)TargetFuncType->getNumParams();
  if (TargetFuncType->isVarArg()) {
    Check(NumCallArgs >= NumParams,
          "gc.statepoint mismatch in number of vararg call args", Call);

    // TODO: Remove this limitation
    Check(TargetFuncType->getReturnType()->isVoidTy(),
          "gc.statepoint doesn't support wrapping non-void "
          "vararg functions yet",
          Call);
  } else
    Check(NumCallArgs == NumParams,
          "gc.statepoint mismatch in number of call args", Call);

  // Operand 4: flags word; only bits inside MaskAll are defined.
  const uint64_t Flags
    = cast<ConstantInt>(Call.getArgOperand(4))->getZExtValue();
  Check((Flags & ~(uint64_t)StatepointFlags::MaskAll) == 0,
        "unknown flag used in gc.statepoint flags argument", Call);

  // Verify that the types of the call parameter arguments match
  // the type of the wrapped callee.
  AttributeList Attrs = Call.getAttributes();
  for (int i = 0; i < NumParams; i++) {
    Type *ParamType = TargetFuncType->getParamType(i);
    Type *ArgType = Call.getArgOperand(5 + i)->getType();
    Check(ArgType == ParamType,
          "gc.statepoint call argument does not match wrapped "
          "function type",
          Call);

    if (TargetFuncType->isVarArg()) {
      AttributeSet ArgAttrs = Attrs.getParamAttrs(5 + i);
      Check(!ArgAttrs.hasAttribute(Attribute::StructRet),
            "Attribute 'sret' cannot be used for vararg call arguments!", Call);
    }
  }

  // The call args occupy operands [5, 4 + NumCallArgs]; transition and deopt
  // counts follow, but both must be zero (inline bundles are deprecated).
  const int EndCallArgsInx = 4 + NumCallArgs;

  const Value *NumTransitionArgsV = Call.getArgOperand(EndCallArgsInx + 1);
  Check(isa<ConstantInt>(NumTransitionArgsV),
        "gc.statepoint number of transition arguments "
        "must be constant integer",
        Call);
  const int NumTransitionArgs =
      cast<ConstantInt>(NumTransitionArgsV)->getZExtValue();
  Check(NumTransitionArgs == 0,
        "gc.statepoint w/inline transition bundle is deprecated", Call);
  const int EndTransitionArgsInx = EndCallArgsInx + 1 + NumTransitionArgs;

  const Value *NumDeoptArgsV = Call.getArgOperand(EndTransitionArgsInx + 1);
  Check(isa<ConstantInt>(NumDeoptArgsV),
        "gc.statepoint number of deoptimization arguments "
        "must be constant integer",
        Call);
  const int NumDeoptArgs = cast<ConstantInt>(NumDeoptArgsV)->getZExtValue();
  Check(NumDeoptArgs == 0,
        "gc.statepoint w/inline deopt operands is deprecated", Call);

  const int ExpectedNumArgs = 7 + NumCallArgs;
  Check(ExpectedNumArgs == (int)Call.arg_size(),
        "gc.statepoint too many arguments", Call);

  // Check that the only uses of this gc.statepoint are gc.result or
  // gc.relocate calls which are tied to this statepoint and thus part
  // of the same statepoint sequence
  for (const User *U : Call.users()) {
    const CallInst *UserCall = dyn_cast<const CallInst>(U);
    Check(UserCall, "illegal use of statepoint token", Call, U);
    if (!UserCall)
      continue;
    Check(isa<GCRelocateInst>(UserCall) || isa<GCResultInst>(UserCall),
          "gc.result or gc.relocate are the only value uses "
          "of a gc.statepoint",
          Call, U);
    if (isa<GCResultInst>(UserCall)) {
      Check(UserCall->getArgOperand(0) == &Call,
            "gc.result connected to wrong gc.statepoint", Call, UserCall);
    } else if (isa<GCRelocateInst>(Call)) {
      // NOTE(review): this tests `Call` (the statepoint itself), not
      // `UserCall`, so this branch appears unreachable — confirm whether
      // `isa<GCRelocateInst>(UserCall)` was intended.
      Check(UserCall->getArgOperand(0) == &Call,
            "gc.relocate connected to wrong gc.statepoint", Call, UserCall);
    }
  }

  // Note: It is legal for a single derived pointer to be listed multiple
  // times. It's non-optimal, but it is legal. It can also happen after
  // insertion if we strip a bitcast away.
  // Note: It is really tempting to check that each base is relocated and
  // that a derived pointer is never reused as a base pointer. This turns
  // out to be problematic since optimizations run after safepoint insertion
  // can recognize equality properties that the insertion logic doesn't know
  // about. See example statepoint.ll in the verifier subdirectory
}
3011
3012void Verifier::verifyFrameRecoverIndices() {
3013 for (auto &Counts : FrameEscapeInfo) {
3014 Function *F = Counts.first;
3015 unsigned EscapedObjectCount = Counts.second.first;
3016 unsigned MaxRecoveredIndex = Counts.second.second;
3017 Check(MaxRecoveredIndex <= EscapedObjectCount,
3018 "all indices passed to llvm.localrecover must be less than the "
3019 "number of arguments passed to llvm.localescape in the parent "
3020 "function",
3021 F);
3022 }
3023}
3024
3025static Instruction *getSuccPad(Instruction *Terminator) {
3026 BasicBlock *UnwindDest;
3027 if (auto *II = dyn_cast<InvokeInst>(Terminator))
3028 UnwindDest = II->getUnwindDest();
3029 else if (auto *CSI = dyn_cast<CatchSwitchInst>(Terminator))
3030 UnwindDest = CSI->getUnwindDest();
3031 else
3032 UnwindDest = cast<CleanupReturnInst>(Terminator)->getUnwindDest();
3033 return &*UnwindDest->getFirstNonPHIIt();
3034}
3035
/// Detect cycles in the "funclet unwinds to funclet" relation recorded in
/// SiblingFuncletInfo: sibling EH pads may not (transitively) unwind to each
/// other. Each pad has at most one successor, so each chain is walked once.
void Verifier::verifySiblingFuncletUnwinds() {
  llvm::TimeTraceScope timeScope("Verifier verify sibling funclet unwinds");
  // Visited: pads whose chains are fully checked. Active: pads on the chain
  // currently being walked (cycle detection).
  SmallPtrSet<Instruction *, 8> Visited;
  SmallPtrSet<Instruction *, 8> Active;
  for (const auto &Pair : SiblingFuncletInfo) {
    Instruction *PredPad = Pair.first;
    if (Visited.count(PredPad))
      continue;
    Active.insert(PredPad);
    Instruction *Terminator = Pair.second;
    do {
      Instruction *SuccPad = getSuccPad(Terminator);
      if (Active.count(SuccPad)) {
        // Found a cycle; report error
        // Re-walk the cycle to collect its members for the diagnostic.
        Instruction *CyclePad = SuccPad;
        SmallVector<Instruction *, 8> CycleNodes;
        do {
          CycleNodes.push_back(CyclePad);
          // Every pad on the cycle is Active, so it has a map entry;
          // operator[] will not insert here.
          Instruction *CycleTerminator = SiblingFuncletInfo[CyclePad];
          if (CycleTerminator != CyclePad)
            CycleNodes.push_back(CycleTerminator);
          CyclePad = getSuccPad(CycleTerminator);
        } while (CyclePad != SuccPad);
        Check(false, "EH pads can't handle each other's exceptions",
              ArrayRef<Instruction *>(CycleNodes));
      }
      // Don't re-walk a node we've already checked
      if (!Visited.insert(SuccPad).second)
        break;
      // Walk to this successor if it has a map entry.
      PredPad = SuccPad;
      auto TermI = SiblingFuncletInfo.find(PredPad);
      if (TermI == SiblingFuncletInfo.end())
        break;
      Terminator = TermI->second;
      Active.insert(PredPad);
    } while (true);
    // Each node only has one successor, so we've walked all the active
    // nodes' successors.
    Active.clear();
  }
}
3078
3079// visitFunction - Verify that a function is ok.
3080//
void Verifier::visitFunction(const Function &F) {
  visitGlobalValue(F);

  // Check function arguments.
  FunctionType *FT = F.getFunctionType();
  unsigned NumArgs = F.arg_size();

  Check(&Context == &F.getContext(),
        "Function context does not match Module context!", &F);

  Check(!F.hasCommonLinkage(), "Functions may not have common linkage", &F);
  Check(FT->getNumParams() == NumArgs,
        "# formal arguments must match # of arguments for function type!", &F,
        FT);
  Check(F.getReturnType()->isFirstClassType() ||
            F.getReturnType()->isVoidTy() || F.getReturnType()->isStructTy(),
        "Functions cannot return aggregate values!", &F);

  Check(!F.hasStructRetAttr() || F.getReturnType()->isVoidTy(),
        "Invalid struct return type!", &F);

  if (MaybeAlign A = F.getAlign()) {
    Check(A->value() <= Value::MaximumAlignment,
          "huge alignment values are unsupported", &F);
  }

  AttributeList Attrs = F.getAttributes();

  Check(verifyAttributeCount(Attrs, FT->getNumParams()),
        "Attribute after last parameter!", &F);

  bool IsIntrinsic = F.isIntrinsic();

  // Check function attributes.
  verifyFunctionAttrs(FT, Attrs, &F, IsIntrinsic, /* IsInlineAsm */ false);

  // On function declarations/definitions, we do not support the builtin
  // attribute. We do not check this in VerifyFunctionAttrs since that is
  // checking for Attributes that can/can not ever be on functions.
  Check(!Attrs.hasFnAttr(Attribute::Builtin),
        "Attribute 'builtin' can only be applied to a callsite.", &F);

  Check(!Attrs.hasAttrSomewhere(Attribute::ElementType),
        "Attribute 'elementtype' can only be applied to a callsite.", &F);

  // Naked functions have no usable frame, so their arguments may not be
  // referenced from IR.
  if (Attrs.hasFnAttr(Attribute::Naked))
    for (const Argument &Arg : F.args())
      Check(Arg.use_empty(), "cannot use argument of naked function", &Arg);

  // Check that this function meets the restrictions on this calling convention.
  // Sometimes varargs is used for perfectly forwarding thunks, so some of these
  // restrictions can be lifted.
  switch (F.getCallingConv()) {
  default:
  case CallingConv::C:
    break;
  case CallingConv::X86_INTR: {
    Check(F.arg_empty() || Attrs.hasParamAttr(0, Attribute::ByVal),
          "Calling convention parameter requires byval", &F);
    break;
  }
  case CallingConv::AMDGPU_KERNEL:
  case CallingConv::SPIR_KERNEL:
  case CallingConv::AMDGPU_CS_Chain:
  case CallingConv::AMDGPU_CS_ChainPreserve:
    Check(F.getReturnType()->isVoidTy(),
          "Calling convention requires void return type", &F);
    [[fallthrough]];
  case CallingConv::AMDGPU_VS:
  case CallingConv::AMDGPU_HS:
  case CallingConv::AMDGPU_GS:
  case CallingConv::AMDGPU_PS:
  case CallingConv::AMDGPU_CS:
    Check(!F.hasStructRetAttr(), "Calling convention does not allow sret", &F);
    if (F.getCallingConv() != CallingConv::SPIR_KERNEL) {
      const unsigned StackAS = DL.getAllocaAddrSpace();
      unsigned i = 0;
      for (const Argument &Arg : F.args()) {
        Check(!Attrs.hasParamAttr(i, Attribute::ByVal),
              "Calling convention disallows byval", &F);
        Check(!Attrs.hasParamAttr(i, Attribute::Preallocated),
              "Calling convention disallows preallocated", &F);
        Check(!Attrs.hasParamAttr(i, Attribute::InAlloca),
              "Calling convention disallows inalloca", &F);

        if (Attrs.hasParamAttr(i, Attribute::ByRef)) {
          // FIXME: Should also disallow LDS and GDS, but we don't have the enum
          // value here.
          Check(Arg.getType()->getPointerAddressSpace() != StackAS,
                "Calling convention disallows stack byref", &F);
        }

        ++i;
      }
    }

    [[fallthrough]];
  case CallingConv::Fast:
  case CallingConv::Cold:
  case CallingConv::Intel_OCL_BI:
  case CallingConv::PTX_Kernel:
  case CallingConv::PTX_Device:
    Check(!F.isVarArg(),
          "Calling convention does not support varargs or "
          "perfect forwarding!",
          &F);
    break;
  case CallingConv::AMDGPU_Gfx_WholeWave:
    Check(!F.arg_empty() && F.arg_begin()->getType()->isIntegerTy(1),
          "Calling convention requires first argument to be i1", &F);
    Check(!F.arg_begin()->hasInRegAttr(),
          "Calling convention requires first argument to not be inreg", &F);
    Check(!F.isVarArg(),
          "Calling convention does not support varargs or "
          "perfect forwarding!",
          &F);
    break;
  }

  // Check that the argument values match the function type for this function...
  unsigned i = 0;
  for (const Argument &Arg : F.args()) {
    Check(Arg.getType() == FT->getParamType(i),
          "Argument value does not match function argument type!", &Arg,
          FT->getParamType(i));
    Check(Arg.getType()->isFirstClassType(),
          "Function arguments must have first-class types!", &Arg);
    if (!IsIntrinsic) {
      // Metadata, token, and x86_amx values may only flow through intrinsics.
      Check(!Arg.getType()->isMetadataTy(),
            "Function takes metadata but isn't an intrinsic", &Arg, &F);
      Check(!Arg.getType()->isTokenLikeTy(),
            "Function takes token but isn't an intrinsic", &Arg, &F);
      Check(!Arg.getType()->isX86_AMXTy(),
            "Function takes x86_amx but isn't an intrinsic", &Arg, &F);
    }

    // Check that swifterror argument is only used by loads and stores.
    if (Attrs.hasParamAttr(i, Attribute::SwiftError)) {
      verifySwiftErrorValue(&Arg);
    }
    ++i;
  }

  if (!IsIntrinsic) {
    Check(!F.getReturnType()->isTokenLikeTy(),
          "Function returns a token but isn't an intrinsic", &F);
    Check(!F.getReturnType()->isX86_AMXTy(),
          "Function returns a x86_amx but isn't an intrinsic", &F);
  }

  // Get the function metadata attachments.
  // NOTE(review): MDs is declared on a line elided from this view.
  F.getAllMetadata(MDs);
  assert(F.hasMetadata() != MDs.empty() && "Bit out-of-sync");
  verifyFunctionMetadata(MDs);

  // Check validity of the personality function
  if (F.hasPersonalityFn()) {
    auto *Per = dyn_cast<Function>(F.getPersonalityFn()->stripPointerCasts());
    if (Per)
      Check(Per->getParent() == F.getParent(),
            "Referencing personality function in another module!", &F,
            F.getParent(), Per, Per->getParent());
  }

  // EH funclet coloring can be expensive, recompute on-demand
  BlockEHFuncletColors.clear();

  if (F.isMaterializable()) {
    // Function has a body somewhere we can't see.
    Check(MDs.empty(), "unmaterialized function cannot have metadata", &F,
          MDs.empty() ? nullptr : MDs.front().second);
  } else if (F.isDeclaration()) {
    for (const auto &I : MDs) {
      // This is used for call site debug information.
      CheckDI(I.first != LLVMContext::MD_dbg ||
                  !cast<DISubprogram>(I.second)->isDistinct(),
              "function declaration may only have a unique !dbg attachment",
              &F);
      Check(I.first != LLVMContext::MD_prof,
            "function declaration may not have a !prof attachment", &F);

      // Verify the metadata itself.
      visitMDNode(*I.second, AreDebugLocsAllowed::Yes);
    }
    Check(!F.hasPersonalityFn(),
          "Function declaration shouldn't have a personality routine", &F);
  } else {
    // Verify that this function (which has a body) is not named "llvm.*". It
    // is not legal to define intrinsics.
    Check(!IsIntrinsic, "llvm intrinsics cannot be defined!", &F);

    // Check the entry node
    const BasicBlock *Entry = &F.getEntryBlock();
    Check(pred_empty(Entry),
          "Entry block to function must not have predecessors!", Entry);

    // The address of the entry block cannot be taken, unless it is dead.
    if (Entry->hasAddressTaken()) {
      Check(!BlockAddress::lookup(Entry)->isConstantUsed(),
            "blockaddress may not be used with the entry block!", Entry);
    }

    unsigned NumDebugAttachments = 0, NumProfAttachments = 0,
             NumKCFIAttachments = 0;
    // Visit metadata attachments.
    for (const auto &I : MDs) {
      // Verify that the attachment is legal.
      auto AllowLocs = AreDebugLocsAllowed::No;
      switch (I.first) {
      default:
        break;
      case LLVMContext::MD_dbg: {
        // A definition gets exactly one distinct DISubprogram, and each
        // DISubprogram may be attached to at most one function.
        ++NumDebugAttachments;
        CheckDI(NumDebugAttachments == 1,
                "function must have a single !dbg attachment", &F, I.second);
        CheckDI(isa<DISubprogram>(I.second),
                "function !dbg attachment must be a subprogram", &F, I.second);
        CheckDI(cast<DISubprogram>(I.second)->isDistinct(),
                "function definition may only have a distinct !dbg attachment",
                &F);

        auto *SP = cast<DISubprogram>(I.second);
        const Function *&AttachedTo = DISubprogramAttachments[SP];
        CheckDI(!AttachedTo || AttachedTo == &F,
                "DISubprogram attached to more than one function", SP, &F);
        AttachedTo = &F;
        AllowLocs = AreDebugLocsAllowed::Yes;
        break;
      }
      case LLVMContext::MD_prof:
        ++NumProfAttachments;
        Check(NumProfAttachments == 1,
              "function must have a single !prof attachment", &F, I.second);
        break;
      case LLVMContext::MD_kcfi_type:
        ++NumKCFIAttachments;
        Check(NumKCFIAttachments == 1,
              "function must have a single !kcfi_type attachment", &F,
              I.second);
        break;
      }

      // Verify the metadata itself.
      visitMDNode(*I.second, AllowLocs);
    }
  }

  // If this function is actually an intrinsic, verify that it is only used in
  // direct call/invokes, never having its "address taken".
  // Only do this if the module is materialized, otherwise we don't have all the
  // uses.
  if (F.isIntrinsic() && F.getParent()->isMaterialized()) {
    const User *U;
    if (F.hasAddressTaken(&U, false, true, false,
                          /*IgnoreARCAttachedCall=*/true))
      Check(false, "Invalid user of intrinsic instruction!", U);
  }

  // Check intrinsics' signatures.
  switch (F.getIntrinsicID()) {
  case Intrinsic::experimental_gc_get_pointer_base: {
    FunctionType *FT = F.getFunctionType();
    Check(FT->getNumParams() == 1, "wrong number of parameters", F);
    Check(isa<PointerType>(F.getReturnType()),
          "gc.get.pointer.base must return a pointer", F);
    Check(FT->getParamType(0) == F.getReturnType(),
          "gc.get.pointer.base operand and result must be of the same type", F);
    break;
  }
  case Intrinsic::experimental_gc_get_pointer_offset: {
    FunctionType *FT = F.getFunctionType();
    Check(FT->getNumParams() == 1, "wrong number of parameters", F);
    Check(isa<PointerType>(FT->getParamType(0)),
          "gc.get.pointer.offset operand must be a pointer", F);
    Check(F.getReturnType()->isIntegerTy(),
          "gc.get.pointer.offset must return integer", F);
    break;
  }
  }

  // Remaining checks only apply when the function carries debug info.
  auto *N = F.getSubprogram();
  HasDebugInfo = (N != nullptr);
  if (!HasDebugInfo)
    return;

  // Check that all !dbg attachments lead to back to N.
  //
  // FIXME: Check this incrementally while visiting !dbg attachments.
  // FIXME: Only check when N is the canonical subprogram for F.
  SmallPtrSet<const MDNode *, 32> Seen;
  auto VisitDebugLoc = [&](const Instruction &I, const MDNode *Node) {
    // Be careful about using DILocation here since we might be dealing with
    // broken code (this is the Verifier after all).
    const DILocation *DL = dyn_cast_or_null<DILocation>(Node);
    if (!DL)
      return;
    if (!Seen.insert(DL).second)
      return;

    Metadata *Parent = DL->getRawScope();
    CheckDI(Parent && isa<DILocalScope>(Parent),
            "DILocation's scope must be a DILocalScope", N, &F, &I, DL, Parent);

    DILocalScope *Scope = DL->getInlinedAtScope();
    Check(Scope, "Failed to find DILocalScope", DL);

    if (!Seen.insert(Scope).second)
      return;

    DISubprogram *SP = Scope->getSubprogram();

    // Scope and SP could be the same MDNode and we don't want to skip
    // validation in that case
    if ((Scope != SP) && !Seen.insert(SP).second)
      return;

    CheckDI(SP->describes(&F),
            "!dbg attachment points at wrong subprogram for function", N, &F,
            &I, DL, Scope, SP);
  };
  for (auto &BB : F)
    for (auto &I : BB) {
      VisitDebugLoc(I, I.getDebugLoc().getAsMDNode());
      // The llvm.loop annotations also contain two DILocations.
      if (auto MD = I.getMetadata(LLVMContext::MD_loop))
        for (unsigned i = 1; i < MD->getNumOperands(); ++i)
          VisitDebugLoc(I, dyn_cast_or_null<MDNode>(MD->getOperand(i)));
      // Stop as soon as broken debug info is seen; further checks add noise.
      if (BrokenDebugInfo)
        return;
    }
}
3413
3414// verifyBasicBlock - Verify that a basic block is well formed...
3415//
/// Verify structural properties of a basic block: it has a terminator, its
/// PHI nodes exactly match its predecessor set, every instruction's parent
/// pointer is correct, and no DbgRecords trail the block.
void Verifier::visitBasicBlock(BasicBlock &BB) {
  InstsInThisBlock.clear();
  ConvergenceVerifyHelper.visit(BB);

  // Ensure that basic blocks have terminators!
  Check(BB.getTerminator(), "Basic Block does not have terminator!", &BB);

  // Check constraints that this basic block imposes on all of the PHI nodes in
  // it.
  if (isa<PHINode>(BB.front())) {
    SmallVector<BasicBlock *, 8> Preds(predecessors(&BB));
    // Sort predecessors and PHI entries so they can be compared pairwise.
    llvm::sort(Preds);
    for (const PHINode &PN : BB.phis()) {
      Check(PN.getNumIncomingValues() == Preds.size(),
            "PHINode should have one entry for each predecessor of its "
            "parent basic block!",
            &PN);

      // Get and sort all incoming values in the PHI node...
      Values.clear();
      Values.reserve(PN.getNumIncomingValues());
      for (unsigned i = 0, e = PN.getNumIncomingValues(); i != e; ++i)
        Values.push_back(
            std::make_pair(PN.getIncomingBlock(i), PN.getIncomingValue(i)));
      llvm::sort(Values);

      for (unsigned i = 0, e = Values.size(); i != e; ++i) {
        // Check to make sure that if there is more than one entry for a
        // particular basic block in this PHI node, that the incoming values are
        // all identical.
        //
        Check(i == 0 || Values[i].first != Values[i - 1].first ||
                  Values[i].second == Values[i - 1].second,
              "PHI node has multiple entries for the same basic block with "
              "different incoming values!",
              &PN, Values[i].first, Values[i].second, Values[i - 1].second);

        // Check to make sure that the predecessors and PHI node entries are
        // matched up.
        Check(Values[i].first == Preds[i],
              "PHI node entries do not match predecessors!", &PN,
              Values[i].first, Preds[i]);
      }
    }
  }

  // Check that all instructions have their parent pointers set up correctly.
  for (auto &I : BB)
  {
    Check(I.getParent() == &BB, "Instruction has bogus parent pointer!");
  }

  // Confirm that no issues arise from the debug program.
  CheckDI(!BB.getTrailingDbgRecords(), "Basic Block has trailing DbgRecords!",
          &BB);
}
3473
3474void Verifier::visitTerminator(Instruction &I) {
3475 // Ensure that terminators only exist at the end of the basic block.
3476 Check(&I == I.getParent()->getTerminator(),
3477 "Terminator found in the middle of a basic block!", I.getParent());
3478 visitInstruction(I);
3479}
3480
/// Verify a conditional branch, then apply the common terminator checks.
void Verifier::visitCondBrInst(CondBrInst &BI) {
        // The condition must be a scalar i1 (see message below).
        "Branch condition is not 'i1' type!", &BI, BI.getCondition());
  visitTerminator(BI);
}
3486
3487void Verifier::visitReturnInst(ReturnInst &RI) {
3488 Function *F = RI.getParent()->getParent();
3489 unsigned N = RI.getNumOperands();
3490 if (F->getReturnType()->isVoidTy())
3491 Check(N == 0,
3492 "Found return instr that returns non-void in Function of void "
3493 "return type!",
3494 &RI, F->getReturnType());
3495 else
3496 Check(N == 1 && F->getReturnType() == RI.getOperand(0)->getType(),
3497 "Function return type does not match operand "
3498 "type of return inst!",
3499 &RI, F->getReturnType());
3500
3501 // Check to make sure that the return value has necessary properties for
3502 // terminators...
3503 visitTerminator(RI);
3504}
3505
3506void Verifier::visitSwitchInst(SwitchInst &SI) {
3507 Check(SI.getType()->isVoidTy(), "Switch must have void result type!", &SI);
3508 // Check to make sure that all of the constants in the switch instruction
3509 // have the same type as the switched-on value.
3510 Type *SwitchTy = SI.getCondition()->getType();
3511 SmallPtrSet<ConstantInt*, 32> Constants;
3512 for (auto &Case : SI.cases()) {
3513 Check(isa<ConstantInt>(Case.getCaseValue()),
3514 "Case value is not a constant integer.", &SI);
3515 Check(Case.getCaseValue()->getType() == SwitchTy,
3516 "Switch constants must all be same type as switch value!", &SI);
3517 Check(Constants.insert(Case.getCaseValue()).second,
3518 "Duplicate integer as switch case", &SI, Case.getCaseValue());
3519 }
3520
3521 visitTerminator(SI);
3522}
3523
3524void Verifier::visitIndirectBrInst(IndirectBrInst &BI) {
3526 "Indirectbr operand must have pointer type!", &BI);
3527 for (unsigned i = 0, e = BI.getNumDestinations(); i != e; ++i)
3529 "Indirectbr destinations must all have pointer type!", &BI);
3530
3531 visitTerminator(BI);
3532}
3533
3534void Verifier::visitCallBrInst(CallBrInst &CBI) {
3535 if (!CBI.isInlineAsm()) {
3537 "Callbr: indirect function / invalid signature");
3538 Check(!CBI.hasOperandBundles(),
3539 "Callbr for intrinsics currently doesn't support operand bundles");
3540
3541 switch (CBI.getIntrinsicID()) {
3542 case Intrinsic::amdgcn_kill: {
3543 Check(CBI.getNumIndirectDests() == 1,
3544 "Callbr amdgcn_kill only supports one indirect dest");
3545 bool Unreachable = isa<UnreachableInst>(CBI.getIndirectDest(0)->begin());
3546 CallInst *Call = dyn_cast<CallInst>(CBI.getIndirectDest(0)->begin());
3547 Check(Unreachable || (Call && Call->getIntrinsicID() ==
3548 Intrinsic::amdgcn_unreachable),
3549 "Callbr amdgcn_kill indirect dest needs to be unreachable");
3550 break;
3551 }
3552 default:
3553 CheckFailed(
3554 "Callbr currently only supports asm-goto and selected intrinsics");
3555 }
3556 visitIntrinsicCall(CBI.getIntrinsicID(), CBI);
3557 } else {
3558 const InlineAsm *IA = cast<InlineAsm>(CBI.getCalledOperand());
3559 Check(!IA->canThrow(), "Unwinding from Callbr is not allowed");
3560
3561 verifyInlineAsmCall(CBI);
3562 }
3563 visitTerminator(CBI);
3564}
3565
3566void Verifier::visitSelectInst(SelectInst &SI) {
3567 Check(!SelectInst::areInvalidOperands(SI.getOperand(0), SI.getOperand(1),
3568 SI.getOperand(2)),
3569 "Invalid operands for select instruction!", &SI);
3570
3571 Check(SI.getTrueValue()->getType() == SI.getType(),
3572 "Select values must have same type as select instruction!", &SI);
3573 visitInstruction(SI);
3574}
3575
3576/// visitUserOp1 - User defined operators shouldn't live beyond the lifetime of
3577/// a pass, if any exist, it's an error.
3578///
3579void Verifier::visitUserOp1(Instruction &I) {
3580 Check(false, "User-defined operators should not live outside of a pass!", &I);
3581}
3582
3583void Verifier::visitTruncInst(TruncInst &I) {
3584 // Get the source and destination types
3585 Type *SrcTy = I.getOperand(0)->getType();
3586 Type *DestTy = I.getType();
3587
3588 // Get the size of the types in bits, we'll need this later
3589 unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
3590 unsigned DestBitSize = DestTy->getScalarSizeInBits();
3591
3592 Check(SrcTy->isIntOrIntVectorTy(), "Trunc only operates on integer", &I);
3593 Check(DestTy->isIntOrIntVectorTy(), "Trunc only produces integer", &I);
3594 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(),
3595 "trunc source and destination must both be a vector or neither", &I);
3596 Check(SrcBitSize > DestBitSize, "DestTy too big for Trunc", &I);
3597
3598 visitInstruction(I);
3599}
3600
3601void Verifier::visitZExtInst(ZExtInst &I) {
3602 // Get the source and destination types
3603 Type *SrcTy = I.getOperand(0)->getType();
3604 Type *DestTy = I.getType();
3605
3606 // Get the size of the types in bits, we'll need this later
3607 Check(SrcTy->isIntOrIntVectorTy(), "ZExt only operates on integer", &I);
3608 Check(DestTy->isIntOrIntVectorTy(), "ZExt only produces an integer", &I);
3609 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(),
3610 "zext source and destination must both be a vector or neither", &I);
3611 unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
3612 unsigned DestBitSize = DestTy->getScalarSizeInBits();
3613
3614 Check(SrcBitSize < DestBitSize, "Type too small for ZExt", &I);
3615
3616 visitInstruction(I);
3617}
3618
3619void Verifier::visitSExtInst(SExtInst &I) {
3620 // Get the source and destination types
3621 Type *SrcTy = I.getOperand(0)->getType();
3622 Type *DestTy = I.getType();
3623
3624 // Get the size of the types in bits, we'll need this later
3625 unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
3626 unsigned DestBitSize = DestTy->getScalarSizeInBits();
3627
3628 Check(SrcTy->isIntOrIntVectorTy(), "SExt only operates on integer", &I);
3629 Check(DestTy->isIntOrIntVectorTy(), "SExt only produces an integer", &I);
3630 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(),
3631 "sext source and destination must both be a vector or neither", &I);
3632 Check(SrcBitSize < DestBitSize, "Type too small for SExt", &I);
3633
3634 visitInstruction(I);
3635}
3636
3637void Verifier::visitFPTruncInst(FPTruncInst &I) {
3638 // Get the source and destination types
3639 Type *SrcTy = I.getOperand(0)->getType();
3640 Type *DestTy = I.getType();
3641 // Get the size of the types in bits, we'll need this later
3642 unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
3643 unsigned DestBitSize = DestTy->getScalarSizeInBits();
3644
3645 Check(SrcTy->isFPOrFPVectorTy(), "FPTrunc only operates on FP", &I);
3646 Check(DestTy->isFPOrFPVectorTy(), "FPTrunc only produces an FP", &I);
3647 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(),
3648 "fptrunc source and destination must both be a vector or neither", &I);
3649 Check(SrcBitSize > DestBitSize, "DestTy too big for FPTrunc", &I);
3650
3651 visitInstruction(I);
3652}
3653
3654void Verifier::visitFPExtInst(FPExtInst &I) {
3655 // Get the source and destination types
3656 Type *SrcTy = I.getOperand(0)->getType();
3657 Type *DestTy = I.getType();
3658
3659 // Get the size of the types in bits, we'll need this later
3660 unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
3661 unsigned DestBitSize = DestTy->getScalarSizeInBits();
3662
3663 Check(SrcTy->isFPOrFPVectorTy(), "FPExt only operates on FP", &I);
3664 Check(DestTy->isFPOrFPVectorTy(), "FPExt only produces an FP", &I);
3665 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(),
3666 "fpext source and destination must both be a vector or neither", &I);
3667 Check(SrcBitSize < DestBitSize, "DestTy too small for FPExt", &I);
3668
3669 visitInstruction(I);
3670}
3671
3672void Verifier::visitUIToFPInst(UIToFPInst &I) {
3673 // Get the source and destination types
3674 Type *SrcTy = I.getOperand(0)->getType();
3675 Type *DestTy = I.getType();
3676
3677 bool SrcVec = SrcTy->isVectorTy();
3678 bool DstVec = DestTy->isVectorTy();
3679
3680 Check(SrcVec == DstVec,
3681 "UIToFP source and dest must both be vector or scalar", &I);
3682 Check(SrcTy->isIntOrIntVectorTy(),
3683 "UIToFP source must be integer or integer vector", &I);
3684 Check(DestTy->isFPOrFPVectorTy(), "UIToFP result must be FP or FP vector",
3685 &I);
3686
3687 if (SrcVec && DstVec)
3688 Check(cast<VectorType>(SrcTy)->getElementCount() ==
3689 cast<VectorType>(DestTy)->getElementCount(),
3690 "UIToFP source and dest vector length mismatch", &I);
3691
3692 visitInstruction(I);
3693}
3694
3695void Verifier::visitSIToFPInst(SIToFPInst &I) {
3696 // Get the source and destination types
3697 Type *SrcTy = I.getOperand(0)->getType();
3698 Type *DestTy = I.getType();
3699
3700 bool SrcVec = SrcTy->isVectorTy();
3701 bool DstVec = DestTy->isVectorTy();
3702
3703 Check(SrcVec == DstVec,
3704 "SIToFP source and dest must both be vector or scalar", &I);
3705 Check(SrcTy->isIntOrIntVectorTy(),
3706 "SIToFP source must be integer or integer vector", &I);
3707 Check(DestTy->isFPOrFPVectorTy(), "SIToFP result must be FP or FP vector",
3708 &I);
3709
3710 if (SrcVec && DstVec)
3711 Check(cast<VectorType>(SrcTy)->getElementCount() ==
3712 cast<VectorType>(DestTy)->getElementCount(),
3713 "SIToFP source and dest vector length mismatch", &I);
3714
3715 visitInstruction(I);
3716}
3717
3718void Verifier::visitFPToUIInst(FPToUIInst &I) {
3719 // Get the source and destination types
3720 Type *SrcTy = I.getOperand(0)->getType();
3721 Type *DestTy = I.getType();
3722
3723 bool SrcVec = SrcTy->isVectorTy();
3724 bool DstVec = DestTy->isVectorTy();
3725
3726 Check(SrcVec == DstVec,
3727 "FPToUI source and dest must both be vector or scalar", &I);
3728 Check(SrcTy->isFPOrFPVectorTy(), "FPToUI source must be FP or FP vector", &I);
3729 Check(DestTy->isIntOrIntVectorTy(),
3730 "FPToUI result must be integer or integer vector", &I);
3731
3732 if (SrcVec && DstVec)
3733 Check(cast<VectorType>(SrcTy)->getElementCount() ==
3734 cast<VectorType>(DestTy)->getElementCount(),
3735 "FPToUI source and dest vector length mismatch", &I);
3736
3737 visitInstruction(I);
3738}
3739
3740void Verifier::visitFPToSIInst(FPToSIInst &I) {
3741 // Get the source and destination types
3742 Type *SrcTy = I.getOperand(0)->getType();
3743 Type *DestTy = I.getType();
3744
3745 bool SrcVec = SrcTy->isVectorTy();
3746 bool DstVec = DestTy->isVectorTy();
3747
3748 Check(SrcVec == DstVec,
3749 "FPToSI source and dest must both be vector or scalar", &I);
3750 Check(SrcTy->isFPOrFPVectorTy(), "FPToSI source must be FP or FP vector", &I);
3751 Check(DestTy->isIntOrIntVectorTy(),
3752 "FPToSI result must be integer or integer vector", &I);
3753
3754 if (SrcVec && DstVec)
3755 Check(cast<VectorType>(SrcTy)->getElementCount() ==
3756 cast<VectorType>(DestTy)->getElementCount(),
3757 "FPToSI source and dest vector length mismatch", &I);
3758
3759 visitInstruction(I);
3760}
3761
3762void Verifier::checkPtrToAddr(Type *SrcTy, Type *DestTy, const Value &V) {
3763 Check(SrcTy->isPtrOrPtrVectorTy(), "PtrToAddr source must be pointer", V);
3764 Check(DestTy->isIntOrIntVectorTy(), "PtrToAddr result must be integral", V);
3765 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(), "PtrToAddr type mismatch",
3766 V);
3767
3768 if (SrcTy->isVectorTy()) {
3769 auto *VSrc = cast<VectorType>(SrcTy);
3770 auto *VDest = cast<VectorType>(DestTy);
3771 Check(VSrc->getElementCount() == VDest->getElementCount(),
3772 "PtrToAddr vector length mismatch", V);
3773 }
3774
3775 Type *AddrTy = DL.getAddressType(SrcTy);
3776 Check(AddrTy == DestTy, "PtrToAddr result must be address width", V);
3777}
3778
3779void Verifier::visitPtrToAddrInst(PtrToAddrInst &I) {
3780 checkPtrToAddr(I.getOperand(0)->getType(), I.getType(), I);
3781 visitInstruction(I);
3782}
3783
3784void Verifier::visitPtrToIntInst(PtrToIntInst &I) {
3785 // Get the source and destination types
3786 Type *SrcTy = I.getOperand(0)->getType();
3787 Type *DestTy = I.getType();
3788
3789 Check(SrcTy->isPtrOrPtrVectorTy(), "PtrToInt source must be pointer", &I);
3790
3791 Check(DestTy->isIntOrIntVectorTy(), "PtrToInt result must be integral", &I);
3792 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(), "PtrToInt type mismatch",
3793 &I);
3794
3795 if (SrcTy->isVectorTy()) {
3796 auto *VSrc = cast<VectorType>(SrcTy);
3797 auto *VDest = cast<VectorType>(DestTy);
3798 Check(VSrc->getElementCount() == VDest->getElementCount(),
3799 "PtrToInt Vector length mismatch", &I);
3800 }
3801
3802 visitInstruction(I);
3803}
3804
3805void Verifier::visitIntToPtrInst(IntToPtrInst &I) {
3806 // Get the source and destination types
3807 Type *SrcTy = I.getOperand(0)->getType();
3808 Type *DestTy = I.getType();
3809
3810 Check(SrcTy->isIntOrIntVectorTy(), "IntToPtr source must be an integral", &I);
3811 Check(DestTy->isPtrOrPtrVectorTy(), "IntToPtr result must be a pointer", &I);
3812
3813 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(), "IntToPtr type mismatch",
3814 &I);
3815 if (SrcTy->isVectorTy()) {
3816 auto *VSrc = cast<VectorType>(SrcTy);
3817 auto *VDest = cast<VectorType>(DestTy);
3818 Check(VSrc->getElementCount() == VDest->getElementCount(),
3819 "IntToPtr Vector length mismatch", &I);
3820 }
3821 visitInstruction(I);
3822}
3823
3824void Verifier::visitBitCastInst(BitCastInst &I) {
3825 Check(
3826 CastInst::castIsValid(Instruction::BitCast, I.getOperand(0), I.getType()),
3827 "Invalid bitcast", &I);
3828 visitInstruction(I);
3829}
3830
3831void Verifier::visitAddrSpaceCastInst(AddrSpaceCastInst &I) {
3832 Type *SrcTy = I.getOperand(0)->getType();
3833 Type *DestTy = I.getType();
3834
3835 Check(SrcTy->isPtrOrPtrVectorTy(), "AddrSpaceCast source must be a pointer",
3836 &I);
3837 Check(DestTy->isPtrOrPtrVectorTy(), "AddrSpaceCast result must be a pointer",
3838 &I);
3840 "AddrSpaceCast must be between different address spaces", &I);
3841 if (auto *SrcVTy = dyn_cast<VectorType>(SrcTy))
3842 Check(SrcVTy->getElementCount() ==
3843 cast<VectorType>(DestTy)->getElementCount(),
3844 "AddrSpaceCast vector pointer number of elements mismatch", &I);
3845 visitInstruction(I);
3846}
3847
3848/// visitPHINode - Ensure that a PHI node is well formed.
3849///
3850void Verifier::visitPHINode(PHINode &PN) {
3851 // Ensure that the PHI nodes are all grouped together at the top of the block.
3852 // This can be tested by checking whether the instruction before this is
3853 // either nonexistent (because this is begin()) or is a PHI node. If not,
3854 // then there is some other instruction before a PHI.
3855 Check(&PN == &PN.getParent()->front() ||
3857 "PHI nodes not grouped at top of basic block!", &PN, PN.getParent());
3858
3859 // Check that a PHI doesn't yield a Token.
3860 Check(!PN.getType()->isTokenLikeTy(), "PHI nodes cannot have token type!");
3861
3862 // Check that all of the values of the PHI node have the same type as the
3863 // result.
3864 for (Value *IncValue : PN.incoming_values()) {
3865 Check(PN.getType() == IncValue->getType(),
3866 "PHI node operands are not the same type as the result!", &PN);
3867 }
3868
3869 // All other PHI node constraints are checked in the visitBasicBlock method.
3870
3871 visitInstruction(PN);
3872}
3873
3874void Verifier::visitCallBase(CallBase &Call) {
3876 "Called function must be a pointer!", Call);
3877 FunctionType *FTy = Call.getFunctionType();
3878
3879 // Verify that the correct number of arguments are being passed
3880 if (FTy->isVarArg())
3881 Check(Call.arg_size() >= FTy->getNumParams(),
3882 "Called function requires more parameters than were provided!", Call);
3883 else
3884 Check(Call.arg_size() == FTy->getNumParams(),
3885 "Incorrect number of arguments passed to called function!", Call);
3886
3887 // Verify that all arguments to the call match the function type.
3888 for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i)
3889 Check(Call.getArgOperand(i)->getType() == FTy->getParamType(i),
3890 "Call parameter type does not match function signature!",
3891 Call.getArgOperand(i), FTy->getParamType(i), Call);
3892
3893 AttributeList Attrs = Call.getAttributes();
3894
3895 Check(verifyAttributeCount(Attrs, Call.arg_size()),
3896 "Attribute after last parameter!", Call);
3897
3898 Function *Callee =
3900 bool IsIntrinsic = Callee && Callee->isIntrinsic();
3901 if (IsIntrinsic)
3902 Check(Callee->getFunctionType() == FTy,
3903 "Intrinsic called with incompatible signature", Call);
3904
3905 // Verify if the calling convention of the callee is callable.
3907 "calling convention does not permit calls", Call);
3908
3909 // Disallow passing/returning values with alignment higher than we can
3910 // represent.
3911 // FIXME: Consider making DataLayout cap the alignment, so this isn't
3912 // necessary.
3913 auto VerifyTypeAlign = [&](Type *Ty, const Twine &Message) {
3914 if (!Ty->isSized())
3915 return;
3916 Align ABIAlign = DL.getABITypeAlign(Ty);
3917 Check(ABIAlign.value() <= Value::MaximumAlignment,
3918 "Incorrect alignment of " + Message + " to called function!", Call);
3919 };
3920
3921 if (!IsIntrinsic) {
3922 VerifyTypeAlign(FTy->getReturnType(), "return type");
3923 for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i) {
3924 Type *Ty = FTy->getParamType(i);
3925 VerifyTypeAlign(Ty, "argument passed");
3926 }
3927 }
3928
3929 if (Attrs.hasFnAttr(Attribute::Speculatable)) {
3930 // Don't allow speculatable on call sites, unless the underlying function
3931 // declaration is also speculatable.
3932 Check(Callee && Callee->isSpeculatable(),
3933 "speculatable attribute may not apply to call sites", Call);
3934 }
3935
3936 if (Attrs.hasFnAttr(Attribute::Preallocated)) {
3937 Check(Call.getIntrinsicID() == Intrinsic::call_preallocated_arg,
3938 "preallocated as a call site attribute can only be on "
3939 "llvm.call.preallocated.arg");
3940 }
3941
3942 Check(!Attrs.hasFnAttr(Attribute::DenormalFPEnv),
3943 "denormal_fpenv attribute may not apply to call sites", Call);
3944
3945 // Verify call attributes.
3946 verifyFunctionAttrs(FTy, Attrs, &Call, IsIntrinsic, Call.isInlineAsm());
3947
3948 // Conservatively check the inalloca argument.
3949 // We have a bug if we can find that there is an underlying alloca without
3950 // inalloca.
3951 if (Call.hasInAllocaArgument()) {
3952 Value *InAllocaArg = Call.getArgOperand(FTy->getNumParams() - 1);
3953 if (auto AI = dyn_cast<AllocaInst>(InAllocaArg->stripInBoundsOffsets()))
3954 Check(AI->isUsedWithInAlloca(),
3955 "inalloca argument for call has mismatched alloca", AI, Call);
3956 }
3957
3958 // For each argument of the callsite, if it has the swifterror argument,
3959 // make sure the underlying alloca/parameter it comes from has a swifterror as
3960 // well.
3961 for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i) {
3962 if (Call.paramHasAttr(i, Attribute::SwiftError)) {
3963 Value *SwiftErrorArg = Call.getArgOperand(i);
3964 if (auto AI = dyn_cast<AllocaInst>(SwiftErrorArg->stripInBoundsOffsets())) {
3965 Check(AI->isSwiftError(),
3966 "swifterror argument for call has mismatched alloca", AI, Call);
3967 continue;
3968 }
3969 auto ArgI = dyn_cast<Argument>(SwiftErrorArg);
3970 Check(ArgI, "swifterror argument should come from an alloca or parameter",
3971 SwiftErrorArg, Call);
3972 Check(ArgI->hasSwiftErrorAttr(),
3973 "swifterror argument for call has mismatched parameter", ArgI,
3974 Call);
3975 }
3976
3977 if (Attrs.hasParamAttr(i, Attribute::ImmArg)) {
3978 // Don't allow immarg on call sites, unless the underlying declaration
3979 // also has the matching immarg.
3980 Check(Callee && Callee->hasParamAttribute(i, Attribute::ImmArg),
3981 "immarg may not apply only to call sites", Call.getArgOperand(i),
3982 Call);
3983 }
3984
3985 if (Call.paramHasAttr(i, Attribute::ImmArg)) {
3986 Value *ArgVal = Call.getArgOperand(i);
3987 Check(isa<ConstantInt>(ArgVal) || isa<ConstantFP>(ArgVal),
3988 "immarg operand has non-immediate parameter", ArgVal, Call);
3989
3990 // If the imm-arg is an integer and also has a range attached,
3991 // check if the given value is within the range.
3992 if (Call.paramHasAttr(i, Attribute::Range)) {
3993 if (auto *CI = dyn_cast<ConstantInt>(ArgVal)) {
3994 const ConstantRange &CR =
3995 Call.getParamAttr(i, Attribute::Range).getValueAsConstantRange();
3996 Check(CR.contains(CI->getValue()),
3997 "immarg value " + Twine(CI->getValue().getSExtValue()) +
3998 " out of range [" + Twine(CR.getLower().getSExtValue()) +
3999 ", " + Twine(CR.getUpper().getSExtValue()) + ")",
4000 Call);
4001 }
4002 }
4003 }
4004
4005 if (Call.paramHasAttr(i, Attribute::Preallocated)) {
4006 Value *ArgVal = Call.getArgOperand(i);
4007 bool hasOB =
4009 bool isMustTail = Call.isMustTailCall();
4010 Check(hasOB != isMustTail,
4011 "preallocated operand either requires a preallocated bundle or "
4012 "the call to be musttail (but not both)",
4013 ArgVal, Call);
4014 }
4015 }
4016
4017 if (FTy->isVarArg()) {
4018 // FIXME? is 'nest' even legal here?
4019 bool SawNest = false;
4020 bool SawReturned = false;
4021
4022 for (unsigned Idx = 0; Idx < FTy->getNumParams(); ++Idx) {
4023 if (Attrs.hasParamAttr(Idx, Attribute::Nest))
4024 SawNest = true;
4025 if (Attrs.hasParamAttr(Idx, Attribute::Returned))
4026 SawReturned = true;
4027 }
4028
4029 // Check attributes on the varargs part.
4030 for (unsigned Idx = FTy->getNumParams(); Idx < Call.arg_size(); ++Idx) {
4031 Type *Ty = Call.getArgOperand(Idx)->getType();
4032 AttributeSet ArgAttrs = Attrs.getParamAttrs(Idx);
4033 verifyParameterAttrs(ArgAttrs, Ty, &Call);
4034
4035 if (ArgAttrs.hasAttribute(Attribute::Nest)) {
4036 Check(!SawNest, "More than one parameter has attribute nest!", Call);
4037 SawNest = true;
4038 }
4039
4040 if (ArgAttrs.hasAttribute(Attribute::Returned)) {
4041 Check(!SawReturned, "More than one parameter has attribute returned!",
4042 Call);
4043 Check(Ty->canLosslesslyBitCastTo(FTy->getReturnType()),
4044 "Incompatible argument and return types for 'returned' "
4045 "attribute",
4046 Call);
4047 SawReturned = true;
4048 }
4049
4050 // Statepoint intrinsic is vararg but the wrapped function may be not.
4051 // Allow sret here and check the wrapped function in verifyStatepoint.
4052 if (Call.getIntrinsicID() != Intrinsic::experimental_gc_statepoint)
4053 Check(!ArgAttrs.hasAttribute(Attribute::StructRet),
4054 "Attribute 'sret' cannot be used for vararg call arguments!",
4055 Call);
4056
4057 if (ArgAttrs.hasAttribute(Attribute::InAlloca))
4058 Check(Idx == Call.arg_size() - 1,
4059 "inalloca isn't on the last argument!", Call);
4060 }
4061 }
4062
4063 // Verify that there's no metadata unless it's a direct call to an intrinsic.
4064 if (!IsIntrinsic) {
4065 for (Type *ParamTy : FTy->params()) {
4066 Check(!ParamTy->isMetadataTy(),
4067 "Function has metadata parameter but isn't an intrinsic", Call);
4068 Check(!ParamTy->isTokenLikeTy(),
4069 "Function has token parameter but isn't an intrinsic", Call);
4070 }
4071 }
4072
4073 // Verify that indirect calls don't return tokens.
4074 if (!Call.getCalledFunction()) {
4075 Check(!FTy->getReturnType()->isTokenLikeTy(),
4076 "Return type cannot be token for indirect call!");
4077 Check(!FTy->getReturnType()->isX86_AMXTy(),
4078 "Return type cannot be x86_amx for indirect call!");
4079 }
4080
4082 visitIntrinsicCall(ID, Call);
4083
4084 // Verify that a callsite has at most one "deopt", at most one "funclet", at
4085 // most one "gc-transition", at most one "cfguardtarget", at most one
4086 // "preallocated" operand bundle, and at most one "ptrauth" operand bundle.
4087 bool FoundDeoptBundle = false, FoundFuncletBundle = false,
4088 FoundGCTransitionBundle = false, FoundCFGuardTargetBundle = false,
4089 FoundPreallocatedBundle = false, FoundGCLiveBundle = false,
4090 FoundPtrauthBundle = false, FoundKCFIBundle = false,
4091 FoundAttachedCallBundle = false;
4092 for (unsigned i = 0, e = Call.getNumOperandBundles(); i < e; ++i) {
4093 OperandBundleUse BU = Call.getOperandBundleAt(i);
4094 uint32_t Tag = BU.getTagID();
4095 if (Tag == LLVMContext::OB_deopt) {
4096 Check(!FoundDeoptBundle, "Multiple deopt operand bundles", Call);
4097 FoundDeoptBundle = true;
4098 } else if (Tag == LLVMContext::OB_gc_transition) {
4099 Check(!FoundGCTransitionBundle, "Multiple gc-transition operand bundles",
4100 Call);
4101 FoundGCTransitionBundle = true;
4102 } else if (Tag == LLVMContext::OB_funclet) {
4103 Check(!FoundFuncletBundle, "Multiple funclet operand bundles", Call);
4104 FoundFuncletBundle = true;
4105 Check(BU.Inputs.size() == 1,
4106 "Expected exactly one funclet bundle operand", Call);
4107 Check(isa<FuncletPadInst>(BU.Inputs.front()),
4108 "Funclet bundle operands should correspond to a FuncletPadInst",
4109 Call);
4110 } else if (Tag == LLVMContext::OB_cfguardtarget) {
4111 Check(!FoundCFGuardTargetBundle, "Multiple CFGuardTarget operand bundles",
4112 Call);
4113 FoundCFGuardTargetBundle = true;
4114 Check(BU.Inputs.size() == 1,
4115 "Expected exactly one cfguardtarget bundle operand", Call);
4116 } else if (Tag == LLVMContext::OB_ptrauth) {
4117 Check(!FoundPtrauthBundle, "Multiple ptrauth operand bundles", Call);
4118 FoundPtrauthBundle = true;
4119 Check(BU.Inputs.size() == 2,
4120 "Expected exactly two ptrauth bundle operands", Call);
4121 Check(isa<ConstantInt>(BU.Inputs[0]) &&
4122 BU.Inputs[0]->getType()->isIntegerTy(32),
4123 "Ptrauth bundle key operand must be an i32 constant", Call);
4124 Check(BU.Inputs[1]->getType()->isIntegerTy(64),
4125 "Ptrauth bundle discriminator operand must be an i64", Call);
4126 } else if (Tag == LLVMContext::OB_kcfi) {
4127 Check(!FoundKCFIBundle, "Multiple kcfi operand bundles", Call);
4128 FoundKCFIBundle = true;
4129 Check(BU.Inputs.size() == 1, "Expected exactly one kcfi bundle operand",
4130 Call);
4131 Check(isa<ConstantInt>(BU.Inputs[0]) &&
4132 BU.Inputs[0]->getType()->isIntegerTy(32),
4133 "Kcfi bundle operand must be an i32 constant", Call);
4134 } else if (Tag == LLVMContext::OB_preallocated) {
4135 Check(!FoundPreallocatedBundle, "Multiple preallocated operand bundles",
4136 Call);
4137 FoundPreallocatedBundle = true;
4138 Check(BU.Inputs.size() == 1,
4139 "Expected exactly one preallocated bundle operand", Call);
4140 auto Input = dyn_cast<IntrinsicInst>(BU.Inputs.front());
4141 Check(Input &&
4142 Input->getIntrinsicID() == Intrinsic::call_preallocated_setup,
4143 "\"preallocated\" argument must be a token from "
4144 "llvm.call.preallocated.setup",
4145 Call);
4146 } else if (Tag == LLVMContext::OB_gc_live) {
4147 Check(!FoundGCLiveBundle, "Multiple gc-live operand bundles", Call);
4148 FoundGCLiveBundle = true;
4150 Check(!FoundAttachedCallBundle,
4151 "Multiple \"clang.arc.attachedcall\" operand bundles", Call);
4152 FoundAttachedCallBundle = true;
4153 verifyAttachedCallBundle(Call, BU);
4154 }
4155 }
4156
4157 // Verify that callee and callsite agree on whether to use pointer auth.
4158 Check(!(Call.getCalledFunction() && FoundPtrauthBundle),
4159 "Direct call cannot have a ptrauth bundle", Call);
4160
4161 // Verify that each inlinable callsite of a debug-info-bearing function in a
4162 // debug-info-bearing function has a debug location attached to it. Failure to
4163 // do so causes assertion failures when the inliner sets up inline scope info
4164 // (Interposable functions are not inlinable, neither are functions without
4165 // definitions.)
4171 "inlinable function call in a function with "
4172 "debug info must have a !dbg location",
4173 Call);
4174
4175 if (Call.isInlineAsm())
4176 verifyInlineAsmCall(Call);
4177
4178 ConvergenceVerifyHelper.visit(Call);
4179
4180 visitInstruction(Call);
4181}
4182
4183void Verifier::verifyTailCCMustTailAttrs(const AttrBuilder &Attrs,
4184 StringRef Context) {
4185 Check(!Attrs.contains(Attribute::InAlloca),
4186 Twine("inalloca attribute not allowed in ") + Context);
4187 Check(!Attrs.contains(Attribute::InReg),
4188 Twine("inreg attribute not allowed in ") + Context);
4189 Check(!Attrs.contains(Attribute::SwiftError),
4190 Twine("swifterror attribute not allowed in ") + Context);
4191 Check(!Attrs.contains(Attribute::Preallocated),
4192 Twine("preallocated attribute not allowed in ") + Context);
4193 Check(!Attrs.contains(Attribute::ByRef),
4194 Twine("byref attribute not allowed in ") + Context);
4195}
4196
4197/// Two types are "congruent" if they are identical, or if they are both pointer
4198/// types with different pointee types and the same address space.
4199static bool isTypeCongruent(Type *L, Type *R) {
4200 if (L == R)
4201 return true;
4204 if (!PL || !PR)
4205 return false;
4206 return PL->getAddressSpace() == PR->getAddressSpace();
4207}
4208
4209static AttrBuilder getParameterABIAttributes(LLVMContext& C, unsigned I, AttributeList Attrs) {
4210 static const Attribute::AttrKind ABIAttrs[] = {
4211 Attribute::StructRet, Attribute::ByVal, Attribute::InAlloca,
4212 Attribute::InReg, Attribute::StackAlignment, Attribute::SwiftSelf,
4213 Attribute::SwiftAsync, Attribute::SwiftError, Attribute::Preallocated,
4214 Attribute::ByRef};
4215 AttrBuilder Copy(C);
4216 for (auto AK : ABIAttrs) {
4217 Attribute Attr = Attrs.getParamAttrs(I).getAttribute(AK);
4218 if (Attr.isValid())
4219 Copy.addAttribute(Attr);
4220 }
4221
4222 // `align` is ABI-affecting only in combination with `byval` or `byref`.
4223 if (Attrs.hasParamAttr(I, Attribute::Alignment) &&
4224 (Attrs.hasParamAttr(I, Attribute::ByVal) ||
4225 Attrs.hasParamAttr(I, Attribute::ByRef)))
4226 Copy.addAlignmentAttr(Attrs.getParamAlignment(I));
4227 return Copy;
4228}
4229
4230void Verifier::verifyMustTailCall(CallInst &CI) {
4231 Check(!CI.isInlineAsm(), "cannot use musttail call with inline asm", &CI);
4232
4233 Function *F = CI.getParent()->getParent();
4234 FunctionType *CallerTy = F->getFunctionType();
4235 FunctionType *CalleeTy = CI.getFunctionType();
4236 Check(CallerTy->isVarArg() == CalleeTy->isVarArg(),
4237 "cannot guarantee tail call due to mismatched varargs", &CI);
4238 Check(isTypeCongruent(CallerTy->getReturnType(), CalleeTy->getReturnType()),
4239 "cannot guarantee tail call due to mismatched return types", &CI);
4240
4241 // - The calling conventions of the caller and callee must match.
4242 Check(F->getCallingConv() == CI.getCallingConv(),
4243 "cannot guarantee tail call due to mismatched calling conv", &CI);
4244
4245 // - The call must immediately precede a :ref:`ret <i_ret>` instruction,
4246 // or a pointer bitcast followed by a ret instruction.
4247 // - The ret instruction must return the (possibly bitcasted) value
4248 // produced by the call or void.
4249 Value *RetVal = &CI;
4251
4252 // Handle the optional bitcast.
4253 if (BitCastInst *BI = dyn_cast_or_null<BitCastInst>(Next)) {
4254 Check(BI->getOperand(0) == RetVal,
4255 "bitcast following musttail call must use the call", BI);
4256 RetVal = BI;
4257 Next = BI->getNextNode();
4258 }
4259
4260 // Check the return.
4261 ReturnInst *Ret = dyn_cast_or_null<ReturnInst>(Next);
4262 Check(Ret, "musttail call must precede a ret with an optional bitcast", &CI);
4263 Check(!Ret->getReturnValue() || Ret->getReturnValue() == RetVal ||
4265 "musttail call result must be returned", Ret);
4266
4267 AttributeList CallerAttrs = F->getAttributes();
4268 AttributeList CalleeAttrs = CI.getAttributes();
4269 if (CI.getCallingConv() == CallingConv::SwiftTail ||
4270 CI.getCallingConv() == CallingConv::Tail) {
4271 StringRef CCName =
4272 CI.getCallingConv() == CallingConv::Tail ? "tailcc" : "swifttailcc";
4273
4274 // - Only sret, byval, swiftself, and swiftasync ABI-impacting attributes
4275 // are allowed in swifttailcc call
4276 for (unsigned I = 0, E = CallerTy->getNumParams(); I != E; ++I) {
4277 AttrBuilder ABIAttrs = getParameterABIAttributes(F->getContext(), I, CallerAttrs);
4278 SmallString<32> Context{CCName, StringRef(" musttail caller")};
4279 verifyTailCCMustTailAttrs(ABIAttrs, Context);
4280 }
4281 for (unsigned I = 0, E = CalleeTy->getNumParams(); I != E; ++I) {
4282 AttrBuilder ABIAttrs = getParameterABIAttributes(F->getContext(), I, CalleeAttrs);
4283 SmallString<32> Context{CCName, StringRef(" musttail callee")};
4284 verifyTailCCMustTailAttrs(ABIAttrs, Context);
4285 }
4286 // - Varargs functions are not allowed
4287 Check(!CallerTy->isVarArg(), Twine("cannot guarantee ") + CCName +
4288 " tail call for varargs function");
4289 return;
4290 }
4291
4292 // - The caller and callee prototypes must match. Pointer types of
4293 // parameters or return types may differ in pointee type, but not
4294 // address space.
4295 if (!CI.getIntrinsicID()) {
4296 Check(CallerTy->getNumParams() == CalleeTy->getNumParams(),
4297 "cannot guarantee tail call due to mismatched parameter counts", &CI);
4298 for (unsigned I = 0, E = CallerTy->getNumParams(); I != E; ++I) {
4299 Check(
4300 isTypeCongruent(CallerTy->getParamType(I), CalleeTy->getParamType(I)),
4301 "cannot guarantee tail call due to mismatched parameter types", &CI);
4302 }
4303 }
4304
4305 // - All ABI-impacting function attributes, such as sret, byval, inreg,
4306 // returned, preallocated, and inalloca, must match.
4307 for (unsigned I = 0, E = CallerTy->getNumParams(); I != E; ++I) {
4308 AttrBuilder CallerABIAttrs = getParameterABIAttributes(F->getContext(), I, CallerAttrs);
4309 AttrBuilder CalleeABIAttrs = getParameterABIAttributes(F->getContext(), I, CalleeAttrs);
4310 Check(CallerABIAttrs == CalleeABIAttrs,
4311 "cannot guarantee tail call due to mismatched ABI impacting "
4312 "function attributes",
4313 &CI, CI.getOperand(I));
4314 }
4315}
4316
void Verifier::visitCallInst(CallInst &CI) {
  visitCallBase(CI);

  // musttail calls carry extra structural requirements (matching prototypes,
  // an immediately-following ret, ABI attribute agreement) checked separately.
  if (CI.isMustTailCall())
    verifyMustTailCall(CI);
}
4323
4324void Verifier::visitInvokeInst(InvokeInst &II) {
4325 visitCallBase(II);
4326
4327 // Verify that the first non-PHI instruction of the unwind destination is an
4328 // exception handling instruction.
4329 Check(
4330 II.getUnwindDest()->isEHPad(),
4331 "The unwind destination does not have an exception handling instruction!",
4332 &II);
4333
4334 visitTerminator(II);
4335}
4336
4337/// visitUnaryOperator - Check the argument to the unary operator.
4338///
4339void Verifier::visitUnaryOperator(UnaryOperator &U) {
4340 Check(U.getType() == U.getOperand(0)->getType(),
4341 "Unary operators must have same type for"
4342 "operands and result!",
4343 &U);
4344
4345 switch (U.getOpcode()) {
4346 // Check that floating-point arithmetic operators are only used with
4347 // floating-point operands.
4348 case Instruction::FNeg:
4349 Check(U.getType()->isFPOrFPVectorTy(),
4350 "FNeg operator only works with float types!", &U);
4351 break;
4352 default:
4353 llvm_unreachable("Unknown UnaryOperator opcode!");
4354 }
4355
4356 visitInstruction(U);
4357}
4358
/// visitBinaryOperator - Check that both arguments to the binary operator are
/// of the same type!
///
void Verifier::visitBinaryOperator(BinaryOperator &B) {
  // All binary operators require identical operand types.
  Check(B.getOperand(0)->getType() == B.getOperand(1)->getType(),
        "Both operands to a binary operator are not of the same type!", &B);

  switch (B.getOpcode()) {
  // Check that integer arithmetic operators are only used with
  // integral operands.
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Mul:
  case Instruction::SDiv:
  case Instruction::UDiv:
  case Instruction::SRem:
  case Instruction::URem:
    Check(B.getType()->isIntOrIntVectorTy(),
          "Integer arithmetic operators only work with integral types!", &B);
    Check(B.getType() == B.getOperand(0)->getType(),
          "Integer arithmetic operators must have same type "
          "for operands and result!",
          &B);
    break;
  // Check that floating-point arithmetic operators are only used with
  // floating-point operands.
  case Instruction::FAdd:
  case Instruction::FSub:
  case Instruction::FMul:
  case Instruction::FDiv:
  case Instruction::FRem:
    Check(B.getType()->isFPOrFPVectorTy(),
          "Floating-point arithmetic operators only work with "
          "floating-point types!",
          &B);
    Check(B.getType() == B.getOperand(0)->getType(),
          "Floating-point arithmetic operators must have same type "
          "for operands and result!",
          &B);
    break;
  // Check that logical operators are only used with integral operands.
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor:
    Check(B.getType()->isIntOrIntVectorTy(),
          "Logical operators only work with integral types!", &B);
    Check(B.getType() == B.getOperand(0)->getType(),
          "Logical operators must have same type for operands and result!", &B);
    break;
  // Shifts likewise apply only to integers or integer vectors.
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
    Check(B.getType()->isIntOrIntVectorTy(),
          "Shifts only work with integral types!", &B);
    Check(B.getType() == B.getOperand(0)->getType(),
          "Shift return type must be same as operands!", &B);
    break;
  default:
    llvm_unreachable("Unknown BinaryOperator opcode!");
  }

  visitInstruction(B);
}
4422
4423void Verifier::visitICmpInst(ICmpInst &IC) {
4424 // Check that the operands are the same type
4425 Type *Op0Ty = IC.getOperand(0)->getType();
4426 Type *Op1Ty = IC.getOperand(1)->getType();
4427 Check(Op0Ty == Op1Ty,
4428 "Both operands to ICmp instruction are not of the same type!", &IC);
4429 // Check that the operands are the right type
4430 Check(Op0Ty->isIntOrIntVectorTy() || Op0Ty->isPtrOrPtrVectorTy(),
4431 "Invalid operand types for ICmp instruction", &IC);
4432 // Check that the predicate is valid.
4433 Check(IC.isIntPredicate(), "Invalid predicate in ICmp instruction!", &IC);
4434
4435 visitInstruction(IC);
4436}
4437
4438void Verifier::visitFCmpInst(FCmpInst &FC) {
4439 // Check that the operands are the same type
4440 Type *Op0Ty = FC.getOperand(0)->getType();
4441 Type *Op1Ty = FC.getOperand(1)->getType();
4442 Check(Op0Ty == Op1Ty,
4443 "Both operands to FCmp instruction are not of the same type!", &FC);
4444 // Check that the operands are the right type
4445 Check(Op0Ty->isFPOrFPVectorTy(), "Invalid operand types for FCmp instruction",
4446 &FC);
4447 // Check that the predicate is valid.
4448 Check(FC.isFPPredicate(), "Invalid predicate in FCmp instruction!", &FC);
4449
4450 visitInstruction(FC);
4451}
4452
4453void Verifier::visitExtractElementInst(ExtractElementInst &EI) {
4455 "Invalid extractelement operands!", &EI);
4456 visitInstruction(EI);
4457}
4458
void Verifier::visitInsertElementInst(InsertElementInst &IE) {
  // Delegate operand validation (vector operand, matching element type,
  // integer index) to the instruction class itself.
  Check(InsertElementInst::isValidOperands(IE.getOperand(0), IE.getOperand(1),
                                           IE.getOperand(2)),
        "Invalid insertelement operands!", &IE);
  visitInstruction(IE);
}
4465
4466void Verifier::visitShuffleVectorInst(ShuffleVectorInst &SV) {
4468 SV.getShuffleMask()),
4469 "Invalid shufflevector operands!", &SV);
4470 visitInstruction(SV);
4471}
4472
4473void Verifier::visitGetElementPtrInst(GetElementPtrInst &GEP) {
4474 Type *TargetTy = GEP.getPointerOperandType()->getScalarType();
4475
4476 Check(isa<PointerType>(TargetTy),
4477 "GEP base pointer is not a vector or a vector of pointers", &GEP);
4478 Check(GEP.getSourceElementType()->isSized(), "GEP into unsized type!", &GEP);
4479
4480 if (auto *STy = dyn_cast<StructType>(GEP.getSourceElementType())) {
4481 Check(!STy->isScalableTy(),
4482 "getelementptr cannot target structure that contains scalable vector"
4483 "type",
4484 &GEP);
4485 }
4486
4487 SmallVector<Value *, 16> Idxs(GEP.indices());
4488 Check(
4489 all_of(Idxs, [](Value *V) { return V->getType()->isIntOrIntVectorTy(); }),
4490 "GEP indexes must be integers", &GEP);
4491 Type *ElTy =
4492 GetElementPtrInst::getIndexedType(GEP.getSourceElementType(), Idxs);
4493 Check(ElTy, "Invalid indices for GEP pointer type!", &GEP);
4494
4495 PointerType *PtrTy = dyn_cast<PointerType>(GEP.getType()->getScalarType());
4496
4497 Check(PtrTy && GEP.getResultElementType() == ElTy,
4498 "GEP is not of right type for indices!", &GEP, ElTy);
4499
4500 if (auto *GEPVTy = dyn_cast<VectorType>(GEP.getType())) {
4501 // Additional checks for vector GEPs.
4502 ElementCount GEPWidth = GEPVTy->getElementCount();
4503 if (GEP.getPointerOperandType()->isVectorTy())
4504 Check(
4505 GEPWidth ==
4506 cast<VectorType>(GEP.getPointerOperandType())->getElementCount(),
4507 "Vector GEP result width doesn't match operand's", &GEP);
4508 for (Value *Idx : Idxs) {
4509 Type *IndexTy = Idx->getType();
4510 if (auto *IndexVTy = dyn_cast<VectorType>(IndexTy)) {
4511 ElementCount IndexWidth = IndexVTy->getElementCount();
4512 Check(IndexWidth == GEPWidth, "Invalid GEP index vector width", &GEP);
4513 }
4514 Check(IndexTy->isIntOrIntVectorTy(),
4515 "All GEP indices should be of integer type");
4516 }
4517 }
4518
4519 // Check that GEP does not index into a vector with non-byte-addressable
4520 // elements.
4522 GTI != GTE; ++GTI) {
4523 if (GTI.isVector()) {
4524 Type *ElemTy = GTI.getIndexedType();
4525 Check(DL.typeSizeEqualsStoreSize(ElemTy),
4526 "GEP into vector with non-byte-addressable element type", &GEP);
4527 }
4528 }
4529
4530 Check(GEP.getAddressSpace() == PtrTy->getAddressSpace(),
4531 "GEP address space doesn't match type", &GEP);
4532
4533 visitInstruction(GEP);
4534}
4535
4536static bool isContiguous(const ConstantRange &A, const ConstantRange &B) {
4537 return A.getUpper() == B.getLower() || A.getLower() == B.getUpper();
4538}
4539
/// Verify !range and !absolute_symbol metadata. These have the same
/// restrictions, except !absolute_symbol allows the full set.
void Verifier::verifyRangeLikeMetadata(const Value &I, const MDNode *Range,
                                       Type *Ty, RangeLikeMetadataKind Kind) {
  // Operands come in (low, high) pairs; there must be a non-zero, even count.
  unsigned NumOperands = Range->getNumOperands();
  Check(NumOperands % 2 == 0, "Unfinished range!", Range);
  unsigned NumRanges = NumOperands / 2;
  Check(NumRanges >= 1, "It should have at least one range!", Range);

  ConstantRange LastRange(1, true); // Dummy initial value
  for (unsigned i = 0; i < NumRanges; ++i) {
    ConstantInt *Low =
        mdconst::dyn_extract<ConstantInt>(Range->getOperand(2 * i));
    Check(Low, "The lower limit must be an integer!", Low);
    ConstantInt *High =
        mdconst::dyn_extract<ConstantInt>(Range->getOperand(2 * i + 1));
    Check(High, "The upper limit must be an integer!", High);

    Check(High->getType() == Low->getType(), "Range pair types must match!",
          &I);

    if (Kind == RangeLikeMetadataKind::NoaliasAddrspace) {
      // Address-space numbers are always i32, regardless of the annotated
      // instruction's type.
      Check(High->getType()->isIntegerTy(32),
            "noalias.addrspace type must be i32!", &I);
    } else {
      Check(High->getType() == Ty->getScalarType(),
            "Range types must match instruction type!", &I);
    }

    APInt HighV = High->getValue();
    APInt LowV = Low->getValue();

    // ConstantRange asserts if the ranges are the same except for the min/max
    // value. Leave the cases it tolerates for the empty range error below.
    Check(LowV != HighV || LowV.isMaxValue() || LowV.isMinValue(),
          "The upper and lower limits cannot be the same value", &I);

    ConstantRange CurRange(LowV, HighV);
    // Only !absolute_symbol may use the full-set encoding.
    Check(!CurRange.isEmptySet() &&
              (Kind == RangeLikeMetadataKind::AbsoluteSymbol ||
               !CurRange.isFullSet()),
          "Range must not be empty!", Range);
    if (i != 0) {
      // Successive ranges must be disjoint, ordered, and non-adjacent
      // (adjacent ranges should have been merged into one).
      Check(CurRange.intersectWith(LastRange).isEmptySet(),
            "Intervals are overlapping", Range);
      Check(LowV.sgt(LastRange.getLower()), "Intervals are not in order",
            Range);
      Check(!isContiguous(CurRange, LastRange), "Intervals are contiguous",
            Range);
    }
    LastRange = ConstantRange(LowV, HighV);
  }
  if (NumRanges > 2) {
    // The range list is circular (it may wrap around the type's value space),
    // so also compare the first range against the last.
    APInt FirstLow =
        mdconst::dyn_extract<ConstantInt>(Range->getOperand(0))->getValue();
    APInt FirstHigh =
        mdconst::dyn_extract<ConstantInt>(Range->getOperand(1))->getValue();
    ConstantRange FirstRange(FirstLow, FirstHigh);
    Check(FirstRange.intersectWith(LastRange).isEmptySet(),
          "Intervals are overlapping", Range);
    Check(!isContiguous(FirstRange, LastRange), "Intervals are contiguous",
          Range);
  }
}
4604
void Verifier::visitRangeMetadata(Instruction &I, MDNode *Range, Type *Ty) {
  // Caller guarantees Range is exactly I's !range metadata node.
  assert(Range && Range == I.getMetadata(LLVMContext::MD_range) &&
         "precondition violation");
  verifyRangeLikeMetadata(I, Range, Ty, RangeLikeMetadataKind::Range);
}
4610
4611void Verifier::visitNoFPClassMetadata(Instruction &I, MDNode *NoFPClass,
4612 Type *Ty) {
4613 Check(AttributeFuncs::isNoFPClassCompatibleType(Ty),
4614 "nofpclass only applies to floating-point typed loads", I);
4615
4616 Check(NoFPClass->getNumOperands() == 1,
4617 "nofpclass must have exactly one entry", NoFPClass);
4618 ConstantInt *MaskVal =
4620 Check(MaskVal && MaskVal->getType()->isIntegerTy(32),
4621 "nofpclass entry must be a constant i32", NoFPClass);
4622 uint32_t Val = MaskVal->getZExtValue();
4623 Check(Val != 0, "'nofpclass' must have at least one test bit set", NoFPClass,
4624 I);
4625
4626 Check((Val & ~static_cast<unsigned>(fcAllFlags)) == 0,
4627 "Invalid value for 'nofpclass' test mask", NoFPClass, I);
4628}
4629
void Verifier::visitNoaliasAddrspaceMetadata(Instruction &I, MDNode *Range,
                                             Type *Ty) {
  // Caller guarantees Range is exactly I's !noalias.addrspace metadata node.
  assert(Range && Range == I.getMetadata(LLVMContext::MD_noalias_addrspace) &&
         "precondition violation");
  verifyRangeLikeMetadata(I, Range, Ty,
                          RangeLikeMetadataKind::NoaliasAddrspace);
}
4637
4638void Verifier::checkAtomicMemAccessSize(Type *Ty, const Instruction *I) {
4639 unsigned Size = DL.getTypeSizeInBits(Ty).getFixedValue();
4640 Check(Size >= 8, "atomic memory access' size must be byte-sized", Ty, I);
4641 Check(!(Size & (Size - 1)),
4642 "atomic memory access' operand must have a power-of-two size", Ty, I);
4643}
4644
4645void Verifier::visitLoadInst(LoadInst &LI) {
4647 Check(PTy, "Load operand must be a pointer.", &LI);
4648 Type *ElTy = LI.getType();
4649 if (MaybeAlign A = LI.getAlign()) {
4650 Check(A->value() <= Value::MaximumAlignment,
4651 "huge alignment values are unsupported", &LI);
4652 }
4653 Check(ElTy->isSized(), "loading unsized types is not allowed", &LI);
4654 if (LI.isAtomic()) {
4655 Check(LI.getOrdering() != AtomicOrdering::Release &&
4656 LI.getOrdering() != AtomicOrdering::AcquireRelease,
4657 "Load cannot have Release ordering", &LI);
4658 Check(ElTy->getScalarType()->isIntOrPtrTy() ||
4659 ElTy->getScalarType()->isByteTy() ||
4661 "atomic load operand must have integer, byte, pointer, floating "
4662 "point, or vector type!",
4663 ElTy, &LI);
4664
4665 checkAtomicMemAccessSize(ElTy, &LI);
4666 } else {
4668 "Non-atomic load cannot have SynchronizationScope specified", &LI);
4669 }
4670
4671 visitInstruction(LI);
4672}
4673
4674void Verifier::visitStoreInst(StoreInst &SI) {
4675 PointerType *PTy = dyn_cast<PointerType>(SI.getOperand(1)->getType());
4676 Check(PTy, "Store operand must be a pointer.", &SI);
4677 Type *ElTy = SI.getOperand(0)->getType();
4678 if (MaybeAlign A = SI.getAlign()) {
4679 Check(A->value() <= Value::MaximumAlignment,
4680 "huge alignment values are unsupported", &SI);
4681 }
4682 Check(ElTy->isSized(), "storing unsized types is not allowed", &SI);
4683 if (SI.isAtomic()) {
4684 Check(SI.getOrdering() != AtomicOrdering::Acquire &&
4685 SI.getOrdering() != AtomicOrdering::AcquireRelease,
4686 "Store cannot have Acquire ordering", &SI);
4687 Check(ElTy->getScalarType()->isIntOrPtrTy() ||
4688 ElTy->getScalarType()->isByteTy() ||
4690 "atomic store operand must have integer, byte, pointer, floating "
4691 "point, or vector type!",
4692 ElTy, &SI);
4693 checkAtomicMemAccessSize(ElTy, &SI);
4694 } else {
4695 Check(SI.getSyncScopeID() == SyncScope::System,
4696 "Non-atomic store cannot have SynchronizationScope specified", &SI);
4697 }
4698 visitInstruction(SI);
4699}
4700
4701/// Check that SwiftErrorVal is used as a swifterror argument in CS.
4702void Verifier::verifySwiftErrorCall(CallBase &Call,
4703 const Value *SwiftErrorVal) {
4704 for (const auto &I : llvm::enumerate(Call.args())) {
4705 if (I.value() == SwiftErrorVal) {
4706 Check(Call.paramHasAttr(I.index(), Attribute::SwiftError),
4707 "swifterror value when used in a callsite should be marked "
4708 "with swifterror attribute",
4709 SwiftErrorVal, Call);
4710 }
4711 }
4712}
4713
4714void Verifier::verifySwiftErrorValue(const Value *SwiftErrorVal) {
4715 // Check that swifterror value is only used by loads, stores, or as
4716 // a swifterror argument.
4717 for (const User *U : SwiftErrorVal->users()) {
4719 isa<InvokeInst>(U),
4720 "swifterror value can only be loaded and stored from, or "
4721 "as a swifterror argument!",
4722 SwiftErrorVal, U);
4723 // If it is used by a store, check it is the second operand.
4724 if (auto StoreI = dyn_cast<StoreInst>(U))
4725 Check(StoreI->getOperand(1) == SwiftErrorVal,
4726 "swifterror value should be the second operand when used "
4727 "by stores",
4728 SwiftErrorVal, U);
4729 if (auto *Call = dyn_cast<CallBase>(U))
4730 verifySwiftErrorCall(*const_cast<CallBase *>(Call), SwiftErrorVal);
4731 }
4732}
4733
4734void Verifier::visitAllocaInst(AllocaInst &AI) {
4735 Type *Ty = AI.getAllocatedType();
4736 SmallPtrSet<Type*, 4> Visited;
4737 Check(Ty->isSized(&Visited), "Cannot allocate unsized type", &AI);
4738 // Check if it's a target extension type that disallows being used on the
4739 // stack.
4741 "Alloca has illegal target extension type", &AI);
4743 "Alloca array size must have integer type", &AI);
4744 if (MaybeAlign A = AI.getAlign()) {
4745 Check(A->value() <= Value::MaximumAlignment,
4746 "huge alignment values are unsupported", &AI);
4747 }
4748
4749 if (AI.isSwiftError()) {
4750 Check(Ty->isPointerTy(), "swifterror alloca must have pointer type", &AI);
4752 "swifterror alloca must not be array allocation", &AI);
4753 verifySwiftErrorValue(&AI);
4754 }
4755
4756 if (TT.isAMDGPU()) {
4758 "alloca on amdgpu must be in addrspace(5)", &AI);
4759 }
4760
4761 visitInstruction(AI);
4762}
4763
4764void Verifier::visitAtomicCmpXchgInst(AtomicCmpXchgInst &CXI) {
4765 Type *ElTy = CXI.getOperand(1)->getType();
4766 Check(ElTy->isIntOrPtrTy(),
4767 "cmpxchg operand must have integer or pointer type", ElTy, &CXI);
4768 checkAtomicMemAccessSize(ElTy, &CXI);
4769 visitInstruction(CXI);
4770}
4771
4772void Verifier::visitAtomicRMWInst(AtomicRMWInst &RMWI) {
4773 Check(RMWI.getOrdering() != AtomicOrdering::Unordered,
4774 "atomicrmw instructions cannot be unordered.", &RMWI);
4775 auto Op = RMWI.getOperation();
4776 Type *ElTy = RMWI.getOperand(1)->getType();
4777 if (Op == AtomicRMWInst::Xchg) {
4778 Check(ElTy->isIntegerTy() || ElTy->isFloatingPointTy() ||
4779 ElTy->isPointerTy(),
4780 "atomicrmw " + AtomicRMWInst::getOperationName(Op) +
4781 " operand must have integer or floating point type!",
4782 &RMWI, ElTy);
4783 } else if (AtomicRMWInst::isFPOperation(Op)) {
4785 "atomicrmw " + AtomicRMWInst::getOperationName(Op) +
4786 " operand must have floating-point or fixed vector of floating-point "
4787 "type!",
4788 &RMWI, ElTy);
4789 } else {
4790 Check(ElTy->isIntegerTy(),
4791 "atomicrmw " + AtomicRMWInst::getOperationName(Op) +
4792 " operand must have integer type!",
4793 &RMWI, ElTy);
4794 }
4795 checkAtomicMemAccessSize(ElTy, &RMWI);
4797 "Invalid binary operation!", &RMWI);
4798 visitInstruction(RMWI);
4799}
4800
4801void Verifier::visitFenceInst(FenceInst &FI) {
4802 const AtomicOrdering Ordering = FI.getOrdering();
4803 Check(Ordering == AtomicOrdering::Acquire ||
4804 Ordering == AtomicOrdering::Release ||
4805 Ordering == AtomicOrdering::AcquireRelease ||
4806 Ordering == AtomicOrdering::SequentiallyConsistent,
4807 "fence instructions may only have acquire, release, acq_rel, or "
4808 "seq_cst ordering.",
4809 &FI);
4810 visitInstruction(FI);
4811}
4812
4813void Verifier::visitExtractValueInst(ExtractValueInst &EVI) {
4815 EVI.getIndices()) == EVI.getType(),
4816 "Invalid ExtractValueInst operands!", &EVI);
4817
4818 visitInstruction(EVI);
4819}
4820
4821void Verifier::visitInsertValueInst(InsertValueInst &IVI) {
4823 IVI.getIndices()) ==
4824 IVI.getOperand(1)->getType(),
4825 "Invalid InsertValueInst operands!", &IVI);
4826
4827 visitInstruction(IVI);
4828}
4829
4830static Value *getParentPad(Value *EHPad) {
4831 if (auto *FPI = dyn_cast<FuncletPadInst>(EHPad))
4832 return FPI->getParentPad();
4833
4834 return cast<CatchSwitchInst>(EHPad)->getParentPad();
4835}
4836
4837void Verifier::visitEHPadPredecessors(Instruction &I) {
4838 assert(I.isEHPad());
4839
4840 BasicBlock *BB = I.getParent();
4841 Function *F = BB->getParent();
4842
4843 Check(BB != &F->getEntryBlock(), "EH pad cannot be in entry block.", &I);
4844
4845 if (auto *LPI = dyn_cast<LandingPadInst>(&I)) {
4846 // The landingpad instruction defines its parent as a landing pad block. The
4847 // landing pad block may be branched to only by the unwind edge of an
4848 // invoke.
4849 for (BasicBlock *PredBB : predecessors(BB)) {
4850 const auto *II = dyn_cast<InvokeInst>(PredBB->getTerminator());
4851 Check(II && II->getUnwindDest() == BB && II->getNormalDest() != BB,
4852 "Block containing LandingPadInst must be jumped to "
4853 "only by the unwind edge of an invoke.",
4854 LPI);
4855 }
4856 return;
4857 }
4858 if (auto *CPI = dyn_cast<CatchPadInst>(&I)) {
4859 if (!pred_empty(BB))
4860 Check(BB->getUniquePredecessor() == CPI->getCatchSwitch()->getParent(),
4861 "Block containg CatchPadInst must be jumped to "
4862 "only by its catchswitch.",
4863 CPI);
4864 Check(BB != CPI->getCatchSwitch()->getUnwindDest(),
4865 "Catchswitch cannot unwind to one of its catchpads",
4866 CPI->getCatchSwitch(), CPI);
4867 return;
4868 }
4869
4870 // Verify that each pred has a legal terminator with a legal to/from EH
4871 // pad relationship.
4872 Instruction *ToPad = &I;
4873 Value *ToPadParent = getParentPad(ToPad);
4874 for (BasicBlock *PredBB : predecessors(BB)) {
4875 Instruction *TI = PredBB->getTerminator();
4876 Value *FromPad;
4877 if (auto *II = dyn_cast<InvokeInst>(TI)) {
4878 Check(II->getUnwindDest() == BB && II->getNormalDest() != BB,
4879 "EH pad must be jumped to via an unwind edge", ToPad, II);
4880 auto *CalledFn =
4881 dyn_cast<Function>(II->getCalledOperand()->stripPointerCasts());
4882 if (CalledFn && CalledFn->isIntrinsic() && II->doesNotThrow() &&
4883 !IntrinsicInst::mayLowerToFunctionCall(CalledFn->getIntrinsicID()))
4884 continue;
4885 if (auto Bundle = II->getOperandBundle(LLVMContext::OB_funclet))
4886 FromPad = Bundle->Inputs[0];
4887 else
4888 FromPad = ConstantTokenNone::get(II->getContext());
4889 } else if (auto *CRI = dyn_cast<CleanupReturnInst>(TI)) {
4890 FromPad = CRI->getOperand(0);
4891 Check(FromPad != ToPadParent, "A cleanupret must exit its cleanup", CRI);
4892 } else if (auto *CSI = dyn_cast<CatchSwitchInst>(TI)) {
4893 FromPad = CSI;
4894 } else {
4895 Check(false, "EH pad must be jumped to via an unwind edge", ToPad, TI);
4896 }
4897
4898 // The edge may exit from zero or more nested pads.
4899 SmallPtrSet<Value *, 8> Seen;
4900 for (;; FromPad = getParentPad(FromPad)) {
4901 Check(FromPad != ToPad,
4902 "EH pad cannot handle exceptions raised within it", FromPad, TI);
4903 if (FromPad == ToPadParent) {
4904 // This is a legal unwind edge.
4905 break;
4906 }
4907 Check(!isa<ConstantTokenNone>(FromPad),
4908 "A single unwind edge may only enter one EH pad", TI);
4909 Check(Seen.insert(FromPad).second, "EH pad jumps through a cycle of pads",
4910 FromPad);
4911
4912 // This will be diagnosed on the corresponding instruction already. We
4913 // need the extra check here to make sure getParentPad() works.
4914 Check(isa<FuncletPadInst>(FromPad) || isa<CatchSwitchInst>(FromPad),
4915 "Parent pad must be catchpad/cleanuppad/catchswitch", TI);
4916 }
4917 }
4918}
4919
4920void Verifier::visitLandingPadInst(LandingPadInst &LPI) {
4921 // The landingpad instruction is ill-formed if it doesn't have any clauses and
4922 // isn't a cleanup.
4923 Check(LPI.getNumClauses() > 0 || LPI.isCleanup(),
4924 "LandingPadInst needs at least one clause or to be a cleanup.", &LPI);
4925
4926 visitEHPadPredecessors(LPI);
4927
4928 if (!LandingPadResultTy)
4929 LandingPadResultTy = LPI.getType();
4930 else
4931 Check(LandingPadResultTy == LPI.getType(),
4932 "The landingpad instruction should have a consistent result type "
4933 "inside a function.",
4934 &LPI);
4935
4936 Function *F = LPI.getParent()->getParent();
4937 Check(F->hasPersonalityFn(),
4938 "LandingPadInst needs to be in a function with a personality.", &LPI);
4939
4940 // The landingpad instruction must be the first non-PHI instruction in the
4941 // block.
4942 Check(LPI.getParent()->getLandingPadInst() == &LPI,
4943 "LandingPadInst not the first non-PHI instruction in the block.", &LPI);
4944
4945 for (unsigned i = 0, e = LPI.getNumClauses(); i < e; ++i) {
4946 Constant *Clause = LPI.getClause(i);
4947 if (LPI.isCatch(i)) {
4948 Check(isa<PointerType>(Clause->getType()),
4949 "Catch operand does not have pointer type!", &LPI);
4950 } else {
4951 Check(LPI.isFilter(i), "Clause is neither catch nor filter!", &LPI);
4953 "Filter operand is not an array of constants!", &LPI);
4954 }
4955 }
4956
4957 visitInstruction(LPI);
4958}
4959
4960void Verifier::visitResumeInst(ResumeInst &RI) {
4962 "ResumeInst needs to be in a function with a personality.", &RI);
4963
4964 if (!LandingPadResultTy)
4965 LandingPadResultTy = RI.getValue()->getType();
4966 else
4967 Check(LandingPadResultTy == RI.getValue()->getType(),
4968 "The resume instruction should have a consistent result type "
4969 "inside a function.",
4970 &RI);
4971
4972 visitTerminator(RI);
4973}
4974
4975void Verifier::visitCatchPadInst(CatchPadInst &CPI) {
4976 BasicBlock *BB = CPI.getParent();
4977
4978 Function *F = BB->getParent();
4979 Check(F->hasPersonalityFn(),
4980 "CatchPadInst needs to be in a function with a personality.", &CPI);
4981
4983 "CatchPadInst needs to be directly nested in a CatchSwitchInst.",
4984 CPI.getParentPad());
4985
4986 // The catchpad instruction must be the first non-PHI instruction in the
4987 // block.
4988 Check(&*BB->getFirstNonPHIIt() == &CPI,
4989 "CatchPadInst not the first non-PHI instruction in the block.", &CPI);
4990
4992 [](Use &U) {
4993 auto *V = U.get();
4994 return isa<Constant>(V) || isa<AllocaInst>(V);
4995 }),
4996 "Argument operand must be alloca or constant.", &CPI);
4997
4998 visitEHPadPredecessors(CPI);
4999 visitFuncletPadInst(CPI);
5000}
5001
void Verifier::visitCatchReturnInst(CatchReturnInst &CatchReturn) {
  // catchret must name the catchpad funclet it is exiting.
  Check(isa<CatchPadInst>(CatchReturn.getOperand(0)),
        "CatchReturnInst needs to be provided a CatchPad", &CatchReturn,
        CatchReturn.getOperand(0));

  visitTerminator(CatchReturn);
}
5009
5010void Verifier::visitCleanupPadInst(CleanupPadInst &CPI) {
5011 BasicBlock *BB = CPI.getParent();
5012
5013 Function *F = BB->getParent();
5014 Check(F->hasPersonalityFn(),
5015 "CleanupPadInst needs to be in a function with a personality.", &CPI);
5016
5017 // The cleanuppad instruction must be the first non-PHI instruction in the
5018 // block.
5019 Check(&*BB->getFirstNonPHIIt() == &CPI,
5020 "CleanupPadInst not the first non-PHI instruction in the block.", &CPI);
5021
5022 auto *ParentPad = CPI.getParentPad();
5023 Check(isa<ConstantTokenNone>(ParentPad) || isa<FuncletPadInst>(ParentPad),
5024 "CleanupPadInst has an invalid parent.", &CPI);
5025
5026 visitEHPadPredecessors(CPI);
5027 visitFuncletPadInst(CPI);
5028}
5029
// Verify that all unwind edges leaving this funclet pad agree on a single
// unwind destination, walking nested cleanup pads with a worklist to discover
// where each one ultimately unwinds.
void Verifier::visitFuncletPadInst(FuncletPadInst &FPI) {
  // First unwind edge found that exits FPI, and the pad it unwinds to; every
  // later exiting edge must match FirstUnwindPad.
  User *FirstUser = nullptr;
  Value *FirstUnwindPad = nullptr;
  SmallVector<FuncletPadInst *, 8> Worklist({&FPI});
  SmallPtrSet<FuncletPadInst *, 8> Seen;

  while (!Worklist.empty()) {
    FuncletPadInst *CurrentPad = Worklist.pop_back_val();
    Check(Seen.insert(CurrentPad).second,
          "FuncletPadInst must not be nested within itself", CurrentPad);
    Value *UnresolvedAncestorPad = nullptr;
    for (User *U : CurrentPad->users()) {
      BasicBlock *UnwindDest;
      if (auto *CRI = dyn_cast<CleanupReturnInst>(U)) {
        UnwindDest = CRI->getUnwindDest();
      } else if (auto *CSI = dyn_cast<CatchSwitchInst>(U)) {
        // We allow catchswitch unwind to caller to nest
        // within an outer pad that unwinds somewhere else,
        // because catchswitch doesn't have a nounwind variant.
        // See e.g. SimplifyCFGOpt::SimplifyUnreachable.
        if (CSI->unwindsToCaller())
          continue;
        UnwindDest = CSI->getUnwindDest();
      } else if (auto *II = dyn_cast<InvokeInst>(U)) {
        UnwindDest = II->getUnwindDest();
      } else if (isa<CallInst>(U)) {
        // Calls which don't unwind may be found inside funclet
        // pads that unwind somewhere else. We don't *require*
        // such calls to be annotated nounwind.
        continue;
      } else if (auto *CPI = dyn_cast<CleanupPadInst>(U)) {
        // The unwind dest for a cleanup can only be found by
        // recursive search. Add it to the worklist, and we'll
        // search for its first use that determines where it unwinds.
        Worklist.push_back(CPI);
        continue;
      } else {
        Check(isa<CatchReturnInst>(U), "Bogus funclet pad use", U);
        continue;
      }

      Value *UnwindPad;
      bool ExitsFPI;
      if (UnwindDest) {
        UnwindPad = &*UnwindDest->getFirstNonPHIIt();
        if (!cast<Instruction>(UnwindPad)->isEHPad())
          continue;
        Value *UnwindParent = getParentPad(UnwindPad);
        // Ignore unwind edges that don't exit CurrentPad.
        if (UnwindParent == CurrentPad)
          continue;
        // Determine whether the original funclet pad is exited,
        // and if we are scanning nested pads determine how many
        // of them are exited so we can stop searching their
        // children.
        Value *ExitedPad = CurrentPad;
        ExitsFPI = false;
        do {
          if (ExitedPad == &FPI) {
            ExitsFPI = true;
            // Now we can resolve any ancestors of CurrentPad up to
            // FPI, but not including FPI since we need to make sure
            // to check all direct users of FPI for consistency.
            UnresolvedAncestorPad = &FPI;
            break;
          }
          Value *ExitedParent = getParentPad(ExitedPad);
          if (ExitedParent == UnwindParent) {
            // ExitedPad is the ancestor-most pad which this unwind
            // edge exits, so we can resolve up to it, meaning that
            // ExitedParent is the first ancestor still unresolved.
            UnresolvedAncestorPad = ExitedParent;
            break;
          }
          ExitedPad = ExitedParent;
        } while (!isa<ConstantTokenNone>(ExitedPad));
      } else {
        // Unwinding to caller exits all pads.
        UnwindPad = ConstantTokenNone::get(FPI.getContext());
        ExitsFPI = true;
        UnresolvedAncestorPad = &FPI;
      }

      if (ExitsFPI) {
        // This unwind edge exits FPI. Make sure it agrees with other
        // such edges.
        if (FirstUser) {
          Check(UnwindPad == FirstUnwindPad,
                "Unwind edges out of a funclet "
                "pad must have the same unwind "
                "dest",
                &FPI, U, FirstUser);
        } else {
          FirstUser = U;
          FirstUnwindPad = UnwindPad;
          // Record cleanup sibling unwinds for verifySiblingFuncletUnwinds
          if (isa<CleanupPadInst>(&FPI) && !isa<ConstantTokenNone>(UnwindPad) &&
              getParentPad(UnwindPad) == getParentPad(&FPI))
            SiblingFuncletInfo[&FPI] = cast<Instruction>(U);
        }
      }
      // Make sure we visit all uses of FPI, but for nested pads stop as
      // soon as we know where they unwind to.
      if (CurrentPad != &FPI)
        break;
    }
    if (UnresolvedAncestorPad) {
      if (CurrentPad == UnresolvedAncestorPad) {
        // When CurrentPad is FPI itself, we don't mark it as resolved even if
        // we've found an unwind edge that exits it, because we need to verify
        // all direct uses of FPI.
        assert(CurrentPad == &FPI);
        continue;
      }
      // Pop off the worklist any nested pads that we've found an unwind
      // destination for. The pads on the worklist are the uncles,
      // great-uncles, etc. of CurrentPad. We've found an unwind destination
      // for all ancestors of CurrentPad up to but not including
      // UnresolvedAncestorPad.
      Value *ResolvedPad = CurrentPad;
      while (!Worklist.empty()) {
        Value *UnclePad = Worklist.back();
        Value *AncestorPad = getParentPad(UnclePad);
        // Walk ResolvedPad up the ancestor list until we either find the
        // uncle's parent or the last resolved ancestor.
        while (ResolvedPad != AncestorPad) {
          Value *ResolvedParent = getParentPad(ResolvedPad);
          if (ResolvedParent == UnresolvedAncestorPad) {
            break;
          }
          ResolvedPad = ResolvedParent;
        }
        // If the resolved ancestor search didn't find the uncle's parent,
        // then the uncle is not yet resolved.
        if (ResolvedPad != AncestorPad)
          break;
        // This uncle is resolved, so pop it from the worklist.
        Worklist.pop_back();
      }
    }
  }

  // A catchpad's unwind edges must also agree with its parent catchswitch's
  // unwind destination.
  if (FirstUnwindPad) {
    if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(FPI.getParentPad())) {
      BasicBlock *SwitchUnwindDest = CatchSwitch->getUnwindDest();
      Value *SwitchUnwindPad;
      if (SwitchUnwindDest)
        SwitchUnwindPad = &*SwitchUnwindDest->getFirstNonPHIIt();
      else
        SwitchUnwindPad = ConstantTokenNone::get(FPI.getContext());
      Check(SwitchUnwindPad == FirstUnwindPad,
            "Unwind edges out of a catch must have the same unwind dest as "
            "the parent catchswitch",
            &FPI, FirstUser, CatchSwitch);
    }
  }

  visitInstruction(FPI);
}
5189
5190void Verifier::visitCatchSwitchInst(CatchSwitchInst &CatchSwitch) {
5191 BasicBlock *BB = CatchSwitch.getParent();
5192
5193 Function *F = BB->getParent();
5194 Check(F->hasPersonalityFn(),
5195 "CatchSwitchInst needs to be in a function with a personality.",
5196 &CatchSwitch);
5197
5198 // The catchswitch instruction must be the first non-PHI instruction in the
5199 // block.
5200 Check(&*BB->getFirstNonPHIIt() == &CatchSwitch,
5201 "CatchSwitchInst not the first non-PHI instruction in the block.",
5202 &CatchSwitch);
5203
5204 auto *ParentPad = CatchSwitch.getParentPad();
5205 Check(isa<ConstantTokenNone>(ParentPad) || isa<FuncletPadInst>(ParentPad),
5206 "CatchSwitchInst has an invalid parent.", ParentPad);
5207
5208 if (BasicBlock *UnwindDest = CatchSwitch.getUnwindDest()) {
5209 BasicBlock::iterator I = UnwindDest->getFirstNonPHIIt();
5210 Check(I->isEHPad() && !isa<LandingPadInst>(I),
5211 "CatchSwitchInst must unwind to an EH block which is not a "
5212 "landingpad.",
5213 &CatchSwitch);
5214
5215 // Record catchswitch sibling unwinds for verifySiblingFuncletUnwinds
5216 if (getParentPad(&*I) == ParentPad)
5217 SiblingFuncletInfo[&CatchSwitch] = &CatchSwitch;
5218 }
5219
5220 Check(CatchSwitch.getNumHandlers() != 0,
5221 "CatchSwitchInst cannot have empty handler list", &CatchSwitch);
5222
5223 for (BasicBlock *Handler : CatchSwitch.handlers()) {
5224 Check(isa<CatchPadInst>(Handler->getFirstNonPHIIt()),
5225 "CatchSwitchInst handlers must be catchpads", &CatchSwitch, Handler);
5226 }
5227
5228 visitEHPadPredecessors(CatchSwitch);
5229 visitTerminator(CatchSwitch);
5230}
5231
void Verifier::visitCleanupReturnInst(CleanupReturnInst &CRI) {
        "CleanupReturnInst needs to be provided a CleanupPad", &CRI,
        CRI.getOperand(0));

  // If the cleanupret has an unwind destination, that block must start with
  // a non-landingpad EH pad (WinEH-style pads only).
  if (BasicBlock *UnwindDest = CRI.getUnwindDest()) {
    BasicBlock::iterator I = UnwindDest->getFirstNonPHIIt();
    Check(I->isEHPad() && !isa<LandingPadInst>(I),
          "CleanupReturnInst must unwind to an EH block which is not a "
          "landingpad.",
          &CRI);
  }

  visitTerminator(CRI);
}
5247
5248void Verifier::verifyDominatesUse(Instruction &I, unsigned i) {
5249 Instruction *Op = cast<Instruction>(I.getOperand(i));
5250 // If the we have an invalid invoke, don't try to compute the dominance.
5251 // We already reject it in the invoke specific checks and the dominance
5252 // computation doesn't handle multiple edges.
5253 if (InvokeInst *II = dyn_cast<InvokeInst>(Op)) {
5254 if (II->getNormalDest() == II->getUnwindDest())
5255 return;
5256 }
5257
5258 // Quick check whether the def has already been encountered in the same block.
5259 // PHI nodes are not checked to prevent accepting preceding PHIs, because PHI
5260 // uses are defined to happen on the incoming edge, not at the instruction.
5261 //
5262 // FIXME: If this operand is a MetadataAsValue (wrapping a LocalAsMetadata)
5263 // wrapping an SSA value, assert that we've already encountered it. See
5264 // related FIXME in Mapper::mapLocalAsMetadata in ValueMapper.cpp.
5265 if (!isa<PHINode>(I) && InstsInThisBlock.count(Op))
5266 return;
5267
5268 const Use &U = I.getOperandUse(i);
5269 Check(DT.dominates(Op, U), "Instruction does not dominate all uses!", Op, &I);
5270}
5271
void Verifier::visitDereferenceableMetadata(Instruction& I, MDNode* MD) {
  // Shared checker for !dereferenceable and !dereferenceable_or_null: the
  // annotated instruction must produce a pointer value.
  Check(I.getType()->isPointerTy(),
        "dereferenceable, dereferenceable_or_null "
        "apply only to pointer types",
        &I);
        "dereferenceable, dereferenceable_or_null apply only to load"
        " and inttoptr instructions, use attributes for calls or invokes",
        &I);
  // The metadata payload is a single i64 byte count.
  Check(MD->getNumOperands() == 1,
        "dereferenceable, dereferenceable_or_null "
        "take one operand!",
        &I);
  ConstantInt *CI = mdconst::dyn_extract<ConstantInt>(MD->getOperand(0));
  Check(CI && CI->getType()->isIntegerTy(64),
        "dereferenceable, "
        "dereferenceable_or_null metadata value must be an i64!",
        &I);
}
5291
5292void Verifier::visitNofreeMetadata(Instruction &I, MDNode *MD) {
5293 Check(I.getType()->isPointerTy(), "nofree applies only to pointer types", &I);
5294 Check((isa<IntToPtrInst>(I)), "nofree applies only to inttoptr instruction",
5295 &I);
5296 Check(MD->getNumOperands() == 0, "nofree metadata must be empty", &I);
5297}
5298
5299void Verifier::visitProfMetadata(Instruction &I, MDNode *MD) {
5300 auto GetBranchingTerminatorNumOperands = [&]() {
5301 unsigned ExpectedNumOperands = 0;
5302 if (CondBrInst *BI = dyn_cast<CondBrInst>(&I))
5303 ExpectedNumOperands = BI->getNumSuccessors();
5304 else if (SwitchInst *SI = dyn_cast<SwitchInst>(&I))
5305 ExpectedNumOperands = SI->getNumSuccessors();
5306 else if (isa<CallInst>(&I))
5307 ExpectedNumOperands = 1;
5308 else if (IndirectBrInst *IBI = dyn_cast<IndirectBrInst>(&I))
5309 ExpectedNumOperands = IBI->getNumDestinations();
5310 else if (isa<SelectInst>(&I))
5311 ExpectedNumOperands = 2;
5312 else if (CallBrInst *CI = dyn_cast<CallBrInst>(&I))
5313 ExpectedNumOperands = CI->getNumSuccessors();
5314 return ExpectedNumOperands;
5315 };
5316 Check(MD->getNumOperands() >= 1,
5317 "!prof annotations should have at least 1 operand", MD);
5318 // Check first operand.
5319 Check(MD->getOperand(0) != nullptr, "first operand should not be null", MD);
5321 "expected string with name of the !prof annotation", MD);
5322 MDString *MDS = cast<MDString>(MD->getOperand(0));
5323 StringRef ProfName = MDS->getString();
5324
5326 Check(GetBranchingTerminatorNumOperands() != 0 || isa<InvokeInst>(I),
5327 "'unknown' !prof should only appear on instructions on which "
5328 "'branch_weights' would",
5329 MD);
5330 verifyUnknownProfileMetadata(MD);
5331 return;
5332 }
5333
5334 Check(MD->getNumOperands() >= 2,
5335 "!prof annotations should have no less than 2 operands", MD);
5336
5337 // Check consistency of !prof branch_weights metadata.
5338 if (ProfName == MDProfLabels::BranchWeights) {
5339 unsigned NumBranchWeights = getNumBranchWeights(*MD);
5340 if (isa<InvokeInst>(&I)) {
5341 Check(NumBranchWeights == 1 || NumBranchWeights == 2,
5342 "Wrong number of InvokeInst branch_weights operands", MD);
5343 } else {
5344 const unsigned ExpectedNumOperands = GetBranchingTerminatorNumOperands();
5345 if (ExpectedNumOperands == 0)
5346 CheckFailed("!prof branch_weights are not allowed for this instruction",
5347 MD);
5348
5349 Check(NumBranchWeights == ExpectedNumOperands, "Wrong number of operands",
5350 MD);
5351 }
5352 for (unsigned i = getBranchWeightOffset(MD); i < MD->getNumOperands();
5353 ++i) {
5354 auto &MDO = MD->getOperand(i);
5355 Check(MDO, "second operand should not be null", MD);
5357 "!prof brunch_weights operand is not a const int");
5358 }
5359 } else if (ProfName == MDProfLabels::ValueProfile) {
5360 Check(isValueProfileMD(MD), "invalid value profiling metadata", MD);
5361 ConstantInt *KindInt = mdconst::dyn_extract<ConstantInt>(MD->getOperand(1));
5362 Check(KindInt, "VP !prof missing kind argument", MD);
5363
5364 auto Kind = KindInt->getZExtValue();
5365 Check(Kind >= InstrProfValueKind::IPVK_First &&
5366 Kind <= InstrProfValueKind::IPVK_Last,
5367 "Invalid VP !prof kind", MD);
5368 Check(MD->getNumOperands() % 2 == 1,
5369 "VP !prof should have an even number "
5370 "of arguments after 'VP'",
5371 MD);
5372 if (Kind == InstrProfValueKind::IPVK_IndirectCallTarget ||
5373 Kind == InstrProfValueKind::IPVK_MemOPSize)
5375 "VP !prof indirect call or memop size expected to be applied to "
5376 "CallBase instructions only",
5377 MD);
5378 } else {
5379 CheckFailed("expected either branch_weights or VP profile name", MD);
5380 }
5381}
5382
void Verifier::visitDIAssignIDMetadata(Instruction &I, MDNode *MD) {
  assert(I.hasMetadata(LLVMContext::MD_DIAssignID));
  // DIAssignID metadata must be attached to either an alloca or some form of
  // store/memory-writing instruction.
  // FIXME: We allow all intrinsic insts here to avoid trying to enumerate all
  // possible store intrinsics.
  bool ExpectedInstTy =
  CheckDI(ExpectedInstTy, "!DIAssignID attached to unexpected instruction kind",
          I, MD);
  // Iterate over the MetadataAsValue uses of the DIAssignID - these should
  // only be found as DbgAssignIntrinsic operands.
  if (auto *AsValue = MetadataAsValue::getIfExists(Context, MD)) {
    for (auto *User : AsValue->users()) {
              "!DIAssignID should only be used by llvm.dbg.assign intrinsics",
              MD, User);
      // All of the dbg.assign intrinsics should be in the same function as I.
      if (auto *DAI = dyn_cast<DbgAssignIntrinsic>(User))
        CheckDI(DAI->getFunction() == I.getFunction(),
                "dbg.assign not in same function as inst", DAI, &I);
    }
  }
  // Also verify the non-intrinsic debug-record users of this ID: each must
  // be a dbg_assign record and live in the same function as I.
  for (DbgVariableRecord *DVR :
       cast<DIAssignID>(MD)->getAllDbgVariableRecordUsers()) {
    CheckDI(DVR->isDbgAssign(),
            "!DIAssignID should only be used by Assign DVRs.", MD, DVR);
    CheckDI(DVR->getFunction() == I.getFunction(),
            "DVRAssign not in same function as inst", DVR, &I);
  }
}
5414
void Verifier::visitMMRAMetadata(Instruction &I, MDNode *MD) {
        "!mmra metadata attached to unexpected instruction kind", I, MD);

  // MMRA Metadata should either be a tag, e.g. !{!"foo", !"bar"}, or a
  // list of tags such as !2 in the following example:
  // !0 = !{!"a", !"b"}
  // !1 = !{!"c", !"d"}
  // !2 = !{!0, !1}
  if (MMRAMetadata::isTagMD(MD))
    return;

  // Not a single tag, so it must be a tuple whose operands are all tags.
  Check(isa<MDTuple>(MD), "!mmra expected to be a metadata tuple", I, MD);
  for (const MDOperand &MDOp : MD->operands())
    Check(MMRAMetadata::isTagMD(MDOp.get()),
          "!mmra metadata tuple operand is not an MMRA tag", I, MDOp.get());
}
5432
void Verifier::visitCallStackMetadata(MDNode *MD) {
  // Call stack metadata should consist of a list of at least 1 constant int
  // (representing a hash of the location).
  Check(MD->getNumOperands() >= 1,
        "call stack metadata should have at least 1 operand", MD);

  // Each stack frame entry must be a constant integer (the location hash).
  for (const auto &Op : MD->operands())
        "call stack metadata operand should be constant integer", Op);
}
5443
5444void Verifier::visitMemProfMetadata(Instruction &I, MDNode *MD) {
5445 Check(isa<CallBase>(I), "!memprof metadata should only exist on calls", &I);
5446 Check(MD->getNumOperands() >= 1,
5447 "!memprof annotations should have at least 1 metadata operand "
5448 "(MemInfoBlock)",
5449 MD);
5450
5451 // Check each MIB
5452 for (auto &MIBOp : MD->operands()) {
5453 MDNode *MIB = dyn_cast<MDNode>(MIBOp);
5454 // The first operand of an MIB should be the call stack metadata.
5455 // There rest of the operands should be MDString tags, and there should be
5456 // at least one.
5457 Check(MIB->getNumOperands() >= 2,
5458 "Each !memprof MemInfoBlock should have at least 2 operands", MIB);
5459
5460 // Check call stack metadata (first operand).
5461 Check(MIB->getOperand(0) != nullptr,
5462 "!memprof MemInfoBlock first operand should not be null", MIB);
5463 Check(isa<MDNode>(MIB->getOperand(0)),
5464 "!memprof MemInfoBlock first operand should be an MDNode", MIB);
5465 MDNode *StackMD = dyn_cast<MDNode>(MIB->getOperand(0));
5466 visitCallStackMetadata(StackMD);
5467
5468 // The second MIB operand should be MDString.
5470 "!memprof MemInfoBlock second operand should be an MDString", MIB);
5471
5472 // Any remaining should be MDNode that are pairs of integers
5473 for (unsigned I = 2; I < MIB->getNumOperands(); ++I) {
5474 MDNode *OpNode = dyn_cast<MDNode>(MIB->getOperand(I));
5475 Check(OpNode, "Not all !memprof MemInfoBlock operands 2 to N are MDNode",
5476 MIB);
5477 Check(OpNode->getNumOperands() == 2,
5478 "Not all !memprof MemInfoBlock operands 2 to N are MDNode with 2 "
5479 "operands",
5480 MIB);
5481 // Check that all of Op's operands are ConstantInt.
5482 Check(llvm::all_of(OpNode->operands(),
5483 [](const MDOperand &Op) {
5484 return mdconst::hasa<ConstantInt>(Op);
5485 }),
5486 "Not all !memprof MemInfoBlock operands 2 to N are MDNode with "
5487 "ConstantInt operands",
5488 MIB);
5489 }
5490 }
5491}
5492
5493void Verifier::visitCallsiteMetadata(Instruction &I, MDNode *MD) {
5494 Check(isa<CallBase>(I), "!callsite metadata should only exist on calls", &I);
5495 // Verify the partial callstack annotated from memprof profiles. This callsite
5496 // is a part of a profiled allocation callstack.
5497 visitCallStackMetadata(MD);
5498}
5499
5500static inline bool isConstantIntMetadataOperand(const Metadata *MD) {
5501 if (auto *VAL = dyn_cast<ValueAsMetadata>(MD))
5502 return isa<ConstantInt>(VAL->getValue());
5503 return false;
5504}
5505
void Verifier::visitCalleeTypeMetadata(Instruction &I, MDNode *MD) {
  Check(isa<CallBase>(I), "!callee_type metadata should only exist on calls",
        &I);
  // Each list entry must be a two-operand generalized type-id node whose
  // first operand is the constant 0 and whose second is a generalized
  // type-id string.
  for (Metadata *Op : MD->operands()) {
        "The callee_type metadata must be a list of type metadata nodes", Op);
    auto *TypeMD = cast<MDNode>(Op);
    Check(TypeMD->getNumOperands() == 2,
          "Well-formed generalized type metadata must contain exactly two "
          "operands",
          Op);
    Check(isConstantIntMetadataOperand(TypeMD->getOperand(0)) &&
              mdconst::extract<ConstantInt>(TypeMD->getOperand(0))->isZero(),
          "The first operand of type metadata for functions must be zero", Op);
    Check(TypeMD->hasGeneralizedMDString(),
          "Only generalized type metadata can be part of the callee_type "
          "metadata list",
          Op);
  }
}
5526
5527void Verifier::visitAnnotationMetadata(MDNode *Annotation) {
5528 Check(isa<MDTuple>(Annotation), "annotation must be a tuple");
5529 Check(Annotation->getNumOperands() >= 1,
5530 "annotation must have at least one operand");
5531 for (const MDOperand &Op : Annotation->operands()) {
5532 bool TupleOfStrings =
5533 isa<MDTuple>(Op.get()) &&
5534 all_of(cast<MDTuple>(Op)->operands(), [](auto &Annotation) {
5535 return isa<MDString>(Annotation.get());
5536 });
5537 Check(isa<MDString>(Op.get()) || TupleOfStrings,
5538 "operands must be a string or a tuple of strings");
5539 }
5540}
5541
void Verifier::visitAliasScopeMetadata(const MDNode *MD) {
  // A scope has two or three operands: a self-reference or string, the
  // domain node, and an optional name string.
  unsigned NumOps = MD->getNumOperands();
  Check(NumOps >= 2 && NumOps <= 3, "scope must have two or three operands",
        MD);
  Check(MD->getOperand(0).get() == MD || isa<MDString>(MD->getOperand(0)),
        "first scope operand must be self-referential or string", MD);
  if (NumOps == 3)
          "third scope operand must be string (if used)", MD);

  MDNode *Domain = dyn_cast<MDNode>(MD->getOperand(1));
  Check(Domain != nullptr, "second scope operand must be MDNode", MD);

  // The domain has the same shape: a self-reference or string, plus an
  // optional name string.
  unsigned NumDomainOps = Domain->getNumOperands();
  Check(NumDomainOps >= 1 && NumDomainOps <= 2,
        "domain must have one or two operands", Domain);
  Check(Domain->getOperand(0).get() == Domain ||
            isa<MDString>(Domain->getOperand(0)),
        "first domain operand must be self-referential or string", Domain);
  if (NumDomainOps == 2)
    Check(isa<MDString>(Domain->getOperand(1)),
          "second domain operand must be string (if used)", Domain);
}
5565
5566void Verifier::visitAliasScopeListMetadata(const MDNode *MD) {
5567 for (const MDOperand &Op : MD->operands()) {
5568 const MDNode *OpMD = dyn_cast<MDNode>(Op);
5569 Check(OpMD != nullptr, "scope list must consist of MDNodes", MD);
5570 visitAliasScopeMetadata(OpMD);
5571 }
5572}
5573
5574void Verifier::visitAccessGroupMetadata(const MDNode *MD) {
5575 auto IsValidAccessScope = [](const MDNode *MD) {
5576 return MD->getNumOperands() == 0 && MD->isDistinct();
5577 };
5578
5579 // It must be either an access scope itself...
5580 if (IsValidAccessScope(MD))
5581 return;
5582
5583 // ...or a list of access scopes.
5584 for (const MDOperand &Op : MD->operands()) {
5585 const MDNode *OpMD = dyn_cast<MDNode>(Op);
5586 Check(OpMD != nullptr, "Access scope list must consist of MDNodes", MD);
5587 Check(IsValidAccessScope(OpMD),
5588 "Access scope list contains invalid access scope", MD);
5589 }
5590}
5591
5592void Verifier::visitCapturesMetadata(Instruction &I, const MDNode *Captures) {
5593 static const char *ValidArgs[] = {"address_is_null", "address",
5594 "read_provenance", "provenance"};
5595
5596 auto *SI = dyn_cast<StoreInst>(&I);
5597 Check(SI, "!captures metadata can only be applied to store instructions", &I);
5598 Check(SI->getValueOperand()->getType()->isPointerTy(),
5599 "!captures metadata can only be applied to store with value operand of "
5600 "pointer type",
5601 &I);
5602 Check(Captures->getNumOperands() != 0, "!captures metadata cannot be empty",
5603 &I);
5604
5605 for (Metadata *Op : Captures->operands()) {
5606 auto *Str = dyn_cast<MDString>(Op);
5607 Check(Str, "!captures metadata must be a list of strings", &I);
5608 Check(is_contained(ValidArgs, Str->getString()),
5609 "invalid entry in !captures metadata", &I, Str);
5610 }
5611}
5612
void Verifier::visitAllocTokenMetadata(Instruction &I, MDNode *MD) {
  // !alloc_token is only valid on call sites and must carry exactly two
  // operands: a string followed by an integer constant.
  Check(isa<CallBase>(I), "!alloc_token should only exist on calls", &I);
  Check(MD->getNumOperands() == 2, "!alloc_token must have 2 operands", MD);
  Check(isa<MDString>(MD->getOperand(0)), "expected string", MD);
        "expected integer constant", MD);
}
5620
void Verifier::visitInlineHistoryMetadata(Instruction &I, MDNode *MD) {
  // !inline_history is only valid on call sites; each operand must name a
  // function (or be null once that function has been erased).
  Check(isa<CallBase>(I), "!inline_history should only exist on calls", &I);
  for (Metadata *Op : MD->operands()) {
    // Can be null when a function is erased.
    if (!Op)
      continue;
                            ->getValue()
                            ->stripPointerCastsAndAliases()),
          "!inline_history operands must be functions or null", MD);
  }
}
5634
/// verifyInstruction - Verify that an instruction is well formed.
///
/// Performs the generic per-instruction checks shared by all instruction
/// kinds: basic-block parenting, self-reference, result-type legality,
/// operand validity (including cross-function/module references and
/// dominance), and the common metadata attachments.
void Verifier::visitInstruction(Instruction &I) {
  BasicBlock *BB = I.getParent();
  Check(BB, "Instruction not embedded in basic block!", &I);

  if (!isa<PHINode>(I)) { // Check that non-phi nodes are not self referential
    for (User *U : I.users()) {
      Check(U != (User *)&I || !DT.isReachableFromEntry(BB),
            "Only PHI nodes may reference their own value!", &I);
    }
  }

  // Check that void typed values don't have names
  Check(!I.getType()->isVoidTy() || !I.hasName(),
        "Instruction has a name, but provides a void value!", &I);

  // Check that the return value of the instruction is either void or a legal
  // value type.
  Check(I.getType()->isVoidTy() || I.getType()->isFirstClassType(),
        "Instruction returns a non-scalar type!", &I);

  // Check that the instruction doesn't produce metadata. Calls are already
  // checked against the callee type.
  Check(!I.getType()->isMetadataTy() || isa<CallInst>(I) || isa<InvokeInst>(I),
        "Invalid use of metadata!", &I);

  // Check that all uses of the instruction, if they are instructions
  // themselves, actually have parent basic blocks. If the use is not an
  // instruction, it is an error!
  for (Use &U : I.uses()) {
    if (Instruction *Used = dyn_cast<Instruction>(U.getUser()))
      Check(Used->getParent() != nullptr,
            "Instruction referencing"
            " instruction not embedded in a basic block!",
            &I, Used);
    else {
      CheckFailed("Use of instruction is not an instruction!", U);
      return;
    }
  }

  // Get a pointer to the call base of the instruction if it is some form of
  // call.
  const CallBase *CBI = dyn_cast<CallBase>(&I);

  // Validate each operand according to what kind of value it is.
  for (unsigned i = 0, e = I.getNumOperands(); i != e; ++i) {
    Check(I.getOperand(i) != nullptr, "Instruction has null operand!", &I);

    // Check to make sure that only first-class-values are operands to
    // instructions.
    if (!I.getOperand(i)->getType()->isFirstClassType()) {
      Check(false, "Instruction operands must be first-class values!", &I);
    }

    if (Function *F = dyn_cast<Function>(I.getOperand(i))) {
      // This code checks whether the function is used as the operand of a
      // clang_arc_attachedcall operand bundle.
      auto IsAttachedCallOperand = [](Function *F, const CallBase *CBI,
                                      int Idx) {
        return CBI && CBI->isOperandBundleOfType(
      };

      // Check to make sure that the "address of" an intrinsic function is never
      // taken. Ignore cases where the address of the intrinsic function is used
      // as the argument of operand bundle "clang.arc.attachedcall" as those
      // cases are handled in verifyAttachedCallBundle.
      Check((!F->isIntrinsic() ||
             (CBI && &CBI->getCalledOperandUse() == &I.getOperandUse(i)) ||
             IsAttachedCallOperand(F, CBI, i)),
            "Cannot take the address of an intrinsic!", &I);
      // Only a small allowlist of intrinsics may be invoked (rather than
      // called); everything else must appear in a call/callbr.
      Check(!F->isIntrinsic() || isa<CallInst>(I) || isa<CallBrInst>(I) ||
                F->getIntrinsicID() == Intrinsic::donothing ||
                F->getIntrinsicID() == Intrinsic::seh_try_begin ||
                F->getIntrinsicID() == Intrinsic::seh_try_end ||
                F->getIntrinsicID() == Intrinsic::seh_scope_begin ||
                F->getIntrinsicID() == Intrinsic::seh_scope_end ||
                F->getIntrinsicID() == Intrinsic::coro_resume ||
                F->getIntrinsicID() == Intrinsic::coro_destroy ||
                F->getIntrinsicID() == Intrinsic::coro_await_suspend_void ||
                F->getIntrinsicID() == Intrinsic::coro_await_suspend_bool ||
                F->getIntrinsicID() == Intrinsic::coro_await_suspend_handle ||
                F->getIntrinsicID() ==
                    Intrinsic::experimental_patchpoint_void ||
                F->getIntrinsicID() == Intrinsic::experimental_patchpoint ||
                F->getIntrinsicID() == Intrinsic::fake_use ||
                F->getIntrinsicID() == Intrinsic::experimental_gc_statepoint ||
                F->getIntrinsicID() == Intrinsic::wasm_throw ||
                F->getIntrinsicID() == Intrinsic::wasm_rethrow ||
                IsAttachedCallOperand(F, CBI, i),
            "Cannot invoke an intrinsic other than donothing, patchpoint, "
            "statepoint, coro_resume, coro_destroy, clang.arc.attachedcall or "
            "wasm.(re)throw",
            &I);
      Check(F->getParent() == &M, "Referencing function in another module!", &I,
            &M, F, F->getParent());
    } else if (BasicBlock *OpBB = dyn_cast<BasicBlock>(I.getOperand(i))) {
      Check(OpBB->getParent() == BB->getParent(),
            "Referring to a basic block in another function!", &I);
    } else if (Argument *OpArg = dyn_cast<Argument>(I.getOperand(i))) {
      Check(OpArg->getParent() == BB->getParent(),
            "Referring to an argument in another function!", &I);
    } else if (GlobalValue *GV = dyn_cast<GlobalValue>(I.getOperand(i))) {
      Check(GV->getParent() == &M, "Referencing global in another module!", &I,
            &M, GV, GV->getParent());
    } else if (Instruction *OpInst = dyn_cast<Instruction>(I.getOperand(i))) {
      Check(OpInst->getFunction() == BB->getParent(),
            "Referring to an instruction in another function!", &I);
      verifyDominatesUse(I, i);
    } else if (isa<InlineAsm>(I.getOperand(i))) {
      Check(CBI && &CBI->getCalledOperandUse() == &I.getOperandUse(i),
            "Cannot take the address of an inline asm!", &I);
    } else if (auto *C = dyn_cast<Constant>(I.getOperand(i))) {
      visitConstantExprsRecursively(C);
    }
  }

  // Metadata attachment checks. Each known kind is validated by a dedicated
  // helper; unknown kinds only go through the generic visitMDNode walk below.
  if (MDNode *MD = I.getMetadata(LLVMContext::MD_fpmath)) {
    Check(I.getType()->isFPOrFPVectorTy(),
          "fpmath requires a floating point result!", &I);
    Check(MD->getNumOperands() == 1, "fpmath takes one operand!", &I);
    if (ConstantFP *CFP0 =
      const APFloat &Accuracy = CFP0->getValueAPF();
      Check(&Accuracy.getSemantics() == &APFloat::IEEEsingle(),
            "fpmath accuracy must have float type", &I);
      Check(Accuracy.isFiniteNonZero() && !Accuracy.isNegative(),
            "fpmath accuracy not a positive number!", &I);
    } else {
      Check(false, "invalid fpmath accuracy!", &I);
    }
  }

  if (MDNode *Range = I.getMetadata(LLVMContext::MD_range)) {
          "Ranges are only for loads, calls and invokes!", &I);
    visitRangeMetadata(I, Range, I.getType());
  }

  if (MDNode *MD = I.getMetadata(LLVMContext::MD_nofpclass)) {
    Check(isa<LoadInst>(I), "nofpclass is only for loads", &I);
    visitNoFPClassMetadata(I, MD, I.getType());
  }

  if (MDNode *Range = I.getMetadata(LLVMContext::MD_noalias_addrspace)) {
          "noalias.addrspace are only for memory operations!", &I);
    visitNoaliasAddrspaceMetadata(I, Range, I.getType());
  }

  if (I.hasMetadata(LLVMContext::MD_invariant_group)) {
          "invariant.group metadata is only for loads and stores", &I);
  }

  if (MDNode *MD = I.getMetadata(LLVMContext::MD_nonnull)) {
    Check(I.getType()->isPointerTy(), "nonnull applies only to pointer types",
          &I);
          "nonnull applies only to load instructions, use attributes"
          " for calls or invokes",
          &I);
    Check(MD->getNumOperands() == 0, "nonnull metadata must be empty", &I);
  }

  if (MDNode *MD = I.getMetadata(LLVMContext::MD_dereferenceable))
    visitDereferenceableMetadata(I, MD);

  if (MDNode *MD = I.getMetadata(LLVMContext::MD_dereferenceable_or_null))
    visitDereferenceableMetadata(I, MD);

  if (MDNode *MD = I.getMetadata(LLVMContext::MD_nofree))
    visitNofreeMetadata(I, MD);

  if (MDNode *TBAA = I.getMetadata(LLVMContext::MD_tbaa))
    TBAAVerifyHelper.visitTBAAMetadata(&I, TBAA);

  if (MDNode *MD = I.getMetadata(LLVMContext::MD_noalias))
    visitAliasScopeListMetadata(MD);
  if (MDNode *MD = I.getMetadata(LLVMContext::MD_alias_scope))
    visitAliasScopeListMetadata(MD);

  if (MDNode *MD = I.getMetadata(LLVMContext::MD_access_group))
    visitAccessGroupMetadata(MD);

  if (MDNode *AlignMD = I.getMetadata(LLVMContext::MD_align)) {
    Check(I.getType()->isPointerTy(), "align applies only to pointer types",
          &I);
          "align applies only to load instructions, "
          "use attributes for calls or invokes",
          &I);
    Check(AlignMD->getNumOperands() == 1, "align takes one operand!", &I);
    ConstantInt *CI = mdconst::dyn_extract<ConstantInt>(AlignMD->getOperand(0));
    Check(CI && CI->getType()->isIntegerTy(64),
          "align metadata value must be an i64!", &I);
    uint64_t Align = CI->getZExtValue();
    Check(isPowerOf2_64(Align), "align metadata value must be a power of 2!",
          &I);
    // NOTE(review): "larger that" below should read "larger than"; fixing the
    // diagnostic text also requires updating any tests matching the message.
    Check(Align <= Value::MaximumAlignment,
          "alignment is larger that implementation defined limit", &I);
  }

  if (MDNode *MD = I.getMetadata(LLVMContext::MD_prof))
    visitProfMetadata(I, MD);

  if (MDNode *MD = I.getMetadata(LLVMContext::MD_memprof))
    visitMemProfMetadata(I, MD);

  if (MDNode *MD = I.getMetadata(LLVMContext::MD_callsite))
    visitCallsiteMetadata(I, MD);

  if (MDNode *MD = I.getMetadata(LLVMContext::MD_callee_type))
    visitCalleeTypeMetadata(I, MD);

  if (MDNode *MD = I.getMetadata(LLVMContext::MD_DIAssignID))
    visitDIAssignIDMetadata(I, MD);

  if (MDNode *MMRA = I.getMetadata(LLVMContext::MD_mmra))
    visitMMRAMetadata(I, MMRA);

  if (MDNode *Annotation = I.getMetadata(LLVMContext::MD_annotation))
    visitAnnotationMetadata(Annotation);

  if (MDNode *Captures = I.getMetadata(LLVMContext::MD_captures))
    visitCapturesMetadata(I, Captures);

  if (MDNode *MD = I.getMetadata(LLVMContext::MD_alloc_token))
    visitAllocTokenMetadata(I, MD);

  if (MDNode *MD = I.getMetadata(LLVMContext::MD_inline_history))
    visitInlineHistoryMetadata(I, MD);

  // The debug location, if present, must be a DILocation.
  if (MDNode *N = I.getDebugLoc().getAsMDNode()) {
    CheckDI(isa<DILocation>(N), "invalid !dbg metadata attachment", &I, N);
    visitMDNode(*N, AreDebugLocsAllowed::Yes);

    if (auto *DL = dyn_cast<DILocation>(N)) {
      if (DL->getAtomGroup()) {
        CheckDI(DL->getScope()->getSubprogram()->getKeyInstructionsEnabled(),
                "DbgLoc uses atomGroup but DISubprogram doesn't have Key "
                "Instructions enabled",
                DL, DL->getScope()->getSubprogram());
      }
    }
  }

  // Generic walk over every attachment; debug locations are only permitted
  // inside !dbg and !loop attachments.
  I.getAllMetadata(MDs);
  for (auto Attachment : MDs) {
    unsigned Kind = Attachment.first;
    auto AllowLocs =
        (Kind == LLVMContext::MD_dbg || Kind == LLVMContext::MD_loop)
            ? AreDebugLocsAllowed::Yes
            : AreDebugLocsAllowed::No;
    visitMDNode(*Attachment.second, AllowLocs);
  }

  InstsInThisBlock.insert(&I);
}
5897
5898/// Allow intrinsics to be verified in different ways.
5899void Verifier::visitIntrinsicCall(Intrinsic::ID ID, CallBase &Call) {
5901 Check(IF->isDeclaration(), "Intrinsic functions should never be defined!",
5902 IF);
5903
5904 // Verify that the intrinsic prototype lines up with what the .td files
5905 // describe.
5906 FunctionType *IFTy = IF->getFunctionType();
5907 bool IsVarArg = IFTy->isVarArg();
5908
5912
5913 // Walk the descriptors to extract overloaded types.
5918 "Intrinsic has incorrect return type!", IF);
5920 "Intrinsic has incorrect argument type!", IF);
5921
5922 // Verify if the intrinsic call matches the vararg property.
5923 if (IsVarArg)
5925 "Intrinsic was not defined with variable arguments!", IF);
5926 else
5928 "Callsite was not defined with variable arguments!", IF);
5929
5930 // All descriptors should be absorbed by now.
5931 Check(TableRef.empty(), "Intrinsic has too few arguments!", IF);
5932
5933 // Now that we have the intrinsic ID and the actual argument types (and we
5934 // know they are legal for the intrinsic!) get the intrinsic name through the
5935 // usual means. This allows us to verify the mangling of argument types into
5936 // the name.
5937 const std::string ExpectedName =
5938 Intrinsic::getName(ID, ArgTys, IF->getParent(), IFTy);
5939 Check(ExpectedName == IF->getName(),
5940 "Intrinsic name not mangled correctly for type arguments! "
5941 "Should be: " +
5942 ExpectedName,
5943 IF);
5944
5945 // If the intrinsic takes MDNode arguments, verify that they are either global
5946 // or are local to *this* function.
5947 for (Value *V : Call.args()) {
5948 if (auto *MD = dyn_cast<MetadataAsValue>(V))
5949 visitMetadataAsValue(*MD, Call.getCaller());
5950 if (auto *Const = dyn_cast<Constant>(V))
5951 Check(!Const->getType()->isX86_AMXTy(),
5952 "const x86_amx is not allowed in argument!");
5953 }
5954
5955 switch (ID) {
5956 default:
5957 break;
5958 case Intrinsic::assume: {
5959 if (Call.hasOperandBundles()) {
5961 Check(Cond && Cond->isOne(),
5962 "assume with operand bundles must have i1 true condition", Call);
5963 }
5964 for (auto &Elem : Call.bundle_op_infos()) {
5965 unsigned ArgCount = Elem.End - Elem.Begin;
5966 // Separate storage assumptions are special insofar as they're the only
5967 // operand bundles allowed on assumes that aren't parameter attributes.
5968 if (Elem.Tag->getKey() == "separate_storage") {
5969 Check(ArgCount == 2,
5970 "separate_storage assumptions should have 2 arguments", Call);
5971 Check(Call.getOperand(Elem.Begin)->getType()->isPointerTy() &&
5972 Call.getOperand(Elem.Begin + 1)->getType()->isPointerTy(),
5973 "arguments to separate_storage assumptions should be pointers",
5974 Call);
5975 continue;
5976 }
5977 Check(Elem.Tag->getKey() == "ignore" ||
5978 Attribute::isExistingAttribute(Elem.Tag->getKey()),
5979 "tags must be valid attribute names", Call);
5980 Attribute::AttrKind Kind =
5981 Attribute::getAttrKindFromName(Elem.Tag->getKey());
5982 if (Kind == Attribute::Alignment) {
5983 Check(ArgCount <= 3 && ArgCount >= 2,
5984 "alignment assumptions should have 2 or 3 arguments", Call);
5985 Check(Call.getOperand(Elem.Begin)->getType()->isPointerTy(),
5986 "first argument should be a pointer", Call);
5987 Check(Call.getOperand(Elem.Begin + 1)->getType()->isIntegerTy(),
5988 "second argument should be an integer", Call);
5989 if (ArgCount == 3)
5990 Check(Call.getOperand(Elem.Begin + 2)->getType()->isIntegerTy(),
5991 "third argument should be an integer if present", Call);
5992 continue;
5993 }
5994 if (Kind == Attribute::Dereferenceable) {
5995 Check(ArgCount == 2,
5996 "dereferenceable assumptions should have 2 arguments", Call);
5997 Check(Call.getOperand(Elem.Begin)->getType()->isPointerTy(),
5998 "first argument should be a pointer", Call);
5999 Check(Call.getOperand(Elem.Begin + 1)->getType()->isIntegerTy(),
6000 "second argument should be an integer", Call);
6001 continue;
6002 }
6003 Check(ArgCount <= 2, "too many arguments", Call);
6004 if (Kind == Attribute::None)
6005 break;
6006 if (Attribute::isIntAttrKind(Kind)) {
6007 Check(ArgCount == 2, "this attribute should have 2 arguments", Call);
6008 Check(isa<ConstantInt>(Call.getOperand(Elem.Begin + 1)),
6009 "the second argument should be a constant integral value", Call);
6010 } else if (Attribute::canUseAsParamAttr(Kind)) {
6011 Check((ArgCount) == 1, "this attribute should have one argument", Call);
6012 } else if (Attribute::canUseAsFnAttr(Kind)) {
6013 Check((ArgCount) == 0, "this attribute has no argument", Call);
6014 }
6015 }
6016 break;
6017 }
6018 case Intrinsic::ucmp:
6019 case Intrinsic::scmp: {
6020 Type *SrcTy = Call.getOperand(0)->getType();
6021 Type *DestTy = Call.getType();
6022
6023 Check(DestTy->getScalarSizeInBits() >= 2,
6024 "result type must be at least 2 bits wide", Call);
6025
6026 bool IsDestTypeVector = DestTy->isVectorTy();
6027 Check(SrcTy->isVectorTy() == IsDestTypeVector,
6028 "ucmp/scmp argument and result types must both be either vector or "
6029 "scalar types",
6030 Call);
6031 if (IsDestTypeVector) {
6032 auto SrcVecLen = cast<VectorType>(SrcTy)->getElementCount();
6033 auto DestVecLen = cast<VectorType>(DestTy)->getElementCount();
6034 Check(SrcVecLen == DestVecLen,
6035 "return type and arguments must have the same number of "
6036 "elements",
6037 Call);
6038 }
6039 break;
6040 }
6041 case Intrinsic::coro_begin:
6042 case Intrinsic::coro_begin_custom_abi:
6044 "id argument of llvm.coro.begin must refer to coro.id");
6045 break;
6046 case Intrinsic::coro_id: {
6048 "align argument only accepts constants");
6049 auto *Promise = Call.getArgOperand(1);
6050 Check(isa<ConstantPointerNull>(Promise) || isa<AllocaInst>(Promise),
6051 "promise argument must refer to an alloca");
6052
6053 auto *CoroAddr = Call.getArgOperand(2)->stripPointerCasts();
6054 bool BeforeCoroEarly = isa<ConstantPointerNull>(CoroAddr);
6055 Check(BeforeCoroEarly || isa<Function>(CoroAddr),
6056 "coro argument must refer to a function");
6057
6058 auto *InfoArg = Call.getArgOperand(3);
6059 bool BeforeCoroSplit = isa<ConstantPointerNull>(InfoArg);
6060 if (BeforeCoroSplit)
6061 break;
6062
6063 Check(!BeforeCoroEarly, "cannot run CoroSplit before CoroEarly");
6064 auto *GV = dyn_cast<GlobalVariable>(InfoArg);
6065 Check(GV && GV->isConstant() && GV->hasDefinitiveInitializer(),
6066 "info argument of llvm.coro.id must refer to an initialized "
6067 "constant");
6068 Constant *Init = GV->getInitializer();
6070 "info argument of llvm.coro.id must refer to either a struct or "
6071 "an array");
6072 break;
6073 }
6074 case Intrinsic::is_fpclass: {
6075 const ConstantInt *TestMask = cast<ConstantInt>(Call.getOperand(1));
6076 Check((TestMask->getZExtValue() & ~static_cast<unsigned>(fcAllFlags)) == 0,
6077 "unsupported bits for llvm.is.fpclass test mask");
6078 break;
6079 }
6080 case Intrinsic::fptrunc_round: {
6081 // Check the rounding mode
6082 Metadata *MD = nullptr;
6084 if (MAV)
6085 MD = MAV->getMetadata();
6086
6087 Check(MD != nullptr, "missing rounding mode argument", Call);
6088
6089 Check(isa<MDString>(MD),
6090 ("invalid value for llvm.fptrunc.round metadata operand"
6091 " (the operand should be a string)"),
6092 MD);
6093
6094 std::optional<RoundingMode> RoundMode =
6095 convertStrToRoundingMode(cast<MDString>(MD)->getString());
6096 Check(RoundMode && *RoundMode != RoundingMode::Dynamic,
6097 "unsupported rounding mode argument", Call);
6098 break;
6099 }
6100 case Intrinsic::convert_to_arbitrary_fp: {
6101 // Check that vector element counts are consistent.
6102 Type *ValueTy = Call.getArgOperand(0)->getType();
6103 Type *IntTy = Call.getType();
6104
6105 if (auto *ValueVecTy = dyn_cast<VectorType>(ValueTy)) {
6106 auto *IntVecTy = dyn_cast<VectorType>(IntTy);
6107 Check(IntVecTy,
6108 "if floating-point operand is a vector, integer operand must also "
6109 "be a vector",
6110 Call);
6111 Check(ValueVecTy->getElementCount() == IntVecTy->getElementCount(),
6112 "floating-point and integer vector operands must have the same "
6113 "element count",
6114 Call);
6115 }
6116
6117 // Check interpretation metadata (argoperand 1).
6118 auto *InterpMAV = dyn_cast<MetadataAsValue>(Call.getArgOperand(1));
6119 Check(InterpMAV, "missing interpretation metadata operand", Call);
6120 auto *InterpStr = dyn_cast<MDString>(InterpMAV->getMetadata());
6121 Check(InterpStr, "interpretation metadata operand must be a string", Call);
6122 StringRef Interp = InterpStr->getString();
6123
6124 Check(!Interp.empty(), "interpretation metadata string must not be empty",
6125 Call);
6126
6127 // Valid interpretation strings: mini-float format names.
6129 "unsupported interpretation metadata string", Call);
6130
6131 // Check rounding mode metadata (argoperand 2).
6132 auto *RoundingMAV = dyn_cast<MetadataAsValue>(Call.getArgOperand(2));
6133 Check(RoundingMAV, "missing rounding mode metadata operand", Call);
6134 auto *RoundingStr = dyn_cast<MDString>(RoundingMAV->getMetadata());
6135 Check(RoundingStr, "rounding mode metadata operand must be a string", Call);
6136
6137 std::optional<RoundingMode> RM =
6138 convertStrToRoundingMode(RoundingStr->getString());
6139 Check(RM && *RM != RoundingMode::Dynamic,
6140 "unsupported rounding mode argument", Call);
6141 break;
6142 }
6143 case Intrinsic::convert_from_arbitrary_fp: {
6144 // Check that vector element counts are consistent.
6145 Type *IntTy = Call.getArgOperand(0)->getType();
6146 Type *ValueTy = Call.getType();
6147
6148 if (auto *ValueVecTy = dyn_cast<VectorType>(ValueTy)) {
6149 auto *IntVecTy = dyn_cast<VectorType>(IntTy);
6150 Check(IntVecTy,
6151 "if floating-point operand is a vector, integer operand must also "
6152 "be a vector",
6153 Call);
6154 Check(ValueVecTy->getElementCount() == IntVecTy->getElementCount(),
6155 "floating-point and integer vector operands must have the same "
6156 "element count",
6157 Call);
6158 }
6159
6160 // Check interpretation metadata (argoperand 1).
6161 auto *InterpMAV = dyn_cast<MetadataAsValue>(Call.getArgOperand(1));
6162 Check(InterpMAV, "missing interpretation metadata operand", Call);
6163 auto *InterpStr = dyn_cast<MDString>(InterpMAV->getMetadata());
6164 Check(InterpStr, "interpretation metadata operand must be a string", Call);
6165 StringRef Interp = InterpStr->getString();
6166
6167 Check(!Interp.empty(), "interpretation metadata string must not be empty",
6168 Call);
6169
6170 // Valid interpretation strings: mini-float format names.
6172 "unsupported interpretation metadata string", Call);
6173 break;
6174 }
6175#define BEGIN_REGISTER_VP_INTRINSIC(VPID, ...) case Intrinsic::VPID:
6176#include "llvm/IR/VPIntrinsics.def"
6177#undef BEGIN_REGISTER_VP_INTRINSIC
6178 visitVPIntrinsic(cast<VPIntrinsic>(Call));
6179 break;
6180#define INSTRUCTION(NAME, NARGS, ROUND_MODE, INTRINSIC) \
6181 case Intrinsic::INTRINSIC:
6182#include "llvm/IR/ConstrainedOps.def"
6183#undef INSTRUCTION
6184 visitConstrainedFPIntrinsic(cast<ConstrainedFPIntrinsic>(Call));
6185 break;
6186 case Intrinsic::dbg_declare: // llvm.dbg.declare
6187 case Intrinsic::dbg_value: // llvm.dbg.value
6188 case Intrinsic::dbg_assign: // llvm.dbg.assign
6189 case Intrinsic::dbg_label: // llvm.dbg.label
6190 // We no longer interpret debug intrinsics (the old variable-location
6191 // design). They're meaningless as far as LLVM is concerned; we could make
6192 // it an error for them to appear, but it's possible we'll have users
6193 // converting back to intrinsics for the foreseeable future (such as DXIL),
6194 // so tolerate their existence.
6195 break;
6196 case Intrinsic::memcpy:
6197 case Intrinsic::memcpy_inline:
6198 case Intrinsic::memmove:
6199 case Intrinsic::memset:
6200 case Intrinsic::memset_inline:
6201 break;
6202 case Intrinsic::experimental_memset_pattern: {
6203 const auto Memset = cast<MemSetPatternInst>(&Call);
6204 Check(Memset->getValue()->getType()->isSized(),
6205 "unsized types cannot be used as memset patterns", Call);
6206 break;
6207 }
6208 case Intrinsic::memcpy_element_unordered_atomic:
6209 case Intrinsic::memmove_element_unordered_atomic:
6210 case Intrinsic::memset_element_unordered_atomic: {
6211 const auto *AMI = cast<AnyMemIntrinsic>(&Call);
6212
6213 ConstantInt *ElementSizeCI =
6214 cast<ConstantInt>(AMI->getRawElementSizeInBytes());
6215 const APInt &ElementSizeVal = ElementSizeCI->getValue();
6216 Check(ElementSizeVal.isPowerOf2(),
6217 "element size of the element-wise atomic memory intrinsic "
6218 "must be a power of 2",
6219 Call);
6220
6221 auto IsValidAlignment = [&](MaybeAlign Alignment) {
6222 return Alignment && ElementSizeVal.ule(Alignment->value());
6223 };
6224 Check(IsValidAlignment(AMI->getDestAlign()),
6225 "incorrect alignment of the destination argument", Call);
6226 if (const auto *AMT = dyn_cast<AnyMemTransferInst>(AMI)) {
6227 Check(IsValidAlignment(AMT->getSourceAlign()),
6228 "incorrect alignment of the source argument", Call);
6229 }
6230 break;
6231 }
6232 case Intrinsic::call_preallocated_setup: {
6233 auto *NumArgs = cast<ConstantInt>(Call.getArgOperand(0));
6234 bool FoundCall = false;
6235 for (User *U : Call.users()) {
6236 auto *UseCall = dyn_cast<CallBase>(U);
6237 Check(UseCall != nullptr,
6238 "Uses of llvm.call.preallocated.setup must be calls");
6239 Intrinsic::ID IID = UseCall->getIntrinsicID();
6240 if (IID == Intrinsic::call_preallocated_arg) {
6241 auto *AllocArgIndex = dyn_cast<ConstantInt>(UseCall->getArgOperand(1));
6242 Check(AllocArgIndex != nullptr,
6243 "llvm.call.preallocated.alloc arg index must be a constant");
6244 auto AllocArgIndexInt = AllocArgIndex->getValue();
6245 Check(AllocArgIndexInt.sge(0) &&
6246 AllocArgIndexInt.slt(NumArgs->getValue()),
6247 "llvm.call.preallocated.alloc arg index must be between 0 and "
6248 "corresponding "
6249 "llvm.call.preallocated.setup's argument count");
6250 } else if (IID == Intrinsic::call_preallocated_teardown) {
6251 // nothing to do
6252 } else {
6253 Check(!FoundCall, "Can have at most one call corresponding to a "
6254 "llvm.call.preallocated.setup");
6255 FoundCall = true;
6256 size_t NumPreallocatedArgs = 0;
6257 for (unsigned i = 0; i < UseCall->arg_size(); i++) {
6258 if (UseCall->paramHasAttr(i, Attribute::Preallocated)) {
6259 ++NumPreallocatedArgs;
6260 }
6261 }
6262 Check(NumPreallocatedArgs != 0,
6263 "cannot use preallocated intrinsics on a call without "
6264 "preallocated arguments");
6265 Check(NumArgs->equalsInt(NumPreallocatedArgs),
6266 "llvm.call.preallocated.setup arg size must be equal to number "
6267 "of preallocated arguments "
6268 "at call site",
6269 Call, *UseCall);
6270 // getOperandBundle() cannot be called if more than one operand bundle
6271 // of this kind exists. There is already a check elsewhere for this, so
6272 // skip here if we see more than one.
6273 if (UseCall->countOperandBundlesOfType(LLVMContext::OB_preallocated) >
6274 1) {
6275 return;
6276 }
6277 auto PreallocatedBundle =
6278 UseCall->getOperandBundle(LLVMContext::OB_preallocated);
6279 Check(PreallocatedBundle,
6280 "Use of llvm.call.preallocated.setup outside intrinsics "
6281 "must be in \"preallocated\" operand bundle");
6282 Check(PreallocatedBundle->Inputs.front().get() == &Call,
6283 "preallocated bundle must have token from corresponding "
6284 "llvm.call.preallocated.setup");
6285 }
6286 }
6287 break;
6288 }
6289 case Intrinsic::call_preallocated_arg: {
6290 auto *Token = dyn_cast<CallBase>(Call.getArgOperand(0));
6291 Check(Token &&
6292 Token->getIntrinsicID() == Intrinsic::call_preallocated_setup,
6293 "llvm.call.preallocated.arg token argument must be a "
6294 "llvm.call.preallocated.setup");
6295 Check(Call.hasFnAttr(Attribute::Preallocated),
6296 "llvm.call.preallocated.arg must be called with a \"preallocated\" "
6297 "call site attribute");
6298 break;
6299 }
6300 case Intrinsic::call_preallocated_teardown: {
6301 auto *Token = dyn_cast<CallBase>(Call.getArgOperand(0));
6302 Check(Token &&
6303 Token->getIntrinsicID() == Intrinsic::call_preallocated_setup,
6304 "llvm.call.preallocated.teardown token argument must be a "
6305 "llvm.call.preallocated.setup");
6306 break;
6307 }
6308 case Intrinsic::gcroot:
6309 case Intrinsic::gcwrite:
6310 case Intrinsic::gcread:
6311 if (ID == Intrinsic::gcroot) {
6312 AllocaInst *AI =
6314 Check(AI, "llvm.gcroot parameter #1 must be an alloca.", Call);
6316 "llvm.gcroot parameter #2 must be a constant.", Call);
6317 if (!AI->getAllocatedType()->isPointerTy()) {
6319 "llvm.gcroot parameter #1 must either be a pointer alloca, "
6320 "or argument #2 must be a non-null constant.",
6321 Call);
6322 }
6323 }
6324
6325 Check(Call.getParent()->getParent()->hasGC(),
6326 "Enclosing function does not use GC.", Call);
6327 break;
6328 case Intrinsic::init_trampoline:
6330 "llvm.init_trampoline parameter #2 must resolve to a function.",
6331 Call);
6332 break;
6333 case Intrinsic::prefetch:
6334 Check(cast<ConstantInt>(Call.getArgOperand(1))->getZExtValue() < 2,
6335 "rw argument to llvm.prefetch must be 0-1", Call);
6336 Check(cast<ConstantInt>(Call.getArgOperand(2))->getZExtValue() < 4,
6337 "locality argument to llvm.prefetch must be 0-3", Call);
6338 Check(cast<ConstantInt>(Call.getArgOperand(3))->getZExtValue() < 2,
6339 "cache type argument to llvm.prefetch must be 0-1", Call);
6340 break;
6341 case Intrinsic::reloc_none: {
6343 cast<MetadataAsValue>(Call.getArgOperand(0))->getMetadata()),
6344 "llvm.reloc.none argument must be a metadata string", &Call);
6345 break;
6346 }
6347 case Intrinsic::stackprotector:
6349 "llvm.stackprotector parameter #2 must resolve to an alloca.", Call);
6350 break;
6351 case Intrinsic::localescape: {
6352 BasicBlock *BB = Call.getParent();
6353 Check(BB->isEntryBlock(), "llvm.localescape used outside of entry block",
6354 Call);
6355 Check(!SawFrameEscape, "multiple calls to llvm.localescape in one function",
6356 Call);
6357 for (Value *Arg : Call.args()) {
6358 if (isa<ConstantPointerNull>(Arg))
6359 continue; // Null values are allowed as placeholders.
6360 auto *AI = dyn_cast<AllocaInst>(Arg->stripPointerCasts());
6361 Check(AI && AI->isStaticAlloca(),
6362 "llvm.localescape only accepts static allocas", Call);
6363 }
6364 FrameEscapeInfo[BB->getParent()].first = Call.arg_size();
6365 SawFrameEscape = true;
6366 break;
6367 }
6368 case Intrinsic::localrecover: {
6370 Function *Fn = dyn_cast<Function>(FnArg);
6371 Check(Fn && !Fn->isDeclaration(),
6372 "llvm.localrecover first "
6373 "argument must be function defined in this module",
6374 Call);
6375 auto *IdxArg = cast<ConstantInt>(Call.getArgOperand(2));
6376 auto &Entry = FrameEscapeInfo[Fn];
6377 Entry.second = unsigned(
6378 std::max(uint64_t(Entry.second), IdxArg->getLimitedValue(~0U) + 1));
6379 break;
6380 }
6381
6382 case Intrinsic::experimental_gc_statepoint:
6383 if (auto *CI = dyn_cast<CallInst>(&Call))
6384 Check(!CI->isInlineAsm(),
6385 "gc.statepoint support for inline assembly unimplemented", CI);
6386 Check(Call.getParent()->getParent()->hasGC(),
6387 "Enclosing function does not use GC.", Call);
6388
6389 verifyStatepoint(Call);
6390 break;
6391 case Intrinsic::experimental_gc_result: {
6392 Check(Call.getParent()->getParent()->hasGC(),
6393 "Enclosing function does not use GC.", Call);
6394
6395 auto *Statepoint = Call.getArgOperand(0);
6396 if (isa<UndefValue>(Statepoint))
6397 break;
6398
6399 // Are we tied to a statepoint properly?
6400 const auto *StatepointCall = dyn_cast<CallBase>(Statepoint);
6401 Check(StatepointCall && StatepointCall->getIntrinsicID() ==
6402 Intrinsic::experimental_gc_statepoint,
6403 "gc.result operand #1 must be from a statepoint", Call,
6404 Call.getArgOperand(0));
6405
6406 // Check that result type matches wrapped callee.
6407 auto *TargetFuncType =
6408 cast<FunctionType>(StatepointCall->getParamElementType(2));
6409 Check(Call.getType() == TargetFuncType->getReturnType(),
6410 "gc.result result type does not match wrapped callee", Call);
6411 break;
6412 }
6413 case Intrinsic::experimental_gc_relocate: {
6414 Check(Call.arg_size() == 3, "wrong number of arguments", Call);
6415
6417 "gc.relocate must return a pointer or a vector of pointers", Call);
6418
6419 // Check that this relocate is correctly tied to the statepoint
6420
6421 // This is case for relocate on the unwinding path of an invoke statepoint
6422 if (LandingPadInst *LandingPad =
6424
6425 const BasicBlock *InvokeBB =
6426 LandingPad->getParent()->getUniquePredecessor();
6427
6428 // Landingpad relocates should have only one predecessor with invoke
6429 // statepoint terminator
6430 Check(InvokeBB, "safepoints should have unique landingpads",
6431 LandingPad->getParent());
6432 Check(InvokeBB->getTerminator(), "safepoint block should be well formed",
6433 InvokeBB);
6435 "gc relocate should be linked to a statepoint", InvokeBB);
6436 } else {
6437 // In all other cases relocate should be tied to the statepoint directly.
6438 // This covers relocates on a normal return path of invoke statepoint and
6439 // relocates of a call statepoint.
6440 auto *Token = Call.getArgOperand(0);
6442 "gc relocate is incorrectly tied to the statepoint", Call, Token);
6443 }
6444
6445 // Verify rest of the relocate arguments.
6446 const Value &StatepointCall = *cast<GCRelocateInst>(Call).getStatepoint();
6447
6448 // Both the base and derived must be piped through the safepoint.
6451 "gc.relocate operand #2 must be integer offset", Call);
6452
6453 Value *Derived = Call.getArgOperand(2);
6454 Check(isa<ConstantInt>(Derived),
6455 "gc.relocate operand #3 must be integer offset", Call);
6456
6457 const uint64_t BaseIndex = cast<ConstantInt>(Base)->getZExtValue();
6458 const uint64_t DerivedIndex = cast<ConstantInt>(Derived)->getZExtValue();
6459
6460 // Check the bounds
6461 if (isa<UndefValue>(StatepointCall))
6462 break;
6463 if (auto Opt = cast<GCStatepointInst>(StatepointCall)
6464 .getOperandBundle(LLVMContext::OB_gc_live)) {
6465 Check(BaseIndex < Opt->Inputs.size(),
6466 "gc.relocate: statepoint base index out of bounds", Call);
6467 Check(DerivedIndex < Opt->Inputs.size(),
6468 "gc.relocate: statepoint derived index out of bounds", Call);
6469 }
6470
6471 // Relocated value must be either a pointer type or vector-of-pointer type,
6472 // but gc_relocate does not need to return the same pointer type as the
6473 // relocated pointer. It can be casted to the correct type later if it's
6474 // desired. However, they must have the same address space and 'vectorness'
6475 GCRelocateInst &Relocate = cast<GCRelocateInst>(Call);
6476 auto *ResultType = Call.getType();
6477 auto *DerivedType = Relocate.getDerivedPtr()->getType();
6478 auto *BaseType = Relocate.getBasePtr()->getType();
6479
6480 Check(BaseType->isPtrOrPtrVectorTy(),
6481 "gc.relocate: relocated value must be a pointer", Call);
6482 Check(DerivedType->isPtrOrPtrVectorTy(),
6483 "gc.relocate: relocated value must be a pointer", Call);
6484
6485 Check(ResultType->isVectorTy() == DerivedType->isVectorTy(),
6486 "gc.relocate: vector relocates to vector and pointer to pointer",
6487 Call);
6488 Check(
6489 ResultType->getPointerAddressSpace() ==
6490 DerivedType->getPointerAddressSpace(),
6491 "gc.relocate: relocating a pointer shouldn't change its address space",
6492 Call);
6493
6494 auto GC = llvm::getGCStrategy(Relocate.getFunction()->getGC());
6495 Check(GC, "gc.relocate: calling function must have GCStrategy",
6496 Call.getFunction());
6497 if (GC) {
6498 auto isGCPtr = [&GC](Type *PTy) {
6499 return GC->isGCManagedPointer(PTy->getScalarType()).value_or(true);
6500 };
6501 Check(isGCPtr(ResultType), "gc.relocate: must return gc pointer", Call);
6502 Check(isGCPtr(BaseType),
6503 "gc.relocate: relocated value must be a gc pointer", Call);
6504 Check(isGCPtr(DerivedType),
6505 "gc.relocate: relocated value must be a gc pointer", Call);
6506 }
6507 break;
6508 }
6509 case Intrinsic::experimental_patchpoint: {
6510 if (Call.getCallingConv() == CallingConv::AnyReg) {
6512 "patchpoint: invalid return type used with anyregcc", Call);
6513 }
6514 break;
6515 }
6516 case Intrinsic::eh_exceptioncode:
6517 case Intrinsic::eh_exceptionpointer: {
6519 "eh.exceptionpointer argument must be a catchpad", Call);
6520 break;
6521 }
6522 case Intrinsic::get_active_lane_mask: {
6524 "get_active_lane_mask: must return a "
6525 "vector",
6526 Call);
6527 auto *ElemTy = Call.getType()->getScalarType();
6528 Check(ElemTy->isIntegerTy(1),
6529 "get_active_lane_mask: element type is not "
6530 "i1",
6531 Call);
6532 break;
6533 }
6534 case Intrinsic::experimental_get_vector_length: {
6535 ConstantInt *VF = cast<ConstantInt>(Call.getArgOperand(1));
6536 Check(!VF->isNegative() && !VF->isZero(),
6537 "get_vector_length: VF must be positive", Call);
6538 break;
6539 }
6540 case Intrinsic::masked_load: {
6541 Check(Call.getType()->isVectorTy(), "masked_load: must return a vector",
6542 Call);
6543
6545 Value *PassThru = Call.getArgOperand(2);
6546 Check(Mask->getType()->isVectorTy(), "masked_load: mask must be vector",
6547 Call);
6548 Check(PassThru->getType() == Call.getType(),
6549 "masked_load: pass through and return type must match", Call);
6550 Check(cast<VectorType>(Mask->getType())->getElementCount() ==
6551 cast<VectorType>(Call.getType())->getElementCount(),
6552 "masked_load: vector mask must be same length as return", Call);
6553 break;
6554 }
6555 case Intrinsic::masked_store: {
6556 Value *Val = Call.getArgOperand(0);
6558 Check(Mask->getType()->isVectorTy(), "masked_store: mask must be vector",
6559 Call);
6560 Check(cast<VectorType>(Mask->getType())->getElementCount() ==
6561 cast<VectorType>(Val->getType())->getElementCount(),
6562 "masked_store: vector mask must be same length as value", Call);
6563 break;
6564 }
6565 case Intrinsic::experimental_guard: {
6566 Check(isa<CallInst>(Call), "experimental_guard cannot be invoked", Call);
6568 "experimental_guard must have exactly one "
6569 "\"deopt\" operand bundle");
6570 break;
6571 }
6572
6573 case Intrinsic::experimental_deoptimize: {
6574 Check(isa<CallInst>(Call), "experimental_deoptimize cannot be invoked",
6575 Call);
6577 "experimental_deoptimize must have exactly one "
6578 "\"deopt\" operand bundle");
6580 "experimental_deoptimize return type must match caller return type");
6581
6582 if (isa<CallInst>(Call)) {
6584 Check(RI,
6585 "calls to experimental_deoptimize must be followed by a return");
6586
6587 if (!Call.getType()->isVoidTy() && RI)
6588 Check(RI->getReturnValue() == &Call,
6589 "calls to experimental_deoptimize must be followed by a return "
6590 "of the value computed by experimental_deoptimize");
6591 }
6592
6593 break;
6594 }
6595 case Intrinsic::vastart: {
6597 "va_start called in a non-varargs function");
6598 break;
6599 }
6600 case Intrinsic::get_dynamic_area_offset: {
6601 auto *IntTy = dyn_cast<IntegerType>(Call.getType());
6602 Check(IntTy && DL.getPointerSizeInBits(DL.getAllocaAddrSpace()) ==
6603 IntTy->getBitWidth(),
6604 "get_dynamic_area_offset result type must be scalar integer matching "
6605 "alloca address space width",
6606 Call);
6607 break;
6608 }
6609 case Intrinsic::masked_udiv:
6610 case Intrinsic::masked_sdiv:
6611 case Intrinsic::masked_urem:
6612 case Intrinsic::masked_srem:
6613 case Intrinsic::vector_reduce_and:
6614 case Intrinsic::vector_reduce_or:
6615 case Intrinsic::vector_reduce_xor:
6616 case Intrinsic::vector_reduce_add:
6617 case Intrinsic::vector_reduce_mul:
6618 case Intrinsic::vector_reduce_smax:
6619 case Intrinsic::vector_reduce_smin:
6620 case Intrinsic::vector_reduce_umax:
6621 case Intrinsic::vector_reduce_umin: {
6622 Type *ArgTy = Call.getArgOperand(0)->getType();
6623 Check(ArgTy->isIntOrIntVectorTy() && ArgTy->isVectorTy(),
6624 "Intrinsic has incorrect argument type!");
6625 break;
6626 }
6627 case Intrinsic::vector_reduce_fmax:
6628 case Intrinsic::vector_reduce_fmin: {
6629 Type *ArgTy = Call.getArgOperand(0)->getType();
6630 Check(ArgTy->isFPOrFPVectorTy() && ArgTy->isVectorTy(),
6631 "Intrinsic has incorrect argument type!");
6632 break;
6633 }
6634 case Intrinsic::vector_reduce_fadd:
6635 case Intrinsic::vector_reduce_fmul: {
6636 // Unlike the other reductions, the first argument is a start value. The
6637 // second argument is the vector to be reduced.
6638 Type *ArgTy = Call.getArgOperand(1)->getType();
6639 Check(ArgTy->isFPOrFPVectorTy() && ArgTy->isVectorTy(),
6640 "Intrinsic has incorrect argument type!");
6641 break;
6642 }
6643 case Intrinsic::smul_fix:
6644 case Intrinsic::smul_fix_sat:
6645 case Intrinsic::umul_fix:
6646 case Intrinsic::umul_fix_sat:
6647 case Intrinsic::sdiv_fix:
6648 case Intrinsic::sdiv_fix_sat:
6649 case Intrinsic::udiv_fix:
6650 case Intrinsic::udiv_fix_sat: {
6651 Value *Op1 = Call.getArgOperand(0);
6652 Value *Op2 = Call.getArgOperand(1);
6654 "first operand of [us][mul|div]_fix[_sat] must be an int type or "
6655 "vector of ints");
6657 "second operand of [us][mul|div]_fix[_sat] must be an int type or "
6658 "vector of ints");
6659
6660 auto *Op3 = cast<ConstantInt>(Call.getArgOperand(2));
6661 Check(Op3->getType()->isIntegerTy(),
6662 "third operand of [us][mul|div]_fix[_sat] must be an int type");
6663 Check(Op3->getBitWidth() <= 32,
6664 "third operand of [us][mul|div]_fix[_sat] must fit within 32 bits");
6665
6666 if (ID == Intrinsic::smul_fix || ID == Intrinsic::smul_fix_sat ||
6667 ID == Intrinsic::sdiv_fix || ID == Intrinsic::sdiv_fix_sat) {
6668 Check(Op3->getZExtValue() < Op1->getType()->getScalarSizeInBits(),
6669 "the scale of s[mul|div]_fix[_sat] must be less than the width of "
6670 "the operands");
6671 } else {
6672 Check(Op3->getZExtValue() <= Op1->getType()->getScalarSizeInBits(),
6673 "the scale of u[mul|div]_fix[_sat] must be less than or equal "
6674 "to the width of the operands");
6675 }
6676 break;
6677 }
6678 case Intrinsic::lrint:
6679 case Intrinsic::llrint:
6680 case Intrinsic::lround:
6681 case Intrinsic::llround: {
6682 Type *ValTy = Call.getArgOperand(0)->getType();
6683 Type *ResultTy = Call.getType();
6684 auto *VTy = dyn_cast<VectorType>(ValTy);
6685 auto *RTy = dyn_cast<VectorType>(ResultTy);
6686 Check(ValTy->isFPOrFPVectorTy() && ResultTy->isIntOrIntVectorTy(),
6687 ExpectedName + ": argument must be floating-point or vector "
6688 "of floating-points, and result must be integer or "
6689 "vector of integers",
6690 &Call);
6691 Check(ValTy->isVectorTy() == ResultTy->isVectorTy(),
6692 ExpectedName + ": argument and result disagree on vector use", &Call);
6693 if (VTy) {
6694 Check(VTy->getElementCount() == RTy->getElementCount(),
6695 ExpectedName + ": argument must be same length as result", &Call);
6696 }
6697 break;
6698 }
6699 case Intrinsic::bswap: {
6700 Type *Ty = Call.getType();
6701 unsigned Size = Ty->getScalarSizeInBits();
6702 Check(Size % 16 == 0, "bswap must be an even number of bytes", &Call);
6703 break;
6704 }
6705 case Intrinsic::invariant_start: {
6706 ConstantInt *InvariantSize = dyn_cast<ConstantInt>(Call.getArgOperand(0));
6707 Check(InvariantSize &&
6708 (!InvariantSize->isNegative() || InvariantSize->isMinusOne()),
6709 "invariant_start parameter must be -1, 0 or a positive number",
6710 &Call);
6711 break;
6712 }
6713 case Intrinsic::matrix_multiply:
6714 case Intrinsic::matrix_transpose:
6715 case Intrinsic::matrix_column_major_load:
6716 case Intrinsic::matrix_column_major_store: {
6718 ConstantInt *Stride = nullptr;
6719 ConstantInt *NumRows;
6720 ConstantInt *NumColumns;
6721 VectorType *ResultTy;
6722 Type *Op0ElemTy = nullptr;
6723 Type *Op1ElemTy = nullptr;
6724 switch (ID) {
6725 case Intrinsic::matrix_multiply: {
6726 NumRows = cast<ConstantInt>(Call.getArgOperand(2));
6727 ConstantInt *N = cast<ConstantInt>(Call.getArgOperand(3));
6728 NumColumns = cast<ConstantInt>(Call.getArgOperand(4));
6730 ->getNumElements() ==
6731 NumRows->getZExtValue() * N->getZExtValue(),
6732 "First argument of a matrix operation does not match specified "
6733 "shape!");
6735 ->getNumElements() ==
6736 N->getZExtValue() * NumColumns->getZExtValue(),
6737 "Second argument of a matrix operation does not match specified "
6738 "shape!");
6739
6740 ResultTy = cast<VectorType>(Call.getType());
6741 Op0ElemTy =
6742 cast<VectorType>(Call.getArgOperand(0)->getType())->getElementType();
6743 Op1ElemTy =
6744 cast<VectorType>(Call.getArgOperand(1)->getType())->getElementType();
6745 break;
6746 }
6747 case Intrinsic::matrix_transpose:
6748 NumRows = cast<ConstantInt>(Call.getArgOperand(1));
6749 NumColumns = cast<ConstantInt>(Call.getArgOperand(2));
6750 ResultTy = cast<VectorType>(Call.getType());
6751 Op0ElemTy =
6752 cast<VectorType>(Call.getArgOperand(0)->getType())->getElementType();
6753 break;
6754 case Intrinsic::matrix_column_major_load: {
6756 NumRows = cast<ConstantInt>(Call.getArgOperand(3));
6757 NumColumns = cast<ConstantInt>(Call.getArgOperand(4));
6758 ResultTy = cast<VectorType>(Call.getType());
6759 break;
6760 }
6761 case Intrinsic::matrix_column_major_store: {
6763 NumRows = cast<ConstantInt>(Call.getArgOperand(4));
6764 NumColumns = cast<ConstantInt>(Call.getArgOperand(5));
6765 ResultTy = cast<VectorType>(Call.getArgOperand(0)->getType());
6766 Op0ElemTy =
6767 cast<VectorType>(Call.getArgOperand(0)->getType())->getElementType();
6768 break;
6769 }
6770 default:
6771 llvm_unreachable("unexpected intrinsic");
6772 }
6773
6774 Check(ResultTy->getElementType()->isIntegerTy() ||
6775 ResultTy->getElementType()->isFloatingPointTy(),
6776 "Result type must be an integer or floating-point type!", IF);
6777
6778 if (Op0ElemTy)
6779 Check(ResultTy->getElementType() == Op0ElemTy,
6780 "Vector element type mismatch of the result and first operand "
6781 "vector!",
6782 IF);
6783
6784 if (Op1ElemTy)
6785 Check(ResultTy->getElementType() == Op1ElemTy,
6786 "Vector element type mismatch of the result and second operand "
6787 "vector!",
6788 IF);
6789
6791 NumRows->getZExtValue() * NumColumns->getZExtValue(),
6792 "Result of a matrix operation does not fit in the returned vector!");
6793
6794 if (Stride) {
6795 Check(Stride->getBitWidth() <= 64, "Stride bitwidth cannot exceed 64!",
6796 IF);
6797 Check(Stride->getZExtValue() >= NumRows->getZExtValue(),
6798 "Stride must be greater or equal than the number of rows!", IF);
6799 }
6800
6801 break;
6802 }
6803 case Intrinsic::stepvector: {
6805 Check(VecTy && VecTy->getScalarType()->isIntegerTy() &&
6806 VecTy->getScalarSizeInBits() >= 8,
6807 "stepvector only supported for vectors of integers "
6808 "with a bitwidth of at least 8.",
6809 &Call);
6810 break;
6811 }
6812 case Intrinsic::experimental_vector_match: {
6813 Value *Op1 = Call.getArgOperand(0);
6814 Value *Op2 = Call.getArgOperand(1);
6816
6817 VectorType *Op1Ty = dyn_cast<VectorType>(Op1->getType());
6818 VectorType *Op2Ty = dyn_cast<VectorType>(Op2->getType());
6819 VectorType *MaskTy = dyn_cast<VectorType>(Mask->getType());
6820
6821 Check(Op1Ty && Op2Ty && MaskTy, "Operands must be vectors.", &Call);
6823 "Second operand must be a fixed length vector.", &Call);
6824 Check(Op1Ty->getElementType()->isIntegerTy(),
6825 "First operand must be a vector of integers.", &Call);
6826 Check(Op1Ty->getElementType() == Op2Ty->getElementType(),
6827 "First two operands must have the same element type.", &Call);
6828 Check(Op1Ty->getElementCount() == MaskTy->getElementCount(),
6829 "First operand and mask must have the same number of elements.",
6830 &Call);
6831 Check(MaskTy->getElementType()->isIntegerTy(1),
6832 "Mask must be a vector of i1's.", &Call);
6833 Check(Call.getType() == MaskTy, "Return type must match the mask type.",
6834 &Call);
6835 break;
6836 }
6837 case Intrinsic::vector_insert: {
6838 Value *Vec = Call.getArgOperand(0);
6839 Value *SubVec = Call.getArgOperand(1);
6840 Value *Idx = Call.getArgOperand(2);
6841 unsigned IdxN = cast<ConstantInt>(Idx)->getZExtValue();
6842
6843 VectorType *VecTy = cast<VectorType>(Vec->getType());
6844 VectorType *SubVecTy = cast<VectorType>(SubVec->getType());
6845
6846 ElementCount VecEC = VecTy->getElementCount();
6847 ElementCount SubVecEC = SubVecTy->getElementCount();
6848 Check(VecTy->getElementType() == SubVecTy->getElementType(),
6849 "vector_insert parameters must have the same element "
6850 "type.",
6851 &Call);
6852 Check(IdxN % SubVecEC.getKnownMinValue() == 0,
6853 "vector_insert index must be a constant multiple of "
6854 "the subvector's known minimum vector length.");
6855
6856 // If this insertion is not the 'mixed' case where a fixed vector is
6857 // inserted into a scalable vector, ensure that the insertion of the
6858 // subvector does not overrun the parent vector.
6859 if (VecEC.isScalable() == SubVecEC.isScalable()) {
6860 Check(IdxN < VecEC.getKnownMinValue() &&
6861 IdxN + SubVecEC.getKnownMinValue() <= VecEC.getKnownMinValue(),
6862 "subvector operand of vector_insert would overrun the "
6863 "vector being inserted into.");
6864 }
6865 break;
6866 }
6867 case Intrinsic::vector_extract: {
6868 Value *Vec = Call.getArgOperand(0);
6869 Value *Idx = Call.getArgOperand(1);
6870 unsigned IdxN = cast<ConstantInt>(Idx)->getZExtValue();
6871
6872 VectorType *ResultTy = cast<VectorType>(Call.getType());
6873 VectorType *VecTy = cast<VectorType>(Vec->getType());
6874
6875 ElementCount VecEC = VecTy->getElementCount();
6876 ElementCount ResultEC = ResultTy->getElementCount();
6877
6878 Check(ResultTy->getElementType() == VecTy->getElementType(),
6879 "vector_extract result must have the same element "
6880 "type as the input vector.",
6881 &Call);
6882 Check(IdxN % ResultEC.getKnownMinValue() == 0,
6883 "vector_extract index must be a constant multiple of "
6884 "the result type's known minimum vector length.");
6885
6886 // If this extraction is not the 'mixed' case where a fixed vector is
6887 // extracted from a scalable vector, ensure that the extraction does not
6888 // overrun the parent vector.
6889 if (VecEC.isScalable() == ResultEC.isScalable()) {
6890 Check(IdxN < VecEC.getKnownMinValue() &&
6891 IdxN + ResultEC.getKnownMinValue() <= VecEC.getKnownMinValue(),
6892 "vector_extract would overrun.");
6893 }
6894 break;
6895 }
6896 case Intrinsic::vector_partial_reduce_fadd:
6897 case Intrinsic::vector_partial_reduce_add: {
6900
6901 unsigned VecWidth = VecTy->getElementCount().getKnownMinValue();
6902 unsigned AccWidth = AccTy->getElementCount().getKnownMinValue();
6903
6904 Check((VecWidth % AccWidth) == 0,
6905 "Invalid vector widths for partial "
6906 "reduction. The width of the input vector "
6907 "must be a positive integer multiple of "
6908 "the width of the accumulator vector.");
6909 break;
6910 }
6911 case Intrinsic::experimental_noalias_scope_decl: {
6912 NoAliasScopeDecls.push_back(cast<IntrinsicInst>(&Call));
6913 break;
6914 }
6915 case Intrinsic::preserve_array_access_index:
6916 case Intrinsic::preserve_struct_access_index:
6917 case Intrinsic::aarch64_ldaxr:
6918 case Intrinsic::aarch64_ldxr:
6919 case Intrinsic::arm_ldaex:
6920 case Intrinsic::arm_ldrex: {
6921 Type *ElemTy = Call.getParamElementType(0);
6922 Check(ElemTy, "Intrinsic requires elementtype attribute on first argument.",
6923 &Call);
6924 break;
6925 }
6926 case Intrinsic::aarch64_stlxr:
6927 case Intrinsic::aarch64_stxr:
6928 case Intrinsic::arm_stlex:
6929 case Intrinsic::arm_strex: {
6930 Type *ElemTy = Call.getAttributes().getParamElementType(1);
6931 Check(ElemTy,
6932 "Intrinsic requires elementtype attribute on second argument.",
6933 &Call);
6934 break;
6935 }
6936 case Intrinsic::aarch64_prefetch: {
6937 Check(cast<ConstantInt>(Call.getArgOperand(1))->getZExtValue() < 2,
6938 "write argument to llvm.aarch64.prefetch must be 0 or 1", Call);
6939 Check(cast<ConstantInt>(Call.getArgOperand(2))->getZExtValue() < 4,
6940 "target argument to llvm.aarch64.prefetch must be 0-3", Call);
6941 Check(cast<ConstantInt>(Call.getArgOperand(3))->getZExtValue() < 2,
6942 "stream argument to llvm.aarch64.prefetch must be 0 or 1", Call);
6943 Check(cast<ConstantInt>(Call.getArgOperand(4))->getZExtValue() < 2,
6944 "isdata argument to llvm.aarch64.prefetch must be 0 or 1", Call);
6945 break;
6946 }
6947 case Intrinsic::aarch64_range_prefetch: {
6948 Check(cast<ConstantInt>(Call.getArgOperand(1))->getZExtValue() < 2,
6949 "write argument to llvm.aarch64.range.prefetch must be 0 or 1", Call);
6950 Check(cast<ConstantInt>(Call.getArgOperand(2))->getZExtValue() < 2,
6951 "stream argument to llvm.aarch64.range.prefetch must be 0 or 1",
6952 Call);
6953 break;
6954 }
6955 case Intrinsic::callbr_landingpad: {
6956 const auto *CBR = dyn_cast<CallBrInst>(Call.getOperand(0));
6957 Check(CBR, "intrinstic requires callbr operand", &Call);
6958 if (!CBR)
6959 break;
6960
6961 const BasicBlock *LandingPadBB = Call.getParent();
6962 const BasicBlock *PredBB = LandingPadBB->getUniquePredecessor();
6963 if (!PredBB) {
6964 CheckFailed("Intrinsic in block must have 1 unique predecessor", &Call);
6965 break;
6966 }
6967 if (!isa<CallBrInst>(PredBB->getTerminator())) {
6968 CheckFailed("Intrinsic must have corresponding callbr in predecessor",
6969 &Call);
6970 break;
6971 }
6972 Check(llvm::is_contained(CBR->getIndirectDests(), LandingPadBB),
6973 "Intrinsic's corresponding callbr must have intrinsic's parent basic "
6974 "block in indirect destination list",
6975 &Call);
6976 const Instruction &First = *LandingPadBB->begin();
6977 Check(&First == &Call, "No other instructions may proceed intrinsic",
6978 &Call);
6979 break;
6980 }
6981 case Intrinsic::structured_gep: {
6982 // Parser should refuse those 2 cases.
6983 assert(Call.arg_size() >= 1);
6985
6986 Check(Call.paramHasAttr(0, Attribute::ElementType),
6987 "Intrinsic first parameter is missing an ElementType attribute",
6988 &Call);
6989
6990 Type *T = Call.getParamAttr(0, Attribute::ElementType).getValueAsType();
6991 for (unsigned I = 1; I < Call.arg_size(); ++I) {
6993 ConstantInt *CI = dyn_cast<ConstantInt>(Index);
6994 Check(Index->getType()->isIntegerTy(),
6995 "Index operand type must be an integer", &Call);
6996
6997 if (ArrayType *AT = dyn_cast<ArrayType>(T)) {
6998 T = AT->getElementType();
6999 } else if (StructType *ST = dyn_cast<StructType>(T)) {
7000 Check(CI, "Indexing into a struct requires a constant int", &Call);
7001 Check(CI->getZExtValue() < ST->getNumElements(),
7002 "Indexing in a struct should be inbounds", &Call);
7003 T = ST->getElementType(CI->getZExtValue());
7004 } else if (VectorType *VT = dyn_cast<VectorType>(T)) {
7005 T = VT->getElementType();
7006 } else {
7007 CheckFailed("Reached a non-composite type with more indices to process",
7008 &Call);
7009 }
7010 }
7011 break;
7012 }
7013 case Intrinsic::structured_alloca:
7014 Check(Call.hasRetAttr(Attribute::ElementType),
7015 "@llvm.structured.alloca calls require elementtype attribute.",
7016 &Call);
7017 break;
7018 case Intrinsic::amdgcn_cs_chain: {
7019 auto CallerCC = Call.getCaller()->getCallingConv();
7020 switch (CallerCC) {
7021 case CallingConv::AMDGPU_CS:
7022 case CallingConv::AMDGPU_CS_Chain:
7023 case CallingConv::AMDGPU_CS_ChainPreserve:
7024 case CallingConv::AMDGPU_ES:
7025 case CallingConv::AMDGPU_GS:
7026 case CallingConv::AMDGPU_HS:
7027 case CallingConv::AMDGPU_LS:
7028 case CallingConv::AMDGPU_VS:
7029 break;
7030 default:
7031 CheckFailed("Intrinsic cannot be called from functions with this "
7032 "calling convention",
7033 &Call);
7034 break;
7035 }
7036
7037 Check(Call.paramHasAttr(2, Attribute::InReg),
7038 "SGPR arguments must have the `inreg` attribute", &Call);
7039 Check(!Call.paramHasAttr(3, Attribute::InReg),
7040 "VGPR arguments must not have the `inreg` attribute", &Call);
7041
7042 auto *Next = Call.getNextNode();
7043 bool IsAMDUnreachable = Next && isa<IntrinsicInst>(Next) &&
7044 cast<IntrinsicInst>(Next)->getIntrinsicID() ==
7045 Intrinsic::amdgcn_unreachable;
7046 Check(Next && (isa<UnreachableInst>(Next) || IsAMDUnreachable),
7047 "llvm.amdgcn.cs.chain must be followed by unreachable", &Call);
7048 break;
7049 }
7050 case Intrinsic::amdgcn_init_exec_from_input: {
7051 const Argument *Arg = dyn_cast<Argument>(Call.getOperand(0));
7052 Check(Arg && Arg->hasInRegAttr(),
7053 "only inreg arguments to the parent function are valid as inputs to "
7054 "this intrinsic",
7055 &Call);
7056 break;
7057 }
7058 case Intrinsic::amdgcn_set_inactive_chain_arg: {
7059 auto CallerCC = Call.getCaller()->getCallingConv();
7060 switch (CallerCC) {
7061 case CallingConv::AMDGPU_CS_Chain:
7062 case CallingConv::AMDGPU_CS_ChainPreserve:
7063 break;
7064 default:
7065 CheckFailed("Intrinsic can only be used from functions with the "
7066 "amdgpu_cs_chain or amdgpu_cs_chain_preserve "
7067 "calling conventions",
7068 &Call);
7069 break;
7070 }
7071
7072 unsigned InactiveIdx = 1;
7073 Check(!Call.paramHasAttr(InactiveIdx, Attribute::InReg),
7074 "Value for inactive lanes must not have the `inreg` attribute",
7075 &Call);
7076 Check(isa<Argument>(Call.getArgOperand(InactiveIdx)),
7077 "Value for inactive lanes must be a function argument", &Call);
7078 Check(!cast<Argument>(Call.getArgOperand(InactiveIdx))->hasInRegAttr(),
7079 "Value for inactive lanes must be a VGPR function argument", &Call);
7080 break;
7081 }
7082 case Intrinsic::amdgcn_call_whole_wave: {
7084 Check(F, "Indirect whole wave calls are not allowed", &Call);
7085
7086 CallingConv::ID CC = F->getCallingConv();
7087 Check(CC == CallingConv::AMDGPU_Gfx_WholeWave,
7088 "Callee must have the amdgpu_gfx_whole_wave calling convention",
7089 &Call);
7090
7091 Check(!F->isVarArg(), "Variadic whole wave calls are not allowed", &Call);
7092
7093 Check(Call.arg_size() == F->arg_size(),
7094 "Call argument count must match callee argument count", &Call);
7095
7096 // The first argument of the call is the callee, and the first argument of
7097 // the callee is the active mask. The rest of the arguments must match.
7098 Check(F->arg_begin()->getType()->isIntegerTy(1),
7099 "Callee must have i1 as its first argument", &Call);
7100 for (auto [CallArg, FuncArg] :
7101 drop_begin(zip_equal(Call.args(), F->args()))) {
7102 Check(CallArg->getType() == FuncArg.getType(),
7103 "Argument types must match", &Call);
7104
7105 // Check that inreg attributes match between call site and function
7106 Check(Call.paramHasAttr(FuncArg.getArgNo(), Attribute::InReg) ==
7107 FuncArg.hasInRegAttr(),
7108 "Argument inreg attributes must match", &Call);
7109 }
7110 break;
7111 }
7112 case Intrinsic::amdgcn_s_prefetch_data: {
7113 Check(
7116 "llvm.amdgcn.s.prefetch.data only supports global or constant memory");
7117 break;
7118 }
7119 case Intrinsic::amdgcn_mfma_scale_f32_16x16x128_f8f6f4:
7120 case Intrinsic::amdgcn_mfma_scale_f32_32x32x64_f8f6f4: {
7121 Value *Src0 = Call.getArgOperand(0);
7122 Value *Src1 = Call.getArgOperand(1);
7123
7124 uint64_t CBSZ = cast<ConstantInt>(Call.getArgOperand(3))->getZExtValue();
7125 uint64_t BLGP = cast<ConstantInt>(Call.getArgOperand(4))->getZExtValue();
7126 Check(CBSZ <= 4, "invalid value for cbsz format", Call,
7127 Call.getArgOperand(3));
7128 Check(BLGP <= 4, "invalid value for blgp format", Call,
7129 Call.getArgOperand(4));
7130
7131 // AMDGPU::MFMAScaleFormats values
7132 auto getFormatNumRegs = [](unsigned FormatVal) {
7133 switch (FormatVal) {
7134 case 0:
7135 case 1:
7136 return 8u;
7137 case 2:
7138 case 3:
7139 return 6u;
7140 case 4:
7141 return 4u;
7142 default:
7143 llvm_unreachable("invalid format value");
7144 }
7145 };
7146
7147 auto isValidSrcASrcBVector = [](FixedVectorType *Ty) {
7148 if (!Ty || !Ty->getElementType()->isIntegerTy(32))
7149 return false;
7150 unsigned NumElts = Ty->getNumElements();
7151 return NumElts == 4 || NumElts == 6 || NumElts == 8;
7152 };
7153
7154 auto *Src0Ty = dyn_cast<FixedVectorType>(Src0->getType());
7155 auto *Src1Ty = dyn_cast<FixedVectorType>(Src1->getType());
7156 Check(isValidSrcASrcBVector(Src0Ty),
7157 "operand 0 must be 4, 6 or 8 element i32 vector", &Call, Src0);
7158 Check(isValidSrcASrcBVector(Src1Ty),
7159 "operand 1 must be 4, 6 or 8 element i32 vector", &Call, Src1);
7160
7161 // Permit excess registers for the format.
7162 Check(Src0Ty->getNumElements() >= getFormatNumRegs(CBSZ),
7163 "invalid vector type for format", &Call, Src0, Call.getArgOperand(3));
7164 Check(Src1Ty->getNumElements() >= getFormatNumRegs(BLGP),
7165 "invalid vector type for format", &Call, Src1, Call.getArgOperand(5));
7166 break;
7167 }
7168 case Intrinsic::amdgcn_wmma_f32_16x16x128_f8f6f4:
7169 case Intrinsic::amdgcn_wmma_scale_f32_16x16x128_f8f6f4:
7170 case Intrinsic::amdgcn_wmma_scale16_f32_16x16x128_f8f6f4: {
7171 Value *Src0 = Call.getArgOperand(1);
7172 Value *Src1 = Call.getArgOperand(3);
7173
7174 unsigned FmtA = cast<ConstantInt>(Call.getArgOperand(0))->getZExtValue();
7175 unsigned FmtB = cast<ConstantInt>(Call.getArgOperand(2))->getZExtValue();
7176 Check(FmtA <= 4, "invalid value for matrix format", Call,
7177 Call.getArgOperand(0));
7178 Check(FmtB <= 4, "invalid value for matrix format", Call,
7179 Call.getArgOperand(2));
7180
7181 // AMDGPU::MatrixFMT values
7182 auto getFormatNumRegs = [](unsigned FormatVal) {
7183 switch (FormatVal) {
7184 case 0:
7185 case 1:
7186 return 16u;
7187 case 2:
7188 case 3:
7189 return 12u;
7190 case 4:
7191 return 8u;
7192 default:
7193 llvm_unreachable("invalid format value");
7194 }
7195 };
7196
7197 auto isValidSrcASrcBVector = [](FixedVectorType *Ty) {
7198 if (!Ty || !Ty->getElementType()->isIntegerTy(32))
7199 return false;
7200 unsigned NumElts = Ty->getNumElements();
7201 return NumElts == 16 || NumElts == 12 || NumElts == 8;
7202 };
7203
7204 auto *Src0Ty = dyn_cast<FixedVectorType>(Src0->getType());
7205 auto *Src1Ty = dyn_cast<FixedVectorType>(Src1->getType());
7206 Check(isValidSrcASrcBVector(Src0Ty),
7207 "operand 1 must be 8, 12 or 16 element i32 vector", &Call, Src0);
7208 Check(isValidSrcASrcBVector(Src1Ty),
7209 "operand 3 must be 8, 12 or 16 element i32 vector", &Call, Src1);
7210
7211 // Permit excess registers for the format.
7212 Check(Src0Ty->getNumElements() >= getFormatNumRegs(FmtA),
7213 "invalid vector type for format", &Call, Src0, Call.getArgOperand(0));
7214 Check(Src1Ty->getNumElements() >= getFormatNumRegs(FmtB),
7215 "invalid vector type for format", &Call, Src1, Call.getArgOperand(2));
7216 break;
7217 }
7218 case Intrinsic::amdgcn_cooperative_atomic_load_32x4B:
7219 case Intrinsic::amdgcn_cooperative_atomic_load_16x8B:
7220 case Intrinsic::amdgcn_cooperative_atomic_load_8x16B:
7221 case Intrinsic::amdgcn_cooperative_atomic_store_32x4B:
7222 case Intrinsic::amdgcn_cooperative_atomic_store_16x8B:
7223 case Intrinsic::amdgcn_cooperative_atomic_store_8x16B: {
7224 // Check we only use this intrinsic on the FLAT or GLOBAL address spaces.
7225 Value *PtrArg = Call.getArgOperand(0);
7226 const unsigned AS = PtrArg->getType()->getPointerAddressSpace();
7228 "cooperative atomic intrinsics require a generic or global pointer",
7229 &Call, PtrArg);
7230
7231 // Last argument must be a MD string
7233 MDNode *MD = cast<MDNode>(Op->getMetadata());
7234 Check((MD->getNumOperands() == 1) && isa<MDString>(MD->getOperand(0)),
7235 "cooperative atomic intrinsics require that the last argument is a "
7236 "metadata string",
7237 &Call, Op);
7238 break;
7239 }
7240 case Intrinsic::nvvm_setmaxnreg_inc_sync_aligned_u32:
7241 case Intrinsic::nvvm_setmaxnreg_dec_sync_aligned_u32: {
7242 Value *V = Call.getArgOperand(0);
7243 unsigned RegCount = cast<ConstantInt>(V)->getZExtValue();
7244 Check(RegCount % 8 == 0,
7245 "reg_count argument to nvvm.setmaxnreg must be in multiples of 8");
7246 break;
7247 }
7248 case Intrinsic::experimental_convergence_entry:
7249 case Intrinsic::experimental_convergence_anchor:
7250 break;
7251 case Intrinsic::experimental_convergence_loop:
7252 break;
7253 case Intrinsic::ptrmask: {
7254 Type *Ty0 = Call.getArgOperand(0)->getType();
7255 Type *Ty1 = Call.getArgOperand(1)->getType();
7257 "llvm.ptrmask intrinsic first argument must be pointer or vector "
7258 "of pointers",
7259 &Call);
7260 Check(
7261 Ty0->isVectorTy() == Ty1->isVectorTy(),
7262 "llvm.ptrmask intrinsic arguments must be both scalars or both vectors",
7263 &Call);
7264 if (Ty0->isVectorTy())
7265 Check(cast<VectorType>(Ty0)->getElementCount() ==
7266 cast<VectorType>(Ty1)->getElementCount(),
7267 "llvm.ptrmask intrinsic arguments must have the same number of "
7268 "elements",
7269 &Call);
7270 Check(DL.getIndexTypeSizeInBits(Ty0) == Ty1->getScalarSizeInBits(),
7271 "llvm.ptrmask intrinsic second argument bitwidth must match "
7272 "pointer index type size of first argument",
7273 &Call);
7274 break;
7275 }
7276 case Intrinsic::thread_pointer: {
7278 DL.getDefaultGlobalsAddressSpace(),
7279 "llvm.thread.pointer intrinsic return type must be for the globals "
7280 "address space",
7281 &Call);
7282 break;
7283 }
7284 case Intrinsic::threadlocal_address: {
7285 const Value &Arg0 = *Call.getArgOperand(0);
7286 Check(isa<GlobalValue>(Arg0),
7287 "llvm.threadlocal.address first argument must be a GlobalValue");
7288 Check(cast<GlobalValue>(Arg0).isThreadLocal(),
7289 "llvm.threadlocal.address operand isThreadLocal() must be true");
7290 break;
7291 }
7292 case Intrinsic::lifetime_start:
7293 case Intrinsic::lifetime_end: {
7294 Value *Ptr = Call.getArgOperand(0);
7295 IntrinsicInst *II = dyn_cast<IntrinsicInst>(Ptr);
7296 Check(isa<AllocaInst>(Ptr) || isa<PoisonValue>(Ptr) ||
7297 (II && II->getIntrinsicID() == Intrinsic::structured_alloca),
7298 "llvm.lifetime.start/end can only be used on alloca or poison",
7299 &Call);
7300 break;
7301 }
7302 case Intrinsic::sponentry: {
7303 const unsigned StackAS = DL.getAllocaAddrSpace();
7304 const Type *RetTy = Call.getFunctionType()->getReturnType();
7305 Check(RetTy->getPointerAddressSpace() == StackAS,
7306 "llvm.sponentry must return a pointer to the stack", &Call);
7307 break;
7308 }
7309 };
7310
7311 // Verify that there aren't any unmediated control transfers between funclets.
7313 Function *F = Call.getParent()->getParent();
7314 if (F->hasPersonalityFn() &&
7315 isScopedEHPersonality(classifyEHPersonality(F->getPersonalityFn()))) {
7316 // Run EH funclet coloring on-demand and cache results for other intrinsic
7317 // calls in this function
7318 if (BlockEHFuncletColors.empty())
7319 BlockEHFuncletColors = colorEHFunclets(*F);
7320
7321 // Check for catch-/cleanup-pad in first funclet block
7322 bool InEHFunclet = false;
7323 BasicBlock *CallBB = Call.getParent();
7324 const ColorVector &CV = BlockEHFuncletColors.find(CallBB)->second;
7325 assert(CV.size() > 0 && "Uncolored block");
7326 for (BasicBlock *ColorFirstBB : CV)
7327 if (auto It = ColorFirstBB->getFirstNonPHIIt();
7328 It != ColorFirstBB->end())
7330 InEHFunclet = true;
7331
7332 // Check for funclet operand bundle
7333 bool HasToken = false;
7334 for (unsigned I = 0, E = Call.getNumOperandBundles(); I != E; ++I)
7336 HasToken = true;
7337
7338 // This would cause silent code truncation in WinEHPrepare
7339 if (InEHFunclet)
7340 Check(HasToken, "Missing funclet token on intrinsic call", &Call);
7341 }
7342 }
7343}
7344
7345/// Carefully grab the subprogram from a local scope.
7346///
7347/// This carefully grabs the subprogram from a local scope, avoiding the
7348/// built-in assertions that would typically fire.
7350 if (!LocalScope)
7351 return nullptr;
7352
7353 if (auto *SP = dyn_cast<DISubprogram>(LocalScope))
7354 return SP;
7355
7356 if (auto *LB = dyn_cast<DILexicalBlockBase>(LocalScope))
7357 return getSubprogram(LB->getRawScope());
7358
7359 // Just return null; broken scope chains are checked elsewhere.
7360 assert(!isa<DILocalScope>(LocalScope) && "Unknown type of local scope");
7361 return nullptr;
7362}
7363
// Verify a single #dbg_label record: its label operand, its !dbg attachment,
// and the agreement between the label's scope and the location's scope.
// NOTE(review): the condition line of the first CheckDI (line 7365 of the
// original file) is absent from this excerpt; it presumably validates
// DLR.getRawLabel() — confirm against upstream before editing this check.
7364 void Verifier::visit(DbgLabelRecord &DLR) {
7366 "invalid #dbg_label intrinsic variable", &DLR, DLR.getRawLabel());
7367
7368 // Ignore broken !dbg attachments; they're checked elsewhere.
7369 if (MDNode *N = DLR.getDebugLoc().getAsMDNode())
7370 if (!isa<DILocation>(N))
7371 return;
7372
// BB and F are only used to enrich the diagnostic output below.
7373 BasicBlock *BB = DLR.getParent();
7374 Function *F = BB ? BB->getParent() : nullptr;
7375
7376 // The scopes for variables and !dbg attachments must agree.
7377 DILabel *Label = DLR.getLabel();
7378 DILocation *Loc = DLR.getDebugLoc();
7379 CheckDI(Loc, "#dbg_label record requires a !dbg attachment", &DLR, BB, F);
7380
// Resolve both scope chains to their enclosing subprogram; getSubprogram()
// returns null for broken chains, which are diagnosed elsewhere, so bail out.
7381 DISubprogram *LabelSP = getSubprogram(Label->getRawScope());
7382 DISubprogram *LocSP = getSubprogram(Loc->getRawScope());
7383 if (!LabelSP || !LocSP)
7384 return;
7385
// The label and its !dbg location must belong to the same subprogram.
7386 CheckDI(LabelSP == LocSP,
7387 "mismatched subprogram between #dbg_label label and !dbg attachment",
7388 &DLR, BB, F, Label, Label->getScope()->getSubprogram(), Loc,
7389 Loc->getScope()->getSubprogram());
7390 }
7391
// Verify a single #dbg_value / #dbg_declare / #dbg_assign variable record.
// Checks, in order: the record kind, the shape of its location metadata, the
// variable and expression metadata, assign-specific operands, the variable's
// type ref, the !dbg DILocation, and that the variable's scope and the
// record's location resolve to the same subprogram.
// NOTE(review): several CheckDI condition lines (original lines 7422, 7426,
// 7432 and 7449) are absent from this excerpt — only their message/argument
// continuations remain; consult upstream before editing those checks.
7392 void Verifier::visit(DbgVariableRecord &DVR) {
7393 BasicBlock *BB = DVR.getParent();
7394 Function *F = BB->getParent();
7395
// Only the four known record kinds are legal.
7396 CheckDI(DVR.getType() == DbgVariableRecord::LocationType::Value ||
7397 DVR.getType() == DbgVariableRecord::LocationType::Declare ||
7398 DVR.getType() == DbgVariableRecord::LocationType::DeclareValue ||
7399 DVR.getType() == DbgVariableRecord::LocationType::Assign,
7400 "invalid #dbg record type", &DVR, DVR.getType(), BB, F);
7401
7402 // The location for a DbgVariableRecord must be either a ValueAsMetadata,
7403 // DIArgList, or an empty MDNode (which is a legacy representation for an
7404 // "undef" location).
7405 auto *MD = DVR.getRawLocation();
7406 CheckDI(MD && (isa<ValueAsMetadata>(MD) || isa<DIArgList>(MD) ||
7407 (isa<MDNode>(MD) && !cast<MDNode>(MD)->getNumOperands())),
7408 "invalid #dbg record address/value", &DVR, MD, BB, F);
7409 if (auto *VAM = dyn_cast<ValueAsMetadata>(MD)) {
7410 visitValueAsMetadata(*VAM, F);
7411 if (DVR.isDbgDeclare()) {
7412 // Allow integers here to support inttoptr salvage.
7413 Type *Ty = VAM->getValue()->getType();
7414 CheckDI(Ty->isPointerTy() || Ty->isIntegerTy(),
7415 "location of #dbg_declare must be a pointer or int", &DVR, MD, BB,
7416 F);
7417 }
7418 } else if (auto *AL = dyn_cast<DIArgList>(MD)) {
7419 visitDIArgList(*AL, F);
7420 }
7421
// Validate the variable and expression metadata nodes themselves (the
// conditions for these two CheckDI calls are among the elided lines noted
// in the header comment).
7423 "invalid #dbg record variable", &DVR, DVR.getRawVariable(), BB, F);
7424 visitMDNode(*DVR.getRawVariable(), AreDebugLocsAllowed::No);
7425
7427 "invalid #dbg record expression", &DVR, DVR.getRawExpression(), BB,
7428 F);
7429 visitMDNode(*DVR.getExpression(), AreDebugLocsAllowed::No);
7430
// Extra operands that only #dbg_assign records carry: a DIAssignID, an
// address, and an address expression.
7431 if (DVR.isDbgAssign()) {
7433 "invalid #dbg_assign DIAssignID", &DVR, DVR.getRawAssignID(), BB,
7434 F);
7435 visitMDNode(*cast<DIAssignID>(DVR.getRawAssignID()),
7436 AreDebugLocsAllowed::No);
7437
7438 const auto *RawAddr = DVR.getRawAddress();
7439 // Similarly to the location above, the address for an assign
7440 // DbgVariableRecord must be a ValueAsMetadata or an empty MDNode, which
7441 // represents an undef address.
7442 CheckDI(
7443 isa<ValueAsMetadata>(RawAddr) ||
7444 (isa<MDNode>(RawAddr) && !cast<MDNode>(RawAddr)->getNumOperands()),
7445 "invalid #dbg_assign address", &DVR, DVR.getRawAddress(), BB, F);
7446 if (auto *VAM = dyn_cast<ValueAsMetadata>(RawAddr))
7447 visitValueAsMetadata(*VAM, F);
7448
7450 "invalid #dbg_assign address expression", &DVR,
7451 DVR.getRawAddressExpression(), BB, F);
7452 visitMDNode(*DVR.getAddressExpression(), AreDebugLocsAllowed::No);
7453
7454 // All of the linked instructions should be in the same function as DVR.
7455 for (Instruction *I : at::getAssignmentInsts(&DVR))
7456 CheckDI(DVR.getFunction() == I->getFunction(),
7457 "inst not in same function as #dbg_assign", I, &DVR, BB, F);
7458 }
7459
7460 // This check is redundant with one in visitLocalVariable().
7461 DILocalVariable *Var = DVR.getVariable();
7462 CheckDI(isType(Var->getRawType()), "invalid type ref", Var, Var->getRawType(),
7463 BB, F);
7464
// Every variable record needs a genuine DILocation !dbg attachment.
7465 auto *DLNode = DVR.getDebugLoc().getAsMDNode();
7466 CheckDI(isa_and_nonnull<DILocation>(DLNode), "invalid #dbg record DILocation",
7467 &DVR, DLNode, BB, F);
7468 DILocation *Loc = DVR.getDebugLoc();
7469
7470 // The scopes for variables and !dbg attachments must agree.
7471 DISubprogram *VarSP = getSubprogram(Var->getRawScope());
7472 DISubprogram *LocSP = getSubprogram(Loc->getRawScope());
7473 if (!VarSP || !LocSP)
7474 return; // Broken scope chains are checked elsewhere.
7475
7476 CheckDI(VarSP == LocSP,
7477 "mismatched subprogram between #dbg record variable and DILocation",
7478 &DVR, BB, F, Var, Var->getScope()->getSubprogram(), Loc,
7479 Loc->getScope()->getSubprogram(), BB, F);
7480
// Delegate remaining argument-related checks to verifyFnArgs().
7481 verifyFnArgs(DVR);
7482 }
7483
// Verify a vector-predicated (llvm.vp.*) intrinsic call.
//
// For VP cast intrinsics: source and result vectors must have equal element
// counts, and each cast opcode imposes the element-type and scalar bit-width
// constraints checked per-case below. A second switch then applies
// intrinsic-specific rules (comparison predicates, the fpclass test mask,
// and the vp.splice index range).
// NOTE(review): the condition lines of the two predicate Checks (original
// lines 7573 and 7579) are absent from this excerpt; they presumably test
// FP/integer predicate validity — confirm against upstream before editing.
7484 void Verifier::visitVPIntrinsic(VPIntrinsic &VPI) {
7485 if (auto *VPCast = dyn_cast<VPCastIntrinsic>(&VPI)) {
7486 auto *RetTy = cast<VectorType>(VPCast->getType());
7487 auto *ValTy = cast<VectorType>(VPCast->getOperand(0)->getType());
// A VP cast changes only the element type, never the element count.
7488 Check(RetTy->getElementCount() == ValTy->getElementCount(),
7489 "VP cast intrinsic first argument and result vector lengths must be "
7490 "equal",
7491 *VPCast);
7492
7493 switch (VPCast->getIntrinsicID()) {
7494 default:
7495 llvm_unreachable("Unknown VP cast intrinsic");
// trunc: integer elements, result strictly narrower than the source.
7496 case Intrinsic::vp_trunc:
7497 Check(RetTy->isIntOrIntVectorTy() && ValTy->isIntOrIntVectorTy(),
7498 "llvm.vp.trunc intrinsic first argument and result element type "
7499 "must be integer",
7500 *VPCast);
7501 Check(RetTy->getScalarSizeInBits() < ValTy->getScalarSizeInBits(),
7502 "llvm.vp.trunc intrinsic the bit size of first argument must be "
7503 "larger than the bit size of the return type",
7504 *VPCast);
7505 break;
// zext/sext: integer elements, result strictly wider than the source.
7506 case Intrinsic::vp_zext:
7507 case Intrinsic::vp_sext:
7508 Check(RetTy->isIntOrIntVectorTy() && ValTy->isIntOrIntVectorTy(),
7509 "llvm.vp.zext or llvm.vp.sext intrinsic first argument and result "
7510 "element type must be integer",
7511 *VPCast);
7512 Check(RetTy->getScalarSizeInBits() > ValTy->getScalarSizeInBits(),
7513 "llvm.vp.zext or llvm.vp.sext intrinsic the bit size of first "
7514 "argument must be smaller than the bit size of the return type",
7515 *VPCast);
7516 break;
// FP -> integer conversions.
// NOTE(review): the two adjacent string literals below concatenate without a
// separating space, so the diagnostic renders "...llvm.vp.llrintintrinsic...";
// consider adding a trailing space to the first literal upstream.
7517 case Intrinsic::vp_fptoui:
7518 case Intrinsic::vp_fptosi:
7519 case Intrinsic::vp_lrint:
7520 case Intrinsic::vp_llrint:
7521 Check(
7522 RetTy->isIntOrIntVectorTy() && ValTy->isFPOrFPVectorTy(),
7523 "llvm.vp.fptoui, llvm.vp.fptosi, llvm.vp.lrint or llvm.vp.llrint" "intrinsic first argument element "
7524 "type must be floating-point and result element type must be integer",
7525 *VPCast);
7526 break;
// Integer -> FP conversions.
7527 case Intrinsic::vp_uitofp:
7528 case Intrinsic::vp_sitofp:
7529 Check(
7530 RetTy->isFPOrFPVectorTy() && ValTy->isIntOrIntVectorTy(),
7531 "llvm.vp.uitofp or llvm.vp.sitofp intrinsic first argument element "
7532 "type must be integer and result element type must be floating-point",
7533 *VPCast);
7534 break;
// fptrunc: FP elements, result strictly narrower than the source.
7535 case Intrinsic::vp_fptrunc:
7536 Check(RetTy->isFPOrFPVectorTy() && ValTy->isFPOrFPVectorTy(),
7537 "llvm.vp.fptrunc intrinsic first argument and result element type "
7538 "must be floating-point",
7539 *VPCast);
7540 Check(RetTy->getScalarSizeInBits() < ValTy->getScalarSizeInBits(),
7541 "llvm.vp.fptrunc intrinsic the bit size of first argument must be "
7542 "larger than the bit size of the return type",
7543 *VPCast);
7544 break;
// fpext: FP elements, result strictly wider than the source.
7545 case Intrinsic::vp_fpext:
7546 Check(RetTy->isFPOrFPVectorTy() && ValTy->isFPOrFPVectorTy(),
7547 "llvm.vp.fpext intrinsic first argument and result element type "
7548 "must be floating-point",
7549 *VPCast);
7550 Check(RetTy->getScalarSizeInBits() > ValTy->getScalarSizeInBits(),
7551 "llvm.vp.fpext intrinsic the bit size of first argument must be "
7552 "smaller than the bit size of the return type",
7553 *VPCast);
7554 break;
// ptrtoint / inttoptr: pointer <-> integer element types.
7555 case Intrinsic::vp_ptrtoint:
7556 Check(RetTy->isIntOrIntVectorTy() && ValTy->isPtrOrPtrVectorTy(),
7557 "llvm.vp.ptrtoint intrinsic first argument element type must be "
7558 "pointer and result element type must be integer",
7559 *VPCast);
7560 break;
7561 case Intrinsic::vp_inttoptr:
7562 Check(RetTy->isPtrOrPtrVectorTy() && ValTy->isIntOrIntVectorTy(),
7563 "llvm.vp.inttoptr intrinsic first argument element type must be "
7564 "integer and result element type must be pointer",
7565 *VPCast);
7566 break;
7567 }
7568 }
7569
// Non-cast, intrinsic-specific checks.
7570 switch (VPI.getIntrinsicID()) {
7571 case Intrinsic::vp_fcmp: {
7572 auto Pred = cast<VPCmpIntrinsic>(&VPI)->getPredicate();
7574 "invalid predicate for VP FP comparison intrinsic", &VPI);
7575 break;
7576 }
7577 case Intrinsic::vp_icmp: {
7578 auto Pred = cast<VPCmpIntrinsic>(&VPI)->getPredicate();
7580 "invalid predicate for VP integer comparison intrinsic", &VPI);
7581 break;
7582 }
7583 case Intrinsic::vp_is_fpclass: {
// The class-test mask may only use bits covered by fcAllFlags.
7584 auto TestMask = cast<ConstantInt>(VPI.getOperand(1));
7585 Check((TestMask->getZExtValue() & ~static_cast<unsigned>(fcAllFlags)) == 0,
7586 "unsupported bits for llvm.vp.is.fpclass test mask");
7587 break;
7588 }
7589 case Intrinsic::experimental_vp_splice: {
7590 VectorType *VecTy = cast<VectorType>(VPI.getType());
7591 int64_t Idx = cast<ConstantInt>(VPI.getArgOperand(2))->getSExtValue();
7592 int64_t KnownMinNumElements = VecTy->getElementCount().getKnownMinValue();
// If the enclosing function constrains vscale, scale the known-minimum
// element count by the vscale_range lower bound.
7593 if (VPI.getParent() && VPI.getParent()->getParent()) {
7594 AttributeList Attrs = VPI.getParent()->getParent()->getAttributes();
7595 if (Attrs.hasFnAttr(Attribute::VScaleRange))
7596 KnownMinNumElements *= Attrs.getFnAttrs().getVScaleRangeMin();
7597 }
// The splice index must lie in [-VL, VL-1].
7598 Check((Idx < 0 && std::abs(Idx) <= KnownMinNumElements) ||
7599 (Idx >= 0 && Idx < KnownMinNumElements),
7600 "The splice index exceeds the range [-VL, VL-1] where VL is the "
7601 "known minimum number of elements in the vector. For scalable "
7602 "vectors the minimum number of elements is determined from "
7603 "vscale_range.",
7604 &VPI);
7605 break;
7606 }
7607 }
7608 }
7609
7610void Verifier::visitConstrainedFPIntrinsic(ConstrainedFPIntrinsic &FPI) {
7611 unsigned NumOperands = FPI.getNonMetadataArgCount();
7612 bool HasRoundingMD =
7614
7615 // Add the expected number of metadata operands.
7616 NumOperands += (1 + HasRoundingMD);
7617
7618 // Compare intrinsics carry an extra predicate metadata operand.
7620 NumOperands += 1;
7621 Check((FPI.arg_size() == NumOperands),
7622 "invalid arguments for constrained FP intrinsic", &FPI);
7623
7624 switch (FPI.getIntrinsicID()) {
7625 case Intrinsic::experimental_constrained_lrint:
7626 case Intrinsic::experimental_constrained_llrint: {
7627 Type *ValTy = FPI.getArgOperand(0)->getType();
7628 Type *ResultTy = FPI.getType();
7629 Check(!ValTy->isVectorTy() && !ResultTy->isVectorTy(),
7630 "Intrinsic does not support vectors", &FPI);
7631 break;
7632 }
7633
7634 case Intrinsic::experimental_constrained_lround:
7635 case Intrinsic::experimental_constrained_llround: {
7636 Type *ValTy = FPI.getArgOperand(0)->getType();
7637 Type *ResultTy = FPI.getType();
7638 Check(!ValTy->isVectorTy() && !ResultTy->isVectorTy(),
7639 "Intrinsic does not support vectors", &FPI);
7640 break;
7641 }
7642
7643 case Intrinsic::experimental_constrained_fcmp:
7644 case Intrinsic::experimental_constrained_fcmps: {
7645 auto Pred = cast<ConstrainedFPCmpIntrinsic>(&FPI)->getPredicate();
7647 "invalid predicate for constrained FP comparison intrinsic", &FPI);
7648 break;
7649 }
7650
7651 case Intrinsic::experimental_constrained_fptosi:
7652 case Intrinsic::experimental_constrained_fptoui: {
7653 Value *Operand = FPI.getArgOperand(0);
7654 ElementCount SrcEC;
7655 Check(Operand->getType()->isFPOrFPVectorTy(),
7656 "Intrinsic first argument must be floating point", &FPI);
7657 if (auto *OperandT = dyn_cast<VectorType>(Operand->getType())) {
7658 SrcEC = cast<VectorType>(OperandT)->getElementCount();
7659 }
7660
7661 Operand = &FPI;
7662 Check(SrcEC.isNonZero() == Operand->getType()->isVectorTy(),
7663 "Intrinsic first argument and result disagree on vector use", &FPI);
7664 Check(Operand->getType()->isIntOrIntVectorTy(),
7665 "Intrinsic result must be an integer", &FPI);
7666 if (auto *OperandT = dyn_cast<VectorType>(Operand->getType())) {
7667 Check(SrcEC == cast<VectorType>(OperandT)->getElementCount(),
7668 "Intrinsic first argument and result vector lengths must be equal",
7669 &FPI);
7670 }
7671 break;
7672 }
7673
7674 case Intrinsic::experimental_constrained_sitofp:
7675 case Intrinsic::experimental_constrained_uitofp: {
7676 Value *Operand = FPI.getArgOperand(0);
7677 ElementCount SrcEC;
7678 Check(Operand->getType()->isIntOrIntVectorTy(),
7679 "Intrinsic first argument must be integer", &FPI);
7680 if (auto *OperandT = dyn_cast<VectorType>(Operand->getType())) {
7681 SrcEC = cast<VectorType>(OperandT)->getElementCount();
7682 }
7683
7684 Operand = &FPI;
7685 Check(SrcEC.isNonZero() == Operand->getType()->isVectorTy(),
7686 "Intrinsic first argument and result disagree on vector use", &FPI);
7687 Check(Operand->getType()->isFPOrFPVectorTy(),
7688 "Intrinsic result must be a floating point", &FPI);
7689 if (auto *OperandT = dyn_cast<VectorType>(Operand->getType())) {
7690 Check(SrcEC == cast<VectorType>(OperandT)->getElementCount(),
7691 "Intrinsic first argument and result vector lengths must be equal",
7692 &FPI);
7693 }
7694 break;
7695 }
7696
7697 case Intrinsic::experimental_constrained_fptrunc:
7698 case Intrinsic::experimental_constrained_fpext: {
7699 Value *Operand = FPI.getArgOperand(0);
7700 Type *OperandTy = Operand->getType();
7701 Value *Result = &FPI;
7702 Type *ResultTy = Result->getType();
7703 Check(OperandTy->isFPOrFPVectorTy(),
7704 "Intrinsic first argument must be FP or FP vector", &FPI);
7705 Check(ResultTy->isFPOrFPVectorTy(),
7706 "Intrinsic result must be FP or FP vector", &FPI);
7707 Check(OperandTy->isVectorTy() == ResultTy->isVectorTy(),
7708 "Intrinsic first argument and result disagree on vector use", &FPI);
7709 if (OperandTy->isVectorTy()) {
7710 Check(cast<VectorType>(OperandTy)->getElementCount() ==
7711 cast<VectorType>(ResultTy)->getElementCount(),
7712 "Intrinsic first argument and result vector lengths must be equal",
7713 &FPI);
7714 }
7715 if (FPI.getIntrinsicID() == Intrinsic::experimental_constrained_fptrunc) {
7716 Check(OperandTy->getScalarSizeInBits() > ResultTy->getScalarSizeInBits(),
7717 "Intrinsic first argument's type must be larger than result type",
7718 &FPI);
7719 } else {
7720 Check(OperandTy->getScalarSizeInBits() < ResultTy->getScalarSizeInBits(),
7721 "Intrinsic first argument's type must be smaller than result type",
7722 &FPI);
7723 }
7724 break;
7725 }
7726
7727 default:
7728 break;
7729 }
7730
7731 // If a non-metadata argument is passed in a metadata slot then the
7732 // error will be caught earlier when the incorrect argument doesn't
7733 // match the specification in the intrinsic call table. Thus, no
7734 // argument type check is needed here.
7735
7736 Check(FPI.getExceptionBehavior().has_value(),
7737 "invalid exception behavior argument", &FPI);
7738 if (HasRoundingMD) {
7739 Check(FPI.getRoundingMode().has_value(), "invalid rounding mode argument",
7740 &FPI);
7741 }
7742}
7743
7744void Verifier::verifyFragmentExpression(const DbgVariableRecord &DVR) {
7745 DILocalVariable *V = dyn_cast_or_null<DILocalVariable>(DVR.getRawVariable());
7746 DIExpression *E = dyn_cast_or_null<DIExpression>(DVR.getRawExpression());
7747
7748 // We don't know whether this intrinsic verified correctly.
7749 if (!V || !E || !E->isValid())
7750 return;
7751
7752 // Nothing to do if this isn't a DW_OP_LLVM_fragment expression.
7753 auto Fragment = E->getFragmentInfo();
7754 if (!Fragment)
7755 return;
7756
7757 // The frontend helps out GDB by emitting the members of local anonymous
7758 // unions as artificial local variables with shared storage. When SROA splits
7759 // the storage for artificial local variables that are smaller than the entire
7760 // union, the overhang piece will be outside of the allotted space for the
7761 // variable and this check fails.
7762 // FIXME: Remove this check as soon as clang stops doing this; it hides bugs.
7763 if (V->isArtificial())
7764 return;
7765
7766 verifyFragmentExpression(*V, *Fragment, &DVR);
7767}
7768
7769template <typename ValueOrMetadata>
7770void Verifier::verifyFragmentExpression(const DIVariable &V,
7772 ValueOrMetadata *Desc) {
7773 // If there's no size, the type is broken, but that should be checked
7774 // elsewhere.
7775 auto VarSize = V.getSizeInBits();
7776 if (!VarSize)
7777 return;
7778
7779 unsigned FragSize = Fragment.SizeInBits;
7780 unsigned FragOffset = Fragment.OffsetInBits;
7781 CheckDI(FragSize + FragOffset <= *VarSize,
7782 "fragment is larger than or outside of variable", Desc, &V);
7783 CheckDI(FragSize != *VarSize, "fragment covers entire variable", Desc, &V);
7784}
7785
7786void Verifier::verifyFnArgs(const DbgVariableRecord &DVR) {
7787 // This function does not take the scope of noninlined function arguments into
7788 // account. Don't run it if current function is nodebug, because it may
7789 // contain inlined debug intrinsics.
7790 if (!HasDebugInfo)
7791 return;
7792
7793 // For performance reasons only check non-inlined ones.
7794 if (DVR.getDebugLoc()->getInlinedAt())
7795 return;
7796
7797 DILocalVariable *Var = DVR.getVariable();
7798 CheckDI(Var, "#dbg record without variable");
7799
7800 unsigned ArgNo = Var->getArg();
7801 if (!ArgNo)
7802 return;
7803
7804 // Verify there are no duplicate function argument debug info entries.
7805 // These will cause hard-to-debug assertions in the DWARF backend.
7806 if (DebugFnArgs.size() < ArgNo)
7807 DebugFnArgs.resize(ArgNo, nullptr);
7808
7809 auto *Prev = DebugFnArgs[ArgNo - 1];
7810 DebugFnArgs[ArgNo - 1] = Var;
7811 CheckDI(!Prev || (Prev == Var), "conflicting debug info for argument", &DVR,
7812 Prev, Var);
7813}
7814
7815void Verifier::verifyNotEntryValue(const DbgVariableRecord &DVR) {
7816 DIExpression *E = dyn_cast_or_null<DIExpression>(DVR.getRawExpression());
7817
7818 // We don't know whether this intrinsic verified correctly.
7819 if (!E || !E->isValid())
7820 return;
7821
7823 Value *VarValue = DVR.getVariableLocationOp(0);
7824 if (isa<UndefValue>(VarValue) || isa<PoisonValue>(VarValue))
7825 return;
7826 // We allow EntryValues for swift async arguments, as they have an
7827 // ABI-guarantee to be turned into a specific register.
7828 if (auto *ArgLoc = dyn_cast_or_null<Argument>(VarValue);
7829 ArgLoc && ArgLoc->hasAttribute(Attribute::SwiftAsync))
7830 return;
7831 }
7832
7833 CheckDI(!E->isEntryValue(),
7834 "Entry values are only allowed in MIR unless they target a "
7835 "swiftasync Argument",
7836 &DVR);
7837}
7838
7839void Verifier::verifyCompileUnits() {
7840 // When more than one Module is imported into the same context, such as during
7841 // an LTO build before linking the modules, ODR type uniquing may cause types
7842 // to point to a different CU. This check does not make sense in this case.
7843 if (M.getContext().isODRUniquingDebugTypes())
7844 return;
7845 auto *CUs = M.getNamedMetadata("llvm.dbg.cu");
7846 SmallPtrSet<const Metadata *, 2> Listed;
7847 if (CUs)
7848 Listed.insert_range(CUs->operands());
7849 for (const auto *CU : CUVisited)
7850 CheckDI(Listed.count(CU), "DICompileUnit not listed in llvm.dbg.cu", CU);
7851 CUVisited.clear();
7852}
7853
7854void Verifier::verifyDeoptimizeCallingConvs() {
7855 if (DeoptimizeDeclarations.empty())
7856 return;
7857
7858 const Function *First = DeoptimizeDeclarations[0];
7859 for (const auto *F : ArrayRef(DeoptimizeDeclarations).slice(1)) {
7860 Check(First->getCallingConv() == F->getCallingConv(),
7861 "All llvm.experimental.deoptimize declarations must have the same "
7862 "calling convention",
7863 First, F);
7864 }
7865}
7866
7867void Verifier::verifyAttachedCallBundle(const CallBase &Call,
7868 const OperandBundleUse &BU) {
7869 FunctionType *FTy = Call.getFunctionType();
7870
7871 Check((FTy->getReturnType()->isPointerTy() ||
7872 (Call.doesNotReturn() && FTy->getReturnType()->isVoidTy())),
7873 "a call with operand bundle \"clang.arc.attachedcall\" must call a "
7874 "function returning a pointer or a non-returning function that has a "
7875 "void return type",
7876 Call);
7877
7878 Check(BU.Inputs.size() == 1 && isa<Function>(BU.Inputs.front()),
7879 "operand bundle \"clang.arc.attachedcall\" requires one function as "
7880 "an argument",
7881 Call);
7882
7883 auto *Fn = cast<Function>(BU.Inputs.front());
7884 Intrinsic::ID IID = Fn->getIntrinsicID();
7885
7886 if (IID) {
7887 Check((IID == Intrinsic::objc_retainAutoreleasedReturnValue ||
7888 IID == Intrinsic::objc_claimAutoreleasedReturnValue ||
7889 IID == Intrinsic::objc_unsafeClaimAutoreleasedReturnValue),
7890 "invalid function argument", Call);
7891 } else {
7892 StringRef FnName = Fn->getName();
7893 Check((FnName == "objc_retainAutoreleasedReturnValue" ||
7894 FnName == "objc_claimAutoreleasedReturnValue" ||
7895 FnName == "objc_unsafeClaimAutoreleasedReturnValue"),
7896 "invalid function argument", Call);
7897 }
7898}
7899
void Verifier::verifyNoAliasScopeDecl() {
  if (NoAliasScopeDecls.empty())
    return;

  // only a single scope must be declared at a time.
  for (auto *II : NoAliasScopeDecls) {
    assert(II->getIntrinsicID() == Intrinsic::experimental_noalias_scope_decl &&
           "Not a llvm.experimental.noalias.scope.decl ?");
    // NOTE(review): the argument expression below appears truncated
    // (unbalanced parenthesis) — presumably it extracts the scope-list
    // operand of II; confirm against upstream.
    const auto *ScopeListMV = dyn_cast<MetadataAsValue>(
    Check(ScopeListMV != nullptr,
          "llvm.experimental.noalias.scope.decl must have a MetadataAsValue "
          "argument",
          II);

    // The scope-list metadata must be an MDNode containing exactly one scope.
    const auto *ScopeListMD = dyn_cast<MDNode>(ScopeListMV->getMetadata());
    Check(ScopeListMD != nullptr, "!id.scope.list must point to an MDNode", II);
    Check(ScopeListMD->getNumOperands() == 1,
          "!id.scope.list must point to a list with a single scope", II);
    visitAliasScopeListMetadata(ScopeListMD);
  }

  // Only check the domination rule when requested. Once all passes have been
  // adapted this option can go away.
  // NOTE(review): the guard condition for this early return appears to be
  // missing (bare `return;`) — presumably gated on the
  // VerifyNoAliasScopeDomination option; confirm against upstream.
    return;

  // Now sort the intrinsics based on the scope MDNode so that declarations of
  // the same scopes are next to each other.
  auto GetScope = [](IntrinsicInst *II) {
    // NOTE(review): truncated expression — presumably reads II's scope-list
    // operand; confirm against upstream.
    const auto *ScopeListMV = cast<MetadataAsValue>(
    return &cast<MDNode>(ScopeListMV->getMetadata())->getOperand(0);
  };

  // We are sorting on MDNode pointers here. For valid input IR this is ok.
  // TODO: Sort on Metadata ID to avoid non-deterministic error messages.
  auto Compare = [GetScope](IntrinsicInst *Lhs, IntrinsicInst *Rhs) {
    return GetScope(Lhs) < GetScope(Rhs);
  };

  llvm::sort(NoAliasScopeDecls, Compare);

  // Go over the intrinsics and check that for the same scope, they are not
  // dominating each other.
  auto ItCurrent = NoAliasScopeDecls.begin();
  while (ItCurrent != NoAliasScopeDecls.end()) {
    auto CurScope = GetScope(*ItCurrent);
    auto ItNext = ItCurrent;
    // Advance ItNext past every declaration sharing CurScope.
    do {
      ++ItNext;
    } while (ItNext != NoAliasScopeDecls.end() &&
             GetScope(*ItNext) == CurScope);

    // [ItCurrent, ItNext) represents the declarations for the same scope.
    // Ensure they are not dominating each other.. but only if it is not too
    // expensive.
    if (ItNext - ItCurrent < 32)
      for (auto *I : llvm::make_range(ItCurrent, ItNext))
        for (auto *J : llvm::make_range(ItCurrent, ItNext))
          if (I != J)
            Check(!DT.dominates(I, J),
                  "llvm.experimental.noalias.scope.decl dominates another one "
                  "with the same scope",
                  I);
    ItCurrent = ItNext;
  }
}
7968
7969//===----------------------------------------------------------------------===//
7970// Implement the public interfaces to this file...
7971//===----------------------------------------------------------------------===//
7972
7974 Function &F = const_cast<Function &>(f);
7975
7976 // Don't use a raw_null_ostream. Printing IR is expensive.
7977 Verifier V(OS, /*ShouldTreatBrokenDebugInfoAsError=*/true, *f.getParent());
7978
7979 // Note that this function's return value is inverted from what you would
7980 // expect of a function called "verify".
7981 return !V.verify(F);
7982}
7983
7985 bool *BrokenDebugInfo) {
7986 // Don't use a raw_null_ostream. Printing IR is expensive.
7987 Verifier V(OS, /*ShouldTreatBrokenDebugInfoAsError=*/!BrokenDebugInfo, M);
7988
7989 bool Broken = false;
7990 for (const Function &F : M)
7991 Broken |= !V.verify(F);
7992
7993 Broken |= !V.verify();
7994 if (BrokenDebugInfo)
7995 *BrokenDebugInfo = V.hasBrokenDebugInfo();
7996 // Note that this function's return value is inverted from what you would
7997 // expect of a function called "verify".
7998 return Broken;
7999}
8000
8001namespace {
8002
struct VerifierLegacyPass : public FunctionPass {
  static char ID;

  // Verifier instance shared across runOnFunction and doFinalization.
  std::unique_ptr<Verifier> V;
  // When set, abort compilation as soon as a broken function/module is found.
  bool FatalErrors = true;

  VerifierLegacyPass() : FunctionPass(ID) {}
  explicit VerifierLegacyPass(bool FatalErrors)
      : FunctionPass(ID), FatalErrors(FatalErrors) {}

  bool doInitialization(Module &M) override {
    // Broken debug info is handled separately in doFinalization via
    // hasBrokenDebugInfo(), so it is not treated as an error here.
    V = std::make_unique<Verifier>(
        &dbgs(), /*ShouldTreatBrokenDebugInfoAsError=*/false, M);
    return false;
  }

  bool runOnFunction(Function &F) override {
    if (!V->verify(F) && FatalErrors) {
      errs() << "in function " << F.getName() << '\n';
      report_fatal_error("Broken function found, compilation aborted!");
    }
    return false;
  }

  bool doFinalization(Module &M) override {
    // Declarations are not passed through runOnFunction, so verify them here,
    // along with the module-level checks.
    bool HasErrors = false;
    for (Function &F : M)
      if (F.isDeclaration())
        HasErrors |= !V->verify(F);

    HasErrors |= !V->verify();
    if (FatalErrors && (HasErrors || V->hasBrokenDebugInfo()))
      report_fatal_error("Broken module found, compilation aborted!");
    return false;
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    // The verifier only inspects the IR; it never mutates it.
    AU.setPreservesAll();
  }
};
8043
8044} // end anonymous namespace
8045
8046/// Helper to issue failure from the TBAA verification
template <typename... Tys> void TBAAVerifier::CheckFailed(Tys &&... Args) {
  // Forward to the parent Verifier when one is attached; otherwise the
  // failure is silently dropped.
  if (Diagnostic)
    return Diagnostic->CheckFailed(Args...);
}
8051
// On failure, report via CheckFailed and bail out of the enclosing function
// with `false` (TBAA check routines return a validity flag).
#define CheckTBAA(C, ...)                                                      \
  do {                                                                         \
    if (!(C)) {                                                                \
      CheckFailed(__VA_ARGS__);                                                \
      return false;                                                            \
    }                                                                          \
  } while (false)
8059
8060/// Verify that \p BaseNode can be used as the "base type" in the struct-path
8061/// TBAA scheme. This means \p BaseNode is either a scalar node, or a
8062/// struct-type node describing an aggregate data structure (like a struct).
8063TBAAVerifier::TBAABaseNodeSummary
8064TBAAVerifier::verifyTBAABaseNode(const Instruction *I, const MDNode *BaseNode,
8065 bool IsNewFormat) {
8066 if (BaseNode->getNumOperands() < 2) {
8067 CheckFailed("Base nodes must have at least two operands", I, BaseNode);
8068 return {true, ~0u};
8069 }
8070
8071 auto Itr = TBAABaseNodes.find(BaseNode);
8072 if (Itr != TBAABaseNodes.end())
8073 return Itr->second;
8074
8075 auto Result = verifyTBAABaseNodeImpl(I, BaseNode, IsNewFormat);
8076 auto InsertResult = TBAABaseNodes.insert({BaseNode, Result});
8077 (void)InsertResult;
8078 assert(InsertResult.second && "We just checked!");
8079 return Result;
8080}
8081
8082TBAAVerifier::TBAABaseNodeSummary
8083TBAAVerifier::verifyTBAABaseNodeImpl(const Instruction *I,
8084 const MDNode *BaseNode, bool IsNewFormat) {
8085 const TBAAVerifier::TBAABaseNodeSummary InvalidNode = {true, ~0u};
8086
8087 if (BaseNode->getNumOperands() == 2) {
8088 // Scalar nodes can only be accessed at offset 0.
8089 return isValidScalarTBAANode(BaseNode)
8090 ? TBAAVerifier::TBAABaseNodeSummary({false, 0})
8091 : InvalidNode;
8092 }
8093
8094 if (IsNewFormat) {
8095 if (BaseNode->getNumOperands() % 3 != 0) {
8096 CheckFailed("Access tag nodes must have the number of operands that is a "
8097 "multiple of 3!", BaseNode);
8098 return InvalidNode;
8099 }
8100 } else {
8101 if (BaseNode->getNumOperands() % 2 != 1) {
8102 CheckFailed("Struct tag nodes must have an odd number of operands!",
8103 BaseNode);
8104 return InvalidNode;
8105 }
8106 }
8107
8108 // Check the type size field.
8109 if (IsNewFormat) {
8110 auto *TypeSizeNode = mdconst::dyn_extract_or_null<ConstantInt>(
8111 BaseNode->getOperand(1));
8112 if (!TypeSizeNode) {
8113 CheckFailed("Type size nodes must be constants!", I, BaseNode);
8114 return InvalidNode;
8115 }
8116 }
8117
8118 // Check the type name field. In the new format it can be anything.
8119 if (!IsNewFormat && !isa<MDString>(BaseNode->getOperand(0))) {
8120 CheckFailed("Struct tag nodes have a string as their first operand",
8121 BaseNode);
8122 return InvalidNode;
8123 }
8124
8125 bool Failed = false;
8126
8127 std::optional<APInt> PrevOffset;
8128 unsigned BitWidth = ~0u;
8129
8130 // We've already checked that BaseNode is not a degenerate root node with one
8131 // operand in \c verifyTBAABaseNode, so this loop should run at least once.
8132 unsigned FirstFieldOpNo = IsNewFormat ? 3 : 1;
8133 unsigned NumOpsPerField = IsNewFormat ? 3 : 2;
8134 for (unsigned Idx = FirstFieldOpNo; Idx < BaseNode->getNumOperands();
8135 Idx += NumOpsPerField) {
8136 const MDOperand &FieldTy = BaseNode->getOperand(Idx);
8137 const MDOperand &FieldOffset = BaseNode->getOperand(Idx + 1);
8138 if (!isa<MDNode>(FieldTy)) {
8139 CheckFailed("Incorrect field entry in struct type node!", I, BaseNode);
8140 Failed = true;
8141 continue;
8142 }
8143
8144 auto *OffsetEntryCI =
8146 if (!OffsetEntryCI) {
8147 CheckFailed("Offset entries must be constants!", I, BaseNode);
8148 Failed = true;
8149 continue;
8150 }
8151
8152 if (BitWidth == ~0u)
8153 BitWidth = OffsetEntryCI->getBitWidth();
8154
8155 if (OffsetEntryCI->getBitWidth() != BitWidth) {
8156 CheckFailed(
8157 "Bitwidth between the offsets and struct type entries must match", I,
8158 BaseNode);
8159 Failed = true;
8160 continue;
8161 }
8162
8163 // NB! As far as I can tell, we generate a non-strictly increasing offset
8164 // sequence only from structs that have zero size bit fields. When
8165 // recursing into a contained struct in \c getFieldNodeFromTBAABaseNode we
8166 // pick the field lexically the latest in struct type metadata node. This
8167 // mirrors the actual behavior of the alias analysis implementation.
8168 bool IsAscending =
8169 !PrevOffset || PrevOffset->ule(OffsetEntryCI->getValue());
8170
8171 if (!IsAscending) {
8172 CheckFailed("Offsets must be increasing!", I, BaseNode);
8173 Failed = true;
8174 }
8175
8176 PrevOffset = OffsetEntryCI->getValue();
8177
8178 if (IsNewFormat) {
8179 auto *MemberSizeNode = mdconst::dyn_extract_or_null<ConstantInt>(
8180 BaseNode->getOperand(Idx + 2));
8181 if (!MemberSizeNode) {
8182 CheckFailed("Member size entries must be constants!", I, BaseNode);
8183 Failed = true;
8184 continue;
8185 }
8186 }
8187 }
8188
8189 return Failed ? InvalidNode
8190 : TBAAVerifier::TBAABaseNodeSummary(false, BitWidth);
8191}
8192
// A TBAA root node carries at most a name: fewer than two operands.
static bool IsRootTBAANode(const MDNode *MD) {
  return MD->getNumOperands() < 2;
}
8196
8197static bool IsScalarTBAANodeImpl(const MDNode *MD,
8199 if (MD->getNumOperands() != 2 && MD->getNumOperands() != 3)
8200 return false;
8201
8202 if (!isa<MDString>(MD->getOperand(0)))
8203 return false;
8204
8205 if (MD->getNumOperands() == 3) {
8207 if (!(Offset && Offset->isZero() && isa<MDString>(MD->getOperand(0))))
8208 return false;
8209 }
8210
8211 auto *Parent = dyn_cast_or_null<MDNode>(MD->getOperand(1));
8212 return Parent && Visited.insert(Parent).second &&
8213 (IsRootTBAANode(Parent) || IsScalarTBAANodeImpl(Parent, Visited));
8214}
8215
8216bool TBAAVerifier::isValidScalarTBAANode(const MDNode *MD) {
8217 auto ResultIt = TBAAScalarNodes.find(MD);
8218 if (ResultIt != TBAAScalarNodes.end())
8219 return ResultIt->second;
8220
8221 SmallPtrSet<const MDNode *, 4> Visited;
8222 bool Result = IsScalarTBAANodeImpl(MD, Visited);
8223 auto InsertResult = TBAAScalarNodes.insert({MD, Result});
8224 (void)InsertResult;
8225 assert(InsertResult.second && "Just checked!");
8226
8227 return Result;
8228}
8229
8230/// Returns the field node at the offset \p Offset in \p BaseNode. Update \p
8231/// Offset in place to be the offset within the field node returned.
8232///
8233/// We assume we've okayed \p BaseNode via \c verifyTBAABaseNode.
MDNode *TBAAVerifier::getFieldNodeFromTBAABaseNode(const Instruction *I,
                                                   const MDNode *BaseNode,
                                                   APInt &Offset,
                                                   bool IsNewFormat) {
  assert(BaseNode->getNumOperands() >= 2 && "Invalid base node!");

  // Scalar nodes have only one possible "field" -- their parent in the access
  // hierarchy. Offset must be zero at this point, but our caller is supposed
  // to check that.
  if (BaseNode->getNumOperands() == 2)
    return cast<MDNode>(BaseNode->getOperand(1));

  // Fields are stored as (type, offset[, size]) groups; pairs in the old
  // format, triples in the new one.
  unsigned FirstFieldOpNo = IsNewFormat ? 3 : 1;
  unsigned NumOpsPerField = IsNewFormat ? 3 : 2;
  for (unsigned Idx = FirstFieldOpNo; Idx < BaseNode->getNumOperands();
       Idx += NumOpsPerField) {
    auto *OffsetEntryCI =
        mdconst::extract<ConstantInt>(BaseNode->getOperand(Idx + 1));
    // Field offsets are (non-strictly) increasing, so the first field that
    // starts beyond Offset means the previous field contains it.
    if (OffsetEntryCI->getValue().ugt(Offset)) {
      if (Idx == FirstFieldOpNo) {
        // Offset precedes the very first field: no containing field exists.
        CheckFailed("Could not find TBAA parent in struct type node", I,
                    BaseNode, &Offset);
        return nullptr;
      }

      unsigned PrevIdx = Idx - NumOpsPerField;
      auto *PrevOffsetEntryCI =
          mdconst::extract<ConstantInt>(BaseNode->getOperand(PrevIdx + 1));
      // Rebase Offset to be relative to the field node we descend into.
      Offset -= PrevOffsetEntryCI->getValue();
      return cast<MDNode>(BaseNode->getOperand(PrevIdx));
    }
  }

  // Offset falls within (or beyond) the last field; descend into it.
  unsigned LastIdx = BaseNode->getNumOperands() - NumOpsPerField;
  auto *LastOffsetEntryCI = mdconst::extract<ConstantInt>(
      BaseNode->getOperand(LastIdx + 1));
  Offset -= LastOffsetEntryCI->getValue();
  return cast<MDNode>(BaseNode->getOperand(LastIdx));
}
8273
8275 if (!Type || Type->getNumOperands() < 3)
8276 return false;
8277
8278 // In the new format type nodes shall have a reference to the parent type as
8279 // its first operand.
8280 return isa_and_nonnull<MDNode>(Type->getOperand(0));
8281}
8282
8284 CheckTBAA(MD->getNumOperands() > 0, "TBAA metadata cannot have 0 operands", I,
8285 MD);
8286
8287 if (I)
8291 "This instruction shall not have a TBAA access tag!", I);
8292
8293 bool IsStructPathTBAA =
8294 isa<MDNode>(MD->getOperand(0)) && MD->getNumOperands() >= 3;
8295
8296 CheckTBAA(IsStructPathTBAA,
8297 "Old-style TBAA is no longer allowed, use struct-path TBAA instead",
8298 I);
8299
8300 MDNode *BaseNode = dyn_cast_or_null<MDNode>(MD->getOperand(0));
8301 MDNode *AccessType = dyn_cast_or_null<MDNode>(MD->getOperand(1));
8302
8303 bool IsNewFormat = isNewFormatTBAATypeNode(AccessType);
8304
8305 if (IsNewFormat) {
8306 CheckTBAA(MD->getNumOperands() == 4 || MD->getNumOperands() == 5,
8307 "Access tag metadata must have either 4 or 5 operands", I, MD);
8308 } else {
8309 CheckTBAA(MD->getNumOperands() < 5,
8310 "Struct tag metadata must have either 3 or 4 operands", I, MD);
8311 }
8312
8313 // Check the access size field.
8314 if (IsNewFormat) {
8315 auto *AccessSizeNode = mdconst::dyn_extract_or_null<ConstantInt>(
8316 MD->getOperand(3));
8317 CheckTBAA(AccessSizeNode, "Access size field must be a constant", I, MD);
8318 }
8319
8320 // Check the immutability flag.
8321 unsigned ImmutabilityFlagOpNo = IsNewFormat ? 4 : 3;
8322 if (MD->getNumOperands() == ImmutabilityFlagOpNo + 1) {
8323 auto *IsImmutableCI = mdconst::dyn_extract_or_null<ConstantInt>(
8324 MD->getOperand(ImmutabilityFlagOpNo));
8325 CheckTBAA(IsImmutableCI,
8326 "Immutability tag on struct tag metadata must be a constant", I,
8327 MD);
8328 CheckTBAA(
8329 IsImmutableCI->isZero() || IsImmutableCI->isOne(),
8330 "Immutability part of the struct tag metadata must be either 0 or 1", I,
8331 MD);
8332 }
8333
8334 CheckTBAA(BaseNode && AccessType,
8335 "Malformed struct tag metadata: base and access-type "
8336 "should be non-null and point to Metadata nodes",
8337 I, MD, BaseNode, AccessType);
8338
8339 if (!IsNewFormat) {
8340 CheckTBAA(isValidScalarTBAANode(AccessType),
8341 "Access type node must be a valid scalar type", I, MD,
8342 AccessType);
8343 }
8344
8346 CheckTBAA(OffsetCI, "Offset must be constant integer", I, MD);
8347
8348 APInt Offset = OffsetCI->getValue();
8349 bool SeenAccessTypeInPath = false;
8350
8351 SmallPtrSet<MDNode *, 4> StructPath;
8352
8353 for (/* empty */; BaseNode && !IsRootTBAANode(BaseNode);
8354 BaseNode =
8355 getFieldNodeFromTBAABaseNode(I, BaseNode, Offset, IsNewFormat)) {
8356 if (!StructPath.insert(BaseNode).second) {
8357 CheckFailed("Cycle detected in struct path", I, MD);
8358 return false;
8359 }
8360
8361 bool Invalid;
8362 unsigned BaseNodeBitWidth;
8363 std::tie(Invalid, BaseNodeBitWidth) =
8364 verifyTBAABaseNode(I, BaseNode, IsNewFormat);
8365
8366 // If the base node is invalid in itself, then we've already printed all the
8367 // errors we wanted to print.
8368 if (Invalid)
8369 return false;
8370
8371 SeenAccessTypeInPath |= BaseNode == AccessType;
8372
8373 if (isValidScalarTBAANode(BaseNode) || BaseNode == AccessType)
8374 CheckTBAA(Offset == 0, "Offset not zero at the point of scalar access", I,
8375 MD, &Offset);
8376
8377 CheckTBAA(BaseNodeBitWidth == Offset.getBitWidth() ||
8378 (BaseNodeBitWidth == 0 && Offset == 0) ||
8379 (IsNewFormat && BaseNodeBitWidth == ~0u),
8380 "Access bit-width not the same as description bit-width", I, MD,
8381 BaseNodeBitWidth, Offset.getBitWidth());
8382
8383 if (IsNewFormat && SeenAccessTypeInPath)
8384 break;
8385 }
8386
8387 CheckTBAA(SeenAccessTypeInPath, "Did not see access type in access path!", I,
8388 MD);
8389 return true;
8390}
8391
8392char VerifierLegacyPass::ID = 0;
8393INITIALIZE_PASS(VerifierLegacyPass, "verify", "Module Verifier", false, false)
8394
8396 return new VerifierLegacyPass(FatalErrors);
8397}
8398
8399AnalysisKey VerifierAnalysis::Key;
8406
8411
8413 auto Res = AM.getResult<VerifierAnalysis>(M);
8414 if (FatalErrors && (Res.IRBroken || Res.DebugInfoBroken))
8415 report_fatal_error("Broken module found, compilation aborted!");
8416
8417 return PreservedAnalyses::all();
8418}
8419
8421 auto res = AM.getResult<VerifierAnalysis>(F);
8422 if (res.IRBroken && FatalErrors)
8423 report_fatal_error("Broken function found, compilation aborted!");
8424
8425 return PreservedAnalyses::all();
8426}
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
AMDGPU address space definition.
ArrayRef< TableEntry > TableRef
This file declares a class to represent arbitrary precision floating point values and provide a varie...
This file implements a class to represent arbitrary precision integral constant values and operations...
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Atomic ordering constants.
@ RetAttr
@ FnAttr
This file contains the simple types necessary to represent the attributes associated with functions a...
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
This file contains the declarations for the subclasses of Constant, which represent the different fla...
This file declares the LLVM IR specialization of the GenericConvergenceVerifier template.
static DISubprogram * getSubprogram(bool IsDistinct, Ts &&...Args)
dxil translate DXIL Translate Metadata
This file defines the DenseMap class.
This file contains constants used for implementing Dwarf debug support.
static bool runOnFunction(Function &F, bool PostInlining)
This file contains the declarations of entities that describe floating point environment and related ...
#define Check(C,...)
Hexagon Common GEP
This file provides various utilities for inspecting and working with the control flow graph in LLVM I...
Module.h This file contains the declarations for the Module class.
This header defines various interfaces for pass management in LLVM.
This defines the Use class.
const size_t AbstractManglingParser< Derived, Alloc >::NumOps
#define F(x, y, z)
Definition MD5.cpp:54
#define I(x, y, z)
Definition MD5.cpp:57
Machine Check Debug Module
This file implements a map that provides insertion order iteration.
This file provides utility for Memory Model Relaxation Annotations (MMRAs).
static bool isContiguous(const ConstantRange &A, const ConstantRange &B)
This file contains the declarations for metadata subclasses.
#define T
#define T1
ConstantRange Range(APInt(BitWidth, Low), APInt(BitWidth, High))
uint64_t High
uint64_t IntrinsicInst * II
#define P(N)
ppc ctr loops verify
#define INITIALIZE_PASS(passName, arg, name, cfg, analysis)
Definition PassSupport.h:56
This file contains the declarations for profiling metadata utility functions.
const SmallVectorImpl< MachineOperand > & Cond
static unsigned getNumElements(Type *Ty)
static void visit(BasicBlock &Start, std::function< bool(BasicBlock *)> op)
This file contains some templates that are useful if you are working with the STL at all.
verify safepoint Safepoint IR Verifier
BaseType
A given derived pointer can have multiple base pointers through phi/selects.
This file defines the SmallPtrSet class.
This file defines the SmallVector class.
This file contains some functions that are useful when dealing with strings.
static unsigned getBitWidth(Type *Ty, const DataLayout &DL)
Returns the bitwidth of the given scalar or pointer type.
static bool IsScalarTBAANodeImpl(const MDNode *MD, SmallPtrSetImpl< const MDNode * > &Visited)
static bool isType(const Metadata *MD)
static Instruction * getSuccPad(Instruction *Terminator)
static bool isMDTuple(const Metadata *MD)
static bool isNewFormatTBAATypeNode(llvm::MDNode *Type)
#define CheckDI(C,...)
We know that a debug info condition should be true, if not print an error message.
Definition Verifier.cpp:690
static void forEachUser(const Value *User, SmallPtrSet< const Value *, 32 > &Visited, llvm::function_ref< bool(const Value *)> Callback)
Definition Verifier.cpp:731
static bool isDINode(const Metadata *MD)
static bool isScope(const Metadata *MD)
static cl::opt< bool > VerifyNoAliasScopeDomination("verify-noalias-scope-decl-dom", cl::Hidden, cl::init(false), cl::desc("Ensure that llvm.experimental.noalias.scope.decl for identical " "scopes are not dominating"))
static bool isTypeCongruent(Type *L, Type *R)
Two types are "congruent" if they are identical, or if they are both pointer types with different poi...
#define CheckTBAA(C,...)
static bool isConstantIntMetadataOperand(const Metadata *MD)
static bool IsRootTBAANode(const MDNode *MD)
static Value * getParentPad(Value *EHPad)
static bool hasConflictingReferenceFlags(unsigned Flags)
Detect mutually exclusive flags.
static AttrBuilder getParameterABIAttributes(LLVMContext &C, unsigned I, AttributeList Attrs)
static const char PassName[]
static LLVM_ABI bool isValidArbitraryFPFormat(StringRef Format)
Returns true if the given string is a valid arbitrary floating-point format interpretation for llvm....
Definition APFloat.cpp:5990
bool isFiniteNonZero() const
Definition APFloat.h:1526
bool isNegative() const
Definition APFloat.h:1516
const fltSemantics & getSemantics() const
Definition APFloat.h:1524
Class for arbitrary precision integers.
Definition APInt.h:78
bool sgt(const APInt &RHS) const
Signed greater than comparison.
Definition APInt.h:1208
bool isMinValue() const
Determine if this is the smallest unsigned value.
Definition APInt.h:418
bool ule(const APInt &RHS) const
Unsigned less or equal comparison.
Definition APInt.h:1157
bool isPowerOf2() const
Check if this APInt's value is a power of two greater than zero.
Definition APInt.h:441
int64_t getSExtValue() const
Get sign extended value.
Definition APInt.h:1585
bool isMaxValue() const
Determine if this is the largest unsigned value.
Definition APInt.h:400
This class represents a conversion between pointers from one address space to another.
bool isSwiftError() const
Return true if this alloca is used as a swifterror argument to a call.
LLVM_ABI bool isStaticAlloca() const
Return true if this alloca is in the entry block of the function and is a constant size.
Align getAlign() const
Return the alignment of the memory that is being allocated by the instruction.
Type * getAllocatedType() const
Return the type that is being allocated by the instruction.
unsigned getAddressSpace() const
Return the address space for the allocation.
LLVM_ABI bool isArrayAllocation() const
Return true if there is an allocation size parameter to the allocation instruction that is not 1.
const Value * getArraySize() const
Get the number of elements allocated.
PassT::Result & getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs)
Get the result of an analysis pass for a given IR unit.
void setPreservesAll()
Set by analyses that do not transform their input at all.
LLVM_ABI bool hasInRegAttr() const
Return true if this argument has the inreg attribute.
Definition Function.cpp:292
bool empty() const
empty - Check if the array is empty.
Definition ArrayRef.h:137
static bool isFPOperation(BinOp Op)
BinOp getOperation() const
static LLVM_ABI StringRef getOperationName(BinOp Op)
AtomicOrdering getOrdering() const
Returns the ordering constraint of this rmw instruction.
bool contains(Attribute::AttrKind A) const
Return true if the builder has the specified attribute.
LLVM_ABI bool hasAttribute(Attribute::AttrKind Kind) const
Return true if the attribute exists in this set.
LLVM_ABI std::string getAsString(bool InAttrGrp=false) const
Functions, function parameters, and return types can have attributes to indicate how they should be t...
Definition Attributes.h:105
LLVM_ABI const ConstantRange & getValueAsConstantRange() const
Return the attribute's value as a ConstantRange.
LLVM_ABI StringRef getValueAsString() const
Return the attribute's value as a string.
AttrKind
This enumeration lists the attributes that can be associated with parameters, function results,...
Definition Attributes.h:124
bool isValid() const
Return true if the attribute is any kind of attribute.
Definition Attributes.h:261
LLVM_ABI Type * getValueAsType() const
Return the attribute's value as a Type.
LLVM Basic Block Representation.
Definition BasicBlock.h:62
iterator begin()
Instruction iterator methods.
Definition BasicBlock.h:461
iterator_range< const_phi_iterator > phis() const
Returns a range that iterates over the phis in the basic block.
Definition BasicBlock.h:530
const Function * getParent() const
Return the enclosing method, or null if none.
Definition BasicBlock.h:213
LLVM_ABI InstListType::const_iterator getFirstNonPHIIt() const
Returns an iterator to the first instruction in this block that is not a PHINode instruction.
LLVM_ABI bool isEntryBlock() const
Return true if this is the entry block of the containing function.
const Instruction & front() const
Definition BasicBlock.h:484
LLVM_ABI const BasicBlock * getUniquePredecessor() const
Return the predecessor of this block if it has a unique predecessor block.
InstListType::iterator iterator
Instruction iterators...
Definition BasicBlock.h:170
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction; assumes that the block is well-formed.
Definition BasicBlock.h:237
This class represents a no-op cast from one type to another.
static LLVM_ABI BlockAddress * lookup(const BasicBlock *BB)
Lookup an existing BlockAddress constant for the given BasicBlock.
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
bool isInlineAsm() const
Check if this call is an inline asm statement.
bool hasInAllocaArgument() const
Determine if there is an inalloca argument.
OperandBundleUse getOperandBundleAt(unsigned Index) const
Return the operand bundle at a specific index.
Function * getCalledFunction() const
Returns the function called, or null if this is an indirect function invocation or the function signa...
bool doesNotAccessMemory(unsigned OpNo) const
bool hasFnAttr(Attribute::AttrKind Kind) const
Determine whether this call has the given attribute.
bool hasRetAttr(Attribute::AttrKind Kind) const
Determine whether the return value has the given attribute.
unsigned getNumOperandBundles() const
Return the number of operand bundles associated with this User.
CallingConv::ID getCallingConv() const
LLVM_ABI bool paramHasAttr(unsigned ArgNo, Attribute::AttrKind Kind) const
Determine whether the argument or parameter has the given attribute.
Attribute getParamAttr(unsigned ArgNo, Attribute::AttrKind Kind) const
Get the attribute of a given kind from a given arg.
iterator_range< bundle_op_iterator > bundle_op_infos()
Return the range [bundle_op_info_begin, bundle_op_info_end).
unsigned countOperandBundlesOfType(StringRef Name) const
Return the number of operand bundles with the tag Name attached to this instruction.
bool onlyReadsMemory(unsigned OpNo) const
Value * getCalledOperand() const
Type * getParamElementType(unsigned ArgNo) const
Extract the elementtype type for a parameter.
Value * getArgOperand(unsigned i) const
FunctionType * getFunctionType() const
LLVM_ABI Intrinsic::ID getIntrinsicID() const
Returns the intrinsic ID of the intrinsic called or Intrinsic::not_intrinsic if the called function i...
iterator_range< User::op_iterator > args()
Iteration adapter for range-for loops.
bool doesNotReturn() const
Determine if the call cannot return.
LLVM_ABI bool onlyAccessesArgMemory() const
Determine if the call can access memory only using pointers based on its arguments.
unsigned arg_size() const
AttributeList getAttributes() const
Return the attributes for this call.
bool hasOperandBundles() const
Return true if this User has any operand bundles.
LLVM_ABI Function * getCaller()
Helper to get the caller (the parent function).
BasicBlock * getIndirectDest(unsigned i) const
unsigned getNumIndirectDests() const
Return the number of callbr indirect dest labels.
bool isMustTailCall() const
static LLVM_ABI bool castIsValid(Instruction::CastOps op, Type *SrcTy, Type *DstTy)
This method can be used to determine if a cast from SrcTy to DstTy using Opcode op is valid or not.
unsigned getNumHandlers() const
return the number of 'handlers' in this catchswitch instruction, except the default handler
Value * getParentPad() const
BasicBlock * getUnwindDest() const
handler_range handlers()
iteration adapter for range-for loops.
BasicBlock * getUnwindDest() const
bool isFPPredicate() const
Definition InstrTypes.h:782
bool isIntPredicate() const
Definition InstrTypes.h:783
static bool isIntPredicate(Predicate P)
Definition InstrTypes.h:776
Value * getCondition() const
bool isMinusOne() const
This function will return true iff every bit in this constant is set to true.
Definition Constants.h:231
bool isNegative() const
Definition Constants.h:214
bool isZero() const
This is just a convenience method to make client code smaller for a common code.
Definition Constants.h:219
unsigned getBitWidth() const
getBitWidth - Return the scalar bitwidth of this constant.
Definition Constants.h:162
uint64_t getZExtValue() const
Return the constant as a 64-bit unsigned integer value after it has been zero extended as appropriate...
Definition Constants.h:168
const APInt & getValue() const
Return the constant as an APInt value reference.
Definition Constants.h:159
Constant * getAddrDiscriminator() const
The address discriminator if any, or the null constant.
Definition Constants.h:1239
Constant * getPointer() const
The pointer that is signed in this ptrauth signed pointer.
Definition Constants.h:1226
ConstantInt * getKey() const
The Key ID, an i32 constant.
Definition Constants.h:1229
Constant * getDeactivationSymbol() const
Definition Constants.h:1248
ConstantInt * getDiscriminator() const
The integer discriminator, an i64 constant, or 0.
Definition Constants.h:1232
static LLVM_ABI bool isOrderedRanges(ArrayRef< ConstantRange > RangesRef)
This class represents a range of values.
const APInt & getLower() const
Return the lower value for this range.
const APInt & getUpper() const
Return the upper value for this range.
LLVM_ABI bool contains(const APInt &Val) const
Return true if the specified value is in the set.
uint32_t getBitWidth() const
Get the bit width of this ConstantRange.
static LLVM_ABI ConstantTokenNone * get(LLVMContext &Context)
Return the ConstantTokenNone.
LLVM_ABI bool isNullValue() const
Return true if this is the value that would be returned by getNullValue.
Definition Constants.cpp:74
LLVM_ABI std::optional< fp::ExceptionBehavior > getExceptionBehavior() const
LLVM_ABI std::optional< RoundingMode > getRoundingMode() const
LLVM_ABI unsigned getNonMetadataArgCount() const
DbgVariableFragmentInfo FragmentInfo
@ FixedPointBinary
Scale factor 2^Factor.
@ FixedPointDecimal
Scale factor 10^Factor.
@ FixedPointRational
Arbitrary rational scale factor.
DIGlobalVariable * getVariable() const
LLVM_ABI DISubprogram * getSubprogram() const
Get the subprogram for this scope.
DILocalScope * getScope() const
Get the local scope for this variable.
Metadata * getRawScope() const
Base class for scope-like contexts.
Subprogram description. Uses SubclassData1.
static const DIScope * getRawRetainedNodeScope(const MDNode *N)
Base class for template parameters.
Base class for types.
Base class for variables.
Metadata * getRawType() const
Metadata * getRawScope() const
A parsed version of the target data layout string in and methods for querying it.
Definition DataLayout.h:64
Records a position in IR for a source label (DILabel).
Base class for non-instruction debug metadata records that have positions within IR.
LLVM_ABI void print(raw_ostream &O, bool IsForDebug=false) const
DebugLoc getDebugLoc() const
LLVM_ABI const BasicBlock * getParent() const
LLVM_ABI Function * getFunction()
Record of a variable value-assignment, aka a non instruction representation of the dbg....
LLVM_ABI Value * getVariableLocationOp(unsigned OpIdx) const
DIExpression * getExpression() const
DILocalVariable * getVariable() const
Metadata * getRawLocation() const
Returns the metadata operand for the first location description.
@ End
Marks the end of the concrete types.
@ Any
To indicate all LocationTypes in searches.
DIExpression * getAddressExpression() const
MDNode * getAsMDNode() const
Return this as a bare MDNode.
Definition DebugLoc.h:290
ValueT lookup(const_arg_type_t< KeyT > Val) const
lookup - Return the entry for the specified key, or a default constructed value if no such entry exis...
Definition DenseMap.h:205
iterator find(const_arg_type_t< KeyT > Val)
Definition DenseMap.h:178
bool empty() const
Definition DenseMap.h:109
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Definition DenseMap.h:241
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
Definition Dominators.h:159
This instruction extracts a single (scalar) element from a VectorType value.
static LLVM_ABI bool isValidOperands(const Value *Vec, const Value *Idx)
Return true if an extractelement instruction can be formed with the specified operands.
ArrayRef< unsigned > getIndices() const
static LLVM_ABI Type * getIndexedType(Type *Agg, ArrayRef< unsigned > Idxs)
Returns the type of the element that would be extracted with an extractvalue instruction with the spe...
This instruction compares its operands according to the predicate given to the constructor.
This class represents an extension of floating point types.
This class represents a cast from floating point to signed integer.
This class represents a cast from floating point to unsigned integer.
This class represents a truncation of floating point types.
AtomicOrdering getOrdering() const
Returns the ordering constraint of this fence instruction.
op_range arg_operands()
arg_operands - iteration adapter for range-for loops.
Value * getParentPad() const
Convenience accessors.
FunctionPass class - This class is used to implement most global optimizations.
Definition Pass.h:314
Type * getReturnType() const
FunctionType * getFunctionType() const
Returns the FunctionType for me.
Definition Function.h:211
Intrinsic::ID getIntrinsicID() const LLVM_READONLY
getIntrinsicID - This method returns the ID number of the specified function, or Intrinsic::not_intri...
Definition Function.h:246
DISubprogram * getSubprogram() const
Get the attached subprogram.
CallingConv::ID getCallingConv() const
getCallingConv()/setCallingConv(CC) - These method get and set the calling convention of this functio...
Definition Function.h:272
bool hasPersonalityFn() const
Check whether this function has a personality function.
Definition Function.h:905
const Function & getFunction() const
Definition Function.h:166
const std::string & getGC() const
Definition Function.cpp:818
Type * getReturnType() const
Returns the type of the ret val.
Definition Function.h:216
bool isVarArg() const
isVarArg - Return true if this function takes a variable number of arguments.
Definition Function.h:229
LLVM_ABI Value * getBasePtr() const
LLVM_ABI Value * getDerivedPtr() const
static LLVM_ABI Type * getIndexedType(Type *Ty, ArrayRef< Value * > IdxList)
Returns the result type of a getelementptr with the given source element type and indexes.
static bool isValidLinkage(LinkageTypes L)
Definition GlobalAlias.h:98
const Constant * getAliasee() const
Definition GlobalAlias.h:87
LLVM_ABI const Function * getResolverFunction() const
Definition Globals.cpp:688
static bool isValidLinkage(LinkageTypes L)
Definition GlobalIFunc.h:86
const Constant * getResolver() const
Definition GlobalIFunc.h:73
LLVM_ABI void getAllMetadata(SmallVectorImpl< std::pair< unsigned, MDNode * > > &MDs) const
Appends all metadata attached to this value to MDs, sorting by KindID.
bool hasComdat() const
MDNode * getMetadata(unsigned KindID) const
Get the metadata of given kind attached to this GlobalObject.
bool hasExternalLinkage() const
bool isDSOLocal() const
bool isImplicitDSOLocal() const
LLVM_ABI bool isDeclaration() const
Return true if the primary definition of this global value is outside of the current translation unit...
Definition Globals.cpp:337
bool hasValidDeclarationLinkage() const
LinkageTypes getLinkage() const
bool hasDefaultVisibility() const
bool hasPrivateLinkage() const
bool hasHiddenVisibility() const
bool hasExternalWeakLinkage() const
bool hasDLLImportStorageClass() const
bool hasDLLExportStorageClass() const
bool isDeclarationForLinker() const
unsigned getAddressSpace() const
Module * getParent()
Get the module that this global value is contained inside of...
PointerType * getType() const
Global values are always pointers.
LLVM_ABI bool isInterposable() const
Return true if this global's definition can be substituted with an arbitrary definition at link time ...
Definition Globals.cpp:116
bool hasComdat() const
bool hasCommonLinkage() const
bool hasGlobalUnnamedAddr() const
bool hasAppendingLinkage() const
bool hasAvailableExternallyLinkage() const
Type * getValueType() const
const Constant * getInitializer() const
getInitializer - Return the initializer for this global variable.
bool hasInitializer() const
Definitions have initializers, declarations don't.
MaybeAlign getAlign() const
Returns the alignment of the given variable.
LLVM_ABI uint64_t getGlobalSize(const DataLayout &DL) const
Get the size of this global variable in bytes.
Definition Globals.cpp:569
bool isConstant() const
If the value is a global constant, its value is immutable throughout the runtime execution of the pro...
bool hasDefinitiveInitializer() const
hasDefinitiveInitializer - Whether the global variable has an initializer, and any other instances of...
This instruction compares its operands according to the predicate given to the constructor.
BasicBlock * getDestination(unsigned i)
Return the specified destination.
unsigned getNumDestinations() const
return the number of possible destinations in this indirectbr instruction.
unsigned getNumSuccessors() const
This instruction inserts a single (scalar) element into a VectorType value.
static LLVM_ABI bool isValidOperands(const Value *Vec, const Value *NewElt, const Value *Idx)
Return true if an insertelement instruction can be formed with the specified operands.
ArrayRef< unsigned > getIndices() const
Base class for instruction visitors.
Definition InstVisitor.h:78
void visit(Iterator Start, Iterator End)
Definition InstVisitor.h:87
LLVM_ABI unsigned getNumSuccessors() const LLVM_READONLY
Return the number of successors that this instruction has.
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
LLVM_ABI bool isAtomic() const LLVM_READONLY
Return true if this instruction has an AtomicOrdering of unordered or higher.
LLVM_ABI const Function * getFunction() const
Return the function this instruction belongs to.
This class represents a cast from an integer to a pointer.
static LLVM_ABI bool mayLowerToFunctionCall(Intrinsic::ID IID)
Check if the intrinsic might lower into a regular function call in the course of IR transformations.
Intrinsic::ID getIntrinsicID() const
Return the intrinsic ID of this intrinsic.
This is an important class for using LLVM in a threaded context.
Definition LLVMContext.h:68
bool isCleanup() const
Return 'true' if this landingpad instruction is a cleanup.
unsigned getNumClauses() const
Get the number of clauses for this landing pad.
bool isCatch(unsigned Idx) const
Return 'true' if the clause and index Idx is a catch clause.
bool isFilter(unsigned Idx) const
Return 'true' if the clause and index Idx is a filter clause.
Constant * getClause(unsigned Idx) const
Get the value of the clause at index Idx.
AtomicOrdering getOrdering() const
Returns the ordering constraint of this load instruction.
SyncScope::ID getSyncScopeID() const
Returns the synchronization scope ID of this load instruction.
Align getAlign() const
Return the alignment of the access that is being performed.
Metadata node.
Definition Metadata.h:1080
const MDOperand & getOperand(unsigned I) const
Definition Metadata.h:1444
bool isTemporary() const
Definition Metadata.h:1264
ArrayRef< MDOperand > operands() const
Definition Metadata.h:1442
unsigned getNumOperands() const
Return number of MDNode operands.
Definition Metadata.h:1450
bool isDistinct() const
Definition Metadata.h:1263
bool isResolved() const
Check if node is fully resolved.
Definition Metadata.h:1260
LLVMContext & getContext() const
Definition Metadata.h:1244
bool equalsStr(StringRef Str) const
Definition Metadata.h:924
Metadata * get() const
Definition Metadata.h:931
LLVM_ABI StringRef getString() const
Definition Metadata.cpp:632
static LLVM_ABI bool isTagMD(const Metadata *MD)
This class implements a map that also provides access to all stored values in a deterministic order.
Definition MapVector.h:36
static LLVM_ABI MetadataAsValue * getIfExists(LLVMContext &Context, Metadata *MD)
Definition Metadata.cpp:118
Metadata * getMetadata() const
Definition Metadata.h:202
Root of the metadata hierarchy.
Definition Metadata.h:64
LLVM_ABI void print(raw_ostream &OS, const Module *M=nullptr, bool IsForDebug=false) const
Print.
unsigned getMetadataID() const
Definition Metadata.h:104
Manage lifetime of a slot tracker for printing IR.
A Module instance is used to store all the information related to an LLVM module.
Definition Module.h:67
LLVM_ABI StringRef getName() const
LLVM_ABI void print(raw_ostream &ROS, bool IsForDebug=false) const
LLVM_ABI unsigned getNumOperands() const
iterator_range< op_iterator > operands()
Definition Metadata.h:1856
op_range incoming_values()
A set of analyses that are preserved following a run of a transformation pass.
Definition Analysis.h:112
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
Definition Analysis.h:118
This class represents a cast from a pointer to an address (non-capturing ptrtoint).
This class represents a cast from a pointer to an integer.
Value * getValue() const
Convenience accessor.
Value * getReturnValue() const
Convenience accessor. Returns null if there is no return value.
This class represents a sign extension of integer types.
This class represents a cast from signed integer to floating point.
static LLVM_ABI const char * areInvalidOperands(Value *Cond, Value *True, Value *False)
Return a string if the specified operands are invalid for a select operation, otherwise return null.
This instruction constructs a fixed permutation of two input vectors.
static LLVM_ABI bool isValidOperands(const Value *V1, const Value *V2, const Value *Mask)
Return true if a shufflevector instruction can be formed with the specified operands.
static LLVM_ABI void getShuffleMask(const Constant *Mask, SmallVectorImpl< int > &Result)
Convert the input shuffle mask operand to a vector of integers.
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
void insert_range(Range &&R)
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
void reserve(size_type N)
iterator insert(iterator I, T &&Elt)
void resize(size_type N)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
std::pair< StringRef, StringRef > split(char Separator) const
Split into two substrings around the first occurrence of a separator character.
Definition StringRef.h:730
static constexpr size_t npos
Definition StringRef.h:57
bool getAsInteger(unsigned Radix, T &Result) const
Parse the current string as an integer of the specified radix.
Definition StringRef.h:490
bool starts_with(StringRef Prefix) const
Check if this string starts with the given Prefix.
Definition StringRef.h:258
constexpr bool empty() const
empty - Check if the string is empty.
Definition StringRef.h:140
unsigned getNumElements() const
Random access to the elements.
LLVM_ABI Type * getTypeAtIndex(const Value *V) const
Given an index value into the type, return the type of the element.
Definition Type.cpp:788
LLVM_ABI bool isScalableTy(SmallPtrSetImpl< const Type * > &Visited) const
Returns true if this struct contains a scalable vector.
Definition Type.cpp:510
Verify that the TBAA Metadatas are valid.
Definition Verifier.h:40
LLVM_ABI bool visitTBAAMetadata(const Instruction *I, const MDNode *MD)
Visit an instruction, or a TBAA node itself as part of a metadata, and return true if it is valid,...
unsigned size() const
Triple - Helper class for working with autoconf configuration names.
Definition Triple.h:47
This class represents a truncation of integer types.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition Twine.h:82
The instances of the Type class are immutable: once they are created, they are never changed.
Definition Type.h:46
bool isByteTy() const
True if this is an instance of ByteType.
Definition Type.h:242
bool isVectorTy() const
True if this is an instance of VectorType.
Definition Type.h:290
LLVM_ABI bool containsNonGlobalTargetExtType(SmallPtrSetImpl< const Type * > &Visited) const
Return true if this type is or contains a target extension type that disallows being used as a global...
Definition Type.cpp:78
bool isArrayTy() const
True if this is an instance of ArrayType.
Definition Type.h:281
LLVM_ABI bool containsNonLocalTargetExtType(SmallPtrSetImpl< const Type * > &Visited) const
Return true if this type is or contains a target extension type that disallows being used as a local.
Definition Type.cpp:94
LLVM_ABI bool isScalableTy(SmallPtrSetImpl< const Type * > &Visited) const
Return true if this is a type whose size is a known multiple of vscale.
Definition Type.cpp:65
bool isLabelTy() const
Return true if this is 'label'.
Definition Type.h:230
bool isIntOrIntVectorTy() const
Return true if this is an integer type or a vector of integer types.
Definition Type.h:263
bool isPointerTy() const
True if this is an instance of PointerType.
Definition Type.h:284
LLVM_ABI bool isTokenLikeTy() const
Returns true if this is 'token' or a token-like target type.
Definition Type.cpp:1136
LLVM_ABI unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
bool isSingleValueType() const
Return true if the type is a valid type for a register in codegen.
Definition Type.h:313
LLVM_ABI bool canLosslesslyBitCastTo(Type *Ty) const
Return true if this type could be converted with a lossless BitCast to type 'Ty'.
Definition Type.cpp:157
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
Definition Type.h:370
bool isSized(SmallPtrSetImpl< Type * > *Visited=nullptr) const
Return true if it makes sense to take the size of this type.
Definition Type.h:328
LLVM_ABI unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
Definition Type.cpp:236
bool isFloatingPointTy() const
Return true if this is one of the floating-point types.
Definition Type.h:186
bool isPtrOrPtrVectorTy() const
Return true if this is a pointer type or a vector of pointer types.
Definition Type.h:287
bool isIntOrPtrTy() const
Return true if this is an integer type or a pointer type.
Definition Type.h:272
bool isIntegerTy() const
True if this is an instance of IntegerType.
Definition Type.h:257
bool isFPOrFPVectorTy() const
Return true if this is a FP type or a vector of FP.
Definition Type.h:227
bool isVoidTy() const
Return true if this is 'void'.
Definition Type.h:141
bool isMetadataTy() const
Return true if this is 'metadata'.
Definition Type.h:233
This class represents a cast unsigned integer to floating point.
op_range operands()
Definition User.h:267
Value * getOperand(unsigned i) const
Definition User.h:207
unsigned getNumOperands() const
Definition User.h:229
This class represents the va_arg llvm instruction, which returns an argument of the specified type gi...
Value * getValue() const
Definition Metadata.h:499
LLVM Value Representation.
Definition Value.h:75
iterator_range< user_iterator > materialized_users()
Definition Value.h:420
Type * getType() const
All values are typed, get the type of this value.
Definition Value.h:255
LLVM_ABI const Value * stripPointerCastsAndAliases() const
Strip off pointer casts, all-zero GEPs, address space casts, and aliases.
Definition Value.cpp:713
LLVMContext & getContext() const
All values hold a context through their type.
Definition Value.h:258
LLVM_ABI const Value * stripInBoundsOffsets(function_ref< void(const Value *)> Func=[](const Value *) {}) const
Strip off pointer casts and inbounds GEPs.
Definition Value.cpp:820
iterator_range< user_iterator > users()
Definition Value.h:426
bool materialized_use_empty() const
Definition Value.h:351
LLVM_ABI const Value * stripPointerCasts() const
Strip off pointer casts, all-zero GEPs and address space casts.
Definition Value.cpp:709
bool hasName() const
Definition Value.h:261
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
Definition Value.cpp:318
Check a module for errors, and report separate error states for IR and debug info errors.
Definition Verifier.h:109
LLVM_ABI Result run(Module &M, ModuleAnalysisManager &)
LLVM_ABI PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM)
This class represents zero extension of integer types.
constexpr bool isNonZero() const
Definition TypeSize.h:155
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
Definition TypeSize.h:168
constexpr ScalarTy getKnownMinValue() const
Returns the minimum value this quantity can represent.
Definition TypeSize.h:165
An efficient, type-erasing, non-owning reference to a callable.
const ParentTy * getParent() const
Definition ilist_node.h:34
NodeTy * getNextNode()
Get the next node, or nullptr for the list tail.
Definition ilist_node.h:348
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition raw_ostream.h:53
CallInst * Call
This file contains the declaration of the Comdat class, which represents a single COMDAT in LLVM.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
@ FLAT_ADDRESS
Address space for flat memory.
@ GLOBAL_ADDRESS
Address space for global memory (RAT0, VTX0).
@ PRIVATE_ADDRESS
Address space for private memory.
constexpr char Align[]
Key for Kernel::Arg::Metadata::mAlign.
constexpr char Args[]
Key for Kernel::Metadata::mArgs.
constexpr char Attrs[]
Key for Kernel::Metadata::mAttrs.
bool isFlatGlobalAddrSpace(unsigned AS)
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
@ Entry
Definition COFF.h:862
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
Definition CallingConv.h:24
@ C
The default llvm calling convention, compatible with C.
Definition CallingConv.h:34
@ BasicBlock
Various leaf nodes.
Definition ISDOpcodes.h:81
LLVM_ABI void getIntrinsicInfoTableEntries(ID id, SmallVectorImpl< IITDescriptor > &T)
Return the IIT table descriptor for the specified intrinsic into an array of IITDescriptors.
@ MatchIntrinsicTypes_NoMatchRet
Definition Intrinsics.h:267
@ MatchIntrinsicTypes_NoMatchArg
Definition Intrinsics.h:268
LLVM_ABI MatchIntrinsicTypesResult matchIntrinsicSignature(FunctionType *FTy, ArrayRef< IITDescriptor > &Infos, SmallVectorImpl< Type * > &OverloadTys)
Match the specified function type with the type constraints specified by the .td file.
LLVM_ABI bool hasConstrainedFPRoundingModeOperand(ID QID)
Returns true if the intrinsic ID is for one of the "ConstrainedFloating-Point Intrinsics" that take r...
LLVM_ABI StringRef getName(ID id)
Return the LLVM name for an intrinsic, such as "llvm.ppc.altivec.lvx".
static const int NoAliasScopeDeclScopeArg
Definition Intrinsics.h:41
LLVM_ABI bool matchIntrinsicVarArg(bool isVarArg, ArrayRef< IITDescriptor > &Infos)
Verify if the intrinsic has variable arguments.
std::variant< std::monostate, Loc::Single, Loc::Multi, Loc::MMI, Loc::EntryValue > Variant
Alias for the std::variant specialization base class of DbgVariable.
Definition DwarfDebug.h:190
Flag
These should be considered private to the implementation of the MCInstrDesc class.
@ System
Synchronized with respect to all concurrently executing threads.
Definition LLVMContext.h:58
LLVM_ABI std::optional< VFInfo > tryDemangleForVFABI(StringRef MangledName, const FunctionType *FTy)
Function to construct a VFInfo out of a mangled name in the following format:
@ CE
Windows NT (Windows on ARM)
Definition MCAsmInfo.h:48
LLVM_ABI AssignmentInstRange getAssignmentInsts(DIAssignID *ID)
Return a range of instructions (typically just one) that have ID as an attachment.
initializer< Ty > init(const Ty &Val)
@ DW_MACINFO_undef
Definition Dwarf.h:818
@ DW_MACINFO_start_file
Definition Dwarf.h:819
@ DW_MACINFO_define
Definition Dwarf.h:817
std::enable_if_t< detail::IsValidPointer< X, Y >::value, X * > dyn_extract_or_null(Y &&MD)
Extract a Value from Metadata, if any, allowing null.
Definition Metadata.h:709
std::enable_if_t< detail::IsValidPointer< X, Y >::value, X * > dyn_extract(Y &&MD)
Extract a Value from Metadata, if any.
Definition Metadata.h:696
std::enable_if_t< detail::IsValidPointer< X, Y >::value, X * > extract(Y &&MD)
Extract a Value from Metadata.
Definition Metadata.h:668
@ User
could "use" a pointer
NodeAddr< UseNode * > Use
Definition RDFGraph.h:385
NodeAddr< NodeBase * > Node
Definition RDFGraph.h:381
friend class Instruction
Iterator for Instructions in a `BasicBlock`.
Definition BasicBlock.h:73
This is an optimization pass for GlobalISel generic memory operations.
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
Definition STLExtras.h:316
@ Low
Lower the current thread's priority such that it does not affect foreground tasks significantly.
Definition Threading.h:280
@ Offset
Definition DWP.cpp:532
FunctionAddr VTableAddr Value
Definition InstrProf.h:137
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1739
LLVM_ABI bool canInstructionHaveMMRAs(const Instruction &I)
detail::zippy< detail::zip_first, T, U, Args... > zip_equal(T &&t, U &&u, Args &&...args)
zip iterator that assumes that all iteratees have the same length.
Definition STLExtras.h:841
LLVM_ABI unsigned getBranchWeightOffset(const MDNode *ProfileData)
Return the offset to the first branch weight data.
constexpr bool isInt(int64_t x)
Checks if an integer fits into the given bit width.
Definition MathExtras.h:165
auto enumerate(FirstRange &&First, RestRanges &&...Rest)
Given two or more input ranges, returns a new range whose values are tuples (A, B,...
Definition STLExtras.h:2554
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:643
LLVM_ABI bool verifyFunction(const Function &F, raw_ostream *OS=nullptr)
Check a function for errors, useful for use when debugging a pass.
AllocFnKind
Definition Attributes.h:53
testing::Matcher< const detail::ErrorHolder & > Failed()
Definition Error.h:198
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
Definition STLExtras.h:2208
LLVM_ABI DenseMap< BasicBlock *, ColorVector > colorEHFunclets(Function &F)
If an EH funclet personality is in use (see isFuncletEHPersonality), this will recompute which blocks...
constexpr bool isUIntN(unsigned N, uint64_t x)
Checks if an unsigned integer fits into the given (dynamic) bit width.
Definition MathExtras.h:243
constexpr bool isPowerOf2_64(uint64_t Value)
Return true if the argument is a power of two > 0 (64 bit edition.)
Definition MathExtras.h:284
gep_type_iterator gep_type_end(const User *GEP)
bool isa_and_nonnull(const Y &Val)
Definition Casting.h:676
Op::Description Desc
bool isScopedEHPersonality(EHPersonality Pers)
Returns true if this personality uses scope-style EH IR instructions: catchswitch,...
auto dyn_cast_or_null(const Y &Val)
Definition Casting.h:753
GenericConvergenceVerifier< SSAContext > ConvergenceVerifier
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
Definition MathExtras.h:279
bool isModSet(const ModRefInfo MRI)
Definition ModRef.h:49
void sort(IteratorTy Start, IteratorTy End)
Definition STLExtras.h:1636
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition Debug.cpp:207
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
Definition Error.cpp:163
generic_gep_type_iterator<> gep_type_iterator
FunctionAddr VTableAddr Count
Definition InstrProf.h:139
LLVM_ABI EHPersonality classifyEHPersonality(const Value *Pers)
See if the given exception handling personality function is one that we understand.
iterator_range< SplittingIterator > split(StringRef Str, StringRef Separator)
Split the specified string over a separator and return a range-compatible iterable over its partition...
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
Definition Casting.h:547
LLVM_ABI bool isValueProfileMD(const MDNode *ProfileData)
Checks if an MDNode contains value profiling Metadata.
LLVM_ABI raw_fd_ostream & errs()
This returns a reference to a raw_ostream for standard error.
LLVM_ABI unsigned getNumBranchWeights(const MDNode &ProfileData)
AtomicOrdering
Atomic ordering for LLVM's memory model.
@ First
Helpers to iterate all locations in the MemoryEffectsBase class.
Definition ModRef.h:74
LLVM_ABI FunctionPass * createVerifierPass(bool FatalErrors=true)
FunctionAddr VTableAddr Next
Definition InstrProf.h:141
DWARFExpression::Operation Op
ArrayRef(const T &OneElt) -> ArrayRef< T >
constexpr unsigned BitWidth
TinyPtrVector< BasicBlock * > ColorVector
LLVM_ABI const char * LLVMLoopEstimatedTripCount
Profile-based loop metadata that should be accessed only by using llvm::getLoopEstimatedTripCount and...
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:559
LLVM_ABI std::optional< RoundingMode > convertStrToRoundingMode(StringRef)
Returns a valid RoundingMode enumerator when given a string that is valid as input in constrained int...
Definition FPEnv.cpp:25
gep_type_iterator gep_type_begin(const User *GEP)
LLVM_ABI std::unique_ptr< GCStrategy > getGCStrategy(const StringRef Name)
Lookup the GCStrategy object associated with the given gc name.
auto predecessors(const MachineBasicBlock *BB)
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Definition STLExtras.h:1947
bool pred_empty(const BasicBlock *BB)
Definition CFG.h:119
bool isHexDigit(char C)
Checks if character C is a hexadecimal numeric character.
AnalysisManager< Function > FunctionAnalysisManager
Convenience typedef for the Function analysis manager.
constexpr bool isCallableCC(CallingConv::ID CC)
LLVM_ABI bool verifyModule(const Module &M, raw_ostream *OS=nullptr, bool *BrokenDebugInfo=nullptr)
Check a module for errors.
AnalysisManager< Module > ModuleAnalysisManager
Convenience typedef for the Module analysis manager.
Definition MIRParser.h:39
#define N
constexpr uint64_t value() const
This is a hole in the type system and should not be abused.
Definition Alignment.h:77
A special type used by analysis passes to provide an address that identifies that particular analysis...
Definition Analysis.h:29
static LLVM_ABI const char * SyntheticFunctionEntryCount
static LLVM_ABI const char * UnknownBranchWeightsMarker
static LLVM_ABI const char * ValueProfile
static LLVM_ABI const char * FunctionEntryCount
static LLVM_ABI const char * BranchWeights
uint32_t getTagID() const
Return the tag of this operand bundle as an integer.
ArrayRef< Use > Inputs
void DebugInfoCheckFailed(const Twine &Message)
A debug info check failed.
Definition Verifier.cpp:308
VerifierSupport(raw_ostream *OS, const Module &M)
Definition Verifier.cpp:157
bool Broken
Track the brokenness of the module while recursively visiting.
Definition Verifier.cpp:151
void CheckFailed(const Twine &Message, const T1 &V1, const Ts &... Vs)
A check failed (with values to print).
Definition Verifier.cpp:301
bool BrokenDebugInfo
Broken debug info can be "recovered" from by stripping the debug info.
Definition Verifier.cpp:153
LLVMContext & Context
Definition Verifier.cpp:148
bool TreatBrokenDebugInfoAsError
Whether to treat broken debug info as an error.
Definition Verifier.cpp:155
void CheckFailed(const Twine &Message)
A check failed, so printout out the condition and the message.
Definition Verifier.cpp:290
const Module & M
Definition Verifier.cpp:144
const DataLayout & DL
Definition Verifier.cpp:147
void DebugInfoCheckFailed(const Twine &Message, const T1 &V1, const Ts &... Vs)
A debug info check failed (with values to print).
Definition Verifier.cpp:317
const Triple & TT
Definition Verifier.cpp:146
ModuleSlotTracker MST
Definition Verifier.cpp:145