Verifier.cpp
1//===-- Verifier.cpp - Implement the Module Verifier -----------------------==//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file defines the function verifier interface, which can be used for some
10// basic correctness checking of input to the system.
11//
12// Note that this does not provide full 'Java-style' security and verification;
13// instead it just tries to ensure that code is well-formed.
14//
15// * Both of a binary operator's parameters are of the same type
16// * Verify that the indices of mem access instructions match other operands
17// * Verify that arithmetic and other things are only performed on first-class
18// types. Verify that shifts and logical operations only happen on integral types, for example.
19// * All of the constants in a switch statement are of the correct type
20// * The code is in valid SSA form
21// * It should be illegal to put a label into any other type (like a structure)
22// or to return one. [except constant arrays!]
23// * Only phi nodes can be self-referential: 'add i32 %0, %0 ; <int>:0' is bad
24// * PHI nodes must have an entry for each predecessor, with no extras.
25// * PHI nodes must be the first thing in a basic block, all grouped together
26// * All basic blocks should only end with terminator insts, not contain them
27// * The entry node to a function must not have predecessors
28// * All Instructions must be embedded into a basic block
29// * Functions cannot take a void-typed parameter
30// * Verify that a function's argument list agrees with its declared type.
31// * It is illegal to specify a name for a void value.
32// * It is illegal to have an internal global value with no initializer
33// * It is illegal to have a ret instruction that returns a value that does not
34// agree with the function return value type.
35// * Function call argument types match the function prototype
36// * A landing pad is defined by a landingpad instruction, and can be jumped to
37// only by the unwind edge of an invoke instruction.
38// * A landingpad instruction must be the first non-PHI instruction in the
39// block.
40// * Landingpad instructions must be in a function with a personality function.
41// * Convergence control intrinsics are introduced in ConvergentOperations.rst.
42// The applied restrictions are too numerous to list here.
43// * The convergence entry intrinsic and the loop heart must be the first
44// non-PHI instruction in their respective block. This does not conflict with
45// the landing pads, since these two kinds cannot occur in the same block.
46// * All other things that are tested by asserts spread about the code...
47//
48//===----------------------------------------------------------------------===//
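//
// As a rough usage sketch (not part of this file; the helper name below is
// made up), the entry points declared in llvm/IR/Verifier.h are typically
// driven like this:
//
//   #include "llvm/IR/Module.h"
//   #include "llvm/IR/Verifier.h"
//   #include "llvm/Support/raw_ostream.h"
//
//   // Returns true when M passes verification; diagnostics go to errs().
//   static bool moduleIsWellFormed(const llvm::Module &M) {
//     // verifyModule() returns true if the module is broken.
//     return !llvm::verifyModule(M, &llvm::errs());
//   }
//
//   // Per-function checking uses verifyFunction(F, &llvm::errs()) the same
//   // way: it also returns true when the function is broken.
//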
49
50#include "llvm/IR/Verifier.h"
51#include "llvm/ADT/APFloat.h"
52#include "llvm/ADT/APInt.h"
53#include "llvm/ADT/ArrayRef.h"
54#include "llvm/ADT/DenseMap.h"
55#include "llvm/ADT/MapVector.h"
57#include "llvm/ADT/STLExtras.h"
59#include "llvm/ADT/SmallSet.h"
62#include "llvm/ADT/StringMap.h"
63#include "llvm/ADT/StringRef.h"
64#include "llvm/ADT/Twine.h"
66#include "llvm/IR/Argument.h"
68#include "llvm/IR/Attributes.h"
69#include "llvm/IR/BasicBlock.h"
70#include "llvm/IR/CFG.h"
71#include "llvm/IR/CallingConv.h"
72#include "llvm/IR/Comdat.h"
73#include "llvm/IR/Constant.h"
75#include "llvm/IR/Constants.h"
77#include "llvm/IR/DataLayout.h"
78#include "llvm/IR/DebugInfo.h"
80#include "llvm/IR/DebugLoc.h"
82#include "llvm/IR/Dominators.h"
84#include "llvm/IR/Function.h"
85#include "llvm/IR/GCStrategy.h"
86#include "llvm/IR/GlobalAlias.h"
87#include "llvm/IR/GlobalValue.h"
89#include "llvm/IR/InlineAsm.h"
90#include "llvm/IR/InstVisitor.h"
91#include "llvm/IR/InstrTypes.h"
92#include "llvm/IR/Instruction.h"
95#include "llvm/IR/Intrinsics.h"
96#include "llvm/IR/IntrinsicsAArch64.h"
97#include "llvm/IR/IntrinsicsAMDGPU.h"
98#include "llvm/IR/IntrinsicsARM.h"
99#include "llvm/IR/IntrinsicsNVPTX.h"
100#include "llvm/IR/IntrinsicsWebAssembly.h"
101#include "llvm/IR/LLVMContext.h"
103#include "llvm/IR/Metadata.h"
104#include "llvm/IR/Module.h"
106#include "llvm/IR/PassManager.h"
107#include "llvm/IR/Statepoint.h"
108#include "llvm/IR/Type.h"
109#include "llvm/IR/Use.h"
110#include "llvm/IR/User.h"
112#include "llvm/IR/Value.h"
114#include "llvm/Pass.h"
116#include "llvm/Support/Casting.h"
120#include "llvm/Support/ModRef.h"
122#include <algorithm>
123#include <cassert>
124#include <cstdint>
125#include <memory>
126#include <optional>
127#include <string>
128#include <utility>
129
130using namespace llvm;
131
132static cl::opt<bool> VerifyNoAliasScopeDomination(
133    "verify-noalias-scope-decl-dom", cl::Hidden, cl::init(false),
134 cl::desc("Ensure that llvm.experimental.noalias.scope.decl for identical "
135 "scopes are not dominating"));
136
137namespace llvm {
138
139struct VerifierSupport {
140  raw_ostream *OS;
141  const Module &M;
142  ModuleSlotTracker MST;
143  Triple TT;
144  const DataLayout &DL;
145  LLVMContext &Context;
146
147 /// Track the brokenness of the module while recursively visiting.
148 bool Broken = false;
149 /// Broken debug info can be "recovered" from by stripping the debug info.
150 bool BrokenDebugInfo = false;
151 /// Whether to treat broken debug info as an error.
152  bool TreatBrokenDebugInfoAsError = true;
153
154  explicit VerifierSupport(raw_ostream *OS, const Module &M)
155      : OS(OS), M(M), MST(&M), TT(M.getTargetTriple()), DL(M.getDataLayout()),
156 Context(M.getContext()) {}
157
158private:
159 void Write(const Module *M) {
160 *OS << "; ModuleID = '" << M->getModuleIdentifier() << "'\n";
161 }
162
163 void Write(const Value *V) {
164 if (V)
165 Write(*V);
166 }
167
168 void Write(const Value &V) {
169 if (isa<Instruction>(V)) {
170 V.print(*OS, MST);
171 *OS << '\n';
172 } else {
173 V.printAsOperand(*OS, true, MST);
174 *OS << '\n';
175 }
176 }
177
178 void Write(const DbgRecord *DR) {
179 if (DR) {
180 DR->print(*OS, MST, false);
181 *OS << '\n';
182 }
183 }
184
185  void Write(DbgVariableRecord::LocationType Type) {
186    switch (Type) {
187    case DbgVariableRecord::LocationType::Value:
188      *OS << "value";
189      break;
190    case DbgVariableRecord::LocationType::Declare:
191      *OS << "declare";
192      break;
193    case DbgVariableRecord::LocationType::Assign:
194      *OS << "assign";
195      break;
196    case DbgVariableRecord::LocationType::End:
197      *OS << "end";
198      break;
199    case DbgVariableRecord::LocationType::Any:
200      *OS << "any";
201      break;
202    };
203 }
204
205 void Write(const Metadata *MD) {
206 if (!MD)
207 return;
208 MD->print(*OS, MST, &M);
209 *OS << '\n';
210 }
211
212 template <class T> void Write(const MDTupleTypedArrayWrapper<T> &MD) {
213 Write(MD.get());
214 }
215
216 void Write(const NamedMDNode *NMD) {
217 if (!NMD)
218 return;
219 NMD->print(*OS, MST);
220 *OS << '\n';
221 }
222
223 void Write(Type *T) {
224 if (!T)
225 return;
226 *OS << ' ' << *T;
227 }
228
229 void Write(const Comdat *C) {
230 if (!C)
231 return;
232 *OS << *C;
233 }
234
235 void Write(const APInt *AI) {
236 if (!AI)
237 return;
238 *OS << *AI << '\n';
239 }
240
241 void Write(const unsigned i) { *OS << i << '\n'; }
242
243 // NOLINTNEXTLINE(readability-identifier-naming)
244 void Write(const Attribute *A) {
245 if (!A)
246 return;
247 *OS << A->getAsString() << '\n';
248 }
249
250 // NOLINTNEXTLINE(readability-identifier-naming)
251 void Write(const AttributeSet *AS) {
252 if (!AS)
253 return;
254 *OS << AS->getAsString() << '\n';
255 }
256
257 // NOLINTNEXTLINE(readability-identifier-naming)
258 void Write(const AttributeList *AL) {
259 if (!AL)
260 return;
261 AL->print(*OS);
262 }
263
264 void Write(Printable P) { *OS << P << '\n'; }
265
266 template <typename T> void Write(ArrayRef<T> Vs) {
267 for (const T &V : Vs)
268 Write(V);
269 }
270
271 template <typename T1, typename... Ts>
272 void WriteTs(const T1 &V1, const Ts &... Vs) {
273 Write(V1);
274 WriteTs(Vs...);
275 }
276
277 template <typename... Ts> void WriteTs() {}
278
279public:
280  /// A check failed, so print out the condition and the message.
281 ///
282 /// This provides a nice place to put a breakpoint if you want to see why
283 /// something is not correct.
284 void CheckFailed(const Twine &Message) {
285 if (OS)
286 *OS << Message << '\n';
287 Broken = true;
288 }
289
290 /// A check failed (with values to print).
291 ///
292 /// This calls the Message-only version so that the above is easier to set a
293 /// breakpoint on.
294 template <typename T1, typename... Ts>
295 void CheckFailed(const Twine &Message, const T1 &V1, const Ts &... Vs) {
296 CheckFailed(Message);
297 if (OS)
298 WriteTs(V1, Vs...);
299 }
300
301 /// A debug info check failed.
302 void DebugInfoCheckFailed(const Twine &Message) {
303 if (OS)
304 *OS << Message << '\n';
305    Broken |= TreatBrokenDebugInfoAsError;
306    BrokenDebugInfo = true;
307 }
308
309 /// A debug info check failed (with values to print).
310 template <typename T1, typename... Ts>
311 void DebugInfoCheckFailed(const Twine &Message, const T1 &V1,
312 const Ts &... Vs) {
313 DebugInfoCheckFailed(Message);
314 if (OS)
315 WriteTs(V1, Vs...);
316 }
317};
318
319} // namespace llvm
320
321namespace {
322
323class Verifier : public InstVisitor<Verifier>, VerifierSupport {
324 friend class InstVisitor<Verifier>;
325
326  // ISD::ArgFlagsTy::MemAlign only has 4 bits for the alignment, so
327  // the alignment value should not exceed 2^15. Since encode(Align)
328  // adds 1 to the shift value, the alignment value should not
329  // exceed 2^14; otherwise it cannot be properly lowered
330  // in the backend.
331 static constexpr unsigned ParamMaxAlignment = 1 << 14;
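  // For illustration only (numbers follow the comment above): an alignment of
  // 1 << 14 is stored as log2(16384) + 1 == 15, the largest value that fits
  // in the 4-bit MemAlign field, whereas 1 << 15 would need 16 and could not
  // be encoded.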
332 DominatorTree DT;
333
334 /// When verifying a basic block, keep track of all of the
335 /// instructions we have seen so far.
336 ///
337 /// This allows us to do efficient dominance checks for the case when an
338 /// instruction has an operand that is an instruction in the same block.
339 SmallPtrSet<Instruction *, 16> InstsInThisBlock;
340
341 /// Keep track of the metadata nodes that have been checked already.
343
344 /// Keep track which DISubprogram is attached to which function.
346
347 /// Track all DICompileUnits visited.
349
350 /// The result type for a landingpad.
351 Type *LandingPadResultTy;
352
353 /// Whether we've seen a call to @llvm.localescape in this function
354 /// already.
355 bool SawFrameEscape;
356
357 /// Whether the current function has a DISubprogram attached to it.
358 bool HasDebugInfo = false;
359
360 /// The current source language.
362
363 /// Stores the count of how many objects were passed to llvm.localescape for a
364 /// given function and the largest index passed to llvm.localrecover.
366
367 // Maps catchswitches and cleanuppads that unwind to siblings to the
368 // terminators that indicate the unwind, used to detect cycles therein.
370
371 /// Cache which blocks are in which funclet, if an EH funclet personality is
372 /// in use. Otherwise empty.
373 DenseMap<BasicBlock *, ColorVector> BlockEHFuncletColors;
374
375 /// Cache of constants visited in search of ConstantExprs.
376 SmallPtrSet<const Constant *, 32> ConstantExprVisited;
377
378 /// Cache of declarations of the llvm.experimental.deoptimize.<ty> intrinsic.
379 SmallVector<const Function *, 4> DeoptimizeDeclarations;
380
381 /// Cache of attribute lists verified.
382 SmallPtrSet<const void *, 32> AttributeListsVisited;
383
384 // Verify that this GlobalValue is only used in this module.
385 // This map is used to avoid visiting uses twice. We can arrive at a user
386  // twice if it has multiple operands. In particular, for very large
387 // constant expressions, we can arrive at a particular user many times.
388 SmallPtrSet<const Value *, 32> GlobalValueVisited;
389
390 // Keeps track of duplicate function argument debug info.
392
393 TBAAVerifier TBAAVerifyHelper;
394 ConvergenceVerifier ConvergenceVerifyHelper;
395
396 SmallVector<IntrinsicInst *, 4> NoAliasScopeDecls;
397
398 void checkAtomicMemAccessSize(Type *Ty, const Instruction *I);
399
400public:
401 explicit Verifier(raw_ostream *OS, bool ShouldTreatBrokenDebugInfoAsError,
402 const Module &M)
403 : VerifierSupport(OS, M), LandingPadResultTy(nullptr),
404 SawFrameEscape(false), TBAAVerifyHelper(this) {
405 TreatBrokenDebugInfoAsError = ShouldTreatBrokenDebugInfoAsError;
406 }
407
408 bool hasBrokenDebugInfo() const { return BrokenDebugInfo; }
409
410 bool verify(const Function &F) {
411 assert(F.getParent() == &M &&
412 "An instance of this class only works with a specific module!");
413
414 // First ensure the function is well-enough formed to compute dominance
415 // information, and directly compute a dominance tree. We don't rely on the
416 // pass manager to provide this as it isolates us from a potentially
417 // out-of-date dominator tree and makes it significantly more complex to run
418 // this code outside of a pass manager.
419 // FIXME: It's really gross that we have to cast away constness here.
420 if (!F.empty())
421 DT.recalculate(const_cast<Function &>(F));
422
423 for (const BasicBlock &BB : F) {
424 if (!BB.empty() && BB.back().isTerminator())
425 continue;
426
427 if (OS) {
428 *OS << "Basic Block in function '" << F.getName()
429 << "' does not have terminator!\n";
430 BB.printAsOperand(*OS, true, MST);
431 *OS << "\n";
432 }
433 return false;
434 }
435
436 auto FailureCB = [this](const Twine &Message) {
437 this->CheckFailed(Message);
438 };
439 ConvergenceVerifyHelper.initialize(OS, FailureCB, F);
440
441 Broken = false;
442 // FIXME: We strip const here because the inst visitor strips const.
443 visit(const_cast<Function &>(F));
444 verifySiblingFuncletUnwinds();
445
446 if (ConvergenceVerifyHelper.sawTokens())
447 ConvergenceVerifyHelper.verify(DT);
448
449 InstsInThisBlock.clear();
450 DebugFnArgs.clear();
451 LandingPadResultTy = nullptr;
452 SawFrameEscape = false;
453 SiblingFuncletInfo.clear();
454 verifyNoAliasScopeDecl();
455 NoAliasScopeDecls.clear();
456
457 return !Broken;
458 }
459
460 /// Verify the module that this instance of \c Verifier was initialized with.
461 bool verify() {
462 Broken = false;
463
464 // Collect all declarations of the llvm.experimental.deoptimize intrinsic.
465 for (const Function &F : M)
466 if (F.getIntrinsicID() == Intrinsic::experimental_deoptimize)
467 DeoptimizeDeclarations.push_back(&F);
468
469 // Now that we've visited every function, verify that we never asked to
470 // recover a frame index that wasn't escaped.
471 verifyFrameRecoverIndices();
472 for (const GlobalVariable &GV : M.globals())
473 visitGlobalVariable(GV);
474
475 for (const GlobalAlias &GA : M.aliases())
476 visitGlobalAlias(GA);
477
478 for (const GlobalIFunc &GI : M.ifuncs())
479 visitGlobalIFunc(GI);
480
481 for (const NamedMDNode &NMD : M.named_metadata())
482 visitNamedMDNode(NMD);
483
484 for (const StringMapEntry<Comdat> &SMEC : M.getComdatSymbolTable())
485 visitComdat(SMEC.getValue());
486
487 visitModuleFlags();
488 visitModuleIdents();
489 visitModuleCommandLines();
490
491 verifyCompileUnits();
492
493 verifyDeoptimizeCallingConvs();
494 DISubprogramAttachments.clear();
495 return !Broken;
496 }
497
498private:
499 /// Whether a metadata node is allowed to be, or contain, a DILocation.
500 enum class AreDebugLocsAllowed { No, Yes };
501
502 // Verification methods...
503 void visitGlobalValue(const GlobalValue &GV);
504 void visitGlobalVariable(const GlobalVariable &GV);
505 void visitGlobalAlias(const GlobalAlias &GA);
506 void visitGlobalIFunc(const GlobalIFunc &GI);
507 void visitAliaseeSubExpr(const GlobalAlias &A, const Constant &C);
508 void visitAliaseeSubExpr(SmallPtrSetImpl<const GlobalAlias *> &Visited,
509 const GlobalAlias &A, const Constant &C);
510 void visitNamedMDNode(const NamedMDNode &NMD);
511 void visitMDNode(const MDNode &MD, AreDebugLocsAllowed AllowLocs);
512 void visitMetadataAsValue(const MetadataAsValue &MD, Function *F);
513 void visitValueAsMetadata(const ValueAsMetadata &MD, Function *F);
514 void visitDIArgList(const DIArgList &AL, Function *F);
515 void visitComdat(const Comdat &C);
516 void visitModuleIdents();
517 void visitModuleCommandLines();
518 void visitModuleFlags();
519 void visitModuleFlag(const MDNode *Op,
520                       DenseMap<const MDString *, const MDNode *> &SeenIDs,
521                       SmallVectorImpl<const MDNode *> &Requirements);
522 void visitModuleFlagCGProfileEntry(const MDOperand &MDO);
523 void visitFunction(const Function &F);
524 void visitBasicBlock(BasicBlock &BB);
525 void verifyRangeMetadata(const Value &V, const MDNode *Range, Type *Ty,
526 bool IsAbsoluteSymbol);
527 void visitRangeMetadata(Instruction &I, MDNode *Range, Type *Ty);
528 void visitDereferenceableMetadata(Instruction &I, MDNode *MD);
529 void visitProfMetadata(Instruction &I, MDNode *MD);
530 void visitCallStackMetadata(MDNode *MD);
531 void visitMemProfMetadata(Instruction &I, MDNode *MD);
532 void visitCallsiteMetadata(Instruction &I, MDNode *MD);
533 void visitDIAssignIDMetadata(Instruction &I, MDNode *MD);
534 void visitMMRAMetadata(Instruction &I, MDNode *MD);
535 void visitAnnotationMetadata(MDNode *Annotation);
536 void visitAliasScopeMetadata(const MDNode *MD);
537 void visitAliasScopeListMetadata(const MDNode *MD);
538 void visitAccessGroupMetadata(const MDNode *MD);
539
540 template <class Ty> bool isValidMetadataArray(const MDTuple &N);
541#define HANDLE_SPECIALIZED_MDNODE_LEAF(CLASS) void visit##CLASS(const CLASS &N);
542#include "llvm/IR/Metadata.def"
543 void visitDIScope(const DIScope &N);
544 void visitDIVariable(const DIVariable &N);
545 void visitDILexicalBlockBase(const DILexicalBlockBase &N);
546 void visitDITemplateParameter(const DITemplateParameter &N);
547
548 void visitTemplateParams(const MDNode &N, const Metadata &RawParams);
549
550 void visit(DbgLabelRecord &DLR);
551 void visit(DbgVariableRecord &DVR);
552  // InstVisitor overrides...
553  using InstVisitor<Verifier>::visit;
554  void visitDbgRecords(Instruction &I);
555 void visit(Instruction &I);
556
557 void visitTruncInst(TruncInst &I);
558 void visitZExtInst(ZExtInst &I);
559 void visitSExtInst(SExtInst &I);
560 void visitFPTruncInst(FPTruncInst &I);
561 void visitFPExtInst(FPExtInst &I);
562 void visitFPToUIInst(FPToUIInst &I);
563 void visitFPToSIInst(FPToSIInst &I);
564 void visitUIToFPInst(UIToFPInst &I);
565 void visitSIToFPInst(SIToFPInst &I);
566 void visitIntToPtrInst(IntToPtrInst &I);
567 void visitPtrToIntInst(PtrToIntInst &I);
568 void visitBitCastInst(BitCastInst &I);
569 void visitAddrSpaceCastInst(AddrSpaceCastInst &I);
570 void visitPHINode(PHINode &PN);
571 void visitCallBase(CallBase &Call);
572 void visitUnaryOperator(UnaryOperator &U);
573 void visitBinaryOperator(BinaryOperator &B);
574 void visitICmpInst(ICmpInst &IC);
575 void visitFCmpInst(FCmpInst &FC);
576 void visitExtractElementInst(ExtractElementInst &EI);
577 void visitInsertElementInst(InsertElementInst &EI);
578 void visitShuffleVectorInst(ShuffleVectorInst &EI);
579 void visitVAArgInst(VAArgInst &VAA) { visitInstruction(VAA); }
580 void visitCallInst(CallInst &CI);
581 void visitInvokeInst(InvokeInst &II);
582 void visitGetElementPtrInst(GetElementPtrInst &GEP);
583 void visitLoadInst(LoadInst &LI);
584 void visitStoreInst(StoreInst &SI);
585 void verifyDominatesUse(Instruction &I, unsigned i);
586 void visitInstruction(Instruction &I);
587 void visitTerminator(Instruction &I);
588 void visitBranchInst(BranchInst &BI);
589 void visitReturnInst(ReturnInst &RI);
590 void visitSwitchInst(SwitchInst &SI);
591 void visitIndirectBrInst(IndirectBrInst &BI);
592 void visitCallBrInst(CallBrInst &CBI);
593 void visitSelectInst(SelectInst &SI);
594 void visitUserOp1(Instruction &I);
595 void visitUserOp2(Instruction &I) { visitUserOp1(I); }
596 void visitIntrinsicCall(Intrinsic::ID ID, CallBase &Call);
597 void visitConstrainedFPIntrinsic(ConstrainedFPIntrinsic &FPI);
598 void visitVPIntrinsic(VPIntrinsic &VPI);
599 void visitDbgIntrinsic(StringRef Kind, DbgVariableIntrinsic &DII);
600 void visitDbgLabelIntrinsic(StringRef Kind, DbgLabelInst &DLI);
601 void visitAtomicCmpXchgInst(AtomicCmpXchgInst &CXI);
602 void visitAtomicRMWInst(AtomicRMWInst &RMWI);
603 void visitFenceInst(FenceInst &FI);
604 void visitAllocaInst(AllocaInst &AI);
605 void visitExtractValueInst(ExtractValueInst &EVI);
606 void visitInsertValueInst(InsertValueInst &IVI);
607 void visitEHPadPredecessors(Instruction &I);
608 void visitLandingPadInst(LandingPadInst &LPI);
609 void visitResumeInst(ResumeInst &RI);
610 void visitCatchPadInst(CatchPadInst &CPI);
611 void visitCatchReturnInst(CatchReturnInst &CatchReturn);
612 void visitCleanupPadInst(CleanupPadInst &CPI);
613 void visitFuncletPadInst(FuncletPadInst &FPI);
614 void visitCatchSwitchInst(CatchSwitchInst &CatchSwitch);
615 void visitCleanupReturnInst(CleanupReturnInst &CRI);
616
617 void verifySwiftErrorCall(CallBase &Call, const Value *SwiftErrorVal);
618 void verifySwiftErrorValue(const Value *SwiftErrorVal);
619 void verifyTailCCMustTailAttrs(const AttrBuilder &Attrs, StringRef Context);
620 void verifyMustTailCall(CallInst &CI);
621 bool verifyAttributeCount(AttributeList Attrs, unsigned Params);
622 void verifyAttributeTypes(AttributeSet Attrs, const Value *V);
623 void verifyParameterAttrs(AttributeSet Attrs, Type *Ty, const Value *V);
624 void checkUnsignedBaseTenFuncAttr(AttributeList Attrs, StringRef Attr,
625 const Value *V);
626 void verifyFunctionAttrs(FunctionType *FT, AttributeList Attrs,
627 const Value *V, bool IsIntrinsic, bool IsInlineAsm);
628 void verifyFunctionMetadata(ArrayRef<std::pair<unsigned, MDNode *>> MDs);
629
630 void visitConstantExprsRecursively(const Constant *EntryC);
631 void visitConstantExpr(const ConstantExpr *CE);
632 void verifyInlineAsmCall(const CallBase &Call);
633 void verifyStatepoint(const CallBase &Call);
634 void verifyFrameRecoverIndices();
635 void verifySiblingFuncletUnwinds();
636
637 void verifyFragmentExpression(const DbgVariableIntrinsic &I);
638 void verifyFragmentExpression(const DbgVariableRecord &I);
639 template <typename ValueOrMetadata>
640 void verifyFragmentExpression(const DIVariable &V,
641                                DIExpression::FragmentInfo Fragment,
642                                ValueOrMetadata *Desc);
643 void verifyFnArgs(const DbgVariableIntrinsic &I);
644 void verifyFnArgs(const DbgVariableRecord &DVR);
645 void verifyNotEntryValue(const DbgVariableIntrinsic &I);
646 void verifyNotEntryValue(const DbgVariableRecord &I);
647
648 /// Module-level debug info verification...
649 void verifyCompileUnits();
650
651 /// Module-level verification that all @llvm.experimental.deoptimize
652 /// declarations share the same calling convention.
653 void verifyDeoptimizeCallingConvs();
654
655 void verifyAttachedCallBundle(const CallBase &Call,
656 const OperandBundleUse &BU);
657
658 /// Verify the llvm.experimental.noalias.scope.decl declarations
659 void verifyNoAliasScopeDecl();
660};
661
662} // end anonymous namespace
663
664/// We know that cond should be true; if not, print an error message.
665#define Check(C, ...) \
666 do { \
667 if (!(C)) { \
668 CheckFailed(__VA_ARGS__); \
669 return; \
670 } \
671 } while (false)
672
673/// We know that a debug info condition should be true; if not, print
674/// an error message.
675#define CheckDI(C, ...) \
676 do { \
677 if (!(C)) { \
678 DebugInfoCheckFailed(__VA_ARGS__); \
679 return; \
680 } \
681 } while (false)
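// As a usage sketch, visitor methods later in this file invoke these macros
// in the following form (both lines are copied from checks below):
//
//   Check(I.getOperand(i) != nullptr, "Operand is null", &I);
//   CheckDI(N.isDistinct(), "compile units must be distinct", &N);
//
// On failure the message (plus any extra values) is recorded via
// CheckFailed/DebugInfoCheckFailed and the enclosing method returns early.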
682
683void Verifier::visitDbgRecords(Instruction &I) {
684 if (!I.DebugMarker)
685 return;
686 CheckDI(I.DebugMarker->MarkedInstr == &I,
687 "Instruction has invalid DebugMarker", &I);
688 CheckDI(!isa<PHINode>(&I) || !I.hasDbgRecords(),
689 "PHI Node must not have any attached DbgRecords", &I);
690 for (DbgRecord &DR : I.getDbgRecordRange()) {
691 CheckDI(DR.getMarker() == I.DebugMarker,
692 "DbgRecord had invalid DebugMarker", &I, &DR);
693 if (auto *Loc =
694 dyn_cast_or_null<DILocation>(DR.getDebugLoc().getAsMDNode()))
695 visitMDNode(*Loc, AreDebugLocsAllowed::Yes);
696 if (auto *DVR = dyn_cast<DbgVariableRecord>(&DR)) {
697 visit(*DVR);
698 // These have to appear after `visit` for consistency with existing
699 // intrinsic behaviour.
700 verifyFragmentExpression(*DVR);
701 verifyNotEntryValue(*DVR);
702 } else if (auto *DLR = dyn_cast<DbgLabelRecord>(&DR)) {
703 visit(*DLR);
704 }
705 }
706}
707
708void Verifier::visit(Instruction &I) {
709 visitDbgRecords(I);
710 for (unsigned i = 0, e = I.getNumOperands(); i != e; ++i)
711    Check(I.getOperand(i) != nullptr, "Operand is null", &I);
712  InstVisitor<Verifier>::visit(I);
713}
714
715// Helper to iterate over indirect users. By returning false, the callback can ask to stop traversing further.
716static void forEachUser(const Value *User,
717                        SmallPtrSet<const Value *, 32> &Visited,
718                        llvm::function_ref<bool(const Value *)> Callback) {
719 if (!Visited.insert(User).second)
720 return;
721
722  SmallVector<const Value *> WorkList;
723  append_range(WorkList, User->materialized_users());
724  while (!WorkList.empty()) {
725 const Value *Cur = WorkList.pop_back_val();
726 if (!Visited.insert(Cur).second)
727 continue;
728 if (Callback(Cur))
729 append_range(WorkList, Cur->materialized_users());
730 }
731}
732
733void Verifier::visitGlobalValue(const GlobalValue &GV) {
734  Check(!GV.isDeclaration() || GV.hasValidDeclarationLinkage(),
735        "Global is external, but doesn't have external or weak linkage!", &GV);
736
737 if (const GlobalObject *GO = dyn_cast<GlobalObject>(&GV)) {
738
739 if (MaybeAlign A = GO->getAlign()) {
740 Check(A->value() <= Value::MaximumAlignment,
741 "huge alignment values are unsupported", GO);
742 }
743
744 if (const MDNode *Associated =
745 GO->getMetadata(LLVMContext::MD_associated)) {
746 Check(Associated->getNumOperands() == 1,
747 "associated metadata must have one operand", &GV, Associated);
748 const Metadata *Op = Associated->getOperand(0).get();
749 Check(Op, "associated metadata must have a global value", GO, Associated);
750
751 const auto *VM = dyn_cast_or_null<ValueAsMetadata>(Op);
752 Check(VM, "associated metadata must be ValueAsMetadata", GO, Associated);
753 if (VM) {
754 Check(isa<PointerType>(VM->getValue()->getType()),
755 "associated value must be pointer typed", GV, Associated);
756
757 const Value *Stripped = VM->getValue()->stripPointerCastsAndAliases();
758 Check(isa<GlobalObject>(Stripped) || isa<Constant>(Stripped),
759 "associated metadata must point to a GlobalObject", GO, Stripped);
760 Check(Stripped != GO,
761 "global values should not associate to themselves", GO,
762 Associated);
763 }
764 }
765
766 // FIXME: Why is getMetadata on GlobalValue protected?
767 if (const MDNode *AbsoluteSymbol =
768 GO->getMetadata(LLVMContext::MD_absolute_symbol)) {
769 verifyRangeMetadata(*GO, AbsoluteSymbol, DL.getIntPtrType(GO->getType()),
770 true);
771 }
772 }
773
774 Check(!GV.hasAppendingLinkage() || isa<GlobalVariable>(GV),
775 "Only global variables can have appending linkage!", &GV);
776
777 if (GV.hasAppendingLinkage()) {
778 const GlobalVariable *GVar = dyn_cast<GlobalVariable>(&GV);
779 Check(GVar && GVar->getValueType()->isArrayTy(),
780 "Only global arrays can have appending linkage!", GVar);
781 }
782
783 if (GV.isDeclarationForLinker())
784 Check(!GV.hasComdat(), "Declaration may not be in a Comdat!", &GV);
785
786 if (GV.hasDLLExportStorageClass()) {
788 "dllexport GlobalValue must have default or protected visibility",
789 &GV);
790 }
791 if (GV.hasDLLImportStorageClass()) {
793 "dllimport GlobalValue must have default visibility", &GV);
794 Check(!GV.isDSOLocal(), "GlobalValue with DLLImport Storage is dso_local!",
795 &GV);
796
797 Check((GV.isDeclaration() &&
800 "Global is marked as dllimport, but not external", &GV);
801 }
802
803 if (GV.isImplicitDSOLocal())
804 Check(GV.isDSOLocal(),
805 "GlobalValue with local linkage or non-default "
806 "visibility must be dso_local!",
807 &GV);
808
809 forEachUser(&GV, GlobalValueVisited, [&](const Value *V) -> bool {
810 if (const Instruction *I = dyn_cast<Instruction>(V)) {
811 if (!I->getParent() || !I->getParent()->getParent())
812 CheckFailed("Global is referenced by parentless instruction!", &GV, &M,
813 I);
814 else if (I->getParent()->getParent()->getParent() != &M)
815 CheckFailed("Global is referenced in a different module!", &GV, &M, I,
816 I->getParent()->getParent(),
817 I->getParent()->getParent()->getParent());
818 return false;
819 } else if (const Function *F = dyn_cast<Function>(V)) {
820 if (F->getParent() != &M)
821 CheckFailed("Global is used by function in a different module", &GV, &M,
822 F, F->getParent());
823 return false;
824 }
825 return true;
826 });
827}
828
829void Verifier::visitGlobalVariable(const GlobalVariable &GV) {
830 if (GV.hasInitializer()) {
831    Check(GV.getInitializer()->getType() == GV.getValueType(),
832          "Global variable initializer type does not match global "
833 "variable type!",
834 &GV);
835 // If the global has common linkage, it must have a zero initializer and
836 // cannot be constant.
837 if (GV.hasCommonLinkage()) {
838      Check(GV.getInitializer()->isNullValue(),
839            "'common' global must have a zero initializer!", &GV);
840 Check(!GV.isConstant(), "'common' global may not be marked constant!",
841 &GV);
842 Check(!GV.hasComdat(), "'common' global may not be in a Comdat!", &GV);
843 }
844 }
845
846 if (GV.hasName() && (GV.getName() == "llvm.global_ctors" ||
847 GV.getName() == "llvm.global_dtors")) {
849 "invalid linkage for intrinsic global variable", &GV);
851 "invalid uses of intrinsic global variable", &GV);
852
853    // Don't worry about emitting an error for it not being an array;
854    // visitGlobalValue will complain about appending linkage on a non-array.
855 if (ArrayType *ATy = dyn_cast<ArrayType>(GV.getValueType())) {
856 StructType *STy = dyn_cast<StructType>(ATy->getElementType());
857 PointerType *FuncPtrTy =
858 PointerType::get(Context, DL.getProgramAddressSpace());
859 Check(STy && (STy->getNumElements() == 2 || STy->getNumElements() == 3) &&
860 STy->getTypeAtIndex(0u)->isIntegerTy(32) &&
861 STy->getTypeAtIndex(1) == FuncPtrTy,
862 "wrong type for intrinsic global variable", &GV);
863 Check(STy->getNumElements() == 3,
864 "the third field of the element type is mandatory, "
865 "specify ptr null to migrate from the obsoleted 2-field form");
866 Type *ETy = STy->getTypeAtIndex(2);
867 Check(ETy->isPointerTy(), "wrong type for intrinsic global variable",
868 &GV);
869 }
870 }
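  // The struct checks above accept entries of the following shape; this
  // example is illustrative only (@ctor is a hypothetical constructor):
  //
  //   @llvm.global_ctors = appending global [1 x { i32, ptr, ptr }]
  //       [{ i32, ptr, ptr } { i32 65535, ptr @ctor, ptr null }]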
871
872 if (GV.hasName() && (GV.getName() == "llvm.used" ||
873 GV.getName() == "llvm.compiler.used")) {
875 "invalid linkage for intrinsic global variable", &GV);
877 "invalid uses of intrinsic global variable", &GV);
878
879 Type *GVType = GV.getValueType();
880 if (ArrayType *ATy = dyn_cast<ArrayType>(GVType)) {
881 PointerType *PTy = dyn_cast<PointerType>(ATy->getElementType());
882 Check(PTy, "wrong type for intrinsic global variable", &GV);
883 if (GV.hasInitializer()) {
884 const Constant *Init = GV.getInitializer();
885 const ConstantArray *InitArray = dyn_cast<ConstantArray>(Init);
886        Check(InitArray, "wrong initializer for intrinsic global variable",
887 Init);
888 for (Value *Op : InitArray->operands()) {
889 Value *V = Op->stripPointerCasts();
890 Check(isa<GlobalVariable>(V) || isa<Function>(V) ||
891 isa<GlobalAlias>(V),
892 Twine("invalid ") + GV.getName() + " member", V);
893 Check(V->hasName(),
894 Twine("members of ") + GV.getName() + " must be named", V);
895 }
896 }
897 }
898 }
899
900 // Visit any debug info attachments.
901  SmallVector<MDNode *, 1> MDs;
902  GV.getMetadata(LLVMContext::MD_dbg, MDs);
903 for (auto *MD : MDs) {
904 if (auto *GVE = dyn_cast<DIGlobalVariableExpression>(MD))
905 visitDIGlobalVariableExpression(*GVE);
906 else
907 CheckDI(false, "!dbg attachment of global variable must be a "
908 "DIGlobalVariableExpression");
909 }
910
911 // Scalable vectors cannot be global variables, since we don't know
912 // the runtime size.
914 "Globals cannot contain scalable types", &GV);
915
916 // Check if it's a target extension type that disallows being used as a
917 // global.
918 if (auto *TTy = dyn_cast<TargetExtType>(GV.getValueType()))
919 Check(TTy->hasProperty(TargetExtType::CanBeGlobal),
920 "Global @" + GV.getName() + " has illegal target extension type",
921 TTy);
922
923 if (!GV.hasInitializer()) {
924 visitGlobalValue(GV);
925 return;
926 }
927
928 // Walk any aggregate initializers looking for bitcasts between address spaces
929 visitConstantExprsRecursively(GV.getInitializer());
930
931 visitGlobalValue(GV);
932}
933
934void Verifier::visitAliaseeSubExpr(const GlobalAlias &GA, const Constant &C) {
935  SmallPtrSet<const GlobalAlias *, 4> Visited;
936  Visited.insert(&GA);
937 visitAliaseeSubExpr(Visited, GA, C);
938}
939
940void Verifier::visitAliaseeSubExpr(SmallPtrSetImpl<const GlobalAlias*> &Visited,
941 const GlobalAlias &GA, const Constant &C) {
942  if (GA.hasAvailableExternallyLinkage()) {
943    Check(isa<GlobalValue>(C) &&
944 cast<GlobalValue>(C).hasAvailableExternallyLinkage(),
945 "available_externally alias must point to available_externally "
946 "global value",
947 &GA);
948 }
949 if (const auto *GV = dyn_cast<GlobalValue>(&C)) {
950    if (!GA.hasAvailableExternallyLinkage()) {
951      Check(!GV->isDeclarationForLinker(), "Alias must point to a definition",
952 &GA);
953 }
954
955 if (const auto *GA2 = dyn_cast<GlobalAlias>(GV)) {
956 Check(Visited.insert(GA2).second, "Aliases cannot form a cycle", &GA);
957
958 Check(!GA2->isInterposable(),
959 "Alias cannot point to an interposable alias", &GA);
960 } else {
961 // Only continue verifying subexpressions of GlobalAliases.
962 // Do not recurse into global initializers.
963 return;
964 }
965 }
966
967 if (const auto *CE = dyn_cast<ConstantExpr>(&C))
968 visitConstantExprsRecursively(CE);
969
970 for (const Use &U : C.operands()) {
971 Value *V = &*U;
972 if (const auto *GA2 = dyn_cast<GlobalAlias>(V))
973 visitAliaseeSubExpr(Visited, GA, *GA2->getAliasee());
974 else if (const auto *C2 = dyn_cast<Constant>(V))
975 visitAliaseeSubExpr(Visited, GA, *C2);
976 }
977}
978
979void Verifier::visitGlobalAlias(const GlobalAlias &GA) {
980  Check(GlobalAlias::isValidLinkage(GA.getLinkage()),
981        "Alias should have private, internal, linkonce, weak, linkonce_odr, "
982 "weak_odr, external, or available_externally linkage!",
983 &GA);
984 const Constant *Aliasee = GA.getAliasee();
985 Check(Aliasee, "Aliasee cannot be NULL!", &GA);
986 Check(GA.getType() == Aliasee->getType(),
987 "Alias and aliasee types should match!", &GA);
988
989 Check(isa<GlobalValue>(Aliasee) || isa<ConstantExpr>(Aliasee),
990 "Aliasee should be either GlobalValue or ConstantExpr", &GA);
991
992 visitAliaseeSubExpr(GA, *Aliasee);
993
994 visitGlobalValue(GA);
995}
996
997void Verifier::visitGlobalIFunc(const GlobalIFunc &GI) {
998  Check(GlobalIFunc::isValidLinkage(GI.getLinkage()),
999        "IFunc should have private, internal, linkonce, weak, linkonce_odr, "
1000 "weak_odr, or external linkage!",
1001 &GI);
1002 // Pierce through ConstantExprs and GlobalAliases and check that the resolver
1003 // is a Function definition.
1004  const Function *Resolver = GI.getResolverFunction();
1005  Check(Resolver, "IFunc must have a Function resolver", &GI);
1006 Check(!Resolver->isDeclarationForLinker(),
1007 "IFunc resolver must be a definition", &GI);
1008
1009 // Check that the immediate resolver operand (prior to any bitcasts) has the
1010 // correct type.
1011 const Type *ResolverTy = GI.getResolver()->getType();
1012
1013 Check(isa<PointerType>(Resolver->getFunctionType()->getReturnType()),
1014 "IFunc resolver must return a pointer", &GI);
1015
1016 const Type *ResolverFuncTy =
1017      GlobalIFunc::getResolverFunctionType(GI.getValueType());
1018  Check(ResolverTy == ResolverFuncTy->getPointerTo(GI.getAddressSpace()),
1019 "IFunc resolver has incorrect type", &GI);
1020}
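// An ifunc accepted by the checks above looks like this in textual IR; the
// names are illustrative only:
//
//   @fast_memcpy = ifunc void (ptr, ptr, i64), ptr @memcpy_resolver
//
// where @memcpy_resolver is a function definition in the same module whose
// return type is a pointer.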
1021
1022void Verifier::visitNamedMDNode(const NamedMDNode &NMD) {
1023 // There used to be various other llvm.dbg.* nodes, but we don't support
1024 // upgrading them and we want to reserve the namespace for future uses.
1025 if (NMD.getName().starts_with("llvm.dbg."))
1026 CheckDI(NMD.getName() == "llvm.dbg.cu",
1027 "unrecognized named metadata node in the llvm.dbg namespace", &NMD);
1028 for (const MDNode *MD : NMD.operands()) {
1029 if (NMD.getName() == "llvm.dbg.cu")
1030 CheckDI(MD && isa<DICompileUnit>(MD), "invalid compile unit", &NMD, MD);
1031
1032 if (!MD)
1033 continue;
1034
1035 visitMDNode(*MD, AreDebugLocsAllowed::Yes);
1036 }
1037}
1038
1039void Verifier::visitMDNode(const MDNode &MD, AreDebugLocsAllowed AllowLocs) {
1040 // Only visit each node once. Metadata can be mutually recursive, so this
1041 // avoids infinite recursion here, as well as being an optimization.
1042 if (!MDNodes.insert(&MD).second)
1043 return;
1044
1045 Check(&MD.getContext() == &Context,
1046 "MDNode context does not match Module context!", &MD);
1047
1048 switch (MD.getMetadataID()) {
1049 default:
1050 llvm_unreachable("Invalid MDNode subclass");
1051 case Metadata::MDTupleKind:
1052 break;
1053#define HANDLE_SPECIALIZED_MDNODE_LEAF(CLASS) \
1054 case Metadata::CLASS##Kind: \
1055 visit##CLASS(cast<CLASS>(MD)); \
1056 break;
1057#include "llvm/IR/Metadata.def"
1058 }
1059
1060 for (const Metadata *Op : MD.operands()) {
1061 if (!Op)
1062 continue;
1063 Check(!isa<LocalAsMetadata>(Op), "Invalid operand for global metadata!",
1064 &MD, Op);
1065 CheckDI(!isa<DILocation>(Op) || AllowLocs == AreDebugLocsAllowed::Yes,
1066 "DILocation not allowed within this metadata node", &MD, Op);
1067 if (auto *N = dyn_cast<MDNode>(Op)) {
1068 visitMDNode(*N, AllowLocs);
1069 continue;
1070 }
1071 if (auto *V = dyn_cast<ValueAsMetadata>(Op)) {
1072 visitValueAsMetadata(*V, nullptr);
1073 continue;
1074 }
1075 }
1076
1077 // Check these last, so we diagnose problems in operands first.
1078 Check(!MD.isTemporary(), "Expected no forward declarations!", &MD);
1079 Check(MD.isResolved(), "All nodes should be resolved!", &MD);
1080}
1081
1082void Verifier::visitValueAsMetadata(const ValueAsMetadata &MD, Function *F) {
1083 Check(MD.getValue(), "Expected valid value", &MD);
1084 Check(!MD.getValue()->getType()->isMetadataTy(),
1085 "Unexpected metadata round-trip through values", &MD, MD.getValue());
1086
1087 auto *L = dyn_cast<LocalAsMetadata>(&MD);
1088 if (!L)
1089 return;
1090
1091 Check(F, "function-local metadata used outside a function", L);
1092
1093 // If this was an instruction, bb, or argument, verify that it is in the
1094 // function that we expect.
1095 Function *ActualF = nullptr;
1096 if (Instruction *I = dyn_cast<Instruction>(L->getValue())) {
1097 Check(I->getParent(), "function-local metadata not in basic block", L, I);
1098 ActualF = I->getParent()->getParent();
1099 } else if (BasicBlock *BB = dyn_cast<BasicBlock>(L->getValue()))
1100 ActualF = BB->getParent();
1101 else if (Argument *A = dyn_cast<Argument>(L->getValue()))
1102 ActualF = A->getParent();
1103 assert(ActualF && "Unimplemented function local metadata case!");
1104
1105 Check(ActualF == F, "function-local metadata used in wrong function", L);
1106}
1107
1108void Verifier::visitDIArgList(const DIArgList &AL, Function *F) {
1109 for (const ValueAsMetadata *VAM : AL.getArgs())
1110 visitValueAsMetadata(*VAM, F);
1111}
1112
1113void Verifier::visitMetadataAsValue(const MetadataAsValue &MDV, Function *F) {
1114 Metadata *MD = MDV.getMetadata();
1115 if (auto *N = dyn_cast<MDNode>(MD)) {
1116 visitMDNode(*N, AreDebugLocsAllowed::No);
1117 return;
1118 }
1119
1120 // Only visit each node once. Metadata can be mutually recursive, so this
1121 // avoids infinite recursion here, as well as being an optimization.
1122 if (!MDNodes.insert(MD).second)
1123 return;
1124
1125 if (auto *V = dyn_cast<ValueAsMetadata>(MD))
1126 visitValueAsMetadata(*V, F);
1127
1128 if (auto *AL = dyn_cast<DIArgList>(MD))
1129 visitDIArgList(*AL, F);
1130}
1131
1132static bool isType(const Metadata *MD) { return !MD || isa<DIType>(MD); }
1133static bool isScope(const Metadata *MD) { return !MD || isa<DIScope>(MD); }
1134static bool isDINode(const Metadata *MD) { return !MD || isa<DINode>(MD); }
1135
1136void Verifier::visitDILocation(const DILocation &N) {
1137 CheckDI(N.getRawScope() && isa<DILocalScope>(N.getRawScope()),
1138 "location requires a valid scope", &N, N.getRawScope());
1139 if (auto *IA = N.getRawInlinedAt())
1140 CheckDI(isa<DILocation>(IA), "inlined-at should be a location", &N, IA);
1141 if (auto *SP = dyn_cast<DISubprogram>(N.getRawScope()))
1142 CheckDI(SP->isDefinition(), "scope points into the type hierarchy", &N);
1143}
1144
1145void Verifier::visitGenericDINode(const GenericDINode &N) {
1146 CheckDI(N.getTag(), "invalid tag", &N);
1147}
1148
1149void Verifier::visitDIScope(const DIScope &N) {
1150 if (auto *F = N.getRawFile())
1151 CheckDI(isa<DIFile>(F), "invalid file", &N, F);
1152}
1153
1154void Verifier::visitDISubrange(const DISubrange &N) {
1155 CheckDI(N.getTag() == dwarf::DW_TAG_subrange_type, "invalid tag", &N);
1156 bool HasAssumedSizedArraySupport = dwarf::isFortran(CurrentSourceLang);
1157 CheckDI(HasAssumedSizedArraySupport || N.getRawCountNode() ||
1158 N.getRawUpperBound(),
1159 "Subrange must contain count or upperBound", &N);
1160 CheckDI(!N.getRawCountNode() || !N.getRawUpperBound(),
1161 "Subrange can have any one of count or upperBound", &N);
1162 auto *CBound = N.getRawCountNode();
1163 CheckDI(!CBound || isa<ConstantAsMetadata>(CBound) ||
1164 isa<DIVariable>(CBound) || isa<DIExpression>(CBound),
1165 "Count must be signed constant or DIVariable or DIExpression", &N);
1166 auto Count = N.getCount();
1167 CheckDI(!Count || !isa<ConstantInt *>(Count) ||
1168 cast<ConstantInt *>(Count)->getSExtValue() >= -1,
1169 "invalid subrange count", &N);
1170 auto *LBound = N.getRawLowerBound();
1171 CheckDI(!LBound || isa<ConstantAsMetadata>(LBound) ||
1172 isa<DIVariable>(LBound) || isa<DIExpression>(LBound),
1173 "LowerBound must be signed constant or DIVariable or DIExpression",
1174 &N);
1175 auto *UBound = N.getRawUpperBound();
1176 CheckDI(!UBound || isa<ConstantAsMetadata>(UBound) ||
1177 isa<DIVariable>(UBound) || isa<DIExpression>(UBound),
1178 "UpperBound must be signed constant or DIVariable or DIExpression",
1179 &N);
1180 auto *Stride = N.getRawStride();
1181 CheckDI(!Stride || isa<ConstantAsMetadata>(Stride) ||
1182 isa<DIVariable>(Stride) || isa<DIExpression>(Stride),
1183 "Stride must be signed constant or DIVariable or DIExpression", &N);
1184}
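// For reference, a subrange that satisfies the checks above is written in
// textual IR as, e.g. (illustrative):
//
//   !5 = !DISubrange(count: 16, lowerBound: 0)
//
// with either a count or an upperBound (but not both), each given as a
// constant, DIVariable, or DIExpression.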
1185
1186void Verifier::visitDIGenericSubrange(const DIGenericSubrange &N) {
1187 CheckDI(N.getTag() == dwarf::DW_TAG_generic_subrange, "invalid tag", &N);
1188 CheckDI(N.getRawCountNode() || N.getRawUpperBound(),
1189 "GenericSubrange must contain count or upperBound", &N);
1190 CheckDI(!N.getRawCountNode() || !N.getRawUpperBound(),
1191 "GenericSubrange can have any one of count or upperBound", &N);
1192 auto *CBound = N.getRawCountNode();
1193 CheckDI(!CBound || isa<DIVariable>(CBound) || isa<DIExpression>(CBound),
1194 "Count must be signed constant or DIVariable or DIExpression", &N);
1195 auto *LBound = N.getRawLowerBound();
1196 CheckDI(LBound, "GenericSubrange must contain lowerBound", &N);
1197 CheckDI(isa<DIVariable>(LBound) || isa<DIExpression>(LBound),
1198 "LowerBound must be signed constant or DIVariable or DIExpression",
1199 &N);
1200 auto *UBound = N.getRawUpperBound();
1201 CheckDI(!UBound || isa<DIVariable>(UBound) || isa<DIExpression>(UBound),
1202 "UpperBound must be signed constant or DIVariable or DIExpression",
1203 &N);
1204 auto *Stride = N.getRawStride();
1205 CheckDI(Stride, "GenericSubrange must contain stride", &N);
1206 CheckDI(isa<DIVariable>(Stride) || isa<DIExpression>(Stride),
1207 "Stride must be signed constant or DIVariable or DIExpression", &N);
1208}
1209
1210void Verifier::visitDIEnumerator(const DIEnumerator &N) {
1211 CheckDI(N.getTag() == dwarf::DW_TAG_enumerator, "invalid tag", &N);
1212}
1213
1214void Verifier::visitDIBasicType(const DIBasicType &N) {
1215 CheckDI(N.getTag() == dwarf::DW_TAG_base_type ||
1216 N.getTag() == dwarf::DW_TAG_unspecified_type ||
1217 N.getTag() == dwarf::DW_TAG_string_type,
1218 "invalid tag", &N);
1219}
1220
1221void Verifier::visitDIStringType(const DIStringType &N) {
1222 CheckDI(N.getTag() == dwarf::DW_TAG_string_type, "invalid tag", &N);
1223 CheckDI(!(N.isBigEndian() && N.isLittleEndian()), "has conflicting flags",
1224 &N);
1225}
1226
1227void Verifier::visitDIDerivedType(const DIDerivedType &N) {
1228 // Common scope checks.
1229 visitDIScope(N);
1230
1231 CheckDI(N.getTag() == dwarf::DW_TAG_typedef ||
1232 N.getTag() == dwarf::DW_TAG_pointer_type ||
1233 N.getTag() == dwarf::DW_TAG_ptr_to_member_type ||
1234 N.getTag() == dwarf::DW_TAG_reference_type ||
1235 N.getTag() == dwarf::DW_TAG_rvalue_reference_type ||
1236 N.getTag() == dwarf::DW_TAG_const_type ||
1237 N.getTag() == dwarf::DW_TAG_immutable_type ||
1238 N.getTag() == dwarf::DW_TAG_volatile_type ||
1239 N.getTag() == dwarf::DW_TAG_restrict_type ||
1240 N.getTag() == dwarf::DW_TAG_atomic_type ||
1241 N.getTag() == dwarf::DW_TAG_LLVM_ptrauth_type ||
1242 N.getTag() == dwarf::DW_TAG_member ||
1243 (N.getTag() == dwarf::DW_TAG_variable && N.isStaticMember()) ||
1244 N.getTag() == dwarf::DW_TAG_inheritance ||
1245 N.getTag() == dwarf::DW_TAG_friend ||
1246 N.getTag() == dwarf::DW_TAG_set_type ||
1247 N.getTag() == dwarf::DW_TAG_template_alias,
1248 "invalid tag", &N);
1249 if (N.getTag() == dwarf::DW_TAG_ptr_to_member_type) {
1250 CheckDI(isType(N.getRawExtraData()), "invalid pointer to member type", &N,
1251 N.getRawExtraData());
1252 }
1253
1254 if (N.getTag() == dwarf::DW_TAG_set_type) {
1255 if (auto *T = N.getRawBaseType()) {
1256 auto *Enum = dyn_cast_or_null<DICompositeType>(T);
1257 auto *Basic = dyn_cast_or_null<DIBasicType>(T);
1258 CheckDI(
1259 (Enum && Enum->getTag() == dwarf::DW_TAG_enumeration_type) ||
1260 (Basic && (Basic->getEncoding() == dwarf::DW_ATE_unsigned ||
1261 Basic->getEncoding() == dwarf::DW_ATE_signed ||
1262 Basic->getEncoding() == dwarf::DW_ATE_unsigned_char ||
1263 Basic->getEncoding() == dwarf::DW_ATE_signed_char ||
1264 Basic->getEncoding() == dwarf::DW_ATE_boolean)),
1265 "invalid set base type", &N, T);
1266 }
1267 }
1268
1269 CheckDI(isScope(N.getRawScope()), "invalid scope", &N, N.getRawScope());
1270 CheckDI(isType(N.getRawBaseType()), "invalid base type", &N,
1271 N.getRawBaseType());
1272
1273 if (N.getDWARFAddressSpace()) {
1274 CheckDI(N.getTag() == dwarf::DW_TAG_pointer_type ||
1275 N.getTag() == dwarf::DW_TAG_reference_type ||
1276 N.getTag() == dwarf::DW_TAG_rvalue_reference_type,
1277 "DWARF address space only applies to pointer or reference types",
1278 &N);
1279 }
1280}
1281
1282/// Detect mutually exclusive flags.
1283static bool hasConflictingReferenceFlags(unsigned Flags) {
1284 return ((Flags & DINode::FlagLValueReference) &&
1285 (Flags & DINode::FlagRValueReference)) ||
1286 ((Flags & DINode::FlagTypePassByValue) &&
1287 (Flags & DINode::FlagTypePassByReference));
1288}
1289
1290void Verifier::visitTemplateParams(const MDNode &N, const Metadata &RawParams) {
1291 auto *Params = dyn_cast<MDTuple>(&RawParams);
1292 CheckDI(Params, "invalid template params", &N, &RawParams);
1293 for (Metadata *Op : Params->operands()) {
1294 CheckDI(Op && isa<DITemplateParameter>(Op), "invalid template parameter",
1295 &N, Params, Op);
1296 }
1297}
1298
1299void Verifier::visitDICompositeType(const DICompositeType &N) {
1300 // Common scope checks.
1301 visitDIScope(N);
1302
1303 CheckDI(N.getTag() == dwarf::DW_TAG_array_type ||
1304 N.getTag() == dwarf::DW_TAG_structure_type ||
1305 N.getTag() == dwarf::DW_TAG_union_type ||
1306 N.getTag() == dwarf::DW_TAG_enumeration_type ||
1307 N.getTag() == dwarf::DW_TAG_class_type ||
1308 N.getTag() == dwarf::DW_TAG_variant_part ||
1309 N.getTag() == dwarf::DW_TAG_namelist,
1310 "invalid tag", &N);
1311
1312 CheckDI(isScope(N.getRawScope()), "invalid scope", &N, N.getRawScope());
1313 CheckDI(isType(N.getRawBaseType()), "invalid base type", &N,
1314 N.getRawBaseType());
1315
1316 CheckDI(!N.getRawElements() || isa<MDTuple>(N.getRawElements()),
1317 "invalid composite elements", &N, N.getRawElements());
1318 CheckDI(isType(N.getRawVTableHolder()), "invalid vtable holder", &N,
1319 N.getRawVTableHolder());
1320  CheckDI(!hasConflictingReferenceFlags(N.getFlags()),
1321          "invalid reference flags", &N);
1322 unsigned DIBlockByRefStruct = 1 << 4;
1323 CheckDI((N.getFlags() & DIBlockByRefStruct) == 0,
1324 "DIBlockByRefStruct on DICompositeType is no longer supported", &N);
1325
1326 if (N.isVector()) {
1327 const DINodeArray Elements = N.getElements();
1328 CheckDI(Elements.size() == 1 &&
1329 Elements[0]->getTag() == dwarf::DW_TAG_subrange_type,
1330 "invalid vector, expected one element of type subrange", &N);
1331 }
1332
1333 if (auto *Params = N.getRawTemplateParams())
1334 visitTemplateParams(N, *Params);
1335
1336 if (auto *D = N.getRawDiscriminator()) {
1337 CheckDI(isa<DIDerivedType>(D) && N.getTag() == dwarf::DW_TAG_variant_part,
1338 "discriminator can only appear on variant part");
1339 }
1340
1341 if (N.getRawDataLocation()) {
1342 CheckDI(N.getTag() == dwarf::DW_TAG_array_type,
1343 "dataLocation can only appear in array type");
1344 }
1345
1346 if (N.getRawAssociated()) {
1347 CheckDI(N.getTag() == dwarf::DW_TAG_array_type,
1348 "associated can only appear in array type");
1349 }
1350
1351 if (N.getRawAllocated()) {
1352 CheckDI(N.getTag() == dwarf::DW_TAG_array_type,
1353 "allocated can only appear in array type");
1354 }
1355
1356 if (N.getRawRank()) {
1357 CheckDI(N.getTag() == dwarf::DW_TAG_array_type,
1358 "rank can only appear in array type");
1359 }
1360
1361 if (N.getTag() == dwarf::DW_TAG_array_type) {
1362 CheckDI(N.getRawBaseType(), "array types must have a base type", &N);
1363 }
1364}
1365
1366void Verifier::visitDISubroutineType(const DISubroutineType &N) {
1367 CheckDI(N.getTag() == dwarf::DW_TAG_subroutine_type, "invalid tag", &N);
1368 if (auto *Types = N.getRawTypeArray()) {
1369 CheckDI(isa<MDTuple>(Types), "invalid composite elements", &N, Types);
1370 for (Metadata *Ty : N.getTypeArray()->operands()) {
1371 CheckDI(isType(Ty), "invalid subroutine type ref", &N, Types, Ty);
1372 }
1373 }
1374  CheckDI(!hasConflictingReferenceFlags(N.getFlags()),
1375          "invalid reference flags", &N);
1376}
1377
1378void Verifier::visitDIFile(const DIFile &N) {
1379 CheckDI(N.getTag() == dwarf::DW_TAG_file_type, "invalid tag", &N);
1380 std::optional<DIFile::ChecksumInfo<StringRef>> Checksum = N.getChecksum();
1381 if (Checksum) {
1382 CheckDI(Checksum->Kind <= DIFile::ChecksumKind::CSK_Last,
1383 "invalid checksum kind", &N);
1384 size_t Size;
1385 switch (Checksum->Kind) {
1386 case DIFile::CSK_MD5:
1387 Size = 32;
1388 break;
1389 case DIFile::CSK_SHA1:
1390 Size = 40;
1391 break;
1392 case DIFile::CSK_SHA256:
1393 Size = 64;
1394 break;
1395 }
1396 CheckDI(Checksum->Value.size() == Size, "invalid checksum length", &N);
1397 CheckDI(Checksum->Value.find_if_not(llvm::isHexDigit) == StringRef::npos,
1398 "invalid checksum", &N);
1399 }
1400}
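// A DIFile node that satisfies the checksum checks above looks like this
// (illustrative; the MD5 digest is made up but has the required 32 hex
// digits):
//
//   !DIFile(filename: "a.c", directory: "/tmp",
//           checksumkind: CSK_MD5, checksum: "595f44fec1e92a71d3e9e77456ba80d1")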
1401
1402void Verifier::visitDICompileUnit(const DICompileUnit &N) {
1403 CheckDI(N.isDistinct(), "compile units must be distinct", &N);
1404 CheckDI(N.getTag() == dwarf::DW_TAG_compile_unit, "invalid tag", &N);
1405
1406 // Don't bother verifying the compilation directory or producer string
1407 // as those could be empty.
1408 CheckDI(N.getRawFile() && isa<DIFile>(N.getRawFile()), "invalid file", &N,
1409 N.getRawFile());
1410 CheckDI(!N.getFile()->getFilename().empty(), "invalid filename", &N,
1411 N.getFile());
1412
1413 CurrentSourceLang = (dwarf::SourceLanguage)N.getSourceLanguage();
1414
1415 CheckDI((N.getEmissionKind() <= DICompileUnit::LastEmissionKind),
1416 "invalid emission kind", &N);
1417
1418 if (auto *Array = N.getRawEnumTypes()) {
1419 CheckDI(isa<MDTuple>(Array), "invalid enum list", &N, Array);
1420 for (Metadata *Op : N.getEnumTypes()->operands()) {
1421 auto *Enum = dyn_cast_or_null<DICompositeType>(Op);
1422 CheckDI(Enum && Enum->getTag() == dwarf::DW_TAG_enumeration_type,
1423 "invalid enum type", &N, N.getEnumTypes(), Op);
1424 }
1425 }
1426 if (auto *Array = N.getRawRetainedTypes()) {
1427 CheckDI(isa<MDTuple>(Array), "invalid retained type list", &N, Array);
1428 for (Metadata *Op : N.getRetainedTypes()->operands()) {
1429 CheckDI(
1430 Op && (isa<DIType>(Op) || (isa<DISubprogram>(Op) &&
1431 !cast<DISubprogram>(Op)->isDefinition())),
1432 "invalid retained type", &N, Op);
1433 }
1434 }
1435 if (auto *Array = N.getRawGlobalVariables()) {
1436 CheckDI(isa<MDTuple>(Array), "invalid global variable list", &N, Array);
1437 for (Metadata *Op : N.getGlobalVariables()->operands()) {
1438 CheckDI(Op && (isa<DIGlobalVariableExpression>(Op)),
1439 "invalid global variable ref", &N, Op);
1440 }
1441 }
1442 if (auto *Array = N.getRawImportedEntities()) {
1443 CheckDI(isa<MDTuple>(Array), "invalid imported entity list", &N, Array);
1444 for (Metadata *Op : N.getImportedEntities()->operands()) {
1445 CheckDI(Op && isa<DIImportedEntity>(Op), "invalid imported entity ref",
1446 &N, Op);
1447 }
1448 }
1449 if (auto *Array = N.getRawMacros()) {
1450 CheckDI(isa<MDTuple>(Array), "invalid macro list", &N, Array);
1451 for (Metadata *Op : N.getMacros()->operands()) {
1452 CheckDI(Op && isa<DIMacroNode>(Op), "invalid macro ref", &N, Op);
1453 }
1454 }
1455 CUVisited.insert(&N);
1456}
1457
1458void Verifier::visitDISubprogram(const DISubprogram &N) {
1459 CheckDI(N.getTag() == dwarf::DW_TAG_subprogram, "invalid tag", &N);
1460 CheckDI(isScope(N.getRawScope()), "invalid scope", &N, N.getRawScope());
1461 if (auto *F = N.getRawFile())
1462 CheckDI(isa<DIFile>(F), "invalid file", &N, F);
1463 else
1464 CheckDI(N.getLine() == 0, "line specified with no file", &N, N.getLine());
1465 if (auto *T = N.getRawType())
1466 CheckDI(isa<DISubroutineType>(T), "invalid subroutine type", &N, T);
1467 CheckDI(isType(N.getRawContainingType()), "invalid containing type", &N,
1468 N.getRawContainingType());
1469 if (auto *Params = N.getRawTemplateParams())
1470 visitTemplateParams(N, *Params);
1471 if (auto *S = N.getRawDeclaration())
1472 CheckDI(isa<DISubprogram>(S) && !cast<DISubprogram>(S)->isDefinition(),
1473 "invalid subprogram declaration", &N, S);
1474 if (auto *RawNode = N.getRawRetainedNodes()) {
1475 auto *Node = dyn_cast<MDTuple>(RawNode);
1476 CheckDI(Node, "invalid retained nodes list", &N, RawNode);
1477 for (Metadata *Op : Node->operands()) {
1478 CheckDI(Op && (isa<DILocalVariable>(Op) || isa<DILabel>(Op) ||
1479 isa<DIImportedEntity>(Op)),
1480 "invalid retained nodes, expected DILocalVariable, DILabel or "
1481 "DIImportedEntity",
1482 &N, Node, Op);
1483 }
1484 }
1485  CheckDI(!hasConflictingReferenceFlags(N.getFlags()),
1486          "invalid reference flags", &N);
1487
1488 auto *Unit = N.getRawUnit();
1489 if (N.isDefinition()) {
1490 // Subprogram definitions (not part of the type hierarchy).
1491 CheckDI(N.isDistinct(), "subprogram definitions must be distinct", &N);
1492 CheckDI(Unit, "subprogram definitions must have a compile unit", &N);
1493 CheckDI(isa<DICompileUnit>(Unit), "invalid unit type", &N, Unit);
1494 // There's no good way to cross the CU boundary to insert a nested
1495 // DISubprogram definition in one CU into a type defined in another CU.
1496 auto *CT = dyn_cast_or_null<DICompositeType>(N.getRawScope());
1497 if (CT && CT->getRawIdentifier() &&
1498 M.getContext().isODRUniquingDebugTypes())
1499 CheckDI(N.getDeclaration(),
1500 "definition subprograms cannot be nested within DICompositeType "
1501 "when enabling ODR",
1502 &N);
1503 } else {
1504 // Subprogram declarations (part of the type hierarchy).
1505 CheckDI(!Unit, "subprogram declarations must not have a compile unit", &N);
1506 CheckDI(!N.getRawDeclaration(),
1507 "subprogram declaration must not have a declaration field");
1508 }
1509
1510 if (auto *RawThrownTypes = N.getRawThrownTypes()) {
1511 auto *ThrownTypes = dyn_cast<MDTuple>(RawThrownTypes);
1512 CheckDI(ThrownTypes, "invalid thrown types list", &N, RawThrownTypes);
1513 for (Metadata *Op : ThrownTypes->operands())
1514 CheckDI(Op && isa<DIType>(Op), "invalid thrown type", &N, ThrownTypes,
1515 Op);
1516 }
1517
1518 if (N.areAllCallsDescribed())
1519 CheckDI(N.isDefinition(),
1520 "DIFlagAllCallsDescribed must be attached to a definition");
1521}
1522
1523void Verifier::visitDILexicalBlockBase(const DILexicalBlockBase &N) {
1524 CheckDI(N.getTag() == dwarf::DW_TAG_lexical_block, "invalid tag", &N);
1525 CheckDI(N.getRawScope() && isa<DILocalScope>(N.getRawScope()),
1526 "invalid local scope", &N, N.getRawScope());
1527 if (auto *SP = dyn_cast<DISubprogram>(N.getRawScope()))
1528 CheckDI(SP->isDefinition(), "scope points into the type hierarchy", &N);
1529}
1530
1531void Verifier::visitDILexicalBlock(const DILexicalBlock &N) {
1532 visitDILexicalBlockBase(N);
1533
1534 CheckDI(N.getLine() || !N.getColumn(),
1535 "cannot have column info without line info", &N);
1536}
1537
1538void Verifier::visitDILexicalBlockFile(const DILexicalBlockFile &N) {
1539 visitDILexicalBlockBase(N);
1540}
1541
1542void Verifier::visitDICommonBlock(const DICommonBlock &N) {
1543 CheckDI(N.getTag() == dwarf::DW_TAG_common_block, "invalid tag", &N);
1544 if (auto *S = N.getRawScope())
1545 CheckDI(isa<DIScope>(S), "invalid scope ref", &N, S);
1546 if (auto *S = N.getRawDecl())
1547 CheckDI(isa<DIGlobalVariable>(S), "invalid declaration", &N, S);
1548}
1549
1550void Verifier::visitDINamespace(const DINamespace &N) {
1551 CheckDI(N.getTag() == dwarf::DW_TAG_namespace, "invalid tag", &N);
1552 if (auto *S = N.getRawScope())
1553 CheckDI(isa<DIScope>(S), "invalid scope ref", &N, S);
1554}
1555
1556void Verifier::visitDIMacro(const DIMacro &N) {
1557 CheckDI(N.getMacinfoType() == dwarf::DW_MACINFO_define ||
1558 N.getMacinfoType() == dwarf::DW_MACINFO_undef,
1559 "invalid macinfo type", &N);
1560 CheckDI(!N.getName().empty(), "anonymous macro", &N);
1561 if (!N.getValue().empty()) {
1562 assert(N.getValue().data()[0] != ' ' && "Macro value has a space prefix");
1563 }
1564}
1565
1566void Verifier::visitDIMacroFile(const DIMacroFile &N) {
1567 CheckDI(N.getMacinfoType() == dwarf::DW_MACINFO_start_file,
1568 "invalid macinfo type", &N);
1569 if (auto *F = N.getRawFile())
1570 CheckDI(isa<DIFile>(F), "invalid file", &N, F);
1571
1572 if (auto *Array = N.getRawElements()) {
1573 CheckDI(isa<MDTuple>(Array), "invalid macro list", &N, Array);
1574 for (Metadata *Op : N.getElements()->operands()) {
1575 CheckDI(Op && isa<DIMacroNode>(Op), "invalid macro ref", &N, Op);
1576 }
1577 }
1578}
1579
1580void Verifier::visitDIModule(const DIModule &N) {
1581 CheckDI(N.getTag() == dwarf::DW_TAG_module, "invalid tag", &N);
1582 CheckDI(!N.getName().empty(), "anonymous module", &N);
1583}
1584
1585void Verifier::visitDITemplateParameter(const DITemplateParameter &N) {
1586 CheckDI(isType(N.getRawType()), "invalid type ref", &N, N.getRawType());
1587}
1588
1589void Verifier::visitDITemplateTypeParameter(const DITemplateTypeParameter &N) {
1590 visitDITemplateParameter(N);
1591
1592 CheckDI(N.getTag() == dwarf::DW_TAG_template_type_parameter, "invalid tag",
1593 &N);
1594}
1595
1596void Verifier::visitDITemplateValueParameter(
1597 const DITemplateValueParameter &N) {
1598 visitDITemplateParameter(N);
1599
1600 CheckDI(N.getTag() == dwarf::DW_TAG_template_value_parameter ||
1601 N.getTag() == dwarf::DW_TAG_GNU_template_template_param ||
1602 N.getTag() == dwarf::DW_TAG_GNU_template_parameter_pack,
1603 "invalid tag", &N);
1604}
1605
1606void Verifier::visitDIVariable(const DIVariable &N) {
1607 if (auto *S = N.getRawScope())
1608 CheckDI(isa<DIScope>(S), "invalid scope", &N, S);
1609 if (auto *F = N.getRawFile())
1610 CheckDI(isa<DIFile>(F), "invalid file", &N, F);
1611}
1612
1613void Verifier::visitDIGlobalVariable(const DIGlobalVariable &N) {
1614 // Checks common to all variables.
1615 visitDIVariable(N);
1616
1617 CheckDI(N.getTag() == dwarf::DW_TAG_variable, "invalid tag", &N);
1618 CheckDI(isType(N.getRawType()), "invalid type ref", &N, N.getRawType());
1619 // Check only if the global variable is not an extern
1620 if (N.isDefinition())
1621 CheckDI(N.getType(), "missing global variable type", &N);
1622 if (auto *Member = N.getRawStaticDataMemberDeclaration()) {
1623 CheckDI(isa<DIDerivedType>(Member),
1624 "invalid static data member declaration", &N, Member);
1625 }
1626}
1627
1628void Verifier::visitDILocalVariable(const DILocalVariable &N) {
1629 // Checks common to all variables.
1630 visitDIVariable(N);
1631
1632 CheckDI(isType(N.getRawType()), "invalid type ref", &N, N.getRawType());
1633 CheckDI(N.getTag() == dwarf::DW_TAG_variable, "invalid tag", &N);
1634 CheckDI(N.getRawScope() && isa<DILocalScope>(N.getRawScope()),
1635 "local variable requires a valid scope", &N, N.getRawScope());
1636 if (auto Ty = N.getType())
1637 CheckDI(!isa<DISubroutineType>(Ty), "invalid type", &N, N.getType());
1638}
1639
1640void Verifier::visitDIAssignID(const DIAssignID &N) {
1641 CheckDI(!N.getNumOperands(), "DIAssignID has no arguments", &N);
1642 CheckDI(N.isDistinct(), "DIAssignID must be distinct", &N);
1643}
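// Illustrative sketch (not from this file): a well-formed DIAssignID is a
// distinct node with no operands, e.g.
//   !9 = distinct !DIAssignID()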
1644
1645void Verifier::visitDILabel(const DILabel &N) {
1646 if (auto *S = N.getRawScope())
1647 CheckDI(isa<DIScope>(S), "invalid scope", &N, S);
1648 if (auto *F = N.getRawFile())
1649 CheckDI(isa<DIFile>(F), "invalid file", &N, F);
1650
1651 CheckDI(N.getTag() == dwarf::DW_TAG_label, "invalid tag", &N);
1652 CheckDI(N.getRawScope() && isa<DILocalScope>(N.getRawScope()),
1653 "label requires a valid scope", &N, N.getRawScope());
1654}
1655
1656void Verifier::visitDIExpression(const DIExpression &N) {
1657 CheckDI(N.isValid(), "invalid expression", &N);
1658}
1659
1660void Verifier::visitDIGlobalVariableExpression(
1661 const DIGlobalVariableExpression &GVE) {
1662 CheckDI(GVE.getVariable(), "missing variable");
1663 if (auto *Var = GVE.getVariable())
1664 visitDIGlobalVariable(*Var);
1665 if (auto *Expr = GVE.getExpression()) {
1666 visitDIExpression(*Expr);
1667 if (auto Fragment = Expr->getFragmentInfo())
1668 verifyFragmentExpression(*GVE.getVariable(), *Fragment, &GVE);
1669 }
1670}
1671
1672void Verifier::visitDIObjCProperty(const DIObjCProperty &N) {
1673 CheckDI(N.getTag() == dwarf::DW_TAG_APPLE_property, "invalid tag", &N);
1674 if (auto *T = N.getRawType())
1675 CheckDI(isType(T), "invalid type ref", &N, T);
1676 if (auto *F = N.getRawFile())
1677 CheckDI(isa<DIFile>(F), "invalid file", &N, F);
1678}
1679
1680void Verifier::visitDIImportedEntity(const DIImportedEntity &N) {
1681 CheckDI(N.getTag() == dwarf::DW_TAG_imported_module ||
1682 N.getTag() == dwarf::DW_TAG_imported_declaration,
1683 "invalid tag", &N);
1684 if (auto *S = N.getRawScope())
1685 CheckDI(isa<DIScope>(S), "invalid scope for imported entity", &N, S);
1686 CheckDI(isDINode(N.getRawEntity()), "invalid imported entity", &N,
1687 N.getRawEntity());
1688}
1689
1690void Verifier::visitComdat(const Comdat &C) {
1691 // In COFF the Module is invalid if the GlobalValue has private linkage.
1692 // Entities with private linkage don't have entries in the symbol table.
1693 if (TT.isOSBinFormatCOFF())
1694 if (const GlobalValue *GV = M.getNamedValue(C.getName()))
1695 Check(!GV->hasPrivateLinkage(), "comdat global value has private linkage",
1696 GV);
1697}
1698
1699void Verifier::visitModuleIdents() {
1700 const NamedMDNode *Idents = M.getNamedMetadata("llvm.ident");
1701 if (!Idents)
1702 return;
1703
1704 // llvm.ident takes a list of metadata entries. Each entry has only one string.
1705 // Scan each llvm.ident entry and make sure that this requirement is met.
1706 for (const MDNode *N : Idents->operands()) {
1707 Check(N->getNumOperands() == 1,
1708 "incorrect number of operands in llvm.ident metadata", N);
1709 Check(dyn_cast_or_null<MDString>(N->getOperand(0)),
1710 ("invalid value for llvm.ident metadata entry operand"
1711 "(the operand should be a string)"),
1712 N->getOperand(0));
1713 }
1714}
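// Illustrative sketch (not from this file): llvm.ident entries in the shape
// the loop above accepts, each a one-element tuple wrapping a string.
//   !llvm.ident = !{!0}
//   !0 = !{!"clang version 19.0.0"}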
1715
1716void Verifier::visitModuleCommandLines() {
1717 const NamedMDNode *CommandLines = M.getNamedMetadata("llvm.commandline");
1718 if (!CommandLines)
1719 return;
1720
1721 // llvm.commandline takes a list of metadata entries. Each entry has only one
1722 // string. Scan each llvm.commandline entry and make sure that this
1723 // requirement is met.
1724 for (const MDNode *N : CommandLines->operands()) {
1725 Check(N->getNumOperands() == 1,
1726 "incorrect number of operands in llvm.commandline metadata", N);
1727 Check(dyn_cast_or_null<MDString>(N->getOperand(0)),
1728 ("invalid value for llvm.commandline metadata entry operand"
1729 "(the operand should be a string)"),
1730 N->getOperand(0));
1731 }
1732}
1733
1734void Verifier::visitModuleFlags() {
1735 const NamedMDNode *Flags = M.getModuleFlagsMetadata();
1736 if (!Flags) return;
1737
1738 // Scan each flag, and track the flags and requirements.
1739 DenseMap<const MDString *, const MDNode *> SeenIDs;
1740 SmallVector<const MDNode*, 16> Requirements;
1741 uint64_t PAuthABIPlatform = -1;
1742 uint64_t PAuthABIVersion = -1;
1743 for (const MDNode *MDN : Flags->operands()) {
1744 visitModuleFlag(MDN, SeenIDs, Requirements);
1745 if (MDN->getNumOperands() != 3)
1746 continue;
1747 if (const auto *FlagName = dyn_cast_or_null<MDString>(MDN->getOperand(1))) {
1748 if (FlagName->getString() == "aarch64-elf-pauthabi-platform") {
1749 if (const auto *PAP =
1750 mdconst::dyn_extract_or_null<ConstantInt>(MDN->getOperand(2)))
1751 PAuthABIPlatform = PAP->getZExtValue();
1752 } else if (FlagName->getString() == "aarch64-elf-pauthabi-version") {
1753 if (const auto *PAV =
1754 mdconst::dyn_extract_or_null<ConstantInt>(MDN->getOperand(2)))
1755 PAuthABIVersion = PAV->getZExtValue();
1756 }
1757 }
1758 }
1759
1760 if ((PAuthABIPlatform == uint64_t(-1)) != (PAuthABIVersion == uint64_t(-1)))
1761 CheckFailed("either both or no 'aarch64-elf-pauthabi-platform' and "
1762 "'aarch64-elf-pauthabi-version' module flags must be present");
1763
1764 // Validate that the requirements in the module are valid.
1765 for (const MDNode *Requirement : Requirements) {
1766 const MDString *Flag = cast<MDString>(Requirement->getOperand(0));
1767 const Metadata *ReqValue = Requirement->getOperand(1);
1768
1769 const MDNode *Op = SeenIDs.lookup(Flag);
1770 if (!Op) {
1771 CheckFailed("invalid requirement on flag, flag is not present in module",
1772 Flag);
1773 continue;
1774 }
1775
1776 if (Op->getOperand(2) != ReqValue) {
1777 CheckFailed(("invalid requirement on flag, "
1778 "flag does not have the required value"),
1779 Flag);
1780 continue;
1781 }
1782 }
1783}
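// Illustrative sketch (not from this file; flag names invented): module flags
// are checked as (behavior, ID, value) triples; a 'require' flag (behavior 3)
// names another flag and the value it must carry.
//   !llvm.module.flags = !{!0, !1}
//   !0 = !{i32 1, !"wchar_size", i32 4}
//   !1 = !{i32 3, !"wchar-requirement", !2}
//   !2 = !{!"wchar_size", i32 4}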
1784
1785void
1786Verifier::visitModuleFlag(const MDNode *Op,
1787 DenseMap<const MDString *, const MDNode *> &SeenIDs,
1788 SmallVectorImpl<const MDNode *> &Requirements) {
1789 // Each module flag should have three arguments, the merge behavior (a
1790 // constant int), the flag ID (an MDString), and the value.
1791 Check(Op->getNumOperands() == 3,
1792 "incorrect number of operands in module flag", Op);
1793 Module::ModFlagBehavior MFB;
1794 if (!Module::isValidModFlagBehavior(Op->getOperand(0), MFB)) {
1795 Check(mdconst::dyn_extract_or_null<ConstantInt>(Op->getOperand(0)),
1796 "invalid behavior operand in module flag (expected constant integer)",
1797 Op->getOperand(0));
1798 Check(false,
1799 "invalid behavior operand in module flag (unexpected constant)",
1800 Op->getOperand(0));
1801 }
1802 MDString *ID = dyn_cast_or_null<MDString>(Op->getOperand(1));
1803 Check(ID, "invalid ID operand in module flag (expected metadata string)",
1804 Op->getOperand(1));
1805
1806 // Check the values for behaviors with additional requirements.
1807 switch (MFB) {
1808 case Module::Error:
1809 case Module::Warning:
1810 case Module::Override:
1811 // These behavior types accept any value.
1812 break;
1813
1814 case Module::Min: {
1815 auto *V = mdconst::dyn_extract_or_null<ConstantInt>(Op->getOperand(2));
1816 Check(V && V->getValue().isNonNegative(),
1817 "invalid value for 'min' module flag (expected constant non-negative "
1818 "integer)",
1819 Op->getOperand(2));
1820 break;
1821 }
1822
1823 case Module::Max: {
1824 Check(mdconst::dyn_extract_or_null<ConstantInt>(Op->getOperand(2)),
1825 "invalid value for 'max' module flag (expected constant integer)",
1826 Op->getOperand(2));
1827 break;
1828 }
1829
1830 case Module::Require: {
1831 // The value should itself be an MDNode with two operands, a flag ID (an
1832 // MDString), and a value.
1833 MDNode *Value = dyn_cast<MDNode>(Op->getOperand(2));
1834 Check(Value && Value->getNumOperands() == 2,
1835 "invalid value for 'require' module flag (expected metadata pair)",
1836 Op->getOperand(2));
1837 Check(isa<MDString>(Value->getOperand(0)),
1838 ("invalid value for 'require' module flag "
1839 "(first value operand should be a string)"),
1840 Value->getOperand(0));
1841
1842 // Append it to the list of requirements, to check once all module flags are
1843 // scanned.
1844 Requirements.push_back(Value);
1845 break;
1846 }
1847
1848 case Module::Append:
1849 case Module::AppendUnique: {
1850 // These behavior types require the operand be an MDNode.
1851 Check(isa<MDNode>(Op->getOperand(2)),
1852 "invalid value for 'append'-type module flag "
1853 "(expected a metadata node)",
1854 Op->getOperand(2));
1855 break;
1856 }
1857 }
1858
1859 // Unless this is a "requires" flag, check the ID is unique.
1860 if (MFB != Module::Require) {
1861 bool Inserted = SeenIDs.insert(std::make_pair(ID, Op)).second;
1862 Check(Inserted,
1863 "module flag identifiers must be unique (or of 'require' type)", ID);
1864 }
1865
1866 if (ID->getString() == "wchar_size") {
1867 ConstantInt *Value
1868 = mdconst::dyn_extract_or_null<ConstantInt>(Op->getOperand(2));
1869 Check(Value, "wchar_size metadata requires constant integer argument");
1870 }
1871
1872 if (ID->getString() == "Linker Options") {
1873 // If the llvm.linker.options named metadata exists, we assume that the
1874 // bitcode reader has upgraded the module flag. Otherwise the flag might
1875 // have been created by a client directly.
1876 Check(M.getNamedMetadata("llvm.linker.options"),
1877 "'Linker Options' named metadata no longer supported");
1878 }
1879
1880 if (ID->getString() == "SemanticInterposition") {
1881 ConstantInt *Value =
1882 mdconst::dyn_extract_or_null<ConstantInt>(Op->getOperand(2));
1883 Check(Value,
1884 "SemanticInterposition metadata requires constant integer argument");
1885 }
1886
1887 if (ID->getString() == "CG Profile") {
1888 for (const MDOperand &MDO : cast<MDNode>(Op->getOperand(2))->operands())
1889 visitModuleFlagCGProfileEntry(MDO);
1890 }
1891}
1892
1893void Verifier::visitModuleFlagCGProfileEntry(const MDOperand &MDO) {
1894 auto CheckFunction = [&](const MDOperand &FuncMDO) {
1895 if (!FuncMDO)
1896 return;
1897 auto F = dyn_cast<ValueAsMetadata>(FuncMDO);
1898 Check(F && isa<Function>(F->getValue()->stripPointerCasts()),
1899 "expected a Function or null", FuncMDO);
1900 };
1901 auto Node = dyn_cast_or_null<MDNode>(MDO);
1902 Check(Node && Node->getNumOperands() == 3, "expected a MDNode triple", MDO);
1903 CheckFunction(Node->getOperand(0));
1904 CheckFunction(Node->getOperand(1));
1905 auto Count = dyn_cast_or_null<ConstantAsMetadata>(Node->getOperand(2));
1906 Check(Count && Count->getType()->isIntegerTy(),
1907 "expected an integer constant", Node->getOperand(2));
1908}
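// Illustrative sketch (not from this file; symbol names invented): each
// "CG Profile" entry is a (caller, callee, count) triple, e.g.
//   !0 = !{ptr @caller, ptr @callee, i64 2000}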
1909
1910void Verifier::verifyAttributeTypes(AttributeSet Attrs, const Value *V) {
1911 for (Attribute A : Attrs) {
1912
1913 if (A.isStringAttribute()) {
1914#define GET_ATTR_NAMES
1915#define ATTRIBUTE_ENUM(ENUM_NAME, DISPLAY_NAME)
1916#define ATTRIBUTE_STRBOOL(ENUM_NAME, DISPLAY_NAME) \
1917 if (A.getKindAsString() == #DISPLAY_NAME) { \
1918 auto V = A.getValueAsString(); \
1919 if (!(V.empty() || V == "true" || V == "false")) \
1920 CheckFailed("invalid value for '" #DISPLAY_NAME "' attribute: " + V + \
1921 ""); \
1922 }
1923
1924#include "llvm/IR/Attributes.inc"
1925 continue;
1926 }
1927
1928 if (A.isIntAttribute() != Attribute::isIntAttrKind(A.getKindAsEnum())) {
1929 CheckFailed("Attribute '" + A.getAsString() + "' should have an Argument",
1930 V);
1931 return;
1932 }
1933 }
1934}
1935
1936// VerifyParameterAttrs - Check the given attributes for an argument or return
1937// value of the specified type. The value V is printed in error messages.
1938void Verifier::verifyParameterAttrs(AttributeSet Attrs, Type *Ty,
1939 const Value *V) {
1940 if (!Attrs.hasAttributes())
1941 return;
1942
1943 verifyAttributeTypes(Attrs, V);
1944
1945 for (Attribute Attr : Attrs)
1946 Check(Attr.isStringAttribute() ||
1947 Attribute::canUseAsParamAttr(Attr.getKindAsEnum()),
1948 "Attribute '" + Attr.getAsString() + "' does not apply to parameters",
1949 V);
1950
1951 if (Attrs.hasAttribute(Attribute::ImmArg)) {
1952 Check(Attrs.getNumAttributes() == 1,
1953 "Attribute 'immarg' is incompatible with other attributes", V);
1954 }
1955
1956 // Check for mutually incompatible attributes. Only inreg is compatible with
1957 // sret.
1958 unsigned AttrCount = 0;
1959 AttrCount += Attrs.hasAttribute(Attribute::ByVal);
1960 AttrCount += Attrs.hasAttribute(Attribute::InAlloca);
1961 AttrCount += Attrs.hasAttribute(Attribute::Preallocated);
1962 AttrCount += Attrs.hasAttribute(Attribute::StructRet) ||
1963 Attrs.hasAttribute(Attribute::InReg);
1964 AttrCount += Attrs.hasAttribute(Attribute::Nest);
1965 AttrCount += Attrs.hasAttribute(Attribute::ByRef);
1966 Check(AttrCount <= 1,
1967 "Attributes 'byval', 'inalloca', 'preallocated', 'inreg', 'nest', "
1968 "'byref', and 'sret' are incompatible!",
1969 V);
1970
1971 Check(!(Attrs.hasAttribute(Attribute::InAlloca) &&
1972 Attrs.hasAttribute(Attribute::ReadOnly)),
1973 "Attributes "
1974 "'inalloca and readonly' are incompatible!",
1975 V);
1976
1977 Check(!(Attrs.hasAttribute(Attribute::StructRet) &&
1978 Attrs.hasAttribute(Attribute::Returned)),
1979 "Attributes "
1980 "'sret and returned' are incompatible!",
1981 V);
1982
1983 Check(!(Attrs.hasAttribute(Attribute::ZExt) &&
1984 Attrs.hasAttribute(Attribute::SExt)),
1985 "Attributes "
1986 "'zeroext and signext' are incompatible!",
1987 V);
1988
1989 Check(!(Attrs.hasAttribute(Attribute::ReadNone) &&
1990 Attrs.hasAttribute(Attribute::ReadOnly)),
1991 "Attributes "
1992 "'readnone and readonly' are incompatible!",
1993 V);
1994
1995 Check(!(Attrs.hasAttribute(Attribute::ReadNone) &&
1996 Attrs.hasAttribute(Attribute::WriteOnly)),
1997 "Attributes "
1998 "'readnone and writeonly' are incompatible!",
1999 V);
2000
2001 Check(!(Attrs.hasAttribute(Attribute::ReadOnly) &&
2002 Attrs.hasAttribute(Attribute::WriteOnly)),
2003 "Attributes "
2004 "'readonly and writeonly' are incompatible!",
2005 V);
2006
2007 Check(!(Attrs.hasAttribute(Attribute::NoInline) &&
2008 Attrs.hasAttribute(Attribute::AlwaysInline)),
2009 "Attributes "
2010 "'noinline and alwaysinline' are incompatible!",
2011 V);
2012
2013 Check(!(Attrs.hasAttribute(Attribute::Writable) &&
2014 Attrs.hasAttribute(Attribute::ReadNone)),
2015 "Attributes writable and readnone are incompatible!", V);
2016
2017 Check(!(Attrs.hasAttribute(Attribute::Writable) &&
2018 Attrs.hasAttribute(Attribute::ReadOnly)),
2019 "Attributes writable and readonly are incompatible!", V);
2020
2021 AttributeMask IncompatibleAttrs = AttributeFuncs::typeIncompatible(Ty);
2022 for (Attribute Attr : Attrs) {
2023 if (!Attr.isStringAttribute() &&
2024 IncompatibleAttrs.contains(Attr.getKindAsEnum())) {
2025 CheckFailed("Attribute '" + Attr.getAsString() +
2026 "' applied to incompatible type!", V);
2027 return;
2028 }
2029 }
2030
2031 if (isa<PointerType>(Ty)) {
2032 if (Attrs.hasAttribute(Attribute::ByVal)) {
2033 if (Attrs.hasAttribute(Attribute::Alignment)) {
2034 Align AttrAlign = Attrs.getAlignment().valueOrOne();
2035 Align MaxAlign(ParamMaxAlignment);
2036 Check(AttrAlign <= MaxAlign,
2037 "Attribute 'align' exceed the max size 2^14", V);
2038 }
2039 SmallPtrSet<Type *, 4> Visited;
2040 Check(Attrs.getByValType()->isSized(&Visited),
2041 "Attribute 'byval' does not support unsized types!", V);
2042 }
2043 if (Attrs.hasAttribute(Attribute::ByRef)) {
2044 SmallPtrSet<Type *, 4> Visited;
2045 Check(Attrs.getByRefType()->isSized(&Visited),
2046 "Attribute 'byref' does not support unsized types!", V);
2047 }
2048 if (Attrs.hasAttribute(Attribute::InAlloca)) {
2049 SmallPtrSet<Type *, 4> Visited;
2050 Check(Attrs.getInAllocaType()->isSized(&Visited),
2051 "Attribute 'inalloca' does not support unsized types!", V);
2052 }
2053 if (Attrs.hasAttribute(Attribute::Preallocated)) {
2054 SmallPtrSet<Type *, 4> Visited;
2055 Check(Attrs.getPreallocatedType()->isSized(&Visited),
2056 "Attribute 'preallocated' does not support unsized types!", V);
2057 }
2058 }
2059
2060 if (Attrs.hasAttribute(Attribute::NoFPClass)) {
2061 uint64_t Val = Attrs.getAttribute(Attribute::NoFPClass).getValueAsInt();
2062 Check(Val != 0, "Attribute 'nofpclass' must have at least one test bit set",
2063 V);
2064 Check((Val & ~static_cast<unsigned>(fcAllFlags)) == 0,
2065 "Invalid value for 'nofpclass' test mask", V);
2066 }
2067 if (Attrs.hasAttribute(Attribute::Range)) {
2068 const ConstantRange &CR =
2069 Attrs.getAttribute(Attribute::Range).getValueAsConstantRange();
2071 "Range bit width must match type bit width!", V);
2072 }
2073}
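// Illustrative sketch (not from this file; function names invented): parameter
// attribute combinations exercised by the checks above.
//   declare void @ok(ptr byval(i32) align 8 %p)          ; accepted
//   declare void @bad1(ptr byval(i32) inalloca(i32) %p)  ; rejected: byval + inalloca
//   declare void @bad2(i32 zeroext signext %x)           ; rejected: zeroext + signext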
2074
2075void Verifier::checkUnsignedBaseTenFuncAttr(AttributeList Attrs, StringRef Attr,
2076 const Value *V) {
2077 if (Attrs.hasFnAttr(Attr)) {
2078 StringRef S = Attrs.getFnAttr(Attr).getValueAsString();
2079 unsigned N;
2080 if (S.getAsInteger(10, N))
2081 CheckFailed("\"" + Attr + "\" takes an unsigned integer: " + S, V);
2082 }
2083}
2084
2085// Check parameter attributes against a function type.
2086// The value V is printed in error messages.
2087void Verifier::verifyFunctionAttrs(FunctionType *FT, AttributeList Attrs,
2088 const Value *V, bool IsIntrinsic,
2089 bool IsInlineAsm) {
2090 if (Attrs.isEmpty())
2091 return;
2092
2093 if (AttributeListsVisited.insert(Attrs.getRawPointer()).second) {
2094 Check(Attrs.hasParentContext(Context),
2095 "Attribute list does not match Module context!", &Attrs, V);
2096 for (const auto &AttrSet : Attrs) {
2097 Check(!AttrSet.hasAttributes() || AttrSet.hasParentContext(Context),
2098 "Attribute set does not match Module context!", &AttrSet, V);
2099 for (const auto &A : AttrSet) {
2100 Check(A.hasParentContext(Context),
2101 "Attribute does not match Module context!", &A, V);
2102 }
2103 }
2104 }
2105
2106 bool SawNest = false;
2107 bool SawReturned = false;
2108 bool SawSRet = false;
2109 bool SawSwiftSelf = false;
2110 bool SawSwiftAsync = false;
2111 bool SawSwiftError = false;
2112
2113 // Verify return value attributes.
2114 AttributeSet RetAttrs = Attrs.getRetAttrs();
2115 for (Attribute RetAttr : RetAttrs)
2116 Check(RetAttr.isStringAttribute() ||
2117 Attribute::canUseAsRetAttr(RetAttr.getKindAsEnum()),
2118 "Attribute '" + RetAttr.getAsString() +
2119 "' does not apply to function return values",
2120 V);
2121
2122 unsigned MaxParameterWidth = 0;
2123 auto GetMaxParameterWidth = [&MaxParameterWidth](Type *Ty) {
2124 if (Ty->isVectorTy()) {
2125 if (auto *VT = dyn_cast<FixedVectorType>(Ty)) {
2126 unsigned Size = VT->getPrimitiveSizeInBits().getFixedValue();
2127 if (Size > MaxParameterWidth)
2128 MaxParameterWidth = Size;
2129 }
2130 }
2131 };
2132 GetMaxParameterWidth(FT->getReturnType());
2133 verifyParameterAttrs(RetAttrs, FT->getReturnType(), V);
2134
2135 // Verify parameter attributes.
2136 for (unsigned i = 0, e = FT->getNumParams(); i != e; ++i) {
2137 Type *Ty = FT->getParamType(i);
2138 AttributeSet ArgAttrs = Attrs.getParamAttrs(i);
2139
2140 if (!IsIntrinsic) {
2141 Check(!ArgAttrs.hasAttribute(Attribute::ImmArg),
2142 "immarg attribute only applies to intrinsics", V);
2143 if (!IsInlineAsm)
2144 Check(!ArgAttrs.hasAttribute(Attribute::ElementType),
2145 "Attribute 'elementtype' can only be applied to intrinsics"
2146 " and inline asm.",
2147 V);
2148 }
2149
2150 verifyParameterAttrs(ArgAttrs, Ty, V);
2151 GetMaxParameterWidth(Ty);
2152
2153 if (ArgAttrs.hasAttribute(Attribute::Nest)) {
2154 Check(!SawNest, "More than one parameter has attribute nest!", V);
2155 SawNest = true;
2156 }
2157
2158 if (ArgAttrs.hasAttribute(Attribute::Returned)) {
2159 Check(!SawReturned, "More than one parameter has attribute returned!", V);
2160 Check(Ty->canLosslesslyBitCastTo(FT->getReturnType()),
2161 "Incompatible argument and return types for 'returned' attribute",
2162 V);
2163 SawReturned = true;
2164 }
2165
2166 if (ArgAttrs.hasAttribute(Attribute::StructRet)) {
2167 Check(!SawSRet, "Cannot have multiple 'sret' parameters!", V);
2168 Check(i == 0 || i == 1,
2169 "Attribute 'sret' is not on first or second parameter!", V);
2170 SawSRet = true;
2171 }
2172
2173 if (ArgAttrs.hasAttribute(Attribute::SwiftSelf)) {
2174 Check(!SawSwiftSelf, "Cannot have multiple 'swiftself' parameters!", V);
2175 SawSwiftSelf = true;
2176 }
2177
2178 if (ArgAttrs.hasAttribute(Attribute::SwiftAsync)) {
2179 Check(!SawSwiftAsync, "Cannot have multiple 'swiftasync' parameters!", V);
2180 SawSwiftAsync = true;
2181 }
2182
2183 if (ArgAttrs.hasAttribute(Attribute::SwiftError)) {
2184 Check(!SawSwiftError, "Cannot have multiple 'swifterror' parameters!", V);
2185 SawSwiftError = true;
2186 }
2187
2188 if (ArgAttrs.hasAttribute(Attribute::InAlloca)) {
2189 Check(i == FT->getNumParams() - 1,
2190 "inalloca isn't on the last parameter!", V);
2191 }
2192 }
2193
2194 if (!Attrs.hasFnAttrs())
2195 return;
2196
2197 verifyAttributeTypes(Attrs.getFnAttrs(), V);
2198 for (Attribute FnAttr : Attrs.getFnAttrs())
2199 Check(FnAttr.isStringAttribute() ||
2200 Attribute::canUseAsFnAttr(FnAttr.getKindAsEnum()),
2201 "Attribute '" + FnAttr.getAsString() +
2202 "' does not apply to functions!",
2203 V);
2204
2205 Check(!(Attrs.hasFnAttr(Attribute::NoInline) &&
2206 Attrs.hasFnAttr(Attribute::AlwaysInline)),
2207 "Attributes 'noinline and alwaysinline' are incompatible!", V);
2208
2209 if (Attrs.hasFnAttr(Attribute::OptimizeNone)) {
2210 Check(Attrs.hasFnAttr(Attribute::NoInline),
2211 "Attribute 'optnone' requires 'noinline'!", V);
2212
2213 Check(!Attrs.hasFnAttr(Attribute::OptimizeForSize),
2214 "Attributes 'optsize and optnone' are incompatible!", V);
2215
2216 Check(!Attrs.hasFnAttr(Attribute::MinSize),
2217 "Attributes 'minsize and optnone' are incompatible!", V);
2218
2219 Check(!Attrs.hasFnAttr(Attribute::OptimizeForDebugging),
2220 "Attributes 'optdebug and optnone' are incompatible!", V);
2221 }
2222
2223 if (Attrs.hasFnAttr(Attribute::OptimizeForDebugging)) {
2224 Check(!Attrs.hasFnAttr(Attribute::OptimizeForSize),
2225 "Attributes 'optsize and optdebug' are incompatible!", V);
2226
2227 Check(!Attrs.hasFnAttr(Attribute::MinSize),
2228 "Attributes 'minsize and optdebug' are incompatible!", V);
2229 }
2230
2231 Check(!Attrs.hasAttrSomewhere(Attribute::Writable) ||
2232 isModSet(Attrs.getMemoryEffects().getModRef(IRMemLocation::ArgMem)),
2233 "Attribute writable and memory without argmem: write are incompatible!",
2234 V);
2235
2236 if (Attrs.hasFnAttr("aarch64_pstate_sm_enabled")) {
2237 Check(!Attrs.hasFnAttr("aarch64_pstate_sm_compatible"),
2238 "Attributes 'aarch64_pstate_sm_enabled and "
2239 "aarch64_pstate_sm_compatible' are incompatible!",
2240 V);
2241 }
2242
2243 Check((Attrs.hasFnAttr("aarch64_new_za") + Attrs.hasFnAttr("aarch64_in_za") +
2244 Attrs.hasFnAttr("aarch64_inout_za") +
2245 Attrs.hasFnAttr("aarch64_out_za") +
2246 Attrs.hasFnAttr("aarch64_preserves_za")) <= 1,
2247 "Attributes 'aarch64_new_za', 'aarch64_in_za', 'aarch64_out_za', "
2248 "'aarch64_inout_za' and 'aarch64_preserves_za' are mutually exclusive",
2249 V);
2250
2251 Check(
2252 (Attrs.hasFnAttr("aarch64_new_zt0") + Attrs.hasFnAttr("aarch64_in_zt0") +
2253 Attrs.hasFnAttr("aarch64_inout_zt0") +
2254 Attrs.hasFnAttr("aarch64_out_zt0") +
2255 Attrs.hasFnAttr("aarch64_preserves_zt0")) <= 1,
2256 "Attributes 'aarch64_new_zt0', 'aarch64_in_zt0', 'aarch64_out_zt0', "
2257 "'aarch64_inout_zt0' and 'aarch64_preserves_zt0' are mutually exclusive",
2258 V);
2259
2260 if (Attrs.hasFnAttr(Attribute::JumpTable)) {
2261 const GlobalValue *GV = cast<GlobalValue>(V);
2263 "Attribute 'jumptable' requires 'unnamed_addr'", V);
2264 }
2265
2266 if (auto Args = Attrs.getFnAttrs().getAllocSizeArgs()) {
2267 auto CheckParam = [&](StringRef Name, unsigned ParamNo) {
2268 if (ParamNo >= FT->getNumParams()) {
2269 CheckFailed("'allocsize' " + Name + " argument is out of bounds", V);
2270 return false;
2271 }
2272
2273 if (!FT->getParamType(ParamNo)->isIntegerTy()) {
2274 CheckFailed("'allocsize' " + Name +
2275 " argument must refer to an integer parameter",
2276 V);
2277 return false;
2278 }
2279
2280 return true;
2281 };
2282
2283 if (!CheckParam("element size", Args->first))
2284 return;
2285
2286 if (Args->second && !CheckParam("number of elements", *Args->second))
2287 return;
2288 }
2289
2290 if (Attrs.hasFnAttr(Attribute::AllocKind)) {
2291 AllocFnKind K = Attrs.getAllocKind();
2292 AllocFnKind Type =
2293 K & (AllocFnKind::Alloc | AllocFnKind::Realloc | AllocFnKind::Free);
2294 if (!is_contained(
2295 {AllocFnKind::Alloc, AllocFnKind::Realloc, AllocFnKind::Free},
2296 Type))
2297 CheckFailed(
2298 "'allockind()' requires exactly one of alloc, realloc, and free");
2299 if ((Type == AllocFnKind::Free) &&
2300 ((K & (AllocFnKind::Uninitialized | AllocFnKind::Zeroed |
2301 AllocFnKind::Aligned)) != AllocFnKind::Unknown))
2302 CheckFailed("'allockind(\"free\")' doesn't allow uninitialized, zeroed, "
2303 "or aligned modifiers.");
2304 AllocFnKind ZeroedUninit = AllocFnKind::Uninitialized | AllocFnKind::Zeroed;
2305 if ((K & ZeroedUninit) == ZeroedUninit)
2306 CheckFailed("'allockind()' can't be both zeroed and uninitialized");
2307 }
2308
2309 if (Attrs.hasFnAttr(Attribute::VScaleRange)) {
2310 unsigned VScaleMin = Attrs.getFnAttrs().getVScaleRangeMin();
2311 if (VScaleMin == 0)
2312 CheckFailed("'vscale_range' minimum must be greater than 0", V);
2313 else if (!isPowerOf2_32(VScaleMin))
2314 CheckFailed("'vscale_range' minimum must be power-of-two value", V);
2315 std::optional<unsigned> VScaleMax = Attrs.getFnAttrs().getVScaleRangeMax();
2316 if (VScaleMax && VScaleMin > VScaleMax)
2317 CheckFailed("'vscale_range' minimum cannot be greater than maximum", V);
2318 else if (VScaleMax && !isPowerOf2_32(*VScaleMax))
2319 CheckFailed("'vscale_range' maximum must be power-of-two value", V);
2320 }
2321
2322 if (Attrs.hasFnAttr("frame-pointer")) {
2323 StringRef FP = Attrs.getFnAttr("frame-pointer").getValueAsString();
2324 if (FP != "all" && FP != "non-leaf" && FP != "none")
2325 CheckFailed("invalid value for 'frame-pointer' attribute: " + FP, V);
2326 }
2327
2328 // Check EVEX512 feature.
2329 if (MaxParameterWidth >= 512 && Attrs.hasFnAttr("target-features") &&
2330 TT.isX86()) {
2331 StringRef TF = Attrs.getFnAttr("target-features").getValueAsString();
2332 Check(!TF.contains("+avx512f") || !TF.contains("-evex512"),
2333 "512-bit vector arguments require 'evex512' for AVX512", V);
2334 }
2335
2336 checkUnsignedBaseTenFuncAttr(Attrs, "patchable-function-prefix", V);
2337 checkUnsignedBaseTenFuncAttr(Attrs, "patchable-function-entry", V);
2338 checkUnsignedBaseTenFuncAttr(Attrs, "warn-stack-size", V);
2339
2340 if (auto A = Attrs.getFnAttr("sign-return-address"); A.isValid()) {
2341 StringRef S = A.getValueAsString();
2342 if (S != "none" && S != "all" && S != "non-leaf")
2343 CheckFailed("invalid value for 'sign-return-address' attribute: " + S, V);
2344 }
2345
2346 if (auto A = Attrs.getFnAttr("sign-return-address-key"); A.isValid()) {
2347 StringRef S = A.getValueAsString();
2348 if (S != "a_key" && S != "b_key")
2349 CheckFailed("invalid value for 'sign-return-address-key' attribute: " + S,
2350 V);
2351 }
2352
2353 if (auto A = Attrs.getFnAttr("branch-target-enforcement"); A.isValid()) {
2354 StringRef S = A.getValueAsString();
2355 if (S != "true" && S != "false")
2356 CheckFailed(
2357 "invalid value for 'branch-target-enforcement' attribute: " + S, V);
2358 }
2359
2360 if (auto A = Attrs.getFnAttr("vector-function-abi-variant"); A.isValid()) {
2361 StringRef S = A.getValueAsString();
2362 const std::optional<VFInfo> Info = VFABI::tryDemangleForVFABI(S, FT);
2363 if (!Info)
2364 CheckFailed("invalid name for a VFABI variant: " + S, V);
2365 }
2366}
2367
2368void Verifier::verifyFunctionMetadata(
2369 ArrayRef<std::pair<unsigned, MDNode *>> MDs) {
2370 for (const auto &Pair : MDs) {
2371 if (Pair.first == LLVMContext::MD_prof) {
2372 MDNode *MD = Pair.second;
2373 Check(MD->getNumOperands() >= 2,
2374 "!prof annotations should have no less than 2 operands", MD);
2375
2376 // Check first operand.
2377 Check(MD->getOperand(0) != nullptr, "first operand should not be null",
2378 MD);
2379 Check(isa<MDString>(MD->getOperand(0)),
2380 "expected string with name of the !prof annotation", MD);
2381 MDString *MDS = cast<MDString>(MD->getOperand(0));
2382 StringRef ProfName = MDS->getString();
2383 Check(ProfName == "function_entry_count" ||
2384 ProfName == "synthetic_function_entry_count",
2385 "first operand should be 'function_entry_count'"
2386 " or 'synthetic_function_entry_count'",
2387 MD);
2388
2389 // Check second operand.
2390 Check(MD->getOperand(1) != nullptr, "second operand should not be null",
2391 MD);
2392 Check(isa<ConstantAsMetadata>(MD->getOperand(1)),
2393 "expected integer argument to function_entry_count", MD);
2394 } else if (Pair.first == LLVMContext::MD_kcfi_type) {
2395 MDNode *MD = Pair.second;
2396 Check(MD->getNumOperands() == 1,
2397 "!kcfi_type must have exactly one operand", MD);
2398 Check(MD->getOperand(0) != nullptr, "!kcfi_type operand must not be null",
2399 MD);
2400 Check(isa<ConstantAsMetadata>(MD->getOperand(0)),
2401 "expected a constant operand for !kcfi_type", MD);
2402 Constant *C = cast<ConstantAsMetadata>(MD->getOperand(0))->getValue();
2403 Check(isa<ConstantInt>(C) && isa<IntegerType>(C->getType()),
2404 "expected a constant integer operand for !kcfi_type", MD);
2405 Check(cast<ConstantInt>(C)->getBitWidth() == 32,
2406 "expected a 32-bit integer constant operand for !kcfi_type", MD);
2407 }
2408 }
2409}
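// Illustrative sketch (not from this file; names invented): function metadata
// attachments in the shape the checks above expect.
//   define void @f() !prof !0 !kcfi_type !1 { ret void }
//   !0 = !{!"function_entry_count", i64 100}
//   !1 = !{i32 12345678}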
2410
2411void Verifier::visitConstantExprsRecursively(const Constant *EntryC) {
2412 if (!ConstantExprVisited.insert(EntryC).second)
2413 return;
2414
2415 SmallVector<const Constant *, 16> Stack;
2416 Stack.push_back(EntryC);
2417
2418 while (!Stack.empty()) {
2419 const Constant *C = Stack.pop_back_val();
2420
2421 // Check this constant expression.
2422 if (const auto *CE = dyn_cast<ConstantExpr>(C))
2423 visitConstantExpr(CE);
2424
2425 if (const auto *GV = dyn_cast<GlobalValue>(C)) {
2426 // Global Values get visited separately, but we do need to make sure
2427 // that the global value is in the correct module
2428 Check(GV->getParent() == &M, "Referencing global in another module!",
2429 EntryC, &M, GV, GV->getParent());
2430 continue;
2431 }
2432
2433 // Visit all sub-expressions.
2434 for (const Use &U : C->operands()) {
2435 const auto *OpC = dyn_cast<Constant>(U);
2436 if (!OpC)
2437 continue;
2438 if (!ConstantExprVisited.insert(OpC).second)
2439 continue;
2440 Stack.push_back(OpC);
2441 }
2442 }
2443}
2444
2445void Verifier::visitConstantExpr(const ConstantExpr *CE) {
2446 if (CE->getOpcode() == Instruction::BitCast)
2447 Check(CastInst::castIsValid(Instruction::BitCast, CE->getOperand(0),
2448 CE->getType()),
2449 "Invalid bitcast", CE);
2450}
2451
2452bool Verifier::verifyAttributeCount(AttributeList Attrs, unsigned Params) {
2453 // There shouldn't be more attribute sets than there are parameters plus the
2454 // function and return value.
2455 return Attrs.getNumAttrSets() <= Params + 2;
2456}
2457
2458void Verifier::verifyInlineAsmCall(const CallBase &Call) {
2459 const InlineAsm *IA = cast<InlineAsm>(Call.getCalledOperand());
2460 unsigned ArgNo = 0;
2461 unsigned LabelNo = 0;
2462 for (const InlineAsm::ConstraintInfo &CI : IA->ParseConstraints()) {
2463 if (CI.Type == InlineAsm::isLabel) {
2464 ++LabelNo;
2465 continue;
2466 }
2467
2468 // Only deal with constraints that correspond to call arguments.
2469 if (!CI.hasArg())
2470 continue;
2471
2472 if (CI.isIndirect) {
2473 const Value *Arg = Call.getArgOperand(ArgNo);
2474 Check(Arg->getType()->isPointerTy(),
2475 "Operand for indirect constraint must have pointer type", &Call);
2476
2477 Check(Call.getParamElementType(ArgNo),
2478 "Operand for indirect constraint must have elementtype attribute",
2479 &Call);
2480 } else {
2481 Check(!Call.paramHasAttr(ArgNo, Attribute::ElementType),
2482 "Elementtype attribute can only be applied for indirect "
2483 "constraints",
2484 &Call);
2485 }
2486
2487 ArgNo++;
2488 }
2489
2490 if (auto *CallBr = dyn_cast<CallBrInst>(&Call)) {
2491 Check(LabelNo == CallBr->getNumIndirectDests(),
2492 "Number of label constraints does not match number of callbr dests",
2493 &Call);
2494 } else {
2495 Check(LabelNo == 0, "Label constraints can only be used with callbr",
2496 &Call);
2497 }
2498}
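// Illustrative sketch (not from this file; operand names invented): an
// indirect ("*m") inline-asm operand must be a pointer carrying an
// elementtype attribute, e.g.
//   call void asm "movl $1, $0", "=*m,r"(ptr elementtype(i32) %slot, i32 %v)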
2499
2500/// Verify that statepoint intrinsic is well formed.
2501void Verifier::verifyStatepoint(const CallBase &Call) {
2502 assert(Call.getCalledFunction() &&
2503 Call.getCalledFunction()->getIntrinsicID() ==
2504 Intrinsic::experimental_gc_statepoint);
2505
2506 Check(!Call.doesNotAccessMemory() && !Call.onlyReadsMemory() &&
2507 !Call.onlyAccessesArgMemory(),
2508 "gc.statepoint must read and write all memory to preserve "
2509 "reordering restrictions required by safepoint semantics",
2510 Call);
2511
2512 const int64_t NumPatchBytes =
2513 cast<ConstantInt>(Call.getArgOperand(1))->getSExtValue();
2514 assert(isInt<32>(NumPatchBytes) && "NumPatchBytesV is an i32!");
2515 Check(NumPatchBytes >= 0,
2516 "gc.statepoint number of patchable bytes must be "
2517 "positive",
2518 Call);
2519
2520 Type *TargetElemType = Call.getParamElementType(2);
2521 Check(TargetElemType,
2522 "gc.statepoint callee argument must have elementtype attribute", Call);
2523 FunctionType *TargetFuncType = dyn_cast<FunctionType>(TargetElemType);
2524 Check(TargetFuncType,
2525 "gc.statepoint callee elementtype must be function type", Call);
2526
2527 const int NumCallArgs = cast<ConstantInt>(Call.getArgOperand(3))->getZExtValue();
2528 Check(NumCallArgs >= 0,
2529 "gc.statepoint number of arguments to underlying call "
2530 "must be positive",
2531 Call);
2532 const int NumParams = (int)TargetFuncType->getNumParams();
2533 if (TargetFuncType->isVarArg()) {
2534 Check(NumCallArgs >= NumParams,
2535 "gc.statepoint mismatch in number of vararg call args", Call);
2536
2537 // TODO: Remove this limitation
2538 Check(TargetFuncType->getReturnType()->isVoidTy(),
2539 "gc.statepoint doesn't support wrapping non-void "
2540 "vararg functions yet",
2541 Call);
2542 } else
2543 Check(NumCallArgs == NumParams,
2544 "gc.statepoint mismatch in number of call args", Call);
2545
2546 const uint64_t Flags
2547 = cast<ConstantInt>(Call.getArgOperand(4))->getZExtValue();
2548 Check((Flags & ~(uint64_t)StatepointFlags::MaskAll) == 0,
2549 "unknown flag used in gc.statepoint flags argument", Call);
2550
2551 // Verify that the types of the call parameter arguments match
2552 // the type of the wrapped callee.
2553 AttributeList Attrs = Call.getAttributes();
2554 for (int i = 0; i < NumParams; i++) {
2555 Type *ParamType = TargetFuncType->getParamType(i);
2556 Type *ArgType = Call.getArgOperand(5 + i)->getType();
2557 Check(ArgType == ParamType,
2558 "gc.statepoint call argument does not match wrapped "
2559 "function type",
2560 Call);
2561
2562 if (TargetFuncType->isVarArg()) {
2563 AttributeSet ArgAttrs = Attrs.getParamAttrs(5 + i);
2564 Check(!ArgAttrs.hasAttribute(Attribute::StructRet),
2565 "Attribute 'sret' cannot be used for vararg call arguments!", Call);
2566 }
2567 }
2568
2569 const int EndCallArgsInx = 4 + NumCallArgs;
2570
2571 const Value *NumTransitionArgsV = Call.getArgOperand(EndCallArgsInx + 1);
2572 Check(isa<ConstantInt>(NumTransitionArgsV),
2573 "gc.statepoint number of transition arguments "
2574 "must be constant integer",
2575 Call);
2576 const int NumTransitionArgs =
2577 cast<ConstantInt>(NumTransitionArgsV)->getZExtValue();
2578 Check(NumTransitionArgs == 0,
2579 "gc.statepoint w/inline transition bundle is deprecated", Call);
2580 const int EndTransitionArgsInx = EndCallArgsInx + 1 + NumTransitionArgs;
2581
2582 const Value *NumDeoptArgsV = Call.getArgOperand(EndTransitionArgsInx + 1);
2583 Check(isa<ConstantInt>(NumDeoptArgsV),
2584 "gc.statepoint number of deoptimization arguments "
2585 "must be constant integer",
2586 Call);
2587 const int NumDeoptArgs = cast<ConstantInt>(NumDeoptArgsV)->getZExtValue();
2588 Check(NumDeoptArgs == 0,
2589 "gc.statepoint w/inline deopt operands is deprecated", Call);
2590
2591 const int ExpectedNumArgs = 7 + NumCallArgs;
2592 Check(ExpectedNumArgs == (int)Call.arg_size(),
2593 "gc.statepoint too many arguments", Call);
2594
2595 // Check that the only uses of this gc.statepoint are gc.result or
2596 // gc.relocate calls which are tied to this statepoint and thus part
2597 // of the same statepoint sequence
2598 for (const User *U : Call.users()) {
2599 const CallInst *UserCall = dyn_cast<const CallInst>(U);
2600 Check(UserCall, "illegal use of statepoint token", Call, U);
2601 if (!UserCall)
2602 continue;
2603 Check(isa<GCRelocateInst>(UserCall) || isa<GCResultInst>(UserCall),
2604 "gc.result or gc.relocate are the only value uses "
2605 "of a gc.statepoint",
2606 Call, U);
2607 if (isa<GCResultInst>(UserCall)) {
2608 Check(UserCall->getArgOperand(0) == &Call,
2609 "gc.result connected to wrong gc.statepoint", Call, UserCall);
2610 } else if (isa<GCRelocateInst>(Call)) {
2611 Check(UserCall->getArgOperand(0) == &Call,
2612 "gc.relocate connected to wrong gc.statepoint", Call, UserCall);
2613 }
2614 }
2615
2616 // Note: It is legal for a single derived pointer to be listed multiple
2617 // times. It's non-optimal, but it is legal. It can also happen after
2618 // insertion if we strip a bitcast away.
2619 // Note: It is really tempting to check that each base is relocated and
2620 // that a derived pointer is never reused as a base pointer. This turns
2621 // out to be problematic since optimizations run after safepoint insertion
2622 // can recognize equality properties that the insertion logic doesn't know
2623 // about. See example statepoint.ll in the verifier subdirectory
2624}
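// Illustrative summary (not from this file) of the operand layout verified
// above: gc.statepoint takes (id, number of patch bytes, elementtype-annotated
// callee, number of call arguments, flags), then the wrapped call's arguments,
// then the deprecated transition and deoptimization argument counts, which
// must both be zero -- i.e. 7 fixed operands plus the call arguments.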
2625
2626void Verifier::verifyFrameRecoverIndices() {
2627 for (auto &Counts : FrameEscapeInfo) {
2628 Function *F = Counts.first;
2629 unsigned EscapedObjectCount = Counts.second.first;
2630 unsigned MaxRecoveredIndex = Counts.second.second;
2631 Check(MaxRecoveredIndex <= EscapedObjectCount,
2632 "all indices passed to llvm.localrecover must be less than the "
2633 "number of arguments passed to llvm.localescape in the parent "
2634 "function",
2635 F);
2636 }
2637}
2638
2639static Instruction *getSuccPad(Instruction *Terminator) {
2640 BasicBlock *UnwindDest;
2641 if (auto *II = dyn_cast<InvokeInst>(Terminator))
2642 UnwindDest = II->getUnwindDest();
2643 else if (auto *CSI = dyn_cast<CatchSwitchInst>(Terminator))
2644 UnwindDest = CSI->getUnwindDest();
2645 else
2646 UnwindDest = cast<CleanupReturnInst>(Terminator)->getUnwindDest();
2647 return UnwindDest->getFirstNonPHI();
2648}
2649
2650void Verifier::verifySiblingFuncletUnwinds() {
2651 SmallPtrSet<Instruction *, 8> Visited;
2652 SmallPtrSet<Instruction *, 8> Active;
2653 for (const auto &Pair : SiblingFuncletInfo) {
2654 Instruction *PredPad = Pair.first;
2655 if (Visited.count(PredPad))
2656 continue;
2657 Active.insert(PredPad);
2658 Instruction *Terminator = Pair.second;
2659 do {
2660 Instruction *SuccPad = getSuccPad(Terminator);
2661 if (Active.count(SuccPad)) {
2662 // Found a cycle; report error
2663 Instruction *CyclePad = SuccPad;
2664 SmallVector<Instruction *, 8> CycleNodes;
2665 do {
2666 CycleNodes.push_back(CyclePad);
2667 Instruction *CycleTerminator = SiblingFuncletInfo[CyclePad];
2668 if (CycleTerminator != CyclePad)
2669 CycleNodes.push_back(CycleTerminator);
2670 CyclePad = getSuccPad(CycleTerminator);
2671 } while (CyclePad != SuccPad);
2672 Check(false, "EH pads can't handle each other's exceptions",
2673 ArrayRef<Instruction *>(CycleNodes));
2674 }
2675 // Don't re-walk a node we've already checked
2676 if (!Visited.insert(SuccPad).second)
2677 break;
2678 // Walk to this successor if it has a map entry.
2679 PredPad = SuccPad;
2680 auto TermI = SiblingFuncletInfo.find(PredPad);
2681 if (TermI == SiblingFuncletInfo.end())
2682 break;
2683 Terminator = TermI->second;
2684 Active.insert(PredPad);
2685 } while (true);
2686 // Each node only has one successor, so we've walked all the active
2687 // nodes' successors.
2688 Active.clear();
2689 }
2690}
2691
2692// visitFunction - Verify that a function is ok.
2693//
2694void Verifier::visitFunction(const Function &F) {
2695 visitGlobalValue(F);
2696
2697 // Check function arguments.
2698 FunctionType *FT = F.getFunctionType();
2699 unsigned NumArgs = F.arg_size();
2700
2701 Check(&Context == &F.getContext(),
2702 "Function context does not match Module context!", &F);
2703
2704 Check(!F.hasCommonLinkage(), "Functions may not have common linkage", &F);
2705 Check(FT->getNumParams() == NumArgs,
2706 "# formal arguments must match # of arguments for function type!", &F,
2707 FT);
2708 Check(F.getReturnType()->isFirstClassType() ||
2709 F.getReturnType()->isVoidTy() || F.getReturnType()->isStructTy(),
2710 "Functions cannot return aggregate values!", &F);
2711
2712 Check(!F.hasStructRetAttr() || F.getReturnType()->isVoidTy(),
2713 "Invalid struct return type!", &F);
2714
2715 AttributeList Attrs = F.getAttributes();
2716
2717 Check(verifyAttributeCount(Attrs, FT->getNumParams()),
2718 "Attribute after last parameter!", &F);
2719
2720 CheckDI(F.IsNewDbgInfoFormat == F.getParent()->IsNewDbgInfoFormat,
2721 "Function debug format should match parent module", &F,
2722 F.IsNewDbgInfoFormat, F.getParent(),
2723 F.getParent()->IsNewDbgInfoFormat);
2724
2725 bool IsIntrinsic = F.isIntrinsic();
2726
2727 // Check function attributes.
2728 verifyFunctionAttrs(FT, Attrs, &F, IsIntrinsic, /* IsInlineAsm */ false);
2729
2730 // On function declarations/definitions, we do not support the builtin
2731 // attribute. We do not check this in VerifyFunctionAttrs since that is
2732 // checking for Attributes that can/can not ever be on functions.
2733 Check(!Attrs.hasFnAttr(Attribute::Builtin),
2734 "Attribute 'builtin' can only be applied to a callsite.", &F);
2735
2736 Check(!Attrs.hasAttrSomewhere(Attribute::ElementType),
2737 "Attribute 'elementtype' can only be applied to a callsite.", &F);
2738
2739 // Check that this function meets the restrictions on this calling convention.
2740 // Sometimes varargs is used for perfectly forwarding thunks, so some of these
2741 // restrictions can be lifted.
2742 switch (F.getCallingConv()) {
2743 default:
2744 case CallingConv::C:
2745 break;
2746 case CallingConv::X86_INTR: {
2747 Check(F.arg_empty() || Attrs.hasParamAttr(0, Attribute::ByVal),
2748 "Calling convention parameter requires byval", &F);
2749 break;
2750 }
2751 case CallingConv::AMDGPU_KERNEL:
2752 case CallingConv::SPIR_KERNEL:
2753 case CallingConv::AMDGPU_CS_Chain:
2754 case CallingConv::AMDGPU_CS_ChainPreserve:
2755 Check(F.getReturnType()->isVoidTy(),
2756 "Calling convention requires void return type", &F);
2757 [[fallthrough]];
2758 case CallingConv::AMDGPU_VS:
2759 case CallingConv::AMDGPU_HS:
2760 case CallingConv::AMDGPU_GS:
2761 case CallingConv::AMDGPU_PS:
2762 case CallingConv::AMDGPU_CS:
2763 Check(!F.hasStructRetAttr(), "Calling convention does not allow sret", &F);
2764 if (F.getCallingConv() != CallingConv::SPIR_KERNEL) {
2765 const unsigned StackAS = DL.getAllocaAddrSpace();
2766 unsigned i = 0;
2767 for (const Argument &Arg : F.args()) {
2768 Check(!Attrs.hasParamAttr(i, Attribute::ByVal),
2769 "Calling convention disallows byval", &F);
2770 Check(!Attrs.hasParamAttr(i, Attribute::Preallocated),
2771 "Calling convention disallows preallocated", &F);
2772 Check(!Attrs.hasParamAttr(i, Attribute::InAlloca),
2773 "Calling convention disallows inalloca", &F);
2774
2775 if (Attrs.hasParamAttr(i, Attribute::ByRef)) {
2776 // FIXME: Should also disallow LDS and GDS, but we don't have the enum
2777 // value here.
2778 Check(Arg.getType()->getPointerAddressSpace() != StackAS,
2779 "Calling convention disallows stack byref", &F);
2780 }
2781
2782 ++i;
2783 }
2784 }
2785
2786 [[fallthrough]];
2787 case CallingConv::Fast:
2788 case CallingConv::Cold:
2789 case CallingConv::Intel_OCL_BI:
2790 case CallingConv::PTX_Kernel:
2791 case CallingConv::PTX_Device:
2792 Check(!F.isVarArg(),
2793 "Calling convention does not support varargs or "
2794 "perfect forwarding!",
2795 &F);
2796 break;
2797 }
2798
2799 // Check that the argument values match the function type for this function...
2800 unsigned i = 0;
2801 for (const Argument &Arg : F.args()) {
2802 Check(Arg.getType() == FT->getParamType(i),
2803 "Argument value does not match function argument type!", &Arg,
2804 FT->getParamType(i));
2805 Check(Arg.getType()->isFirstClassType(),
2806 "Function arguments must have first-class types!", &Arg);
2807 if (!IsIntrinsic) {
2808 Check(!Arg.getType()->isMetadataTy(),
2809 "Function takes metadata but isn't an intrinsic", &Arg, &F);
2810 Check(!Arg.getType()->isTokenTy(),
2811 "Function takes token but isn't an intrinsic", &Arg, &F);
2812 Check(!Arg.getType()->isX86_AMXTy(),
2813 "Function takes x86_amx but isn't an intrinsic", &Arg, &F);
2814 }
2815
2816 // Check that swifterror argument is only used by loads and stores.
2817 if (Attrs.hasParamAttr(i, Attribute::SwiftError)) {
2818 verifySwiftErrorValue(&Arg);
2819 }
2820 ++i;
2821 }
2822
2823 if (!IsIntrinsic) {
2824 Check(!F.getReturnType()->isTokenTy(),
2825 "Function returns a token but isn't an intrinsic", &F);
2826 Check(!F.getReturnType()->isX86_AMXTy(),
2827 "Function returns a x86_amx but isn't an intrinsic", &F);
2828 }
2829
2830 // Get the function metadata attachments.
2831 SmallVector<std::pair<unsigned, MDNode *>, 4> MDs;
2832 F.getAllMetadata(MDs);
2833 assert(F.hasMetadata() != MDs.empty() && "Bit out-of-sync");
2834 verifyFunctionMetadata(MDs);
2835
2836 // Check validity of the personality function
2837 if (F.hasPersonalityFn()) {
2838 auto *Per = dyn_cast<Function>(F.getPersonalityFn()->stripPointerCasts());
2839 if (Per)
2840 Check(Per->getParent() == F.getParent(),
2841 "Referencing personality function in another module!", &F,
2842 F.getParent(), Per, Per->getParent());
2843 }
2844
2845 // EH funclet coloring can be expensive, recompute on-demand
2846 BlockEHFuncletColors.clear();
2847
2848 if (F.isMaterializable()) {
2849 // Function has a body somewhere we can't see.
2850 Check(MDs.empty(), "unmaterialized function cannot have metadata", &F,
2851 MDs.empty() ? nullptr : MDs.front().second);
2852 } else if (F.isDeclaration()) {
2853 for (const auto &I : MDs) {
2854 // This is used for call site debug information.
2855 CheckDI(I.first != LLVMContext::MD_dbg ||
2856 !cast<DISubprogram>(I.second)->isDistinct(),
2857 "function declaration may only have a unique !dbg attachment",
2858 &F);
2859 Check(I.first != LLVMContext::MD_prof,
2860 "function declaration may not have a !prof attachment", &F);
2861
2862 // Verify the metadata itself.
2863 visitMDNode(*I.second, AreDebugLocsAllowed::Yes);
2864 }
2865 Check(!F.hasPersonalityFn(),
2866 "Function declaration shouldn't have a personality routine", &F);
2867 } else {
2868 // Verify that this function (which has a body) is not named "llvm.*". It
2869 // is not legal to define intrinsics.
2870 Check(!IsIntrinsic, "llvm intrinsics cannot be defined!", &F);
2871
2872 // Check the entry node
2873 const BasicBlock *Entry = &F.getEntryBlock();
2874 Check(pred_empty(Entry),
2875 "Entry block to function must not have predecessors!", Entry);
2876
2877 // The address of the entry block cannot be taken, unless it is dead.
2878 if (Entry->hasAddressTaken()) {
2879 Check(!BlockAddress::lookup(Entry)->isConstantUsed(),
2880 "blockaddress may not be used with the entry block!", Entry);
2881 }
2882
2883 unsigned NumDebugAttachments = 0, NumProfAttachments = 0,
2884 NumKCFIAttachments = 0;
2885 // Visit metadata attachments.
2886 for (const auto &I : MDs) {
2887 // Verify that the attachment is legal.
2888 auto AllowLocs = AreDebugLocsAllowed::No;
2889 switch (I.first) {
2890 default:
2891 break;
2892 case LLVMContext::MD_dbg: {
2893 ++NumDebugAttachments;
2894 CheckDI(NumDebugAttachments == 1,
2895 "function must have a single !dbg attachment", &F, I.second);
2896 CheckDI(isa<DISubprogram>(I.second),
2897 "function !dbg attachment must be a subprogram", &F, I.second);
2898 CheckDI(cast<DISubprogram>(I.second)->isDistinct(),
2899 "function definition may only have a distinct !dbg attachment",
2900 &F);
2901
2902 auto *SP = cast<DISubprogram>(I.second);
2903 const Function *&AttachedTo = DISubprogramAttachments[SP];
2904 CheckDI(!AttachedTo || AttachedTo == &F,
2905 "DISubprogram attached to more than one function", SP, &F);
2906 AttachedTo = &F;
2907 AllowLocs = AreDebugLocsAllowed::Yes;
2908 break;
2909 }
2910 case LLVMContext::MD_prof:
2911 ++NumProfAttachments;
2912 Check(NumProfAttachments == 1,
2913 "function must have a single !prof attachment", &F, I.second);
2914 break;
2915 case LLVMContext::MD_kcfi_type:
2916 ++NumKCFIAttachments;
2917 Check(NumKCFIAttachments == 1,
2918 "function must have a single !kcfi_type attachment", &F,
2919 I.second);
2920 break;
2921 }
2922
2923 // Verify the metadata itself.
2924 visitMDNode(*I.second, AllowLocs);
2925 }
2926 }
2927
2928 // If this function is actually an intrinsic, verify that it is only used in
2929 // direct call/invokes, never having its "address taken".
2930 // Only do this if the module is materialized, otherwise we don't have all the
2931 // uses.
2932 if (F.isIntrinsic() && F.getParent()->isMaterialized()) {
2933 const User *U;
2934 if (F.hasAddressTaken(&U, false, true, false,
2935 /*IgnoreARCAttachedCall=*/true))
2936 Check(false, "Invalid user of intrinsic instruction!", U);
2937 }
2938
2939 // Check intrinsics' signatures.
2940 switch (F.getIntrinsicID()) {
2941 case Intrinsic::experimental_gc_get_pointer_base: {
2942 FunctionType *FT = F.getFunctionType();
2943 Check(FT->getNumParams() == 1, "wrong number of parameters", F);
2944 Check(isa<PointerType>(F.getReturnType()),
2945 "gc.get.pointer.base must return a pointer", F);
2946 Check(FT->getParamType(0) == F.getReturnType(),
2947 "gc.get.pointer.base operand and result must be of the same type", F);
2948 break;
2949 }
2950 case Intrinsic::experimental_gc_get_pointer_offset: {
2951 FunctionType *FT = F.getFunctionType();
2952 Check(FT->getNumParams() == 1, "wrong number of parameters", F);
2953 Check(isa<PointerType>(FT->getParamType(0)),
2954 "gc.get.pointer.offset operand must be a pointer", F);
2955 Check(F.getReturnType()->isIntegerTy(),
2956 "gc.get.pointer.offset must return integer", F);
2957 break;
2958 }
2959 }
2960
2961 auto *N = F.getSubprogram();
2962 HasDebugInfo = (N != nullptr);
2963 if (!HasDebugInfo)
2964 return;
2965
2966 // Check that all !dbg attachments lead back to N.
2967 //
2968 // FIXME: Check this incrementally while visiting !dbg attachments.
2969 // FIXME: Only check when N is the canonical subprogram for F.
2970 SmallPtrSet<const MDNode *, 32> Seen;
2971 auto VisitDebugLoc = [&](const Instruction &I, const MDNode *Node) {
2972 // Be careful about using DILocation here since we might be dealing with
2973 // broken code (this is the Verifier after all).
2974 const DILocation *DL = dyn_cast_or_null<DILocation>(Node);
2975 if (!DL)
2976 return;
2977 if (!Seen.insert(DL).second)
2978 return;
2979
2980 Metadata *Parent = DL->getRawScope();
2981 CheckDI(Parent && isa<DILocalScope>(Parent),
2982 "DILocation's scope must be a DILocalScope", N, &F, &I, DL, Parent);
2983
2984 DILocalScope *Scope = DL->getInlinedAtScope();
2985 Check(Scope, "Failed to find DILocalScope", DL);
2986
2987 if (!Seen.insert(Scope).second)
2988 return;
2989
2990 DISubprogram *SP = Scope->getSubprogram();
2991
2992 // Scope and SP could be the same MDNode and we don't want to skip
2993 // validation in that case
2994 if (SP && ((Scope != SP) && !Seen.insert(SP).second))
2995 return;
2996
2997 CheckDI(SP->describes(&F),
2998 "!dbg attachment points at wrong subprogram for function", N, &F,
2999 &I, DL, Scope, SP);
3000 };
3001 for (auto &BB : F)
3002 for (auto &I : BB) {
3003 VisitDebugLoc(I, I.getDebugLoc().getAsMDNode());
3004 // The llvm.loop annotations also contain two DILocations.
3005 if (auto MD = I.getMetadata(LLVMContext::MD_loop))
3006 for (unsigned i = 1; i < MD->getNumOperands(); ++i)
3007 VisitDebugLoc(I, dyn_cast_or_null<MDNode>(MD->getOperand(i)));
3008 if (BrokenDebugInfo)
3009 return;
3010 }
3011}
3012
3013// verifyBasicBlock - Verify that a basic block is well formed...
3014//
3015void Verifier::visitBasicBlock(BasicBlock &BB) {
3016 InstsInThisBlock.clear();
3017 ConvergenceVerifyHelper.visit(BB);
3018
3019 // Ensure that basic blocks have terminators!
3020 Check(BB.getTerminator(), "Basic Block does not have terminator!", &BB);
3021
3022 // Check constraints that this basic block imposes on all of the PHI nodes in
3023 // it.
3024 if (isa<PHINode>(BB.front())) {
3025 SmallVector<BasicBlock *, 8> Preds(predecessors(&BB));
3026 SmallVector<std::pair<BasicBlock*, Value*>, 8> Values;
3027 llvm::sort(Preds);
3028 for (const PHINode &PN : BB.phis()) {
3029 Check(PN.getNumIncomingValues() == Preds.size(),
3030 "PHINode should have one entry for each predecessor of its "
3031 "parent basic block!",
3032 &PN);
3033
3034 // Get and sort all incoming values in the PHI node...
3035 Values.clear();
3036 Values.reserve(PN.getNumIncomingValues());
3037 for (unsigned i = 0, e = PN.getNumIncomingValues(); i != e; ++i)
3038 Values.push_back(
3039 std::make_pair(PN.getIncomingBlock(i), PN.getIncomingValue(i)));
3040 llvm::sort(Values);
3041
3042 for (unsigned i = 0, e = Values.size(); i != e; ++i) {
3043 // Check to make sure that if there is more than one entry for a
3044 // particular basic block in this PHI node, that the incoming values are
3045 // all identical.
3046 //
3047 Check(i == 0 || Values[i].first != Values[i - 1].first ||
3048 Values[i].second == Values[i - 1].second,
3049 "PHI node has multiple entries for the same basic block with "
3050 "different incoming values!",
3051 &PN, Values[i].first, Values[i].second, Values[i - 1].second);
3052
3053 // Check to make sure that the predecessors and PHI node entries are
3054 // matched up.
3055 Check(Values[i].first == Preds[i],
3056 "PHI node entries do not match predecessors!", &PN,
3057 Values[i].first, Preds[i]);
3058 }
3059 }
3060 }
3061
3062 // Check that all instructions have their parent pointers set up correctly.
3063 for (auto &I : BB)
3064 {
3065 Check(I.getParent() == &BB, "Instruction has bogus parent pointer!");
3066 }
3067
3068 CheckDI(BB.IsNewDbgInfoFormat == BB.getParent()->IsNewDbgInfoFormat,
3069 "BB debug format should match parent function", &BB,
3070 BB.IsNewDbgInfoFormat, BB.getParent(),
3071 BB.getParent()->IsNewDbgInfoFormat);
3072
3073 // Confirm that no issues arise from the debug program.
3074 if (BB.IsNewDbgInfoFormat)
3075 CheckDI(!BB.getTrailingDbgRecords(), "Basic Block has trailing DbgRecords!",
3076 &BB);
3077}
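// Illustrative sketch (not from this file; value names invented): the PHI
// checks above require exactly one entry per predecessor, matched by block.
//   loop:
//     %i = phi i32 [ 0, %entry ], [ %i.next, %loop ]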
3078
3079void Verifier::visitTerminator(Instruction &I) {
3080 // Ensure that terminators only exist at the end of the basic block.
3081 Check(&I == I.getParent()->getTerminator(),
3082 "Terminator found in the middle of a basic block!", I.getParent());
3083 visitInstruction(I);
3084}
3085
3086void Verifier::visitBranchInst(BranchInst &BI) {
3087 if (BI.isConditional()) {
3089 "Branch condition is not 'i1' type!", &BI, BI.getCondition());
3090 }
3091 visitTerminator(BI);
3092}
3093
3094void Verifier::visitReturnInst(ReturnInst &RI) {
3095 Function *F = RI.getParent()->getParent();
3096 unsigned N = RI.getNumOperands();
3097 if (F->getReturnType()->isVoidTy())
3098 Check(N == 0,
3099 "Found return instr that returns non-void in Function of void "
3100 "return type!",
3101 &RI, F->getReturnType());
3102 else
3103 Check(N == 1 && F->getReturnType() == RI.getOperand(0)->getType(),
3104 "Function return type does not match operand "
3105 "type of return inst!",
3106 &RI, F->getReturnType());
3107
3108 // Check to make sure that the return value has necessary properties for
3109 // terminators...
3110 visitTerminator(RI);
3111}
3112
3113void Verifier::visitSwitchInst(SwitchInst &SI) {
3114 Check(SI.getType()->isVoidTy(), "Switch must have void result type!", &SI);
3115 // Check to make sure that all of the constants in the switch instruction
3116 // have the same type as the switched-on value.
3117 Type *SwitchTy = SI.getCondition()->getType();
3118 SmallPtrSet<ConstantInt *, 32> Constants;
3119 for (auto &Case : SI.cases()) {
3120 Check(isa<ConstantInt>(SI.getOperand(Case.getCaseIndex() * 2 + 2)),
3121 "Case value is not a constant integer.", &SI);
3122 Check(Case.getCaseValue()->getType() == SwitchTy,
3123 "Switch constants must all be same type as switch value!", &SI);
3124 Check(Constants.insert(Case.getCaseValue()).second,
3125 "Duplicate integer as switch case", &SI, Case.getCaseValue());
3126 }
3127
3128 visitTerminator(SI);
3129}
3130
3131void Verifier::visitIndirectBrInst(IndirectBrInst &BI) {
3133 "Indirectbr operand must have pointer type!", &BI);
3134 for (unsigned i = 0, e = BI.getNumDestinations(); i != e; ++i)
3136 "Indirectbr destinations must all have pointer type!", &BI);
3137
3138 visitTerminator(BI);
3139}
3140
3141void Verifier::visitCallBrInst(CallBrInst &CBI) {
3142 Check(CBI.isInlineAsm(), "Callbr is currently only used for asm-goto!", &CBI);
3143 const InlineAsm *IA = cast<InlineAsm>(CBI.getCalledOperand());
3144 Check(!IA->canThrow(), "Unwinding from Callbr is not allowed");
3145
3146 verifyInlineAsmCall(CBI);
3147 visitTerminator(CBI);
3148}
3149
3150void Verifier::visitSelectInst(SelectInst &SI) {
3151 Check(!SelectInst::areInvalidOperands(SI.getOperand(0), SI.getOperand(1),
3152 SI.getOperand(2)),
3153 "Invalid operands for select instruction!", &SI);
3154
3155 Check(SI.getTrueValue()->getType() == SI.getType(),
3156 "Select values must have same type as select instruction!", &SI);
3157 visitInstruction(SI);
3158}
3159
3160/// visitUserOp1 - User-defined operators shouldn't live beyond the lifetime of
3161/// a pass; if any exist, it's an error.
3162///
3163void Verifier::visitUserOp1(Instruction &I) {
3164 Check(false, "User-defined operators should not live outside of a pass!", &I);
3165}
3166
3167void Verifier::visitTruncInst(TruncInst &I) {
3168 // Get the source and destination types
3169 Type *SrcTy = I.getOperand(0)->getType();
3170 Type *DestTy = I.getType();
3171
3172 // Get the size of the types in bits, we'll need this later
3173 unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
3174 unsigned DestBitSize = DestTy->getScalarSizeInBits();
3175
3176 Check(SrcTy->isIntOrIntVectorTy(), "Trunc only operates on integer", &I);
3177 Check(DestTy->isIntOrIntVectorTy(), "Trunc only produces integer", &I);
3178 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(),
3179 "trunc source and destination must both be a vector or neither", &I);
3180 Check(SrcBitSize > DestBitSize, "DestTy too big for Trunc", &I);
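 // For example, '%t = trunc i32 %x to i16' is well formed, while
 // '%t = trunc i16 %x to i32' is rejected because trunc may only narrow.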
3181
3182 visitInstruction(I);
3183}
3184
3185void Verifier::visitZExtInst(ZExtInst &I) {
3186 // Get the source and destination types
3187 Type *SrcTy = I.getOperand(0)->getType();
3188 Type *DestTy = I.getType();
3189
3190 // Get the size of the types in bits, we'll need this later
3191 Check(SrcTy->isIntOrIntVectorTy(), "ZExt only operates on integer", &I);
3192 Check(DestTy->isIntOrIntVectorTy(), "ZExt only produces an integer", &I);
3193 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(),
3194 "zext source and destination must both be a vector or neither", &I);
3195 unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
3196 unsigned DestBitSize = DestTy->getScalarSizeInBits();
3197
3198 Check(SrcBitSize < DestBitSize, "Type too small for ZExt", &I);
3199
3200 visitInstruction(I);
3201}
3202
3203void Verifier::visitSExtInst(SExtInst &I) {
3204 // Get the source and destination types
3205 Type *SrcTy = I.getOperand(0)->getType();
3206 Type *DestTy = I.getType();
3207
3208 // Get the size of the types in bits, we'll need this later
3209 unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
3210 unsigned DestBitSize = DestTy->getScalarSizeInBits();
3211
3212 Check(SrcTy->isIntOrIntVectorTy(), "SExt only operates on integer", &I);
3213 Check(DestTy->isIntOrIntVectorTy(), "SExt only produces an integer", &I);
3214 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(),
3215 "sext source and destination must both be a vector or neither", &I);
3216 Check(SrcBitSize < DestBitSize, "Type too small for SExt", &I);
3217
3218 visitInstruction(I);
3219}
3220
3221void Verifier::visitFPTruncInst(FPTruncInst &I) {
3222 // Get the source and destination types
3223 Type *SrcTy = I.getOperand(0)->getType();
3224 Type *DestTy = I.getType();
3225 // Get the size of the types in bits, we'll need this later
3226 unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
3227 unsigned DestBitSize = DestTy->getScalarSizeInBits();
3228
3229 Check(SrcTy->isFPOrFPVectorTy(), "FPTrunc only operates on FP", &I);
3230 Check(DestTy->isFPOrFPVectorTy(), "FPTrunc only produces an FP", &I);
3231 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(),
3232 "fptrunc source and destination must both be a vector or neither", &I);
3233 Check(SrcBitSize > DestBitSize, "DestTy too big for FPTrunc", &I);
3234
3235 visitInstruction(I);
3236}
3237
3238void Verifier::visitFPExtInst(FPExtInst &I) {
3239 // Get the source and destination types
3240 Type *SrcTy = I.getOperand(0)->getType();
3241 Type *DestTy = I.getType();
3242
3243 // Get the size of the types in bits, we'll need this later
3244 unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
3245 unsigned DestBitSize = DestTy->getScalarSizeInBits();
3246
3247 Check(SrcTy->isFPOrFPVectorTy(), "FPExt only operates on FP", &I);
3248 Check(DestTy->isFPOrFPVectorTy(), "FPExt only produces an FP", &I);
3249 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(),
3250 "fpext source and destination must both be a vector or neither", &I);
3251 Check(SrcBitSize < DestBitSize, "DestTy too small for FPExt", &I);
3252
3253 visitInstruction(I);
3254}
3255
3256void Verifier::visitUIToFPInst(UIToFPInst &I) {
3257 // Get the source and destination types
3258 Type *SrcTy = I.getOperand(0)->getType();
3259 Type *DestTy = I.getType();
3260
3261 bool SrcVec = SrcTy->isVectorTy();
3262 bool DstVec = DestTy->isVectorTy();
3263
3264 Check(SrcVec == DstVec,
3265 "UIToFP source and dest must both be vector or scalar", &I);
3266 Check(SrcTy->isIntOrIntVectorTy(),
3267 "UIToFP source must be integer or integer vector", &I);
3268 Check(DestTy->isFPOrFPVectorTy(), "UIToFP result must be FP or FP vector",
3269 &I);
3270
3271 if (SrcVec && DstVec)
3272 Check(cast<VectorType>(SrcTy)->getElementCount() ==
3273 cast<VectorType>(DestTy)->getElementCount(),
3274 "UIToFP source and dest vector length mismatch", &I);
3275
3276 visitInstruction(I);
3277}
3278
3279void Verifier::visitSIToFPInst(SIToFPInst &I) {
3280 // Get the source and destination types
3281 Type *SrcTy = I.getOperand(0)->getType();
3282 Type *DestTy = I.getType();
3283
3284 bool SrcVec = SrcTy->isVectorTy();
3285 bool DstVec = DestTy->isVectorTy();
3286
3287 Check(SrcVec == DstVec,
3288 "SIToFP source and dest must both be vector or scalar", &I);
3289 Check(SrcTy->isIntOrIntVectorTy(),
3290 "SIToFP source must be integer or integer vector", &I);
3291 Check(DestTy->isFPOrFPVectorTy(), "SIToFP result must be FP or FP vector",
3292 &I);
3293
3294 if (SrcVec && DstVec)
3295 Check(cast<VectorType>(SrcTy)->getElementCount() ==
3296 cast<VectorType>(DestTy)->getElementCount(),
3297 "SIToFP source and dest vector length mismatch", &I);
3298
3299 visitInstruction(I);
3300}
3301
3302void Verifier::visitFPToUIInst(FPToUIInst &I) {
3303 // Get the source and destination types
3304 Type *SrcTy = I.getOperand(0)->getType();
3305 Type *DestTy = I.getType();
3306
3307 bool SrcVec = SrcTy->isVectorTy();
3308 bool DstVec = DestTy->isVectorTy();
3309
3310 Check(SrcVec == DstVec,
3311 "FPToUI source and dest must both be vector or scalar", &I);
3312 Check(SrcTy->isFPOrFPVectorTy(), "FPToUI source must be FP or FP vector", &I);
3313 Check(DestTy->isIntOrIntVectorTy(),
3314 "FPToUI result must be integer or integer vector", &I);
3315
3316 if (SrcVec && DstVec)
3317 Check(cast<VectorType>(SrcTy)->getElementCount() ==
3318 cast<VectorType>(DestTy)->getElementCount(),
3319 "FPToUI source and dest vector length mismatch", &I);
3320
3321 visitInstruction(I);
3322}
3323
3324void Verifier::visitFPToSIInst(FPToSIInst &I) {
3325 // Get the source and destination types
3326 Type *SrcTy = I.getOperand(0)->getType();
3327 Type *DestTy = I.getType();
3328
3329 bool SrcVec = SrcTy->isVectorTy();
3330 bool DstVec = DestTy->isVectorTy();
3331
3332 Check(SrcVec == DstVec,
3333 "FPToSI source and dest must both be vector or scalar", &I);
3334 Check(SrcTy->isFPOrFPVectorTy(), "FPToSI source must be FP or FP vector", &I);
3335 Check(DestTy->isIntOrIntVectorTy(),
3336 "FPToSI result must be integer or integer vector", &I);
3337
3338 if (SrcVec && DstVec)
3339 Check(cast<VectorType>(SrcTy)->getElementCount() ==
3340 cast<VectorType>(DestTy)->getElementCount(),
3341 "FPToSI source and dest vector length mismatch", &I);
3342
3343 visitInstruction(I);
3344}
3345
3346void Verifier::visitPtrToIntInst(PtrToIntInst &I) {
3347 // Get the source and destination types
3348 Type *SrcTy = I.getOperand(0)->getType();
3349 Type *DestTy = I.getType();
3350
3351 Check(SrcTy->isPtrOrPtrVectorTy(), "PtrToInt source must be pointer", &I);
3352
3353 Check(DestTy->isIntOrIntVectorTy(), "PtrToInt result must be integral", &I);
3354 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(), "PtrToInt type mismatch",
3355 &I);
3356
3357 if (SrcTy->isVectorTy()) {
3358 auto *VSrc = cast<VectorType>(SrcTy);
3359 auto *VDest = cast<VectorType>(DestTy);
3360 Check(VSrc->getElementCount() == VDest->getElementCount(),
3361 "PtrToInt Vector width mismatch", &I);
3362 }
3363
3364 visitInstruction(I);
3365}
3366
3367void Verifier::visitIntToPtrInst(IntToPtrInst &I) {
3368 // Get the source and destination types
3369 Type *SrcTy = I.getOperand(0)->getType();
3370 Type *DestTy = I.getType();
3371
3372 Check(SrcTy->isIntOrIntVectorTy(), "IntToPtr source must be an integral", &I);
3373 Check(DestTy->isPtrOrPtrVectorTy(), "IntToPtr result must be a pointer", &I);
3374
3375 Check(SrcTy->isVectorTy() == DestTy->isVectorTy(), "IntToPtr type mismatch",
3376 &I);
3377 if (SrcTy->isVectorTy()) {
3378 auto *VSrc = cast<VectorType>(SrcTy);
3379 auto *VDest = cast<VectorType>(DestTy);
3380 Check(VSrc->getElementCount() == VDest->getElementCount(),
3381 "IntToPtr Vector width mismatch", &I);
3382 }
3383 visitInstruction(I);
3384}
3385
3386void Verifier::visitBitCastInst(BitCastInst &I) {
3387 Check(
3388 CastInst::castIsValid(Instruction::BitCast, I.getOperand(0), I.getType()),
3389 "Invalid bitcast", &I);
3390 visitInstruction(I);
3391}
3392
3393void Verifier::visitAddrSpaceCastInst(AddrSpaceCastInst &I) {
3394 Type *SrcTy = I.getOperand(0)->getType();
3395 Type *DestTy = I.getType();
3396
3397 Check(SrcTy->isPtrOrPtrVectorTy(), "AddrSpaceCast source must be a pointer",
3398 &I);
3399 Check(DestTy->isPtrOrPtrVectorTy(), "AddrSpaceCast result must be a pointer",
3400 &I);
3402 "AddrSpaceCast must be between different address spaces", &I);
3403 if (auto *SrcVTy = dyn_cast<VectorType>(SrcTy))
3404 Check(SrcVTy->getElementCount() ==
3405 cast<VectorType>(DestTy)->getElementCount(),
3406 "AddrSpaceCast vector pointer number of elements mismatch", &I);
3407 visitInstruction(I);
3408}
3409
3410/// visitPHINode - Ensure that a PHI node is well formed.
3411///
3412void Verifier::visitPHINode(PHINode &PN) {
3413 // Ensure that the PHI nodes are all grouped together at the top of the block.
3414 // This can be tested by checking whether the instruction before this is
3415 // either nonexistent (because this is begin()) or is a PHI node. If not,
3416 // then there is some other instruction before a PHI.
3417 Check(&PN == &PN.getParent()->front() ||
3418 isa<PHINode>(--BasicBlock::iterator(&PN)),
3419 "PHI nodes not grouped at top of basic block!", &PN, PN.getParent());
3420
3421 // Check that a PHI doesn't yield a Token.
3422 Check(!PN.getType()->isTokenTy(), "PHI nodes cannot have token type!");
3423
3424 // Check that all of the values of the PHI node have the same type as the
3425 // result.
3426 for (Value *IncValue : PN.incoming_values()) {
3427 Check(PN.getType() == IncValue->getType(),
3428 "PHI node operands are not the same type as the result!", &PN);
3429 }
3430
3431 // All other PHI node constraints are checked in the visitBasicBlock method.
3432
3433 visitInstruction(PN);
3434}
3435
3436void Verifier::visitCallBase(CallBase &Call) {
3437 Check(Call.getCalledOperand()->getType()->isPointerTy(),
3438 "Called function must be a pointer!", Call);
3439 FunctionType *FTy = Call.getFunctionType();
3440
3441 // Verify that the correct number of arguments are being passed
3442 if (FTy->isVarArg())
3443 Check(Call.arg_size() >= FTy->getNumParams(),
3444 "Called function requires more parameters than were provided!", Call);
3445 else
3446 Check(Call.arg_size() == FTy->getNumParams(),
3447 "Incorrect number of arguments passed to called function!", Call);
3448
3449 // Verify that all arguments to the call match the function type.
3450 for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i)
3451 Check(Call.getArgOperand(i)->getType() == FTy->getParamType(i),
3452 "Call parameter type does not match function signature!",
3453 Call.getArgOperand(i), FTy->getParamType(i), Call);
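 // For example, passing an i64 argument to 'declare void @f(i32)' is rejected
 // here; the caller must produce an i32 value before the call.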
3454
3455 AttributeList Attrs = Call.getAttributes();
3456
3457 Check(verifyAttributeCount(Attrs, Call.arg_size()),
3458 "Attribute after last parameter!", Call);
3459
3460 Function *Callee =
3461 dyn_cast<Function>(Call.getCalledOperand()->stripPointerCasts());
3462 bool IsIntrinsic = Callee && Callee->isIntrinsic();
3463 if (IsIntrinsic)
3464 Check(Callee->getValueType() == FTy,
3465 "Intrinsic called with incompatible signature", Call);
3466
3467 // Disallow calls to functions with the amdgpu_cs_chain[_preserve] calling
3468 // convention.
3469 auto CC = Call.getCallingConv();
3472 "Direct calls to amdgpu_cs_chain/amdgpu_cs_chain_preserve functions "
3473 "not allowed. Please use the @llvm.amdgpu.cs.chain intrinsic instead.",
3474 Call);
3475
3476 auto VerifyTypeAlign = [&](Type *Ty, const Twine &Message) {
3477 if (!Ty->isSized())
3478 return;
3479 Align ABIAlign = DL.getABITypeAlign(Ty);
3480 Align MaxAlign(ParamMaxAlignment);
3481 Check(ABIAlign <= MaxAlign,
3482 "Incorrect alignment of " + Message + " to called function!", Call);
3483 };
3484
3485 if (!IsIntrinsic) {
3486 VerifyTypeAlign(FTy->getReturnType(), "return type");
3487 for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i) {
3488 Type *Ty = FTy->getParamType(i);
3489 VerifyTypeAlign(Ty, "argument passed");
3490 }
3491 }
3492
3493 if (Attrs.hasFnAttr(Attribute::Speculatable)) {
3494 // Don't allow speculatable on call sites, unless the underlying function
3495 // declaration is also speculatable.
3496 Check(Callee && Callee->isSpeculatable(),
3497 "speculatable attribute may not apply to call sites", Call);
3498 }
3499
3500 if (Attrs.hasFnAttr(Attribute::Preallocated)) {
3501 Check(Call.getCalledFunction()->getIntrinsicID() ==
3502 Intrinsic::call_preallocated_arg,
3503 "preallocated as a call site attribute can only be on "
3504 "llvm.call.preallocated.arg");
3505 }
3506
3507 // Verify call attributes.
3508 verifyFunctionAttrs(FTy, Attrs, &Call, IsIntrinsic, Call.isInlineAsm());
3509
3510 // Conservatively check the inalloca argument.
3511 // We have a bug if we can find that there is an underlying alloca without
3512 // inalloca.
3513 if (Call.hasInAllocaArgument()) {
3514 Value *InAllocaArg = Call.getArgOperand(FTy->getNumParams() - 1);
3515 if (auto AI = dyn_cast<AllocaInst>(InAllocaArg->stripInBoundsOffsets()))
3516 Check(AI->isUsedWithInAlloca(),
3517 "inalloca argument for call has mismatched alloca", AI, Call);
3518 }
3519
3520 // For each argument of the callsite, if it has the swifterror argument,
3521 // make sure the underlying alloca/parameter it comes from has a swifterror as
3522 // well.
3523 for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i) {
3524 if (Call.paramHasAttr(i, Attribute::SwiftError)) {
3525 Value *SwiftErrorArg = Call.getArgOperand(i);
3526 if (auto AI = dyn_cast<AllocaInst>(SwiftErrorArg->stripInBoundsOffsets())) {
3527 Check(AI->isSwiftError(),
3528 "swifterror argument for call has mismatched alloca", AI, Call);
3529 continue;
3530 }
3531 auto ArgI = dyn_cast<Argument>(SwiftErrorArg);
3532 Check(ArgI, "swifterror argument should come from an alloca or parameter",
3533 SwiftErrorArg, Call);
3534 Check(ArgI->hasSwiftErrorAttr(),
3535 "swifterror argument for call has mismatched parameter", ArgI,
3536 Call);
3537 }
3538
3539 if (Attrs.hasParamAttr(i, Attribute::ImmArg)) {
3540 // Don't allow immarg on call sites, unless the underlying declaration
3541 // also has the matching immarg.
3542 Check(Callee && Callee->hasParamAttribute(i, Attribute::ImmArg),
3543 "immarg may not apply only to call sites", Call.getArgOperand(i),
3544 Call);
3545 }
3546
3547 if (Call.paramHasAttr(i, Attribute::ImmArg)) {
3548 Value *ArgVal = Call.getArgOperand(i);
3549 Check(isa<ConstantInt>(ArgVal) || isa<ConstantFP>(ArgVal),
3550 "immarg operand has non-immediate parameter", ArgVal, Call);
3551 }
3552
3553 if (Call.paramHasAttr(i, Attribute::Preallocated)) {
3554 Value *ArgVal = Call.getArgOperand(i);
3555 bool hasOB =
3556 Call.countOperandBundlesOfType(LLVMContext::OB_preallocated) != 0;
3557 bool isMustTail = Call.isMustTailCall();
3558 Check(hasOB != isMustTail,
3559 "preallocated operand either requires a preallocated bundle or "
3560 "the call to be musttail (but not both)",
3561 ArgVal, Call);
3562 }
3563 }
3564
3565 if (FTy->isVarArg()) {
3566 // FIXME? is 'nest' even legal here?
3567 bool SawNest = false;
3568 bool SawReturned = false;
3569
3570 for (unsigned Idx = 0; Idx < FTy->getNumParams(); ++Idx) {
3571 if (Attrs.hasParamAttr(Idx, Attribute::Nest))
3572 SawNest = true;
3573 if (Attrs.hasParamAttr(Idx, Attribute::Returned))
3574 SawReturned = true;
3575 }
3576
3577 // Check attributes on the varargs part.
3578 for (unsigned Idx = FTy->getNumParams(); Idx < Call.arg_size(); ++Idx) {
3579 Type *Ty = Call.getArgOperand(Idx)->getType();
3580 AttributeSet ArgAttrs = Attrs.getParamAttrs(Idx);
3581 verifyParameterAttrs(ArgAttrs, Ty, &Call);
3582
3583 if (ArgAttrs.hasAttribute(Attribute::Nest)) {
3584 Check(!SawNest, "More than one parameter has attribute nest!", Call);
3585 SawNest = true;
3586 }
3587
3588 if (ArgAttrs.hasAttribute(Attribute::Returned)) {
3589 Check(!SawReturned, "More than one parameter has attribute returned!",
3590 Call);
3591 Check(Ty->canLosslesslyBitCastTo(FTy->getReturnType()),
3592 "Incompatible argument and return types for 'returned' "
3593 "attribute",
3594 Call);
3595 SawReturned = true;
3596 }
3597
3598 // Statepoint intrinsic is vararg but the wrapped function may be not.
3599 // Allow sret here and check the wrapped function in verifyStatepoint.
3600 if (!Call.getCalledFunction() ||
3601 Call.getCalledFunction()->getIntrinsicID() !=
3602 Intrinsic::experimental_gc_statepoint)
3603 Check(!ArgAttrs.hasAttribute(Attribute::StructRet),
3604 "Attribute 'sret' cannot be used for vararg call arguments!",
3605 Call);
3606
3607 if (ArgAttrs.hasAttribute(Attribute::InAlloca))
3608 Check(Idx == Call.arg_size() - 1,
3609 "inalloca isn't on the last argument!", Call);
3610 }
3611 }
3612
3613 // Verify that there's no metadata unless it's a direct call to an intrinsic.
3614 if (!IsIntrinsic) {
3615 for (Type *ParamTy : FTy->params()) {
3616 Check(!ParamTy->isMetadataTy(),
3617 "Function has metadata parameter but isn't an intrinsic", Call);
3618 Check(!ParamTy->isTokenTy(),
3619 "Function has token parameter but isn't an intrinsic", Call);
3620 }
3621 }
3622
3623 // Verify that indirect calls don't return tokens.
3624 if (!Call.getCalledFunction()) {
3625 Check(!FTy->getReturnType()->isTokenTy(),
3626 "Return type cannot be token for indirect call!");
3627 Check(!FTy->getReturnType()->isX86_AMXTy(),
3628 "Return type cannot be x86_amx for indirect call!");
3629 }
3630
3631 if (Function *F = Call.getCalledFunction())
3632 if (Intrinsic::ID ID = (Intrinsic::ID)F->getIntrinsicID())
3633 visitIntrinsicCall(ID, Call);
3634
3635 // Verify that a callsite has at most one "deopt", at most one "funclet", at
3636 // most one "gc-transition", at most one "cfguardtarget", at most one
3637 // "preallocated" operand bundle, and at most one "ptrauth" operand bundle.
3638 bool FoundDeoptBundle = false, FoundFuncletBundle = false,
3639 FoundGCTransitionBundle = false, FoundCFGuardTargetBundle = false,
3640 FoundPreallocatedBundle = false, FoundGCLiveBundle = false,
3641 FoundPtrauthBundle = false, FoundKCFIBundle = false,
3642 FoundAttachedCallBundle = false;
3643 for (unsigned i = 0, e = Call.getNumOperandBundles(); i < e; ++i) {
3644 OperandBundleUse BU = Call.getOperandBundleAt(i);
3645 uint32_t Tag = BU.getTagID();
3646 if (Tag == LLVMContext::OB_deopt) {
3647 Check(!FoundDeoptBundle, "Multiple deopt operand bundles", Call);
3648 FoundDeoptBundle = true;
3649 } else if (Tag == LLVMContext::OB_gc_transition) {
3650 Check(!FoundGCTransitionBundle, "Multiple gc-transition operand bundles",
3651 Call);
3652 FoundGCTransitionBundle = true;
3653 } else if (Tag == LLVMContext::OB_funclet) {
3654 Check(!FoundFuncletBundle, "Multiple funclet operand bundles", Call);
3655 FoundFuncletBundle = true;
3656 Check(BU.Inputs.size() == 1,
3657 "Expected exactly one funclet bundle operand", Call);
3658 Check(isa<FuncletPadInst>(BU.Inputs.front()),
3659 "Funclet bundle operands should correspond to a FuncletPadInst",
3660 Call);
3661 } else if (Tag == LLVMContext::OB_cfguardtarget) {
3662 Check(!FoundCFGuardTargetBundle, "Multiple CFGuardTarget operand bundles",
3663 Call);
3664 FoundCFGuardTargetBundle = true;
3665 Check(BU.Inputs.size() == 1,
3666 "Expected exactly one cfguardtarget bundle operand", Call);
3667 } else if (Tag == LLVMContext::OB_ptrauth) {
3668 Check(!FoundPtrauthBundle, "Multiple ptrauth operand bundles", Call);
3669 FoundPtrauthBundle = true;
3670 Check(BU.Inputs.size() == 2,
3671 "Expected exactly two ptrauth bundle operands", Call);
3672 Check(isa<ConstantInt>(BU.Inputs[0]) &&
3673 BU.Inputs[0]->getType()->isIntegerTy(32),
3674 "Ptrauth bundle key operand must be an i32 constant", Call);
3675 Check(BU.Inputs[1]->getType()->isIntegerTy(64),
3676 "Ptrauth bundle discriminator operand must be an i64", Call);
3677 } else if (Tag == LLVMContext::OB_kcfi) {
3678 Check(!FoundKCFIBundle, "Multiple kcfi operand bundles", Call);
3679 FoundKCFIBundle = true;
3680 Check(BU.Inputs.size() == 1, "Expected exactly one kcfi bundle operand",
3681 Call);
3682 Check(isa<ConstantInt>(BU.Inputs[0]) &&
3683 BU.Inputs[0]->getType()->isIntegerTy(32),
3684 "Kcfi bundle operand must be an i32 constant", Call);
3685 } else if (Tag == LLVMContext::OB_preallocated) {
3686 Check(!FoundPreallocatedBundle, "Multiple preallocated operand bundles",
3687 Call);
3688 FoundPreallocatedBundle = true;
3689 Check(BU.Inputs.size() == 1,
3690 "Expected exactly one preallocated bundle operand", Call);
3691 auto Input = dyn_cast<IntrinsicInst>(BU.Inputs.front());
3692 Check(Input &&
3693 Input->getIntrinsicID() == Intrinsic::call_preallocated_setup,
3694 "\"preallocated\" argument must be a token from "
3695 "llvm.call.preallocated.setup",
3696 Call);
3697 } else if (Tag == LLVMContext::OB_gc_live) {
3698 Check(!FoundGCLiveBundle, "Multiple gc-live operand bundles", Call);
3699 FoundGCLiveBundle = true;
3700 } else if (Tag == LLVMContext::OB_clang_arc_attachedcall) {
3701 Check(!FoundAttachedCallBundle,
3702 "Multiple \"clang.arc.attachedcall\" operand bundles", Call);
3703 FoundAttachedCallBundle = true;
3704 verifyAttachedCallBundle(Call, BU);
3705 }
3706 }
3707
3708 // Verify that callee and callsite agree on whether to use pointer auth.
3709 Check(!(Call.getCalledFunction() && FoundPtrauthBundle),
3710 "Direct call cannot have a ptrauth bundle", Call);
3711
3712 // Verify that each inlinable callsite of a debug-info-bearing function in a
3713 // debug-info-bearing function has a debug location attached to it. Failure to
3714 // do so causes assertion failures when the inliner sets up inline scope info
3715 // (Interposable functions are not inlinable, nor are functions without
3716 // definitions.)
3717 if (Call.getFunction()->getSubprogram() && Call.getCalledFunction() &&
3718 !Call.getCalledFunction()->isInterposable() &&
3719 !Call.getCalledFunction()->isDeclaration() &&
3720 Call.getCalledFunction()->getSubprogram())
3721 CheckDI(Call.getDebugLoc(),
3722 "inlinable function call in a function with "
3723 "debug info must have a !dbg location",
3724 Call);
3725
3726 if (Call.isInlineAsm())
3727 verifyInlineAsmCall(Call);
3728
3729 ConvergenceVerifyHelper.visit(Call);
3730
3731 visitInstruction(Call);
3732}
3733
3734void Verifier::verifyTailCCMustTailAttrs(const AttrBuilder &Attrs,
3735 StringRef Context) {
3736 Check(!Attrs.contains(Attribute::InAlloca),
3737 Twine("inalloca attribute not allowed in ") + Context);
3738 Check(!Attrs.contains(Attribute::InReg),
3739 Twine("inreg attribute not allowed in ") + Context);
3740 Check(!Attrs.contains(Attribute::SwiftError),
3741 Twine("swifterror attribute not allowed in ") + Context);
3742 Check(!Attrs.contains(Attribute::Preallocated),
3743 Twine("preallocated attribute not allowed in ") + Context);
3744 Check(!Attrs.contains(Attribute::ByRef),
3745 Twine("byref attribute not allowed in ") + Context);
3746}
3747
3748/// Two types are "congruent" if they are identical, or if they are both pointer
3749/// types with different pointee types and the same address space.
3750static bool isTypeCongruent(Type *L, Type *R) {
3751 if (L == R)
3752 return true;
3753 PointerType *PL = dyn_cast<PointerType>(L);
3754 PointerType *PR = dyn_cast<PointerType>(R);
3755 if (!PL || !PR)
3756 return false;
3757 return PL->getAddressSpace() == PR->getAddressSpace();
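 // With opaque pointers, two pointer types are congruent exactly when they are
 // in the same address space; 'ptr' and 'ptr addrspace(1)' are not congruent.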
3758}
3759
3760static AttrBuilder getParameterABIAttributes(LLVMContext &C, unsigned I, AttributeList Attrs) {
3761 static const Attribute::AttrKind ABIAttrs[] = {
3762 Attribute::StructRet, Attribute::ByVal, Attribute::InAlloca,
3763 Attribute::InReg, Attribute::StackAlignment, Attribute::SwiftSelf,
3764 Attribute::SwiftAsync, Attribute::SwiftError, Attribute::Preallocated,
3765 Attribute::ByRef};
3766 AttrBuilder Copy(C);
3767 for (auto AK : ABIAttrs) {
3768 Attribute Attr = Attrs.getParamAttrs(I).getAttribute(AK);
3769 if (Attr.isValid())
3770 Copy.addAttribute(Attr);
3771 }
3772
3773 // `align` is ABI-affecting only in combination with `byval` or `byref`.
3774 if (Attrs.hasParamAttr(I, Attribute::Alignment) &&
3775 (Attrs.hasParamAttr(I, Attribute::ByVal) ||
3776 Attrs.hasParamAttr(I, Attribute::ByRef)))
3777 Copy.addAlignmentAttr(Attrs.getParamAlignment(I));
3778 return Copy;
3779}
3780
3781void Verifier::verifyMustTailCall(CallInst &CI) {
3782 Check(!CI.isInlineAsm(), "cannot use musttail call with inline asm", &CI);
3783
3784 Function *F = CI.getParent()->getParent();
3785 FunctionType *CallerTy = F->getFunctionType();
3786 FunctionType *CalleeTy = CI.getFunctionType();
3787 Check(CallerTy->isVarArg() == CalleeTy->isVarArg(),
3788 "cannot guarantee tail call due to mismatched varargs", &CI);
3789 Check(isTypeCongruent(CallerTy->getReturnType(), CalleeTy->getReturnType()),
3790 "cannot guarantee tail call due to mismatched return types", &CI);
3791
3792 // - The calling conventions of the caller and callee must match.
3793 Check(F->getCallingConv() == CI.getCallingConv(),
3794 "cannot guarantee tail call due to mismatched calling conv", &CI);
3795
3796 // - The call must immediately precede a :ref:`ret <i_ret>` instruction,
3797 // or a pointer bitcast followed by a ret instruction.
3798 // - The ret instruction must return the (possibly bitcasted) value
3799 // produced by the call or void.
3800 Value *RetVal = &CI;
3801 Instruction *Next = CI.getNextNode();
3802
3803 // Handle the optional bitcast.
3804 if (BitCastInst *BI = dyn_cast_or_null<BitCastInst>(Next)) {
3805 Check(BI->getOperand(0) == RetVal,
3806 "bitcast following musttail call must use the call", BI);
3807 RetVal = BI;
3808 Next = BI->getNextNode();
3809 }
3810
3811 // Check the return.
3812 ReturnInst *Ret = dyn_cast_or_null<ReturnInst>(Next);
3813 Check(Ret, "musttail call must precede a ret with an optional bitcast", &CI);
3814 Check(!Ret->getReturnValue() || Ret->getReturnValue() == RetVal ||
3815 isa<UndefValue>(Ret->getReturnValue()),
3816 "musttail call result must be returned", Ret);
3817
3818 AttributeList CallerAttrs = F->getAttributes();
3819 AttributeList CalleeAttrs = CI.getAttributes();
3820 if (CI.getCallingConv() == CallingConv::SwiftTail ||
3821 CI.getCallingConv() == CallingConv::Tail) {
3822 StringRef CCName =
3823 CI.getCallingConv() == CallingConv::Tail ? "tailcc" : "swifttailcc";
3824
3825 // - Only sret, byval, swiftself, and swiftasync ABI-impacting attributes
3826 // are allowed in swifttailcc call
3827 for (unsigned I = 0, E = CallerTy->getNumParams(); I != E; ++I) {
3828 AttrBuilder ABIAttrs = getParameterABIAttributes(F->getContext(), I, CallerAttrs);
3829 SmallString<32> Context{CCName, StringRef(" musttail caller")};
3830 verifyTailCCMustTailAttrs(ABIAttrs, Context);
3831 }
3832 for (unsigned I = 0, E = CalleeTy->getNumParams(); I != E; ++I) {
3833 AttrBuilder ABIAttrs = getParameterABIAttributes(F->getContext(), I, CalleeAttrs);
3834 SmallString<32> Context{CCName, StringRef(" musttail callee")};
3835 verifyTailCCMustTailAttrs(ABIAttrs, Context);
3836 }
3837 // - Varargs functions are not allowed
3838 Check(!CallerTy->isVarArg(), Twine("cannot guarantee ") + CCName +
3839 " tail call for varargs function");
3840 return;
3841 }
3842
3843 // - The caller and callee prototypes must match. Pointer types of
3844 // parameters or return types may differ in pointee type, but not
3845 // address space.
3846 if (!CI.getCalledFunction() || !CI.getCalledFunction()->isIntrinsic()) {
3847 Check(CallerTy->getNumParams() == CalleeTy->getNumParams(),
3848 "cannot guarantee tail call due to mismatched parameter counts", &CI);
3849 for (unsigned I = 0, E = CallerTy->getNumParams(); I != E; ++I) {
3850 Check(
3851 isTypeCongruent(CallerTy->getParamType(I), CalleeTy->getParamType(I)),
3852 "cannot guarantee tail call due to mismatched parameter types", &CI);
3853 }
3854 }
3855
3856 // - All ABI-impacting function attributes, such as sret, byval, inreg,
3857 // returned, preallocated, and inalloca, must match.
3858 for (unsigned I = 0, E = CallerTy->getNumParams(); I != E; ++I) {
3859 AttrBuilder CallerABIAttrs = getParameterABIAttributes(F->getContext(), I, CallerAttrs);
3860 AttrBuilder CalleeABIAttrs = getParameterABIAttributes(F->getContext(), I, CalleeAttrs);
3861 Check(CallerABIAttrs == CalleeABIAttrs,
3862 "cannot guarantee tail call due to mismatched ABI impacting "
3863 "function attributes",
3864 &CI, CI.getOperand(I));
3865 }
3866}
3867
3868void Verifier::visitCallInst(CallInst &CI) {
3869 visitCallBase(CI);
3870
3871 if (CI.isMustTailCall())
3872 verifyMustTailCall(CI);
3873}
3874
3875void Verifier::visitInvokeInst(InvokeInst &II) {
3876 visitCallBase(II);
3877
3878 // Verify that the first non-PHI instruction of the unwind destination is an
3879 // exception handling instruction.
3880 Check(
3881 II.getUnwindDest()->isEHPad(),
3882 "The unwind destination does not have an exception handling instruction!",
3883 &II);
3884
3885 visitTerminator(II);
3886}
3887
3888/// visitUnaryOperator - Check the argument to the unary operator.
3889///
3890void Verifier::visitUnaryOperator(UnaryOperator &U) {
3891 Check(U.getType() == U.getOperand(0)->getType(),
3892 "Unary operators must have same type for"
3893 "operands and result!",
3894 &U);
3895
3896 switch (U.getOpcode()) {
3897 // Check that floating-point arithmetic operators are only used with
3898 // floating-point operands.
3899 case Instruction::FNeg:
3900 Check(U.getType()->isFPOrFPVectorTy(),
3901 "FNeg operator only works with float types!", &U);
3902 break;
3903 default:
3904 llvm_unreachable("Unknown UnaryOperator opcode!");
3905 }
3906
3907 visitInstruction(U);
3908}
3909
3910/// visitBinaryOperator - Check that both arguments to the binary operator are
3911/// of the same type!
3912///
3913void Verifier::visitBinaryOperator(BinaryOperator &B) {
3914 Check(B.getOperand(0)->getType() == B.getOperand(1)->getType(),
3915 "Both operands to a binary operator are not of the same type!", &B);
3916
3917 switch (B.getOpcode()) {
3918 // Check that integer arithmetic operators are only used with
3919 // integral operands.
3920 case Instruction::Add:
3921 case Instruction::Sub:
3922 case Instruction::Mul:
3923 case Instruction::SDiv:
3924 case Instruction::UDiv:
3925 case Instruction::SRem:
3926 case Instruction::URem:
3927 Check(B.getType()->isIntOrIntVectorTy(),
3928 "Integer arithmetic operators only work with integral types!", &B);
3929 Check(B.getType() == B.getOperand(0)->getType(),
3930 "Integer arithmetic operators must have same type "
3931 "for operands and result!",
3932 &B);
3933 break;
3934 // Check that floating-point arithmetic operators are only used with
3935 // floating-point operands.
3936 case Instruction::FAdd:
3937 case Instruction::FSub:
3938 case Instruction::FMul:
3939 case Instruction::FDiv:
3940 case Instruction::FRem:
3941 Check(B.getType()->isFPOrFPVectorTy(),
3942 "Floating-point arithmetic operators only work with "
3943 "floating-point types!",
3944 &B);
3945 Check(B.getType() == B.getOperand(0)->getType(),
3946 "Floating-point arithmetic operators must have same type "
3947 "for operands and result!",
3948 &B);
3949 break;
3950 // Check that logical operators are only used with integral operands.
3951 case Instruction::And:
3952 case Instruction::Or:
3953 case Instruction::Xor:
3954 Check(B.getType()->isIntOrIntVectorTy(),
3955 "Logical operators only work with integral types!", &B);
3956 Check(B.getType() == B.getOperand(0)->getType(),
3957 "Logical operators must have same type for operands and result!", &B);
3958 break;
3959 case Instruction::Shl:
3960 case Instruction::LShr:
3961 case Instruction::AShr:
3962 Check(B.getType()->isIntOrIntVectorTy(),
3963 "Shifts only work with integral types!", &B);
3964 Check(B.getType() == B.getOperand(0)->getType(),
3965 "Shift return type must be same as operands!", &B);
3966 break;
3967 default:
3968 llvm_unreachable("Unknown BinaryOperator opcode!");
3969 }
3970
3971 visitInstruction(B);
3972}
3973
3974void Verifier::visitICmpInst(ICmpInst &IC) {
3975 // Check that the operands are the same type
3976 Type *Op0Ty = IC.getOperand(0)->getType();
3977 Type *Op1Ty = IC.getOperand(1)->getType();
3978 Check(Op0Ty == Op1Ty,
3979 "Both operands to ICmp instruction are not of the same type!", &IC);
3980 // Check that the operands are the right type
3981 Check(Op0Ty->isIntOrIntVectorTy() || Op0Ty->isPtrOrPtrVectorTy(),
3982 "Invalid operand types for ICmp instruction", &IC);
3983 // Check that the predicate is valid.
3984 Check(IC.isIntPredicate(), "Invalid predicate in ICmp instruction!", &IC);
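 // For example, '%c = icmp slt i32 %a, %b' is valid, while a floating-point
 // predicate such as 'olt' on an icmp is rejected here.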
3985
3986 visitInstruction(IC);
3987}
3988
3989void Verifier::visitFCmpInst(FCmpInst &FC) {
3990 // Check that the operands are the same type
3991 Type *Op0Ty = FC.getOperand(0)->getType();
3992 Type *Op1Ty = FC.getOperand(1)->getType();
3993 Check(Op0Ty == Op1Ty,
3994 "Both operands to FCmp instruction are not of the same type!", &FC);
3995 // Check that the operands are the right type
3996 Check(Op0Ty->isFPOrFPVectorTy(), "Invalid operand types for FCmp instruction",
3997 &FC);
3998 // Check that the predicate is valid.
3999 Check(FC.isFPPredicate(), "Invalid predicate in FCmp instruction!", &FC);
4000
4001 visitInstruction(FC);
4002}
4003
4004void Verifier::visitExtractElementInst(ExtractElementInst &EI) {
4006 "Invalid extractelement operands!", &EI);
4007 visitInstruction(EI);
4008}
4009
4010void Verifier::visitInsertElementInst(InsertElementInst &IE) {
4011 Check(InsertElementInst::isValidOperands(IE.getOperand(0), IE.getOperand(1),
4012 IE.getOperand(2)),
4013 "Invalid insertelement operands!", &IE);
4014 visitInstruction(IE);
4015}
4016
4017void Verifier::visitShuffleVectorInst(ShuffleVectorInst &SV) {
4018 Check(ShuffleVectorInst::isValidOperands(SV.getOperand(0), SV.getOperand(1),
4019 SV.getShuffleMask()),
4020 "Invalid shufflevector operands!", &SV);
4021 visitInstruction(SV);
4022}
4023
4024void Verifier::visitGetElementPtrInst(GetElementPtrInst &GEP) {
4025 Type *TargetTy = GEP.getPointerOperandType()->getScalarType();
4026
4027 Check(isa<PointerType>(TargetTy),
4028 "GEP base pointer is not a vector or a vector of pointers", &GEP);
4029 Check(GEP.getSourceElementType()->isSized(), "GEP into unsized type!", &GEP);
4030
4031 if (auto *STy = dyn_cast<StructType>(GEP.getSourceElementType())) {
4032 SmallPtrSet<Type *, 4> Visited;
4033 Check(!STy->containsScalableVectorType(&Visited),
4034 "getelementptr cannot target structure that contains scalable vector"
4035 "type",
4036 &GEP);
4037 }
4038
4039 SmallVector<Value *, 16> Idxs(GEP.indices());
4040 Check(
4041 all_of(Idxs, [](Value *V) { return V->getType()->isIntOrIntVectorTy(); }),
4042 "GEP indexes must be integers", &GEP);
4043 Type *ElTy =
4044 GetElementPtrInst::getIndexedType(GEP.getSourceElementType(), Idxs);
4045 Check(ElTy, "Invalid indices for GEP pointer type!", &GEP);
4046
4047 Check(GEP.getType()->isPtrOrPtrVectorTy() &&
4048 GEP.getResultElementType() == ElTy,
4049 "GEP is not of right type for indices!", &GEP, ElTy);
4050
4051 if (auto *GEPVTy = dyn_cast<VectorType>(GEP.getType())) {
4052 // Additional checks for vector GEPs.
4053 ElementCount GEPWidth = GEPVTy->getElementCount();
4054 if (GEP.getPointerOperandType()->isVectorTy())
4055 Check(
4056 GEPWidth ==
4057 cast<VectorType>(GEP.getPointerOperandType())->getElementCount(),
4058 "Vector GEP result width doesn't match operand's", &GEP);
4059 for (Value *Idx : Idxs) {
4060 Type *IndexTy = Idx->getType();
4061 if (auto *IndexVTy = dyn_cast<VectorType>(IndexTy)) {
4062 ElementCount IndexWidth = IndexVTy->getElementCount();
4063 Check(IndexWidth == GEPWidth, "Invalid GEP index vector width", &GEP);
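 // For example, 'getelementptr i32, <4 x ptr> %p, <4 x i64> %i' is valid,
 // but a <2 x i64> index with a <4 x ptr> base is rejected.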
4064 }
4065 Check(IndexTy->isIntOrIntVectorTy(),
4066 "All GEP indices should be of integer type");
4067 }
4068 }
4069
4070 if (auto *PTy = dyn_cast<PointerType>(GEP.getType())) {
4071 Check(GEP.getAddressSpace() == PTy->getAddressSpace(),
4072 "GEP address space doesn't match type", &GEP);
4073 }
4074
4075 visitInstruction(GEP);
4076}
4077
4078static bool isContiguous(const ConstantRange &A, const ConstantRange &B) {
4079 return A.getUpper() == B.getLower() || A.getLower() == B.getUpper();
4080}
4081
4082/// Verify !range and !absolute_symbol metadata. These have the same
4083/// restrictions, except !absolute_symbol allows the full set.
4084void Verifier::verifyRangeMetadata(const Value &I, const MDNode *Range,
4085 Type *Ty, bool IsAbsoluteSymbol) {
4086 unsigned NumOperands = Range->getNumOperands();
4087 Check(NumOperands % 2 == 0, "Unfinished range!", Range);
4088 unsigned NumRanges = NumOperands / 2;
4089 Check(NumRanges >= 1, "It should have at least one range!", Range);
4090
4091 ConstantRange LastRange(1, true); // Dummy initial value
4092 for (unsigned i = 0; i < NumRanges; ++i) {
4093 ConstantInt *Low =
4094 mdconst::dyn_extract<ConstantInt>(Range->getOperand(2 * i));
4095 Check(Low, "The lower limit must be an integer!", Low);
4096 ConstantInt *High =
4097 mdconst::dyn_extract<ConstantInt>(Range->getOperand(2 * i + 1));
4098 Check(High, "The upper limit must be an integer!", High);
4099 Check(High->getType() == Low->getType() &&
4100 High->getType() == Ty->getScalarType(),
4101 "Range types must match instruction type!", &I);
4102
4103 APInt HighV = High->getValue();
4104 APInt LowV = Low->getValue();
4105
4106 // ConstantRange asserts if the ranges are the same except for the min/max
4107 // value. Leave the cases it tolerates for the empty range error below.
4108 Check(LowV != HighV || LowV.isMaxValue() || LowV.isMinValue(),
4109 "The upper and lower limits cannot be the same value", &I);
4110
4111 ConstantRange CurRange(LowV, HighV);
4112 Check(!CurRange.isEmptySet() && (IsAbsoluteSymbol || !CurRange.isFullSet()),
4113 "Range must not be empty!", Range);
4114 if (i != 0) {
4115 Check(CurRange.intersectWith(LastRange).isEmptySet(),
4116 "Intervals are overlapping", Range);
4117 Check(LowV.sgt(LastRange.getLower()), "Intervals are not in order",
4118 Range);
4119 Check(!isContiguous(CurRange, LastRange), "Intervals are contiguous",
4120 Range);
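 // For example, '!{i32 0, i32 8, i32 12, i32 16}' is a valid !range, while
 // '!{i32 0, i32 8, i32 8, i32 16}' is rejected: the two intervals are
 // contiguous and should have been merged into one.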
4121 }
4122 LastRange = ConstantRange(LowV, HighV);
4123 }
4124 if (NumRanges > 2) {
4125 APInt FirstLow =
4126 mdconst::dyn_extract<ConstantInt>(Range->getOperand(0))->getValue();
4127 APInt FirstHigh =
4128 mdconst::dyn_extract<ConstantInt>(Range->getOperand(1))->getValue();
4129 ConstantRange FirstRange(FirstLow, FirstHigh);
4130 Check(FirstRange.intersectWith(LastRange).isEmptySet(),
4131 "Intervals are overlapping", Range);
4132 Check(!isContiguous(FirstRange, LastRange), "Intervals are contiguous",
4133 Range);
4134 }
4135}
4136
4137void Verifier::visitRangeMetadata(Instruction &I, MDNode *Range, Type *Ty) {
4138 assert(Range && Range == I.getMetadata(LLVMContext::MD_range) &&
4139 "precondition violation");
4140 verifyRangeMetadata(I, Range, Ty, false);
4141}
4142
4143void Verifier::checkAtomicMemAccessSize(Type *Ty, const Instruction *I) {
4144 unsigned Size = DL.getTypeSizeInBits(Ty);
4145 Check(Size >= 8, "atomic memory access' size must be byte-sized", Ty, I);
4146 Check(!(Size & (Size - 1)),
4147 "atomic memory access' operand must have a power-of-two size", Ty, I);
4148}
4149
4150void Verifier::visitLoadInst(LoadInst &LI) {
4151 PointerType *PTy = dyn_cast<PointerType>(LI.getOperand(0)->getType());
4152 Check(PTy, "Load operand must be a pointer.", &LI);
4153 Type *ElTy = LI.getType();
4154 if (MaybeAlign A = LI.getAlign()) {
4155 Check(A->value() <= Value::MaximumAlignment,
4156 "huge alignment values are unsupported", &LI);
4157 }
4158 Check(ElTy->isSized(), "loading unsized types is not allowed", &LI);
4159 if (LI.isAtomic()) {
4162 "Load cannot have Release ordering", &LI);
4163 Check(ElTy->isIntOrPtrTy() || ElTy->isFloatingPointTy(),
4164 "atomic load operand must have integer, pointer, or floating point "
4165 "type!",
4166 ElTy, &LI);
4167 checkAtomicMemAccessSize(ElTy, &LI);
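 // For example, 'load atomic i32, ptr %p acquire, align 4' is accepted, while
 // a release ordering or a non-power-of-two-sized element type is rejected.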
4168 } else {
4170 "Non-atomic load cannot have SynchronizationScope specified", &LI);
4171 }
4172
4173 visitInstruction(LI);
4174}
4175
4176void Verifier::visitStoreInst(StoreInst &SI) {
4177 PointerType *PTy = dyn_cast<PointerType>(SI.getOperand(1)->getType());
4178 Check(PTy, "Store operand must be a pointer.", &SI);
4179 Type *ElTy = SI.getOperand(0)->getType();
4180 if (MaybeAlign A = SI.getAlign()) {
4181 Check(A->value() <= Value::MaximumAlignment,
4182 "huge alignment values are unsupported", &SI);
4183 }
4184 Check(ElTy->isSized(), "storing unsized types is not allowed", &SI);
4185 if (SI.isAtomic()) {
4186 Check(SI.getOrdering() != AtomicOrdering::Acquire &&
4187 SI.getOrdering() != AtomicOrdering::AcquireRelease,
4188 "Store cannot have Acquire ordering", &SI);
4189 Check(ElTy->isIntOrPtrTy() || ElTy->isFloatingPointTy(),
4190 "atomic store operand must have integer, pointer, or floating point "
4191 "type!",
4192 ElTy, &SI);
4193 checkAtomicMemAccessSize(ElTy, &SI);
4194 } else {
4195 Check(SI.getSyncScopeID() == SyncScope::System,
4196 "Non-atomic store cannot have SynchronizationScope specified", &SI);
4197 }
4198 visitInstruction(SI);
4199}
4200
4201/// Check that SwiftErrorVal is used as a swifterror argument in CS.
4202void Verifier::verifySwiftErrorCall(CallBase &Call,
4203 const Value *SwiftErrorVal) {
4204 for (const auto &I : llvm::enumerate(Call.args())) {
4205 if (I.value() == SwiftErrorVal) {
4206 Check(Call.paramHasAttr(I.index(), Attribute::SwiftError),
4207 "swifterror value when used in a callsite should be marked "
4208 "with swifterror attribute",
4209 SwiftErrorVal, Call);
4210 }
4211 }
4212}
4213
4214void Verifier::verifySwiftErrorValue(const Value *SwiftErrorVal) {
4215 // Check that swifterror value is only used by loads, stores, or as
4216 // a swifterror argument.
4217 for (const User *U : SwiftErrorVal->users()) {
4218 Check(isa<LoadInst>(U) || isa<StoreInst>(U) || isa<CallInst>(U) ||
4219 isa<InvokeInst>(U),
4220 "swifterror value can only be loaded and stored from, or "
4221 "as a swifterror argument!",
4222 SwiftErrorVal, U);
4223 // If it is used by a store, check it is the second operand.
4224 if (auto StoreI = dyn_cast<StoreInst>(U))
4225 Check(StoreI->getOperand(1) == SwiftErrorVal,
4226 "swifterror value should be the second operand when used "
4227 "by stores",
4228 SwiftErrorVal, U);
4229 if (auto *Call = dyn_cast<CallBase>(U))
4230 verifySwiftErrorCall(*const_cast<CallBase *>(Call), SwiftErrorVal);
4231 }
4232}
4233
4234void Verifier::visitAllocaInst(AllocaInst &AI) {
4235 SmallPtrSet<Type*, 4> Visited;
4236 Check(AI.getAllocatedType()->isSized(&Visited),
4237 "Cannot allocate unsized type", &AI);
4239 "Alloca array size must have integer type", &AI);
4240 if (MaybeAlign A = AI.getAlign()) {
4241 Check(A->value() <= Value::MaximumAlignment,
4242 "huge alignment values are unsupported", &AI);
4243 }
4244
4245 if (AI.isSwiftError()) {
4247 "swifterror alloca must have pointer type", &AI);
4249 "swifterror alloca must not be array allocation", &AI);
4250 verifySwiftErrorValue(&AI);
4251 }
4252
4253 visitInstruction(AI);
4254}
4255
4256void Verifier::visitAtomicCmpXchgInst(AtomicCmpXchgInst &CXI) {
4257 Type *ElTy = CXI.getOperand(1)->getType();
4258 Check(ElTy->isIntOrPtrTy(),
4259 "cmpxchg operand must have integer or pointer type", ElTy, &CXI);
4260 checkAtomicMemAccessSize(ElTy, &CXI);
4261 visitInstruction(CXI);
4262}
4263
4264void Verifier::visitAtomicRMWInst(AtomicRMWInst &RMWI) {
4266 "atomicrmw instructions cannot be unordered.", &RMWI);
4267 auto Op = RMWI.getOperation();
4268 Type *ElTy = RMWI.getOperand(1)->getType();
4269 if (Op == AtomicRMWInst::Xchg) {
4270 Check(ElTy->isIntegerTy() || ElTy->isFloatingPointTy() ||
4271 ElTy->isPointerTy(),
4272 "atomicrmw " + AtomicRMWInst::getOperationName(Op) +
4273 " operand must have integer or floating point type!",
4274 &RMWI, ElTy);
4275 } else if (AtomicRMWInst::isFPOperation(Op)) {
4276 Check(ElTy->isFPOrFPVectorTy() && !isa<ScalableVectorType>(ElTy),
4277 "atomicrmw " + AtomicRMWInst::getOperationName(Op) +
4278 " operand must have floating-point or fixed vector of floating-point "
4279 "type!",
4280 &RMWI, ElTy);
4281 } else {
4282 Check(ElTy->isIntegerTy(),
4283 "atomicrmw " + AtomicRMWInst::getOperationName(Op) +
4284 " operand must have integer type!",
4285 &RMWI, ElTy);
4286 }
4287 checkAtomicMemAccessSize(ElTy, &RMWI);
4289 "Invalid binary operation!", &RMWI);
4290 visitInstruction(RMWI);
4291}
4292
4293void Verifier::visitFenceInst(FenceInst &FI) {
4294 const AtomicOrdering Ordering = FI.getOrdering();
4295 Check(Ordering == AtomicOrdering::Acquire ||
4296 Ordering == AtomicOrdering::Release ||
4297 Ordering == AtomicOrdering::AcquireRelease ||
4299 "fence instructions may only have acquire, release, acq_rel, or "
4300 "seq_cst ordering.",
4301 &FI);
4302 visitInstruction(FI);
4303}
4304
4305void Verifier::visitExtractValueInst(ExtractValueInst &EVI) {
4306 Check(ExtractValueInst::getIndexedType(EVI.getAggregateOperand()->getType(),
4307 EVI.getIndices()) == EVI.getType(),
4308 "Invalid ExtractValueInst operands!", &EVI);
4309
4310 visitInstruction(EVI);
4311}
4312
4313void Verifier::visitInsertValueInst(InsertValueInst &IVI) {
4314 Check(ExtractValueInst::getIndexedType(IVI.getAggregateOperand()->getType(),
4315 IVI.getIndices()) ==
4316 IVI.getOperand(1)->getType(),
4317 "Invalid InsertValueInst operands!", &IVI);
4318
4319 visitInstruction(IVI);
4320}
4321
4322static Value *getParentPad(Value *EHPad) {
4323 if (auto *FPI = dyn_cast<FuncletPadInst>(EHPad))
4324 return FPI->getParentPad();
4325
4326 return cast<CatchSwitchInst>(EHPad)->getParentPad();
4327}
4328
4329void Verifier::visitEHPadPredecessors(Instruction &I) {
4330 assert(I.isEHPad());
4331
4332 BasicBlock *BB = I.getParent();
4333 Function *F = BB->getParent();
4334
4335 Check(BB != &F->getEntryBlock(), "EH pad cannot be in entry block.", &I);
4336
4337 if (auto *LPI = dyn_cast<LandingPadInst>(&I)) {
4338 // The landingpad instruction defines its parent as a landing pad block. The
4339 // landing pad block may be branched to only by the unwind edge of an
4340 // invoke.
4341 for (BasicBlock *PredBB : predecessors(BB)) {
4342 const auto *II = dyn_cast<InvokeInst>(PredBB->getTerminator());
4343 Check(II && II->getUnwindDest() == BB && II->getNormalDest() != BB,
4344 "Block containing LandingPadInst must be jumped to "
4345 "only by the unwind edge of an invoke.",
4346 LPI);
4347 }
4348 return;
4349 }
4350 if (auto *CPI = dyn_cast<CatchPadInst>(&I)) {
4351 if (!pred_empty(BB))
4352 Check(BB->getUniquePredecessor() == CPI->getCatchSwitch()->getParent(),
4353 "Block containg CatchPadInst must be jumped to "
4354 "only by its catchswitch.",
4355 CPI);
4356 Check(BB != CPI->getCatchSwitch()->getUnwindDest(),
4357 "Catchswitch cannot unwind to one of its catchpads",
4358 CPI->getCatchSwitch(), CPI);
4359 return;
4360 }
4361
4362 // Verify that each pred has a legal terminator with a legal to/from EH
4363 // pad relationship.
4364 Instruction *ToPad = &I;
4365 Value *ToPadParent = getParentPad(ToPad);
4366 for (BasicBlock *PredBB : predecessors(BB)) {
4367 Instruction *TI = PredBB->getTerminator();
4368 Value *FromPad;
4369 if (auto *II = dyn_cast<InvokeInst>(TI)) {
4370 Check(II->getUnwindDest() == BB && II->getNormalDest() != BB,
4371 "EH pad must be jumped to via an unwind edge", ToPad, II);
4372 auto *CalledFn =
4373 dyn_cast<Function>(II->getCalledOperand()->stripPointerCasts());
4374 if (CalledFn && CalledFn->isIntrinsic() && II->doesNotThrow() &&
4375 !IntrinsicInst::mayLowerToFunctionCall(CalledFn->getIntrinsicID()))
4376 continue;
4377 if (auto Bundle = II->getOperandBundle(LLVMContext::OB_funclet))
4378 FromPad = Bundle->Inputs[0];
4379 else
4380 FromPad = ConstantTokenNone::get(II->getContext());
4381 } else if (auto *CRI = dyn_cast<CleanupReturnInst>(TI)) {
4382 FromPad = CRI->getOperand(0);
4383 Check(FromPad != ToPadParent, "A cleanupret must exit its cleanup", CRI);
4384 } else if (auto *CSI = dyn_cast<CatchSwitchInst>(TI)) {
4385 FromPad = CSI;
4386 } else {
4387 Check(false, "EH pad must be jumped to via an unwind edge", ToPad, TI);
4388 }
4389
4390 // The edge may exit from zero or more nested pads.
4391 SmallPtrSet<Value *, 8> Seen;
4392 for (;; FromPad = getParentPad(FromPad)) {
4393 Check(FromPad != ToPad,
4394 "EH pad cannot handle exceptions raised within it", FromPad, TI);
4395 if (FromPad == ToPadParent) {
4396 // This is a legal unwind edge.
4397 break;
4398 }
4399 Check(!isa<ConstantTokenNone>(FromPad),
4400 "A single unwind edge may only enter one EH pad", TI);
4401 Check(Seen.insert(FromPad).second, "EH pad jumps through a cycle of pads",
4402 FromPad);
4403
4404 // This will be diagnosed on the corresponding instruction already. We
4405 // need the extra check here to make sure getParentPad() works.
4406 Check(isa<FuncletPadInst>(FromPad) || isa<CatchSwitchInst>(FromPad),
4407 "Parent pad must be catchpad/cleanuppad/catchswitch", TI);
4408 }
4409 }
4410}
4411
4412void Verifier::visitLandingPadInst(LandingPadInst &LPI) {
4413 // The landingpad instruction is ill-formed if it doesn't have any clauses and
4414 // isn't a cleanup.
4415 Check(LPI.getNumClauses() > 0 || LPI.isCleanup(),
4416 "LandingPadInst needs at least one clause or to be a cleanup.", &LPI);
4417
4418 visitEHPadPredecessors(LPI);
4419
4420 if (!LandingPadResultTy)
4421 LandingPadResultTy = LPI.getType();
4422 else
4423 Check(LandingPadResultTy == LPI.getType(),
4424 "The landingpad instruction should have a consistent result type "
4425 "inside a function.",
4426 &LPI);
4427
4428 Function *F = LPI.getParent()->getParent();
4429 Check(F->hasPersonalityFn(),
4430 "LandingPadInst needs to be in a function with a personality.", &LPI);
4431
4432 // The landingpad instruction must be the first non-PHI instruction in the
4433 // block.
4434 Check(LPI.getParent()->getLandingPadInst() == &LPI,
4435 "LandingPadInst not the first non-PHI instruction in the block.", &LPI);
4436
4437 for (unsigned i = 0, e = LPI.getNumClauses(); i < e; ++i) {
4438 Constant *Clause = LPI.getClause(i);
4439 if (LPI.isCatch(i)) {
4440 Check(isa<PointerType>(Clause->getType()),
4441 "Catch operand does not have pointer type!", &LPI);
4442 } else {
4443 Check(LPI.isFilter(i), "Clause is neither catch nor filter!", &LPI);
4444 Check(isa<ConstantArray>(Clause) || isa<ConstantAggregateZero>(Clause),
4445 "Filter operand is not an array of constants!", &LPI);
4446 }
4447 }
4448
4449 visitInstruction(LPI);
4450}
4451
4452void Verifier::visitResumeInst(ResumeInst &RI) {
4454 "ResumeInst needs to be in a function with a personality.", &RI);
4455
4456 if (!LandingPadResultTy)
4457 LandingPadResultTy = RI.getValue()->getType();
4458 else
4459 Check(LandingPadResultTy == RI.getValue()->getType(),
4460 "The resume instruction should have a consistent result type "
4461 "inside a function.",
4462 &RI);
4463
4464 visitTerminator(RI);
4465}
4466
4467void Verifier::visitCatchPadInst(CatchPadInst &CPI) {
4468 BasicBlock *BB = CPI.getParent();
4469
4470 Function *F = BB->getParent();
4471 Check(F->hasPersonalityFn(),
4472 "CatchPadInst needs to be in a function with a personality.", &CPI);
4473
4474 Check(isa<CatchSwitchInst>(CPI.getParentPad()),
4475 "CatchPadInst needs to be directly nested in a CatchSwitchInst.",
4476 CPI.getParentPad());
4477
4478 // The catchpad instruction must be the first non-PHI instruction in the
4479 // block.
4480 Check(BB->getFirstNonPHI() == &CPI,
4481 "CatchPadInst not the first non-PHI instruction in the block.", &CPI);
4482
4483 visitEHPadPredecessors(CPI);
4484 visitFuncletPadInst(CPI);
4485}
4486
4487void Verifier::visitCatchReturnInst(CatchReturnInst &CatchReturn) {
4488 Check(isa<CatchPadInst>(CatchReturn.getOperand(0)),
4489 "CatchReturnInst needs to be provided a CatchPad", &CatchReturn,
4490 CatchReturn.getOperand(0));
4491
4492 visitTerminator(CatchReturn);
4493}
4494
4495void Verifier::visitCleanupPadInst(CleanupPadInst &CPI) {
4496 BasicBlock *BB = CPI.getParent();
4497
4498 Function *F = BB->getParent();
4499 Check(F->hasPersonalityFn(),
4500 "CleanupPadInst needs to be in a function with a personality.", &CPI);
4501
4502 // The cleanuppad instruction must be the first non-PHI instruction in the
4503 // block.
4504 Check(BB->getFirstNonPHI() == &CPI,
4505 "CleanupPadInst not the first non-PHI instruction in the block.", &CPI);
4506
4507 auto *ParentPad = CPI.getParentPad();
4508 Check(isa<ConstantTokenNone>(ParentPad) || isa<FuncletPadInst>(ParentPad),
4509 "CleanupPadInst has an invalid parent.", &CPI);
4510
4511 visitEHPadPredecessors(CPI);
4512 visitFuncletPadInst(CPI);
4513}
4514
4515void Verifier::visitFuncletPadInst(FuncletPadInst &FPI) {
4516 User *FirstUser = nullptr;
4517 Value *FirstUnwindPad = nullptr;
4518 SmallVector<FuncletPadInst *, 8> Worklist({&FPI});
4519 SmallSet<FuncletPadInst *, 8> Seen;
4520
4521 while (!Worklist.empty()) {
4522 FuncletPadInst *CurrentPad = Worklist.pop_back_val();
4523 Check(Seen.insert(CurrentPad).second,
4524 "FuncletPadInst must not be nested within itself", CurrentPad);
4525 Value *UnresolvedAncestorPad = nullptr;
4526 for (User *U : CurrentPad->users()) {
4527 BasicBlock *UnwindDest;
4528 if (auto *CRI = dyn_cast<CleanupReturnInst>(U)) {
4529 UnwindDest = CRI->getUnwindDest();
4530 } else if (auto *CSI = dyn_cast<CatchSwitchInst>(U)) {
4531 // We allow catchswitch unwind to caller to nest
4532 // within an outer pad that unwinds somewhere else,
4533 // because catchswitch doesn't have a nounwind variant.
4534 // See e.g. SimplifyCFGOpt::SimplifyUnreachable.
4535 if (CSI->unwindsToCaller())
4536 continue;
4537 UnwindDest = CSI->getUnwindDest();
4538 } else if (auto *II = dyn_cast<InvokeInst>(U)) {
4539 UnwindDest = II->getUnwindDest();
4540 } else if (isa<CallInst>(U)) {
4541 // Calls which don't unwind may be found inside funclet
4542 // pads that unwind somewhere else. We don't *require*
4543 // such calls to be annotated nounwind.
4544 continue;
4545 } else if (auto *CPI = dyn_cast<CleanupPadInst>(U)) {
4546 // The unwind dest for a cleanup can only be found by
4547 // recursive search. Add it to the worklist, and we'll
4548 // search for its first use that determines where it unwinds.
4549 Worklist.push_back(CPI);
4550 continue;
4551 } else {
4552 Check(isa<CatchReturnInst>(U), "Bogus funclet pad use", U);
4553 continue;
4554 }
4555
4556 Value *UnwindPad;
4557 bool ExitsFPI;
4558 if (UnwindDest) {
4559 UnwindPad = UnwindDest->getFirstNonPHI();
4560 if (!cast<Instruction>(UnwindPad)->isEHPad())
4561 continue;
4562 Value *UnwindParent = getParentPad(UnwindPad);
4563 // Ignore unwind edges that don't exit CurrentPad.
4564 if (UnwindParent == CurrentPad)
4565 continue;
4566 // Determine whether the original funclet pad is exited,
4567 // and if we are scanning nested pads determine how many
4568 // of them are exited so we can stop searching their
4569 // children.
4570 Value *ExitedPad = CurrentPad;
4571 ExitsFPI = false;
4572 do {
4573 if (ExitedPad == &FPI) {
4574 ExitsFPI = true;
4575 // Now we can resolve any ancestors of CurrentPad up to
4576 // FPI, but not including FPI since we need to make sure
4577 // to check all direct users of FPI for consistency.
4578 UnresolvedAncestorPad = &FPI;
4579 break;
4580 }
4581 Value *ExitedParent = getParentPad(ExitedPad);
4582 if (ExitedParent == UnwindParent) {
4583 // ExitedPad is the ancestor-most pad which this unwind
4584 // edge exits, so we can resolve up to it, meaning that
4585 // ExitedParent is the first ancestor still unresolved.
4586 UnresolvedAncestorPad = ExitedParent;
4587 break;
4588 }
4589 ExitedPad = ExitedParent;
4590 } while (!isa<ConstantTokenNone>(ExitedPad));
4591 } else {
4592 // Unwinding to caller exits all pads.
4593 UnwindPad = ConstantTokenNone::get(FPI.getContext());
4594 ExitsFPI = true;
4595 UnresolvedAncestorPad = &FPI;
4596 }
4597
4598 if (ExitsFPI) {
4599 // This unwind edge exits FPI. Make sure it agrees with other
4600 // such edges.
4601 if (FirstUser) {
4602 Check(UnwindPad == FirstUnwindPad,
4603 "Unwind edges out of a funclet "
4604 "pad must have the same unwind "
4605 "dest",
4606 &FPI, U, FirstUser);
4607 } else {
4608 FirstUser = U;
4609 FirstUnwindPad = UnwindPad;
4610 // Record cleanup sibling unwinds for verifySiblingFuncletUnwinds
4611 if (isa<CleanupPadInst>(&FPI) && !isa<ConstantTokenNone>(UnwindPad) &&
4612 getParentPad(UnwindPad) == getParentPad(&FPI))
4613 SiblingFuncletInfo[&FPI] = cast<Instruction>(U);
4614 }
4615 }
4616 // Make sure we visit all uses of FPI, but for nested pads stop as
4617 // soon as we know where they unwind to.
4618 if (CurrentPad != &FPI)
4619 break;
4620 }
4621 if (UnresolvedAncestorPad) {
4622 if (CurrentPad == UnresolvedAncestorPad) {
4623 // When CurrentPad is FPI itself, we don't mark it as resolved even if
4624 // we've found an unwind edge that exits it, because we need to verify
4625 // all direct uses of FPI.
4626 assert(CurrentPad == &FPI);
4627 continue;
4628 }
4629 // Pop off the worklist any nested pads that we've found an unwind
4630 // destination for. The pads on the worklist are the uncles,
4631 // great-uncles, etc. of CurrentPad. We've found an unwind destination
4632 // for all ancestors of CurrentPad up to but not including
4633 // UnresolvedAncestorPad.
4634 Value *ResolvedPad = CurrentPad;
4635 while (!Worklist.empty()) {
4636 Value *UnclePad = Worklist.back();
4637 Value *AncestorPad = getParentPad(UnclePad);
4638 // Walk ResolvedPad up the ancestor list until we either find the
4639 // uncle's parent or the last resolved ancestor.
4640 while (ResolvedPad != AncestorPad) {
4641 Value *ResolvedParent = getParentPad(ResolvedPad);
4642 if (ResolvedParent == UnresolvedAncestorPad) {
4643 break;
4644 }
4645 ResolvedPad = ResolvedParent;
4646 }
4647 // If the resolved ancestor search didn't find the uncle's parent,
4648 // then the uncle is not yet resolved.
4649 if (ResolvedPad != AncestorPad)
4650 break;
4651 // This uncle is resolved, so pop it from the worklist.
4652 Worklist.pop_back();
4653 }
4654 }
4655 }
4656
4657 if (FirstUnwindPad) {
4658 if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(FPI.getParentPad())) {
4659 BasicBlock *SwitchUnwindDest = CatchSwitch->getUnwindDest();
4660 Value *SwitchUnwindPad;
4661 if (SwitchUnwindDest)
4662 SwitchUnwindPad = SwitchUnwindDest->getFirstNonPHI();
4663 else
4664 SwitchUnwindPad = ConstantTokenNone::get(FPI.getContext());
4665 Check(SwitchUnwindPad == FirstUnwindPad,
4666 "Unwind edges out of a catch must have the same unwind dest as "
4667 "the parent catchswitch",
4668 &FPI, FirstUser, CatchSwitch);
4669 }
4670 }
4671
4672 visitInstruction(FPI);
4673}
4674
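// Illustrative IR (placeholder names): a catchswitch must begin its block,
// name a parent pad, and list at least one handler block that starts with a
// catchpad, e.g.
//   %cs = catchswitch within none [label %handler] unwind to caller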
4675void Verifier::visitCatchSwitchInst(CatchSwitchInst &CatchSwitch) {
4676 BasicBlock *BB = CatchSwitch.getParent();
4677
4678 Function *F = BB->getParent();
4679 Check(F->hasPersonalityFn(),
4680 "CatchSwitchInst needs to be in a function with a personality.",
4681 &CatchSwitch);
4682
4683 // The catchswitch instruction must be the first non-PHI instruction in the
4684 // block.
4685 Check(BB->getFirstNonPHI() == &CatchSwitch,
4686 "CatchSwitchInst not the first non-PHI instruction in the block.",
4687 &CatchSwitch);
4688
4689 auto *ParentPad = CatchSwitch.getParentPad();
4690 Check(isa<ConstantTokenNone>(ParentPad) || isa<FuncletPadInst>(ParentPad),
4691 "CatchSwitchInst has an invalid parent.", ParentPad);
4692
4693 if (BasicBlock *UnwindDest = CatchSwitch.getUnwindDest()) {
4694 Instruction *I = UnwindDest->getFirstNonPHI();
4695 Check(I->isEHPad() && !isa<LandingPadInst>(I),
4696 "CatchSwitchInst must unwind to an EH block which is not a "
4697 "landingpad.",
4698 &CatchSwitch);
4699
4700 // Record catchswitch sibling unwinds for verifySiblingFuncletUnwinds
4701 if (getParentPad(I) == ParentPad)
4702 SiblingFuncletInfo[&CatchSwitch] = &CatchSwitch;
4703 }
4704
4705 Check(CatchSwitch.getNumHandlers() != 0,
4706 "CatchSwitchInst cannot have empty handler list", &CatchSwitch);
4707
4708 for (BasicBlock *Handler : CatchSwitch.handlers()) {
4709 Check(isa<CatchPadInst>(Handler->getFirstNonPHI()),
4710 "CatchSwitchInst handlers must be catchpads", &CatchSwitch, Handler);
4711 }
4712
4713 visitEHPadPredecessors(CatchSwitch);
4714 visitTerminator(CatchSwitch);
4715}
4716
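// Illustrative IR (placeholder names): a cleanupret names the cleanuppad it
// exits and unwinds either to the caller or to another EH pad block, e.g.
//   cleanupret from %cleanup unwind to caller
//   cleanupret from %cleanup unwind label %outer.ehpad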
4717void Verifier::visitCleanupReturnInst(CleanupReturnInst &CRI) {
4718 Check(isa<CleanupPadInst>(CRI.getOperand(0)),
4719 "CleanupReturnInst needs to be provided a CleanupPad", &CRI,
4720 CRI.getOperand(0));
4721
4722 if (BasicBlock *UnwindDest = CRI.getUnwindDest()) {
4723 Instruction *I = UnwindDest->getFirstNonPHI();
4724 Check(I->isEHPad() && !isa<LandingPadInst>(I),
4725 "CleanupReturnInst must unwind to an EH block which is not a "
4726 "landingpad.",
4727 &CRI);
4728 }
4729
4730 visitTerminator(CRI);
4731}
4732
4733void Verifier::verifyDominatesUse(Instruction &I, unsigned i) {
4734 Instruction *Op = cast<Instruction>(I.getOperand(i));
4735 // If we have an invalid invoke, don't try to compute the dominance.
4736 // We already reject it in the invoke specific checks and the dominance
4737 // computation doesn't handle multiple edges.
4738 if (InvokeInst *II = dyn_cast<InvokeInst>(Op)) {
4739 if (II->getNormalDest() == II->getUnwindDest())
4740 return;
4741 }
4742
4743 // Quick check whether the def has already been encountered in the same block.
4744 // PHI nodes are not checked to prevent accepting preceding PHIs, because PHI
4745 // uses are defined to happen on the incoming edge, not at the instruction.
4746 //
4747 // FIXME: If this operand is a MetadataAsValue (wrapping a LocalAsMetadata)
4748 // wrapping an SSA value, assert that we've already encountered it. See
4749 // related FIXME in Mapper::mapLocalAsMetadata in ValueMapper.cpp.
4750 if (!isa<PHINode>(I) && InstsInThisBlock.count(Op))
4751 return;
4752
4753 const Use &U = I.getOperandUse(i);
4754 Check(DT.dominates(Op, U), "Instruction does not dominate all uses!", Op, &I);
4755}
4756
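// Illustrative use of this metadata (placeholder names): a load of a pointer
// may carry an i64 byte count, e.g.
//   %p = load ptr, ptr %pp, !dereferenceable !0
//   !0 = !{i64 8}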
4757void Verifier::visitDereferenceableMetadata(Instruction& I, MDNode* MD) {
4758 Check(I.getType()->isPointerTy(),
4759 "dereferenceable, dereferenceable_or_null "
4760 "apply only to pointer types",
4761 &I);
4762 Check((isa<LoadInst>(I) || isa<IntToPtrInst>(I)),
4763 "dereferenceable, dereferenceable_or_null apply only to load"
4764 " and inttoptr instructions, use attributes for calls or invokes",
4765 &I);
4766 Check(MD->getNumOperands() == 1,
4767 "dereferenceable, dereferenceable_or_null "
4768 "take one operand!",
4769 &I);
4770 ConstantInt *CI = mdconst::dyn_extract<ConstantInt>(MD->getOperand(0));
4771 Check(CI && CI->getType()->isIntegerTy(64),
4772 "dereferenceable, "
4773 "dereferenceable_or_null metadata value must be an i64!",
4774 &I);
4775}
4776
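// Illustrative !prof attachment (placeholder values): a two-successor branch
// carries one weight per successor after the "branch_weights" tag, e.g.
//   br i1 %cond, label %then, label %else, !prof !0
//   !0 = !{!"branch_weights", i32 20, i32 80}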
4777void Verifier::visitProfMetadata(Instruction &I, MDNode *MD) {
4778 Check(MD->getNumOperands() >= 2,
4779 "!prof annotations should have no less than 2 operands", MD);
4780
4781 // Check first operand.
4782 Check(MD->getOperand(0) != nullptr, "first operand should not be null", MD);
4783 Check(isa<MDString>(MD->getOperand(0)),
4784 "expected string with name of the !prof annotation", MD);
4785 MDString *MDS = cast<MDString>(MD->getOperand(0));
4786 StringRef ProfName = MDS->getString();
4787
4788 // Check consistency of !prof branch_weights metadata.
4789 if (ProfName == "branch_weights") {
4790 if (isa<InvokeInst>(&I)) {
4791 Check(MD->getNumOperands() == 2 || MD->getNumOperands() == 3,
4792 "Wrong number of InvokeInst branch_weights operands", MD);
4793 } else {
4794 unsigned ExpectedNumOperands = 0;
4795 if (BranchInst *BI = dyn_cast<BranchInst>(&I))
4796 ExpectedNumOperands = BI->getNumSuccessors();
4797 else if (SwitchInst *SI = dyn_cast<SwitchInst>(&I))
4798 ExpectedNumOperands = SI->getNumSuccessors();
4799 else if (isa<CallInst>(&I))
4800 ExpectedNumOperands = 1;
4801 else if (IndirectBrInst *IBI = dyn_cast<IndirectBrInst>(&I))
4802 ExpectedNumOperands = IBI->getNumDestinations();
4803 else if (isa<SelectInst>(&I))
4804 ExpectedNumOperands = 2;
4805 else if (CallBrInst *CI = dyn_cast<CallBrInst>(&I))
4806 ExpectedNumOperands = CI->getNumSuccessors();
4807 else
4808 CheckFailed("!prof branch_weights are not allowed for this instruction",
4809 MD);
4810
4811 Check(MD->getNumOperands() == 1 + ExpectedNumOperands,
4812 "Wrong number of operands", MD);
4813 }
4814 for (unsigned i = 1; i < MD->getNumOperands(); ++i) {
4815 auto &MDO = MD->getOperand(i);
4816 Check(MDO, "second operand should not be null", MD);
4817 Check(mdconst::dyn_extract<ConstantInt>(MDO),
4818 "!prof brunch_weights operand is not a const int");
4819 }
4820 }
4821}
4822
4823void Verifier::visitDIAssignIDMetadata(Instruction &I, MDNode *MD) {
4824 assert(I.hasMetadata(LLVMContext::MD_DIAssignID));
4825 bool ExpectedInstTy =
4826 isa<AllocaInst>(I) || isa<StoreInst>(I) || isa<MemIntrinsic>(I);
4827 CheckDI(ExpectedInstTy, "!DIAssignID attached to unexpected instruction kind",
4828 I, MD);
4829 // Iterate over the MetadataAsValue uses of the DIAssignID - these should
4830 // only be found as DbgAssignIntrinsic operands.
4831 if (auto *AsValue = MetadataAsValue::getIfExists(Context, MD)) {
4832 for (auto *User : AsValue->users()) {
4833 CheckDI(isa<DbgAssignIntrinsic>(User),
4834 "!DIAssignID should only be used by llvm.dbg.assign intrinsics",
4835 MD, User);
4836 // All of the dbg.assign intrinsics should be in the same function as I.
4837 if (auto *DAI = dyn_cast<DbgAssignIntrinsic>(User))
4838 CheckDI(DAI->getFunction() == I.getFunction(),
4839 "dbg.assign not in same function as inst", DAI, &I);
4840 }
4841 }
4842 for (DbgVariableRecord *DVR :
4843 cast<DIAssignID>(MD)->getAllDbgVariableRecordUsers()) {
4844 CheckDI(DVR->isDbgAssign(),
4845 "!DIAssignID should only be used by Assign DVRs.", MD, DVR);
4846 CheckDI(DVR->getFunction() == I.getFunction(),
4847 "DVRAssign not in same function as inst", DVR, &I);
4848 }
4849}
4850
4851void Verifier::visitMMRAMetadata(Instruction &I, MDNode *MD) {
4853 "!mmra metadata attached to unexpected instruction kind", I, MD);
4854
4855 // MMRA Metadata should either be a tag, e.g. !{!"foo", !"bar"}, or a
4856 // list of tags such as !2 in the following example:
4857 // !0 = !{!"a", !"b"}
4858 // !1 = !{!"c", !"d"}
4859 // !2 = !{!0, !1}
4860 if (MMRAMetadata::isTagMD(MD))
4861 return;
4862
4863 Check(isa<MDTuple>(MD), "!mmra expected to be a metadata tuple", I, MD);
4864 for (const MDOperand &MDOp : MD->operands())
4865 Check(MMRAMetadata::isTagMD(MDOp.get()),
4866 "!mmra metadata tuple operand is not an MMRA tag", I, MDOp.get());
4867}
4868
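// Illustrative call stack metadata (placeholder values): a list of i64
// location hashes, e.g. !5 = !{i64 123, i64 456}.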
4869void Verifier::visitCallStackMetadata(MDNode *MD) {
4870 // Call stack metadata should consist of a list of at least 1 constant int
4871 // (representing a hash of the location).
4872 Check(MD->getNumOperands() >= 1,
4873 "call stack metadata should have at least 1 operand", MD);
4874
4875 for (const auto &Op : MD->operands())
4876 Check(mdconst::dyn_extract_or_null<ConstantInt>(Op),
4877 "call stack metadata operand should be constant integer", Op);
4878}
4879
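// Illustrative !memprof attachment (placeholder values), consistent with the
// checks below: each MemInfoBlock pairs a call stack node with string tags,
// e.g.
//   !0 = !{!1}                 ; list of MemInfoBlocks
//   !1 = !{!2, !"cold"}        ; call stack node + allocation-type tag
//   !2 = !{i64 123, i64 456}   ; call stack of location hashes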
4880void Verifier::visitMemProfMetadata(Instruction &I, MDNode *MD) {
4881 Check(isa<CallBase>(I), "!memprof metadata should only exist on calls", &I);
4882 Check(MD->getNumOperands() >= 1,
4883 "!memprof annotations should have at least 1 metadata operand "
4884 "(MemInfoBlock)",
4885 MD);
4886
4887 // Check each MIB
4888 for (auto &MIBOp : MD->operands()) {
4889 MDNode *MIB = dyn_cast<MDNode>(MIBOp);
4890 // The first operand of an MIB should be the call stack metadata.
4891 // The rest of the operands should be MDString tags, and there should be
4892 // at least one.
4893 Check(MIB->getNumOperands() >= 2,
4894 "Each !memprof MemInfoBlock should have at least 2 operands", MIB);
4895
4896 // Check call stack metadata (first operand).
4897 Check(MIB->getOperand(0) != nullptr,
4898 "!memprof MemInfoBlock first operand should not be null", MIB);
4899 Check(isa<MDNode>(MIB->getOperand(0)),
4900 "!memprof MemInfoBlock first operand should be an MDNode", MIB);
4901 MDNode *StackMD = dyn_cast<MDNode>(MIB->getOperand(0));
4902 visitCallStackMetadata(StackMD);
4903
4904 // Check that remaining operands are MDString.
4905 Check(llvm::all_of(drop_begin(MIB->operands()),
4906 [](const MDOperand &Op) { return isa<MDString>(Op); }),
4907 "Not all !memprof MemInfoBlock operands 1 to N are MDString", MIB);
4908 }
4909}
4910
4911void Verifier::visitCallsiteMetadata(Instruction &I, MDNode *MD) {
4912 Check(isa<CallBase>(I), "!callsite metadata should only exist on calls", &I);
4913 // Verify the partial callstack annotated from memprof profiles. This callsite
4914 // is a part of a profiled allocation callstack.
4915 visitCallStackMetadata(MD);
4916}
4917
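// Illustrative !annotation metadata (placeholder strings): each operand is a
// string or a tuple of strings, e.g.
//   !0 = !{!"auto-init", !1}
//   !1 = !{!"file.c", !"line 10"}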
4918void Verifier::visitAnnotationMetadata(MDNode *Annotation) {
4919 Check(isa<MDTuple>(Annotation), "annotation must be a tuple");
4920 Check(Annotation->getNumOperands() >= 1,
4921 "annotation must have at least one operand");
4922 for (const MDOperand &Op : Annotation->operands()) {
4923 bool TupleOfStrings =
4924 isa<MDTuple>(Op.get()) &&
4925 all_of(cast<MDTuple>(Op)->operands(), [](auto &Annotation) {
4926 return isa<MDString>(Annotation.get());
4927 });
4928 Check(isa<MDString>(Op.get()) || TupleOfStrings,
4929 "operands must be a string or a tuple of strings");
4930 }
4931}
4932
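// Illustrative scope/domain shape (placeholder names), matching the checks
// below:
//   !0 = !{!0, !"my domain"}        ; domain: self-referential + optional name
//   !1 = !{!1, !0, !"my scope"}     ; scope: self-ref, domain, optional name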
4933void Verifier::visitAliasScopeMetadata(const MDNode *MD) {
4934 unsigned NumOps = MD->getNumOperands();
4935 Check(NumOps >= 2 && NumOps <= 3, "scope must have two or three operands",
4936 MD);
4937 Check(MD->getOperand(0).get() == MD || isa<MDString>(MD->getOperand(0)),
4938 "first scope operand must be self-referential or string", MD);
4939 if (NumOps == 3)
4940 Check(isa<MDString>(MD->getOperand(2)),
4941 "third scope operand must be string (if used)", MD);
4942
4943 MDNode *Domain = dyn_cast<MDNode>(MD->getOperand(1));
4944 Check(Domain != nullptr, "second scope operand must be MDNode", MD);
4945
4946 unsigned NumDomainOps = Domain->getNumOperands();
4947 Check(NumDomainOps >= 1 && NumDomainOps <= 2,
4948 "domain must have one or two operands", Domain);
4949 Check(Domain->getOperand(0).get() == Domain ||
4950 isa<MDString>(Domain->getOperand(0)),
4951 "first domain operand must be self-referential or string", Domain);
4952 if (NumDomainOps == 2)
4953 Check(isa<MDString>(Domain->getOperand(1)),
4954 "second domain operand must be string (if used)", Domain);
4955}
4956
4957void Verifier::visitAliasScopeListMetadata(const MDNode *MD) {
4958 for (const MDOperand &Op : MD->operands()) {
4959 const MDNode *OpMD = dyn_cast<MDNode>(Op);
4960 Check(OpMD != nullptr, "scope list must consist of MDNodes", MD);
4961 visitAliasScopeMetadata(OpMD);
4962 }
4963}
4964
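// Illustrative access group metadata: each access group is a distinct, empty
// node, and an attachment may also be a list of such nodes, e.g.
//   !0 = distinct !{}
//   !1 = distinct !{}
//   !2 = !{!0, !1}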
4965void Verifier::visitAccessGroupMetadata(const MDNode *MD) {
4966 auto IsValidAccessScope = [](const MDNode *MD) {
4967 return MD->getNumOperands() == 0 && MD->isDistinct();
4968 };
4969
4970 // It must be either an access scope itself...
4971 if (IsValidAccessScope(MD))
4972 return;
4973
4974 // ...or a list of access scopes.
4975 for (const MDOperand &Op : MD->operands()) {
4976 const MDNode *OpMD = dyn_cast<MDNode>(Op);
4977 Check(OpMD != nullptr, "Access scope list must consist of MDNodes", MD);
4978 Check(IsValidAccessScope(OpMD),
4979 "Access scope list contains invalid access scope", MD);
4980 }
4981}
4982
4983 /// visitInstruction - Verify that an instruction is well formed.
4984///
4985void Verifier::visitInstruction(Instruction &I) {
4986 BasicBlock *BB = I.getParent();
4987 Check(BB, "Instruction not embedded in basic block!", &I);
4988
4989 if (!isa<PHINode>(I)) { // Check that non-phi nodes are not self referential
4990 for (User *U : I.users()) {
4991 Check(U != (User *)&I || !DT.isReachableFromEntry(BB),
4992 "Only PHI nodes may reference their own value!", &I);
4993 }
4994 }
4995
4996 // Check that void typed values don't have names
4997 Check(!I.getType()->isVoidTy() || !I.hasName(),
4998 "Instruction has a name, but provides a void value!", &I);
4999
5000 // Check that the return value of the instruction is either void or a legal
5001 // value type.
5002 Check(I.getType()->isVoidTy() || I.getType()->isFirstClassType(),
5003 "Instruction returns a non-scalar type!", &I);
5004
5005 // Check that the instruction doesn't produce metadata. Calls are already
5006 // checked against the callee type.
5007 Check(!I.getType()->isMetadataTy() || isa<CallInst>(I) || isa<InvokeInst>(I),
5008 "Invalid use of metadata!", &I);
5009
5010 // Check that all uses of the instruction, if they are instructions
5011 // themselves, actually have parent basic blocks. If the use is not an
5012 // instruction, it is an error!
5013 for (Use &U : I.uses()) {
5014 if (Instruction *Used = dyn_cast<Instruction>(U.getUser()))
5015 Check(Used->getParent() != nullptr,
5016 "Instruction referencing"
5017 " instruction not embedded in a basic block!",
5018 &I, Used);
5019 else {
5020 CheckFailed("Use of instruction is not an instruction!", U);
5021 return;
5022 }
5023 }
5024
5025 // Get a pointer to the call base of the instruction if it is some form of
5026 // call.
5027 const CallBase *CBI = dyn_cast<CallBase>(&I);
5028
5029 for (unsigned i = 0, e = I.getNumOperands(); i != e; ++i) {
5030 Check(I.getOperand(i) != nullptr, "Instruction has null operand!", &I);
5031
5032 // Check to make sure that only first-class-values are operands to
5033 // instructions.
5034 if (!I.getOperand(i)->getType()->isFirstClassType()) {
5035 Check(false, "Instruction operands must be first-class values!", &I);
5036 }
5037
5038 if (Function *F = dyn_cast<Function>(I.getOperand(i))) {
5039 // This code checks whether the function is used as the operand of a
5040 // clang_arc_attachedcall operand bundle.
5041 auto IsAttachedCallOperand = [](Function *F, const CallBase *CBI,
5042 int Idx) {
5043 return CBI && CBI->isOperandBundleOfType(
5044 LLVMContext::OB_clang_arc_attachedcall, Idx);
5045 };
5046
5047 // Check to make sure that the "address of" an intrinsic function is never
5048 // taken. Ignore cases where the address of the intrinsic function is used
5049 // as the argument of operand bundle "clang.arc.attachedcall" as those
5050 // cases are handled in verifyAttachedCallBundle.
5051 Check((!F->isIntrinsic() ||
5052 (CBI && &CBI->getCalledOperandUse() == &I.getOperandUse(i)) ||
5053 IsAttachedCallOperand(F, CBI, i)),
5054 "Cannot take the address of an intrinsic!", &I);
5055 Check(!F->isIntrinsic() || isa<CallInst>(I) ||
5056 F->getIntrinsicID() == Intrinsic::donothing ||
5057 F->getIntrinsicID() == Intrinsic::seh_try_begin ||
5058 F->getIntrinsicID() == Intrinsic::seh_try_end ||
5059 F->getIntrinsicID() == Intrinsic::seh_scope_begin ||
5060 F->getIntrinsicID() == Intrinsic::seh_scope_end ||
5061 F->getIntrinsicID() == Intrinsic::coro_resume ||
5062 F->getIntrinsicID() == Intrinsic::coro_destroy ||
5063 F->getIntrinsicID() == Intrinsic::coro_await_suspend_void ||
5064 F->getIntrinsicID() == Intrinsic::coro_await_suspend_bool ||
5065 F->getIntrinsicID() == Intrinsic::coro_await_suspend_handle ||
5066 F->getIntrinsicID() ==
5067 Intrinsic::experimental_patchpoint_void ||
5068 F->getIntrinsicID() == Intrinsic::experimental_patchpoint ||
5069 F->getIntrinsicID() == Intrinsic::experimental_gc_statepoint ||
5070 F->getIntrinsicID() == Intrinsic::wasm_rethrow ||
5071 IsAttachedCallOperand(F, CBI, i),
5072 "Cannot invoke an intrinsic other than donothing, patchpoint, "
5073 "statepoint, coro_resume, coro_destroy or clang.arc.attachedcall",
5074 &I);
5075 Check(F->getParent() == &M, "Referencing function in another module!", &I,
5076 &M, F, F->getParent());
5077 } else if (BasicBlock *OpBB = dyn_cast<BasicBlock>(I.getOperand(i))) {
5078 Check(OpBB->getParent() == BB->getParent(),
5079 "Referring to a basic block in another function!", &I);
5080 } else if (Argument *OpArg = dyn_cast<Argument>(I.getOperand(i))) {
5081 Check(OpArg->getParent() == BB->getParent(),
5082 "Referring to an argument in another function!", &I);
5083 } else if (GlobalValue *GV = dyn_cast<GlobalValue>(I.getOperand(i))) {
5084 Check(GV->getParent() == &M, "Referencing global in another module!", &I,
5085 &M, GV, GV->getParent());
5086 } else if (Instruction *OpInst = dyn_cast<Instruction>(I.getOperand(i))) {
5087 Check(OpInst->getFunction() == BB->getParent(),
5088 "Referring to an instruction in another function!", &I);
5089 verifyDominatesUse(I, i);
5090 } else if (isa<InlineAsm>(I.getOperand(i))) {
5091 Check(CBI && &CBI->getCalledOperandUse() == &I.getOperandUse(i),
5092 "Cannot take the address of an inline asm!", &I);
5093 } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(I.getOperand(i))) {
5094 if (CE->getType()->isPtrOrPtrVectorTy()) {
5095 // If we have a ConstantExpr pointer, we need to see if it came from an
5096 // illegal bitcast.
5097 visitConstantExprsRecursively(CE);
5098 }
5099 }
5100 }
5101
5102 if (MDNode *MD = I.getMetadata(LLVMContext::MD_fpmath)) {
5103 Check(I.getType()->isFPOrFPVectorTy(),
5104 "fpmath requires a floating point result!", &I);
5105 Check(MD->getNumOperands() == 1, "fpmath takes one operand!", &I);
5106 if (ConstantFP *CFP0 =
5107 mdconst::dyn_extract_or_null<ConstantFP>(MD->getOperand(0))) {
5108 const APFloat &Accuracy = CFP0->getValueAPF();
5109 Check(&Accuracy.getSemantics() == &APFloat::IEEEsingle(),
5110 "fpmath accuracy must have float type", &I);
5111 Check(Accuracy.isFiniteNonZero() && !Accuracy.isNegative(),
5112 "fpmath accuracy not a positive number!", &I);
5113 } else {
5114 Check(false, "invalid fpmath accuracy!", &I);
5115 }
5116 }
5117
5118 if (MDNode *Range = I.getMetadata(LLVMContext::MD_range)) {
5119 Check(isa<LoadInst>(I) || isa<CallInst>(I) || isa<InvokeInst>(I),
5120 "Ranges are only for loads, calls and invokes!", &I);
5121 visitRangeMetadata(I, Range, I.getType());
5122 }
5123
5124 if (I.hasMetadata(LLVMContext::MD_invariant_group)) {
5125 Check(isa<LoadInst>(I) || isa<StoreInst>(I),
5126 "invariant.group metadata is only for loads and stores", &I);
5127 }
5128
5129 if (MDNode *MD = I.getMetadata(LLVMContext::MD_nonnull)) {
5130 Check(I.getType()->isPointerTy(), "nonnull applies only to pointer types",
5131 &I);
5132 Check(isa<LoadInst>(I),
5133 "nonnull applies only to load instructions, use attributes"
5134 " for calls or invokes",
5135 &I);
5136 Check(MD->getNumOperands() == 0, "nonnull metadata must be empty", &I);
5137 }
5138
5139 if (MDNode *MD = I.getMetadata(LLVMContext::MD_dereferenceable))
5140 visitDereferenceableMetadata(I, MD);
5141
5142 if (MDNode *MD = I.getMetadata(LLVMContext::MD_dereferenceable_or_null))
5143 visitDereferenceableMetadata(I, MD);
5144
5145 if (MDNode *TBAA = I.getMetadata(LLVMContext::MD_tbaa))
5146 TBAAVerifyHelper.visitTBAAMetadata(I, TBAA);
5147
5148 if (MDNode *MD = I.getMetadata(LLVMContext::MD_noalias))
5149 visitAliasScopeListMetadata(MD);
5150 if (MDNode *MD = I.getMetadata(LLVMContext::MD_alias_scope))
5151 visitAliasScopeListMetadata(MD);
5152
5153 if (MDNode *MD = I.getMetadata(LLVMContext::MD_access_group))
5154 visitAccessGroupMetadata(MD);
5155
5156 if (MDNode *AlignMD = I.getMetadata(LLVMContext::MD_align)) {
5157 Check(I.getType()->isPointerTy(), "align applies only to pointer types",
5158 &I);
5159 Check(isa<LoadInst>(I),
5160 "align applies only to load instructions, "
5161 "use attributes for calls or invokes",
5162 &I);
5163 Check(AlignMD->getNumOperands() == 1, "align takes one operand!", &I);
5164 ConstantInt *CI = mdconst::dyn_extract<ConstantInt>(AlignMD->getOperand(0));
5165 Check(CI && CI->getType()->isIntegerTy(64),
5166 "align metadata value must be an i64!", &I);
5167 uint64_t Align = CI->getZExtValue();
5168 Check(isPowerOf2_64(Align), "align metadata value must be a power of 2!",
5169 &I);
5171 "alignment is larger that implementation defined limit", &I);
5172 }
5173
5174 if (MDNode *MD = I.getMetadata(LLVMContext::MD_prof))
5175 visitProfMetadata(I, MD);
5176
5177 if (MDNode *MD = I.getMetadata(LLVMContext::MD_memprof))
5178 visitMemProfMetadata(I, MD);
5179
5180 if (MDNode *MD = I.getMetadata(LLVMContext::MD_callsite))
5181 visitCallsiteMetadata(I, MD);
5182
5183 if (MDNode *MD = I.getMetadata(LLVMContext::MD_DIAssignID))
5184 visitDIAssignIDMetadata(I, MD);
5185
5186 if (MDNode *MMRA = I.getMetadata(LLVMContext::MD_mmra))
5187 visitMMRAMetadata(I, MMRA);
5188
5189 if (MDNode *Annotation = I.getMetadata(LLVMContext::MD_annotation))
5190 visitAnnotationMetadata(Annotation);
5191
5192 if (MDNode *N = I.getDebugLoc().getAsMDNode()) {
5193 CheckDI(isa<DILocation>(N), "invalid !dbg metadata attachment", &I, N);
5194 visitMDNode(*N, AreDebugLocsAllowed::Yes);
5195 }
5196
5197 if (auto *DII = dyn_cast<DbgVariableIntrinsic>(&I)) {
5198 verifyFragmentExpression(*DII);
5199 verifyNotEntryValue(*DII);
5200 }
5201
5202 SmallVector<std::pair<unsigned, MDNode *>, 4> MDs;
5203 I.getAllMetadata(MDs);
5204 for (auto Attachment : MDs) {
5205 unsigned Kind = Attachment.first;
5206 auto AllowLocs =
5207 (Kind == LLVMContext::MD_dbg || Kind == LLVMContext::MD_loop)
5208 ? AreDebugLocsAllowed::Yes
5209 : AreDebugLocsAllowed::No;
5210 visitMDNode(*Attachment.second, AllowLocs);
5211 }
5212
5213 InstsInThisBlock.insert(&I);
5214}
5215
5216/// Allow intrinsics to be verified in different ways.
5217void Verifier::visitIntrinsicCall(Intrinsic::ID ID, CallBase &Call) {
5218 Function *IF = Call.getCalledFunction();
5219 Check(IF->isDeclaration(), "Intrinsic functions should never be defined!",
5220 IF);
5221
5222 // Verify that the intrinsic prototype lines up with what the .td files
5223 // describe.
5224 FunctionType *IFTy = IF->getFunctionType();
5225 bool IsVarArg = IFTy->isVarArg();
5226
5227 SmallVector<Intrinsic::IITDescriptor, 8> Table;
5228 getIntrinsicInfoTableEntries(ID, Table);
5229 ArrayRef<Intrinsic::IITDescriptor> TableRef = Table;
5230
5231 // Walk the descriptors to extract overloaded types.
5232 SmallVector<Type *, 4> ArgTys;
5233 Intrinsic::MatchIntrinsicTypesResult Res =
5234 Intrinsic::matchIntrinsicSignature(IFTy, TableRef, ArgTys);
5235 Check(Res != Intrinsic::MatchIntrinsicTypes_NoMatchRet,
5236 "Intrinsic has incorrect return type!", IF);
5237 Check(Res != Intrinsic::MatchIntrinsicTypes_NoMatchArg,
5238 "Intrinsic has incorrect argument type!", IF);
5239
5240 // Verify if the intrinsic call matches the vararg property.
5241 if (IsVarArg)
5242 Check(Intrinsic::matchIntrinsicVarArg(IsVarArg, TableRef),
5243 "Intrinsic was not defined with variable arguments!", IF);
5244 else
5245 Check(!Intrinsic::matchIntrinsicVarArg(IsVarArg, TableRef),
5246 "Callsite was not defined with variable arguments!", IF);
5247
5248 // All descriptors should be absorbed by now.
5249 Check(TableRef.empty(), "Intrinsic has too few arguments!", IF);
5250
5251 // Now that we have the intrinsic ID and the actual argument types (and we
5252 // know they are legal for the intrinsic!) get the intrinsic name through the
5253 // usual means. This allows us to verify the mangling of argument types into
5254 // the name.
5255 const std::string ExpectedName =
5256 Intrinsic::getName(ID, ArgTys, IF->getParent(), IFTy);
5257 Check(ExpectedName == IF->getName(),
5258 "Intrinsic name not mangled correctly for type arguments! "
5259 "Should be: " +
5260 ExpectedName,
5261 IF);
5262
5263 // If the intrinsic takes MDNode arguments, verify that they are either global
5264 // or are local to *this* function.
5265 for (Value *V : Call.args()) {
5266 if (auto *MD = dyn_cast<MetadataAsValue>(V))
5267 visitMetadataAsValue(*MD, Call.getCaller());
5268 if (auto *Const = dyn_cast<Constant>(V))
5269 Check(!Const->getType()->isX86_AMXTy(),
5270 "const x86_amx is not allowed in argument!");
5271 }
5272
5273 switch (ID) {
5274 default:
5275 break;
5276 case Intrinsic::assume: {
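// Illustrative assume bundles (placeholder values): operand bundles encode
// either an attribute name with its arguments or "separate_storage", e.g.
//   call void @llvm.assume(i1 true) ["align"(ptr %p, i64 16), "nonnull"(ptr %q)]
//   call void @llvm.assume(i1 true) ["separate_storage"(ptr %a, ptr %b)]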
5277 for (auto &Elem : Call.bundle_op_infos()) {
5278 unsigned ArgCount = Elem.End - Elem.Begin;
5279 // Separate storage assumptions are special insofar as they're the only
5280 // operand bundles allowed on assumes that aren't parameter attributes.
5281 if (Elem.Tag->getKey() == "separate_storage") {
5282 Check(ArgCount == 2,
5283 "separate_storage assumptions should have 2 arguments", Call);
5284 Check(Call.getOperand(Elem.Begin)->getType()->isPointerTy() &&
5285 Call.getOperand(Elem.Begin + 1)->getType()->isPointerTy(),
5286 "arguments to separate_storage assumptions should be pointers",
5287 Call);
5288 return;
5289 }
5290 Check(Elem.Tag->getKey() == "ignore" ||
5291 Attribute::isExistingAttribute(Elem.Tag->getKey()),
5292 "tags must be valid attribute names", Call);
5293 Attribute::AttrKind Kind =
5294 Attribute::getAttrKindFromName(Elem.Tag->getKey());
5295 if (Kind == Attribute::Alignment) {
5296 Check(ArgCount <= 3 && ArgCount >= 2,
5297 "alignment assumptions should have 2 or 3 arguments", Call);
5298 Check(Call.getOperand(Elem.Begin)->getType()->isPointerTy(),
5299 "first argument should be a pointer", Call);
5300 Check(Call.getOperand(Elem.Begin + 1)->getType()->isIntegerTy(),
5301 "second argument should be an integer", Call);
5302 if (ArgCount == 3)
5303 Check(Call.getOperand(Elem.Begin + 2)->getType()->isIntegerTy(),
5304 "third argument should be an integer if present", Call);
5305 return;
5306 }
5307 Check(ArgCount <= 2, "too many arguments", Call);
5308 if (Kind == Attribute::None)
5309 break;
5310 if (Attribute::isIntAttrKind(Kind)) {
5311 Check(ArgCount == 2, "this attribute should have 2 arguments", Call);
5312 Check(isa<ConstantInt>(Call.getOperand(Elem.Begin + 1)),
5313 "the second argument should be a constant integral value", Call);
5314 } else if (Attribute::canUseAsParamAttr(Kind)) {
5315 Check((ArgCount) == 1, "this attribute should have one argument", Call);
5316 } else if (Attribute::canUseAsFnAttr(Kind)) {
5317 Check((ArgCount) == 0, "this attribute has no argument", Call);
5318 }
5319 }
5320 break;
5321 }
5322 case Intrinsic::ucmp:
5323 case Intrinsic::scmp: {
5324 Type *SrcTy = Call.getOperand(0)->getType();
5325 Type *DestTy = Call.getType();
5326
5327 Check(DestTy->getScalarSizeInBits() >= 2,
5328 "result type must be at least 2 bits wide", Call);
5329
5330 bool IsDestTypeVector = DestTy->isVectorTy();
5331 Check(SrcTy->isVectorTy() == IsDestTypeVector,
5332 "ucmp/scmp argument and result types must both be either vector or "
5333 "scalar types",
5334 Call);
5335 if (IsDestTypeVector) {
5336 auto SrcVecLen = cast<VectorType>(SrcTy)->getElementCount();
5337 auto DestVecLen = cast<VectorType>(DestTy)->getElementCount();
5338 Check(SrcVecLen == DestVecLen,
5339 "return type and arguments must have the same number of "
5340 "elements",
5341 Call);
5342 }
5343 break;
5344 }
5345 case Intrinsic::coro_id: {
5346 auto *InfoArg = Call.getArgOperand(3)->stripPointerCasts();
5347 if (isa<ConstantPointerNull>(InfoArg))
5348 break;
5349 auto *GV = dyn_cast<GlobalVariable>(InfoArg);
5350 Check(GV && GV->isConstant() && GV->hasDefinitiveInitializer(),
5351 "info argument of llvm.coro.id must refer to an initialized "
5352 "constant");
5353 Constant *Init = GV->getInitializer();
5354 Check(isa<ConstantStruct>(Init) || isa<ConstantArray>(Init),
5355 "info argument of llvm.coro.id must refer to either a struct or "
5356 "an array");
5357 break;
5358 }
5359 case Intrinsic::is_fpclass: {
5360 const ConstantInt *TestMask = cast<ConstantInt>(Call.getOperand(1));
5361 Check((TestMask->getZExtValue() & ~static_cast<unsigned>(fcAllFlags)) == 0,
5362 "unsupported bits for llvm.is.fpclass test mask");
5363 break;
5364 }
5365 case Intrinsic::fptrunc_round: {
5366 // Check the rounding mode
5367 Metadata *MD = nullptr;
5368 auto *MAV = dyn_cast<MetadataAsValue>(Call.getOperand(1));
5369 if (MAV)
5370 MD = MAV->getMetadata();
5371
5372 Check(MD != nullptr, "missing rounding mode argument", Call);
5373
5374 Check(isa<MDString>(MD),
5375 ("invalid value for llvm.fptrunc.round metadata operand"
5376 " (the operand should be a string)"),
5377 MD);
5378
5379 std::optional<RoundingMode> RoundMode =
5380 convertStrToRoundingMode(cast<MDString>(MD)->getString());
5381 Check(RoundMode && *RoundMode != RoundingMode::Dynamic,
5382 "unsupported rounding mode argument", Call);
5383 break;
5384 }
5385#define BEGIN_REGISTER_VP_INTRINSIC(VPID, ...) case Intrinsic::VPID:
5386#include "llvm/IR/VPIntrinsics.def"
5387 visitVPIntrinsic(cast<VPIntrinsic>(Call));
5388 break;
5389#define INSTRUCTION(NAME, NARGS, ROUND_MODE, INTRINSIC) \
5390 case Intrinsic::INTRINSIC:
5391#include "llvm/IR/ConstrainedOps.def"
5392 visitConstrainedFPIntrinsic(cast<ConstrainedFPIntrinsic>(Call));
5393 break;
5394 case Intrinsic::dbg_declare: // llvm.dbg.declare
5395 Check(isa<MetadataAsValue>(Call.getArgOperand(0)),
5396 "invalid llvm.dbg.declare intrinsic call 1", Call);
5397 visitDbgIntrinsic("declare", cast<DbgVariableIntrinsic>(Call));
5398 break;
5399 case Intrinsic::dbg_value: // llvm.dbg.value
5400 visitDbgIntrinsic("value", cast<DbgVariableIntrinsic>(Call));
5401 break;
5402 case Intrinsic::dbg_assign: // llvm.dbg.assign
5403 visitDbgIntrinsic("assign", cast<DbgVariableIntrinsic>(Call));
5404 break;
5405 case Intrinsic::dbg_label: // llvm.dbg.label
5406 visitDbgLabelIntrinsic("label", cast<DbgLabelInst>(Call));
5407 break;
5408 case Intrinsic::memcpy:
5409 case Intrinsic::memcpy_inline:
5410 case Intrinsic::memmove:
5411 case Intrinsic::memset:
5412 case Intrinsic::memset_inline: {
5413 break;
5414 }
5415 case Intrinsic::memcpy_element_unordered_atomic:
5416 case Intrinsic::memmove_element_unordered_atomic:
5417 case Intrinsic::memset_element_unordered_atomic: {
5418 const auto *AMI = cast<AtomicMemIntrinsic>(&Call);
5419
5420 ConstantInt *ElementSizeCI =
5421 cast<ConstantInt>(AMI->getRawElementSizeInBytes());
5422 const APInt &ElementSizeVal = ElementSizeCI->getValue();
5423 Check(ElementSizeVal.isPowerOf2(),
5424 "element size of the element-wise atomic memory intrinsic "
5425 "must be a power of 2",
5426 Call);
5427
5428 auto IsValidAlignment = [&](MaybeAlign Alignment) {
5429 return Alignment && ElementSizeVal.ule(Alignment->value());
5430 };
5431 Check(IsValidAlignment(AMI->getDestAlign()),
5432 "incorrect alignment of the destination argument", Call);
5433 if (const auto *AMT = dyn_cast<AtomicMemTransferInst>(AMI)) {
5434 Check(IsValidAlignment(AMT->getSourceAlign()),
5435 "incorrect alignment of the source argument", Call);
5436 }
5437 break;
5438 }
5439 case Intrinsic::call_preallocated_setup: {
5440 auto *NumArgs = dyn_cast<ConstantInt>(Call.getArgOperand(0));
5441 Check(NumArgs != nullptr,
5442 "llvm.call.preallocated.setup argument must be a constant");
5443 bool FoundCall = false;
5444 for (User *U : Call.users()) {
5445 auto *UseCall = dyn_cast<CallBase>(U);
5446 Check(UseCall != nullptr,
5447 "Uses of llvm.call.preallocated.setup must be calls");
5448 const Function *Fn = UseCall->getCalledFunction();
5449 if (Fn && Fn->getIntrinsicID() == Intrinsic::call_preallocated_arg) {
5450 auto *AllocArgIndex = dyn_cast<ConstantInt>(UseCall->getArgOperand(1));
5451 Check(AllocArgIndex != nullptr,
5452 "llvm.call.preallocated.alloc arg index must be a constant");
5453 auto AllocArgIndexInt = AllocArgIndex->getValue();
5454 Check(AllocArgIndexInt.sge(0) &&
5455 AllocArgIndexInt.slt(NumArgs->getValue()),
5456 "llvm.call.preallocated.alloc arg index must be between 0 and "
5457 "corresponding "
5458 "llvm.call.preallocated.setup's argument count");
5459 } else if (Fn && Fn->getIntrinsicID() ==
5460 Intrinsic::call_preallocated_teardown) {
5461 // nothing to do
5462 } else {
5463 Check(!FoundCall, "Can have at most one call corresponding to a "
5464 "llvm.call.preallocated.setup");
5465 FoundCall = true;
5466 size_t NumPreallocatedArgs = 0;
5467 for (unsigned i = 0; i < UseCall->arg_size(); i++) {
5468 if (UseCall->paramHasAttr(i, Attribute::Preallocated)) {
5469 ++NumPreallocatedArgs;
5470 }
5471 }
5472 Check(NumPreallocatedArgs != 0,
5473 "cannot use preallocated intrinsics on a call without "
5474 "preallocated arguments");
5475 Check(NumArgs->equalsInt(NumPreallocatedArgs),
5476 "llvm.call.preallocated.setup arg size must be equal to number "
5477 "of preallocated arguments "
5478 "at call site",
5479 Call, *UseCall);
5480 // getOperandBundle() cannot be called if more than one of the operand
5481 // bundle exists. There is already a check elsewhere for this, so skip
5482 // here if we see more than one.
5483 if (UseCall->countOperandBundlesOfType(LLVMContext::OB_preallocated) >
5484 1) {
5485 return;
5486 }
5487 auto PreallocatedBundle =
5488 UseCall->getOperandBundle(LLVMContext::OB_preallocated);
5489 Check(PreallocatedBundle,
5490 "Use of llvm.call.preallocated.setup outside intrinsics "
5491 "must be in \"preallocated\" operand bundle");
5492 Check(PreallocatedBundle->Inputs.front().get() == &Call,
5493 "preallocated bundle must have token from corresponding "
5494 "llvm.call.preallocated.setup");
5495 }
5496 }
5497 break;
5498 }
5499 case Intrinsic::call_preallocated_arg: {
5500 auto *Token = dyn_cast<CallBase>(Call.getArgOperand(0));
5501 Check(Token && Token->getCalledFunction()->getIntrinsicID() ==
5502 Intrinsic::call_preallocated_setup,
5503 "llvm.call.preallocated.arg token argument must be a "
5504 "llvm.call.preallocated.setup");
5505 Check(Call.hasFnAttr(Attribute::Preallocated),
5506 "llvm.call.preallocated.arg must be called with a \"preallocated\" "
5507 "call site attribute");
5508 break;
5509 }
5510 case Intrinsic::call_preallocated_teardown: {
5511 auto *Token = dyn_cast<CallBase>(Call.getArgOperand(0));
5512 Check(Token && Token->getCalledFunction()->getIntrinsicID() ==
5513 Intrinsic::call_preallocated_setup,
5514 "llvm.call.preallocated.teardown token argument must be a "
5515 "llvm.call.preallocated.setup");
5516 break;
5517 }
5518 case Intrinsic::gcroot:
5519 case Intrinsic::gcwrite:
5520 case Intrinsic::gcread:
5521 if (ID == Intrinsic::gcroot) {
5522 AllocaInst *AI =
5523 dyn_cast<AllocaInst>(Call.getArgOperand(0)->stripPointerCasts());
5524 Check(AI, "llvm.gcroot parameter #1 must be an alloca.", Call);
5525 Check(isa<Constant>(Call.getArgOperand(1)),
5526 "llvm.gcroot parameter #2 must be a constant.", Call);
5527 if (!AI->getAllocatedType()->isPointerTy()) {
5528 Check(!isa<ConstantPointerNull>(Call.getArgOperand(1)),
5529 "llvm.gcroot parameter #1 must either be a pointer alloca, "
5530 "or argument #2 must be a non-null constant.",
5531 Call);
5532 }
5533 }
5534
5535 Check(Call.getParent()->getParent()->hasGC(),
5536 "Enclosing function does not use GC.", Call);
5537 break;
5538 case Intrinsic::init_trampoline:
5539 Check(isa<Function>(Call.getArgOperand(1)->stripPointerCasts()),
5540 "llvm.init_trampoline parameter #2 must resolve to a function.",
5541 Call);
5542 break;
5543 case Intrinsic::prefetch:
5544 Check(cast<ConstantInt>(Call.getArgOperand(1))->getZExtValue() < 2,
5545 "rw argument to llvm.prefetch must be 0-1", Call);
5546 Check(cast<ConstantInt>(Call.getArgOperand(2))->getZExtValue() < 4,
5547 "locality argument to llvm.prefetch must be 0-3", Call);
5548 Check(cast<ConstantInt>(Call.getArgOperand(3))->getZExtValue() < 2,
5549 "cache type argument to llvm.prefetch must be 0-1", Call);
5550 break;
5551 case Intrinsic::stackprotector:
5552 Check(isa<AllocaInst>(Call.getArgOperand(1)->stripPointerCasts()),
5553 "llvm.stackprotector parameter #2 must resolve to an alloca.", Call);
5554 break;
5555 case Intrinsic::localescape: {
5556 BasicBlock *BB = Call.getParent();
5557 Check(BB->isEntryBlock(), "llvm.localescape used outside of entry block",
5558 Call);
5559 Check(!SawFrameEscape, "multiple calls to llvm.localescape in one function",
5560 Call);
5561 for (Value *Arg : Call.args()) {
5562 if (isa<ConstantPointerNull>(Arg))
5563 continue; // Null values are allowed as placeholders.
5564 auto *AI = dyn_cast<AllocaInst>(Arg->stripPointerCasts());
5565 Check(AI && AI->isStaticAlloca(),
5566 "llvm.localescape only accepts static allocas", Call);
5567 }
5568 FrameEscapeInfo[BB->getParent()].first = Call.arg_size();
5569 SawFrameEscape = true;
5570 break;
5571 }
5572 case Intrinsic::localrecover: {
5573 Value *FnArg = Call.getArgOperand(0)->stripPointerCasts();
5574 Function *Fn = dyn_cast<Function>(FnArg);
5575 Check(Fn && !Fn->isDeclaration(),
5576 "llvm.localrecover first "
5577 "argument must be function defined in this module",
5578 Call);
5579 auto *IdxArg = cast<ConstantInt>(Call.getArgOperand(2));
5580 auto &Entry = FrameEscapeInfo[Fn];
5581 Entry.second = unsigned(
5582 std::max(uint64_t(Entry.second), IdxArg->getLimitedValue(~0U) + 1));
5583 break;
5584 }
5585
5586 case Intrinsic::experimental_gc_statepoint:
5587 if (auto *CI = dyn_cast<CallInst>(&Call))
5588 Check(!CI->isInlineAsm(),
5589 "gc.statepoint support for inline assembly unimplemented", CI);
5590 Check(Call.getParent()->getParent()->hasGC(),
5591 "Enclosing function does not use GC.", Call);
5592
5593 verifyStatepoint(Call);
5594 break;
5595 case Intrinsic::experimental_gc_result: {
5596 Check(Call.getParent()->getParent()->hasGC(),
5597 "Enclosing function does not use GC.", Call);
5598
5599 auto *Statepoint = Call.getArgOperand(0);
5600 if (isa<UndefValue>(Statepoint))
5601 break;
5602
5603 // Are we tied to a statepoint properly?
5604 const auto *StatepointCall = dyn_cast<CallBase>(Statepoint);
5605 const Function *StatepointFn =
5606 StatepointCall ? StatepointCall->getCalledFunction() : nullptr;
5607 Check(StatepointFn && StatepointFn->isDeclaration() &&
5608 StatepointFn->getIntrinsicID() ==
5609 Intrinsic::experimental_gc_statepoint,
5610 "gc.result operand #1 must be from a statepoint", Call,
5611 Call.getArgOperand(0));
5612
5613 // Check that result type matches wrapped callee.
5614 auto *TargetFuncType =
5615 cast<FunctionType>(StatepointCall->getParamElementType(2));
5616 Check(Call.getType() == TargetFuncType->getReturnType(),
5617 "gc.result result type does not match wrapped callee", Call);
5618 break;
5619 }
5620 case Intrinsic::experimental_gc_relocate: {
5621 Check(Call.arg_size() == 3, "wrong number of arguments", Call);
5622
5623 Check(isa<PointerType>(Call.getType()->getScalarType()),
5624 "gc.relocate must return a pointer or a vector of pointers", Call);
5625
5626 // Check that this relocate is correctly tied to the statepoint
5627
5628 // This is case for relocate on the unwinding path of an invoke statepoint
5629 if (LandingPadInst *LandingPad =
5630 dyn_cast<LandingPadInst>(Call.getArgOperand(0))) {
5631
5632 const BasicBlock *InvokeBB =
5633 LandingPad->getParent()->getUniquePredecessor();
5634
5635 // Landingpad relocates should have only one predecessor with invoke
5636 // statepoint terminator
5637 Check(InvokeBB, "safepoints should have unique landingpads",
5638 LandingPad->getParent());
5639 Check(InvokeBB->getTerminator(), "safepoint block should be well formed",
5640 InvokeBB);
5641 Check(isa<GCStatepointInst>(InvokeBB->getTerminator()),
5642 "gc relocate should be linked to a statepoint", InvokeBB);
5643 } else {
5644 // In all other cases relocate should be tied to the statepoint directly.
5645 // This covers relocates on a normal return path of invoke statepoint and
5646 // relocates of a call statepoint.
5647 auto *Token = Call.getArgOperand(0);
5648 Check(isa<GCStatepointInst>(Token) || isa<UndefValue>(Token),
5649 "gc relocate is incorrectly tied to the statepoint", Call, Token);
5650 }
5651
5652 // Verify rest of the relocate arguments.
5653 const Value &StatepointCall = *cast<GCRelocateInst>(Call).getStatepoint();
5654
5655 // Both the base and derived must be piped through the safepoint.
5656 Value *Base = Call.getArgOperand(1);
5657 Check(isa<ConstantInt>(Base),
5658 "gc.relocate operand #2 must be integer offset", Call);
5659
5660 Value *Derived = Call.getArgOperand(2);
5661 Check(isa<ConstantInt>(Derived),
5662 "gc.relocate operand #3 must be integer offset", Call);
5663
5664 const uint64_t BaseIndex = cast<ConstantInt>(Base)->getZExtValue();
5665 const uint64_t DerivedIndex = cast<ConstantInt>(Derived)->getZExtValue();
5666
5667 // Check the bounds
5668 if (isa<UndefValue>(StatepointCall))
5669 break;
5670 if (auto Opt = cast<GCStatepointInst>(StatepointCall)
5671 .getOperandBundle(LLVMContext::OB_gc_live)) {
5672 Check(BaseIndex < Opt->Inputs.size(),
5673 "gc.relocate: statepoint base index out of bounds", Call);
5674 Check(DerivedIndex < Opt->Inputs.size(),
5675 "gc.relocate: statepoint derived index out of bounds", Call);
5676 }
5677
5678 // Relocated value must be either a pointer type or vector-of-pointer type,
5679 // but gc_relocate does not need to return the same pointer type as the
5680 // relocated pointer. It can be casted to the correct type later if it's
5681 // desired. However, they must have the same address space and 'vectorness'
5682 GCRelocateInst &Relocate = cast<GCRelocateInst>(Call);
5683 auto *ResultType = Call.getType();
5684 auto *DerivedType = Relocate.getDerivedPtr()->getType();
5685 auto *BaseType = Relocate.getBasePtr()->getType();
5686
5687 Check(BaseType->isPtrOrPtrVectorTy(),
5688 "gc.relocate: relocated value must be a pointer", Call);
5689 Check(DerivedType->isPtrOrPtrVectorTy(),
5690 "gc.relocate: relocated value must be a pointer", Call);
5691
5692 Check(ResultType->isVectorTy() == DerivedType->isVectorTy(),
5693 "gc.relocate: vector relocates to vector and pointer to pointer",
5694 Call);
5695 Check(
5696 ResultType->getPointerAddressSpace() ==
5697 DerivedType->getPointerAddressSpace(),
5698 "gc.relocate: relocating a pointer shouldn't change its address space",
5699 Call);
5700
5701 auto GC = llvm::getGCStrategy(Relocate.getFunction()->getGC());
5702 Check(GC, "gc.relocate: calling function must have GCStrategy",
5703 Call.getFunction());
5704 if (GC) {
5705 auto isGCPtr = [&GC](Type *PTy) {
5706 return GC->isGCManagedPointer(PTy->getScalarType()).value_or(true);
5707 };
5708 Check(isGCPtr(ResultType), "gc.relocate: must return gc pointer", Call);
5709 Check(isGCPtr(BaseType),
5710 "gc.relocate: relocated value must be a gc pointer", Call);
5711 Check(isGCPtr(DerivedType),
5712 "gc.relocate: relocated value must be a gc pointer", Call);
5713 }
5714 break;
5715 }
5716 case Intrinsic::experimental_patchpoint: {
5717 if (Call.getCallingConv() == CallingConv::AnyReg) {
5718 Check(Call.getType()->isSingleValueType(),
5719 "patchpoint: invalid return type used with anyregcc", Call);
5720 }
5721 break;
5722 }
5723 case Intrinsic::eh_exceptioncode:
5724 case Intrinsic::eh_exceptionpointer: {
5725 Check(isa<CatchPadInst>(Call.getArgOperand(0)),
5726 "eh.exceptionpointer argument must be a catchpad", Call);
5727 break;
5728 }
5729 case Intrinsic::get_active_lane_mask: {
5730 Check(Call.getType()->isVectorTy(),
5731 "get_active_lane_mask: must return a "
5732 "vector",
5733 Call);
5734 auto *ElemTy = Call.getType()->getScalarType();
5735 Check(ElemTy->isIntegerTy(1),
5736 "get_active_lane_mask: element type is not "
5737 "i1",
5738 Call);
5739 break;
5740 }
5741 case Intrinsic::experimental_get_vector_length: {
5742 ConstantInt *VF = cast<ConstantInt>(Call.getArgOperand(1));
5743 Check(!VF->isNegative() && !VF->isZero(),
5744 "get_vector_length: VF must be positive", Call);
5745 break;
5746 }
5747 case Intrinsic::masked_load: {
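// Illustrative call shape (placeholder types and values): the mask length
// must match the returned vector and the pass-through must match the return
// type, e.g.
//   %v = call <4 x float> @llvm.masked.load.v4f32.p0(ptr %p, i32 16,
//                                                    <4 x i1> %m, <4 x float> %pt)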
5748 Check(Call.getType()->isVectorTy(), "masked_load: must return a vector",
5749 Call);
5750
5751 ConstantInt *Alignment = cast<ConstantInt>(Call.getArgOperand(1));
5752 Value *Mask = Call.getArgOperand(2);
5753 Value *PassThru = Call.getArgOperand(3);
5754 Check(Mask->getType()->isVectorTy(), "masked_load: mask must be vector",
5755 Call);
5756 Check(Alignment->getValue().isPowerOf2(),
5757 "masked_load: alignment must be a power of 2", Call);
5758 Check(PassThru->getType() == Call.getType(),
5759 "masked_load: pass through and return type must match", Call);
5760 Check(cast<VectorType>(Mask->getType())->getElementCount() ==
5761 cast<VectorType>(Call.getType())->getElementCount(),
5762 "masked_load: vector mask must be same length as return", Call);
5763 break;
5764 }
5765 case Intrinsic::masked_store: {
5766 Value *Val = Call.getArgOperand(0);
5767 ConstantInt *Alignment = cast<ConstantInt>(Call.getArgOperand(2));
5768 Value *Mask = Call.getArgOperand(3);
5769 Check(Mask->getType()->isVectorTy(), "masked_store: mask must be vector",
5770 Call);
5771 Check(Alignment->getValue().isPowerOf2(),
5772 "masked_store: alignment must be a power of 2", Call);
5773 Check(cast<VectorType>(Mask->getType())->getElementCount() ==
5774 cast<VectorType>(Val->getType())->getElementCount(),
5775 "masked_store: vector mask must be same length as value", Call);
5776 break;
5777 }
5778
5779 case Intrinsic::masked_gather: {
5780 const APInt &Alignment =
5781 cast<ConstantInt>(Call.getArgOperand(1))->getValue();
5782 Check(Alignment.isZero() || Alignment.isPowerOf2(),
5783 "masked_gather: alignment must be 0 or a power of 2", Call);
5784 break;
5785 }
5786 case Intrinsic::masked_scatter: {
5787 const APInt &Alignment =
5788 cast<ConstantInt>(Call.getArgOperand(2))->getValue();
5789 Check(Alignment.isZero() || Alignment.isPowerOf2(),
5790 "masked_scatter: alignment must be 0 or a power of 2", Call);
5791 break;
5792 }
5793
5794 case Intrinsic::experimental_guard: {
5795 Check(isa<CallInst>(Call), "experimental_guard cannot be invoked", Call);
5796 Check(Call.countOperandBundlesOfType(LLVMContext::OB_deopt) == 1,
5797 "experimental_guard must have exactly one "
5798 "\"deopt\" operand bundle");
5799 break;
5800 }
5801
5802 case Intrinsic::experimental_deoptimize: {
5803 Check(isa<CallInst>(Call), "experimental_deoptimize cannot be invoked",
5804 Call);
5805 Check(Call.countOperandBundlesOfType(LLVMContext::OB_deopt) == 1,
5806 "experimental_deoptimize must have exactly one "
5807 "\"deopt\" operand bundle");
5808 Check(Call.getType() == Call.getFunction()->getReturnType(),
5809 "experimental_deoptimize return type must match caller return type");
5810
5811 if (isa<CallInst>(Call)) {
5812 auto *RI = dyn_cast<ReturnInst>(Call.getNextNode());
5813 Check(RI,
5814 "calls to experimental_deoptimize must be followed by a return");
5815
5816 if (!Call.getType()->isVoidTy() && RI)
5817 Check(RI->getReturnValue() == &Call,
5818 "calls to experimental_deoptimize must be followed by a return "
5819 "of the value computed by experimental_deoptimize");
5820 }
5821
5822 break;
5823 }
5824 case Intrinsic::vastart: {
5825 Check(Call.getFunction()->isVarArg(),
5826 "va_start called in a non-varargs function");
5827 break;
5828 }
5829 case Intrinsic::vector_reduce_and:
5830 case Intrinsic::vector_reduce_or:
5831 case Intrinsic::vector_reduce_xor:
5832 case Intrinsic::vector_reduce_add:
5833 case Intrinsic::vector_reduce_mul:
5834 case Intrinsic::vector_reduce_smax:
5835 case Intrinsic::vector_reduce_smin:
5836 case Intrinsic::vector_reduce_umax:
5837 case Intrinsic::vector_reduce_umin: {
5838 Type *ArgTy = Call.getArgOperand(0)->getType();
5839 Check(ArgTy->isIntOrIntVectorTy() && ArgTy->isVectorTy(),
5840 "Intrinsic has incorrect argument type!");
5841 break;
5842 }
5843 case Intrinsic::vector_reduce_fmax:
5844 case Intrinsic::vector_reduce_fmin: {
5845 Type *ArgTy = Call.getArgOperand(0)->getType();
5846 Check(ArgTy->isFPOrFPVectorTy() && ArgTy->isVectorTy(),
5847 "Intrinsic has incorrect argument type!");
5848 break;
5849 }
5850 case Intrinsic::vector_reduce_fadd:
5851 case Intrinsic::vector_reduce_fmul: {
5852 // Unlike the other reductions, the first argument is a start value. The
5853 // second argument is the vector to be reduced.
5854 Type *ArgTy = Call.getArgOperand(1)->getType();
5855 Check(ArgTy->isFPOrFPVectorTy() && ArgTy->isVectorTy(),
5856 "Intrinsic has incorrect argument type!");
5857 break;
5858 }
5859 case Intrinsic::smul_fix:
5860 case Intrinsic::smul_fix_sat:
5861 case Intrinsic::umul_fix:
5862 case Intrinsic::umul_fix_sat:
5863 case Intrinsic::sdiv_fix:
5864 case Intrinsic::sdiv_fix_sat:
5865 case Intrinsic::udiv_fix:
5866 case Intrinsic::udiv_fix_sat: {
5867 Value *Op1 = Call.getArgOperand(0);
5868 Value *Op2 = Call.getArgOperand(1);
5870 "first operand of [us][mul|div]_fix[_sat] must be an int type or "
5871 "vector of ints");
5873 "second operand of [us][mul|div]_fix[_sat] must be an int type or "
5874 "vector of ints");
5875
5876 auto *Op3 = cast<ConstantInt>(Call.getArgOperand(2));
5877 Check(Op3->getType()->isIntegerTy(),
5878 "third operand of [us][mul|div]_fix[_sat] must be an int type");
5879 Check(Op3->getBitWidth() <= 32,
5880 "third operand of [us][mul|div]_fix[_sat] must fit within 32 bits");
5881
5882 if (ID == Intrinsic::smul_fix || ID == Intrinsic::smul_fix_sat ||
5883 ID == Intrinsic::sdiv_fix || ID == Intrinsic::sdiv_fix_sat) {
5884 Check(Op3->getZExtValue() < Op1->getType()->getScalarSizeInBits(),
5885 "the scale of s[mul|div]_fix[_sat] must be less than the width of "
5886 "the operands");
5887 } else {
5888 Check(Op3->getZExtValue() <= Op1->getType()->getScalarSizeInBits(),
5889 "the scale of u[mul|div]_fix[_sat] must be less than or equal "
5890 "to the width of the operands");
5891 }
5892 break;
5893 }
5894 case Intrinsic::lrint:
5895 case Intrinsic::llrint: {
5896 Type *ValTy = Call.getArgOperand(0)->getType();
5897 Type *ResultTy = Call.getType();
5898 Check(
5899 ValTy->isFPOrFPVectorTy() && ResultTy->isIntOrIntVectorTy(),
5900 "llvm.lrint, llvm.llrint: argument must be floating-point or vector "
5901 "of floating-points, and result must be integer or vector of integers",
5902 &Call);
5903 Check(ValTy->isVectorTy() == ResultTy->isVectorTy(),
5904 "llvm.lrint, llvm.llrint: argument and result disagree on vector use",
5905 &Call);
5906 if (ValTy->isVectorTy()) {
5907 Check(cast<VectorType>(ValTy)->getElementCount() ==
5908 cast<VectorType>(ResultTy)->getElementCount(),
5909 "llvm.lrint, llvm.llrint: argument must be same length as result",
5910 &Call);
5911 }
5912 break;
5913 }
5914 case Intrinsic::lround:
5915 case Intrinsic::llround: {
5916 Type *ValTy = Call.getArgOperand(0)->getType();
5917 Type *ResultTy = Call.getType();
5918 Check(!ValTy->isVectorTy() && !ResultTy->isVectorTy(),
5919 "Intrinsic does not support vectors", &Call);
5920 break;
5921 }
5922 case Intrinsic::bswap: {
5923 Type *Ty = Call.getType();
5924 unsigned Size = Ty->getScalarSizeInBits();
5925 Check(Size % 16 == 0, "bswap must be an even number of bytes", &Call);
5926 break;
5927 }
5928 case Intrinsic::invariant_start: {
5929 ConstantInt *InvariantSize = dyn_cast<ConstantInt>(Call.getArgOperand(0));
5930 Check(InvariantSize &&
5931 (!InvariantSize->isNegative() || InvariantSize->isMinusOne()),
5932 "invariant_start parameter must be -1, 0 or a positive number",
5933 &Call);
5934 break;
5935 }
5936 case Intrinsic::matrix_multiply:
5937 case Intrinsic::matrix_transpose:
5938 case Intrinsic::matrix_column_major_load:
5939 case Intrinsic::matrix_column_major_store: {
5940 Function *IF = Call.getCalledFunction();
5941 ConstantInt *Stride = nullptr;
5942 ConstantInt *NumRows;
5943 ConstantInt *NumColumns;
5944 VectorType *ResultTy;
5945 Type *Op0ElemTy = nullptr;
5946 Type *Op1ElemTy = nullptr;
5947 switch (ID) {
5948 case Intrinsic::matrix_multiply: {
5949 NumRows = cast<ConstantInt>(Call.getArgOperand(2));
5950 ConstantInt *N = cast<ConstantInt>(Call.getArgOperand(3));
5951 NumColumns = cast<ConstantInt>(Call.getArgOperand(4));
5952 Check(cast<FixedVectorType>(Call.getArgOperand(0)->getType())
5953 ->getNumElements() ==
5954 NumRows->getZExtValue() * N->getZExtValue(),
5955 "First argument of a matrix operation does not match specified "
5956 "shape!");
5957 Check(cast<FixedVectorType>(Call.getArgOperand(1)->getType())
5958 ->getNumElements() ==
5959 N->getZExtValue() * NumColumns->getZExtValue(),
5960 "Second argument of a matrix operation does not match specified "
5961 "shape!");
5962
5963 ResultTy = cast<VectorType>(Call.getType());
5964 Op0ElemTy =
5965 cast<VectorType>(Call.getArgOperand(0)->getType())->getElementType();
5966 Op1ElemTy =
5967 cast<VectorType>(Call.getArgOperand(1)->getType())->getElementType();
5968 break;
5969 }
5970 case Intrinsic::matrix_transpose:
5971 NumRows = cast<ConstantInt>(Call.getArgOperand(1));
5972 NumColumns = cast<ConstantInt>(Call.getArgOperand(2));
5973 ResultTy = cast<VectorType>(Call.getType());
5974 Op0ElemTy =
5975 cast<VectorType>(Call.getArgOperand(0)->getType())->getElementType();
5976 break;
5977 case Intrinsic::matrix_column_major_load: {
5978 Stride = dyn_cast<ConstantInt>(Call.getArgOperand(1));
5979 NumRows = cast<ConstantInt>(Call.getArgOperand(3));
5980 NumColumns = cast<ConstantInt>(Call.getArgOperand(4));
5981 ResultTy = cast<VectorType>(Call.getType());
5982 break;
5983 }
5984 case Intrinsic::matrix_column_major_store: {
5985 Stride = dyn_cast<ConstantInt>(Call.getArgOperand(2));
5986 NumRows = cast<ConstantInt>(Call.getArgOperand(4));
5987 NumColumns = cast<ConstantInt>(Call.getArgOperand(5));
5988 ResultTy = cast<VectorType>(Call.getArgOperand(0)->getType());
5989 Op0ElemTy =
5990 cast<VectorType>(Call.getArgOperand(0)->getType())->getElementType();
5991 break;
5992 }
5993 default:
5994 llvm_unreachable("unexpected intrinsic");
5995 }
5996
5997 Check(ResultTy->getElementType()->isIntegerTy() ||
5998 ResultTy->getElementType()->isFloatingPointTy(),
5999 "Result type must be an integer or floating-point type!", IF);
6000
6001 if (Op0ElemTy)
6002 Check(ResultTy->getElementType() == Op0ElemTy,
6003 "Vector element type mismatch of the result and first operand "
6004 "vector!",
6005 IF);
6006
6007 if (Op1ElemTy)
6008 Check(ResultTy->getElementType() == Op1ElemTy,
6009 "Vector element type mismatch of the result and second operand "
6010 "vector!",
6011 IF);
6012
6013 Check(cast<FixedVectorType>(ResultTy)->getNumElements() ==
6014 NumRows->getZExtValue() * NumColumns->getZExtValue(),
6015 "Result of a matrix operation does not fit in the returned vector!");
6016
6017 if (Stride)
6018 Check(Stride->getZExtValue() >= NumRows->getZExtValue(),
6019 "Stride must be greater or equal than the number of rows!", IF);
6020
6021 break;
6022 }
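// Illustrative sketch (editorial): for llvm.matrix.multiply the shape
// immediates must match the operand and result vector lengths, e.g. a 2x3 by
// 3x2 multiply:
//   %r = call <4 x float> @llvm.matrix.multiply.v4f32.v6f32.v6f32(
//            <6 x float> %a, <6 x float> %b, i32 2, i32 3, i32 2)
// Both operands carry 6 elements (2*3 and 3*2) and the 2x2 result fits in the
// returned <4 x float>.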
6023 case Intrinsic::vector_splice: {
6024 VectorType *VecTy = cast<VectorType>(Call.getType());
6025 int64_t Idx = cast<ConstantInt>(Call.getArgOperand(2))->getSExtValue();
6026 int64_t KnownMinNumElements = VecTy->getElementCount().getKnownMinValue();
6027 if (Call.getParent() && Call.getParent()->getParent()) {
6028 AttributeList Attrs = Call.getParent()->getParent()->getAttributes();
6029 if (Attrs.hasFnAttr(Attribute::VScaleRange))
6030 KnownMinNumElements *= Attrs.getFnAttrs().getVScaleRangeMin();
6031 }
6032 Check((Idx < 0 && std::abs(Idx) <= KnownMinNumElements) ||
6033 (Idx >= 0 && Idx < KnownMinNumElements),
6034 "The splice index exceeds the range [-VL, VL-1] where VL is the "
6035 "known minimum number of elements in the vector. For scalable "
6036 "vectors the minimum number of elements is determined from "
6037 "vscale_range.",
6038 &Call);
6039 break;
6040 }
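// Illustrative sketch (editorial): for a fixed-length splice such as
//   %r = call <4 x i32> @llvm.vector.splice.v4i32(<4 x i32> %a, <4 x i32> %b, i32 2)
// the index must lie in [-4, 3]; an index of 4 or -5 would be rejected. For
// scalable vectors the bound is scaled by the vscale_range minimum, if present.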
6041 case Intrinsic::experimental_stepvector: {
6042 VectorType *VecTy = dyn_cast<VectorType>(Call.getType());
6043 Check(VecTy && VecTy->getScalarType()->isIntegerTy() &&
6044 VecTy->getScalarSizeInBits() >= 8,
6045 "experimental_stepvector only supported for vectors of integers "
6046 "with a bitwidth of at least 8.",
6047 &Call);
6048 break;
6049 }
6050 case Intrinsic::vector_insert: {
6051 Value *Vec = Call.getArgOperand(0);
6052 Value *SubVec = Call.getArgOperand(1);
6053 Value *Idx = Call.getArgOperand(2);
6054 unsigned IdxN = cast<ConstantInt>(Idx)->getZExtValue();
6055
6056 VectorType *VecTy = cast<VectorType>(Vec->getType());
6057 VectorType *SubVecTy = cast<VectorType>(SubVec->getType());
6058
6059 ElementCount VecEC = VecTy->getElementCount();
6060 ElementCount SubVecEC = SubVecTy->getElementCount();
6061 Check(VecTy->getElementType() == SubVecTy->getElementType(),
6062 "vector_insert parameters must have the same element "
6063 "type.",
6064 &Call);
6065 Check(IdxN % SubVecEC.getKnownMinValue() == 0,
6066 "vector_insert index must be a constant multiple of "
6067 "the subvector's known minimum vector length.");
6068
6069 // If this insertion is not the 'mixed' case where a fixed vector is
6070 // inserted into a scalable vector, ensure that the insertion of the
6071 // subvector does not overrun the parent vector.
6072 if (VecEC.isScalable() == SubVecEC.isScalable()) {
6073 Check(IdxN < VecEC.getKnownMinValue() &&
6074 IdxN + SubVecEC.getKnownMinValue() <= VecEC.getKnownMinValue(),
6075 "subvector operand of vector_insert would overrun the "
6076 "vector being inserted into.");
6077 }
6078 break;
6079 }
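// Illustrative sketch (editorial): inserting a <2 x i32> subvector into a
// <4 x i32> vector:
//   %r = call <4 x i32> @llvm.vector.insert.v4i32.v2i32(<4 x i32> %v, <2 x i32> %s, i64 2)
// Indices 0 and 2 are accepted; 1 is rejected (not a multiple of the subvector
// length) and 4 would overrun the destination vector.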
6080 case Intrinsic::vector_extract: {
6081 Value *Vec = Call.getArgOperand(0);
6082 Value *Idx = Call.getArgOperand(1);
6083 unsigned IdxN = cast<ConstantInt>(Idx)->getZExtValue();
6084
6085 VectorType *ResultTy = cast<VectorType>(Call.getType());
6086 VectorType *VecTy = cast<VectorType>(Vec->getType());
6087
6088 ElementCount VecEC = VecTy->getElementCount();
6089 ElementCount ResultEC = ResultTy->getElementCount();
6090
6091 Check(ResultTy->getElementType() == VecTy->getElementType(),
6092 "vector_extract result must have the same element "
6093 "type as the input vector.",
6094 &Call);
6095 Check(IdxN % ResultEC.getKnownMinValue() == 0,
6096 "vector_extract index must be a constant multiple of "
6097 "the result type's known minimum vector length.");
6098
6099 // If this extraction is not the 'mixed' case where a fixed vector is
6100 // extracted from a scalable vector, ensure that the extraction does not
6101 // overrun the parent vector.
6102 if (VecEC.isScalable() == ResultEC.isScalable()) {
6103 Check(IdxN < VecEC.getKnownMinValue() &&
6104 IdxN + ResultEC.getKnownMinValue() <= VecEC.getKnownMinValue(),
6105 "vector_extract would overrun.");
6106 }
6107 break;
6108 }
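// Illustrative sketch (editorial): extracting the upper half of a <4 x i32>
// vector:
//   %r = call <2 x i32> @llvm.vector.extract.v2i32.v4i32(<4 x i32> %v, i64 2)
// The index must be a multiple of the result length (0 or 2 here), and in the
// non-mixed case the extracted range must stay within the source vector.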
6109 case Intrinsic::experimental_noalias_scope_decl: {
6110 NoAliasScopeDecls.push_back(cast<IntrinsicInst>(&Call));
6111 break;
6112 }
6113 case Intrinsic::preserve_array_access_index:
6114 case Intrinsic::preserve_struct_access_index:
6115 case Intrinsic::aarch64_ldaxr:
6116 case Intrinsic::aarch64_ldxr:
6117 case Intrinsic::arm_ldaex:
6118 case Intrinsic::arm_ldrex: {
6119 Type *ElemTy = Call.getParamElementType(0);
6120 Check(ElemTy, "Intrinsic requires elementtype attribute on first argument.",
6121 &Call);
6122 break;
6123 }
6124 case Intrinsic::aarch64_stlxr:
6125 case Intrinsic::aarch64_stxr:
6126 case Intrinsic::arm_stlex:
6127 case Intrinsic::arm_strex: {
6128 Type *ElemTy = Call.getAttributes().getParamElementType(1);
6129 Check(ElemTy,
6130 "Intrinsic requires elementtype attribute on second argument.",
6131 &Call);
6132 break;
6133 }
6134 case Intrinsic::aarch64_prefetch: {
6135 Check(cast<ConstantInt>(Call.getArgOperand(1))->getZExtValue() < 2,
6136 "write argument to llvm.aarch64.prefetch must be 0 or 1", Call);
6137 Check(cast<ConstantInt>(Call.getArgOperand(2))->getZExtValue() < 4,
6138 "target argument to llvm.aarch64.prefetch must be 0-3", Call);
6139 Check(cast<ConstantInt>(Call.getArgOperand(3))->getZExtValue() < 2,
6140 "stream argument to llvm.aarch64.prefetch must be 0 or 1", Call);
6141 Check(cast<ConstantInt>(Call.getArgOperand(4))->getZExtValue() < 2,
6142 "isdata argument to llvm.aarch64.prefetch must be 0 or 1", Call);
6143 break;
6144 }
6145 case Intrinsic::callbr_landingpad: {
6146 const auto *CBR = dyn_cast<CallBrInst>(Call.getOperand(0));
6147 Check(CBR, "intrinsic requires callbr operand", &Call);
6148 if (!CBR)
6149 break;
6150
6151 const BasicBlock *LandingPadBB = Call.getParent();
6152 const BasicBlock *PredBB = LandingPadBB->getUniquePredecessor();
6153 if (!PredBB) {
6154 CheckFailed("Intrinsic in block must have 1 unique predecessor", &Call);
6155 break;
6156 }
6157 if (!isa<CallBrInst>(PredBB->getTerminator())) {
6158 CheckFailed("Intrinsic must have corresponding callbr in predecessor",
6159 &Call);
6160 break;
6161 }
6162 Check(llvm::any_of(CBR->getIndirectDests(),
6163 [LandingPadBB](const BasicBlock *IndDest) {
6164 return IndDest == LandingPadBB;
6165 }),
6166 "Intrinsic's corresponding callbr must have intrinsic's parent basic "
6167 "block in indirect destination list",
6168 &Call);
6169 const Instruction &First = *LandingPadBB->begin();
6170 Check(&First == &Call, "No other instructions may precede intrinsic",
6171 &Call);
6172 break;
6173 }
6174 case Intrinsic::amdgcn_cs_chain: {
6175 auto CallerCC = Call.getCaller()->getCallingConv();
6176 switch (CallerCC) {
6177 case CallingConv::AMDGPU_CS:
6178 case CallingConv::AMDGPU_CS_Chain:
6179 case CallingConv::AMDGPU_CS_ChainPreserve:
6180 break;
6181 default:
6182 CheckFailed("Intrinsic can only be used from functions with the "
6183 "amdgpu_cs, amdgpu_cs_chain or amdgpu_cs_chain_preserve "
6184 "calling conventions",
6185 &Call);
6186 break;
6187 }
6188
6189 Check(Call.paramHasAttr(2, Attribute::InReg),
6190 "SGPR arguments must have the `inreg` attribute", &Call);
6191 Check(!Call.paramHasAttr(3, Attribute::InReg),
6192 "VGPR arguments must not have the `inreg` attribute", &Call);
6193 break;
6194 }
6195 case Intrinsic::amdgcn_set_inactive_chain_arg: {
6196 auto CallerCC = Call.getCaller()->getCallingConv();
6197 switch (CallerCC) {
6198 case CallingConv::AMDGPU_CS_Chain:
6199 case CallingConv::AMDGPU_CS_ChainPreserve:
6200 break;
6201 default:
6202 CheckFailed("Intrinsic can only be used from functions with the "
6203 "amdgpu_cs_chain or amdgpu_cs_chain_preserve "
6204 "calling conventions",
6205 &Call);
6206 break;
6207 }
6208
6209 unsigned InactiveIdx = 1;
6210 Check(!Call.paramHasAttr(InactiveIdx, Attribute::InReg),
6211 "Value for inactive lanes must not have the `inreg` attribute",
6212 &Call);
6213 Check(isa<Argument>(Call.getArgOperand(InactiveIdx)),
6214 "Value for inactive lanes must be a function argument", &Call);
6215 Check(!cast<Argument>(Call.getArgOperand(InactiveIdx))->hasInRegAttr(),
6216 "Value for inactive lanes must be a VGPR function argument", &Call);
6217 break;
6218 }
6219 case Intrinsic::nvvm_setmaxnreg_inc_sync_aligned_u32:
6220 case Intrinsic::nvvm_setmaxnreg_dec_sync_aligned_u32: {
6221 Value *V = Call.getArgOperand(0);
6222 unsigned RegCount = cast<ConstantInt>(V)->getZExtValue();
6223 Check(RegCount % 8 == 0,
6224 "reg_count argument to nvvm.setmaxnreg must be in multiples of 8");
6225 Check((RegCount >= 24 && RegCount <= 256),
6226 "reg_count argument to nvvm.setmaxnreg must be within [24, 256]");
6227 break;
6228 }
6229 case Intrinsic::experimental_convergence_entry:
6230 case Intrinsic::experimental_convergence_anchor:
6231 break;
6232 case Intrinsic::experimental_convergence_loop:
6233 break;
6234 case Intrinsic::ptrmask: {
6235 Type *Ty0 = Call.getArgOperand(0)->getType();
6236 Type *Ty1 = Call.getArgOperand(1)->getType();
6238 "llvm.ptrmask intrinsic first argument must be pointer or vector "
6239 "of pointers",
6240 &Call);
6241 Check(
6242 Ty0->isVectorTy() == Ty1->isVectorTy(),
6243 "llvm.ptrmask intrinsic arguments must be both scalars or both vectors",
6244 &Call);
6245 if (Ty0->isVectorTy())
6246 Check(cast<VectorType>(Ty0)->getElementCount() ==
6247 cast<VectorType>(Ty1)->getElementCount(),
6248 "llvm.ptrmask intrinsic arguments must have the same number of "
6249 "elements",
6250 &Call);
6251 Check(DL.getIndexTypeSizeInBits(Ty0) == Ty1->getScalarSizeInBits(),
6252 "llvm.ptrmask intrinsic second argument bitwidth must match "
6253 "pointer index type size of first argument",
6254 &Call);
6255 break;
6256 }
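// Illustrative sketch (editorial): assuming a DataLayout with a 64-bit pointer
// index type,
//   %q = call ptr @llvm.ptrmask.p0.i64(ptr %p, i64 -16)
// is accepted, whereas an i32 mask would be rejected because its width does
// not match the pointer index width of the first argument.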
6257 case Intrinsic::threadlocal_address: {
6258 const Value &Arg0 = *Call.getArgOperand(0);
6259 Check(isa<GlobalValue>(Arg0),
6260 "llvm.threadlocal.address first argument must be a GlobalValue");
6261 Check(cast<GlobalValue>(Arg0).isThreadLocal(),
6262 "llvm.threadlocal.address operand isThreadLocal() must be true");
6263 break;
6264 }
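// Illustrative sketch (editorial): the operand must be a thread-local global,
// e.g.
//   @tls = thread_local global i32 0
//   %p = call ptr @llvm.threadlocal.address.p0(ptr @tls)
// A non-thread-local global or an arbitrary pointer fails the checks above.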
6265 };
6266
6267 // Verify that there aren't any unmediated control transfers between funclets.
6268 if (IntrinsicInst::mayLowerToFunctionCall(ID)) {
6269 Function *F = Call.getParent()->getParent();
6270 if (F->hasPersonalityFn() &&
6271 isScopedEHPersonality(classifyEHPersonality(F->getPersonalityFn()))) {
6272 // Run EH funclet coloring on-demand and cache results for other intrinsic
6273 // calls in this function
6274 if (BlockEHFuncletColors.empty())
6275 BlockEHFuncletColors = colorEHFunclets(*F);
6276
6277 // Check for catch-/cleanup-pad in first funclet block
6278 bool InEHFunclet = false;
6279 BasicBlock *CallBB = Call.getParent();
6280 const ColorVector &CV = BlockEHFuncletColors.find(CallBB)->second;
6281 assert(CV.size() > 0 && "Uncolored block");
6282 for (BasicBlock *ColorFirstBB : CV)
6283 if (dyn_cast_or_null<FuncletPadInst>(ColorFirstBB->getFirstNonPHI()))
6284 InEHFunclet = true;
6285
6286 // Check for funclet operand bundle
6287 bool HasToken = false;
6288 for (unsigned I = 0, E = Call.getNumOperandBundles(); I != E; ++I)
6289 if (Call.getOperandBundleAt(I).getTagID() == LLVMContext::OB_funclet)
6290 HasToken = true;
6291
6292 // This would cause silent code truncation in WinEHPrepare
6293 if (InEHFunclet)
6294 Check(HasToken, "Missing funclet token on intrinsic call", &Call);
6295 }
6296 }
6297}
6298
6299/// Carefully grab the subprogram from a local scope.
6300///
6301/// This carefully grabs the subprogram from a local scope, avoiding the
6302/// built-in assertions that would typically fire.
6303 static DISubprogram *getSubprogram(Metadata *LocalScope) {
6304 if (!LocalScope)
6305 return nullptr;
6306
6307 if (auto *SP = dyn_cast<DISubprogram>(LocalScope))
6308 return SP;
6309
6310 if (auto *LB = dyn_cast<DILexicalBlockBase>(LocalScope))
6311 return getSubprogram(LB->getRawScope());
6312
6313 // Just return null; broken scope chains are checked elsewhere.
6314 assert(!isa<DILocalScope>(LocalScope) && "Unknown type of local scope");
6315 return nullptr;
6316}
6317
6318void Verifier::visit(DbgLabelRecord &DLR) {
6319 CheckDI(isa<DILabel>(DLR.getRawLabel()),
6320 "invalid #dbg_label intrinsic variable", &DLR, DLR.getRawLabel());
6321
6322 // Ignore broken !dbg attachments; they're checked elsewhere.
6323 if (MDNode *N = DLR.getDebugLoc().getAsMDNode())
6324 if (!isa<DILocation>(N))
6325 return;
6326
6327 BasicBlock *BB = DLR.getParent();
6328 Function *F = BB ? BB->getParent() : nullptr;
6329
6330 // The scopes for variables and !dbg attachments must agree.
6331 DILabel *Label = DLR.getLabel();
6332 DILocation *Loc = DLR.getDebugLoc();
6333 CheckDI(Loc, "#dbg_label record requires a !dbg attachment", &DLR, BB, F);
6334
6335 DISubprogram *LabelSP = getSubprogram(Label->getRawScope());
6336 DISubprogram *LocSP = getSubprogram(Loc->getRawScope());
6337 if (!LabelSP || !LocSP)
6338 return;
6339
6340 CheckDI(LabelSP == LocSP,
6341 "mismatched subprogram between #dbg_label label and !dbg attachment",
6342 &DLR, BB, F, Label, Label->getScope()->getSubprogram(), Loc,
6343 Loc->getScope()->getSubprogram());
6344}
6345
6346void Verifier::visit(DbgVariableRecord &DVR) {
6347 BasicBlock *BB = DVR.getParent();
6348 Function *F = BB->getParent();
6349
6353 "invalid #dbg record type", &DVR, DVR.getType());
6354
6355 // The location for a DbgVariableRecord must be either a ValueAsMetadata,
6356 // DIArgList, or an empty MDNode (which is a legacy representation for an
6357 // "undef" location).
6358 auto *MD = DVR.getRawLocation();
6359 CheckDI(MD && (isa<ValueAsMetadata>(MD) || isa<DIArgList>(MD) ||
6360 (isa<MDNode>(MD) && !cast<MDNode>(MD)->getNumOperands())),
6361 "invalid #dbg record address/value", &DVR, MD);
6362 if (auto *VAM = dyn_cast<ValueAsMetadata>(MD))
6363 visitValueAsMetadata(*VAM, F);
6364 else if (auto *AL = dyn_cast<DIArgList>(MD))
6365 visitDIArgList(*AL, F);
6366
6367 CheckDI(isa_and_nonnull<DILocalVariable>(DVR.getRawVariable()),
6368 "invalid #dbg record variable", &DVR, DVR.getRawVariable());
6369 visitMDNode(*DVR.getRawVariable(), AreDebugLocsAllowed::No);
6370
6371 CheckDI(isa_and_nonnull<DIExpression>(DVR.getRawExpression()),
6372 "invalid #dbg record expression", &DVR, DVR.getRawExpression());
6373 visitMDNode(*DVR.getExpression(), AreDebugLocsAllowed::No);
6374
6375 if (DVR.isDbgAssign()) {
6376 CheckDI(isa_and_nonnull<DIAssignID>(DVR.getRawAssignID()),
6377 "invalid #dbg_assign DIAssignID", &DVR, DVR.getRawAssignID());
6378 visitMDNode(*cast<DIAssignID>(DVR.getRawAssignID()),
6379 AreDebugLocsAllowed::No);
6380
6381 const auto *RawAddr = DVR.getRawAddress();
6382 // Similarly to the location above, the address for an assign
6383 // DbgVariableRecord must be a ValueAsMetadata or an empty MDNode, which
6384 // represents an undef address.
6385 CheckDI(
6386 isa<ValueAsMetadata>(RawAddr) ||
6387 (isa<MDNode>(RawAddr) && !cast<MDNode>(RawAddr)->getNumOperands()),
6388 "invalid #dbg_assign address", &DVR, DVR.getRawAddress());
6389 if (auto *VAM = dyn_cast<ValueAsMetadata>(RawAddr))
6390 visitValueAsMetadata(*VAM, F);
6391
6392 CheckDI(isa_and_nonnull<DIExpression>(DVR.getRawAddressExpression()),
6393 "invalid #dbg_assign address expression", &DVR,
6394 DVR.getRawAddressExpression());
6395 visitMDNode(*DVR.getAddressExpression(), AreDebugLocsAllowed::No);
6396
6397 // All of the linked instructions should be in the same function as DVR.
6398 for (Instruction *I : at::getAssignmentInsts(&DVR))
6399 CheckDI(DVR.getFunction() == I->getFunction(),
6400 "inst not in same function as #dbg_assign", I, &DVR);
6401 }
6402
6403 // This check is redundant with one in visitLocalVariable().
6404 DILocalVariable *Var = DVR.getVariable();
6405 CheckDI(isType(Var->getRawType()), "invalid type ref", Var,
6406 Var->getRawType());
6407
6408 auto *DLNode = DVR.getDebugLoc().getAsMDNode();
6409 CheckDI(isa_and_nonnull<DILocation>(DLNode), "invalid #dbg record DILocation",
6410 &DVR, DLNode);
6411 DILocation *Loc = DVR.getDebugLoc();
6412
6413 // The scopes for variables and !dbg attachments must agree.
6414 DISubprogram *VarSP = getSubprogram(Var->getRawScope());
6415 DISubprogram *LocSP = getSubprogram(Loc->getRawScope());
6416 if (!VarSP || !LocSP)
6417 return; // Broken scope chains are checked elsewhere.
6418
6419 CheckDI(VarSP == LocSP,
6420 "mismatched subprogram between #dbg record variable and DILocation",
6421 &DVR, BB, F, Var, Var->getScope()->getSubprogram(), Loc,
6422 Loc->getScope()->getSubprogram());
6423
6424 verifyFnArgs(DVR);
6425}
6426
6427void Verifier::visitVPIntrinsic(VPIntrinsic &VPI) {
6428 if (auto *VPCast = dyn_cast<VPCastIntrinsic>(&VPI)) {
6429 auto *RetTy = cast<VectorType>(VPCast->getType());
6430 auto *ValTy = cast<VectorType>(VPCast->getOperand(0)->getType());
6431 Check(RetTy->getElementCount() == ValTy->getElementCount(),
6432 "VP cast intrinsic first argument and result vector lengths must be "
6433 "equal",
6434 *VPCast);
6435
6436 switch (VPCast->getIntrinsicID()) {
6437 default:
6438 llvm_unreachable("Unknown VP cast intrinsic");
6439 case Intrinsic::vp_trunc:
6440 Check(RetTy->isIntOrIntVectorTy() && ValTy->isIntOrIntVectorTy(),
6441 "llvm.vp.trunc intrinsic first argument and result element type "
6442 "must be integer",
6443 *VPCast);
6444 Check(RetTy->getScalarSizeInBits() < ValTy->getScalarSizeInBits(),
6445 "llvm.vp.trunc intrinsic the bit size of first argument must be "
6446 "larger than the bit size of the return type",
6447 *VPCast);
6448 break;
6449 case Intrinsic::vp_zext:
6450 case Intrinsic::vp_sext:
6451 Check(RetTy->isIntOrIntVectorTy() && ValTy->isIntOrIntVectorTy(),
6452 "llvm.vp.zext or llvm.vp.sext intrinsic first argument and result "
6453 "element type must be integer",
6454 *VPCast);
6455 Check(RetTy->getScalarSizeInBits() > ValTy->getScalarSizeInBits(),
6456 "llvm.vp.zext or llvm.vp.sext intrinsic the bit size of first "
6457 "argument must be smaller than the bit size of the return type",
6458 *VPCast);
6459 break;
6460 case Intrinsic::vp_fptoui:
6461 case Intrinsic::vp_fptosi:
6462 case Intrinsic::vp_lrint:
6463 case Intrinsic::vp_llrint:
6464 Check(
6465 RetTy->isIntOrIntVectorTy() && ValTy->isFPOrFPVectorTy(),
6466 "llvm.vp.fptoui, llvm.vp.fptosi, llvm.vp.lrint or llvm.vp.llrint" "intrinsic first argument element "
6467 "type must be floating-point and result element type must be integer",
6468 *VPCast);
6469 break;
6470 case Intrinsic::vp_uitofp:
6471 case Intrinsic::vp_sitofp:
6472 Check(
6473 RetTy->isFPOrFPVectorTy() && ValTy->isIntOrIntVectorTy(),
6474 "llvm.vp.uitofp or llvm.vp.sitofp intrinsic first argument element "
6475 "type must be integer and result element type must be floating-point",
6476 *VPCast);
6477 break;
6478 case Intrinsic::vp_fptrunc:
6479 Check(RetTy->isFPOrFPVectorTy() && ValTy->isFPOrFPVectorTy(),
6480 "llvm.vp.fptrunc intrinsic first argument and result element type "
6481 "must be floating-point",
6482 *VPCast);
6483 Check(RetTy->getScalarSizeInBits() < ValTy->getScalarSizeInBits(),
6484 "llvm.vp.fptrunc intrinsic the bit size of first argument must be "
6485 "larger than the bit size of the return type",
6486 *VPCast);
6487 break;
6488 case Intrinsic::vp_fpext:
6489 Check(RetTy->isFPOrFPVectorTy() && ValTy->isFPOrFPVectorTy(),
6490 "llvm.vp.fpext intrinsic first argument and result element type "
6491 "must be floating-point",
6492 *VPCast);
6493 Check(RetTy->getScalarSizeInBits() > ValTy->getScalarSizeInBits(),
6494 "llvm.vp.fpext intrinsic the bit size of first argument must be "
6495 "smaller than the bit size of the return type",
6496 *VPCast);
6497 break;
6498 case Intrinsic::vp_ptrtoint:
6499 Check(RetTy->isIntOrIntVectorTy() && ValTy->isPtrOrPtrVectorTy(),
6500 "llvm.vp.ptrtoint intrinsic first argument element type must be "
6501 "pointer and result element type must be integer",
6502 *VPCast);
6503 break;
6504 case Intrinsic::vp_inttoptr:
6505 Check(RetTy->isPtrOrPtrVectorTy() && ValTy->isIntOrIntVectorTy(),
6506 "llvm.vp.inttoptr intrinsic first argument element type must be "
6507 "integer and result element type must be pointer",
6508 *VPCast);
6509 break;
6510 }
6511 }
6512 if (VPI.getIntrinsicID() == Intrinsic::vp_fcmp) {
6513 auto Pred = cast<VPCmpIntrinsic>(&VPI)->getPredicate();
6515 "invalid predicate for VP FP comparison intrinsic", &VPI);
6516 }
6517 if (VPI.getIntrinsicID() == Intrinsic::vp_icmp) {
6518 auto Pred = cast<VPCmpIntrinsic>(&VPI)->getPredicate();
6520 "invalid predicate for VP integer comparison intrinsic", &VPI);
6521 }
6522 if (VPI.getIntrinsicID() == Intrinsic::vp_is_fpclass) {
6523 auto TestMask = cast<ConstantInt>(VPI.getOperand(1));
6524 Check((TestMask->getZExtValue() & ~static_cast<unsigned>(fcAllFlags)) == 0,
6525 "unsupported bits for llvm.vp.is.fpclass test mask");
6526 }
6527}
6528
6529void Verifier::visitConstrainedFPIntrinsic(ConstrainedFPIntrinsic &FPI) {
6530 unsigned NumOperands;
6531 bool HasRoundingMD;
6532 switch (FPI.getIntrinsicID()) {
6533#define INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC) \
6534 case Intrinsic::INTRINSIC: \
6535 NumOperands = NARG; \
6536 HasRoundingMD = ROUND_MODE; \
6537 break;
6538#include "llvm/IR/ConstrainedOps.def"
6539 default:
6540 llvm_unreachable("Invalid constrained FP intrinsic!");
6541 }
6542 NumOperands += (1 + HasRoundingMD);
6543 // Compare intrinsics carry an extra predicate metadata operand.
6544 if (isa<ConstrainedFPCmpIntrinsic>(FPI))
6545 NumOperands += 1;
6546 Check((FPI.arg_size() == NumOperands),
6547 "invalid arguments for constrained FP intrinsic", &FPI);
6548
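// Illustrative sketch (editorial) of the operand count computed above: a
// binary operation has two value operands plus rounding-mode and
// exception-behavior metadata, e.g.
//   %r = call float @llvm.experimental.constrained.fadd.f32(float %a, float %b,
//            metadata !"round.dynamic", metadata !"fpexcept.strict")
// Constrained comparisons carry one extra metadata operand for the predicate
// and no rounding mode.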
6549 switch (FPI.getIntrinsicID()) {
6550 case Intrinsic::experimental_constrained_lrint:
6551 case Intrinsic::experimental_constrained_llrint: {
6552 Type *ValTy = FPI.getArgOperand(0)->getType();
6553 Type *ResultTy = FPI.getType();
6554 Check(!ValTy->isVectorTy() && !ResultTy->isVectorTy(),
6555 "Intrinsic does not support vectors", &FPI);
6556 }
6557 break;
6558
6559 case Intrinsic::experimental_constrained_lround:
6560 case Intrinsic::experimental_constrained_llround: {
6561 Type *ValTy = FPI.getArgOperand(0)->getType();
6562 Type *ResultTy = FPI.getType();
6563 Check(!ValTy->isVectorTy() && !ResultTy->isVectorTy(),
6564 "Intrinsic does not support vectors", &FPI);
6565 break;
6566 }
6567
6568 case Intrinsic::experimental_constrained_fcmp:
6569 case Intrinsic::experimental_constrained_fcmps: {
6570 auto Pred = cast<ConstrainedFPCmpIntrinsic>(&FPI)->getPredicate();
6572 "invalid predicate for constrained FP comparison intrinsic", &FPI);
6573 break;
6574 }
6575
6576 case Intrinsic::experimental_constrained_fptosi:
6577 case Intrinsic::experimental_constrained_fptoui: {
6578 Value *Operand = FPI.getArgOperand(0);
6579 ElementCount SrcEC;
6580 Check(Operand->getType()->isFPOrFPVectorTy(),
6581 "Intrinsic first argument must be floating point", &FPI);
6582 if (auto *OperandT = dyn_cast<VectorType>(Operand->getType())) {
6583 SrcEC = cast<VectorType>(OperandT)->getElementCount();
6584 }
6585
6586 Operand = &FPI;
6587 Check(SrcEC.isNonZero() == Operand->getType()->isVectorTy(),
6588 "Intrinsic first argument and result disagree on vector use", &FPI);
6589 Check(Operand->getType()->isIntOrIntVectorTy(),
6590 "Intrinsic result must be an integer", &FPI);
6591 if (auto *OperandT = dyn_cast<VectorType>(Operand->getType())) {
6592 Check(SrcEC == cast<VectorType>(OperandT)->getElementCount(),
6593 "Intrinsic first argument and result vector lengths must be equal",
6594 &FPI);
6595 }
6596 }
6597 break;
6598
6599 case Intrinsic::experimental_constrained_sitofp:
6600 case Intrinsic::experimental_constrained_uitofp: {
6601 Value *Operand = FPI.getArgOperand(0);
6602 ElementCount SrcEC;
6603 Check(Operand->getType()->isIntOrIntVectorTy(),
6604 "Intrinsic first argument must be integer", &FPI);
6605 if (auto *OperandT = dyn_cast<VectorType>(Operand->getType())) {
6606 SrcEC = cast<VectorType>(OperandT)->getElementCount();
6607 }
6608
6609 Operand = &FPI;
6610 Check(SrcEC.isNonZero() == Operand->getType()->isVectorTy(),
6611 "Intrinsic first argument and result disagree on vector use", &FPI);
6612 Check(Operand->getType()->isFPOrFPVectorTy(),
6613 "Intrinsic result must be a floating point", &FPI);
6614 if (auto *OperandT = dyn_cast<VectorType>(Operand->getType())) {
6615 Check(SrcEC == cast<VectorType>(OperandT)->getElementCount(),
6616 "Intrinsic first argument and result vector lengths must be equal",
6617 &FPI);
6618 }
6619 } break;
6620
6621 case Intrinsic::experimental_constrained_fptrunc:
6622 case Intrinsic::experimental_constrained_fpext: {
6623 Value *Operand = FPI.getArgOperand(0);
6624 Type *OperandTy = Operand->getType();
6625 Value *Result = &FPI;
6626 Type *ResultTy = Result->getType();
6627 Check(OperandTy->isFPOrFPVectorTy(),
6628 "Intrinsic first argument must be FP or FP vector", &FPI);
6629 Check(ResultTy->isFPOrFPVectorTy(),
6630 "Intrinsic result must be FP or FP vector", &FPI);
6631 Check(OperandTy->isVectorTy() == ResultTy->isVectorTy(),
6632 "Intrinsic first argument and result disagree on vector use", &FPI);
6633 if (OperandTy->isVectorTy()) {
6634 Check(cast<VectorType>(OperandTy)->getElementCount() ==
6635 cast<VectorType>(ResultTy)->getElementCount(),
6636 "Intrinsic first argument and result vector lengths must be equal",
6637 &FPI);
6638 }
6639 if (FPI.getIntrinsicID() == Intrinsic::experimental_constrained_fptrunc) {
6640 Check(OperandTy->getScalarSizeInBits() > ResultTy->getScalarSizeInBits(),
6641 "Intrinsic first argument's type must be larger than result type",
6642 &FPI);
6643 } else {
6644 Check(OperandTy->getScalarSizeInBits() < ResultTy->getScalarSizeInBits(),
6645 "Intrinsic first argument's type must be smaller than result type",
6646 &FPI);
6647 }
6648 }
6649 break;
6650
6651 default:
6652 break;
6653 }
6654
6655 // If a non-metadata argument is passed in a metadata slot then the
6656 // error will be caught earlier when the incorrect argument doesn't
6657 // match the specification in the intrinsic call table. Thus, no
6658 // argument type check is needed here.
6659
6660 Check(FPI.getExceptionBehavior().has_value(),
6661 "invalid exception behavior argument", &FPI);
6662 if (HasRoundingMD) {
6663 Check(FPI.getRoundingMode().has_value(), "invalid rounding mode argument",
6664 &FPI);
6665 }
6666}
6667
6668void Verifier::visitDbgIntrinsic(StringRef Kind, DbgVariableIntrinsic &DII) {
6669 auto *MD = DII.getRawLocation();
6670 CheckDI(isa<ValueAsMetadata>(MD) || isa<DIArgList>(MD) ||
6671 (isa<MDNode>(MD) && !cast<MDNode>(MD)->getNumOperands()),
6672 "invalid llvm.dbg." + Kind + " intrinsic address/value", &DII, MD);
6673 CheckDI(isa<DILocalVariable>(DII.getRawVariable()),
6674 "invalid llvm.dbg." + Kind + " intrinsic variable", &DII,
6675 DII.getRawVariable());
6676 CheckDI(isa<DIExpression>(DII.getRawExpression()),
6677 "invalid llvm.dbg." + Kind + " intrinsic expression", &DII,
6678 DII.getRawExpression());
6679
6680 if (auto *DAI = dyn_cast<DbgAssignIntrinsic>(&DII)) {
6681 CheckDI(isa<DIAssignID>(DAI->getRawAssignID()),
6682 "invalid llvm.dbg.assign intrinsic DIAssignID", &DII,
6683 DAI->getRawAssignID());
6684 const auto *RawAddr = DAI->getRawAddress();
6685 CheckDI(
6686 isa<ValueAsMetadata>(RawAddr) ||
6687 (isa<MDNode>(RawAddr) && !cast<MDNode>(RawAddr)->getNumOperands()),
6688 "invalid llvm.dbg.assign intrinsic address", &DII,
6689 DAI->getRawAddress());
6690 CheckDI(isa<DIExpression>(DAI->getRawAddressExpression()),
6691 "invalid llvm.dbg.assign intrinsic address expression", &DII,
6692 DAI->getRawAddressExpression());
6693 // All of the linked instructions should be in the same function as DII.
6694 for (Instruction *I : at::getAssignmentInsts(DAI))
6695 CheckDI(DAI->getFunction() == I->getFunction(),
6696 "inst not in same function as dbg.assign", I, DAI);
6697 }
6698
6699 // Ignore broken !dbg attachments; they're checked elsewhere.
6700 if (MDNode *N = DII.getDebugLoc().getAsMDNode())
6701 if (!isa<DILocation>(N))
6702 return;
6703
6704 BasicBlock *BB = DII.getParent();
6705 Function *F = BB ? BB->getParent() : nullptr;
6706
6707 // The scopes for variables and !dbg attachments must agree.
6708 DILocalVariable *Var = DII.getVariable();
6709 DILocation *Loc = DII.getDebugLoc();
6710 CheckDI(Loc, "llvm.dbg." + Kind + " intrinsic requires a !dbg attachment",
6711 &DII, BB, F);
6712
6713 DISubprogram *VarSP = getSubprogram(Var->getRawScope());
6714 DISubprogram *LocSP = getSubprogram(Loc->getRawScope());
6715 if (!VarSP || !LocSP)
6716 return; // Broken scope chains are checked elsewhere.
6717
6718 CheckDI(VarSP == LocSP,
6719 "mismatched subprogram between llvm.dbg." + Kind +
6720 " variable and !dbg attachment",
6721 &DII, BB, F, Var, Var->getScope()->getSubprogram(), Loc,
6722 Loc->getScope()->getSubprogram());
6723
6724 // This check is redundant with one in visitLocalVariable().
6725 CheckDI(isType(Var->getRawType()), "invalid type ref", Var,
6726 Var->getRawType());
6727 verifyFnArgs(DII);
6728}
6729
6730void Verifier::visitDbgLabelIntrinsic(StringRef Kind, DbgLabelInst &DLI) {
6731 CheckDI(isa<DILabel>(DLI.getRawLabel()),
6732 "invalid llvm.dbg." + Kind + " intrinsic variable", &DLI,
6733 DLI.getRawLabel());
6734
6735 // Ignore broken !dbg attachments; they're checked elsewhere.
6736 if (MDNode *N = DLI.getDebugLoc().getAsMDNode())
6737 if (!isa<DILocation>(N))
6738 return;
6739
6740 BasicBlock *BB = DLI.getParent();
6741 Function *F = BB ? BB->getParent() : nullptr;
6742
6743 // The scopes for variables and !dbg attachments must agree.
6744 DILabel *Label = DLI.getLabel();
6745 DILocation *Loc = DLI.getDebugLoc();
6746 Check(Loc, "llvm.dbg." + Kind + " intrinsic requires a !dbg attachment", &DLI,
6747 BB, F);
6748
6749 DISubprogram *LabelSP = getSubprogram(Label->getRawScope());
6750 DISubprogram *LocSP = getSubprogram(Loc->getRawScope());
6751 if (!LabelSP || !LocSP)
6752 return;
6753
6754 CheckDI(LabelSP == LocSP,
6755 "mismatched subprogram between llvm.dbg." + Kind +
6756 " label and !dbg attachment",
6757 &DLI, BB, F, Label, Label->getScope()->getSubprogram(), Loc,
6758 Loc->getScope()->getSubprogram());
6759}
6760
6761void Verifier::verifyFragmentExpression(const DbgVariableIntrinsic &I) {
6762 DILocalVariable *V = dyn_cast_or_null<DILocalVariable>(I.getRawVariable());
6763 DIExpression *E = dyn_cast_or_null<DIExpression>(I.getRawExpression());
6764
6765 // We don't know whether this intrinsic verified correctly.
6766 if (!V || !E || !E->isValid())
6767 return;
6768
6769 // Nothing to do if this isn't a DW_OP_LLVM_fragment expression.
6770 auto Fragment = E->getFragmentInfo();
6771 if (!Fragment)
6772 return;
6773
6774 // The frontend helps out GDB by emitting the members of local anonymous
6775 // unions as artificial local variables with shared storage. When SROA splits
6776 // the storage for artificial local variables that are smaller than the entire
6777 // union, the overhang piece will be outside of the allotted space for the
6778 // variable and this check fails.
6779 // FIXME: Remove this check as soon as clang stops doing this; it hides bugs.
6780 if (V->isArtificial())
6781 return;
6782
6783 verifyFragmentExpression(*V, *Fragment, &I);
6784}
6785void Verifier::verifyFragmentExpression(const DbgVariableRecord &DVR) {
6786 DILocalVariable *V = dyn_cast_or_null<DILocalVariable>(DVR.getRawVariable());
6787 DIExpression *E = dyn_cast_or_null<DIExpression>(DVR.getRawExpression());
6788
6789 // We don't know whether this intrinsic verified correctly.
6790 if (!V || !E || !E->isValid())
6791 return;
6792
6793 // Nothing to do if this isn't a DW_OP_LLVM_fragment expression.
6794 auto Fragment = E->getFragmentInfo();
6795 if (!Fragment)
6796 return;
6797
6798 // The frontend helps out GDB by emitting the members of local anonymous
6799 // unions as artificial local variables with shared storage. When SROA splits
6800 // the storage for artificial local variables that are smaller than the entire
6801 // union, the overhang piece will be outside of the allotted space for the
6802 // variable and this check fails.
6803 // FIXME: Remove this check as soon as clang stops doing this; it hides bugs.
6804 if (V->isArtificial())
6805 return;
6806
6807 verifyFragmentExpression(*V, *Fragment, &DVR);
6808}
6809
6810template <typename ValueOrMetadata>
6811void Verifier::verifyFragmentExpression(const DIVariable &V,
6812 DIExpression::FragmentInfo Fragment,
6813 ValueOrMetadata *Desc) {
6814 // If there's no size, the type is broken, but that should be checked
6815 // elsewhere.
6816 auto VarSize = V.getSizeInBits();
6817 if (!VarSize)
6818 return;
6819
6820 unsigned FragSize = Fragment.SizeInBits;
6821 unsigned FragOffset = Fragment.OffsetInBits;
6822 CheckDI(FragSize + FragOffset <= *VarSize,
6823 "fragment is larger than or outside of variable", Desc, &V);
6824 CheckDI(FragSize != *VarSize, "fragment covers entire variable", Desc, &V);
6825}
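// Illustrative sketch (editorial): for a 64-bit variable, a fragment such as
// DW_OP_LLVM_fragment, 32, 32 (offset 32, size 32) passes both checks, while a
// fragment at offset 48 of size 32 extends past the variable and a fragment at
// offset 0 of size 64 covers the entire variable; both are diagnosed above.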
6826
6827void Verifier::verifyFnArgs(const DbgVariableIntrinsic &I) {
6828 // This function does not take the scope of noninlined function arguments into
6829 // account. Don't run it if current function is nodebug, because it may
6830 // contain inlined debug intrinsics.
6831 if (!HasDebugInfo)
6832 return;
6833
6834 // For performance reasons only check non-inlined ones.
6835 if (I.getDebugLoc()->getInlinedAt())
6836 return;
6837
6838 DILocalVariable *Var = I.getVariable();
6839 CheckDI(Var, "dbg intrinsic without variable");
6840
6841 unsigned ArgNo = Var->getArg();
6842 if (!ArgNo)
6843 return;
6844
6845 // Verify there are no duplicate function argument debug info entries.
6846 // These will cause hard-to-debug assertions in the DWARF backend.
6847 if (DebugFnArgs.size() < ArgNo)
6848 DebugFnArgs.resize(ArgNo, nullptr);
6849
6850 auto *Prev = DebugFnArgs[ArgNo - 1];
6851 DebugFnArgs[ArgNo - 1] = Var;
6852 CheckDI(!Prev || (Prev == Var), "conflicting debug info for argument", &I,
6853 Prev, Var);
6854}
6855void Verifier::verifyFnArgs(const DbgVariableRecord &DVR) {
6856 // This function does not take the scope of noninlined function arguments into
6857 // account. Don't run it if current function is nodebug, because it may
6858 // contain inlined debug intrinsics.
6859 if (!HasDebugInfo)
6860 return;
6861
6862 // For performance reasons only check non-inlined ones.
6863 if (DVR.getDebugLoc()->getInlinedAt())
6864 return;
6865
6866 DILocalVariable *Var = DVR.getVariable();
6867 CheckDI(Var, "#dbg record without variable");
6868
6869 unsigned ArgNo = Var->getArg();
6870 if (!ArgNo)
6871 return;
6872
6873 // Verify there are no duplicate function argument debug info entries.
6874 // These will cause hard-to-debug assertions in the DWARF backend.
6875 if (DebugFnArgs.size() < ArgNo)
6876 DebugFnArgs.resize(ArgNo, nullptr);
6877
6878 auto *Prev = DebugFnArgs[ArgNo - 1];
6879 DebugFnArgs[ArgNo - 1] = Var;
6880 CheckDI(!Prev || (Prev == Var), "conflicting debug info for argument", &DVR,
6881 Prev, Var);
6882}
6883
6884void Verifier::verifyNotEntryValue(const DbgVariableIntrinsic &I) {
6885 DIExpression *E = dyn_cast_or_null<DIExpression>(I.getRawExpression());
6886
6887 // We don't know whether this intrinsic verified correctly.
6888 if (!E || !E->isValid())
6889 return;
6890
6891 if (isa<ValueAsMetadata>(I.getRawLocation())) {
6892 Value *VarValue = I.getVariableLocationOp(0);
6893 if (isa<UndefValue>(VarValue) || isa<PoisonValue>(VarValue))
6894 return;
6895 // We allow EntryValues for swift async arguments, as they have an
6896 // ABI-guarantee to be turned into a specific register.
6897 if (auto *ArgLoc = dyn_cast_or_null<Argument>(VarValue);
6898 ArgLoc && ArgLoc->hasAttribute(Attribute::SwiftAsync))
6899 return;
6900 }
6901
6902 CheckDI(!E->isEntryValue(),
6903 "Entry values are only allowed in MIR unless they target a "
6904 "swiftasync Argument",
6905 &I);
6906}
6907void Verifier::verifyNotEntryValue(const DbgVariableRecord &DVR) {
6908 DIExpression *E = dyn_cast_or_null<DIExpression>(DVR.getRawExpression());
6909
6910 // We don't know whether this intrinsic verified correctly.
6911 if (!E || !E->isValid())
6912 return;
6913
6914 if (isa<ValueAsMetadata>(DVR.getRawLocation())) {
6915 Value *VarValue = DVR.getVariableLocationOp(0);
6916 if (isa<UndefValue>(VarValue) || isa<PoisonValue>(VarValue))
6917 return;
6918 // We allow EntryValues for swift async arguments, as they have an
6919 // ABI-guarantee to be turned into a specific register.
6920 if (auto *ArgLoc = dyn_cast_or_null<Argument>(VarValue);
6921 ArgLoc && ArgLoc->hasAttribute(Attribute::SwiftAsync))
6922 return;
6923 }
6924
6925 CheckDI(!E->isEntryValue(),
6926 "Entry values are only allowed in MIR unless they target a "
6927 "swiftasync Argument",
6928 &DVR);
6929}
6930
6931void Verifier::verifyCompileUnits() {
6932 // When more than one Module is imported into the same context, such as during
6933 // an LTO build before linking the modules, ODR type uniquing may cause types
6934 // to point to a different CU. This check does not make sense in this case.
6935 if (M.getContext().isODRUniquingDebugTypes())
6936 return;
6937 auto *CUs = M.getNamedMetadata("llvm.dbg.cu");
6938 SmallPtrSet<const Metadata *, 2> Listed;
6939 if (CUs)
6940 Listed.insert(CUs->op_begin(), CUs->op_end());
6941 for (const auto *CU : CUVisited)
6942 CheckDI(Listed.count(CU), "DICompileUnit not listed in llvm.dbg.cu", CU);
6943 CUVisited.clear();
6944}
6945
6946void Verifier::verifyDeoptimizeCallingConvs() {
6947 if (DeoptimizeDeclarations.empty())
6948 return;
6949
6950 const Function *First = DeoptimizeDeclarations[0];
6951 for (const auto *F : ArrayRef(DeoptimizeDeclarations).slice(1)) {
6952 Check(First->getCallingConv() == F->getCallingConv(),
6953 "All llvm.experimental.deoptimize declarations must have the same "
6954 "calling convention",
6955 First, F);
6956 }
6957}
6958
6959void Verifier::verifyAttachedCallBundle(const CallBase &Call,
6960 const OperandBundleUse &BU) {
6961 FunctionType *FTy = Call.getFunctionType();
6962
6963 Check((FTy->getReturnType()->isPointerTy() ||
6964 (Call.doesNotReturn() && FTy->getReturnType()->isVoidTy())),
6965 "a call with operand bundle \"clang.arc.attachedcall\" must call a "
6966 "function returning a pointer or a non-returning function that has a "
6967 "void return type",
6968 Call);
6969
6970 Check(BU.Inputs.size() == 1 && isa<Function>(BU.Inputs.front()),
6971 "operand bundle \"clang.arc.attachedcall\" requires one function as "
6972 "an argument",
6973 Call);
6974
6975 auto *Fn = cast<Function>(BU.Inputs.front());
6976 Intrinsic::ID IID = Fn->getIntrinsicID();
6977
6978 if (IID) {
6979 Check((IID == Intrinsic::objc_retainAutoreleasedReturnValue ||
6980 IID == Intrinsic::objc_unsafeClaimAutoreleasedReturnValue),
6981 "invalid function argument", Call);
6982 } else {
6983 StringRef FnName = Fn->getName();
6984 Check((FnName == "objc_retainAutoreleasedReturnValue" ||
6985 FnName == "objc_unsafeClaimAutoreleasedReturnValue"),
6986 "invalid function argument", Call);
6987 }
6988}
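// Illustrative sketch (editorial): a well-formed use of the bundle looks like
//   %obj = call ptr @make_object() [ "clang.arc.attachedcall"(
//              ptr @llvm.objc.retainAutoreleasedReturnValue) ]
// i.e. a pointer-returning call whose single bundle operand is one of the two
// ObjC runtime functions accepted above.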
6989
6990void Verifier::verifyNoAliasScopeDecl() {
6991 if (NoAliasScopeDecls.empty())
6992 return;
6993
6994 // only a single scope must be declared at a time.
6995 for (auto *II : NoAliasScopeDecls) {
6996 assert(II->getIntrinsicID() == Intrinsic::experimental_noalias_scope_decl &&
6997 "Not a llvm.experimental.noalias.scope.decl ?");
6998 const auto *ScopeListMV = dyn_cast<MetadataAsValue>(
6999 II->getOperand(Intrinsic::NoAliasScopeDeclScopeArg));
7000 Check(ScopeListMV != nullptr,
7001 "llvm.experimental.noalias.scope.decl must have a MetadataAsValue "
7002 "argument",
7003 II);
7004
7005 const auto *ScopeListMD = dyn_cast<MDNode>(ScopeListMV->getMetadata());
7006 Check(ScopeListMD != nullptr, "!id.scope.list must point to an MDNode", II);
7007 Check(ScopeListMD->getNumOperands() == 1,
7008 "!id.scope.list must point to a list with a single scope", II);
7009 visitAliasScopeListMetadata(ScopeListMD);
7010 }
7011
7012 // Only check the domination rule when requested. Once all passes have been
7013 // adapted this option can go away.
7014 if (!VerifyNoAliasScopeDomination)
7015 return;
7016
7017 // Now sort the intrinsics based on the scope MDNode so that declarations of
7018 // the same scopes are next to each other.
7019 auto GetScope = [](IntrinsicInst *II) {
7020 const auto *ScopeListMV = cast<MetadataAsValue>(
7021 II->getOperand(Intrinsic::NoAliasScopeDeclScopeArg));
7022 return &cast<MDNode>(ScopeListMV->getMetadata())->getOperand(0);
7023 };
7024
7025 // We are sorting on MDNode pointers here. For valid input IR this is ok.
7026 // TODO: Sort on Metadata ID to avoid non-deterministic error messages.
7027 auto Compare = [GetScope](IntrinsicInst *Lhs, IntrinsicInst *Rhs) {
7028 return GetScope(Lhs) < GetScope(Rhs);
7029 };
7030
7031 llvm::sort(NoAliasScopeDecls, Compare);
7032
7033 // Go over the intrinsics and check that for the same scope, they are not
7034 // dominating each other.
7035 auto ItCurrent = NoAliasScopeDecls.begin();
7036 while (ItCurrent != NoAliasScopeDecls.end()) {
7037 auto CurScope = GetScope(*ItCurrent);
7038 auto ItNext = ItCurrent;
7039 do {
7040 ++ItNext;
7041 } while (ItNext != NoAliasScopeDecls.end() &&
7042 GetScope(*ItNext) == CurScope);
7043
7044 // [ItCurrent, ItNext) represents the declarations for the same scope.
7045 // Ensure they are not dominating each other, but only if it is not too
7046 // expensive.
7047 if (ItNext - ItCurrent < 32)
7048 for (auto *I : llvm::make_range(ItCurrent, ItNext))
7049 for (auto *J : llvm::make_range(ItCurrent, ItNext))
7050 if (I != J)
7051 Check(!DT.dominates(I, J),
7052 "llvm.experimental.noalias.scope.decl dominates another one "
7053 "with the same scope",
7054 I);
7055 ItCurrent = ItNext;
7056 }
7057}
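// Illustrative sketch (editorial): each declaration names exactly one scope,
// e.g.
//   call void @llvm.experimental.noalias.scope.decl(metadata !2)
// where !2 = !{!3} is a one-element scope list. When the domination check is
// enabled, two declarations of the same scope must not dominate one another.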
7058
7059//===----------------------------------------------------------------------===//
7060// Implement the public interfaces to this file...
7061//===----------------------------------------------------------------------===//
7062
7063 bool llvm::verifyFunction(const Function &f, raw_ostream *OS) {
7064 Function &F = const_cast<Function &>(f);
7065
7066 // Don't use a raw_null_ostream. Printing IR is expensive.
7067 Verifier V(OS, /*ShouldTreatBrokenDebugInfoAsError=*/true, *f.getParent());
7068
7069 // Note that this function's return value is inverted from what you would
7070 // expect of a function called "verify".
7071 return !V.verify(F);
7072}
7073
7074 bool llvm::verifyModule(const Module &M, raw_ostream *OS,
7075 bool *BrokenDebugInfo) {
7076 // Don't use a raw_null_ostream. Printing IR is expensive.
7077 Verifier V(OS, /*ShouldTreatBrokenDebugInfoAsError=*/!BrokenDebugInfo, M);
7078
7079 bool Broken = false;
7080 for (const Function &F : M)
7081 Broken |= !V.verify(F);
7082
7083 Broken |= !V.verify();
7084 if (BrokenDebugInfo)
7085 *BrokenDebugInfo = V.hasBrokenDebugInfo();
7086 // Note that this function's return value is inverted from what you would
7087 // expect of a function called "verify".
7088 return Broken;
7089}
7090
7091namespace {
7092
7093struct VerifierLegacyPass : public FunctionPass {
7094 static char ID;
7095
7096 std::unique_ptr<Verifier> V;
7097 bool FatalErrors = true;
7098
7099 VerifierLegacyPass() : FunctionPass(ID) {
7100 initializeVerifierLegacyPassPass(*PassRegistry::getPassRegistry());
7101 }
7102 explicit VerifierLegacyPass(bool FatalErrors)
7103 : FunctionPass(ID),
7104 FatalErrors(FatalErrors) {
7105 initializeVerifierLegacyPassPass(*PassRegistry::getPassRegistry());
7106 }
7107
7108 bool doInitialization(Module &M) override {
7109 V = std::make_unique<Verifier>(
7110 &dbgs(), /*ShouldTreatBrokenDebugInfoAsError=*/false, M);
7111 return false;
7112 }
7113
7114 bool runOnFunction(Function &F) override {
7115 if (!V->verify(F) && FatalErrors) {
7116 errs() << "in function " << F.getName() << '\n';
7117 report_fatal_error("Broken function found, compilation aborted!");
7118 }
7119 return false;
7120 }
7121
7122 bool doFinalization(Module &M) override {
7123 bool HasErrors = false;
7124 for (Function &F : M)
7125 if (F.isDeclaration())
7126 HasErrors |= !V->verify(F);
7127
7128 HasErrors |= !V->verify();
7129 if (FatalErrors && (HasErrors || V->hasBrokenDebugInfo()))
7130 report_fatal_error("Broken module found, compilation aborted!");
7131 return false;
7132 }
7133
7134 void getAnalysisUsage(AnalysisUsage &AU) const override {
7135 AU.setPreservesAll();
7136 }
7137};
7138
7139} // end anonymous namespace
7140
7141/// Helper to issue failure from the TBAA verification
7142template <typename... Tys> void TBAAVerifier::CheckFailed(Tys &&... Args) {
7143 if (Diagnostic)
7144 return Diagnostic->CheckFailed(Args...);
7145}
7146
7147#define CheckTBAA(C, ...) \
7148 do { \
7149 if (!(C)) { \
7150 CheckFailed(__VA_ARGS__); \
7151 return false; \
7152 } \
7153 } while (false)
7154
7155/// Verify that \p BaseNode can be used as the "base type" in the struct-path
7156/// TBAA scheme. This means \p BaseNode is either a scalar node, or a
7157/// struct-type node describing an aggregate data structure (like a struct).
7158TBAAVerifier::TBAABaseNodeSummary
7159TBAAVerifier::verifyTBAABaseNode(Instruction &I, const MDNode *BaseNode,
7160 bool IsNewFormat) {
7161 if (BaseNode->getNumOperands() < 2) {
7162 CheckFailed("Base nodes must have at least two operands", &I, BaseNode);
7163 return {true, ~0u};
7164 }
7165
7166 auto Itr = TBAABaseNodes.find(BaseNode);
7167 if (Itr != TBAABaseNodes.end())
7168 return Itr->second;
7169
7170 auto Result = verifyTBAABaseNodeImpl(I, BaseNode, IsNewFormat);
7171 auto InsertResult = TBAABaseNodes.insert({BaseNode, Result});
7172 (void)InsertResult;
7173 assert(InsertResult.second && "We just checked!");
7174 return Result;
7175}
7176
7177TBAAVerifier::TBAABaseNodeSummary
7178TBAAVerifier::verifyTBAABaseNodeImpl(Instruction &I, const MDNode *BaseNode,
7179 bool IsNewFormat) {
7180 const TBAAVerifier::TBAABaseNodeSummary InvalidNode = {true, ~0u};
7181
7182 if (BaseNode->getNumOperands() == 2) {
7183 // Scalar nodes can only be accessed at offset 0.
7184 return isValidScalarTBAANode(BaseNode)
7185 ? TBAAVerifier::TBAABaseNodeSummary({false, 0})
7186 : InvalidNode;
7187 }
7188
7189 if (IsNewFormat) {
7190 if (BaseNode->getNumOperands() % 3 != 0) {
7191 CheckFailed("Access tag nodes must have the number of operands that is a "
7192 "multiple of 3!", BaseNode);
7193 return InvalidNode;
7194 }
7195 } else {
7196 if (BaseNode->getNumOperands() % 2 != 1) {
7197 CheckFailed("Struct tag nodes must have an odd number of operands!",
7198 BaseNode);
7199 return InvalidNode;
7200 }
7201 }
7202
7203 // Check the type size field.
7204 if (IsNewFormat) {
7205 auto *TypeSizeNode = mdconst::dyn_extract_or_null<ConstantInt>(
7206 BaseNode->getOperand(1));
7207 if (!TypeSizeNode) {
7208 CheckFailed("Type size nodes must be constants!", &I, BaseNode);
7209 return InvalidNode;
7210 }
7211 }
7212
7213 // Check the type name field. In the new format it can be anything.
7214 if (!IsNewFormat && !isa<MDString>(BaseNode->getOperand(0))) {
7215 CheckFailed("Struct tag nodes have a string as their first operand",
7216 BaseNode);
7217 return InvalidNode;
7218 }
7219
7220 bool Failed = false;
7221
7222 std::optional<APInt> PrevOffset;
7223 unsigned BitWidth = ~0u;
7224
7225 // We've already checked that BaseNode is not a degenerate root node with one
7226 // operand in \c verifyTBAABaseNode, so this loop should run at least once.
7227 unsigned FirstFieldOpNo = IsNewFormat ? 3 : 1;
7228 unsigned NumOpsPerField = IsNewFormat ? 3 : 2;
7229 for (unsigned Idx = FirstFieldOpNo; Idx < BaseNode->getNumOperands();
7230 Idx += NumOpsPerField) {
7231 const MDOperand &FieldTy = BaseNode->getOperand(Idx);
7232 const MDOperand &FieldOffset = BaseNode->getOperand(Idx + 1);
7233 if (!isa<MDNode>(FieldTy)) {
7234 CheckFailed("Incorrect field entry in struct type node!", &I, BaseNode);
7235 Failed = true;
7236 continue;
7237 }
7238
7239 auto *OffsetEntryCI =
7240 mdconst::dyn_extract_or_null<ConstantInt>(FieldOffset);
7241 if (!OffsetEntryCI) {
7242 CheckFailed("Offset entries must be constants!", &I, BaseNode);
7243 Failed = true;
7244 continue;
7245 }
7246
7247 if (BitWidth == ~0u)
7248 BitWidth = OffsetEntryCI->getBitWidth();
7249
7250 if (OffsetEntryCI->getBitWidth() != BitWidth) {
7251 CheckFailed(
7252 "Bitwidth between the offsets and struct type entries must match", &I,
7253 BaseNode);
7254 Failed = true;
7255 continue;
7256 }
7257
7258 // NB! As far as I can tell, we generate a non-strictly increasing offset
7259 // sequence only from structs that have zero size bit fields. When
7260 // recursing into a contained struct in \c getFieldNodeFromTBAABaseNode we
7261 // pick the field lexically the latest in struct type metadata node. This
7262 // mirrors the actual behavior of the alias analysis implementation.
7263 bool IsAscending =
7264 !PrevOffset || PrevOffset->ule(OffsetEntryCI->getValue());
7265
7266 if (!IsAscending) {
7267 CheckFailed("Offsets must be increasing!", &I, BaseNode);
7268 Failed = true;
7269 }
7270
7271 PrevOffset = OffsetEntryCI->getValue();
7272
7273 if (IsNewFormat) {
7274 auto *MemberSizeNode = mdconst::dyn_extract_or_null<ConstantInt>(
7275 BaseNode->getOperand(Idx + 2));
7276 if (!MemberSizeNode) {
7277 CheckFailed("Member size entries must be constants!", &I, BaseNode);
7278 Failed = true;
7279 continue;
7280 }
7281 }
7282 }
7283
7284 return Failed ? InvalidNode
7285 : TBAAVerifier::TBAABaseNodeSummary(false, BitWidth);
7286}
7287
7288static bool IsRootTBAANode(const MDNode *MD) {
7289 return MD->getNumOperands() < 2;
7290}
7291
7292static bool IsScalarTBAANodeImpl(const MDNode *MD,
7293 SmallPtrSetImpl<const MDNode *> &Visited) {
7294 if (MD->getNumOperands() != 2 && MD->getNumOperands() != 3)
7295 return false;
7296
7297 if (!isa<MDString>(MD->getOperand(0)))
7298 return false;
7299
7300 if (MD->getNumOperands() == 3) {
7301 auto *Offset = mdconst::dyn_extract<ConstantInt>(MD->getOperand(2));
7302 if (!(Offset && Offset->isZero() && isa<MDString>(MD->getOperand(0))))
7303 return false;
7304 }
7305
7306 auto *Parent = dyn_cast_or_null<MDNode>(MD->getOperand(1));
7307 return Parent && Visited.insert(Parent).second &&
7308 (IsRootTBAANode(Parent) || IsScalarTBAANodeImpl(Parent, Visited));
7309}
7310
7311bool TBAAVerifier::isValidScalarTBAANode(const MDNode *MD) {
7312 auto ResultIt = TBAAScalarNodes.find(MD);
7313 if (ResultIt != TBAAScalarNodes.end())
7314 return ResultIt->second;
7315
7316 SmallPtrSet<const MDNode *, 4> Visited;
7317 bool Result = IsScalarTBAANodeImpl(MD, Visited);
7318 auto InsertResult = TBAAScalarNodes.insert({MD, Result});
7319 (void)InsertResult;
7320 assert(InsertResult.second && "Just checked!");
7321
7322 return Result;
7323}
7324
7325/// Returns the field node at the offset \p Offset in \p BaseNode. Update \p
7326/// Offset in place to be the offset within the field node returned.
7327///
7328/// We assume we've okayed \p BaseNode via \c verifyTBAABaseNode.
7329MDNode *TBAAVerifier::getFieldNodeFromTBAABaseNode(Instruction &I,
7330 const MDNode *BaseNode,
7331 APInt &Offset,
7332 bool IsNewFormat) {
7333 assert(BaseNode->getNumOperands() >= 2 && "Invalid base node!");
7334
7335 // Scalar nodes have only one possible "field" -- their parent in the access
7336 // hierarchy. Offset must be zero at this point, but our caller is supposed
7337 // to check that.
7338 if (BaseNode->getNumOperands() == 2)
7339 return cast<MDNode>(BaseNode->getOperand(1));
7340
7341 unsigned FirstFieldOpNo = IsNewFormat ? 3 : 1;
7342 unsigned NumOpsPerField = IsNewFormat ? 3 : 2;
7343 for (unsigned Idx = FirstFieldOpNo; Idx < BaseNode->getNumOperands();
7344 Idx += NumOpsPerField) {
7345 auto *OffsetEntryCI =
7346 mdconst::extract<ConstantInt>(BaseNode->getOperand(Idx + 1));
7347 if (OffsetEntryCI->getValue().ugt(Offset)) {
7348 if (Idx == FirstFieldOpNo) {
7349 CheckFailed("Could not find TBAA parent in struct type node", &I,
7350 BaseNode, &Offset);
7351 return nullptr;
7352 }
7353
7354 unsigned PrevIdx = Idx - NumOpsPerField;
7355 auto *PrevOffsetEntryCI =
7356 mdconst::extract<ConstantInt>(BaseNode->getOperand(PrevIdx + 1));
7357 Offset -= PrevOffsetEntryCI->getValue();
7358 return cast<MDNode>(BaseNode->getOperand(PrevIdx));
7359 }
7360 }
7361
7362 unsigned LastIdx = BaseNode->getNumOperands() - NumOpsPerField;
7363 auto *LastOffsetEntryCI = mdconst::extract<ConstantInt>(
7364 BaseNode->getOperand(LastIdx + 1));
7365 Offset -= LastOffsetEntryCI->getValue();
7366 return cast<MDNode>(BaseNode->getOperand(LastIdx));
7367}
7368
7369 static bool isNewFormatTBAATypeNode(llvm::MDNode *Type) {
7370 if (!Type || Type->getNumOperands() < 3)
7371 return false;
7372
7373 // In the new format type nodes shall have a reference to the parent type as
7374 // its first operand.
7375 return isa_and_nonnull<MDNode>(Type->getOperand(0));
7376}
7377
7378 bool TBAAVerifier::visitTBAAMetadata(Instruction &I, const MDNode *MD) {
7379 CheckTBAA(MD->getNumOperands() > 0, "TBAA metadata cannot have 0 operands",
7380 &I, MD);
7381
7382 CheckTBAA(isa<LoadInst>(I) || isa<StoreInst>(I) || isa<CallInst>(I) ||
7383 isa<VAArgInst>(I) || isa<AtomicRMWInst>(I) ||
7384 isa<AtomicCmpXchgInst>(I),
7385 "This instruction shall not have a TBAA access tag!", &I);
7386
7387 bool IsStructPathTBAA =
7388 isa<MDNode>(MD->getOperand(0)) && MD->getNumOperands() >= 3;
7389
7390 CheckTBAA(IsStructPathTBAA,
7391 "Old-style TBAA is no longer allowed, use struct-path TBAA instead",
7392 &I);
7393
7394 MDNode *BaseNode = dyn_cast_or_null<MDNode>(MD->getOperand(0));
7395 MDNode *AccessType = dyn_cast_or_null<MDNode>(MD->getOperand(1));
7396
7397 bool IsNewFormat = isNewFormatTBAATypeNode(AccessType);
7398
7399 if (IsNewFormat) {
7400 CheckTBAA(MD->getNumOperands() == 4 || MD->getNumOperands() == 5,
7401 "Access tag metadata must have either 4 or 5 operands", &I, MD);
7402 } else {
7403 CheckTBAA(MD->getNumOperands() < 5,
7404 "Struct tag metadata must have either 3 or 4 operands", &I, MD);
7405 }
7406
7407 // Check the access size field.
7408 if (IsNewFormat) {
7409 auto *AccessSizeNode = mdconst::dyn_extract_or_null<ConstantInt>(
7410 MD->getOperand(3));
7411 CheckTBAA(AccessSizeNode, "Access size field must be a constant", &I, MD);
7412 }
7413
7414 // Check the immutability flag.
7415 unsigned ImmutabilityFlagOpNo = IsNewFormat ? 4 : 3;
7416 if (MD->getNumOperands() == ImmutabilityFlagOpNo + 1) {
7417 auto *IsImmutableCI = mdconst::dyn_extract_or_null<ConstantInt>(
7418 MD->getOperand(ImmutabilityFlagOpNo));
7419 CheckTBAA(IsImmutableCI,
7420 "Immutability tag on struct tag metadata must be a constant", &I,
7421 MD);
7422 CheckTBAA(
7423 IsImmutableCI->isZero() || IsImmutableCI->isOne(),
7424 "Immutability part of the struct tag metadata must be either 0 or 1",
7425 &I, MD);
7426 }
7427
7428 CheckTBAA(BaseNode && AccessType,
7429 "Malformed struct tag metadata: base and access-type "
7430 "should be non-null and point to Metadata nodes",
7431 &I, MD, BaseNode, AccessType);
7432
7433 if (!IsNewFormat) {
7434 CheckTBAA(isValidScalarTBAANode(AccessType),
7435 "Access type node must be a valid scalar type", &I, MD,
7436 AccessType);
7437 }
7438
7439 auto *OffsetCI = mdconst::dyn_extract_or_null<ConstantInt>(MD->getOperand(2));
7440 CheckTBAA(OffsetCI, "Offset must be constant integer", &I, MD);
7441
7442 APInt Offset = OffsetCI->getValue();
7443 bool SeenAccessTypeInPath = false;
7444
7445 SmallPtrSet<MDNode *, 4> StructPath;
7446
7447 for (/* empty */; BaseNode && !IsRootTBAANode(BaseNode);
7448 BaseNode = getFieldNodeFromTBAABaseNode(I, BaseNode, Offset,
7449 IsNewFormat)) {
7450 if (!StructPath.insert(BaseNode).second) {
7451 CheckFailed("Cycle detected in struct path", &I, MD);
7452 return false;
7453 }
7454
7455 bool Invalid;
7456 unsigned BaseNodeBitWidth;
7457 std::tie(Invalid, BaseNodeBitWidth) = verifyTBAABaseNode(I, BaseNode,
7458 IsNewFormat);
7459
7460 // If the base node is invalid in itself, then we've already printed all the
7461 // errors we wanted to print.
7462 if (Invalid)
7463 return false;
7464
7465 SeenAccessTypeInPath |= BaseNode == AccessType;
7466
7467 if (isValidScalarTBAANode(BaseNode) || BaseNode == AccessType)
7468 CheckTBAA(Offset == 0, "Offset not zero at the point of scalar access",
7469 &I, MD, &Offset);
7470
7471 CheckTBAA(BaseNodeBitWidth == Offset.getBitWidth() ||
7472 (BaseNodeBitWidth == 0 && Offset == 0) ||
7473 (IsNewFormat && BaseNodeBitWidth == ~0u),
7474 "Access bit-width not the same as description bit-width", &I, MD,
7475 BaseNodeBitWidth, Offset.getBitWidth());
7476
7477 if (IsNewFormat && SeenAccessTypeInPath)
7478 break;
7479 }
7480
7481 CheckTBAA(SeenAccessTypeInPath, "Did not see access type in access path!", &I,
7482 MD);
7483 return true;
7484}
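// Illustrative sketch (editorial): a typical struct-path access tag accepted
// by the checks above looks like
//   store i32 0, ptr %p, !tbaa !3
//   !0 = !{!"Simple C/C++ TBAA"}
//   !1 = !{!"omnipotent char", !0, i64 0}
//   !2 = !{!"int", !1, i64 0}
//   !3 = !{!2, !2, i64 0}
// where !3 is (base type, access type, offset) and !2/!1 are scalar type nodes
// chaining up to the root !0.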
7485
7486char VerifierLegacyPass::ID = 0;
7487INITIALIZE_PASS(VerifierLegacyPass, "verify", "Module Verifier", false, false)
7488
7489 FunctionPass *llvm::createVerifierPass(bool FatalErrors) {
7490 return new VerifierLegacyPass(FatalErrors);
7491}
7492
7493AnalysisKey VerifierAnalysis::Key;
7494 VerifierAnalysis::Result VerifierAnalysis::run(Module &M,
7495 ModuleAnalysisManager &) {
7496 Result Res;
7497 Res.IRBroken = llvm::verifyModule(M, &dbgs(), &Res.DebugInfoBroken);
7498 return Res;
7499}
7500
7501 VerifierAnalysis::Result VerifierAnalysis::run(Function &F,
7502 FunctionAnalysisManager &) {
7503 return { llvm::verifyFunction(F, &dbgs()), false };
7504}
7505
7506 PreservedAnalyses VerifierPass::run(Module &M, ModuleAnalysisManager &AM) {
7507 auto Res = AM.getResult<VerifierAnalysis>(M);
7508 if (FatalErrors && (Res.IRBroken || Res.DebugInfoBroken))
7509 report_fatal_error("Broken module found, compilation aborted!");
7510
7511 return PreservedAnalyses::all();
7512}
7513
7514 PreservedAnalyses VerifierPass::run(Function &F, FunctionAnalysisManager &AM) {
7515 auto res = AM.getResult<VerifierAnalysis>(F);
7516 if (res.IRBroken && FatalErrors)
7517 report_fatal_error("Broken function found, compilation aborted!");
7518
7519 return PreservedAnalyses::all();
7520}
static DISubprogram * getSubprogram(Metadata *LocalScope)
Carefully grab the subprogram from a local scope.
Definition: Verifier.cpp:6303
static bool isTypeCongruent(Type *L, Type *R)
Two types are "congruent" if they are identical, or if they are both pointer types with different poi...
Definition: Verifier.cpp:3750
#define CheckTBAA(C,...)
Definition: Verifier.cpp:7147
static bool IsRootTBAANode(const MDNode *MD)
Definition: Verifier.cpp:7288
static bool isContiguous(const ConstantRange &A, const ConstantRange &B)
Definition: Verifier.cpp:4078
static Value * getParentPad(Value *EHPad)
Definition: Verifier.cpp:4322
static bool hasConflictingReferenceFlags(unsigned Flags)
Detect mutually exclusive flags.
Definition: Verifier.cpp:1283
static AttrBuilder getParameterABIAttributes(LLVMContext &C, unsigned I, AttributeList Attrs)
Definition: Verifier.cpp:3760
bool isFiniteNonZero() const
Definition: APFloat.h:1305
bool isNegative() const
Definition: APFloat.h:1295
const fltSemantics & getSemantics() const
Definition: APFloat.h:1303
Class for arbitrary precision integers.
Definition: APInt.h:76
bool sgt(const APInt &RHS) const
Signed greater than comparison.
Definition: APInt.h:1179
bool isZero() const
Determine if this value is zero, i.e. all bits are clear.
Definition: APInt.h:358
bool isMinValue() const
Determine if this is the smallest unsigned value.
Definition: APInt.h:395
bool ule(const APInt &RHS) const
Unsigned less or equal comparison.
Definition: APInt.h:1128
bool isPowerOf2() const
Check if this APInt's value is a power of two greater than zero.
Definition: APInt.h:418
bool isMaxValue() const
Determine if this is the largest unsigned value.
Definition: APInt.h:377
This class represents a conversion between pointers from one address space to another.
an instruction to allocate memory on the stack
Definition: Instructions.h:59
bool isSwiftError() const
Return true if this alloca is used as a swifterror argument to a call.
Definition: Instructions.h:157
bool isStaticAlloca() const
Return true if this alloca is in the entry block of the function and is a constant size.
Align getAlign() const
Return the alignment of the memory that is being allocated by the instruction.
Definition: Instructions.h:132
Type * getAllocatedType() const
Return the type that is being allocated by the instruction.
Definition: Instructions.h:125
bool isArrayAllocation() const
Return true if there is an allocation size parameter to the allocation instruction that is not 1.
const Value * getArraySize() const
Get the number of elements allocated.
Definition: Instructions.h:103
A container for analyses that lazily runs them and caches their results.
Definition: PassManager.h:321
PassT::Result & getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs)
Get the result of an analysis pass for a given IR unit.
Definition: PassManager.h:473
Represent the analysis usage information of a pass.
void setPreservesAll()
Set by analyses that do not transform their input at all.
This class represents an incoming formal argument to a Function.
Definition: Argument.h:31
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition: ArrayRef.h:41
bool empty() const
empty - Check if the array is empty.
Definition: ArrayRef.h:160
An instruction that atomically checks whether a specified value is in a memory location,...
Definition: Instructions.h:539
an instruction that atomically reads a memory location, combines it with another value,...
Definition: Instructions.h:748
static bool isFPOperation(BinOp Op)
Definition: Instructions.h:849
BinOp getOperation() const
Definition: Instructions.h:845
static StringRef getOperationName(BinOp Op)
AtomicOrdering getOrdering() const
Returns the ordering constraint of this rmw instruction.
Definition: Instructions.h:887
bool contains(Attribute::AttrKind A) const
Return true if the builder has the specified attribute.
Definition: AttributeMask.h:67
bool hasAttribute(Attribute::AttrKind Kind) const
Return true if the attribute exists in this set.
Definition: Attributes.cpp:841
std::string getAsString(bool InAttrGrp=false) const
Definition: Attributes.cpp:928
static Attribute::AttrKind getAttrKindFromName(StringRef AttrName)
Definition: Attributes.cpp:265
static bool canUseAsRetAttr(AttrKind Kind)
Definition: Attributes.cpp:689
static bool isExistingAttribute(StringRef Name)
Return true if the provided string matches the IR name of an attribute.
Definition: Attributes.cpp:288
static bool canUseAsFnAttr(AttrKind Kind)
Definition: Attributes.cpp:681
AttrKind
This enumeration lists the attributes that can be associated with parameters, function results,...
Definition: Attributes.h:85
@ None
No attributes have been set.
Definition: Attributes.h:87
static bool isIntAttrKind(AttrKind Kind)
Definition: Attributes.h:101
static bool canUseAsParamAttr(AttrKind Kind)
Definition: Attributes.cpp:685
bool isValid() const
Return true if the attribute is any kind of attribute.
Definition: Attributes.h:193
LLVM Basic Block Representation.
Definition: BasicBlock.h:60
iterator begin()
Instruction iterator methods.
Definition: BasicBlock.h:430
iterator_range< const_phi_iterator > phis() const
Returns a range that iterates over the phis in the basic block.
Definition: BasicBlock.h:499
const LandingPadInst * getLandingPadInst() const
Return the landingpad instruction associated with the landing pad.
Definition: BasicBlock.cpp:676
const Instruction * getFirstNonPHI() const
Returns a pointer to the first instruction in this block that is not a PHINode instruction.
Definition: BasicBlock.cpp:360
const Instruction & front() const
Definition: BasicBlock.h:453
bool isEntryBlock() const
Return true if this is the entry block of the containing function.
Definition: BasicBlock.cpp:564
const BasicBlock * getUniquePredecessor() const
Return the predecessor of this block if it has a unique predecessor block.
Definition: BasicBlock.cpp:460
const Function * getParent() const
Return the enclosing method, or null if none.
Definition: BasicBlock.h:206
InstListType::iterator iterator
Instruction iterators...
Definition: BasicBlock.h:165
bool isEHPad() const
Return true if this basic block is an exception handling block.
Definition: BasicBlock.h:657
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
Definition: BasicBlock.h:221
This class represents a no-op cast from one type to another.
static BlockAddress * lookup(const BasicBlock *BB)
Lookup an existing BlockAddress constant for the given BasicBlock.
Definition: Constants.cpp:1864
Conditional or Unconditional Branch instruction.
bool isConditional() const
Value * getCondition() const
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
Definition: InstrTypes.h:1494
bool isInlineAsm() const
Check if this call is an inline asm statement.
Definition: InstrTypes.h:1809
Function * getCalledFunction() const
Returns the function called, or null if this is an indirect function invocation or the function signa...
Definition: InstrTypes.h:1742
CallingConv::ID getCallingConv() const
Definition: InstrTypes.h:1800
Value * getCalledOperand() const
Definition: InstrTypes.h:1735
Value * getArgOperand(unsigned i) const
Definition: InstrTypes.h:1687
FunctionType * getFunctionType() const
Definition: InstrTypes.h:1600
unsigned arg_size() const
Definition: InstrTypes.h:1685
AttributeList getAttributes() const
Return the parameter attributes for this call.
Definition: InstrTypes.h:1819
CallBr instruction, tracking function calls that may not return control but instead transfer it to a ...
This class represents a function call, abstracting a target machine's calling convention.
bool isMustTailCall() const
static bool castIsValid(Instruction::CastOps op, Type *SrcTy, Type *DstTy)
This method can be used to determine if a cast from SrcTy to DstTy using Opcode op is valid or not.
unsigned getNumHandlers() const
return the number of 'handlers' in this catchswitch instruction, except the default handler
Value * getParentPad() const
BasicBlock * getUnwindDest() const
handler_range handlers()
iteration adapter for range-for loops.
BasicBlock * getUnwindDest() const
bool isFPPredicate() const
Definition: InstrTypes.h:1122
bool isIntPredicate() const
Definition: InstrTypes.h:1123
static bool isIntPredicate(Predicate P)
Definition: InstrTypes.h:1116
ConstantArray - Constant Array Declarations.
Definition: Constants.h:423
A constant value that is initialized with an expression using other constant values.
Definition: Constants.h:1017
ConstantFP - Floating Point Values [float, double].
Definition: Constants.h:268
This is the shared class of boolean and integer constants.
Definition: Constants.h:80
bool isMinusOne() const
This function will return true iff every bit in this constant is set to true.
Definition: Constants.h:217
bool isNegative() const
Definition: Constants.h:200
bool isZero() const
This is just a convenience method to make client code smaller for a common code.
Definition: Constants.h:205
uint64_t getZExtValue() const
Return the constant as a 64-bit unsigned integer value after it has been zero extended as appropriate...
Definition: Constants.h:154
const APInt & getValue() const
Return the constant as an APInt value reference.
Definition: Constants.h:145
This class represents a range of values.
Definition: ConstantRange.h:47
uint32_t getBitWidth() const
Get the bit width of this ConstantRange.
static ConstantTokenNone * get(LLVMContext &Context)
Return the ConstantTokenNone.
Definition: Constants.cpp:1499
This is an important base class in LLVM.
Definition: Constant.h:41
bool isNullValue() const
Return true if this is the value that would be returned by getNullValue.
Definition: Constants.cpp:90
This is the common base class for constrained floating point intrinsics.
std::optional< fp::ExceptionBehavior > getExceptionBehavior() const
std::optional< RoundingMode > getRoundingMode() const
List of ValueAsMetadata, to be used as an argument to a dbg.value intrinsic.
Assignment ID.
Basic type, like 'int' or 'float'.
Debug common block.
Enumeration value.
DWARF expression.
bool isEntryValue() const
Check if the expression consists of exactly one entry value operand.
static std::optional< FragmentInfo > getFragmentInfo(expr_op_iterator Start, expr_op_iterator End)
Retrieve the details of this fragment expression.
A pair of DIGlobalVariable and DIExpression.
DIGlobalVariable * getVariable() const
DIExpression * getExpression() const
An imported module (C++ using directive or similar).
Debug lexical block.
A scope for locals.
DISubprogram * getSubprogram() const
Get the subprogram for this scope.
DILocalScope * getScope() const
Get the local scope for this variable.
Debug location.
Metadata * getRawScope() const
Represents a module in the programming language, for example, a Clang module, or a Fortran module.
Debug lexical block.
Base class for scope-like contexts.
String type, Fortran CHARACTER(n)
Subprogram description.
Array subrange.
Type array for a subprogram.
Base class for template parameters.
Base class for variables.
Metadata * getRawType() const
Metadata * getRawScope() const
This class represents an Operation in the Expression.
A parsed version of the target data layout string in and methods for querying it.
Definition: DataLayout.h:110
This represents the llvm.dbg.label instruction.
Metadata * getRawLabel() const
DILabel * getLabel() const
Records a position in IR for a source label (DILabel).
Base class for non-instruction debug metadata records that have positions within IR.
void print(raw_ostream &O, bool IsForDebug=false) const
DebugLoc getDebugLoc() const
const BasicBlock * getParent() const
This is the common base class for debug info intrinsics for variables.
Metadata * getRawLocation() const
DILocalVariable * getVariable() const
Metadata * getRawVariable() const
Metadata * getRawExpression() const
Record of a variable value-assignment, aka a non instruction representation of the dbg....
MDNode * getRawAddressExpression() const
DIExpression * getExpression() const
Value * getVariableLocationOp(unsigned OpIdx) const
DILocalVariable * getVariable() const
Metadata * getRawLocation() const
Returns the metadata operand for the first location description.
@ End
Marks the end of the concrete types.
@ Any
To indicate all LocationTypes in searches.
DIExpression * getAddressExpression() const
MDNode * getAsMDNode() const
Return this as a bar MDNode.
Definition: DebugLoc.h:106
ValueT lookup(const_arg_type_t< KeyT > Val) const
lookup - Return the entry for the specified key, or a default constructed value if no such entry exis...
Definition: DenseMap.h:202
iterator find(const_arg_type_t< KeyT > Val)
Definition: DenseMap.h:155
bool empty() const
Definition: DenseMap.h:98
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Definition: DenseMap.h:220
void recalculate(ParentType &Func)
recalculate - compute a dominator tree for the given function
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
Definition: Dominators.h:162
bool isReachableFromEntry(const Use &U) const
Provide an overload for a Use.
Definition: Dominators.cpp:321
bool dominates(const BasicBlock *BB, const Use &U) const
Return true if the (end of the) basic block BB dominates the use U.
Definition: Dominators.cpp:122
This instruction extracts a single (scalar) element from a VectorType value.
static bool isValidOperands(const Value *Vec, const Value *Idx)
Return true if an extractelement instruction can be formed with the specified operands.
This instruction extracts a struct member or array element value from an aggregate value.
ArrayRef< unsigned > getIndices() const
static Type * getIndexedType(Type *Agg, ArrayRef< unsigned > Idxs)
Returns the type of the element that would be extracted with an extractvalue instruction with the spe...
This instruction compares its operands according to the predicate given to the constructor.
This class represents an extension of floating point types.
This class represents a cast from floating point to signed integer.
This class represents a cast from floating point to unsigned integer.
This class represents a truncation of floating point types.
An instruction for ordering other memory operations.
Definition: Instructions.h:460
AtomicOrdering getOrdering() const
Returns the ordering constraint of this fence instruction.
Definition: Instructions.h:487
Value * getParentPad() const
Convenience accessors.
Definition: InstrTypes.h:2711
FunctionPass class - This class is used to implement most global optimizations.
Definition: Pass.h:311
Intrinsic::ID getIntrinsicID() const LLVM_READONLY
getIntrinsicID - This method returns the ID number of the specified function, or Intrinsic::not_intri...
Definition: Function.h:232
bool hasPersonalityFn() const
Check whether this function has a personality function.
Definition: Function.h:855
bool isIntrinsic() const
isIntrinsic - Returns true if the function's name starts with "llvm.".
Definition: Function.h:237
const std::string & getGC() const
Definition: Function.cpp:772
Represents calls to the gc.relocate intrinsic.
Value * getBasePtr() const
Value * getDerivedPtr() const
void initialize(raw_ostream *OS, function_ref< void(const Twine &Message)> FailureCB, const FunctionT &F)
Generic tagged DWARF-like metadata node.
an instruction for type-safe pointer arithmetic to access elements of arrays and structs
Definition: Instructions.h:973
static Type * getIndexedType(Type *Ty, ArrayRef< Value * > IdxList)
Returns the result type of a getelementptr with the given source element type and indexes.
static bool isValidLinkage(LinkageTypes L)
Definition: GlobalAlias.h:95
const Constant * getAliasee() const
Definition: GlobalAlias.h:84
const Function * getResolverFunction() const
Definition: Globals.cpp:592
static FunctionType * getResolverFunctionType(Type *IFuncValTy)
Definition: GlobalIFunc.h:83
static bool isValidLinkage(LinkageTypes L)
Definition: GlobalIFunc.h:87
const Constant * getResolver() const
Definition: GlobalIFunc.h:70
bool hasComdat() const
Definition: GlobalObject.h:128
MDNode * getMetadata(unsigned KindID) const
Get the current metadata attachments for the given kind, if any.
Definition: Value.h:565
bool hasExternalLinkage() const
Definition: GlobalValue.h:511
bool isDSOLocal() const
Definition: GlobalValue.h:305
bool isImplicitDSOLocal() const
Definition: GlobalValue.h:298
bool isDeclaration() const
Return true if the primary definition of this global value is outside of the current translation unit...
Definition: Globals.cpp:281
bool hasValidDeclarationLinkage() const
Definition: GlobalValue.h:533
LinkageTypes getLinkage() const
Definition: GlobalValue.h:546
bool hasDefaultVisibility() const
Definition: GlobalValue.h:249
bool hasPrivateLinkage() const
Definition: GlobalValue.h:527
bool hasHiddenVisibility() const
Definition: GlobalValue.h:250
bool hasExternalWeakLinkage() const
Definition: GlobalValue.h:529
bool hasDLLImportStorageClass() const
Definition: GlobalValue.h:278
bool hasDLLExportStorageClass() const
Definition: GlobalValue.h:281
bool isDeclarationForLinker() const
Definition: GlobalValue.h:618
unsigned getAddressSpace() const
Definition: GlobalValue.h:205
Module * getParent()
Get the module that this global value is contained inside of...
Definition: GlobalValue.h:656
PointerType * getType() const
Global values are always pointers.
Definition: GlobalValue.h:294
bool hasComdat() const
Definition: GlobalValue.h:241
bool hasCommonLinkage() const
Definition: GlobalValue.h:532
bool hasGlobalUnnamedAddr() const
Definition: GlobalValue.h:215
bool hasAppendingLinkage() const
Definition: GlobalValue.h:525
bool hasAvailableExternallyLinkage() const
Definition: GlobalValue.h:512
Type * getValueType() const
Definition: GlobalValue.h:296
const Constant * getInitializer() const
getInitializer - Return the initializer for this global variable.
bool hasInitializer() const
Definitions have initializers, declarations don't.
bool isConstant() const
If the value is a global constant, its value is immutable throughout the runtime execution of the pro...
bool hasDefinitiveInitializer() const
hasDefinitiveInitializer - Whether the global variable has an initializer, and any other instances of...
This instruction compares its operands according to the predicate given to the constructor.
Indirect Branch Instruction.
BasicBlock * getDestination(unsigned i)
Return the specified destination.
unsigned getNumDestinations() const
return the number of possible destinations in this indirectbr instruction.
unsigned getNumSuccessors() const
This instruction inserts a single (scalar) element into a VectorType value.
static bool isValidOperands(const Value *Vec, const Value *NewElt, const Value *Idx)
Return true if an insertelement instruction can be formed with the specified operands.
This instruction inserts a struct field of array element value into an aggregate value.
Value * getAggregateOperand()
ArrayRef< unsigned > getIndices() const
Base class for instruction visitors.
Definition: InstVisitor.h:78
RetTy visitTerminator(Instruction &I)
Definition: InstVisitor.h:253
RetTy visitCallBase(CallBase &I)
Definition: InstVisitor.h:267
void visitFunction(Function &F)
Definition: InstVisitor.h:142
void visitBasicBlock(BasicBlock &BB)
Definition: InstVisitor.h:143
void visit(Iterator Start, Iterator End)
Definition: InstVisitor.h:87
RetTy visitFuncletPadInst(FuncletPadInst &I)
Definition: InstVisitor.h:197
void visitInstruction(Instruction &I)
Definition: InstVisitor.h:280
unsigned getNumSuccessors() const LLVM_READONLY
Return the number of successors that this instruction has.
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
Definition: Instruction.h:454
bool isAtomic() const LLVM_READONLY
Return true if this instruction has an AtomicOrdering of unordered or higher.
const BasicBlock * getParent() const
Definition: Instruction.h:152
const Function * getFunction() const
Return the function this instruction belongs to.
Definition: Instruction.cpp:87
This class represents a cast from an integer to a pointer.
A wrapper class for inspecting calls to intrinsic functions.
Definition: IntrinsicInst.h:47
static bool mayLowerToFunctionCall(Intrinsic::ID IID)
Check if the intrinsic might lower into a regular function call in the course of IR transformations.
Intrinsic::ID getIntrinsicID() const
Return the intrinsic ID of this intrinsic.
Definition: IntrinsicInst.h:54
Invoke instruction.
BasicBlock * getUnwindDest() const
This is an important class for using LLVM in a threaded context.
Definition: LLVMContext.h:67
The landingpad instruction holds all of the information necessary to generate correct exception handl...
bool isCleanup() const
Return 'true' if this landingpad instruction is a cleanup.
unsigned getNumClauses() const
Get the number of clauses for this landing pad.
bool isCatch(unsigned Idx) const
Return 'true' if the clause and index Idx is a catch clause.
bool isFilter(unsigned Idx) const
Return 'true' if the clause and index Idx is a filter clause.
Constant * getClause(unsigned Idx) const
Get the value of the clause at index Idx.
An instruction for reading from memory.
Definition: Instructions.h:184
AtomicOrdering getOrdering() const
Returns the ordering constraint of this load instruction.
Definition: Instructions.h:245
SyncScope::ID getSyncScopeID() const
Returns the synchronization scope ID of this load instruction.
Definition: Instructions.h:255
Align getAlign() const
Return the alignment of the access that is being performed.
Definition: Instructions.h:236
Metadata node.
Definition: Metadata.h:1067
const MDOperand & getOperand(unsigned I) const
Definition: Metadata.h:1428
bool isTemporary() const
Definition: Metadata.h:1251
ArrayRef< MDOperand > operands() const
Definition: Metadata.h:1426
unsigned getNumOperands() const
Return number of MDNode operands.
Definition: Metadata.h:1434
bool isDistinct() const
Definition: Metadata.h:1250
bool isResolved() const
Check if node is fully resolved.
Definition: Metadata.h:1247
LLVMContext & getContext() const
Definition: Metadata.h:1231
Tracking metadata reference owned by Metadata.
Definition: Metadata.h:889
Metadata * get() const
Definition: Metadata.h:918
A single uniqued string.
Definition: Metadata.h:720
StringRef getString() const
Definition: Metadata.cpp:610
Typed, array-like tuple of metadata.
Definition: Metadata.h:1627
Tuple of metadata.
Definition: Metadata.h:1470
static bool isTagMD(const Metadata *MD)
This class implements a map that also provides access to all stored values in a deterministic order.
Definition: MapVector.h:36
void clear()
Definition: MapVector.h:88
Metadata wrapper in the Value hierarchy.
Definition: Metadata.h:176
static MetadataAsValue * getIfExists(LLVMContext &Context, Metadata *MD)
Definition: Metadata.cpp:111
Metadata * getMetadata() const
Definition: Metadata.h:193
Root of the metadata hierarchy.
Definition: Metadata.h:62
void print(raw_ostream &OS, const Module *M=nullptr, bool IsForDebug=false) const
Print.
Definition: AsmWriter.cpp:5195
unsigned getMetadataID() const
Definition: Metadata.h:102
Manage lifetime of a slot tracker for printing IR.
A Module instance is used to store all the information related to an LLVM module.
Definition: Module.h:65
ModFlagBehavior
This enumeration defines the supported behaviors of module flags.
Definition: Module.h:115
@ AppendUnique
Appends the two values, which are required to be metadata nodes.
Definition: Module.h:144
@ Override
Uses the specified value, regardless of the behavior or value of the other module.
Definition: Module.h:136
@ Warning
Emits a warning if two values disagree.
Definition: Module.h:122
@ Error
Emits an error if two values disagree, otherwise the resulting value is that of the operands.
Definition: Module.h:118
@ Min
Takes the min of the two values, which are required to be integers.
Definition: Module.h:150
@ Append
Appends the two values, which are required to be metadata nodes.
Definition: Module.h:139
@ Max
Takes the max of the two values, which are required to be integers.
Definition: Module.h:147
@ Require
Adds a requirement that another module flag be present and have a specified value after linking is pe...
Definition: Module.h:131
const std::string & getModuleIdentifier() const
Get the module identifier which is, essentially, the name of the module.
Definition: Module.h:267
static bool isValidModFlagBehavior(Metadata *MD, ModFlagBehavior &MFB)
Checks if Metadata represents a valid ModFlagBehavior, and stores the converted result in MFB.
Definition: Module.cpp:288
A tuple of MDNodes.
Definition: Metadata.h:1729
StringRef getName() const
Definition: Metadata.cpp:1398
void print(raw_ostream &ROS, bool IsForDebug=false) const
Definition: AsmWriter.cpp:4856
iterator_range< op_iterator > operands()
Definition: Metadata.h:1825
op_range incoming_values()
static PassRegistry * getPassRegistry()
getPassRegistry - Access the global registry object, which is automatically initialized at applicatio...
static PointerType * get(Type *ElementType, unsigned AddressSpace)
This constructs a pointer to an object of the specified type in a numbered address space.
A set of analyses that are preserved following a run of a transformation pass.
Definition: Analysis.h:109
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
Definition: Analysis.h:115
Simple wrapper around std::function<void(raw_ostream&)>.
Definition: Printable.h:38
This class represents a cast from a pointer to an integer.
Interface for looking up the initializer for a variable name, used by Init::resolveReferences.
Definition: Record.h:2213
Resume the propagation of an exception.
Value * getValue() const
Convenience accessor.
Return a value (possibly void), from a function.
This class represents a sign extension of integer types.
This class represents a cast from signed integer to floating point.
This class represents the LLVM 'select' instruction.
static const char * areInvalidOperands(Value *Cond, Value *True, Value *False)
Return a string if the specified operands are invalid for a select operation, otherwise return null.
This instruction constructs a fixed permutation of two input vectors.
static bool isValidOperands(const Value *V1, const Value *V2, const Value *Mask)
Return true if a shufflevector instruction can be formed with the specified operands.
static void getShuffleMask(const Constant *Mask, SmallVectorImpl< int > &Result)
Convert the input shuffle mask operand to a vector of integers.
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
Definition: SmallPtrSet.h:321
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
Definition: SmallPtrSet.h:360
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
Definition: SmallPtrSet.h:342
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
Definition: SmallPtrSet.h:427
SmallSet - This maintains a set of unique values, optimizing for the case when the set is small (less...
Definition: SmallSet.h:135
std::pair< const_iterator, bool > insert(const T &V)
insert - Insert an element into the set if it isn't already there.
Definition: SmallSet.h:179
SmallString - A SmallString is just a SmallVector with methods and accessors that make it work better...
Definition: SmallString.h:26
bool empty() const
Definition: SmallVector.h:94
size_t size() const
Definition: SmallVector.h:91
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: SmallVector.h:586
void reserve(size_type N)
Definition: SmallVector.h:676
iterator insert(iterator I, T &&Elt)
Definition: SmallVector.h:818
void resize(size_type N)
Definition: SmallVector.h:651
void push_back(const T &Elt)
Definition: SmallVector.h:426
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1209
An instruction for storing to memory.
Definition: Instructions.h:317
StringMapEntry - This is used to represent one value that is inserted into a StringMap.
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:50
bool getAsInteger(unsigned Radix, T &Result) const
Parse the current string as an integer of the specified radix.
Definition: StringRef.h:462
bool starts_with(StringRef Prefix) const
Check if this string starts with the given Prefix.
Definition: StringRef.h:257
bool contains(StringRef Other) const
Return true if the given string is a substring of *this, and false otherwise.
Definition: StringRef.h:416
static constexpr size_t npos
Definition: StringRef.h:52
Class to represent struct types.
Definition: DerivedTypes.h:216
unsigned getNumElements() const
Random access to the elements.
Definition: DerivedTypes.h:341
bool containsScalableVectorType(SmallPtrSetImpl< Type * > *Visited=nullptr) const
Returns true if this struct contains a scalable vector.
Definition: Type.cpp:400
Type * getTypeAtIndex(const Value *V) const
Given an index value into the type, return the type of the element.
Definition: Type.cpp:612
Multiway switch.
Verify that the TBAA Metadatas are valid.
Definition: Verifier.h:39
bool visitTBAAMetadata(Instruction &I, const MDNode *MD)
Visit an instruction and return true if it is valid, return false if an invalid TBAA is attached.
Definition: Verifier.cpp:7378
@ CanBeGlobal
This type may be used as the value type of a global variable.
Definition: DerivedTypes.h:771
TinyPtrVector - This class is specialized for cases where there are normally 0 or 1 element in a vect...
Definition: TinyPtrVector.h:29
unsigned size() const
Triple - Helper class for working with autoconf configuration names.
Definition: Triple.h:44
This class represents a truncation of integer types.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition: Twine.h:81
The instances of the Type class are immutable: once they are created, they are never changed.
Definition: Type.h:45
bool isVectorTy() const
True if this is an instance of VectorType.
Definition: Type.h:265
PointerType * getPointerTo(unsigned AddrSpace=0) const
Return a pointer to the current type.
bool isArrayTy() const
True if this is an instance of ArrayType.
Definition: Type.h:252
bool isLabelTy() const
Return true if this is 'label'.
Definition: Type.h:219
bool isIntOrIntVectorTy() const
Return true if this is an integer type or a vector of integer types.
Definition: Type.h:234
bool isPointerTy() const
True if this is an instance of PointerType.
Definition: Type.h:255
unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
bool isSized(SmallPtrSetImpl< Type * > *Visited=nullptr) const
Return true if it makes sense to take the size of this type.
Definition: Type.h:302
bool isFloatingPointTy() const
Return true if this is one of the floating-point types.
Definition: Type.h:185
bool isPtrOrPtrVectorTy() const
Return true if this is a pointer type or a vector of pointer types.
Definition: Type.h:262
bool isScalableTy() const
Return true if this is a type whose size is a known multiple of vscale.
bool canLosslesslyBitCastTo(Type *Ty) const
Return true if this type could be converted with a lossless BitCast to type 'Ty'.
bool isIntOrPtrTy() const
Return true if this is an integer type or a pointer type.
Definition: Type.h:243
bool isIntegerTy() const
True if this is an instance of IntegerType.
Definition: Type.h:228
bool isTokenTy() const
Return true if this is 'token'.
Definition: Type.h:225
bool isFPOrFPVectorTy() const
Return true if this is a FP type or a vector of FP.
Definition: Type.h:216
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
Definition: Type.h:348
bool isMetadataTy() const
Return true if this is 'metadata'.
Definition: Type.h:222
This class represents a cast unsigned integer to floating point.
A Use represents the edge between a Value definition and its users.
Definition: Use.h:43
op_range operands()
Definition: User.h:242
Value * getOperand(unsigned i) const
Definition: User.h:169
unsigned getNumOperands() const
Definition: User.h:191
This class represents the va_arg llvm instruction, which returns an argument of the specified type gi...
This is the common base class for vector predication intrinsics.
Value wrapper in the Metadata hierarchy.
Definition: Metadata.h:450
Value * getValue() const
Definition: Metadata.h:490
LLVM Value Representation.
Definition: Value.h:74
iterator_range< user_iterator > materialized_users()
Definition: Value.h:415
Type * getType() const
All values are typed, get the type of this value.
Definition: Value.h:255
static constexpr uint64_t MaximumAlignment
Definition: Value.h:807
const Value * stripPointerCastsAndAliases() const
Strip off pointer casts, all-zero GEPs, address space casts, and aliases.
Definition: Value.cpp:697
const Value * stripInBoundsOffsets(function_ref< void(const Value *)> Func=[](const Value *) {}) const
Strip off pointer casts and inbounds GEPs.
Definition: Value.cpp:785
iterator_range< user_iterator > users()
Definition: Value.h:421
bool materialized_use_empty() const
Definition: Value.h:349
LLVMContext & getContext() const
All values hold a context through their type.
Definition: Value.cpp:1074
bool hasName() const
Definition: Value.h:261
StringRef getName() const
Return a constant reference to the value's name.
Definition: Value.cpp:309
Check a module for errors, and report separate error states for IR and debug info errors.
Definition: Verifier.h:107
Result run(Module &M, ModuleAnalysisManager &)
Definition: Verifier.cpp:7494
PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM)
Definition: Verifier.cpp:7506
This class represents zero extension of integer types.
constexpr bool isNonZero() const
Definition: TypeSize.h:158
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
Definition: TypeSize.h:171
constexpr ScalarTy getKnownMinValue() const
Returns the minimum value this quantity can represent.
Definition: TypeSize.h:168
An efficient, type-erasing, non-owning reference to a callable.
NodeTy * getNextNode()
Get the next node, or nullptr for the list tail.
Definition: ilist_node.h:316
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition: raw_ostream.h:52
This file contains the declaration of the Comdat class, which represents a single COMDAT in LLVM.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr char Args[]
Key for Kernel::Metadata::mArgs.
constexpr char Attrs[]
Key for Kernel::Metadata::mAttrs.
AttributeMask typeIncompatible(Type *Ty, AttributeSafetyKind ASK=ASK_ALL)
Which attributes cannot be applied to a type.
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
Definition: BitmaskEnum.h:121
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
Definition: CallingConv.h:24
@ AMDGPU_CS
Used for Mesa/AMDPAL compute shaders.
Definition: CallingConv.h:197
@ AMDGPU_VS
Used for Mesa vertex shaders, or AMDPAL last shader stage before rasterization (vertex shader if tess...
Definition: CallingConv.h:188
@ AMDGPU_KERNEL
Used for AMDGPU code object kernels.
Definition: CallingConv.h:200
@ AnyReg
OBSOLETED - Used for stack based JavaScript calls.
Definition: CallingConv.h:60
@ AMDGPU_CS_ChainPreserve
Used on AMDGPUs to give the middle-end more control over argument placement.
Definition: CallingConv.h:249
@ AMDGPU_HS
Used for Mesa/AMDPAL hull shaders (= tessellation control shaders).
Definition: CallingConv.h:206
@ AMDGPU_GS
Used for Mesa/AMDPAL geometry shaders.
Definition: CallingConv.h:191
@ X86_INTR
x86 hardware interrupt context.
Definition: CallingConv.h:173
@ AMDGPU_CS_Chain
Used on AMDGPUs to give the middle-end more control over argument placement.
Definition: CallingConv.h:245
@ AMDGPU_PS
Used for Mesa/AMDPAL pixel shaders.
Definition: CallingConv.h:194
@ Cold
Attempts to make code in the caller as efficient as possible under the assumption that the call is no...
Definition: CallingConv.h:47
@ PTX_Device
Call to a PTX device function.
Definition: CallingConv.h:129
@ SPIR_KERNEL
Used for SPIR kernel functions.
Definition: CallingConv.h:144
@ Fast
Attempts to make calls as fast as possible (e.g.
Definition: CallingConv.h:41
@ Intel_OCL_BI
Used for Intel OpenCL built-ins.
Definition: CallingConv.h:147
@ Tail
Attemps to make calls as fast as possible while guaranteeing that tail call optimization can always b...
Definition: CallingConv.h:76
@ PTX_Kernel
Call to a PTX kernel. Passes all arguments in parameter space.
Definition: CallingConv.h:125
@ SwiftTail
This follows the Swift calling convention in how arguments are passed but guarantees tail calls will ...
Definition: CallingConv.h:87
@ C
The default llvm calling convention, compatible with C.
Definition: CallingConv.h:34
MatchIntrinsicTypesResult matchIntrinsicSignature(FunctionType *FTy, ArrayRef< IITDescriptor > &Infos, SmallVectorImpl< Type * > &ArgTys)
Match the specified function type with the type constraints specified by the .td file.
Definition: Function.cpp:1710
void getIntrinsicInfoTableEntries(ID id, SmallVectorImpl< IITDescriptor > &T)
Return the IIT table descriptor for the specified intrinsic into an array of IITDescriptors.
Definition: Function.cpp:1315
@ MatchIntrinsicTypes_NoMatchRet
Definition: Intrinsics.h:214
@ MatchIntrinsicTypes_NoMatchArg
Definition: Intrinsics.h:215
StringRef getName(ID id)
Return the LLVM name for an intrinsic, such as "llvm.ppc.altivec.lvx".
Definition: Function.cpp:1029
static const int NoAliasScopeDeclScopeArg
Definition: Intrinsics.h:37
bool matchIntrinsicVarArg(bool isVarArg, ArrayRef< IITDescriptor > &Infos)
Verify if the intrinsic has variable arguments.
Definition: Function.cpp:1736
Flag
These should be considered private to the implementation of the MCInstrDesc class.
Definition: MCInstrDesc.h:148
@ System
Synchronized with respect to all concurrently executing threads.
Definition: LLVMContext.h:57
std::optional< VFInfo > tryDemangleForVFABI(StringRef MangledName, const FunctionType *FTy)
Function to construct a VFInfo out of a mangled names in the following format:
@ CE
Windows NT (Windows on ARM)
AssignmentInstRange getAssignmentInsts(DIAssignID *ID)
Return a range of instructions (typically just one) that have ID as an attachment.
Definition: DebugInfo.cpp:1883
initializer< Ty > init(const Ty &Val)
Definition: CommandLine.h:450
bool isFortran(SourceLanguage S)
Definition: Dwarf.h:569
SourceLanguage
Definition: Dwarf.h:204
@ DW_LANG_lo_user
Definition: Dwarf.h:208
@ DW_MACINFO_undef
Definition: Dwarf.h:787
@ DW_MACINFO_start_file
Definition: Dwarf.h:788
@ DW_MACINFO_define
Definition: Dwarf.h:786
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
Definition: STLExtras.h:329
@ Low
Lower the current thread's priority such that it does not affect foreground tasks significantly.
@ Offset
Definition: DWP.cpp:456
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1722
bool canInstructionHaveMMRAs(const Instruction &I)
auto enumerate(FirstRange &&First, RestRanges &&...Rest)
Given two or more input ranges, returns a new range whose values are are tuples (A,...
Definition: STLExtras.h:2406
bool verifyFunction(const Function &F, raw_ostream *OS=nullptr)
Check a function for errors, useful for use when debugging a pass.
Definition: Verifier.cpp:7063
AllocFnKind
Definition: Attributes.h:48
testing::Matcher< const detail::ErrorHolder & > Failed()
Definition: Error.h:198
void initializeVerifierLegacyPassPass(PassRegistry &)
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
Definition: STLExtras.h:2073
DenseMap< BasicBlock *, ColorVector > colorEHFunclets(Function &F)
If an EH funclet personality is in use (see isFuncletEHPersonality), this will recompute which blocks...
constexpr bool isPowerOf2_64(uint64_t Value)
Return true if the argument is a power of two > 0 (64 bit edition.)
Definition: MathExtras.h:280
bool isScopedEHPersonality(EHPersonality Pers)
Returns true if this personality uses scope-style EH IR instructions: catchswitch,...
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1729
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
Definition: MathExtras.h:275
bool isModSet(const ModRefInfo MRI)
Definition: ModRef.h:48
void sort(IteratorTy Start, IteratorTy End)
Definition: STLExtras.h:1647
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:163
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
Definition: Error.cpp:156
EHPersonality classifyEHPersonality(const Value *Pers)
See if the given exception handling personality function is one that we understand.
raw_fd_ostream & errs()
This returns a reference to a raw_ostream for standard error.
AtomicOrdering
Atomic ordering for LLVM's memory model.
@ ArgMem
Access to memory via argument pointers.
@ First
Helpers to iterate all locations in the MemoryEffectsBase class.
FunctionPass * createVerifierPass(bool FatalErrors=true)
Definition: Verifier.cpp:7489
@ Invalid
Denotes invalid value.
@ Dynamic
Denotes mode unknown at compile time.
@ MaskAll
A bitmask that includes all valid flags.
constexpr unsigned BitWidth
Definition: BitmaskEnum.h:191
std::optional< RoundingMode > convertStrToRoundingMode(StringRef)
Returns a valid RoundingMode enumerator when given a string that is valid as input in constrained int...
Definition: FPEnv.cpp:24
std::unique_ptr< GCStrategy > getGCStrategy(const StringRef Name)
Lookup the GCStrategy object associated with the given gc name.
Definition: GCStrategy.cpp:24
auto predecessors(const MachineBasicBlock *BB)
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Definition: STLExtras.h:1879
bool pred_empty(const BasicBlock *BB)
Definition: CFG.h:118
bool verifyModule(const Module &M, raw_ostream *OS=nullptr, bool *BrokenDebugInfo=nullptr)
Check a module for errors.
Definition: Verifier.cpp:7074
#define N
static const fltSemantics & IEEEsingle() LLVM_READNONE
Definition: APFloat.cpp:249
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition: Alignment.h:39
A special type used by analysis passes to provide an address that identifies that particular analysis...
Definition: Analysis.h:26
Holds the characteristics of one fragment of a larger variable.
Description of the encoding of one expression Op.
This struct is a compact representation of a valid (power of two) or undefined (0) alignment.
Definition: Alignment.h:117
A lightweight accessor for an operand bundle meant to be passed around by value.
Definition: InstrTypes.h:1389
uint32_t getTagID() const
Return the tag of this operand bundle as an integer.
Definition: InstrTypes.h:1417
ArrayRef< Use > Inputs
Definition: InstrTypes.h:1390
void DebugInfoCheckFailed(const Twine &Message)
A debug info check failed.
Definition: Verifier.cpp:302
VerifierSupport(raw_ostream *OS, const Module &M)
Definition: Verifier.cpp:154
bool Broken
Track the brokenness of the module while recursively visiting.
Definition: Verifier.cpp:148
raw_ostream * OS
Definition: Verifier.cpp:140
void CheckFailed(const Twine &Message, const T1 &V1, const Ts &... Vs)
A check failed (with values to print).
Definition: Verifier.cpp:295
bool BrokenDebugInfo
Broken debug info can be "recovered" from by stripping the debug info.
Definition: Verifier.cpp:150
LLVMContext & Context
Definition: Verifier.cpp:145
bool TreatBrokenDebugInfoAsError
Whether to treat broken debug info as an error.
Definition: Verifier.cpp:152
void CheckFailed(const Twine &Message)
A check failed, so printout out the condition and the message.
Definition: Verifier.cpp:284
const Module & M
Definition: Verifier.cpp:141
const DataLayout & DL
Definition: Verifier.cpp:144
void DebugInfoCheckFailed(const Twine &Message, const T1 &V1, const Ts &... Vs)
A debug info check failed (with values to print).
Definition: Verifier.cpp:311
ModuleSlotTracker MST
Definition: Verifier.cpp:142