LLVM 23.0.0git
MachineVerifier.cpp
Go to the documentation of this file.
1//===- MachineVerifier.cpp - Machine Code Verifier ------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// Pass to verify generated machine code. The following is checked:
10//
11// Operand counts: All explicit operands must be present.
12//
13// Register classes: All physical and virtual register operands must be
14// compatible with the register class required by the instruction descriptor.
15//
16// Register live intervals: Registers must be defined only once, and must be
17// defined before use.
18//
19// The machine code verifier is enabled with the command-line option
20// -verify-machineinstrs.
21//===----------------------------------------------------------------------===//
22
24#include "llvm/ADT/BitVector.h"
25#include "llvm/ADT/DenseMap.h"
26#include "llvm/ADT/DenseSet.h"
29#include "llvm/ADT/STLExtras.h"
33#include "llvm/ADT/StringRef.h"
34#include "llvm/ADT/Twine.h"
64#include "llvm/IR/BasicBlock.h"
65#include "llvm/IR/Constants.h"
67#include "llvm/IR/Function.h"
68#include "llvm/IR/InlineAsm.h"
71#include "llvm/MC/LaneBitmask.h"
72#include "llvm/MC/MCAsmInfo.h"
73#include "llvm/MC/MCDwarf.h"
74#include "llvm/MC/MCInstrDesc.h"
77#include "llvm/Pass.h"
82#include "llvm/Support/ModRef.h"
83#include "llvm/Support/Mutex.h"
86#include <algorithm>
87#include <cassert>
88#include <cstddef>
89#include <cstdint>
90#include <iterator>
91#include <string>
92#include <utility>
93
94using namespace llvm;
95
96namespace {
97
98/// Used the by the ReportedErrors class to guarantee only one error is reported
99/// at one time.
100static ManagedStatic<sys::SmartMutex<true>> ReportedErrorsLock;
101
102struct MachineVerifier {
103 MachineVerifier(MachineFunctionAnalysisManager &MFAM, const char *b,
104 raw_ostream *OS, bool AbortOnError = true)
105 : MFAM(&MFAM), OS(OS ? *OS : nulls()), Banner(b),
106 ReportedErrs(AbortOnError) {}
107
108 MachineVerifier(Pass *pass, const char *b, raw_ostream *OS,
109 bool AbortOnError = true)
110 : PASS(pass), OS(OS ? *OS : nulls()), Banner(b),
111 ReportedErrs(AbortOnError) {}
112
113 MachineVerifier(const char *b, LiveVariables *LiveVars,
114 LiveIntervals *LiveInts, LiveStacks *LiveStks,
115 SlotIndexes *Indexes, raw_ostream *OS,
116 bool AbortOnError = true)
117 : OS(OS ? *OS : nulls()), Banner(b), LiveVars(LiveVars),
118 LiveInts(LiveInts), LiveStks(LiveStks), Indexes(Indexes),
119 ReportedErrs(AbortOnError) {}
120
121 /// \returns true if no problems were found.
122 bool verify(const MachineFunction &MF);
123
124 MachineFunctionAnalysisManager *MFAM = nullptr;
125 Pass *const PASS = nullptr;
126 raw_ostream &OS;
127 const char *Banner;
128 const MachineFunction *MF = nullptr;
129 const TargetMachine *TM = nullptr;
130 const TargetInstrInfo *TII = nullptr;
131 const TargetRegisterInfo *TRI = nullptr;
132 const MachineRegisterInfo *MRI = nullptr;
133 const RegisterBankInfo *RBI = nullptr;
134
135 // Avoid querying the MachineFunctionProperties for each operand.
136 bool isFunctionRegBankSelected = false;
137 bool isFunctionSelected = false;
138 bool isFunctionTracksDebugUserValues = false;
139
140 using RegVector = SmallVector<Register, 16>;
141 using RegMaskVector = SmallVector<const uint32_t *, 4>;
142 using RegSet = DenseSet<Register>;
143 using RegMap = DenseMap<Register, const MachineInstr *>;
144 using BlockSet = SmallPtrSet<const MachineBasicBlock *, 8>;
145
146 const MachineInstr *FirstNonPHI = nullptr;
147 const MachineInstr *FirstTerminator = nullptr;
148 BlockSet FunctionBlocks;
149
150 BitVector regsReserved;
151 RegSet regsLive;
152 RegVector regsDefined, regsDead, regsKilled;
153 RegMaskVector regMasks;
154
155 SlotIndex lastIndex;
156
157 // Add Reg and any sub-registers to RV
158 void addRegWithSubRegs(RegVector &RV, Register Reg) {
159 RV.push_back(Reg);
160 if (Reg.isPhysical())
161 append_range(RV, TRI->subregs(Reg.asMCReg()));
162 }
163
164 struct BBInfo {
165 // Is this MBB reachable from the MF entry point?
166 bool reachable = false;
167
168 // Vregs that must be live in because they are used without being
169 // defined. Map value is the user. vregsLiveIn doesn't include regs
170 // that only are used by PHI nodes.
171 RegMap vregsLiveIn;
172
173 // Regs killed in MBB. They may be defined again, and will then be in both
174 // regsKilled and regsLiveOut.
175 RegSet regsKilled;
176
177 // Regs defined in MBB and live out. Note that vregs passing through may
178 // be live out without being mentioned here.
179 RegSet regsLiveOut;
180
181 // Vregs that pass through MBB untouched. This set is disjoint from
182 // regsKilled and regsLiveOut.
183 RegSet vregsPassed;
184
185 // Vregs that must pass through MBB because they are needed by a successor
186 // block. This set is disjoint from regsLiveOut.
187 RegSet vregsRequired;
188
189 // Set versions of block's predecessor and successor lists.
190 BlockSet Preds, Succs;
191
192 BBInfo() = default;
193
194 // Add register to vregsRequired if it belongs there. Return true if
195 // anything changed.
196 bool addRequired(Register Reg) {
197 if (!Reg.isVirtual())
198 return false;
199 if (regsLiveOut.count(Reg))
200 return false;
201 return vregsRequired.insert(Reg).second;
202 }
203
204 // Same for a full set.
205 bool addRequired(const RegSet &RS) {
206 bool Changed = false;
207 for (Register Reg : RS)
208 Changed |= addRequired(Reg);
209 return Changed;
210 }
211
212 // Same for a full map.
213 bool addRequired(const RegMap &RM) {
214 bool Changed = false;
215 for (const auto &I : RM)
216 Changed |= addRequired(I.first);
217 return Changed;
218 }
219
220 // Live-out registers are either in regsLiveOut or vregsPassed.
221 bool isLiveOut(Register Reg) const {
222 return regsLiveOut.count(Reg) || vregsPassed.count(Reg);
223 }
224 };
225
226 // Extra register info per MBB.
227 DenseMap<const MachineBasicBlock *, BBInfo> MBBInfoMap;
228
229 bool isReserved(Register Reg) {
230 return Reg.id() < regsReserved.size() && regsReserved.test(Reg.id());
231 }
232
233 bool isAllocatable(Register Reg) const {
234 return Reg.id() < TRI->getNumRegs() && TRI->isInAllocatableClass(Reg) &&
235 !regsReserved.test(Reg.id());
236 }
237
238 // Analysis information if available
239 LiveVariables *LiveVars = nullptr;
240 LiveIntervals *LiveInts = nullptr;
241 LiveStacks *LiveStks = nullptr;
242 SlotIndexes *Indexes = nullptr;
243
244 /// A class to track the number of reported error and to guarantee that only
245 /// one error is reported at one time.
246 class ReportedErrors {
247 unsigned NumReported = 0;
248 bool AbortOnError;
249
250 public:
251 /// \param AbortOnError -- If set, abort after printing the first error.
252 ReportedErrors(bool AbortOnError) : AbortOnError(AbortOnError) {}
253
254 ~ReportedErrors() {
255 if (!hasError())
256 return;
257 if (AbortOnError)
258 report_fatal_error("Found " + Twine(NumReported) +
259 " machine code errors.");
260 // Since we haven't aborted, release the lock to allow other threads to
261 // report errors.
262 ReportedErrorsLock->unlock();
263 }
264
265 /// Increment the number of reported errors.
266 /// \returns true if this is the first reported error.
267 bool increment() {
268 // If this is the first error this thread has encountered, grab the lock
269 // to prevent other threads from reporting errors at the same time.
270 // Otherwise we assume we already have the lock.
271 if (!hasError())
272 ReportedErrorsLock->lock();
273 ++NumReported;
274 return NumReported == 1;
275 }
276
277 /// \returns true if an error was reported.
278 bool hasError() { return NumReported; }
279 };
280 ReportedErrors ReportedErrs;
281
282 // This is calculated only when trying to verify convergence control tokens.
283 // Similar to the LLVM IR verifier, we calculate this locally instead of
284 // relying on the pass manager.
285 MachineDominatorTree DT;
286
287 void visitMachineFunctionBefore();
288 void visitMachineBasicBlockBefore(const MachineBasicBlock *MBB);
289 void visitMachineBundleBefore(const MachineInstr *MI);
290
291 /// Verify that all of \p MI's virtual register operands are scalars.
292 /// \returns True if all virtual register operands are scalar. False
293 /// otherwise.
294 bool verifyAllRegOpsScalar(const MachineInstr &MI,
295 const MachineRegisterInfo &MRI);
296 bool verifyVectorElementMatch(LLT Ty0, LLT Ty1, const MachineInstr *MI);
297
298 bool verifyGIntrinsicSideEffects(const MachineInstr *MI);
299 bool verifyGIntrinsicConvergence(const MachineInstr *MI);
300 void verifyPreISelGenericInstruction(const MachineInstr *MI);
301
302 void visitMachineInstrBefore(const MachineInstr *MI);
303 void visitMachineOperand(const MachineOperand *MO, unsigned MONum);
304 void visitMachineBundleAfter(const MachineInstr *MI);
305 void visitMachineBasicBlockAfter(const MachineBasicBlock *MBB);
306 void visitMachineFunctionAfter();
307
308 void report(const char *msg, const MachineFunction *MF);
309 void report(const char *msg, const MachineBasicBlock *MBB);
310 void report(const char *msg, const MachineInstr *MI);
311 void report(const char *msg, const MachineOperand *MO, unsigned MONum,
312 LLT MOVRegType = LLT{});
313 void report(const Twine &Msg, const MachineInstr *MI);
314
315 void report_context(const LiveInterval &LI) const;
316 void report_context(const LiveRange &LR, VirtRegOrUnit VRegOrUnit,
317 LaneBitmask LaneMask) const;
318 void report_context(const LiveRange::Segment &S) const;
319 void report_context(const VNInfo &VNI) const;
320 void report_context(SlotIndex Pos) const;
321 void report_context(MCPhysReg PhysReg) const;
322 void report_context_liverange(const LiveRange &LR) const;
323 void report_context_lanemask(LaneBitmask LaneMask) const;
324 void report_context_vreg(Register VReg) const;
325 void report_context_vreg_regunit(VirtRegOrUnit VRegOrUnit) const;
326
327 void verifyInlineAsm(const MachineInstr *MI);
328
329 void checkLiveness(const MachineOperand *MO, unsigned MONum);
330 void checkLivenessAtUse(const MachineOperand *MO, unsigned MONum,
331 SlotIndex UseIdx, const LiveRange &LR,
332 VirtRegOrUnit VRegOrUnit,
333 LaneBitmask LaneMask = LaneBitmask::getNone());
334 void checkLivenessAtDef(const MachineOperand *MO, unsigned MONum,
335 SlotIndex DefIdx, const LiveRange &LR,
336 VirtRegOrUnit VRegOrUnit, bool SubRangeCheck = false,
337 LaneBitmask LaneMask = LaneBitmask::getNone());
338
339 void markReachable(const MachineBasicBlock *MBB);
340 void calcRegsPassed();
341 void checkPHIOps(const MachineBasicBlock &MBB);
342
343 void calcRegsRequired();
344 void verifyLiveVariables();
345 void verifyLiveIntervals();
346 void verifyLiveInterval(const LiveInterval &);
347 void verifyLiveRangeValue(const LiveRange &, const VNInfo *, VirtRegOrUnit,
348 LaneBitmask);
349 void verifyLiveRangeSegment(const LiveRange &,
350 const LiveRange::const_iterator I, VirtRegOrUnit,
351 LaneBitmask);
352 void verifyLiveRange(const LiveRange &, VirtRegOrUnit,
353 LaneBitmask LaneMask = LaneBitmask::getNone());
354
355 void verifyStackFrame();
356 /// Check that the stack protector is the top-most object in the stack.
357 void verifyStackProtector();
358
359 void verifySlotIndexes() const;
360 void verifyProperties(const MachineFunction &MF);
361};
362
363struct MachineVerifierLegacyPass : public MachineFunctionPass {
364 static char ID; // Pass ID, replacement for typeid
365
366 const std::string Banner;
367
368 MachineVerifierLegacyPass(std::string banner = std::string())
369 : MachineFunctionPass(ID), Banner(std::move(banner)) {}
370
371 void getAnalysisUsage(AnalysisUsage &AU) const override {
372 AU.addUsedIfAvailable<LiveStacksWrapperLegacy>();
373 AU.addUsedIfAvailable<LiveVariablesWrapperPass>();
374 AU.addUsedIfAvailable<SlotIndexesWrapperPass>();
375 AU.addUsedIfAvailable<LiveIntervalsWrapperPass>();
376 AU.setPreservesAll();
378 }
379
380 bool runOnMachineFunction(MachineFunction &MF) override {
381 // Skip functions that have known verification problems.
382 // FIXME: Remove this mechanism when all problematic passes have been
383 // fixed.
384 if (MF.getProperties().hasFailsVerification())
385 return false;
386
387 MachineVerifier(this, Banner.c_str(), &errs()).verify(MF);
388 return false;
389 }
390};
391
392} // end anonymous namespace
393
397 // Skip functions that have known verification problems.
398 // FIXME: Remove this mechanism when all problematic passes have been
399 // fixed.
400 if (MF.getProperties().hasFailsVerification())
401 return PreservedAnalyses::all();
402 MachineVerifier(MFAM, Banner.c_str(), &errs()).verify(MF);
403 return PreservedAnalyses::all();
404}
405
406char MachineVerifierLegacyPass::ID = 0;
407
408INITIALIZE_PASS(MachineVerifierLegacyPass, "machineverifier",
409 "Verify generated machine code", false, false)
410
412 return new MachineVerifierLegacyPass(Banner);
413}
414
415void llvm::verifyMachineFunction(const std::string &Banner,
416 const MachineFunction &MF) {
417 // TODO: Use MFAM after porting below analyses.
418 // LiveVariables *LiveVars;
419 // LiveIntervals *LiveInts;
420 // LiveStacks *LiveStks;
421 // SlotIndexes *Indexes;
422 MachineVerifier(nullptr, Banner.c_str(), &errs()).verify(MF);
423}
424
425bool MachineFunction::verify(Pass *p, const char *Banner, raw_ostream *OS,
426 bool AbortOnError) const {
427 return MachineVerifier(p, Banner, OS, AbortOnError).verify(*this);
428}
429
431 const char *Banner, raw_ostream *OS,
432 bool AbortOnError) const {
433 return MachineVerifier(MFAM, Banner, OS, AbortOnError).verify(*this);
434}
435
437 const char *Banner, raw_ostream *OS,
438 bool AbortOnError) const {
439 return MachineVerifier(Banner, /*LiveVars=*/nullptr, LiveInts,
440 /*LiveStks=*/nullptr, Indexes, OS, AbortOnError)
441 .verify(*this);
442}
443
444void MachineVerifier::verifySlotIndexes() const {
445 if (Indexes == nullptr)
446 return;
447
448 // Ensure the IdxMBB list is sorted by slot indexes.
451 E = Indexes->MBBIndexEnd(); I != E; ++I) {
452 assert(!Last.isValid() || I->first > Last);
453 Last = I->first;
454 }
455}
456
457void MachineVerifier::verifyProperties(const MachineFunction &MF) {
458 // If a pass has introduced virtual registers without clearing the
459 // NoVRegs property (or set it without allocating the vregs)
460 // then report an error.
461 if (MF.getProperties().hasNoVRegs() && MRI->getNumVirtRegs())
462 report("Function has NoVRegs property but there are VReg operands", &MF);
463}
464
465bool MachineVerifier::verify(const MachineFunction &MF) {
466 this->MF = &MF;
467 TM = &MF.getTarget();
470 RBI = MF.getSubtarget().getRegBankInfo();
471 MRI = &MF.getRegInfo();
472
473 const MachineFunctionProperties &Props = MF.getProperties();
474 const bool isFunctionFailedISel = Props.hasFailedISel();
475
476 // If we're mid-GlobalISel and we already triggered the fallback path then
477 // it's expected that the MIR is somewhat broken but that's ok since we'll
478 // reset it and clear the FailedISel attribute in ResetMachineFunctions.
479 if (isFunctionFailedISel)
480 return true;
481
482 isFunctionRegBankSelected = Props.hasRegBankSelected();
483 isFunctionSelected = Props.hasSelected();
484 isFunctionTracksDebugUserValues = Props.hasTracksDebugUserValues();
485
486 if (PASS) {
487 auto *LISWrapper = PASS->getAnalysisIfAvailable<LiveIntervalsWrapperPass>();
488 LiveInts = LISWrapper ? &LISWrapper->getLIS() : nullptr;
489 // We don't want to verify LiveVariables if LiveIntervals is available.
490 auto *LVWrapper = PASS->getAnalysisIfAvailable<LiveVariablesWrapperPass>();
491 if (!LiveInts)
492 LiveVars = LVWrapper ? &LVWrapper->getLV() : nullptr;
493 auto *LSWrapper = PASS->getAnalysisIfAvailable<LiveStacksWrapperLegacy>();
494 LiveStks = LSWrapper ? &LSWrapper->getLS() : nullptr;
495 auto *SIWrapper = PASS->getAnalysisIfAvailable<SlotIndexesWrapperPass>();
496 Indexes = SIWrapper ? &SIWrapper->getSI() : nullptr;
497 }
498 if (MFAM) {
499 MachineFunction &Func = const_cast<MachineFunction &>(MF);
500 LiveInts = MFAM->getCachedResult<LiveIntervalsAnalysis>(Func);
501 if (!LiveInts)
502 LiveVars = MFAM->getCachedResult<LiveVariablesAnalysis>(Func);
503 // TODO: LiveStks = MFAM->getCachedResult<LiveStacksAnalysis>(Func);
504 Indexes = MFAM->getCachedResult<SlotIndexesAnalysis>(Func);
505 }
506
507 verifySlotIndexes();
508
509 verifyProperties(MF);
510
511 visitMachineFunctionBefore();
512 for (const MachineBasicBlock &MBB : MF) {
513 visitMachineBasicBlockBefore(&MBB);
514 // Keep track of the current bundle header.
515 const MachineInstr *CurBundle = nullptr;
516 // Do we expect the next instruction to be part of the same bundle?
517 bool InBundle = false;
518
519 for (const MachineInstr &MI : MBB.instrs()) {
520 if (MI.getParent() != &MBB) {
521 report("Bad instruction parent pointer", &MBB);
522 OS << "Instruction: " << MI;
523 continue;
524 }
525
526 // Check for consistent bundle flags.
527 if (InBundle && !MI.isBundledWithPred())
528 report("Missing BundledPred flag, "
529 "BundledSucc was set on predecessor",
530 &MI);
531 if (!InBundle && MI.isBundledWithPred())
532 report("BundledPred flag is set, "
533 "but BundledSucc not set on predecessor",
534 &MI);
535
536 // Is this a bundle header?
537 if (!MI.isInsideBundle()) {
538 if (CurBundle)
539 visitMachineBundleAfter(CurBundle);
540 CurBundle = &MI;
541 visitMachineBundleBefore(CurBundle);
542 } else if (!CurBundle)
543 report("No bundle header", &MI);
544 visitMachineInstrBefore(&MI);
545 for (unsigned I = 0, E = MI.getNumOperands(); I != E; ++I) {
546 const MachineOperand &Op = MI.getOperand(I);
547 if (Op.getParent() != &MI) {
548 // Make sure to use correct addOperand / removeOperand / ChangeTo
549 // functions when replacing operands of a MachineInstr.
550 report("Instruction has operand with wrong parent set", &MI);
551 }
552
553 visitMachineOperand(&Op, I);
554 }
555
556 // Was this the last bundled instruction?
557 InBundle = MI.isBundledWithSucc();
558 }
559 if (CurBundle)
560 visitMachineBundleAfter(CurBundle);
561 if (InBundle)
562 report("BundledSucc flag set on last instruction in block", &MBB.back());
563 visitMachineBasicBlockAfter(&MBB);
564 }
565 visitMachineFunctionAfter();
566
567 // Clean up.
568 regsLive.clear();
569 regsDefined.clear();
570 regsDead.clear();
571 regsKilled.clear();
572 regMasks.clear();
573 MBBInfoMap.clear();
574
575 return !ReportedErrs.hasError();
576}
577
578void MachineVerifier::report(const char *msg, const MachineFunction *MF) {
579 assert(MF);
580 OS << '\n';
581 if (ReportedErrs.increment()) {
582 if (Banner)
583 OS << "# " << Banner << '\n';
584
585 if (LiveInts != nullptr)
586 LiveInts->print(OS);
587 else
588 MF->print(OS, Indexes);
589 }
590
591 OS << "*** Bad machine code: " << msg << " ***\n"
592 << "- function: " << MF->getName() << '\n';
593}
594
595void MachineVerifier::report(const char *msg, const MachineBasicBlock *MBB) {
596 assert(MBB);
597 report(msg, MBB->getParent());
598 OS << "- basic block: " << printMBBReference(*MBB) << ' ' << MBB->getName()
599 << " (" << (const void *)MBB << ')';
600 if (Indexes)
601 OS << " [" << Indexes->getMBBStartIdx(MBB) << ';'
602 << Indexes->getMBBEndIdx(MBB) << ')';
603 OS << '\n';
604}
605
606void MachineVerifier::report(const char *msg, const MachineInstr *MI) {
607 assert(MI);
608 report(msg, MI->getParent());
609 OS << "- instruction: ";
610 if (Indexes && Indexes->hasIndex(*MI))
611 OS << Indexes->getInstructionIndex(*MI) << '\t';
612 MI->print(OS, /*IsStandalone=*/true);
613}
614
615void MachineVerifier::report(const char *msg, const MachineOperand *MO,
616 unsigned MONum, LLT MOVRegType) {
617 assert(MO);
618 report(msg, MO->getParent());
619 OS << "- operand " << MONum << ": ";
620 MO->print(OS, MOVRegType, TRI);
621 OS << '\n';
622}
623
624void MachineVerifier::report(const Twine &Msg, const MachineInstr *MI) {
625 report(Msg.str().c_str(), MI);
626}
627
628void MachineVerifier::report_context(SlotIndex Pos) const {
629 OS << "- at: " << Pos << '\n';
630}
631
632void MachineVerifier::report_context(const LiveInterval &LI) const {
633 OS << "- interval: " << LI << '\n';
634}
635
636void MachineVerifier::report_context(const LiveRange &LR,
637 VirtRegOrUnit VRegOrUnit,
638 LaneBitmask LaneMask) const {
639 report_context_liverange(LR);
640 report_context_vreg_regunit(VRegOrUnit);
641 if (LaneMask.any())
642 report_context_lanemask(LaneMask);
643}
644
645void MachineVerifier::report_context(const LiveRange::Segment &S) const {
646 OS << "- segment: " << S << '\n';
647}
648
649void MachineVerifier::report_context(const VNInfo &VNI) const {
650 OS << "- ValNo: " << VNI.id << " (def " << VNI.def << ")\n";
651}
652
653void MachineVerifier::report_context_liverange(const LiveRange &LR) const {
654 OS << "- liverange: " << LR << '\n';
655}
656
657void MachineVerifier::report_context(MCPhysReg PReg) const {
658 OS << "- p. register: " << printReg(PReg, TRI) << '\n';
659}
660
661void MachineVerifier::report_context_vreg(Register VReg) const {
662 OS << "- v. register: " << printReg(VReg, TRI) << '\n';
663}
664
665void MachineVerifier::report_context_vreg_regunit(
666 VirtRegOrUnit VRegOrUnit) const {
667 if (VRegOrUnit.isVirtualReg()) {
668 report_context_vreg(VRegOrUnit.asVirtualReg());
669 } else {
670 OS << "- regunit: " << printRegUnit(VRegOrUnit.asMCRegUnit(), TRI)
671 << '\n';
672 }
673}
674
675void MachineVerifier::report_context_lanemask(LaneBitmask LaneMask) const {
676 OS << "- lanemask: " << PrintLaneMask(LaneMask) << '\n';
677}
678
679void MachineVerifier::markReachable(const MachineBasicBlock *MBB) {
680 BBInfo &MInfo = MBBInfoMap[MBB];
681 if (!MInfo.reachable) {
682 MInfo.reachable = true;
683 for (const MachineBasicBlock *Succ : MBB->successors())
684 markReachable(Succ);
685 }
686}
687
688void MachineVerifier::visitMachineFunctionBefore() {
689 lastIndex = SlotIndex();
690 regsReserved = MRI->reservedRegsFrozen() ? MRI->getReservedRegs()
691 : TRI->getReservedRegs(*MF);
692
693 if (!MF->empty())
694 markReachable(&MF->front());
695
696 // Build a set of the basic blocks in the function.
697 FunctionBlocks.clear();
698 for (const auto &MBB : *MF) {
699 FunctionBlocks.insert(&MBB);
700 BBInfo &MInfo = MBBInfoMap[&MBB];
701
702 MInfo.Preds.insert_range(MBB.predecessors());
703 if (MInfo.Preds.size() != MBB.pred_size())
704 report("MBB has duplicate entries in its predecessor list.", &MBB);
705
706 MInfo.Succs.insert_range(MBB.successors());
707 if (MInfo.Succs.size() != MBB.succ_size())
708 report("MBB has duplicate entries in its successor list.", &MBB);
709 }
710
711 // Check that the register use lists are sane.
712 MRI->verifyUseLists();
713
714 if (!MF->empty()) {
715 verifyStackFrame();
716 verifyStackProtector();
717 }
718}
719
720void
721MachineVerifier::visitMachineBasicBlockBefore(const MachineBasicBlock *MBB) {
722 FirstTerminator = nullptr;
723 FirstNonPHI = nullptr;
724
725 if (!MF->getProperties().hasNoPHIs() && MRI->tracksLiveness()) {
726 // If this block has allocatable physical registers live-in, check that
727 // it is an entry block or landing pad.
728 for (const auto &LI : MBB->liveins()) {
729 if (isAllocatable(LI.PhysReg) && !MBB->isEHPad() &&
730 MBB->getIterator() != MBB->getParent()->begin() &&
732 report("MBB has allocatable live-in, but isn't entry, landing-pad, or "
733 "inlineasm-br-indirect-target.",
734 MBB);
735 report_context(LI.PhysReg);
736 }
737 }
738 }
739
740 if (MBB->isIRBlockAddressTaken()) {
742 report("ir-block-address-taken is associated with basic block not used by "
743 "a blockaddress.",
744 MBB);
745 }
746
747 // Count the number of landing pad successors.
749 for (const auto *succ : MBB->successors()) {
750 if (succ->isEHPad())
751 LandingPadSuccs.insert(succ);
752 if (!FunctionBlocks.count(succ))
753 report("MBB has successor that isn't part of the function.", MBB);
754 if (!MBBInfoMap[succ].Preds.count(MBB)) {
755 report("Inconsistent CFG", MBB);
756 OS << "MBB is not in the predecessor list of the successor "
757 << printMBBReference(*succ) << ".\n";
758 }
759 }
760
761 // Check the predecessor list.
762 for (const MachineBasicBlock *Pred : MBB->predecessors()) {
763 if (!FunctionBlocks.count(Pred))
764 report("MBB has predecessor that isn't part of the function.", MBB);
765 if (!MBBInfoMap[Pred].Succs.count(MBB)) {
766 report("Inconsistent CFG", MBB);
767 OS << "MBB is not in the successor list of the predecessor "
768 << printMBBReference(*Pred) << ".\n";
769 }
770 }
771
772 const MCAsmInfo &AsmInfo = TM->getMCAsmInfo();
773 const BasicBlock *BB = MBB->getBasicBlock();
774 const Function &F = MF->getFunction();
775 if (LandingPadSuccs.size() > 1 &&
778 !isScopedEHPersonality(classifyEHPersonality(F.getPersonalityFn())))
779 report("MBB has more than one landing pad successor", MBB);
780
781 // Call analyzeBranch. If it succeeds, there several more conditions to check.
782 MachineBasicBlock *TBB = nullptr, *FBB = nullptr;
784 if (!TII->analyzeBranch(*const_cast<MachineBasicBlock *>(MBB), TBB, FBB,
785 Cond)) {
786 // Ok, analyzeBranch thinks it knows what's going on with this block. Let's
787 // check whether its answers match up with reality.
788 if (!TBB && !FBB) {
789 // Block falls through to its successor.
790 if (!MBB->empty() && MBB->back().isBarrier() &&
791 !TII->isPredicated(MBB->back())) {
792 report("MBB exits via unconditional fall-through but ends with a "
793 "barrier instruction!", MBB);
794 }
795 if (!Cond.empty()) {
796 report("MBB exits via unconditional fall-through but has a condition!",
797 MBB);
798 }
799 } else if (TBB && !FBB && Cond.empty()) {
800 // Block unconditionally branches somewhere.
801 if (MBB->empty()) {
802 report("MBB exits via unconditional branch but doesn't contain "
803 "any instructions!", MBB);
804 } else if (!MBB->back().isBarrier()) {
805 report("MBB exits via unconditional branch but doesn't end with a "
806 "barrier instruction!", MBB);
807 } else if (!MBB->back().isTerminator()) {
808 report("MBB exits via unconditional branch but the branch isn't a "
809 "terminator instruction!", MBB);
810 }
811 } else if (TBB && !FBB && !Cond.empty()) {
812 // Block conditionally branches somewhere, otherwise falls through.
813 if (MBB->empty()) {
814 report("MBB exits via conditional branch/fall-through but doesn't "
815 "contain any instructions!", MBB);
816 } else if (MBB->back().isBarrier()) {
817 report("MBB exits via conditional branch/fall-through but ends with a "
818 "barrier instruction!", MBB);
819 } else if (!MBB->back().isTerminator()) {
820 report("MBB exits via conditional branch/fall-through but the branch "
821 "isn't a terminator instruction!", MBB);
822 }
823 } else if (TBB && FBB) {
824 // Block conditionally branches somewhere, otherwise branches
825 // somewhere else.
826 if (MBB->empty()) {
827 report("MBB exits via conditional branch/branch but doesn't "
828 "contain any instructions!", MBB);
829 } else if (!MBB->back().isBarrier()) {
830 report("MBB exits via conditional branch/branch but doesn't end with a "
831 "barrier instruction!", MBB);
832 } else if (!MBB->back().isTerminator()) {
833 report("MBB exits via conditional branch/branch but the branch "
834 "isn't a terminator instruction!", MBB);
835 }
836 if (Cond.empty()) {
837 report("MBB exits via conditional branch/branch but there's no "
838 "condition!", MBB);
839 }
840 } else {
841 report("analyzeBranch returned invalid data!", MBB);
842 }
843
844 // Now check that the successors match up with the answers reported by
845 // analyzeBranch.
846 if (TBB && !MBB->isSuccessor(TBB))
847 report("MBB exits via jump or conditional branch, but its target isn't a "
848 "CFG successor!",
849 MBB);
850 if (FBB && !MBB->isSuccessor(FBB))
851 report("MBB exits via conditional branch, but its target isn't a CFG "
852 "successor!",
853 MBB);
854
855 // There might be a fallthrough to the next block if there's either no
856 // unconditional true branch, or if there's a condition, and one of the
857 // branches is missing.
858 bool Fallthrough = !TBB || (!Cond.empty() && !FBB);
859
860 // A conditional fallthrough must be an actual CFG successor, not
861 // unreachable. (Conversely, an unconditional fallthrough might not really
862 // be a successor, because the block might end in unreachable.)
863 if (!Cond.empty() && !FBB) {
865 if (MBBI == MF->end()) {
866 report("MBB conditionally falls through out of function!", MBB);
867 } else if (!MBB->isSuccessor(&*MBBI))
868 report("MBB exits via conditional branch/fall-through but the CFG "
869 "successors don't match the actual successors!",
870 MBB);
871 }
872
873 // Verify that there aren't any extra un-accounted-for successors.
874 for (const MachineBasicBlock *SuccMBB : MBB->successors()) {
875 // If this successor is one of the branch targets, it's okay.
876 if (SuccMBB == TBB || SuccMBB == FBB)
877 continue;
878 // If we might have a fallthrough, and the successor is the fallthrough
879 // block, that's also ok.
880 if (Fallthrough && SuccMBB == MBB->getNextNode())
881 continue;
882 // Also accept successors which are for exception-handling or might be
883 // inlineasm_br targets.
884 if (SuccMBB->isEHPad() || SuccMBB->isInlineAsmBrIndirectTarget())
885 continue;
886 report("MBB has unexpected successors which are not branch targets, "
887 "fallthrough, EHPads, or inlineasm_br targets.",
888 MBB);
889 }
890 }
891
892 regsLive.clear();
893 if (MRI->tracksLiveness()) {
894 for (const auto &LI : MBB->liveins()) {
895 if (!LI.PhysReg.isPhysical()) {
896 report("MBB live-in list contains non-physical register", MBB);
897 continue;
898 }
899 regsLive.insert_range(TRI->subregs_inclusive(LI.PhysReg));
900 }
901 }
902
903 const MachineFrameInfo &MFI = MF->getFrameInfo();
904 BitVector PR = MFI.getPristineRegs(*MF);
905 for (unsigned I : PR.set_bits())
906 regsLive.insert_range(TRI->subregs_inclusive(I));
907
908 regsKilled.clear();
909 regsDefined.clear();
910
911 if (Indexes)
912 lastIndex = Indexes->getMBBStartIdx(MBB);
913}
914
915// This function gets called for all bundle headers, including normal
916// stand-alone unbundled instructions.
917void MachineVerifier::visitMachineBundleBefore(const MachineInstr *MI) {
918 if (Indexes && Indexes->hasIndex(*MI)) {
919 SlotIndex idx = Indexes->getInstructionIndex(*MI);
920 if (!(idx > lastIndex)) {
921 report("Instruction index out of order", MI);
922 OS << "Last instruction was at " << lastIndex << '\n';
923 }
924 lastIndex = idx;
925 }
926
927 // Ensure non-terminators don't follow terminators.
928 if (MI->isTerminator()) {
929 if (!FirstTerminator)
930 FirstTerminator = MI;
931 } else if (FirstTerminator) {
932 // For GlobalISel, G_INVOKE_REGION_START is a terminator that we allow to
933 // precede non-terminators.
934 if (FirstTerminator->getOpcode() != TargetOpcode::G_INVOKE_REGION_START) {
935 report("Non-terminator instruction after the first terminator", MI);
936 OS << "First terminator was:\t" << *FirstTerminator;
937 }
938 }
939}
940
941// The operands on an INLINEASM instruction must follow a template.
942// Verify that the flag operands make sense.
943void MachineVerifier::verifyInlineAsm(const MachineInstr *MI) {
944 // The first two operands on INLINEASM are the asm string and global flags.
945 if (MI->getNumOperands() < 2) {
946 report("Too few operands on inline asm", MI);
947 return;
948 }
949 if (!MI->getOperand(0).isSymbol())
950 report("Asm string must be an external symbol", MI);
951 if (!MI->getOperand(1).isImm())
952 report("Asm flags must be an immediate", MI);
953 // Allowed flags are Extra_HasSideEffects = 1, Extra_IsAlignStack = 2,
954 // Extra_AsmDialect = 4, Extra_MayLoad = 8, and Extra_MayStore = 16,
955 // and Extra_IsConvergent = 32, Extra_MayUnwind = 64.
956 if (!isUInt<7>(MI->getOperand(1).getImm()))
957 report("Unknown asm flags", &MI->getOperand(1), 1);
958
959 static_assert(InlineAsm::MIOp_FirstOperand == 2, "Asm format changed");
960
961 unsigned OpNo = InlineAsm::MIOp_FirstOperand;
962 unsigned NumOps;
963 for (unsigned e = MI->getNumOperands(); OpNo < e; OpNo += NumOps) {
964 const MachineOperand &MO = MI->getOperand(OpNo);
965 // There may be implicit ops after the fixed operands.
966 if (!MO.isImm())
967 break;
968 const InlineAsm::Flag F(MO.getImm());
969 NumOps = 1 + F.getNumOperandRegisters();
970 }
971
972 if (OpNo > MI->getNumOperands())
973 report("Missing operands in last group", MI);
974
975 // An optional MDNode follows the groups.
976 if (OpNo < MI->getNumOperands() && MI->getOperand(OpNo).isMetadata())
977 ++OpNo;
978
979 // All trailing operands must be implicit registers.
980 for (unsigned e = MI->getNumOperands(); OpNo < e; ++OpNo) {
981 const MachineOperand &MO = MI->getOperand(OpNo);
982 if (!MO.isReg() || !MO.isImplicit())
983 report("Expected implicit register after groups", &MO, OpNo);
984 }
985
986 if (MI->getOpcode() == TargetOpcode::INLINEASM_BR) {
987 const MachineBasicBlock *MBB = MI->getParent();
988
989 for (unsigned i = InlineAsm::MIOp_FirstOperand, e = MI->getNumOperands();
990 i != e; ++i) {
991 const MachineOperand &MO = MI->getOperand(i);
992
993 if (!MO.isMBB())
994 continue;
995
996 // Check the successor & predecessor lists look ok, assume they are
997 // not. Find the indirect target without going through the successors.
998 const MachineBasicBlock *IndirectTargetMBB = MO.getMBB();
999 if (!IndirectTargetMBB) {
1000 report("INLINEASM_BR indirect target does not exist", &MO, i);
1001 break;
1002 }
1003
1004 if (!MBB->isSuccessor(IndirectTargetMBB))
1005 report("INLINEASM_BR indirect target missing from successor list", &MO,
1006 i);
1007
1008 if (!IndirectTargetMBB->isPredecessor(MBB))
1009 report("INLINEASM_BR indirect target predecessor list missing parent",
1010 &MO, i);
1011 }
1012 }
1013}
1014
1015bool MachineVerifier::verifyAllRegOpsScalar(const MachineInstr &MI,
1016 const MachineRegisterInfo &MRI) {
1017 if (none_of(MI.explicit_operands(), [&MRI](const MachineOperand &Op) {
1018 if (!Op.isReg())
1019 return false;
1020 const auto Reg = Op.getReg();
1021 if (Reg.isPhysical())
1022 return false;
1023 return !MRI.getType(Reg).isScalar();
1024 }))
1025 return true;
1026 report("All register operands must have scalar types", &MI);
1027 return false;
1028}
1029
1030/// Check that types are consistent when two operands need to have the same
1031/// number of vector elements.
1032/// \return true if the types are valid.
1033bool MachineVerifier::verifyVectorElementMatch(LLT Ty0, LLT Ty1,
1034 const MachineInstr *MI) {
1035 if (Ty0.isVector() != Ty1.isVector()) {
1036 report("operand types must be all-vector or all-scalar", MI);
1037 // Generally we try to report as many issues as possible at once, but in
1038 // this case it's not clear what should we be comparing the size of the
1039 // scalar with: the size of the whole vector or its lane. Instead of
1040 // making an arbitrary choice and emitting not so helpful message, let's
1041 // avoid the extra noise and stop here.
1042 return false;
1043 }
1044
1045 if (Ty0.isVector() && Ty0.getElementCount() != Ty1.getElementCount()) {
1046 report("operand types must preserve number of vector elements", MI);
1047 return false;
1048 }
1049
1050 return true;
1051}
1052
/// Check that the G_INTRINSIC* opcode used for this instruction agrees with
/// the memory effects declared by the called intrinsic: the side-effect-free
/// opcode variants must only be used with intrinsics that do not access
/// memory, and vice versa.
/// \return false if a mismatch was reported.
bool MachineVerifier::verifyGIntrinsicSideEffects(const MachineInstr *MI) {
  auto Opcode = MI->getOpcode();
  // These two opcode variants are the ones that promise no side effects.
  bool NoSideEffects = Opcode == TargetOpcode::G_INTRINSIC ||
                       Opcode == TargetOpcode::G_INTRINSIC_CONVERGENT;
  unsigned IntrID = cast<GIntrinsic>(MI)->getIntrinsicID();
  // Only check IDs in the known intrinsic range (0 is not_intrinsic).
  if (IntrID != 0 && IntrID < Intrinsic::num_intrinsics) {
    // NOTE(review): this excerpt appears to be missing the line declaring
    // `Attrs` (presumably an Intrinsic attribute lookup) — verify upstream.
        MF->getFunction().getContext(), static_cast<Intrinsic::ID>(IntrID));
    bool DeclHasSideEffects = !Attrs.getMemoryEffects().doesNotAccessMemory();
    if (NoSideEffects && DeclHasSideEffects) {
      report(Twine(TII->getName(Opcode),
                   " used with intrinsic that accesses memory"),
             MI);
      return false;
    }
    if (!NoSideEffects && !DeclHasSideEffects) {
      report(Twine(TII->getName(Opcode), " used with readnone intrinsic"), MI);
      return false;
    }
  }

  return true;
}
1076
/// Check that the G_INTRINSIC* opcode used for this instruction agrees with
/// the convergence attribute of the called intrinsic: the non-convergent
/// opcode variants must only be used with non-convergent intrinsics, and the
/// convergent variants only with convergent intrinsics.
/// \return false if a mismatch was reported.
bool MachineVerifier::verifyGIntrinsicConvergence(const MachineInstr *MI) {
  auto Opcode = MI->getOpcode();
  // These two opcode variants are the non-convergent ones.
  bool NotConvergent = Opcode == TargetOpcode::G_INTRINSIC ||
                       Opcode == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS;
  unsigned IntrID = cast<GIntrinsic>(MI)->getIntrinsicID();
  // Only check IDs in the known intrinsic range (0 is not_intrinsic).
  if (IntrID != 0 && IntrID < Intrinsic::num_intrinsics) {
    // NOTE(review): this excerpt appears to be missing the line declaring
    // `Attrs` (presumably an Intrinsic attribute lookup) — verify upstream.
        MF->getFunction().getContext(), static_cast<Intrinsic::ID>(IntrID));
    bool DeclIsConvergent = Attrs.hasAttribute(Attribute::Convergent);
    if (NotConvergent && DeclIsConvergent) {
      report(Twine(TII->getName(Opcode), " used with a convergent intrinsic"),
             MI);
      return false;
    }
    if (!NotConvergent && !DeclIsConvergent) {
      report(
          Twine(TII->getName(Opcode), " used with a non-convergent intrinsic"),
          MI);
      return false;
    }
  }

  return true;
}
1101
1102void MachineVerifier::verifyPreISelGenericInstruction(const MachineInstr *MI) {
1103 if (isFunctionSelected)
1104 report("Unexpected generic instruction in a Selected function", MI);
1105
1106 const MCInstrDesc &MCID = MI->getDesc();
1107 unsigned NumOps = MI->getNumOperands();
1108
1109 // Branches must reference a basic block if they are not indirect
1110 if (MI->isBranch() && !MI->isIndirectBranch()) {
1111 bool HasMBB = false;
1112 for (const MachineOperand &Op : MI->operands()) {
1113 if (Op.isMBB()) {
1114 HasMBB = true;
1115 break;
1116 }
1117 }
1118
1119 if (!HasMBB) {
1120 report("Branch instruction is missing a basic block operand or "
1121 "isIndirectBranch property",
1122 MI);
1123 }
1124 }
1125
1126 // Check types.
1128 for (unsigned I = 0, E = std::min(MCID.getNumOperands(), NumOps);
1129 I != E; ++I) {
1130 if (!MCID.operands()[I].isGenericType())
1131 continue;
1132 // Generic instructions specify type equality constraints between some of
1133 // their operands. Make sure these are consistent.
1134 size_t TypeIdx = MCID.operands()[I].getGenericTypeIndex();
1135 Types.resize(std::max(TypeIdx + 1, Types.size()));
1136
1137 const MachineOperand *MO = &MI->getOperand(I);
1138 if (!MO->isReg()) {
1139 report("generic instruction must use register operands", MI);
1140 continue;
1141 }
1142
1143 LLT OpTy = MRI->getType(MO->getReg());
1144 // Don't report a type mismatch if there is no actual mismatch, only a
1145 // type missing, to reduce noise:
1146 if (OpTy.isValid()) {
1147 // Only the first valid type for a type index will be printed: don't
1148 // overwrite it later so it's always clear which type was expected:
1149 if (!Types[TypeIdx].isValid())
1150 Types[TypeIdx] = OpTy;
1151 else if (Types[TypeIdx] != OpTy)
1152 report("Type mismatch in generic instruction", MO, I, OpTy);
1153 } else {
1154 // Generic instructions must have types attached to their operands.
1155 report("Generic instruction is missing a virtual register type", MO, I);
1156 }
1157 }
1158
1159 // Generic opcodes must not have physical register operands.
1160 for (unsigned I = 0; I < MI->getNumOperands(); ++I) {
1161 const MachineOperand *MO = &MI->getOperand(I);
1162 if (MO->isReg() && MO->getReg().isPhysical())
1163 report("Generic instruction cannot have physical register", MO, I);
1164 }
1165
1166 // Avoid out of bounds in checks below. This was already reported earlier.
1167 if (MI->getNumOperands() < MCID.getNumOperands())
1168 return;
1169
1171 if (!TII->verifyInstruction(*MI, ErrorInfo))
1172 report(ErrorInfo.data(), MI);
1173
1174 // Verify properties of various specific instruction types
1175 unsigned Opc = MI->getOpcode();
1176 switch (Opc) {
1177 case TargetOpcode::G_ASSERT_SEXT:
1178 case TargetOpcode::G_ASSERT_ZEXT: {
1179 std::string OpcName =
1180 Opc == TargetOpcode::G_ASSERT_ZEXT ? "G_ASSERT_ZEXT" : "G_ASSERT_SEXT";
1181 if (!MI->getOperand(2).isImm()) {
1182 report(Twine(OpcName, " expects an immediate operand #2"), MI);
1183 break;
1184 }
1185
1186 Register Dst = MI->getOperand(0).getReg();
1187 Register Src = MI->getOperand(1).getReg();
1188 LLT SrcTy = MRI->getType(Src);
1189 int64_t Imm = MI->getOperand(2).getImm();
1190 if (Imm <= 0) {
1191 report(Twine(OpcName, " size must be >= 1"), MI);
1192 break;
1193 }
1194
1195 if (Imm >= SrcTy.getScalarSizeInBits()) {
1196 report(Twine(OpcName, " size must be less than source bit width"), MI);
1197 break;
1198 }
1199
1200 const RegisterBank *SrcRB = RBI->getRegBank(Src, *MRI, *TRI);
1201 const RegisterBank *DstRB = RBI->getRegBank(Dst, *MRI, *TRI);
1202
1203 // Allow only the source bank to be set.
1204 if ((SrcRB && DstRB && SrcRB != DstRB) || (DstRB && !SrcRB)) {
1205 report(Twine(OpcName, " cannot change register bank"), MI);
1206 break;
1207 }
1208
1209 // Don't allow a class change. Do allow member class->regbank.
1210 const TargetRegisterClass *DstRC = MRI->getRegClassOrNull(Dst);
1211 if (DstRC && DstRC != MRI->getRegClassOrNull(Src)) {
1212 report(
1213 Twine(OpcName, " source and destination register classes must match"),
1214 MI);
1215 break;
1216 }
1217
1218 break;
1219 }
1220
1221 case TargetOpcode::G_CONSTANT:
1222 case TargetOpcode::G_FCONSTANT: {
1223 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1224 if (DstTy.isVector())
1225 report("Instruction cannot use a vector result type", MI);
1226
1227 if (MI->getOpcode() == TargetOpcode::G_CONSTANT) {
1228 if (!MI->getOperand(1).isCImm()) {
1229 report("G_CONSTANT operand must be cimm", MI);
1230 break;
1231 }
1232
1233 const ConstantInt *CI = MI->getOperand(1).getCImm();
1234 if (CI->getBitWidth() != DstTy.getSizeInBits())
1235 report("inconsistent constant size", MI);
1236 } else {
1237 if (!MI->getOperand(1).isFPImm()) {
1238 report("G_FCONSTANT operand must be fpimm", MI);
1239 break;
1240 }
1241 const ConstantFP *CF = MI->getOperand(1).getFPImm();
1242
1244 DstTy.getSizeInBits()) {
1245 report("inconsistent constant size", MI);
1246 }
1247 }
1248
1249 break;
1250 }
1251 case TargetOpcode::G_LOAD:
1252 case TargetOpcode::G_STORE:
1253 case TargetOpcode::G_ZEXTLOAD:
1254 case TargetOpcode::G_SEXTLOAD:
1255 case TargetOpcode::G_FPEXTLOAD:
1256 case TargetOpcode::G_FPTRUNCSTORE: {
1257 LLT ValTy = MRI->getType(MI->getOperand(0).getReg());
1258 LLT PtrTy = MRI->getType(MI->getOperand(1).getReg());
1259 if (!PtrTy.isPointer())
1260 report("Generic memory instruction must access a pointer", MI);
1261
1262 // Generic loads and stores must have a single MachineMemOperand
1263 // describing that access.
1264 if (!MI->hasOneMemOperand()) {
1265 report("Generic instruction accessing memory must have one mem operand",
1266 MI);
1267 } else {
1268 const MachineMemOperand &MMO = **MI->memoperands_begin();
1269 if (isa<GExtLoad>(*MI)) {
1271 ValTy.getSizeInBits()))
1272 report("Generic extload must have a narrower memory type", MI);
1273 } else if (isa<GFPTruncStore>(*MI)) {
1275 ValTy.getSizeInBits()))
1276 report("Generic truncstore must have a narrower memory type", MI);
1277 } else if (MI->getOpcode() == TargetOpcode::G_LOAD) {
1279 ValTy.getSizeInBytes()))
1280 report("load memory size cannot exceed result size", MI);
1281
1282 if (MMO.getRanges()) {
1283 ConstantInt *i =
1285 const LLT RangeTy = LLT::scalar(i->getIntegerType()->getBitWidth());
1286 const LLT MemTy = MMO.getMemoryType();
1287 if (MemTy.getScalarType() != RangeTy ||
1288 ValTy.isScalar() != MemTy.isScalar() ||
1289 (ValTy.isVector() &&
1290 ValTy.getNumElements() != MemTy.getNumElements())) {
1291 report("range is incompatible with the result type", MI);
1292 }
1293 }
1294 } else if (MI->getOpcode() == TargetOpcode::G_STORE) {
1296 MMO.getSize().getValue()))
1297 report("store memory size cannot exceed value size", MI);
1298 }
1299
1300 const AtomicOrdering Order = MMO.getSuccessOrdering();
1301 if (isa<GAnyStore>(*MI)) {
1302 if (Order == AtomicOrdering::Acquire ||
1304 report("atomic store cannot use acquire ordering", MI);
1305
1306 } else {
1307 if (Order == AtomicOrdering::Release ||
1309 report("atomic load cannot use release ordering", MI);
1310 }
1311 }
1312
1313 break;
1314 }
1315 case TargetOpcode::G_PHI: {
1316 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1317 if (!DstTy.isValid() || !all_of(drop_begin(MI->operands()),
1318 [this, &DstTy](const MachineOperand &MO) {
1319 if (!MO.isReg())
1320 return true;
1321 LLT Ty = MRI->getType(MO.getReg());
1322 if (!Ty.isValid() || (Ty != DstTy))
1323 return false;
1324 return true;
1325 }))
1326 report("Generic Instruction G_PHI has operands with incompatible/missing "
1327 "types",
1328 MI);
1329 break;
1330 }
1331 case TargetOpcode::G_BITCAST: {
1332 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1333 LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
1334 if (!DstTy.isValid() || !SrcTy.isValid())
1335 break;
1336
1337 if (SrcTy.isPointer() != DstTy.isPointer())
1338 report("bitcast cannot convert between pointers and other types", MI);
1339
1340 if (SrcTy.getSizeInBits() != DstTy.getSizeInBits())
1341 report("bitcast sizes must match", MI);
1342
1343 bool SameType = SrcTy.getKind() == DstTy.getKind();
1344 if (SameType && SrcTy.isPointerOrPointerVector())
1345 SameType &= SrcTy.getAddressSpace() == DstTy.getAddressSpace();
1346
1347 SameType &= SrcTy.getScalarSizeInBits() == DstTy.getScalarSizeInBits();
1348
1349 if (SameType && SrcTy.isVector())
1350 SameType &= SrcTy.getElementCount() == DstTy.getElementCount();
1351 if (SameType && SrcTy.isFloatOrFloatVector())
1352 SameType &= SrcTy.getFpSemantics() == DstTy.getFpSemantics();
1353
1354 if (SameType)
1355 report("bitcast must change the type", MI);
1356
1357 break;
1358 }
1359 case TargetOpcode::G_INTTOPTR:
1360 case TargetOpcode::G_PTRTOINT:
1361 case TargetOpcode::G_ADDRSPACE_CAST: {
1362 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1363 LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
1364 if (!DstTy.isValid() || !SrcTy.isValid())
1365 break;
1366
1367 verifyVectorElementMatch(DstTy, SrcTy, MI);
1368
1369 DstTy = DstTy.getScalarType();
1370 SrcTy = SrcTy.getScalarType();
1371
1372 if (MI->getOpcode() == TargetOpcode::G_INTTOPTR) {
1373 if (!DstTy.isPointer())
1374 report("inttoptr result type must be a pointer", MI);
1375 if (SrcTy.isPointer())
1376 report("inttoptr source type must not be a pointer", MI);
1377 } else if (MI->getOpcode() == TargetOpcode::G_PTRTOINT) {
1378 if (!SrcTy.isPointer())
1379 report("ptrtoint source type must be a pointer", MI);
1380 if (DstTy.isPointer())
1381 report("ptrtoint result type must not be a pointer", MI);
1382 } else {
1383 assert(MI->getOpcode() == TargetOpcode::G_ADDRSPACE_CAST);
1384 if (!SrcTy.isPointer() || !DstTy.isPointer())
1385 report("addrspacecast types must be pointers", MI);
1386 else {
1387 if (SrcTy.getAddressSpace() == DstTy.getAddressSpace())
1388 report("addrspacecast must convert different address spaces", MI);
1389 }
1390 }
1391
1392 break;
1393 }
1394 case TargetOpcode::G_PTR_ADD: {
1395 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1396 LLT PtrTy = MRI->getType(MI->getOperand(1).getReg());
1397 LLT OffsetTy = MRI->getType(MI->getOperand(2).getReg());
1398 if (!DstTy.isValid() || !PtrTy.isValid() || !OffsetTy.isValid())
1399 break;
1400
1401 if (!PtrTy.isPointerOrPointerVector())
1402 report("gep first operand must be a pointer", MI);
1403
1404 if (OffsetTy.isPointerOrPointerVector())
1405 report("gep offset operand must not be a pointer", MI);
1406
1407 if (PtrTy.isPointerOrPointerVector()) {
1408 const DataLayout &DL = MF->getDataLayout();
1409 unsigned AS = PtrTy.getAddressSpace();
1410 unsigned IndexSizeInBits = DL.getIndexSize(AS) * 8;
1411 if (OffsetTy.getScalarSizeInBits() != IndexSizeInBits) {
1412 report("gep offset operand must match index size for address space",
1413 MI);
1414 }
1415 }
1416
1417 // TODO: Is the offset allowed to be a scalar with a vector?
1418 break;
1419 }
1420 case TargetOpcode::G_PTRMASK: {
1421 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1422 LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
1423 LLT MaskTy = MRI->getType(MI->getOperand(2).getReg());
1424 if (!DstTy.isValid() || !SrcTy.isValid() || !MaskTy.isValid())
1425 break;
1426
1427 if (!DstTy.isPointerOrPointerVector())
1428 report("ptrmask result type must be a pointer", MI);
1429
1430 if (!MaskTy.getScalarType().isScalar())
1431 report("ptrmask mask type must be an integer", MI);
1432
1433 verifyVectorElementMatch(DstTy, MaskTy, MI);
1434 break;
1435 }
1436 case TargetOpcode::G_SEXT:
1437 case TargetOpcode::G_ZEXT:
1438 case TargetOpcode::G_ANYEXT:
1439 case TargetOpcode::G_TRUNC:
1440 case TargetOpcode::G_TRUNC_SSAT_S:
1441 case TargetOpcode::G_TRUNC_SSAT_U:
1442 case TargetOpcode::G_TRUNC_USAT_U:
1443 case TargetOpcode::G_FPEXT:
1444 case TargetOpcode::G_FPTRUNC: {
1445 // Number of operands and presense of types is already checked (and
1446 // reported in case of any issues), so no need to report them again. As
1447 // we're trying to report as many issues as possible at once, however, the
1448 // instructions aren't guaranteed to have the right number of operands or
1449 // types attached to them at this point
1450 assert(MCID.getNumOperands() == 2 && "Expected 2 operands G_*{EXT,TRUNC}");
1451 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1452 LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
1453 if (!DstTy.isValid() || !SrcTy.isValid())
1454 break;
1455
1457 report("Generic extend/truncate can not operate on pointers", MI);
1458
1459 verifyVectorElementMatch(DstTy, SrcTy, MI);
1460
1461 unsigned DstSize = DstTy.getScalarSizeInBits();
1462 unsigned SrcSize = SrcTy.getScalarSizeInBits();
1463 switch (MI->getOpcode()) {
1464 default:
1465 if (DstSize <= SrcSize)
1466 report("Generic extend has destination type no larger than source", MI);
1467 break;
1468 case TargetOpcode::G_TRUNC:
1469 case TargetOpcode::G_TRUNC_SSAT_S:
1470 case TargetOpcode::G_TRUNC_SSAT_U:
1471 case TargetOpcode::G_TRUNC_USAT_U:
1472 case TargetOpcode::G_FPTRUNC:
1473 if (DstSize >= SrcSize)
1474 report("Generic truncate has destination type no smaller than source",
1475 MI);
1476 break;
1477 }
1478 break;
1479 }
1480 case TargetOpcode::G_SELECT: {
1481 LLT SelTy = MRI->getType(MI->getOperand(0).getReg());
1482 LLT CondTy = MRI->getType(MI->getOperand(1).getReg());
1483 if (!SelTy.isValid() || !CondTy.isValid())
1484 break;
1485
1486 // Scalar condition select on a vector is valid.
1487 if (CondTy.isVector())
1488 verifyVectorElementMatch(SelTy, CondTy, MI);
1489 break;
1490 }
1491 case TargetOpcode::G_MERGE_VALUES: {
1492 // G_MERGE_VALUES should only be used to merge scalars into a larger scalar,
1493 // e.g. s2N = MERGE sN, sN
1494 // Merging multiple scalars into a vector is not allowed, should use
1495 // G_BUILD_VECTOR for that.
1496 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1497 LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
1498 if (DstTy.isVector() || SrcTy.isVector())
1499 report("G_MERGE_VALUES cannot operate on vectors", MI);
1500
1501 const unsigned NumOps = MI->getNumOperands();
1502 if (DstTy.getSizeInBits() != SrcTy.getSizeInBits() * (NumOps - 1))
1503 report("G_MERGE_VALUES result size is inconsistent", MI);
1504
1505 for (unsigned I = 2; I != NumOps; ++I) {
1506 if (MRI->getType(MI->getOperand(I).getReg()) != SrcTy)
1507 report("G_MERGE_VALUES source types do not match", MI);
1508 }
1509
1510 break;
1511 }
1512 case TargetOpcode::G_UNMERGE_VALUES: {
1513 unsigned NumDsts = MI->getNumOperands() - 1;
1514 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1515 for (unsigned i = 1; i < NumDsts; ++i) {
1516 if (MRI->getType(MI->getOperand(i).getReg()) != DstTy) {
1517 report("G_UNMERGE_VALUES destination types do not match", MI);
1518 break;
1519 }
1520 }
1521
1522 LLT SrcTy = MRI->getType(MI->getOperand(NumDsts).getReg());
1523 if (DstTy.isVector()) {
1524 // This case is the converse of G_CONCAT_VECTORS.
1525 if (!SrcTy.isVector() ||
1526 (SrcTy.getScalarType() != DstTy.getScalarType() &&
1527 !SrcTy.isPointerVector()) ||
1528 SrcTy.isScalableVector() != DstTy.isScalableVector() ||
1529 SrcTy.getSizeInBits() != NumDsts * DstTy.getSizeInBits())
1530 report("G_UNMERGE_VALUES source operand does not match vector "
1531 "destination operands",
1532 MI);
1533 } else if (SrcTy.isVector()) {
1534 // This case is the converse of G_BUILD_VECTOR, but relaxed to allow
1535 // mismatched types as long as the total size matches:
1536 // %0:_(s64), %1:_(s64) = G_UNMERGE_VALUES %2:_(<4 x s32>)
1537 if (SrcTy.getSizeInBits() != NumDsts * DstTy.getSizeInBits())
1538 report("G_UNMERGE_VALUES vector source operand does not match scalar "
1539 "destination operands",
1540 MI);
1541 } else {
1542 // This case is the converse of G_MERGE_VALUES.
1543 if (SrcTy.getSizeInBits() != NumDsts * DstTy.getSizeInBits()) {
1544 report("G_UNMERGE_VALUES scalar source operand does not match scalar "
1545 "destination operands",
1546 MI);
1547 }
1548 }
1549 break;
1550 }
1551 case TargetOpcode::G_BUILD_VECTOR: {
1552 // Source types must be scalars, dest type a vector. Total size of scalars
1553 // must match the dest vector size.
1554 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1555 LLT SrcEltTy = MRI->getType(MI->getOperand(1).getReg());
1556 if (!DstTy.isVector() || SrcEltTy.isVector()) {
1557 report("G_BUILD_VECTOR must produce a vector from scalar operands", MI);
1558 break;
1559 }
1560
1561 if (DstTy.getElementType() != SrcEltTy)
1562 report("G_BUILD_VECTOR result element type must match source type", MI);
1563
1564 if (DstTy.getNumElements() != MI->getNumOperands() - 1)
1565 report("G_BUILD_VECTOR must have an operand for each element", MI);
1566
1567 for (const MachineOperand &MO : llvm::drop_begin(MI->operands(), 2))
1568 if (MRI->getType(MI->getOperand(1).getReg()) != MRI->getType(MO.getReg()))
1569 report("G_BUILD_VECTOR source operand types are not homogeneous", MI);
1570
1571 break;
1572 }
1573 case TargetOpcode::G_BUILD_VECTOR_TRUNC: {
1574 // Source types must be scalars, dest type a vector. Scalar types must be
1575 // larger than the dest vector elt type, as this is a truncating operation.
1576 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1577 LLT SrcEltTy = MRI->getType(MI->getOperand(1).getReg());
1578 if (!DstTy.isVector() || SrcEltTy.isVector())
1579 report("G_BUILD_VECTOR_TRUNC must produce a vector from scalar operands",
1580 MI);
1581 for (const MachineOperand &MO : llvm::drop_begin(MI->operands(), 2))
1582 if (MRI->getType(MI->getOperand(1).getReg()) != MRI->getType(MO.getReg()))
1583 report("G_BUILD_VECTOR_TRUNC source operand types are not homogeneous",
1584 MI);
1585 if (SrcEltTy.getSizeInBits() <= DstTy.getElementType().getSizeInBits())
1586 report("G_BUILD_VECTOR_TRUNC source operand types are not larger than "
1587 "dest elt type",
1588 MI);
1589 break;
1590 }
1591 case TargetOpcode::G_CONCAT_VECTORS: {
1592 // Source types should be vectors, and total size should match the dest
1593 // vector size.
1594 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1595 LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
1596 if (!DstTy.isVector() || !SrcTy.isVector())
1597 report("G_CONCAT_VECTOR requires vector source and destination operands",
1598 MI);
1599
1600 if (MI->getNumOperands() < 3)
1601 report("G_CONCAT_VECTOR requires at least 2 source operands", MI);
1602
1603 for (const MachineOperand &MO : llvm::drop_begin(MI->operands(), 2))
1604 if (MRI->getType(MI->getOperand(1).getReg()) != MRI->getType(MO.getReg()))
1605 report("G_CONCAT_VECTOR source operand types are not homogeneous", MI);
1606 if (DstTy.getElementCount() !=
1607 SrcTy.getElementCount() * (MI->getNumOperands() - 1))
1608 report("G_CONCAT_VECTOR num dest and source elements should match", MI);
1609 break;
1610 }
1611 case TargetOpcode::G_ICMP:
1612 case TargetOpcode::G_FCMP: {
1613 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1614 LLT SrcTy = MRI->getType(MI->getOperand(2).getReg());
1615
1616 if ((DstTy.isVector() != SrcTy.isVector()) ||
1617 (DstTy.isVector() &&
1618 DstTy.getElementCount() != SrcTy.getElementCount()))
1619 report("Generic vector icmp/fcmp must preserve number of lanes", MI);
1620
1621 break;
1622 }
1623 case TargetOpcode::G_SCMP:
1624 case TargetOpcode::G_UCMP: {
1625 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1626 LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
1627
1628 if (SrcTy.isPointerOrPointerVector()) {
1629 report("Generic scmp/ucmp does not support pointers as operands", MI);
1630 break;
1631 }
1632
1633 if (DstTy.isPointerOrPointerVector()) {
1634 report("Generic scmp/ucmp does not support pointers as a result", MI);
1635 break;
1636 }
1637
1638 if (DstTy.getScalarSizeInBits() < 2) {
1639 report("Result type must be at least 2 bits wide", MI);
1640 break;
1641 }
1642
1643 if ((DstTy.isVector() != SrcTy.isVector()) ||
1644 (DstTy.isVector() &&
1645 DstTy.getElementCount() != SrcTy.getElementCount())) {
1646 report("Generic vector scmp/ucmp must preserve number of lanes", MI);
1647 break;
1648 }
1649
1650 break;
1651 }
1652 case TargetOpcode::G_EXTRACT: {
1653 const MachineOperand &SrcOp = MI->getOperand(1);
1654 if (!SrcOp.isReg()) {
1655 report("extract source must be a register", MI);
1656 break;
1657 }
1658
1659 const MachineOperand &OffsetOp = MI->getOperand(2);
1660 if (!OffsetOp.isImm()) {
1661 report("extract offset must be a constant", MI);
1662 break;
1663 }
1664
1665 unsigned DstSize = MRI->getType(MI->getOperand(0).getReg()).getSizeInBits();
1666 unsigned SrcSize = MRI->getType(SrcOp.getReg()).getSizeInBits();
1667 if (SrcSize == DstSize)
1668 report("extract source must be larger than result", MI);
1669
1670 if (DstSize + OffsetOp.getImm() > SrcSize)
1671 report("extract reads past end of register", MI);
1672 break;
1673 }
1674 case TargetOpcode::G_INSERT: {
1675 const MachineOperand &SrcOp = MI->getOperand(2);
1676 if (!SrcOp.isReg()) {
1677 report("insert source must be a register", MI);
1678 break;
1679 }
1680
1681 const MachineOperand &OffsetOp = MI->getOperand(3);
1682 if (!OffsetOp.isImm()) {
1683 report("insert offset must be a constant", MI);
1684 break;
1685 }
1686
1687 unsigned DstSize = MRI->getType(MI->getOperand(0).getReg()).getSizeInBits();
1688 unsigned SrcSize = MRI->getType(SrcOp.getReg()).getSizeInBits();
1689
1690 if (DstSize <= SrcSize)
1691 report("inserted size must be smaller than total register", MI);
1692
1693 if (SrcSize + OffsetOp.getImm() > DstSize)
1694 report("insert writes past end of register", MI);
1695
1696 break;
1697 }
1698 case TargetOpcode::G_JUMP_TABLE: {
1699 if (!MI->getOperand(1).isJTI())
1700 report("G_JUMP_TABLE source operand must be a jump table index", MI);
1701 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1702 if (!DstTy.isPointer())
1703 report("G_JUMP_TABLE dest operand must have a pointer type", MI);
1704 break;
1705 }
1706 case TargetOpcode::G_BRJT: {
1707 if (!MRI->getType(MI->getOperand(0).getReg()).isPointer())
1708 report("G_BRJT src operand 0 must be a pointer type", MI);
1709
1710 if (!MI->getOperand(1).isJTI())
1711 report("G_BRJT src operand 1 must be a jump table index", MI);
1712
1713 const auto &IdxOp = MI->getOperand(2);
1714 if (!IdxOp.isReg() || MRI->getType(IdxOp.getReg()).isPointer())
1715 report("G_BRJT src operand 2 must be a scalar reg type", MI);
1716 break;
1717 }
1718 case TargetOpcode::G_INTRINSIC:
1719 case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
1720 case TargetOpcode::G_INTRINSIC_CONVERGENT:
1721 case TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS: {
1722 // TODO: Should verify number of def and use operands, but the current
1723 // interface requires passing in IR types for mangling.
1724 const MachineOperand &IntrIDOp = MI->getOperand(MI->getNumExplicitDefs());
1725 if (!IntrIDOp.isIntrinsicID()) {
1726 report("G_INTRINSIC first src operand must be an intrinsic ID", MI);
1727 break;
1728 }
1729
1730 if (!verifyGIntrinsicSideEffects(MI))
1731 break;
1732 if (!verifyGIntrinsicConvergence(MI))
1733 break;
1734
1735 break;
1736 }
1737 case TargetOpcode::G_SEXT_INREG: {
1738 if (!MI->getOperand(2).isImm()) {
1739 report("G_SEXT_INREG expects an immediate operand #2", MI);
1740 break;
1741 }
1742
1743 LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
1744 int64_t Imm = MI->getOperand(2).getImm();
1745 if (Imm <= 0)
1746 report("G_SEXT_INREG size must be >= 1", MI);
1747 if (Imm >= SrcTy.getScalarSizeInBits())
1748 report("G_SEXT_INREG size must be less than source bit width", MI);
1749 break;
1750 }
1751 case TargetOpcode::G_BSWAP: {
1752 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1753 if (DstTy.getScalarSizeInBits() % 16 != 0)
1754 report("G_BSWAP size must be a multiple of 16 bits", MI);
1755 break;
1756 }
1757 case TargetOpcode::G_VSCALE: {
1758 if (!MI->getOperand(1).isCImm()) {
1759 report("G_VSCALE operand must be cimm", MI);
1760 break;
1761 }
1762 if (MI->getOperand(1).getCImm()->isZero()) {
1763 report("G_VSCALE immediate cannot be zero", MI);
1764 break;
1765 }
1766 break;
1767 }
1768 case TargetOpcode::G_STEP_VECTOR: {
1769 if (!MI->getOperand(1).isCImm()) {
1770 report("operand must be cimm", MI);
1771 break;
1772 }
1773
1774 if (!MI->getOperand(1).getCImm()->getValue().isStrictlyPositive()) {
1775 report("step must be > 0", MI);
1776 break;
1777 }
1778
1779 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1780 if (!DstTy.isScalableVector()) {
1781 report("Destination type must be a scalable vector", MI);
1782 break;
1783 }
1784
1785 // <vscale x 2 x p0>
1786 if (!DstTy.getElementType().isScalar()) {
1787 report("Destination element type must be scalar", MI);
1788 break;
1789 }
1790
1791 if (MI->getOperand(1).getCImm()->getBitWidth() !=
1793 report("step bitwidth differs from result type element bitwidth", MI);
1794 break;
1795 }
1796 break;
1797 }
1798 case TargetOpcode::G_INSERT_SUBVECTOR: {
1799 const MachineOperand &Src0Op = MI->getOperand(1);
1800 if (!Src0Op.isReg()) {
1801 report("G_INSERT_SUBVECTOR first source must be a register", MI);
1802 break;
1803 }
1804
1805 const MachineOperand &Src1Op = MI->getOperand(2);
1806 if (!Src1Op.isReg()) {
1807 report("G_INSERT_SUBVECTOR second source must be a register", MI);
1808 break;
1809 }
1810
1811 const MachineOperand &IndexOp = MI->getOperand(3);
1812 if (!IndexOp.isImm()) {
1813 report("G_INSERT_SUBVECTOR index must be an immediate", MI);
1814 break;
1815 }
1816
1817 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1818 LLT Src1Ty = MRI->getType(Src1Op.getReg());
1819
1820 if (!DstTy.isVector()) {
1821 report("Destination type must be a vector", MI);
1822 break;
1823 }
1824
1825 if (!Src1Ty.isVector()) {
1826 report("Second source must be a vector", MI);
1827 break;
1828 }
1829
1830 if (DstTy.getElementType() != Src1Ty.getElementType()) {
1831 report("Element type of vectors must be the same", MI);
1832 break;
1833 }
1834
1835 if (Src1Ty.isScalable() != DstTy.isScalable()) {
1836 report("Vector types must both be fixed or both be scalable", MI);
1837 break;
1838 }
1839
1841 DstTy.getElementCount())) {
1842 report("Second source must be smaller than destination vector", MI);
1843 break;
1844 }
1845
1846 uint64_t Idx = IndexOp.getImm();
1847 uint64_t Src1MinLen = Src1Ty.getElementCount().getKnownMinValue();
1848 if (IndexOp.getImm() % Src1MinLen != 0) {
1849 report("Index must be a multiple of the second source vector's "
1850 "minimum vector length",
1851 MI);
1852 break;
1853 }
1854
1855 uint64_t DstMinLen = DstTy.getElementCount().getKnownMinValue();
1856 if (Idx >= DstMinLen || Idx + Src1MinLen > DstMinLen) {
1857 report("Subvector type and index must not cause insert to overrun the "
1858 "vector being inserted into",
1859 MI);
1860 break;
1861 }
1862
1863 break;
1864 }
1865 case TargetOpcode::G_EXTRACT_SUBVECTOR: {
1866 const MachineOperand &SrcOp = MI->getOperand(1);
1867 if (!SrcOp.isReg()) {
1868 report("G_EXTRACT_SUBVECTOR first source must be a register", MI);
1869 break;
1870 }
1871
1872 const MachineOperand &IndexOp = MI->getOperand(2);
1873 if (!IndexOp.isImm()) {
1874 report("G_EXTRACT_SUBVECTOR index must be an immediate", MI);
1875 break;
1876 }
1877
1878 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1879 LLT SrcTy = MRI->getType(SrcOp.getReg());
1880
1881 if (!DstTy.isVector()) {
1882 report("Destination type must be a vector", MI);
1883 break;
1884 }
1885
1886 if (!SrcTy.isVector()) {
1887 report("Source must be a vector", MI);
1888 break;
1889 }
1890
1891 if (DstTy.getElementType() != SrcTy.getElementType()) {
1892 report("Element type of vectors must be the same", MI);
1893 break;
1894 }
1895
1896 if (SrcTy.isScalable() != DstTy.isScalable()) {
1897 report("Vector types must both be fixed or both be scalable", MI);
1898 break;
1899 }
1900
1902 SrcTy.getElementCount())) {
1903 report("Destination vector must be smaller than source vector", MI);
1904 break;
1905 }
1906
1907 uint64_t Idx = IndexOp.getImm();
1908 uint64_t DstMinLen = DstTy.getElementCount().getKnownMinValue();
1909 if (Idx % DstMinLen != 0) {
1910 report("Index must be a multiple of the destination vector's minimum "
1911 "vector length",
1912 MI);
1913 break;
1914 }
1915
1916 uint64_t SrcMinLen = SrcTy.getElementCount().getKnownMinValue();
1917 if (Idx >= SrcMinLen || Idx + DstMinLen > SrcMinLen) {
1918 report("Destination type and index must not cause extract to overrun the "
1919 "source vector",
1920 MI);
1921 break;
1922 }
1923
1924 break;
1925 }
  case TargetOpcode::G_SHUFFLE_VECTOR: {
    // Operand 3 carries the shuffle mask as a constant, not a register.
    const MachineOperand &MaskOp = MI->getOperand(3);
    if (!MaskOp.isShuffleMask()) {
      report("Incorrect mask operand type for G_SHUFFLE_VECTOR", MI);
      break;
    }

    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    LLT Src0Ty = MRI->getType(MI->getOperand(1).getReg());
    LLT Src1Ty = MRI->getType(MI->getOperand(2).getReg());

    if (Src0Ty != Src1Ty)
      report("Source operands must be the same type", MI);

    if (Src0Ty.getScalarType() != DstTy.getScalarType()) {
      report("G_SHUFFLE_VECTOR cannot change element type", MI);
      break;
    }
    if (!Src0Ty.isVector()) {
      report("G_SHUFFLE_VECTOR must have vector src", MI);
      break;
    }
    if (!DstTy.isVector()) {
      report("G_SHUFFLE_VECTOR must have vector dst", MI);
      break;
    }

    // Don't check that all operands are vector because scalars are used in
    // place of 1 element vectors.
    int SrcNumElts = Src0Ty.getNumElements();
    int DstNumElts = DstTy.getNumElements();

    ArrayRef<int> MaskIdxes = MaskOp.getShuffleMask();

    // The mask supplies exactly one source index per destination element.
    if (static_cast<int>(MaskIdxes.size()) != DstNumElts)
      report("Wrong result type for shufflemask", MI);

    // Indices select from the concatenation of both sources (2 * SrcNumElts
    // lanes); negative indices denote an undef lane and are always legal.
    for (int Idx : MaskIdxes) {
      if (Idx < 0)
        continue;

      if (Idx >= 2 * SrcNumElts)
        report("Out of bounds shuffle index", MI);
    }

    break;
  }
1973
1974 case TargetOpcode::G_SPLAT_VECTOR: {
1975 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1976 LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
1977
1978 if (!DstTy.isScalableVector()) {
1979 report("Destination type must be a scalable vector", MI);
1980 break;
1981 }
1982
1983 if (!SrcTy.isScalar() && !SrcTy.isPointer()) {
1984 report("Source type must be a scalar or pointer", MI);
1985 break;
1986 }
1987
1989 SrcTy.getSizeInBits())) {
1990 report("Element type of the destination must be the same size or smaller "
1991 "than the source type",
1992 MI);
1993 break;
1994 }
1995
1996 break;
1997 }
  case TargetOpcode::G_EXTRACT_VECTOR_ELT: {
    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
    LLT IdxTy = MRI->getType(MI->getOperand(2).getReg());

    if (!DstTy.isScalar() && !DstTy.isPointer()) {
      report("Destination type must be a scalar or pointer", MI);
      break;
    }

    if (!SrcTy.isVector()) {
      report("First source must be a vector", MI);
      break;
    }

    // The index register must use the target's canonical vector-index width.
    auto TLI = MF->getSubtarget().getTargetLowering();
    if (IdxTy.getSizeInBits() != TLI->getVectorIdxWidth(MF->getDataLayout())) {
      report("Index type must match VectorIdxTy", MI);
      break;
    }

    break;
  }
  case TargetOpcode::G_INSERT_VECTOR_ELT: {
    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    LLT VecTy = MRI->getType(MI->getOperand(1).getReg());
    LLT ScaTy = MRI->getType(MI->getOperand(2).getReg());
    LLT IdxTy = MRI->getType(MI->getOperand(3).getReg());

    if (!DstTy.isVector()) {
      report("Destination type must be a vector", MI);
      break;
    }

    // Inserting an element does not change the vector type.
    if (VecTy != DstTy) {
      report("Destination type and vector type must match", MI);
      break;
    }

    if (!ScaTy.isScalar() && !ScaTy.isPointer()) {
      report("Inserted element must be a scalar or pointer", MI);
      break;
    }

    // The index register must use the target's canonical vector-index width.
    auto TLI = MF->getSubtarget().getTargetLowering();
    if (IdxTy.getSizeInBits() != TLI->getVectorIdxWidth(MF->getDataLayout())) {
      report("Index type must match VectorIdxTy", MI);
      break;
    }

    break;
  }
  case TargetOpcode::G_DYN_STACKALLOC: {
    // Operands: 0 = resulting pointer, 1 = allocation size (scalar reg),
    // 2 = alignment (immediate).
    const MachineOperand &DstOp = MI->getOperand(0);
    const MachineOperand &AllocOp = MI->getOperand(1);
    const MachineOperand &AlignOp = MI->getOperand(2);

    if (!DstOp.isReg() || !MRI->getType(DstOp.getReg()).isPointer()) {
      report("dst operand 0 must be a pointer type", MI);
      break;
    }

    if (!AllocOp.isReg() || !MRI->getType(AllocOp.getReg()).isScalar()) {
      report("src operand 1 must be a scalar reg type", MI);
      break;
    }

    if (!AlignOp.isImm()) {
      report("src operand 2 must be an immediate type", MI);
      break;
    }
    break;
  }
  case TargetOpcode::G_MEMCPY_INLINE:
  case TargetOpcode::G_MEMCPY:
  case TargetOpcode::G_MEMMOVE: {
    // These carry exactly two MMOs: [0] the store side, [1] the load side.
    ArrayRef<MachineMemOperand *> MMOs = MI->memoperands();
    if (MMOs.size() != 2) {
      report("memcpy/memmove must have 2 memory operands", MI);
      break;
    }

    if ((!MMOs[0]->isStore() || MMOs[0]->isLoad()) ||
        (MMOs[1]->isStore() || !MMOs[1]->isLoad())) {
      report("wrong memory operand types", MI);
      break;
    }

    if (MMOs[0]->getSize() != MMOs[1]->getSize())
      report("inconsistent memory operand sizes", MI);

    LLT DstPtrTy = MRI->getType(MI->getOperand(0).getReg());
    LLT SrcPtrTy = MRI->getType(MI->getOperand(1).getReg());

    if (!DstPtrTy.isPointer() || !SrcPtrTy.isPointer()) {
      report("memory instruction operand must be a pointer", MI);
      break;
    }

    // The MMO address spaces must agree with the pointer operand types.
    if (DstPtrTy.getAddressSpace() != MMOs[0]->getAddrSpace())
      report("inconsistent store address space", MI);
    if (SrcPtrTy.getAddressSpace() != MMOs[1]->getAddrSpace())
      report("inconsistent load address space", MI);

    // G_MEMCPY_INLINE has no 'tail' operand; the others carry it as operand 3.
    if (Opc != TargetOpcode::G_MEMCPY_INLINE)
      if (!MI->getOperand(3).isImm() || (MI->getOperand(3).getImm() & ~1LL))
        report("'tail' flag (operand 3) must be an immediate 0 or 1", MI);

    break;
  }
  case TargetOpcode::G_BZERO:
  case TargetOpcode::G_MEMSET: {
    // Single MMO describing the stored range.
    ArrayRef<MachineMemOperand *> MMOs = MI->memoperands();
    std::string Name = Opc == TargetOpcode::G_MEMSET ? "memset" : "bzero";
    if (MMOs.size() != 1) {
      report(Twine(Name, " must have 1 memory operand"), MI);
      break;
    }

    if ((!MMOs[0]->isStore() || MMOs[0]->isLoad())) {
      report(Twine(Name, " memory operand must be a store"), MI);
      break;
    }

    LLT DstPtrTy = MRI->getType(MI->getOperand(0).getReg());
    if (!DstPtrTy.isPointer()) {
      report(Twine(Name, " operand must be a pointer"), MI);
      break;
    }

    if (DstPtrTy.getAddressSpace() != MMOs[0]->getAddrSpace())
      report("inconsistent " + Twine(Name, " address space"), MI);

    // 'tail' flag is the last operand for both opcodes.
    if (!MI->getOperand(MI->getNumOperands() - 1).isImm() ||
        (MI->getOperand(MI->getNumOperands() - 1).getImm() & ~1LL))
      report("'tail' flag (last operand) must be an immediate 0 or 1", MI);

    break;
  }
2137 case TargetOpcode::G_UBSANTRAP: {
2138 const MachineOperand &KindOp = MI->getOperand(0);
2139 if (!MI->getOperand(0).isImm()) {
2140 report("Crash kind must be an immediate", &KindOp, 0);
2141 break;
2142 }
2143 int64_t Kind = MI->getOperand(0).getImm();
2144 if (!isInt<8>(Kind))
2145 report("Crash kind must be 8 bit wide", &KindOp, 0);
2146 break;
2147 }
  case TargetOpcode::G_VECREDUCE_SEQ_FADD:
  case TargetOpcode::G_VECREDUCE_SEQ_FMUL: {
    // Sequential reductions: operand 1 is a scalar start value, operand 2 is
    // the vector being reduced; the result is scalar.
    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    LLT Src1Ty = MRI->getType(MI->getOperand(1).getReg());
    LLT Src2Ty = MRI->getType(MI->getOperand(2).getReg());
    if (!DstTy.isScalar())
      report("Vector reduction requires a scalar destination type", MI);
    if (!Src1Ty.isScalar())
      report("Sequential FADD/FMUL vector reduction requires a scalar 1st operand", MI);
    if (!Src2Ty.isVector())
      report("Sequential FADD/FMUL vector reduction must have a vector 2nd operand", MI);
    break;
  }
  case TargetOpcode::G_VECREDUCE_FADD:
  case TargetOpcode::G_VECREDUCE_FMUL:
  case TargetOpcode::G_VECREDUCE_FMAX:
  case TargetOpcode::G_VECREDUCE_FMIN:
  case TargetOpcode::G_VECREDUCE_FMAXIMUM:
  case TargetOpcode::G_VECREDUCE_FMINIMUM:
  case TargetOpcode::G_VECREDUCE_ADD:
  case TargetOpcode::G_VECREDUCE_MUL:
  case TargetOpcode::G_VECREDUCE_AND:
  case TargetOpcode::G_VECREDUCE_OR:
  case TargetOpcode::G_VECREDUCE_XOR:
  case TargetOpcode::G_VECREDUCE_SMAX:
  case TargetOpcode::G_VECREDUCE_SMIN:
  case TargetOpcode::G_VECREDUCE_UMAX:
  case TargetOpcode::G_VECREDUCE_UMIN: {
    // Unordered reductions only constrain the result to be a scalar here.
    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    if (!DstTy.isScalar())
      report("Vector reduction requires a scalar destination type", MI);
    break;
  }

  case TargetOpcode::G_SBFX:
  case TargetOpcode::G_UBFX: {
    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    if (DstTy.isVector()) {
      report("Bitfield extraction is not supported on vectors", MI);
      break;
    }
    break;
  }
  case TargetOpcode::G_SHL:
  case TargetOpcode::G_LSHR:
  case TargetOpcode::G_ASHR:
  case TargetOpcode::G_ROTR:
  case TargetOpcode::G_ROTL: {
    // The shifted value and the shift amount must agree on vector-ness; the
    // element widths may still differ, which is checked elsewhere.
    LLT Src1Ty = MRI->getType(MI->getOperand(1).getReg());
    LLT Src2Ty = MRI->getType(MI->getOperand(2).getReg());
    if (Src1Ty.isVector() != Src2Ty.isVector()) {
      report("Shifts and rotates require operands to be either all scalars or "
             "all vectors",
             MI);
      break;
    }
    break;
  }
  case TargetOpcode::G_LLROUND:
  case TargetOpcode::G_LROUND: {
    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
    // Invalid operand types are diagnosed by the generic operand checks.
    if (!DstTy.isValid() || !SrcTy.isValid())
      break;
    if (SrcTy.isPointer() || DstTy.isPointer()) {
      StringRef Op = SrcTy.isPointer() ? "Source" : "Destination";
      report(Twine(Op, " operand must not be a pointer type"), MI);
    } else if (SrcTy.isScalar()) {
      verifyAllRegOpsScalar(*MI, *MRI);
      break;
    } else if (SrcTy.isVector()) {
      verifyVectorElementMatch(SrcTy, DstTy, MI);
      break;
    }
    break;
  }
2224 case TargetOpcode::G_IS_FPCLASS: {
2225 LLT DestTy = MRI->getType(MI->getOperand(0).getReg());
2226 LLT DestEltTy = DestTy.getScalarType();
2227 if (!DestEltTy.isScalar()) {
2228 report("Destination must be a scalar or vector of scalars", MI);
2229 break;
2230 }
2231 LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
2232 LLT SrcEltTy = SrcTy.getScalarType();
2233 if (!SrcEltTy.isScalar()) {
2234 report("Source must be a scalar or vector of scalars", MI);
2235 break;
2236 }
2237 if (!verifyVectorElementMatch(DestTy, SrcTy, MI))
2238 break;
2239 const MachineOperand &TestMO = MI->getOperand(2);
2240 if (!TestMO.isImm()) {
2241 report("floating-point class set (operand 2) must be an immediate", MI);
2242 break;
2243 }
2244 int64_t Test = TestMO.getImm();
2246 report("Incorrect floating-point class set (operand 2)", MI);
2247 break;
2248 }
2249 break;
2250 }
  case TargetOpcode::G_PREFETCH: {
    // Operand layout mirrors the llvm.prefetch intrinsic: addr, rw,
    // locality, cache type.
    const MachineOperand &AddrOp = MI->getOperand(0);
    if (!AddrOp.isReg() || !MRI->getType(AddrOp.getReg()).isPointer()) {
      report("addr operand must be a pointer", &AddrOp, 0);
      break;
    }
    const MachineOperand &RWOp = MI->getOperand(1);
    if (!RWOp.isImm() || (uint64_t)RWOp.getImm() >= 2) {
      report("rw operand must be an immediate 0-1", &RWOp, 1);
      break;
    }
    const MachineOperand &LocalityOp = MI->getOperand(2);
    if (!LocalityOp.isImm() || (uint64_t)LocalityOp.getImm() >= 4) {
      report("locality operand must be an immediate 0-3", &LocalityOp, 2);
      break;
    }
    const MachineOperand &CacheTypeOp = MI->getOperand(3);
    if (!CacheTypeOp.isImm() || (uint64_t)CacheTypeOp.getImm() >= 2) {
      report("cache type operand must be an immediate 0-1", &CacheTypeOp, 3);
      break;
    }
    break;
  }
  case TargetOpcode::G_ASSERT_ALIGN: {
    // Alignment hint must be a positive byte count.
    if (MI->getOperand(2).getImm() < 1)
      report("alignment immediate must be >= 1", MI);
    break;
  }
  case TargetOpcode::G_CONSTANT_POOL: {
    if (!MI->getOperand(1).isCPI())
      report("Src operand 1 must be a constant pool index", MI);
    if (!MRI->getType(MI->getOperand(0).getReg()).isPointer())
      report("Dst operand 0 must be a pointer", MI);
    break;
  }
  case TargetOpcode::G_PTRAUTH_GLOBAL_VALUE: {
    const MachineOperand &AddrOp = MI->getOperand(1);
    if (!AddrOp.isReg() || !MRI->getType(AddrOp.getReg()).isPointer())
      report("addr operand must be a pointer", &AddrOp, 1);
    break;
  }
  case TargetOpcode::G_SMIN:
  case TargetOpcode::G_SMAX:
  case TargetOpcode::G_UMIN:
  case TargetOpcode::G_UMAX: {
    const LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    if (DstTy.isPointerOrPointerVector())
      report("Generic smin/smax/umin/umax does not support pointer operands",
             MI);
    break;
  }
  default:
    break;
  }
2305}
2306
2307void MachineVerifier::visitMachineInstrBefore(const MachineInstr *MI) {
2308 const MCInstrDesc &MCID = MI->getDesc();
2309 if (MI->getNumOperands() < MCID.getNumOperands()) {
2310 report("Too few operands", MI);
2311 OS << MCID.getNumOperands() << " operands expected, but "
2312 << MI->getNumOperands() << " given.\n";
2313 }
2314
2315 if (MI->getFlag(MachineInstr::NoConvergent) && !MCID.isConvergent())
2316 report("NoConvergent flag expected only on convergent instructions.", MI);
2317
2318 if (MI->isPHI()) {
2319 if (MF->getProperties().hasNoPHIs())
2320 report("Found PHI instruction with NoPHIs property set", MI);
2321
2322 if (FirstNonPHI)
2323 report("Found PHI instruction after non-PHI", MI);
2324 } else if (FirstNonPHI == nullptr)
2325 FirstNonPHI = MI;
2326
2327 // Check the tied operands.
2328 if (MI->isInlineAsm())
2329 verifyInlineAsm(MI);
2330
2331 // Check that unspillable terminators define a reg and have at most one use.
2332 if (TII->isUnspillableTerminator(MI)) {
2333 if (!MI->getOperand(0).isReg() || !MI->getOperand(0).isDef())
2334 report("Unspillable Terminator does not define a reg", MI);
2335 Register Def = MI->getOperand(0).getReg();
2336 if (Def.isVirtual() && !MF->getProperties().hasNoPHIs() &&
2337 std::distance(MRI->use_nodbg_begin(Def), MRI->use_nodbg_end()) > 1)
2338 report("Unspillable Terminator expected to have at most one use!", MI);
2339 }
2340
2341 // A fully-formed DBG_VALUE must have a location. Ignore partially formed
2342 // DBG_VALUEs: these are convenient to use in tests, but should never get
2343 // generated.
2344 if (MI->isDebugValue() && MI->getNumOperands() == 4)
2345 if (!MI->getDebugLoc())
2346 report("Missing DebugLoc for debug instruction", MI);
2347
2348 // Meta instructions should never be the subject of debug value tracking,
2349 // they don't create a value in the output program at all.
2350 if (MI->isMetaInstruction() && MI->peekDebugInstrNum())
2351 report("Metadata instruction should not have a value tracking number", MI);
2352
2353 // Check the MachineMemOperands for basic consistency.
2354 for (MachineMemOperand *Op : MI->memoperands()) {
2355 if (Op->isLoad() && !MI->mayLoad())
2356 report("Missing mayLoad flag", MI);
2357 if (Op->isStore() && !MI->mayStore())
2358 report("Missing mayStore flag", MI);
2359 }
2360
2361 // Debug values must not have a slot index.
2362 // Other instructions must have one, unless they are inside a bundle.
2363 if (LiveInts) {
2364 bool mapped = !LiveInts->isNotInMIMap(*MI);
2365 if (MI->isDebugOrPseudoInstr()) {
2366 if (mapped)
2367 report("Debug instruction has a slot index", MI);
2368 } else if (MI->isInsideBundle()) {
2369 if (mapped)
2370 report("Instruction inside bundle has a slot index", MI);
2371 } else {
2372 if (!mapped)
2373 report("Missing slot index", MI);
2374 }
2375 }
2376
2377 unsigned Opc = MCID.getOpcode();
2379 verifyPreISelGenericInstruction(MI);
2380 return;
2381 }
2382
2384 if (!TII->verifyInstruction(*MI, ErrorInfo))
2385 report(ErrorInfo.data(), MI);
2386
2387 // Verify properties of various specific instruction types
2388 switch (MI->getOpcode()) {
2389 case TargetOpcode::COPY: {
2390 const MachineOperand &DstOp = MI->getOperand(0);
2391 const MachineOperand &SrcOp = MI->getOperand(1);
2392 const Register SrcReg = SrcOp.getReg();
2393 const Register DstReg = DstOp.getReg();
2394
2395 LLT DstTy = MRI->getType(DstReg);
2396 LLT SrcTy = MRI->getType(SrcReg);
2397 if (SrcTy.isValid() && DstTy.isValid()) {
2398 // If both types are valid, check that the types are the same.
2399 if (SrcTy != DstTy) {
2400 report("Copy Instruction is illegal with mismatching types", MI);
2401 OS << "Def = " << DstTy << ", Src = " << SrcTy << '\n';
2402 }
2403
2404 break;
2405 }
2406
2407 if (!SrcTy.isValid() && !DstTy.isValid())
2408 break;
2409
2410 // If we have only one valid type, this is likely a copy between a virtual
2411 // and physical register.
2412 TypeSize SrcSize = TypeSize::getZero();
2413 TypeSize DstSize = TypeSize::getZero();
2414 if (SrcReg.isPhysical() && DstTy.isValid()) {
2415 const TargetRegisterClass *SrcRC =
2416 TRI->getMinimalPhysRegClassLLT(SrcReg, DstTy);
2417 if (!SrcRC)
2418 SrcSize = TRI->getRegSizeInBits(SrcReg, *MRI);
2419 } else {
2420 SrcSize = TRI->getRegSizeInBits(SrcReg, *MRI);
2421 }
2422
2423 if (DstReg.isPhysical() && SrcTy.isValid()) {
2424 const TargetRegisterClass *DstRC =
2425 TRI->getMinimalPhysRegClassLLT(DstReg, SrcTy);
2426 if (!DstRC)
2427 DstSize = TRI->getRegSizeInBits(DstReg, *MRI);
2428 } else {
2429 DstSize = TRI->getRegSizeInBits(DstReg, *MRI);
2430 }
2431
2432 // The next two checks allow COPY between physical and virtual registers,
2433 // when the virtual register has a scalable size and the physical register
2434 // has a fixed size. These checks allow COPY between *potentially*
2435 // mismatched sizes. However, once RegisterBankSelection occurs,
2436 // MachineVerifier should be able to resolve a fixed size for the scalable
2437 // vector, and at that point this function will know for sure whether the
2438 // sizes are mismatched and correctly report a size mismatch.
2439 if (SrcReg.isPhysical() && DstReg.isVirtual() && DstSize.isScalable() &&
2440 !SrcSize.isScalable())
2441 break;
2442 if (SrcReg.isVirtual() && DstReg.isPhysical() && SrcSize.isScalable() &&
2443 !DstSize.isScalable())
2444 break;
2445
2446 if (SrcSize.isNonZero() && DstSize.isNonZero() && SrcSize != DstSize) {
2447 if (!DstOp.getSubReg() && !SrcOp.getSubReg()) {
2448 report("Copy Instruction is illegal with mismatching sizes", MI);
2449 OS << "Def Size = " << DstSize << ", Src Size = " << SrcSize << '\n';
2450 }
2451 }
2452 break;
2453 }
2454 case TargetOpcode::COPY_LANEMASK: {
2455 const MachineOperand &DstOp = MI->getOperand(0);
2456 const MachineOperand &SrcOp = MI->getOperand(1);
2457 const MachineOperand &LaneMaskOp = MI->getOperand(2);
2458 const Register SrcReg = SrcOp.getReg();
2459 const LaneBitmask LaneMask = LaneMaskOp.getLaneMask();
2460 LaneBitmask SrcMaxLaneMask = LaneBitmask::getAll();
2461
2462 if (DstOp.getSubReg())
2463 report("COPY_LANEMASK must not use a subregister index", &DstOp, 0);
2464
2465 if (SrcOp.getSubReg())
2466 report("COPY_LANEMASK must not use a subregister index", &SrcOp, 1);
2467
2468 if (LaneMask.none())
2469 report("COPY_LANEMASK must read at least one lane", MI);
2470
2471 if (SrcReg.isPhysical()) {
2472 const TargetRegisterClass *SrcRC = TRI->getMinimalPhysRegClass(SrcReg);
2473 if (SrcRC)
2474 SrcMaxLaneMask = SrcRC->getLaneMask();
2475 } else {
2476 SrcMaxLaneMask = MRI->getMaxLaneMaskForVReg(SrcReg);
2477 }
2478
2479 // COPY_LANEMASK should be used only for partial copy. For full
2480 // copy, one should strictly use the COPY instruction.
2481 if (SrcMaxLaneMask == LaneMask)
2482 report("COPY_LANEMASK cannot be used to do full copy", MI);
2483
2484 // If LaneMask is greater than the SrcMaxLaneMask, it implies
2485 // COPY_LANEMASK is attempting to read from the lanes that
2486 // don't exists in the source register.
2487 if (SrcMaxLaneMask < LaneMask)
2488 report("COPY_LANEMASK attempts to read from the lanes that "
2489 "don't exist in the source register",
2490 MI);
2491
2492 break;
2493 }
2494 case TargetOpcode::STATEPOINT: {
2495 StatepointOpers SO(MI);
2496 if (!MI->getOperand(SO.getIDPos()).isImm() ||
2497 !MI->getOperand(SO.getNBytesPos()).isImm() ||
2498 !MI->getOperand(SO.getNCallArgsPos()).isImm()) {
2499 report("meta operands to STATEPOINT not constant!", MI);
2500 break;
2501 }
2502
2503 auto VerifyStackMapConstant = [&](unsigned Offset) {
2504 if (Offset >= MI->getNumOperands()) {
2505 report("stack map constant to STATEPOINT is out of range!", MI);
2506 return;
2507 }
2508 if (!MI->getOperand(Offset - 1).isImm() ||
2509 MI->getOperand(Offset - 1).getImm() != StackMaps::ConstantOp ||
2510 !MI->getOperand(Offset).isImm())
2511 report("stack map constant to STATEPOINT not well formed!", MI);
2512 };
2513 VerifyStackMapConstant(SO.getCCIdx());
2514 VerifyStackMapConstant(SO.getFlagsIdx());
2515 VerifyStackMapConstant(SO.getNumDeoptArgsIdx());
2516 VerifyStackMapConstant(SO.getNumGCPtrIdx());
2517 VerifyStackMapConstant(SO.getNumAllocaIdx());
2518 VerifyStackMapConstant(SO.getNumGcMapEntriesIdx());
2519
2520 // Verify that all explicit statepoint defs are tied to gc operands as
2521 // they are expected to be a relocation of gc operands.
2522 unsigned FirstGCPtrIdx = SO.getFirstGCPtrIdx();
2523 unsigned LastGCPtrIdx = SO.getNumAllocaIdx() - 2;
2524 for (unsigned Idx = 0; Idx < MI->getNumDefs(); Idx++) {
2525 unsigned UseOpIdx;
2526 if (!MI->isRegTiedToUseOperand(Idx, &UseOpIdx)) {
2527 report("STATEPOINT defs expected to be tied", MI);
2528 break;
2529 }
2530 if (UseOpIdx < FirstGCPtrIdx || UseOpIdx > LastGCPtrIdx) {
2531 report("STATEPOINT def tied to non-gc operand", MI);
2532 break;
2533 }
2534 }
2535
2536 // TODO: verify we have properly encoded deopt arguments
2537 } break;
2538 case TargetOpcode::INSERT_SUBREG: {
2539 unsigned InsertedSize;
2540 if (unsigned SubIdx = MI->getOperand(2).getSubReg())
2541 InsertedSize = TRI->getSubRegIdxSize(SubIdx);
2542 else
2543 InsertedSize = TRI->getRegSizeInBits(MI->getOperand(2).getReg(), *MRI);
2544 unsigned SubRegSize = TRI->getSubRegIdxSize(MI->getOperand(3).getImm());
2545 if (SubRegSize < InsertedSize) {
2546 report("INSERT_SUBREG expected inserted value to have equal or lesser "
2547 "size than the subreg it was inserted into", MI);
2548 break;
2549 }
2550 } break;
2551 case TargetOpcode::REG_SEQUENCE: {
2552 unsigned NumOps = MI->getNumOperands();
2553 if (!(NumOps & 1)) {
2554 report("Invalid number of operands for REG_SEQUENCE", MI);
2555 break;
2556 }
2557
2558 for (unsigned I = 1; I != NumOps; I += 2) {
2559 const MachineOperand &RegOp = MI->getOperand(I);
2560 const MachineOperand &SubRegOp = MI->getOperand(I + 1);
2561
2562 if (!RegOp.isReg())
2563 report("Invalid register operand for REG_SEQUENCE", &RegOp, I);
2564
2565 if (!SubRegOp.isImm() || SubRegOp.getImm() == 0 ||
2566 SubRegOp.getImm() >= TRI->getNumSubRegIndices()) {
2567 report("Invalid subregister index operand for REG_SEQUENCE",
2568 &SubRegOp, I + 1);
2569 }
2570 }
2571
2572 Register DstReg = MI->getOperand(0).getReg();
2573 if (DstReg.isPhysical())
2574 report("REG_SEQUENCE does not support physical register results", MI);
2575
2576 if (MI->getOperand(0).getSubReg())
2577 report("Invalid subreg result for REG_SEQUENCE", MI);
2578
2579 break;
2580 }
2581 }
2582}
2583
/// Verify one operand of its parent instruction against the constraints
/// encoded in the MCInstrDesc: def/use position, implicit flags, register
/// vs. immediate kind, tied-operand links, and the earlyClobber constraint.
void
MachineVerifier::visitMachineOperand(const MachineOperand *MO, unsigned MONum) {
  const MachineInstr *MI = MO->getParent();
  const MCInstrDesc &MCID = MI->getDesc();
  unsigned NumDefs = MCID.getNumDefs();
  // For PATCHPOINT, only operand 0 (and only when it is a register) counts
  // as an explicit def.
  if (MCID.getOpcode() == TargetOpcode::PATCHPOINT)
    NumDefs = (MONum == 0 && MO->isReg()) ? NumDefs : 0;

  // The first MCID.NumDefs operands must be explicit register defines
  if (MONum < NumDefs) {
    const MCOperandInfo &MCOI = MCID.operands()[MONum];
    if (!MO->isReg())
      report("Explicit definition must be a register", MO, MONum);
    else if (!MO->isDef() && !MCOI.isOptionalDef())
      report("Explicit definition marked as use", MO, MONum);
    else if (MO->isImplicit())
      report("Explicit definition marked as implicit", MO, MONum);
  } else if (MONum < MCID.getNumOperands()) {
    const MCOperandInfo &MCOI = MCID.operands()[MONum];
    // Don't check if it's the last operand in a variadic instruction. See,
    // e.g., LDM_RET in the arm back end. Check non-variadic operands only.
    bool IsOptional = MI->isVariadic() && MONum == MCID.getNumOperands() - 1;
    if (!IsOptional) {
      if (MO->isReg()) {
        if (MO->isDef() && !MCOI.isOptionalDef() && !MCID.variadicOpsAreDefs())
          report("Explicit operand marked as def", MO, MONum);
        if (MO->isImplicit())
          report("Explicit operand marked as implicit", MO, MONum);
      }

      // Check that an instruction has register operands only as expected.
      if (MCOI.OperandType == MCOI::OPERAND_REGISTER &&
          !MO->isReg() && !MO->isFI())
        report("Expected a register operand.", MO, MONum);
      if (MO->isReg()) {
        if (MCOI.OperandType == MCOI::OPERAND_IMMEDIATE ||
            (MCOI.OperandType == MCOI::OPERAND_PCREL &&
             !TII->isPCRelRegisterOperandLegal(*MO)))
          report("Expected a non-register operand.", MO, MONum);
      }
    }

    // If the descriptor declares a TIED_TO constraint, the operand's tie
    // flags must agree with it.
    int TiedTo = MCID.getOperandConstraint(MONum, MCOI::TIED_TO);
    if (TiedTo != -1) {
      if (!MO->isReg())
        report("Tied use must be a register", MO, MONum);
      else if (!MO->isTied())
        report("Operand should be tied", MO, MONum);
      else if (unsigned(TiedTo) != MI->findTiedOperandIdx(MONum))
        report("Tied def doesn't match MCInstrDesc", MO, MONum);
      else if (MO->getReg().isPhysical()) {
        const MachineOperand &MOTied = MI->getOperand(TiedTo);
        if (!MOTied.isReg())
          report("Tied counterpart must be a register", &MOTied, TiedTo);
        else if (MOTied.getReg().isPhysical() &&
                 MO->getReg() != MOTied.getReg())
          report("Tied physical registers must match.", &MOTied, TiedTo);
      }
    } else if (MO->isReg() && MO->isTied())
      report("Explicit operand should not be tied", MO, MONum);
  } else if (!MI->isVariadic()) {
    // ARM adds %reg0 operands to indicate predicates. We'll allow that.
    if (!MO->isValidExcessOperand())
      report("Extra explicit operand on non-variadic instruction", MO, MONum);
  }

  // Verify earlyClobber def operand
  if (MCID.getOperandConstraint(MONum, MCOI::EARLY_CLOBBER) != -1) {
    if (!MO->isReg())
      report("Early clobber must be a register", MI);
    if (!MO->isEarlyClobber())
      report("Missing earlyClobber flag", MI);
  }
2658 switch (MO->getType()) {
2660 // Verify debug flag on debug instructions. Check this first because reg0
2661 // indicates an undefined debug value.
2662 if (MI->isDebugInstr() && MO->isUse()) {
2663 if (!MO->isDebug())
2664 report("Register operand must be marked debug", MO, MONum);
2665 } else if (MO->isDebug()) {
2666 report("Register operand must not be marked debug", MO, MONum);
2667 }

    const Register Reg = MO->getReg();
    // reg0 (no register) needs no further checking.
    if (!Reg)
      return;
    if (MRI->tracksLiveness() && !MI->isDebugInstr())
      checkLiveness(MO, MONum);

    if (MO->isDef() && MO->isUndef() && !MO->getSubReg() &&
        MO->getReg().isVirtual()) // TODO: Apply to physregs too
      report("Undef virtual register def operands require a subregister", MO, MONum);

    // Verify the consistency of tied operands.
    if (MO->isTied()) {
      unsigned OtherIdx = MI->findTiedOperandIdx(MONum);
      const MachineOperand &OtherMO = MI->getOperand(OtherIdx);
      if (!OtherMO.isReg())
        report("Must be tied to a register", MO, MONum);
      if (!OtherMO.isTied())
        report("Missing tie flags on tied operand", MO, MONum);
      if (MI->findTiedOperandIdx(OtherIdx) != MONum)
        report("Inconsistent tie links", MO, MONum);
      if (MONum < MCID.getNumDefs()) {
        if (OtherIdx < MCID.getNumOperands()) {
          if (-1 == MCID.getOperandConstraint(OtherIdx, MCOI::TIED_TO))
            report("Explicit def tied to explicit use without tie constraint",
                   MO, MONum);
        } else {
          if (!OtherMO.isImplicit())
            report("Explicit def should be tied to implicit use", MO, MONum);
        }
      }
    }

    // Verify two-address constraints after the twoaddressinstruction pass.
    // Both twoaddressinstruction pass and phi-node-elimination pass call
    // MRI->leaveSSA() to set MF as not IsSSA, we should do the verification
    // after twoaddressinstruction pass not after phi-node-elimination pass. So
    // we shouldn't use the IsSSA as the condition, we should based on
    // TiedOpsRewritten property to verify two-address constraints, this
    // property will be set in twoaddressinstruction pass.
    unsigned DefIdx;
    if (MF->getProperties().hasTiedOpsRewritten() && MO->isUse() &&
        MI->isRegTiedToDefOperand(MONum, &DefIdx) &&
        Reg != MI->getOperand(DefIdx).getReg())
      report("Two-address instruction operands must be identical", MO, MONum);

    // Check register classes.
    unsigned SubIdx = MO->getSubReg();

    if (Reg.isPhysical()) {
      // Physical registers never take subregister indices here.
      if (SubIdx) {
        report("Illegal subregister index for physical register", MO, MONum);
        return;
      }
      // A physical register must belong to any register class the
      // descriptor demands for this operand position.
      if (MONum < MCID.getNumOperands()) {
        if (const TargetRegisterClass *DRC = TII->getRegClass(MCID, MONum)) {
          if (!DRC->contains(Reg)) {
            report("Illegal physical register for instruction", MO, MONum);
            OS << printReg(Reg, TRI) << " is not a "
               << TRI->getRegClassName(DRC) << " register.\n";
          }
        }
      }
      if (MO->isRenamable()) {
        if (MRI->isReserved(Reg)) {
          report("isRenamable set on reserved register", MO, MONum);
          return;
        }
      }
    } else {
      // Virtual register.
      const TargetRegisterClass *RC = MRI->getRegClassOrNull(Reg);
      if (!RC) {
        // This is a generic virtual register.

        // Do not allow undef uses for generic virtual registers. This ensures
        // getVRegDef can never fail and return null on a generic register.
        //
        // FIXME: This restriction should probably be broadened to all SSA
        // MIR. However, DetectDeadLanes/ProcessImplicitDefs technically still
        // run on the SSA function just before phi elimination.
        if (MO->isUndef())
          report("Generic virtual register use cannot be undef", MO, MONum);

        // Debug value instruction is permitted to use undefined vregs.
        // This is a performance measure to skip the overhead of immediately
        // pruning unused debug operands. The final undef substitution occurs
        // when debug values are allocated in LDVImpl::handleDebugValue, so
        // these verifications always apply after this pass.
        if (isFunctionTracksDebugUserValues || !MO->isUse() ||
            !MI->isDebugValue() || !MRI->def_empty(Reg)) {
          // If we're post-Select, we can't have gvregs anymore.
          if (isFunctionSelected) {
            report("Generic virtual register invalid in a Selected function",
                   MO, MONum);
            return;
          }

          // The gvreg must have a type and it must not have a SubIdx.
          LLT Ty = MRI->getType(Reg);
          if (!Ty.isValid()) {
            report("Generic virtual register must have a valid type", MO,
                   MONum);
            return;
          }

          const RegisterBank *RegBank = MRI->getRegBankOrNull(Reg);
          const RegisterBankInfo *RBI = MF->getSubtarget().getRegBankInfo();

          // If we're post-RegBankSelect, the gvreg must have a bank.
          if (!RegBank && isFunctionRegBankSelected) {
            report("Generic virtual register must have a bank in a "
                   "RegBankSelected function",
                   MO, MONum);
            return;
          }

          // Make sure the register fits into its register bank if any.
          if (RegBank && Ty.isValid() && !Ty.isScalableVector() &&
              RBI->getMaximumSize(RegBank->getID()) < Ty.getSizeInBits()) {
            report("Register bank is too small for virtual register", MO,
                   MONum);
            OS << "Register bank " << RegBank->getName() << " too small("
               << RBI->getMaximumSize(RegBank->getID()) << ") to fit "
               << Ty.getSizeInBits() << "-bits\n";
            return;
          }
        }

        if (SubIdx) {
          report("Generic virtual register does not allow subregister index", MO,
                 MONum);
          return;
        }
2802
2803 // If this is a target specific instruction and this operand
2804 // has register class constraint, the virtual register must
2805 // comply to it.
2806 if (!isPreISelGenericOpcode(MCID.getOpcode()) &&
2807 MONum < MCID.getNumOperands() && TII->getRegClass(MCID, MONum)) {
2808 report("Virtual register does not match instruction constraint", MO,
2809 MONum);
2810 OS << "Expect register class "
2811 << TRI->getRegClassName(TII->getRegClass(MCID, MONum))
2812 << " but got nothing\n";
2813 return;
2814 }
2815
2816 break;
2817 }
2818 // Validate that SubIdx can be applied to the virtual register.
2819 if (!TRI->isSubRegValidForRegClass(RC, SubIdx)) {
2820 report("Invalid subregister index for virtual register", MO, MONum);
2821 OS << "Register class " << TRI->getRegClassName(RC)
2822 << " does not support subreg index "
2823 << TRI->getSubRegIndexName(SubIdx) << '\n';
2824 return;
2825 }
2826 if (MONum >= MCID.getNumOperands())
2827 break;
2828 const TargetRegisterClass *DRC = TII->getRegClass(MCID, MONum);
2829 if (!DRC)
2830 break;
2831
2832 // If SubIdx is used, verify that RC with SubIdx can be used for an
2833 // operand of class DRC. This is valid if for every register in RC, the
2834 // register obtained by applying SubIdx to it is in DRC.
2835 if (SubIdx && TRI->getMatchingSuperRegClass(RC, DRC, SubIdx) != RC) {
2836 report("Illegal virtual register for instruction", MO, MONum);
2837 OS << TRI->getRegClassName(RC) << "." << TRI->getSubRegIndexName(SubIdx)
2838 << " cannot be used for " << TRI->getRegClassName(DRC)
2839 << " operands.";
2840 }
2841
2842 // If no SubIdx is used, verify that RC is a sub-class of DRC.
2843 if (!SubIdx && !RC->hasSuperClassEq(DRC)) {
2844 report("Illegal virtual register for instruction", MO, MONum);
2845 OS << "Expected a " << TRI->getRegClassName(DRC)
2846 << " register, but got a " << TRI->getRegClassName(RC)
2847 << " register\n";
2848 }
2849 }
2850 break;
2851 }
2852
2854 regMasks.push_back(MO->getRegMask());
2855 break;
2856
2858 if (MI->isPHI() && !MO->getMBB()->isSuccessor(MI->getParent()))
2859 report("PHI operand is not in the CFG", MO, MONum);
2860 break;
2861
2863 if (LiveStks && LiveStks->hasInterval(MO->getIndex()) &&
2864 LiveInts && !LiveInts->isNotInMIMap(*MI)) {
2865 int FI = MO->getIndex();
2866 LiveInterval &LI = LiveStks->getInterval(FI);
2867 SlotIndex Idx = LiveInts->getInstructionIndex(*MI);
2868
2869 bool MayStore = MI->mayStore();
2870 bool MayLoad = MI->mayLoad();
2871 // For a memory-to-memory move, we need to check if the frame
2872 // index is used for storing or loading, by inspecting the
2873 // memory operands.
2874 if (MayStore && MayLoad) {
2875 for (const MachineMemOperand *MMO : MI->memoperands()) {
2877 MMO->getPseudoValue());
2878 if (!Value || Value->getFrameIndex() != FI)
2879 continue;
2880
2881 if (MMO->isStore())
2882 MayLoad = false;
2883 else
2884 MayStore = false;
2885 break;
2886 }
2887 if (MayLoad == MayStore)
2888 report("Missing fixed stack memoperand.", MI);
2889 }
2890 if (MayLoad && !LI.liveAt(Idx.getRegSlot(true))) {
2891 report("Instruction loads from dead spill slot", MO, MONum);
2892 OS << "Live stack: " << LI << '\n';
2893 }
2894 if (MayStore && !LI.liveAt(Idx.getRegSlot())) {
2895 report("Instruction stores to dead spill slot", MO, MONum);
2896 OS << "Live stack: " << LI << '\n';
2897 }
2898 }
2899 break;
2900
2902 if (MO->getCFIIndex() >= MF->getFrameInstructions().size())
2903 report("CFI instruction has invalid index", MO, MONum);
2904 break;
2905
2906 default:
2907 break;
2908 }
2909}
2910
2911void MachineVerifier::checkLivenessAtUse(const MachineOperand *MO,
2912 unsigned MONum, SlotIndex UseIdx,
2913 const LiveRange &LR,
2914 VirtRegOrUnit VRegOrUnit,
2915 LaneBitmask LaneMask) {
2916 const MachineInstr *MI = MO->getParent();
2917
2918 if (!LR.verify()) {
2919 report("invalid live range", MO, MONum);
2920 report_context_liverange(LR);
2921 report_context_vreg_regunit(VRegOrUnit);
2922 report_context(UseIdx);
2923 return;
2924 }
2925
2926 LiveQueryResult LRQ = LR.Query(UseIdx);
2927 bool HasValue = LRQ.valueIn() || (MI->isPHI() && LRQ.valueOut());
2928 // Check if we have a segment at the use, note however that we only need one
2929 // live subregister range, the others may be dead.
2930 if (!HasValue && LaneMask.none()) {
2931 report("No live segment at use", MO, MONum);
2932 report_context_liverange(LR);
2933 report_context_vreg_regunit(VRegOrUnit);
2934 report_context(UseIdx);
2935 }
2936 if (MO->isKill() && !LRQ.isKill()) {
2937 report("Live range continues after kill flag", MO, MONum);
2938 report_context_liverange(LR);
2939 report_context_vreg_regunit(VRegOrUnit);
2940 if (LaneMask.any())
2941 report_context_lanemask(LaneMask);
2942 report_context(UseIdx);
2943 }
2944}
2945
// Verify that the def described by operand \p MO (operand number \p MONum,
// def slot \p DefIdx) is consistent with live range \p LR of \p VRegOrUnit.
// \p SubRangeCheck is set when \p LR is a subregister range covering lanes
// \p LaneMask rather than the main range. Unlike the use check, this keeps
// going after reporting an invalid range.
void MachineVerifier::checkLivenessAtDef(const MachineOperand *MO,
                                         unsigned MONum, SlotIndex DefIdx,
                                         const LiveRange &LR,
                                         VirtRegOrUnit VRegOrUnit,
                                         bool SubRangeCheck,
                                         LaneBitmask LaneMask) {
  // Report a malformed range, but still run the value-number checks below.
  if (!LR.verify()) {
    report("invalid live range", MO, MONum);
    report_context_liverange(LR);
    report_context_vreg_regunit(VRegOrUnit);
    if (LaneMask.any())
      report_context_lanemask(LaneMask);
    report_context(DefIdx);
  }

  // The range must carry a value number whose def slot matches this operand.
  if (const VNInfo *VNI = LR.getVNInfoAt(DefIdx)) {
    // The LR can correspond to the whole reg and its def slot is not obliged
    // to be the same as the MO' def slot. E.g. when we check here "normal"
    // subreg MO but there is other EC subreg MO in the same instruction so the
    // whole reg has EC def slot and differs from the currently checked MO' def
    // slot. For example:
    // %0 [16e,32r:0) 0@16e L..3 [16e,32r:0) 0@16e L..C [16r,32r:0) 0@16r
    // Check that there is an early-clobber def of the same superregister
    // somewhere is performed in visitMachineFunctionAfter()
    if (((SubRangeCheck || MO->getSubReg() == 0) && VNI->def != DefIdx) ||
        !SlotIndex::isSameInstr(VNI->def, DefIdx) ||
        (VNI->def != DefIdx &&
         (!VNI->def.isEarlyClobber() || !DefIdx.isRegister()))) {
      report("Inconsistent valno->def", MO, MONum);
      report_context_liverange(LR);
      report_context_vreg_regunit(VRegOrUnit);
      if (LaneMask.any())
        report_context_lanemask(LaneMask);
      report_context(*VNI);
      report_context(DefIdx);
    }
  } else {
    report("No live segment at def", MO, MONum);
    report_context_liverange(LR);
    report_context_vreg_regunit(VRegOrUnit);
    if (LaneMask.any())
      report_context_lanemask(LaneMask);
    report_context(DefIdx);
  }
  // Check that, if the dead def flag is present, LiveInts agree.
  if (MO->isDead()) {
    LiveQueryResult LRQ = LR.Query(DefIdx);
    if (!LRQ.isDeadDef()) {
      assert(VRegOrUnit.isVirtualReg() && "Expecting a virtual register.");
      // A dead subreg def only tells us that the specific subreg is dead. There
      // could be other non-dead defs of other subregs, or we could have other
      // parts of the register being live through the instruction. So unless we
      // are checking liveness for a subrange it is ok for the live range to
      // continue, given that we have a dead def of a subregister.
      if (SubRangeCheck || MO->getSubReg() == 0) {
        report("Live range continues after dead def flag", MO, MONum);
        report_context_liverange(LR);
        report_context_vreg_regunit(VRegOrUnit);
        if (LaneMask.any())
          report_context_lanemask(LaneMask);
      }
    }
  }
}
3010
// Check the liveness-related properties of register operand \p MO (operand
// number \p MONum): kill flags, LiveVariables/LiveIntervals agreement, use
// of dead registers, and SSA def constraints. Dispatches to
// checkLivenessAtUse/checkLivenessAtDef for the interval-based checks.
void MachineVerifier::checkLiveness(const MachineOperand *MO, unsigned MONum) {
  const MachineInstr *MI = MO->getParent();
  const Register Reg = MO->getReg();
  const unsigned SubRegIdx = MO->getSubReg();

  // Cache the virtual register's interval (if LiveIntervals is available);
  // the use and def paths below both consult it.
  const LiveInterval *LI = nullptr;
  if (LiveInts && Reg.isVirtual()) {
    if (LiveInts->hasInterval(Reg)) {
      LI = &LiveInts->getInterval(Reg);
      // NOTE(review): the continuation of this condition (presumably a
      // hasSubRanges() test) is elided from this view of the file.
      if (SubRegIdx != 0 && (MO->isDef() || !MO->isUndef()) && !LI->empty() &&
        report("Live interval for subreg operand has no subranges", MO, MONum);
    } else {
      report("Virtual register has no live interval", MO, MONum);
    }
  }

  // Both use and def operands can read a register.
  if (MO->readsReg()) {
    if (MO->isKill())
      addRegWithSubRegs(regsKilled, Reg);

    // Check that LiveVars knows this kill (unless we are inside a bundle, in
    // which case we have already checked that LiveVars knows any kills on the
    // bundle header instead).
    if (LiveVars && Reg.isVirtual() && MO->isKill() &&
        !MI->isBundledWithPred()) {
      // NOTE(review): the declaration of 'VI' is elided from this view of
      // the file (presumably LiveVars->getVarInfo(Reg)).
      if (!is_contained(VI.Kills, MI))
        report("Kill missing from LiveVariables", MO, MONum);
    }

    // Check LiveInts liveness and kill.
    if (LiveInts && !LiveInts->isNotInMIMap(*MI)) {
      SlotIndex UseIdx;
      if (MI->isPHI()) {
        // PHI use occurs on the edge, so check for live out here instead.
        UseIdx = LiveInts->getMBBEndIdx(
          MI->getOperand(MONum + 1).getMBB()).getPrevSlot();
      } else {
        UseIdx = LiveInts->getInstructionIndex(*MI);
      }
      // Check the cached regunit intervals.
      if (Reg.isPhysical() && !isReserved(Reg)) {
        for (MCRegUnit Unit : TRI->regunits(Reg.asMCReg())) {
          if (MRI->isReservedRegUnit(Unit))
            continue;
          if (const LiveRange *LR = LiveInts->getCachedRegUnit(Unit))
            checkLivenessAtUse(MO, MONum, UseIdx, *LR, VirtRegOrUnit(Unit));
        }
      }

      if (Reg.isVirtual()) {
        // This is a virtual register interval.
        checkLivenessAtUse(MO, MONum, UseIdx, *LI, VirtRegOrUnit(Reg));

        if (LI->hasSubRanges() && !MO->isDef()) {
          LaneBitmask MOMask = SubRegIdx != 0
                                   ? TRI->getSubRegIndexLaneMask(SubRegIdx)
                                   : MRI->getMaxLaneMaskForVReg(Reg);
          // Accumulate which lanes actually have a live value at the use.
          LaneBitmask LiveInMask;
          for (const LiveInterval::SubRange &SR : LI->subranges()) {
            if ((MOMask & SR.LaneMask).none())
              continue;
            checkLivenessAtUse(MO, MONum, UseIdx, SR, VirtRegOrUnit(Reg),
                               SR.LaneMask);
            LiveQueryResult LRQ = SR.Query(UseIdx);
            if (LRQ.valueIn() || (MI->isPHI() && LRQ.valueOut()))
              LiveInMask |= SR.LaneMask;
          }
          // At least parts of the register has to be live at the use.
          if ((LiveInMask & MOMask).none()) {
            report("No live subrange at use", MO, MONum);
            report_context(*LI);
            report_context(UseIdx);
          }
          // For PHIs all lanes should be live
          if (MI->isPHI() && LiveInMask != MOMask) {
            report("Not all lanes of PHI source live at use", MO, MONum);
            report_context(*LI);
            report_context(UseIdx);
          }
        }
      }
    }

    // Use of a dead register.
    if (!regsLive.count(Reg)) {
      if (Reg.isPhysical()) {
        // Reserved registers may be used even when 'dead'.
        bool Bad = !isReserved(Reg);
        // We are fine if just any subregister has a defined value.
        if (Bad) {

          for (const MCPhysReg &SubReg : TRI->subregs(Reg)) {
            if (regsLive.count(SubReg)) {
              Bad = false;
              break;
            }
          }
        }
        // If there is an additional implicit-use of a super register we stop
        // here. By definition we are fine if the super register is not
        // (completely) dead, if the complete super register is dead we will
        // get a report for its operand.
        if (Bad) {
          for (const MachineOperand &MOP : MI->uses()) {
            if (!MOP.isReg() || !MOP.isImplicit())
              continue;

            if (!MOP.getReg().isPhysical())
              continue;

            // A super-register use: every regunit of Reg is contained in the
            // implicitly used register.
            if (MOP.getReg() != Reg &&
                all_of(TRI->regunits(Reg), [&](const MCRegUnit RegUnit) {
                  return llvm::is_contained(TRI->regunits(MOP.getReg()),
                                            RegUnit);
                }))
              Bad = false;
          }
        }
        if (Bad)
          report("Using an undefined physical register", MO, MONum);
      } else if (MRI->def_empty(Reg)) {
        report("Reading virtual register without a def", MO, MONum);
      } else {
        BBInfo &MInfo = MBBInfoMap[MI->getParent()];
        // We don't know which virtual registers are live in, so only complain
        // if vreg was killed in this MBB. Otherwise keep track of vregs that
        // must be live in. PHI instructions are handled separately.
        if (MInfo.regsKilled.count(Reg))
          report("Using a killed virtual register", MO, MONum);
        else if (!MI->isPHI())
          MInfo.vregsLiveIn.insert(std::make_pair(Reg, MI));
      }
    }
  }

  if (MO->isDef()) {
    // Register defined.
    // TODO: verify that earlyclobber ops are not used.
    if (MO->isDead())
      addRegWithSubRegs(regsDead, Reg);
    else
      addRegWithSubRegs(regsDefined, Reg);

    // Verify SSA form.
    if (MRI->isSSA() && Reg.isVirtual()) {
      if (!MRI->hasOneDef(Reg))
        report("Multiple virtual register defs in SSA form", MO, MONum);
      if (MO->getSubReg())
        report("Subreg def in SSA form", MO, MONum);
    }

    // Check LiveInts for a live segment, but only for virtual registers.
    if (LiveInts && !LiveInts->isNotInMIMap(*MI)) {
      SlotIndex DefIdx = LiveInts->getInstructionIndex(*MI);
      // Early-clobber defs live at the use slot of the instruction.
      DefIdx = DefIdx.getRegSlot(MO->isEarlyClobber());

      if (Reg.isVirtual()) {
        checkLivenessAtDef(MO, MONum, DefIdx, *LI, VirtRegOrUnit(Reg));

        if (LI->hasSubRanges()) {
          LaneBitmask MOMask = SubRegIdx != 0
                                   ? TRI->getSubRegIndexLaneMask(SubRegIdx)
                                   : MRI->getMaxLaneMaskForVReg(Reg);
          for (const LiveInterval::SubRange &SR : LI->subranges()) {
            if ((SR.LaneMask & MOMask).none())
              continue;
            checkLivenessAtDef(MO, MONum, DefIdx, SR, VirtRegOrUnit(Reg), true,
                               SR.LaneMask);
          }
        }
      }
    }
  }
}
3188
3189// This function gets called after visiting all instructions in a bundle. The
3190// argument points to the bundle header.
3191// Normal stand-alone instructions are also considered 'bundles', and this
3192// function is called for all of them.
void MachineVerifier::visitMachineBundleAfter(const MachineInstr *MI) {
  BBInfo &MInfo = MBBInfoMap[MI->getParent()];
  // Fold the kills seen in this bundle into the block summary, then retire
  // the killed registers from the running live set.
  set_union(MInfo.regsKilled, regsKilled);
  set_subtract(regsLive, regsKilled); regsKilled.clear();
  // Kill any masked registers.
  while (!regMasks.empty()) {
    const uint32_t *Mask = regMasks.pop_back_val();
    for (Register Reg : regsLive)
      if (Reg.isPhysical() &&
          // NOTE(review): the regmask clobber test on this condition is
          // elided from this view of the file.
          regsDead.push_back(Reg);
  }
  // Retire clobbered registers, then admit this bundle's defs.
  set_subtract(regsLive, regsDead); regsDead.clear();
  set_union(regsLive, regsDefined); regsDefined.clear();
}
3208
3209void
3210MachineVerifier::visitMachineBasicBlockAfter(const MachineBasicBlock *MBB) {
3211 MBBInfoMap[MBB].regsLiveOut = regsLive;
3212 regsLive.clear();
3213
3214 if (Indexes) {
3215 SlotIndex stop = Indexes->getMBBEndIdx(MBB);
3216 if (!(stop > lastIndex)) {
3217 report("Block ends before last instruction index", MBB);
3218 OS << "Block ends at " << stop << " last instruction was at " << lastIndex
3219 << '\n';
3220 }
3221 lastIndex = stop;
3222 }
3223}
3224
3225namespace {
3226// This implements a set of registers that serves as a filter: can filter other
3227// sets by passing through elements not in the filter and blocking those that
3228// are. Any filter implicitly includes the full set of physical registers upon
3229// creation, thus filtering them all out. The filter itself as a set only grows,
3230// and needs to be as efficient as possible.
struct VRegFilter {
  // Add elements to the filter itself. \pre Input set \p FromRegSet must have
  // no duplicates. Both virtual and physical registers are fine.
  template <typename RegSetT> void add(const RegSetT &FromRegSet) {
    // Discard the pass-through result; only the filter's own growth matters.
    SmallVector<Register, 0> VRegsBuffer;
    filterAndAdd(FromRegSet, VRegsBuffer);
  }
  // Filter \p FromRegSet through the filter and append passed elements into \p
  // ToVRegs. All elements appended are then added to the filter itself.
  // \returns true if anything changed.
  template <typename RegSetT>
  bool filterAndAdd(const RegSetT &FromRegSet,
                    SmallVectorImpl<Register> &ToVRegs) {
    // Phase 1: collect the not-yet-filtered vregs and size up how much the
    // sparse universe / dense set will have to grow.
    unsigned SparseUniverse = Sparse.size();
    unsigned NewSparseUniverse = SparseUniverse;
    unsigned NewDenseSize = Dense.size();
    size_t Begin = ToVRegs.size();
    for (Register Reg : FromRegSet) {
      // Physical registers are implicitly in the filter: drop them.
      if (!Reg.isVirtual())
        continue;
      unsigned Index = Reg.virtRegIndex();
      if (Index < SparseUniverseMax) {
        if (Index < SparseUniverse && Sparse.test(Index))
          continue;
        NewSparseUniverse = std::max(NewSparseUniverse, Index + 1);
      } else {
        if (Dense.count(Reg))
          continue;
        ++NewDenseSize;
      }
      ToVRegs.push_back(Reg);
    }
    size_t End = ToVRegs.size();
    if (Begin == End)
      return false;
    // Reserving space in sets once performs better than doing so continuously
    // and pays easily for double look-ups (even in Dense with SparseUniverseMax
    // tuned all the way down) and double iteration (the second one is over a
    // SmallVector, which is a lot cheaper compared to DenseSet or BitVector).
    Sparse.resize(NewSparseUniverse);
    Dense.reserve(NewDenseSize);
    // Phase 2: commit the passed-through elements into the filter.
    for (unsigned I = Begin; I < End; ++I) {
      Register Reg = ToVRegs[I];
      unsigned Index = Reg.virtRegIndex();
      if (Index < SparseUniverseMax)
        Sparse.set(Index);
      else
        Dense.insert(Reg);
    }
    return true;
  }

private:
  static constexpr unsigned SparseUniverseMax = 10 * 1024 * 8;
  // VRegs indexed within SparseUniverseMax are tracked by Sparse, those beyond
  // are tracked by Dense. The only purpose of the threshold and the Dense set
  // is to have a reasonably growing memory usage in pathological cases (large
  // number of very sparse VRegFilter instances live at the same time). In
  // practice even in the worst-by-execution time cases having all elements
  // tracked by Sparse (very large SparseUniverseMax scenario) tends to be more
  // space efficient than if tracked by Dense. The threshold is set to keep the
  // worst-case memory usage within 2x of figures determined empirically for
  // "all Dense" scenario in such worst-by-execution-time cases.
  BitVector Sparse;
  DenseSet<Register> Dense;
};
3297
3298// Implements both a transfer function and a (binary, in-place) join operator
3299// for a dataflow over register sets with set union join and filtering transfer
3300// (out_b = in_b \ filter_b). filter_b is expected to be set-up ahead of time.
3301// Maintains out_b as its state, allowing for O(n) iteration over it at any
3302// time, where n is the size of the set (as opposed to O(U) where U is the
3303// universe). filter_b implicitly contains all physical registers at all times.
class FilteringVRegSet {
  VRegFilter Filter;
  // NOTE(review): the declaration of the 'VRegs' member (the out_b state,
  // presumably a SmallVector<Register, 0>) is elided from this view of the
  // file; decltype(VRegs) below refers to it.

public:
  // Set-up the filter_b. \pre Input register set \p RS must have no duplicates.
  // Both virtual and physical registers are fine.
  template <typename RegSetT> void addToFilter(const RegSetT &RS) {
    Filter.add(RS);
  }
  // Passes \p RS through the filter_b (transfer function) and adds what's left
  // to itself (out_b).
  template <typename RegSetT> bool add(const RegSetT &RS) {
    // Double-duty the Filter: to maintain VRegs a set (and the join operation
    // a set union) just add everything being added here to the Filter as well.
    return Filter.filterAndAdd(RS, VRegs);
  }
  // Read-only iteration over the accumulated out_b set.
  using const_iterator = decltype(VRegs)::const_iterator;
  const_iterator begin() const { return VRegs.begin(); }
  const_iterator end() const { return VRegs.end(); }
  size_t size() const { return VRegs.size(); }
};
3326} // namespace
3327
3328// Calculate the largest possible vregsPassed sets. These are the registers that
3329// can pass through an MBB live, but may not be live every time. It is assumed
3330// that all vregsPassed sets are empty before the call.
void MachineVerifier::calcRegsPassed() {
  if (MF->empty())
    // ReversePostOrderTraversal doesn't handle empty functions.
    return;

  // One RPO sweep suffices: in reverse post order most predecessors are
  // visited before their successors, and the filter makes each block's set
  // a fixed point of the union-then-filter transfer function.
  for (const MachineBasicBlock *MB :
       // NOTE(review): the traversal expression opening this loop
       // (presumably a ReversePostOrderTraversal over MF) is elided from
       // this view of the file.
    FilteringVRegSet VRegs;
    BBInfo &Info = MBBInfoMap[MB];
    assert(Info.reachable);

    // Registers killed or defined (live-out) in this block do not "pass
    // through" it, so they seed the filter.
    VRegs.addToFilter(Info.regsKilled);
    VRegs.addToFilter(Info.regsLiveOut);
    for (const MachineBasicBlock *Pred : MB->predecessors()) {
      const BBInfo &PredInfo = MBBInfoMap[Pred];
      if (!PredInfo.reachable)
        continue;

      // Anything live out of, or passed through, a predecessor may pass
      // through this block.
      VRegs.add(PredInfo.regsLiveOut);
      VRegs.add(PredInfo.vregsPassed);
    }
    Info.vregsPassed.reserve(VRegs.size());
    Info.vregsPassed.insert_range(VRegs);
  }
}
3356
3357// Calculate the set of virtual registers that must be passed through each basic
3358// block in order to satisfy the requirements of successor blocks. This is very
3359// similar to calcRegsPassed, only backwards.
void MachineVerifier::calcRegsRequired() {
  // First push live-in regs to predecessors' vregsRequired.
  // NOTE(review): the declaration of the 'todo' worklist (presumably a
  // SmallPtrSet of basic blocks) is elided from this view of the file.
  for (const auto &MBB : *MF) {
    BBInfo &MInfo = MBBInfoMap[&MBB];
    for (const MachineBasicBlock *Pred : MBB.predecessors()) {
      BBInfo &PInfo = MBBInfoMap[Pred];
      if (PInfo.addRequired(MInfo.vregsLiveIn))
        todo.insert(Pred);
    }

    // Handle the PHI node.
    for (const MachineInstr &MI : MBB.phis()) {
      // PHI operands come in (register, predecessor-block) pairs from
      // operand 1 onward.
      for (unsigned i = 1, e = MI.getNumOperands(); i != e; i += 2) {
        // Skip those Operands which are undef regs or not regs.
        if (!MI.getOperand(i).isReg() || !MI.getOperand(i).readsReg())
          continue;

        // Get register and predecessor for one PHI edge.
        Register Reg = MI.getOperand(i).getReg();
        const MachineBasicBlock *Pred = MI.getOperand(i + 1).getMBB();

        BBInfo &PInfo = MBBInfoMap[Pred];
        if (PInfo.addRequired(Reg))
          todo.insert(Pred);
      }
    }
  }

  // Iteratively push vregsRequired to predecessors. This will converge to the
  // same final state regardless of DenseSet iteration order.
  while (!todo.empty()) {
    const MachineBasicBlock *MBB = *todo.begin();
    todo.erase(MBB);
    BBInfo &MInfo = MBBInfoMap[MBB];
    for (const MachineBasicBlock *Pred : MBB->predecessors()) {
      // A self-loop cannot add new requirements.
      if (Pred == MBB)
        continue;
      BBInfo &SInfo = MBBInfoMap[Pred];
      if (SInfo.addRequired(MInfo.vregsRequired))
        todo.insert(Pred);
    }
  }
}
3404
3405// Check PHI instructions at the beginning of MBB. It is assumed that
3406// calcRegsPassed has been run so BBInfo::isLiveOut is valid.
void MachineVerifier::checkPHIOps(const MachineBasicBlock &MBB) {
  BBInfo &MInfo = MBBInfoMap[&MBB];

  // NOTE(review): the declaration of the 'seen' predecessor set (presumably
  // a SmallPtrSet of basic blocks) is elided from this view of the file.
  for (const MachineInstr &Phi : MBB) {
    // PHIs are grouped at the top of the block; stop at the first non-PHI.
    if (!Phi.isPHI())
      break;
    seen.clear();

    // Operand 0 must be the defined virtual register.
    const MachineOperand &MODef = Phi.getOperand(0);
    if (!MODef.isReg() || !MODef.isDef()) {
      report("Expected first PHI operand to be a register def", &MODef, 0);
      continue;
    }
    if (MODef.isTied() || MODef.isImplicit() || MODef.isInternalRead() ||
        MODef.isEarlyClobber() || MODef.isDebug())
      report("Unexpected flag on PHI operand", &MODef, 0);
    Register DefReg = MODef.getReg();
    if (!DefReg.isVirtual())
      report("Expected first PHI operand to be a virtual register", &MODef, 0);

    // Remaining operands come in (value register, predecessor block) pairs.
    for (unsigned I = 1, E = Phi.getNumOperands(); I != E; I += 2) {
      const MachineOperand &MO0 = Phi.getOperand(I);
      if (!MO0.isReg()) {
        report("Expected PHI operand to be a register", &MO0, I);
        continue;
      }
      if (MO0.isImplicit() || MO0.isInternalRead() || MO0.isEarlyClobber() ||
          MO0.isDebug() || MO0.isTied())
        report("Unexpected flag on PHI operand", &MO0, I);

      const MachineOperand &MO1 = Phi.getOperand(I + 1);
      if (!MO1.isMBB()) {
        report("Expected PHI operand to be a basic block", &MO1, I + 1);
        continue;
      }

      const MachineBasicBlock &Pre = *MO1.getMBB();
      if (!Pre.isSuccessor(&MBB)) {
        report("PHI input is not a predecessor block", &MO1, I + 1);
        continue;
      }

      // Liveness is only meaningful on reachable blocks (calcRegsPassed
      // computed isLiveOut for those).
      if (MInfo.reachable) {
        seen.insert(&Pre);
        BBInfo &PrInfo = MBBInfoMap[&Pre];
        if (!MO0.isUndef() && PrInfo.reachable &&
            !PrInfo.isLiveOut(MO0.getReg()))
          report("PHI operand is not live-out from predecessor", &MO0, I);
      }
    }

    // Did we see all predecessors?
    if (MInfo.reachable) {
      for (MachineBasicBlock *Pred : MBB.predecessors()) {
        if (!seen.count(Pred)) {
          report("Missing PHI operand", &Phi);
          OS << printMBBReference(*Pred)
             << " is a predecessor according to the CFG.\n";
        }
      }
    }
  }
}
3471
// Run the convergence-control verifier over every block and instruction of
// the function, reporting failures through \p FailureCB.
// NOTE(review): the first line of this signature (the function name and its
// leading parameters, including a MachineDominatorTree 'DT') and the
// declaration of the verifier object 'CV' are elided from this view of the
// file.
static void
    std::function<void(const Twine &Message)> FailureCB,
    raw_ostream &OS) {
  CV.initialize(&OS, FailureCB, MF);

  for (const auto &MBB : MF) {
    CV.visit(MBB);
    for (const auto &MI : MBB.instrs())
      CV.visit(MI);
  }

  // The dominator tree is only needed (and recomputed) when convergence
  // tokens were actually seen.
  if (CV.sawTokens()) {
    DT.recalculate(const_cast<MachineFunction &>(MF));
    CV.verify(DT);
  }
}
3490
// Whole-function checks run after every block has been visited: convergence
// control, PHI operands, dataflow-based liveness, LiveVariables /
// LiveIntervals agreement, MBB live-in lists, call site info, and debug
// instruction numbering.
void MachineVerifier::visitMachineFunctionAfter() {
  auto FailureCB = [this](const Twine &Message) {
    report(Message.str().c_str(), MF);
  };
  verifyConvergenceControl(*MF, DT, FailureCB, OS);

  calcRegsPassed();

  for (const MachineBasicBlock &MBB : *MF)
    checkPHIOps(MBB);

  // Now check liveness info if available
  calcRegsRequired();

  // Check for killed virtual registers that should be live out.
  for (const auto &MBB : *MF) {
    BBInfo &MInfo = MBBInfoMap[&MBB];
    for (Register VReg : MInfo.vregsRequired)
      if (MInfo.regsKilled.count(VReg)) {
        report("Virtual register killed in block, but needed live out.", &MBB);
        OS << "Virtual register " << printReg(VReg)
           << " is used after the block.\n";
      }
  }

  // Anything still required at the entry block has a use that no def
  // dominates.
  if (!MF->empty()) {
    BBInfo &MInfo = MBBInfoMap[&MF->front()];
    for (Register VReg : MInfo.vregsRequired) {
      report("Virtual register defs don't dominate all uses.", MF);
      report_context_vreg(VReg);
    }
  }

  if (LiveVars)
    verifyLiveVariables();
  if (LiveInts)
    verifyLiveIntervals();

  // Check live-in list of each MBB. If a register is live into MBB, check
  // that the register is in regsLiveOut of each predecessor block. Since
  // this must come from a definition in the predecessor or its live-in
  // list, this will catch a live-through case where the predecessor does not
  // have the register in its live-in list. This currently only checks
  // registers that have no aliases, are not allocatable and are not
  // reserved, which could mean a condition code register for instance.
  if (MRI->tracksLiveness())
    for (const auto &MBB : *MF)
      // NOTE(review): the inner loop header over MBB's live-ins (binding
      // 'P') is elided from this view of the file.
        MCRegister LiveInReg = P.PhysReg;
        bool hasAliases = MCRegAliasIterator(LiveInReg, TRI, false).isValid();
        if (hasAliases || isAllocatable(LiveInReg) || isReserved(LiveInReg))
          continue;
        for (const MachineBasicBlock *Pred : MBB.predecessors()) {
          BBInfo &PInfo = MBBInfoMap[Pred];
          if (!PInfo.regsLiveOut.count(LiveInReg)) {
            report("Live in register not found to be live out from predecessor.",
                   &MBB);
            OS << TRI->getName(LiveInReg) << " not found to be live out from "
               << printMBBReference(*Pred) << '\n';
          }
        }
      }

  for (auto CSInfo : MF->getCallSitesInfo())
    if (!CSInfo.first->isCall())
      report("Call site info referencing instruction that is not call", MF);

  // If there's debug-info, check that we don't have any duplicate value
  // tracking numbers.
  if (MF->getFunction().getSubprogram()) {
    DenseSet<unsigned> SeenNumbers;
    for (const auto &MBB : *MF) {
      for (const auto &MI : MBB) {
        if (auto Num = MI.peekDebugInstrNum()) {
          auto Result = SeenNumbers.insert((unsigned)Num);
          if (!Result.second)
            report("Instruction has a duplicated value tracking number", &MI);
        }
      }
    }
  }
}
3573
// Cross-check the verifier's own dataflow (vregsRequired) against
// LiveVariables' per-vreg AliveBlocks sets: each must imply the other.
void MachineVerifier::verifyLiveVariables() {
  assert(LiveVars && "Don't call verifyLiveVariables without LiveVars");
  for (unsigned I = 0, E = MRI->getNumVirtRegs(); I != E; ++I) {
    // NOTE(review): the declarations binding 'Reg' (presumably
    // Register::index2VirtReg(I)) and 'VI' (presumably
    // LiveVars->getVarInfo(Reg)) are elided from this view of the file.
    for (const auto &MBB : *MF) {
      BBInfo &MInfo = MBBInfoMap[&MBB];

      // Our vregsRequired should be identical to LiveVariables' AliveBlocks
      if (MInfo.vregsRequired.count(Reg)) {
        if (!VI.AliveBlocks.test(MBB.getNumber())) {
          report("LiveVariables: Block missing from AliveBlocks", &MBB);
          OS << "Virtual register " << printReg(Reg)
             << " must be live through the block.\n";
        }
      } else {
        if (VI.AliveBlocks.test(MBB.getNumber())) {
          report("LiveVariables: Block should not be in AliveBlocks", &MBB);
          OS << "Virtual register " << printReg(Reg)
             << " is not needed live through the block.\n";
        }
      }
    }
  }
}
3599
// Verify every virtual register interval and every cached regunit range
// maintained by LiveIntervals.
void MachineVerifier::verifyLiveIntervals() {
  assert(LiveInts && "Don't call verifyLiveIntervals without LiveInts");
  for (unsigned I = 0, E = MRI->getNumVirtRegs(); I != E; ++I) {
    // NOTE(review): the declaration binding 'Reg' (presumably
    // Register::index2VirtReg(I)) is elided from this view of the file.

    // Spilling and splitting may leave unused registers around. Skip them.
    if (MRI->reg_nodbg_empty(Reg))
      continue;

    if (!LiveInts->hasInterval(Reg)) {
      report("Missing live interval for virtual register", MF);
      OS << printReg(Reg, TRI) << " still has defs or uses\n";
      continue;
    }

    const LiveInterval &LI = LiveInts->getInterval(Reg);
    assert(Reg == LI.reg() && "Invalid reg to interval mapping");
    verifyLiveInterval(LI);
  }

  // Verify all the cached regunit intervals.
  for (MCRegUnit Unit : TRI->regunits())
    if (const LiveRange *LR = LiveInts->getCachedRegUnit(Unit))
      verifyLiveRange(*LR, VirtRegOrUnit(Unit));
}
3625
3626void MachineVerifier::verifyLiveRangeValue(const LiveRange &LR,
3627 const VNInfo *VNI,
3628 VirtRegOrUnit VRegOrUnit,
3629 LaneBitmask LaneMask) {
3630 if (VNI->isUnused())
3631 return;
3632
3633 const VNInfo *DefVNI = LR.getVNInfoAt(VNI->def);
3634
3635 if (!DefVNI) {
3636 report("Value not live at VNInfo def and not marked unused", MF);
3637 report_context(LR, VRegOrUnit, LaneMask);
3638 report_context(*VNI);
3639 return;
3640 }
3641
3642 if (DefVNI != VNI) {
3643 report("Live segment at def has different VNInfo", MF);
3644 report_context(LR, VRegOrUnit, LaneMask);
3645 report_context(*VNI);
3646 return;
3647 }
3648
3649 const MachineBasicBlock *MBB = LiveInts->getMBBFromIndex(VNI->def);
3650 if (!MBB) {
3651 report("Invalid VNInfo definition index", MF);
3652 report_context(LR, VRegOrUnit, LaneMask);
3653 report_context(*VNI);
3654 return;
3655 }
3656
3657 if (VNI->isPHIDef()) {
3658 if (VNI->def != LiveInts->getMBBStartIdx(MBB)) {
3659 report("PHIDef VNInfo is not defined at MBB start", MBB);
3660 report_context(LR, VRegOrUnit, LaneMask);
3661 report_context(*VNI);
3662 }
3663 return;
3664 }
3665
3666 // Non-PHI def.
3667 const MachineInstr *MI = LiveInts->getInstructionFromIndex(VNI->def);
3668 if (!MI) {
3669 report("No instruction at VNInfo def index", MBB);
3670 report_context(LR, VRegOrUnit, LaneMask);
3671 report_context(*VNI);
3672 return;
3673 }
3674
3675 bool hasDef = false;
3676 bool isEarlyClobber = false;
3677 for (ConstMIBundleOperands MOI(*MI); MOI.isValid(); ++MOI) {
3678 if (!MOI->isReg() || !MOI->isDef())
3679 continue;
3680 if (VRegOrUnit.isVirtualReg()) {
3681 if (MOI->getReg() != VRegOrUnit.asVirtualReg())
3682 continue;
3683 } else {
3684 if (!MOI->getReg().isPhysical() ||
3685 !TRI->hasRegUnit(MOI->getReg(), VRegOrUnit.asMCRegUnit()))
3686 continue;
3687 }
3688 if (LaneMask.any() &&
3689 (TRI->getSubRegIndexLaneMask(MOI->getSubReg()) & LaneMask).none())
3690 continue;
3691 hasDef = true;
3692 if (MOI->isEarlyClobber())
3693 isEarlyClobber = true;
3694 }
3695
3696 if (!hasDef) {
3697 report("Defining instruction does not modify register", MI);
3698 report_context(LR, VRegOrUnit, LaneMask);
3699 report_context(*VNI);
3700 }
3701
3702 // Early clobber defs begin at USE slots, but other defs must begin at
3703 // DEF slots.
3704 if (isEarlyClobber) {
3705 if (!VNI->def.isEarlyClobber()) {
3706 report("Early clobber def must be at an early-clobber slot", MBB);
3707 report_context(LR, VRegOrUnit, LaneMask);
3708 report_context(*VNI);
3709 }
3710 } else if (!VNI->def.isRegister()) {
3711 report("Non-PHI, non-early clobber def must be at a register slot", MBB);
3712 report_context(LR, VRegOrUnit, LaneMask);
3713 report_context(*VNI);
3714 }
3715}
3716
3717void MachineVerifier::verifyLiveRangeSegment(const LiveRange &LR,
3719 VirtRegOrUnit VRegOrUnit,
3720 LaneBitmask LaneMask) {
3721 const LiveRange::Segment &S = *I;
3722 const VNInfo *VNI = S.valno;
3723 assert(VNI && "Live segment has no valno");
3724
3725 if (VNI->id >= LR.getNumValNums() || VNI != LR.getValNumInfo(VNI->id)) {
3726 report("Foreign valno in live segment", MF);
3727 report_context(LR, VRegOrUnit, LaneMask);
3728 report_context(S);
3729 report_context(*VNI);
3730 }
3731
3732 if (VNI->isUnused()) {
3733 report("Live segment valno is marked unused", MF);
3734 report_context(LR, VRegOrUnit, LaneMask);
3735 report_context(S);
3736 }
3737
3738 const MachineBasicBlock *MBB = LiveInts->getMBBFromIndex(S.start);
3739 if (!MBB) {
3740 report("Bad start of live segment, no basic block", MF);
3741 report_context(LR, VRegOrUnit, LaneMask);
3742 report_context(S);
3743 return;
3744 }
3745 SlotIndex MBBStartIdx = LiveInts->getMBBStartIdx(MBB);
3746 if (S.start != MBBStartIdx && S.start != VNI->def) {
3747 report("Live segment must begin at MBB entry or valno def", MBB);
3748 report_context(LR, VRegOrUnit, LaneMask);
3749 report_context(S);
3750 }
3751
3752 const MachineBasicBlock *EndMBB =
3753 LiveInts->getMBBFromIndex(S.end.getPrevSlot());
3754 if (!EndMBB) {
3755 report("Bad end of live segment, no basic block", MF);
3756 report_context(LR, VRegOrUnit, LaneMask);
3757 report_context(S);
3758 return;
3759 }
3760
3761 // Checks for non-live-out segments.
3762 if (S.end != LiveInts->getMBBEndIdx(EndMBB)) {
3763 // RegUnit intervals are allowed dead phis.
3764 if (!VRegOrUnit.isVirtualReg() && VNI->isPHIDef() && S.start == VNI->def &&
3765 S.end == VNI->def.getDeadSlot())
3766 return;
3767
3768 // The live segment is ending inside EndMBB
3769 const MachineInstr *MI =
3770 LiveInts->getInstructionFromIndex(S.end.getPrevSlot());
3771 if (!MI) {
3772 report("Live segment doesn't end at a valid instruction", EndMBB);
3773 report_context(LR, VRegOrUnit, LaneMask);
3774 report_context(S);
3775 return;
3776 }
3777
3778 // The block slot must refer to a basic block boundary.
3779 if (S.end.isBlock()) {
3780 report("Live segment ends at B slot of an instruction", EndMBB);
3781 report_context(LR, VRegOrUnit, LaneMask);
3782 report_context(S);
3783 }
3784
3785 if (S.end.isDead()) {
3786 // Segment ends on the dead slot.
3787 // That means there must be a dead def.
3788 if (!SlotIndex::isSameInstr(S.start, S.end)) {
3789 report("Live segment ending at dead slot spans instructions", EndMBB);
3790 report_context(LR, VRegOrUnit, LaneMask);
3791 report_context(S);
3792 }
3793 }
3794
3795 // After tied operands are rewritten, a live segment can only end at an
3796 // early-clobber slot if it is being redefined by an early-clobber def.
3797 // TODO: Before tied operands are rewritten, a live segment can only end at
3798 // an early-clobber slot if the last use is tied to an early-clobber def.
3799 if (MF->getProperties().hasTiedOpsRewritten() && S.end.isEarlyClobber()) {
3800 if (I + 1 == LR.end() || (I + 1)->start != S.end) {
3801 report("Live segment ending at early clobber slot must be "
3802 "redefined by an EC def in the same instruction",
3803 EndMBB);
3804 report_context(LR, VRegOrUnit, LaneMask);
3805 report_context(S);
3806 }
3807 }
3808
3809 // The following checks only apply to virtual registers. Physreg liveness
3810 // is too weird to check.
3811 if (VRegOrUnit.isVirtualReg()) {
3812 // A live segment can end with either a redefinition, a kill flag on a
3813 // use, or a dead flag on a def.
3814 bool hasRead = false;
3815 bool hasSubRegDef = false;
3816 bool hasDeadDef = false;
3817 for (ConstMIBundleOperands MOI(*MI); MOI.isValid(); ++MOI) {
3818 if (!MOI->isReg() || MOI->getReg() != VRegOrUnit.asVirtualReg())
3819 continue;
3820 unsigned Sub = MOI->getSubReg();
3821 LaneBitmask SLM =
3822 Sub != 0 ? TRI->getSubRegIndexLaneMask(Sub) : LaneBitmask::getAll();
3823 if (MOI->isDef()) {
3824 if (Sub != 0) {
3825 hasSubRegDef = true;
3826 // An operand %0:sub0 reads %0:sub1..n. Invert the lane
3827 // mask for subregister defs. Read-undef defs will be handled by
3828 // readsReg below.
3829 SLM = ~SLM;
3830 }
3831 if (MOI->isDead())
3832 hasDeadDef = true;
3833 }
3834 if (LaneMask.any() && (LaneMask & SLM).none())
3835 continue;
3836 if (MOI->readsReg())
3837 hasRead = true;
3838 }
3839 if (S.end.isDead()) {
3840 // Make sure that the corresponding machine operand for a "dead" live
3841 // range has the dead flag. We cannot perform this check for subregister
3842 // liveranges as partially dead values are allowed.
3843 if (LaneMask.none() && !hasDeadDef) {
3844 report(
3845 "Instruction ending live segment on dead slot has no dead flag",
3846 MI);
3847 report_context(LR, VRegOrUnit, LaneMask);
3848 report_context(S);
3849 }
3850 } else {
3851 if (!hasRead) {
3852 // When tracking subregister liveness, the main range must start new
3853 // values on partial register writes, even if there is no read.
3854 if (!MRI->shouldTrackSubRegLiveness(VRegOrUnit.asVirtualReg()) ||
3855 LaneMask.any() || !hasSubRegDef) {
3856 report("Instruction ending live segment doesn't read the register",
3857 MI);
3858 report_context(LR, VRegOrUnit, LaneMask);
3859 report_context(S);
3860 }
3861 }
3862 }
3863 }
3864 }
3865
3866 // Now check all the basic blocks in this live segment.
3868 // Is this live segment the beginning of a non-PHIDef VN?
3869 if (S.start == VNI->def && !VNI->isPHIDef()) {
3870 // Not live-in to any blocks.
3871 if (MBB == EndMBB)
3872 return;
3873 // Skip this block.
3874 ++MFI;
3875 }
3876
3878 if (LaneMask.any()) {
3879 LiveInterval &OwnerLI = LiveInts->getInterval(VRegOrUnit.asVirtualReg());
3880 OwnerLI.computeSubRangeUndefs(Undefs, LaneMask, *MRI, *Indexes);
3881 }
3882
3883 while (true) {
3884 assert(LiveInts->isLiveInToMBB(LR, &*MFI));
3885 // We don't know how to track physregs into a landing pad.
3886 if (!VRegOrUnit.isVirtualReg() && MFI->isEHPad()) {
3887 if (&*MFI == EndMBB)
3888 break;
3889 ++MFI;
3890 continue;
3891 }
3892
3893 // Is VNI a PHI-def in the current block?
3894 bool IsPHI = VNI->isPHIDef() &&
3895 VNI->def == LiveInts->getMBBStartIdx(&*MFI);
3896
3897 // Check that VNI is live-out of all predecessors.
3898 for (const MachineBasicBlock *Pred : MFI->predecessors()) {
3899 SlotIndex PEnd = LiveInts->getMBBEndIdx(Pred);
3900 // Predecessor of landing pad live-out on last call.
3901 if (MFI->isEHPad()) {
3902 for (const MachineInstr &MI : llvm::reverse(*Pred)) {
3903 if (MI.isCall()) {
3904 PEnd = Indexes->getInstructionIndex(MI).getBoundaryIndex();
3905 break;
3906 }
3907 }
3908 }
3909 const VNInfo *PVNI = LR.getVNInfoBefore(PEnd);
3910
3911 // All predecessors must have a live-out value. However for a phi
3912 // instruction with subregister intervals
3913 // only one of the subregisters (not necessarily the current one) needs to
3914 // be defined.
3915 if (!PVNI && (LaneMask.none() || !IsPHI)) {
3916 if (LiveRangeCalc::isJointlyDominated(Pred, Undefs, *Indexes))
3917 continue;
3918 report("Register not marked live out of predecessor", Pred);
3919 report_context(LR, VRegOrUnit, LaneMask);
3920 report_context(*VNI);
3921 OS << " live into " << printMBBReference(*MFI) << '@'
3922 << LiveInts->getMBBStartIdx(&*MFI) << ", not live before " << PEnd
3923 << '\n';
3924 continue;
3925 }
3926
3927 // Only PHI-defs can take different predecessor values.
3928 if (!IsPHI && PVNI != VNI) {
3929 report("Different value live out of predecessor", Pred);
3930 report_context(LR, VRegOrUnit, LaneMask);
3931 OS << "Valno #" << PVNI->id << " live out of "
3932 << printMBBReference(*Pred) << '@' << PEnd << "\nValno #" << VNI->id
3933 << " live into " << printMBBReference(*MFI) << '@'
3934 << LiveInts->getMBBStartIdx(&*MFI) << '\n';
3935 }
3936 }
3937 if (&*MFI == EndMBB)
3938 break;
3939 ++MFI;
3940 }
3941}
3942
3943void MachineVerifier::verifyLiveRange(const LiveRange &LR,
3944 VirtRegOrUnit VRegOrUnit,
3945 LaneBitmask LaneMask) {
3946 for (const VNInfo *VNI : LR.valnos)
3947 verifyLiveRangeValue(LR, VNI, VRegOrUnit, LaneMask);
3948
3949 for (LiveRange::const_iterator I = LR.begin(), E = LR.end(); I != E; ++I)
3950 verifyLiveRangeSegment(LR, I, VRegOrUnit, LaneMask);
3951}
3952
3953void MachineVerifier::verifyLiveInterval(const LiveInterval &LI) {
3954 Register Reg = LI.reg();
3955 assert(Reg.isVirtual());
3956 verifyLiveRange(LI, VirtRegOrUnit(Reg));
3957
3958 if (LI.hasSubRanges()) {
3960 LaneBitmask MaxMask = MRI->getMaxLaneMaskForVReg(Reg);
3961 for (const LiveInterval::SubRange &SR : LI.subranges()) {
3962 if ((Mask & SR.LaneMask).any()) {
3963 report("Lane masks of sub ranges overlap in live interval", MF);
3964 report_context(LI);
3965 }
3966 if ((SR.LaneMask & ~MaxMask).any()) {
3967 report("Subrange lanemask is invalid", MF);
3968 report_context(LI);
3969 }
3970 if (SR.empty()) {
3971 report("Subrange must not be empty", MF);
3972 report_context(SR, VirtRegOrUnit(LI.reg()), SR.LaneMask);
3973 }
3974 Mask |= SR.LaneMask;
3975 verifyLiveRange(SR, VirtRegOrUnit(LI.reg()), SR.LaneMask);
3976 if (!LI.covers(SR)) {
3977 report("A Subrange is not covered by the main range", MF);
3978 report_context(LI);
3979 }
3980 }
3981 }
3982
3983 // Check the LI only has one connected component.
3984 ConnectedVNInfoEqClasses ConEQ(*LiveInts);
3985 unsigned NumComp = ConEQ.Classify(LI);
3986 if (NumComp > 1) {
3987 report("Multiple connected components in live interval", MF);
3988 report_context(LI);
3989 for (unsigned comp = 0; comp != NumComp; ++comp) {
3990 OS << comp << ": valnos";
3991 for (const VNInfo *I : LI.valnos)
3992 if (comp == ConEQ.getEqClass(I))
3993 OS << ' ' << I->id;
3994 OS << '\n';
3995 }
3996 }
3997}
3998
namespace {

// FrameSetup and FrameDestroy can have zero adjustment, so using a single
// integer, we can't tell whether it is a FrameSetup or FrameDestroy if the
// value is zero.
// We use a bool plus an integer to capture the stack state.
struct StackStateOfBB {
  StackStateOfBB() = default;
  StackStateOfBB(int EntryVal, int ExitVal, bool EntrySetup, bool ExitSetup)
      : EntryValue(EntryVal), ExitValue(ExitVal), EntryIsSetup(EntrySetup),
        ExitIsSetup(ExitSetup) {}

  // Can be negative, which means we are setting up a frame.
  int EntryValue = 0;
  int ExitValue = 0;
  // True while a FrameSetup is open (i.e. inside a call-frame region).
  bool EntryIsSetup = false;
  bool ExitIsSetup = false;
};

} // end anonymous namespace
4019
4020/// Make sure on every path through the CFG, a FrameSetup <n> is always followed
4021/// by a FrameDestroy <n>, stack adjustments are identical on all
4022/// CFG edges to a merge point, and frame is destroyed at end of a return block.
4023void MachineVerifier::verifyStackFrame() {
4024 unsigned FrameSetupOpcode = TII->getCallFrameSetupOpcode();
4025 unsigned FrameDestroyOpcode = TII->getCallFrameDestroyOpcode();
4026 if (FrameSetupOpcode == ~0u && FrameDestroyOpcode == ~0u)
4027 return;
4028
4030 SPState.resize(MF->getNumBlockIDs());
4032
4033 // Visit the MBBs in DFS order.
4034 for (df_ext_iterator<const MachineFunction *,
4036 DFI = df_ext_begin(MF, Reachable), DFE = df_ext_end(MF, Reachable);
4037 DFI != DFE; ++DFI) {
4038 const MachineBasicBlock *MBB = *DFI;
4039
4040 StackStateOfBB BBState;
4041 // Check the exit state of the DFS stack predecessor.
4042 if (DFI.getPathLength() >= 2) {
4043 const MachineBasicBlock *StackPred = DFI.getPath(DFI.getPathLength() - 2);
4044 assert(Reachable.count(StackPred) &&
4045 "DFS stack predecessor is already visited.\n");
4046 BBState.EntryValue = SPState[StackPred->getNumber()].ExitValue;
4047 BBState.EntryIsSetup = SPState[StackPred->getNumber()].ExitIsSetup;
4048 BBState.ExitValue = BBState.EntryValue;
4049 BBState.ExitIsSetup = BBState.EntryIsSetup;
4050 }
4051
4052 if ((int)MBB->getCallFrameSize() != -BBState.EntryValue) {
4053 report("Call frame size on entry does not match value computed from "
4054 "predecessor",
4055 MBB);
4056 OS << "Call frame size on entry " << MBB->getCallFrameSize()
4057 << " does not match value computed from predecessor "
4058 << -BBState.EntryValue << '\n';
4059 }
4060
4061 // Update stack state by checking contents of MBB.
4062 for (const auto &I : *MBB) {
4063 if (I.getOpcode() == FrameSetupOpcode) {
4064 if (BBState.ExitIsSetup)
4065 report("FrameSetup is after another FrameSetup", &I);
4066 if (!MRI->isSSA() && !MF->getFrameInfo().adjustsStack())
4067 report("AdjustsStack not set in presence of a frame pseudo "
4068 "instruction.", &I);
4069 BBState.ExitValue -= TII->getFrameTotalSize(I);
4070 BBState.ExitIsSetup = true;
4071 }
4072
4073 if (I.getOpcode() == FrameDestroyOpcode) {
4074 int Size = TII->getFrameTotalSize(I);
4075 if (!BBState.ExitIsSetup)
4076 report("FrameDestroy is not after a FrameSetup", &I);
4077 int AbsSPAdj = BBState.ExitValue < 0 ? -BBState.ExitValue :
4078 BBState.ExitValue;
4079 if (BBState.ExitIsSetup && AbsSPAdj != Size) {
4080 report("FrameDestroy <n> is after FrameSetup <m>", &I);
4081 OS << "FrameDestroy <" << Size << "> is after FrameSetup <"
4082 << AbsSPAdj << ">.\n";
4083 }
4084 if (!MRI->isSSA() && !MF->getFrameInfo().adjustsStack())
4085 report("AdjustsStack not set in presence of a frame pseudo "
4086 "instruction.", &I);
4087 BBState.ExitValue += Size;
4088 BBState.ExitIsSetup = false;
4089 }
4090 }
4091 SPState[MBB->getNumber()] = BBState;
4092
4093 // Make sure the exit state of any predecessor is consistent with the entry
4094 // state.
4095 for (const MachineBasicBlock *Pred : MBB->predecessors()) {
4096 if (Reachable.count(Pred) &&
4097 (SPState[Pred->getNumber()].ExitValue != BBState.EntryValue ||
4098 SPState[Pred->getNumber()].ExitIsSetup != BBState.EntryIsSetup)) {
4099 report("The exit stack state of a predecessor is inconsistent.", MBB);
4100 OS << "Predecessor " << printMBBReference(*Pred) << " has exit state ("
4101 << SPState[Pred->getNumber()].ExitValue << ", "
4102 << SPState[Pred->getNumber()].ExitIsSetup << "), while "
4103 << printMBBReference(*MBB) << " has entry state ("
4104 << BBState.EntryValue << ", " << BBState.EntryIsSetup << ").\n";
4105 }
4106 }
4107
4108 // Make sure the entry state of any successor is consistent with the exit
4109 // state.
4110 for (const MachineBasicBlock *Succ : MBB->successors()) {
4111 if (Reachable.count(Succ) &&
4112 (SPState[Succ->getNumber()].EntryValue != BBState.ExitValue ||
4113 SPState[Succ->getNumber()].EntryIsSetup != BBState.ExitIsSetup)) {
4114 report("The entry stack state of a successor is inconsistent.", MBB);
4115 OS << "Successor " << printMBBReference(*Succ) << " has entry state ("
4116 << SPState[Succ->getNumber()].EntryValue << ", "
4117 << SPState[Succ->getNumber()].EntryIsSetup << "), while "
4118 << printMBBReference(*MBB) << " has exit state ("
4119 << BBState.ExitValue << ", " << BBState.ExitIsSetup << ").\n";
4120 }
4121 }
4122
4123 // Make sure a basic block with return ends with zero stack adjustment.
4124 if (!MBB->empty() && MBB->back().isReturn()) {
4125 if (BBState.ExitIsSetup)
4126 report("A return block ends with a FrameSetup.", MBB);
4127 if (BBState.ExitValue)
4128 report("A return block ends with a nonzero stack adjustment.", MBB);
4129 }
4130 }
4131}
4132
4133void MachineVerifier::verifyStackProtector() {
4134 const MachineFrameInfo &MFI = MF->getFrameInfo();
4135 if (!MFI.hasStackProtectorIndex())
4136 return;
4137 // Only applicable when the offsets of frame objects have been determined,
4138 // which is indicated by a non-zero stack size.
4139 if (!MFI.getStackSize())
4140 return;
4141 const TargetFrameLowering &TFI = *MF->getSubtarget().getFrameLowering();
4142 bool StackGrowsDown =
4144 unsigned FI = MFI.getStackProtectorIndex();
4145 int64_t SPStart = MFI.getObjectOffset(FI);
4146 int64_t SPEnd = SPStart + MFI.getObjectSize(FI);
4147 for (unsigned I = 0, E = MFI.getObjectIndexEnd(); I != E; ++I) {
4148 if (I == FI)
4149 continue;
4150 if (MFI.isDeadObjectIndex(I))
4151 continue;
4152 // FIXME: Skip non-default stack objects, as some targets may place them
4153 // above the stack protector. This is a workaround for the fact that
4154 // backends such as AArch64 may place SVE stack objects *above* the stack
4155 // protector.
4157 continue;
4158 // Skip variable-sized objects because they do not have a fixed offset.
4160 continue;
4161 // FIXME: Skip spill slots which may be allocated above the stack protector.
4162 // Ideally this would only skip callee-saved registers, but we don't have
4163 // that information here. For example, spill-slots used for scavenging are
4164 // not described in CalleeSavedInfo.
4165 if (MFI.isSpillSlotObjectIndex(I))
4166 continue;
4167 int64_t ObjStart = MFI.getObjectOffset(I);
4168 int64_t ObjEnd = ObjStart + MFI.getObjectSize(I);
4169 if (SPStart < ObjEnd && ObjStart < SPEnd) {
4170 report("Stack protector overlaps with another stack object", MF);
4171 break;
4172 }
4173 if ((StackGrowsDown && SPStart <= ObjStart) ||
4174 (!StackGrowsDown && SPStart >= ObjStart)) {
4175 report("Stack protector is not the top-most object on the stack", MF);
4176 break;
4177 }
4178 }
4179}
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
aarch64 promote const
static bool isLoad(int Opcode)
static bool isStore(int Opcode)
MachineBasicBlock & MBB
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
MachineBasicBlock MachineBasicBlock::iterator MBBI
This file implements the BitVector class.
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
This file contains the declarations for the subclasses of Constant, which represent the different fla...
This file defines the DenseMap class.
This file defines the DenseSet and SmallDenseSet classes.
This file builds on the ADT/GraphTraits.h file to build generic depth first graph iterator.
Declares convenience wrapper classes for interpreting MachineInstr instances as specific generic oper...
const HexagonInstrInfo * TII
IRTranslator LLVM IR MI
std::pair< Instruction::BinaryOps, Value * > OffsetOp
Find all possible pairs (BinOp, RHS) that BinOp V, RHS can be simplified.
const size_t AbstractManglingParser< Derived, Alloc >::NumOps
A common definition of LaneBitmask for use in TableGen and CodeGen.
Implement a low-level type suitable for MachineInstr level instruction selection.
#define F(x, y, z)
Definition MD5.cpp:54
#define I(x, y, z)
Definition MD5.cpp:57
print mir2vec MIR2Vec Vocabulary Printer Pass
Definition MIR2Vec.cpp:598
This file declares the MIR specialization of the GenericConvergenceVerifier template.
Register Reg
Register const TargetRegisterInfo * TRI
static void verifyConvergenceControl(const MachineFunction &MF, MachineDominatorTree &DT, std::function< void(const Twine &Message)> FailureCB, raw_ostream &OS)
Promote Memory to Register
Definition Mem2Reg.cpp:110
modulo schedule Modulo Schedule test pass
#define P(N)
ppc ctr loops verify
#define INITIALIZE_PASS(passName, arg, name, cfg, analysis)
Definition PassSupport.h:56
This file builds on the ADT/GraphTraits.h file to build a generic graph post order iterator.
const SmallVectorImpl< MachineOperand > MachineBasicBlock * TBB
const SmallVectorImpl< MachineOperand > & Cond
static bool isValid(const char C)
Returns true if C is a valid mangled character: <0-9a-zA-Z_>.
static bool isLiveOut(const MachineBasicBlock &MBB, unsigned Reg)
SI Optimize VGPR LiveRange
std::unordered_set< BasicBlock * > BlockSet
This file contains some templates that are useful if you are working with the STL at all.
This file defines generic set operations that may be used on set's of different types,...
This file defines the SmallPtrSet class.
This file defines the SmallVector class.
This file describes how to lower LLVM code to machine code.
static unsigned getSize(unsigned Kind)
static LLVM_ABI unsigned getSizeInBits(const fltSemantics &Sem)
Returns the size of the floating point number (in bits) in the given semantics.
Definition APFloat.cpp:278
const fltSemantics & getSemantics() const
Definition APFloat.h:1546
PassT::Result * getCachedResult(IRUnitT &IR) const
Get the cached result of an analysis pass for a given IR unit.
AnalysisUsage & addUsedIfAvailable()
Add the specified Pass class to the set of analyses used by this pass.
void setPreservesAll()
Set by analyses that do not transform their input at all.
Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition ArrayRef.h:40
size_t size() const
Get the array size.
Definition ArrayRef.h:141
This class holds the attributes for a particular argument, parameter, function, or return value.
Definition Attributes.h:407
LLVM Basic Block Representation.
Definition BasicBlock.h:62
bool hasAddressTaken() const
Returns true if there are any uses of this basic block other than direct branches,...
Definition BasicBlock.h:687
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction; assumes that the block is well-formed.
Definition BasicBlock.h:237
void clear()
Removes all bits from the bitvector.
Definition BitVector.h:349
iterator_range< const_set_bits_iterator > set_bits() const
Definition BitVector.h:159
ConnectedVNInfoEqClasses - Helper class that can divide VNInfos in a LiveInterval into equivalence cl...
ConstMIBundleOperands - Iterate over all operands in a const bundle of machine instructions.
ConstantFP - Floating Point Values [float, double].
Definition Constants.h:420
const APFloat & getValueAPF() const
Definition Constants.h:463
This is the shared class of boolean and integer constants.
Definition Constants.h:87
IntegerType * getIntegerType() const
Variant of the getType() method to always return an IntegerType, which reduces the amount of casting ...
Definition Constants.h:198
unsigned getBitWidth() const
getBitWidth - Return the scalar bitwidth of this constant.
Definition Constants.h:162
A parsed version of the target data layout string in and methods for querying it.
Definition DataLayout.h:64
Implements a dense probed hash-table based set.
Definition DenseSet.h:279
void recalculate(ParentType &Func)
recalculate - compute a dominator tree for the given function
Register getReg() const
Base class for user error types.
Definition Error.h:354
FunctionPass class - This class is used to implement most global optimizations.
Definition Pass.h:314
const Function & getFunction() const
Definition Function.h:166
void initialize(raw_ostream *OS, function_ref< void(const Twine &Message)> FailureCB, const FunctionT &F)
bool isPredicated(const MachineInstr &MI) const override
Returns true if the instruction is already predicated.
bool analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB, MachineBasicBlock *&FBB, SmallVectorImpl< MachineOperand > &Cond, bool AllowModify) const override
Analyze the branching code at the end of MBB, returning true if it cannot be understood (e....
unsigned getBitWidth() const
Get the number of bits in this IntegerType.
constexpr bool isScalableVector() const
Returns true if the LLT is a scalable vector.
constexpr unsigned getScalarSizeInBits() const
constexpr bool isFloatOrFloatVector() const
constexpr bool isScalar() const
constexpr Kind getKind() const
LLT getScalarType() const
constexpr bool isPointerVector() const
constexpr FpSemantics getFpSemantics() const
static constexpr LLT scalar(unsigned SizeInBits)
Get a low-level scalar or aggregate "bag of bits".
constexpr bool isValid() const
constexpr uint16_t getNumElements() const
Returns the number of elements in a vector LLT.
constexpr bool isVector() const
constexpr bool isScalable() const
Returns true if the LLT is a scalable vector.
constexpr TypeSize getSizeInBits() const
Returns the total size of the type. Must only be called on sized types.
constexpr bool isPointer() const
constexpr ElementCount getElementCount() const
constexpr unsigned getAddressSpace() const
constexpr bool isPointerOrPointerVector() const
constexpr TypeSize getSizeInBytes() const
Returns the total size of the type in bytes, i.e.
LLT getElementType() const
Returns the vector's element type. Only valid for vector types.
A live range for subregisters.
LiveInterval - This class represents the liveness of a register, or stack slot.
Register reg() const
bool hasSubRanges() const
Returns true if subregister liveness information is available.
iterator_range< subrange_iterator > subranges()
LLVM_ABI void computeSubRangeUndefs(SmallVectorImpl< SlotIndex > &Undefs, LaneBitmask LaneMask, const MachineRegisterInfo &MRI, const SlotIndexes &Indexes) const
For a given lane mask LaneMask, compute indexes at which the lane is marked undefined by subregister ...
void print(raw_ostream &O, const Module *=nullptr) const override
Implement the dump method.
Result of a LiveRange query.
bool isDeadDef() const
Return true if this instruction has a dead def.
VNInfo * valueIn() const
Return the value that is live-in to the instruction.
VNInfo * valueOut() const
Return the value leaving the instruction, if any.
bool isKill() const
Return true if the live-in value is killed by this instruction.
static LLVM_ABI bool isJointlyDominated(const MachineBasicBlock *MBB, ArrayRef< SlotIndex > Defs, const SlotIndexes &Indexes)
A diagnostic function to check if the end of the block MBB is jointly dominated by the blocks corresp...
This class represents the liveness of a register, stack slot, etc.
VNInfo * getValNumInfo(unsigned ValNo)
getValNumInfo - Returns pointer to the specified val#.
Segments::const_iterator const_iterator
bool liveAt(SlotIndex index) const
LLVM_ABI bool covers(const LiveRange &Other) const
Returns true if all segments of the Other live range are completely covered by this live range.
bool empty() const
LiveQueryResult Query(SlotIndex Idx) const
Query Liveness at Idx.
VNInfo * getVNInfoBefore(SlotIndex Idx) const
getVNInfoBefore - Return the VNInfo that is live up to but not necessarily including Idx,...
bool verify() const
Walk the range and assert if any invariants fail to hold.
unsigned getNumValNums() const
iterator begin()
VNInfoList valnos
VNInfo * getVNInfoAt(SlotIndex Idx) const
getVNInfoAt - Return the VNInfo that is live at Idx, or NULL.
LLVM_ABI VarInfo & getVarInfo(Register Reg)
getVarInfo - Return the VarInfo structure for the specified VIRTUAL register.
TypeSize getValue() const
This class is intended to be used as a base class for asm properties and features specific to the tar...
Definition MCAsmInfo.h:66
ExceptionHandling getExceptionHandlingType() const
Definition MCAsmInfo.h:659
Describe properties that are true of each instruction in the target description file.
This holds information about one operand of a machine instruction, indicating the register class for ...
Definition MCInstrDesc.h:86
MCRegAliasIterator enumerates all registers aliasing Reg.
Wrapper class representing physical registers. Should be passed by value.
Definition MCRegister.h:41
const MDOperand & getOperand(unsigned I) const
Definition Metadata.h:1444
bool isValid() const
isValid - Returns true until all the operands have been visited.
bool isInlineAsmBrIndirectTarget() const
Returns true if this is the indirect dest of an INLINEASM_BR.
bool isEHPad() const
Returns true if the block is a landing pad.
iterator_range< livein_iterator > liveins() const
iterator_range< iterator > phis()
Returns a range that iterates over the phis in the basic block.
int getNumber() const
MachineBasicBlocks are uniquely numbered at the function level, unless they're not in a MachineFuncti...
const BasicBlock * getBasicBlock() const
Return the LLVM basic block that this instance corresponded to originally.
bool isIRBlockAddressTaken() const
Test whether this block is the target of an IR BlockAddress.
BasicBlock * getAddressTakenIRBlock() const
Retrieves the BasicBlock which corresponds to this MachineBasicBlock.
LLVM_ABI bool isPredecessor(const MachineBasicBlock *MBB) const
Return true if the specified MBB is a predecessor of this block.
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
unsigned getCallFrameSize() const
Return the call frame size on entry to this basic block.
iterator_range< succ_iterator > successors()
LLVM_ABI bool isSuccessor(const MachineBasicBlock *MBB) const
Return true if the specified MBB is a successor of this block.
iterator_range< pred_iterator > predecessors()
LLVM_ABI StringRef getName() const
Return the name of the corresponding LLVM basic block, or an empty string.
DominatorTree Class - Concrete subclass of DominatorTreeBase that is used to compute a normal dominat...
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
uint64_t getStackSize() const
Return the number of bytes that must be allocated to hold all of the fixed size frame objects.
int getStackProtectorIndex() const
Return the index for the stack protector object.
bool isSpillSlotObjectIndex(int ObjectIdx) const
Returns true if the specified index corresponds to a spill slot.
int64_t getObjectSize(int ObjectIdx) const
Return the size of the specified object.
LLVM_ABI BitVector getPristineRegs(const MachineFunction &MF) const
Return a set of physical registers that are pristine.
bool isVariableSizedObjectIndex(int ObjectIdx) const
Returns true if the specified index corresponds to a variable sized object.
int getObjectIndexEnd() const
Return one past the maximum frame object index.
bool hasStackProtectorIndex() const
uint8_t getStackID(int ObjectIdx) const
int64_t getObjectOffset(int ObjectIdx) const
Return the assigned stack offset of the specified object from the incoming stack pointer.
bool isDeadObjectIndex(int ObjectIdx) const
Returns true if the specified index corresponds to a dead object.
MachineFunctionPass - This class adapts the FunctionPass interface to allow convenient creation of pa...
void getAnalysisUsage(AnalysisUsage &AU) const override
getAnalysisUsage - Subclasses that override getAnalysisUsage must call this.
Properties which a MachineFunction may have at a given point in time.
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
StringRef getName() const
getName - Return the name of the corresponding LLVM function.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
bool verify(Pass *p=nullptr, const char *Banner=nullptr, raw_ostream *OS=nullptr, bool AbortOnError=true) const
Run the current MachineFunction through the machine code verifier, useful for debugger use.
const MachineFunctionProperties & getProperties() const
Get the function properties.
const MachineBasicBlock & front() const
void print(raw_ostream &OS, const SlotIndexes *=nullptr) const
print - Print out the MachineFunction in a format suitable for debugging to the specified stream.
const TargetMachine & getTarget() const
getTarget - Return the target machine this machine code is compiled with.
BasicBlockListType::const_iterator const_iterator
Representation of each machine instruction.
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
bool isReturn(QueryType Type=AnyInBundle) const
bool isTerminator(QueryType Type=AnyInBundle) const
Returns true if this instruction part of the terminator for a basic block.
bool isBarrier(QueryType Type=AnyInBundle) const
Returns true if the specified instruction stops control flow from executing the instruction immediate...
A description of a memory reference used in the backend.
LocationSize getSize() const
Return the size in bytes of the memory reference.
const PseudoSourceValue * getPseudoValue() const
LLT getMemoryType() const
Return the memory type of the memory reference.
const MDNode * getRanges() const
Return the range tag for the memory reference.
AtomicOrdering getSuccessOrdering() const
Return the atomic ordering requirements for this memory operation.
LocationSize getSizeInBits() const
Return the size in bits of the memory reference.
MachineOperand class - Representation of each machine instruction operand.
unsigned getSubReg() const
int64_t getImm() const
bool readsReg() const
readsReg - Returns true if this operand reads the previous value of its register.
bool isIntrinsicID() const
bool isReg() const
isReg - Tests if this is a MO_Register operand.
MachineBasicBlock * getMBB() const
ArrayRef< int > getShuffleMask() const
bool isImm() const
isImm - Tests if this is a MO_Immediate operand.
bool isValidExcessOperand() const
Return true if this operand can validly be appended to an arbitrary operand list.
bool isShuffleMask() const
LLVM_ABI void print(raw_ostream &os, const TargetRegisterInfo *TRI=nullptr) const
Print the MachineOperand to os.
LaneBitmask getLaneMask() const
unsigned getCFIIndex() const
LLVM_ABI bool isRenamable() const
isRenamable - Returns true if this register may be renamed, i.e.
MachineInstr * getParent()
getParent - Return the instruction that this operand belongs to.
MachineOperandType getType() const
getType - Returns the MachineOperandType for this operand.
bool isEarlyClobber() const
Register getReg() const
getReg - Returns the register number.
bool isInternalRead() const
bool isFI() const
isFI - Tests if this is a MO_FrameIndex operand.
static bool clobbersPhysReg(const uint32_t *RegMask, MCRegister PhysReg)
clobbersPhysReg - Returns true if this RegMask clobbers PhysReg.
const uint32_t * getRegMask() const
getRegMask - Returns a bit mask of registers preserved by this RegMask operand.
@ MO_CFIIndex
MCCFIInstruction index.
@ MO_RegisterMask
Mask of preserved registers.
@ MO_MachineBasicBlock
MachineBasicBlock reference.
@ MO_FrameIndex
Abstract Stack Frame Index.
@ MO_Register
Register operand.
bool isMBB() const
isMBB - Tests if this is a MO_MachineBasicBlock operand.
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
use_nodbg_iterator use_nodbg_begin(Register RegNo) const
LLVM_ABI void verifyUseLists() const
Verify the use list of all registers.
bool tracksLiveness() const
tracksLiveness - Returns true when tracking register liveness accurately.
static use_nodbg_iterator use_nodbg_end()
bool isReserved(MCRegister PhysReg) const
isReserved - Returns true when PhysReg is a reserved register.
const BitVector & getReservedRegs() const
getReservedRegs - Returns a reference to the frozen set of reserved registers.
LLT getType(Register Reg) const
Get the low-level type of Reg or LLT{} if Reg is not a generic (target independent) virtual register.
bool reservedRegsFrozen() const
reservedRegsFrozen - Returns true after freezeReservedRegs() was called to ensure the set of reserved...
bool def_empty(Register RegNo) const
def_empty - Return true if there are no instructions defining the specified register (it may be live-...
bool reg_nodbg_empty(Register RegNo) const
reg_nodbg_empty - Return true if the only instructions using or defining Reg are Debug instructions.
const RegisterBank * getRegBankOrNull(Register Reg) const
Return the register bank of Reg, or null if Reg has not been assigned a register bank or has been ass...
bool shouldTrackSubRegLiveness(const TargetRegisterClass &RC) const
Returns true if liveness for register class RC should be tracked at the subregister level.
bool hasOneDef(Register RegNo) const
Return true if there is exactly one operand defining the specified register.
LLVM_ABI bool isReservedRegUnit(MCRegUnit Unit) const
Returns true when the given register unit is considered reserved.
const TargetRegisterClass * getRegClassOrNull(Register Reg) const
Return the register class of Reg, or null if Reg has not been assigned a register class yet.
LLVM_ABI LaneBitmask getMaxLaneMaskForVReg(Register Reg) const
Returns a mask covering all bits that can appear in lane masks of subregisters of the virtual registe...
unsigned getNumVirtRegs() const
getNumVirtRegs - Return the number of virtual registers created.
LLVM_ABI PreservedAnalyses run(MachineFunction &MF, MachineFunctionAnalysisManager &MFAM)
ManagedStatic - This transparently changes the behavior of global statics to be lazily constructed on...
Pass interface - Implemented by all 'passes'.
Definition Pass.h:99
virtual void print(raw_ostream &OS, const Module *M) const
print - Print out the internal state of the pass.
Definition Pass.cpp:140
AnalysisType * getAnalysisIfAvailable() const
getAnalysisIfAvailable<AnalysisType>() - Subclasses use this function to get analysis information tha...
A set of analyses that are preserved following a run of a transformation pass.
Definition Analysis.h:112
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
Definition Analysis.h:118
Holds all the information related to register banks.
const RegisterBank & getRegBank(unsigned ID)
Get the register bank identified by ID.
unsigned getMaximumSize(unsigned RegBankID) const
Get the maximum size in bits that fits in the given register bank.
This class implements the register bank concept.
const char * getName() const
Get a user friendly name of this register bank.
unsigned getID() const
Get the identifier of this register bank.
Wrapper class representing virtual and physical registers.
Definition Register.h:20
static Register index2VirtReg(unsigned Index)
Convert a 0-based index to a virtual register number.
Definition Register.h:72
MCRegister asMCReg() const
Utility to check-convert this value to a MCRegister.
Definition Register.h:107
unsigned virtRegIndex() const
Convert a virtual register number to a 0-based index.
Definition Register.h:87
constexpr bool isVirtual() const
Return true if the specified register number is in the virtual register namespace.
Definition Register.h:79
constexpr unsigned id() const
Definition Register.h:100
constexpr bool isPhysical() const
Return true if the specified register number is in the physical register namespace.
Definition Register.h:83
SlotIndex - An opaque wrapper around machine indexes.
Definition SlotIndexes.h:66
static bool isSameInstr(SlotIndex A, SlotIndex B)
isSameInstr - Return true if A and B refer to the same instruction.
bool isBlock() const
isBlock - Returns true if this is a block boundary slot.
SlotIndex getDeadSlot() const
Returns the dead def kill slot for the current instruction.
bool isEarlyClobber() const
isEarlyClobber - Returns true if this is an early-clobber slot.
bool isRegister() const
isRegister - Returns true if this is a normal register use/def slot.
SlotIndex getPrevSlot() const
Returns the previous slot in the index list.
SlotIndex getRegSlot(bool EC=false) const
Returns the register use/def slot in the current instruction for a normal or early-clobber def.
bool isDead() const
isDead - Returns true if this is a dead def kill slot.
SlotIndexes pass.
MBBIndexIterator MBBIndexBegin() const
Returns an iterator for the begin of the idx2MBBMap.
MBBIndexIterator MBBIndexEnd() const
Return an iterator for the end of the idx2MBBMap.
SmallVectorImpl< IdxMBBPair >::const_iterator MBBIndexIterator
Iterator over the idx2MBBMap (sorted pairs of slot index of basic block begin and basic block)
size_type size() const
Definition SmallPtrSet.h:99
bool erase(PtrType Ptr)
Remove pointer from the set.
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
iterator begin() const
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
void resize(size_type N)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Register getReg() const
MI-level Statepoint operands.
Definition StackMaps.h:159
Represent a constant reference to a string, i.e.
Definition StringRef.h:56
Information about stack frame layout on the target.
StackDirection getStackGrowthDirection() const
getStackGrowthDirection - Return the direction the stack grows.
const MCAsmInfo & getMCAsmInfo() const
Return target specific asm information.
bool hasSuperClassEq(const TargetRegisterClass *RC) const
Returns true if RC is a super-class of or equal to this class.
LaneBitmask getLaneMask() const
Returns the combination of all lane masks of register in this class.
virtual const RegisterBankInfo * getRegBankInfo() const
If the information for the register banks is available, return it.
virtual const TargetInstrInfo * getInstrInfo() const
virtual const TargetRegisterInfo * getRegisterInfo() const =0
Return the target's register information.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition Twine.h:82
LLVM_ABI std::string str() const
Return the twine contents as a std::string.
Definition Twine.cpp:17
static constexpr TypeSize getZero()
Definition TypeSize.h:349
VNInfo - Value Number Information.
bool isUnused() const
Returns true if this value is unused.
unsigned id
The ID number of this value.
SlotIndex def
The index of the defining instruction.
bool isPHIDef() const
Returns true if this value is defined by a PHI instruction (or was, PHI instructions may have been el...
LLVM Value Representation.
Definition Value.h:75
Wrapper class representing a virtual register or register unit.
Definition Register.h:181
constexpr bool isVirtualReg() const
Definition Register.h:197
constexpr MCRegUnit asMCRegUnit() const
Definition Register.h:201
constexpr Register asVirtualReg() const
Definition Register.h:206
std::pair< iterator, bool > insert(const ValueT &V)
Definition DenseSet.h:202
constexpr bool isNonZero() const
Definition TypeSize.h:155
static constexpr bool isKnownLT(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
Definition TypeSize.h:216
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
Definition TypeSize.h:168
constexpr ScalarTy getKnownMinValue() const
Returns the minimum value this quantity can represent.
Definition TypeSize.h:165
static constexpr bool isKnownGT(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
Definition TypeSize.h:223
static constexpr bool isKnownGE(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
Definition TypeSize.h:237
self_iterator getIterator()
Definition ilist_node.h:123
NodeTy * getNextNode()
Get the next node, or nullptr for the list tail.
Definition ilist_node.h:348
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition raw_ostream.h:53
Changed
constexpr char Attrs[]
Key for Kernel::Metadata::mAttrs.
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
LLVM_ABI AttributeSet getFnAttributes(LLVMContext &C, ID id)
Return the function attributes for an intrinsic.
@ OPERAND_IMMEDIATE
Definition MCInstrDesc.h:61
std::enable_if_t< detail::IsValidPointer< X, Y >::value, X * > extract(Y &&MD)
Extract a Value from Metadata.
Definition Metadata.h:668
NodeAddr< DefNode * > Def
Definition RDFGraph.h:384
NodeAddr< PhiNode * > Phi
Definition RDFGraph.h:390
NodeAddr< FuncNode * > Func
Definition RDFGraph.h:393
iterator end() const
Definition BasicBlock.h:89
LLVM_ABI iterator begin() const
This is an optimization pass for GlobalISel generic memory operations.
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
Definition STLExtras.h:315
@ Offset
Definition DWP.cpp:557
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1738
auto size(R &&Range, std::enable_if_t< std::is_base_of< std::random_access_iterator_tag, typename std::iterator_traits< decltype(Range.begin())>::iterator_category >::value, void > *=nullptr)
Get the size of a range.
Definition STLExtras.h:1668
constexpr bool isInt(int64_t x)
Checks if an integer fits into the given bit width.
Definition MathExtras.h:165
@ SjLj
setjmp/longjmp based exceptions
Definition CodeGen.h:56
bool isPreISelGenericOpcode(unsigned Opcode)
Check whether the given Opcode is a generic opcode that is not supposed to appear after ISel.
auto dyn_cast_if_present(const Y &Val)
dyn_cast_if_present<X> - Functionally identical to dyn_cast, except that a null (or none in the case ...
Definition Casting.h:732
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
Definition STLExtras.h:2207
void set_subtract(S1Ty &S1, const S2Ty &S2)
set_subtract(A, B) - Compute A := A - B
Printable PrintLaneMask(LaneBitmask LaneMask)
Create Printable object to print LaneBitmasks on a raw_ostream.
Definition LaneBitmask.h:92
LLVM_ABI Printable printRegUnit(MCRegUnit Unit, const TargetRegisterInfo *TRI)
Create Printable object to print register units on a raw_ostream.
AnalysisManager< MachineFunction > MachineFunctionAnalysisManager
bool isPreISelGenericOptimizationHint(unsigned Opcode)
bool isScopedEHPersonality(EHPersonality Pers)
Returns true if this personality uses scope-style EH IR instructions: catchswitch,...
LLVM_ABI FunctionPass * createMachineVerifierPass(const std::string &Banner)
createMachineVerifierPass - This pass verifies generated machine code instructions for correctness.
LLVM_ABI void verifyMachineFunction(const std::string &Banner, const MachineFunction &MF)
auto reverse(ContainerTy &&C)
Definition STLExtras.h:407
detail::ValueMatchesPoly< M > HasValue(M Matcher)
Definition Error.h:221
df_ext_iterator< T, SetTy > df_ext_begin(const T &G, SetTy &S)
bool none_of(R &&Range, UnaryPredicate P)
Provide wrappers to std::none_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1752
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
Definition Error.cpp:163
GenericConvergenceVerifier< MachineSSAContext > MachineConvergenceVerifier
constexpr bool isUInt(uint64_t x)
Checks if an unsigned integer fits into the given bit width.
Definition MathExtras.h:189
LLVM_ABI raw_ostream & nulls()
This returns a reference to a raw_ostream which simply discards output.
bool set_union(S1Ty &S1, const S2Ty &S2)
set_union(A, B) - Compute A := A u B, return whether A changed.
LLVM_ABI EHPersonality classifyEHPersonality(const Value *Pers)
See if the given exception handling personality function is one that we understand.
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
Definition Casting.h:547
LLVM_ABI raw_fd_ostream & errs()
This returns a reference to a raw_ostream for standard error.
AtomicOrdering
Atomic ordering for LLVM's memory model.
@ Sub
Subtraction of integers.
uint16_t MCPhysReg
An unsigned integer type large enough to represent all physical registers, but not necessarily virtua...
Definition MCRegister.h:21
DWARFExpression::Operation Op
OutputIt move(R &&Range, OutputIt Out)
Provide wrappers to std::move which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1916
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:559
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Definition STLExtras.h:1946
df_ext_iterator< T, SetTy > df_ext_end(const T &G, SetTy &S)
LLVM_ABI Printable printReg(Register Reg, const TargetRegisterInfo *TRI=nullptr, unsigned SubIdx=0, const MachineRegisterInfo *MRI=nullptr)
Prints virtual and physical registers with or without a TRI instance.
LLVM_ABI Printable printMBBReference(const MachineBasicBlock &MBB)
Prints a machine basic block reference.
Implement std::hash so that hash_code can be used in STL containers.
Definition BitVector.h:874
static constexpr LaneBitmask getAll()
Definition LaneBitmask.h:82
constexpr bool none() const
Definition LaneBitmask.h:52
constexpr bool any() const
Definition LaneBitmask.h:53
static constexpr LaneBitmask getNone()
Definition LaneBitmask.h:81
This represents a simple continuous liveness interval for a value.
VarInfo - This represents the regions where a virtual register is live in the program.
Pair of physical register and lane mask.