// LLVM 17.0.0git
// MachineVerifier.cpp
// (doxygen source-listing header; kept as a comment so it is not parsed as code)
1//===- MachineVerifier.cpp - Machine Code Verifier ------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// Pass to verify generated machine code. The following is checked:
10//
11// Operand counts: All explicit operands must be present.
12//
13// Register classes: All physical and virtual register operands must be
14// compatible with the register class required by the instruction descriptor.
15//
16// Register live intervals: Registers must be defined only once, and must be
17// defined before use.
18//
19// The machine code verifier is enabled with the command-line option
20// -verify-machineinstrs.
21//===----------------------------------------------------------------------===//
22
23#include "llvm/ADT/BitVector.h"
24#include "llvm/ADT/DenseMap.h"
25#include "llvm/ADT/DenseSet.h"
28#include "llvm/ADT/STLExtras.h"
32#include "llvm/ADT/StringRef.h"
33#include "llvm/ADT/Twine.h"
58#include "llvm/IR/BasicBlock.h"
59#include "llvm/IR/Constants.h"
61#include "llvm/IR/Function.h"
62#include "llvm/IR/InlineAsm.h"
65#include "llvm/MC/LaneBitmask.h"
66#include "llvm/MC/MCAsmInfo.h"
67#include "llvm/MC/MCDwarf.h"
68#include "llvm/MC/MCInstrDesc.h"
71#include "llvm/Pass.h"
76#include "llvm/Support/ModRef.h"
79#include <algorithm>
80#include <cassert>
81#include <cstddef>
82#include <cstdint>
83#include <iterator>
84#include <string>
85#include <utility>
86
87using namespace llvm;
88
89namespace {
90
91 struct MachineVerifier {
92 MachineVerifier(Pass *pass, const char *b) : PASS(pass), Banner(b) {}
93
94 unsigned verify(const MachineFunction &MF);
95
96 Pass *const PASS;
97 const char *Banner;
98 const MachineFunction *MF;
99 const TargetMachine *TM;
100 const TargetInstrInfo *TII;
101 const TargetRegisterInfo *TRI;
103 const RegisterBankInfo *RBI;
104
105 unsigned foundErrors;
106
107 // Avoid querying the MachineFunctionProperties for each operand.
108 bool isFunctionRegBankSelected;
109 bool isFunctionSelected;
110 bool isFunctionTracksDebugUserValues;
111
112 using RegVector = SmallVector<Register, 16>;
113 using RegMaskVector = SmallVector<const uint32_t *, 4>;
114 using RegSet = DenseSet<Register>;
117
118 const MachineInstr *FirstNonPHI;
119 const MachineInstr *FirstTerminator;
120 BlockSet FunctionBlocks;
121
122 BitVector regsReserved;
123 RegSet regsLive;
124 RegVector regsDefined, regsDead, regsKilled;
125 RegMaskVector regMasks;
126
127 SlotIndex lastIndex;
128
129 // Add Reg and any sub-registers to RV
130 void addRegWithSubRegs(RegVector &RV, Register Reg) {
131 RV.push_back(Reg);
132 if (Reg.isPhysical())
133 append_range(RV, TRI->subregs(Reg.asMCReg()));
134 }
135
136 struct BBInfo {
137 // Is this MBB reachable from the MF entry point?
138 bool reachable = false;
139
140 // Vregs that must be live in because they are used without being
141 // defined. Map value is the user. vregsLiveIn doesn't include regs
142 // that only are used by PHI nodes.
143 RegMap vregsLiveIn;
144
145 // Regs killed in MBB. They may be defined again, and will then be in both
146 // regsKilled and regsLiveOut.
147 RegSet regsKilled;
148
149 // Regs defined in MBB and live out. Note that vregs passing through may
150 // be live out without being mentioned here.
151 RegSet regsLiveOut;
152
153 // Vregs that pass through MBB untouched. This set is disjoint from
154 // regsKilled and regsLiveOut.
155 RegSet vregsPassed;
156
157 // Vregs that must pass through MBB because they are needed by a successor
158 // block. This set is disjoint from regsLiveOut.
159 RegSet vregsRequired;
160
161 // Set versions of block's predecessor and successor lists.
162 BlockSet Preds, Succs;
163
164 BBInfo() = default;
165
166 // Add register to vregsRequired if it belongs there. Return true if
167 // anything changed.
168 bool addRequired(Register Reg) {
169 if (!Reg.isVirtual())
170 return false;
171 if (regsLiveOut.count(Reg))
172 return false;
173 return vregsRequired.insert(Reg).second;
174 }
175
176 // Same for a full set.
177 bool addRequired(const RegSet &RS) {
178 bool Changed = false;
179 for (Register Reg : RS)
180 Changed |= addRequired(Reg);
181 return Changed;
182 }
183
184 // Same for a full map.
185 bool addRequired(const RegMap &RM) {
186 bool Changed = false;
187 for (const auto &I : RM)
188 Changed |= addRequired(I.first);
189 return Changed;
190 }
191
192 // Live-out registers are either in regsLiveOut or vregsPassed.
193 bool isLiveOut(Register Reg) const {
194 return regsLiveOut.count(Reg) || vregsPassed.count(Reg);
195 }
196 };
197
198 // Extra register info per MBB.
200
201 bool isReserved(Register Reg) {
202 return Reg.id() < regsReserved.size() && regsReserved.test(Reg.id());
203 }
204
205 bool isAllocatable(Register Reg) const {
206 return Reg.id() < TRI->getNumRegs() && TRI->isInAllocatableClass(Reg) &&
207 !regsReserved.test(Reg.id());
208 }
209
210 // Analysis information if available
211 LiveVariables *LiveVars;
212 LiveIntervals *LiveInts;
213 LiveStacks *LiveStks;
214 SlotIndexes *Indexes;
215
216 void visitMachineFunctionBefore();
217 void visitMachineBasicBlockBefore(const MachineBasicBlock *MBB);
218 void visitMachineBundleBefore(const MachineInstr *MI);
219
220 /// Verify that all of \p MI's virtual register operands are scalars.
221 /// \returns True if all virtual register operands are scalar. False
222 /// otherwise.
223 bool verifyAllRegOpsScalar(const MachineInstr &MI,
224 const MachineRegisterInfo &MRI);
225 bool verifyVectorElementMatch(LLT Ty0, LLT Ty1, const MachineInstr *MI);
226 void verifyPreISelGenericInstruction(const MachineInstr *MI);
227 void visitMachineInstrBefore(const MachineInstr *MI);
228 void visitMachineOperand(const MachineOperand *MO, unsigned MONum);
229 void visitMachineBundleAfter(const MachineInstr *MI);
230 void visitMachineBasicBlockAfter(const MachineBasicBlock *MBB);
231 void visitMachineFunctionAfter();
232
233 void report(const char *msg, const MachineFunction *MF);
234 void report(const char *msg, const MachineBasicBlock *MBB);
235 void report(const char *msg, const MachineInstr *MI);
236 void report(const char *msg, const MachineOperand *MO, unsigned MONum,
237 LLT MOVRegType = LLT{});
238 void report(const Twine &Msg, const MachineInstr *MI);
239
240 void report_context(const LiveInterval &LI) const;
241 void report_context(const LiveRange &LR, Register VRegUnit,
242 LaneBitmask LaneMask) const;
243 void report_context(const LiveRange::Segment &S) const;
244 void report_context(const VNInfo &VNI) const;
245 void report_context(SlotIndex Pos) const;
246 void report_context(MCPhysReg PhysReg) const;
247 void report_context_liverange(const LiveRange &LR) const;
248 void report_context_lanemask(LaneBitmask LaneMask) const;
249 void report_context_vreg(Register VReg) const;
250 void report_context_vreg_regunit(Register VRegOrUnit) const;
251
252 void verifyInlineAsm(const MachineInstr *MI);
253
254 void checkLiveness(const MachineOperand *MO, unsigned MONum);
255 void checkLivenessAtUse(const MachineOperand *MO, unsigned MONum,
256 SlotIndex UseIdx, const LiveRange &LR,
257 Register VRegOrUnit,
258 LaneBitmask LaneMask = LaneBitmask::getNone());
259 void checkLivenessAtDef(const MachineOperand *MO, unsigned MONum,
260 SlotIndex DefIdx, const LiveRange &LR,
261 Register VRegOrUnit, bool SubRangeCheck = false,
262 LaneBitmask LaneMask = LaneBitmask::getNone());
263
264 void markReachable(const MachineBasicBlock *MBB);
265 void calcRegsPassed();
266 void checkPHIOps(const MachineBasicBlock &MBB);
267
268 void calcRegsRequired();
269 void verifyLiveVariables();
270 void verifyLiveIntervals();
271 void verifyLiveInterval(const LiveInterval&);
272 void verifyLiveRangeValue(const LiveRange &, const VNInfo *, Register,
274 void verifyLiveRangeSegment(const LiveRange &,
277 void verifyLiveRange(const LiveRange &, Register,
278 LaneBitmask LaneMask = LaneBitmask::getNone());
279
280 void verifyStackFrame();
281
282 void verifySlotIndexes() const;
283 void verifyProperties(const MachineFunction &MF);
284 };
285
// Legacy-PassManager wrapper that runs MachineVerifier on every machine
// function and aborts via report_fatal_error when errors are found.
// NOTE(review): the constructor body and getAnalysisUsage are missing lines
// in this excerpt (original lines 293, 297-298, 300) -- confirm upstream.
286  struct MachineVerifierPass : public MachineFunctionPass {
287    static char ID; // Pass ID, replacement for typeid
288
289    const std::string Banner;
290
291    MachineVerifierPass(std::string banner = std::string())
292      : MachineFunctionPass(ID), Banner(std::move(banner)) {
294    }
295
296    void getAnalysisUsage(AnalysisUsage &AU) const override {
299      AU.setPreservesAll();
301    }
302
303    bool runOnMachineFunction(MachineFunction &MF) override {
304      // Skip functions that have known verification problems.
305      // FIXME: Remove this mechanism when all problematic passes have been
306      // fixed.
307      if (MF.getProperties().hasProperty(
308              MachineFunctionProperties::Property::FailsVerification))
309        return false;
310
      // A nonzero error count is fatal; the pass never mutates the function.
311      unsigned FoundErrors = MachineVerifier(this, Banner.c_str()).verify(MF);
312      if (FoundErrors)
313        report_fatal_error("Found "+Twine(FoundErrors)+" machine code errors.");
314      return false;
315    }
316  };
317
318} // end anonymous namespace
319
// Pass identity and registration with the legacy pass registry under the
// command-line name "machineverifier".
320char MachineVerifierPass::ID = 0;
321
322INITIALIZE_PASS(MachineVerifierPass, "machineverifier",
323                "Verify generated machine code", false, false)
324
326 return new MachineVerifierPass(Banner);
327}
328
330 const std::string &Banner,
331 const MachineFunction &MF) {
332 // TODO: Use MFAM after porting below analyses.
333 // LiveVariables *LiveVars;
334 // LiveIntervals *LiveInts;
335 // LiveStacks *LiveStks;
336 // SlotIndexes *Indexes;
337 unsigned FoundErrors = MachineVerifier(nullptr, Banner.c_str()).verify(MF);
338 if (FoundErrors)
339 report_fatal_error("Found " + Twine(FoundErrors) + " machine code errors.");
340}
341
342bool MachineFunction::verify(Pass *p, const char *Banner, bool AbortOnErrors)
343 const {
344 MachineFunction &MF = const_cast<MachineFunction&>(*this);
345 unsigned FoundErrors = MachineVerifier(p, Banner).verify(MF);
346 if (AbortOnErrors && FoundErrors)
347 report_fatal_error("Found "+Twine(FoundErrors)+" machine code errors.");
348 return FoundErrors == 0;
349}
350
// Sanity-check the SlotIndexes analysis: the per-MBB index list must be
// sorted in strictly increasing order.
// NOTE(review): this excerpt is missing the loop-variable declarations
// (original lines 356-357, declaring `Last` and the MBBIndexIterator `I`).
351void MachineVerifier::verifySlotIndexes() const {
352  if (Indexes == nullptr)
353    return;
354
355  // Ensure the IdxMBB list is sorted by slot indexes.
358                                            E = Indexes->MBBIndexEnd(); I != E; ++I) {
359    assert(!Last.isValid() || I->first > Last);
360    Last = I->first;
361  }
362}
363
// Cross-check declared MachineFunctionProperties against reality.
// NOTE(review): original line 369 (naming the NoVRegs property in the
// condition) is missing from this excerpt.
364void MachineVerifier::verifyProperties(const MachineFunction &MF) {
365  // If a pass has introduced virtual registers without clearing the
366  // NoVRegs property (or set it without allocating the vregs)
367  // then report an error.
368  if (MF.getProperties().hasProperty(
370      MRI->getNumVirtRegs())
371    report("Function has NoVRegs property but there are VReg operands", &MF);
372}
373
// Main entry point: walk every block, bundle, instruction and operand of MF,
// calling the visit* hooks, and return the number of errors recorded.
// NOTE(review): this excerpt is missing lines 379-380 (TII/TRI init), 385 and
// 394/396/398 (MachineFunctionProperties::Property names) -- confirm upstream.
374unsigned MachineVerifier::verify(const MachineFunction &MF) {
375  foundErrors = 0;
376
377  this->MF = &MF;
378  TM = &MF.getTarget();
381  RBI = MF.getSubtarget().getRegBankInfo();
382  MRI = &MF.getRegInfo();
383
384  const bool isFunctionFailedISel = MF.getProperties().hasProperty(
386
387  // If we're mid-GlobalISel and we already triggered the fallback path then
388  // it's expected that the MIR is somewhat broken but that's ok since we'll
389  // reset it and clear the FailedISel attribute in ResetMachineFunctions.
390  if (isFunctionFailedISel)
391    return foundErrors;
392
393  isFunctionRegBankSelected = MF.getProperties().hasProperty(
395  isFunctionSelected = MF.getProperties().hasProperty(
397  isFunctionTracksDebugUserValues = MF.getProperties().hasProperty(
399
  // Pick up whatever liveness analyses are cached on the owning pass.
400  LiveVars = nullptr;
401  LiveInts = nullptr;
402  LiveStks = nullptr;
403  Indexes = nullptr;
404  if (PASS) {
405    LiveInts = PASS->getAnalysisIfAvailable<LiveIntervals>();
406    // We don't want to verify LiveVariables if LiveIntervals is available.
407    if (!LiveInts)
408      LiveVars = PASS->getAnalysisIfAvailable<LiveVariables>();
409    LiveStks = PASS->getAnalysisIfAvailable<LiveStacks>();
410    Indexes = PASS->getAnalysisIfAvailable<SlotIndexes>();
411  }
412
413  verifySlotIndexes();
414
415  verifyProperties(MF);
416
417  visitMachineFunctionBefore();
418  for (const MachineBasicBlock &MBB : MF) {
419    visitMachineBasicBlockBefore(&MBB);
420    // Keep track of the current bundle header.
421    const MachineInstr *CurBundle = nullptr;
422    // Do we expect the next instruction to be part of the same bundle?
423    bool InBundle = false;
424
425    for (const MachineInstr &MI : MBB.instrs()) {
426      if (MI.getParent() != &MBB) {
427        report("Bad instruction parent pointer", &MBB);
428        errs() << "Instruction: " << MI;
429        continue;
430      }
431
432      // Check for consistent bundle flags.
433      if (InBundle && !MI.isBundledWithPred())
434        report("Missing BundledPred flag, "
435               "BundledSucc was set on predecessor",
436               &MI);
437      if (!InBundle && MI.isBundledWithPred())
438        report("BundledPred flag is set, "
439               "but BundledSucc not set on predecessor",
440               &MI);
441
442      // Is this a bundle header?
443      if (!MI.isInsideBundle()) {
444        if (CurBundle)
445          visitMachineBundleAfter(CurBundle);
446        CurBundle = &MI;
447        visitMachineBundleBefore(CurBundle);
448      } else if (!CurBundle)
449        report("No bundle header", &MI);
450      visitMachineInstrBefore(&MI);
451      for (unsigned I = 0, E = MI.getNumOperands(); I != E; ++I) {
452        const MachineOperand &Op = MI.getOperand(I);
453        if (Op.getParent() != &MI) {
454          // Make sure to use correct addOperand / removeOperand / ChangeTo
455          // functions when replacing operands of a MachineInstr.
456          report("Instruction has operand with wrong parent set", &MI);
457        }
458
459        visitMachineOperand(&Op, I);
460      }
461
462      // Was this the last bundled instruction?
463      InBundle = MI.isBundledWithSucc();
464    }
465    if (CurBundle)
466      visitMachineBundleAfter(CurBundle);
467    if (InBundle)
468      report("BundledSucc flag set on last instruction in block", &MBB.back());
469    visitMachineBasicBlockAfter(&MBB);
470  }
471  visitMachineFunctionAfter();
472
473  // Clean up.
474  regsLive.clear();
475  regsDefined.clear();
476  regsDead.clear();
477  regsKilled.clear();
478  regMasks.clear();
479  MBBInfoMap.clear();
480
481  return foundErrors;
482}
483
484void MachineVerifier::report(const char *msg, const MachineFunction *MF) {
485 assert(MF);
486 errs() << '\n';
487 if (!foundErrors++) {
488 if (Banner)
489 errs() << "# " << Banner << '\n';
490 if (LiveInts != nullptr)
491 LiveInts->print(errs());
492 else
493 MF->print(errs(), Indexes);
494 }
495 errs() << "*** Bad machine code: " << msg << " ***\n"
496 << "- function: " << MF->getName() << "\n";
497}
498
499void MachineVerifier::report(const char *msg, const MachineBasicBlock *MBB) {
500 assert(MBB);
501 report(msg, MBB->getParent());
502 errs() << "- basic block: " << printMBBReference(*MBB) << ' '
503 << MBB->getName() << " (" << (const void *)MBB << ')';
504 if (Indexes)
505 errs() << " [" << Indexes->getMBBStartIdx(MBB)
506 << ';' << Indexes->getMBBEndIdx(MBB) << ')';
507 errs() << '\n';
508}
509
510void MachineVerifier::report(const char *msg, const MachineInstr *MI) {
511 assert(MI);
512 report(msg, MI->getParent());
513 errs() << "- instruction: ";
514 if (Indexes && Indexes->hasIndex(*MI))
515 errs() << Indexes->getInstructionIndex(*MI) << '\t';
516 MI->print(errs(), /*IsStandalone=*/true);
517}
518
519void MachineVerifier::report(const char *msg, const MachineOperand *MO,
520 unsigned MONum, LLT MOVRegType) {
521 assert(MO);
522 report(msg, MO->getParent());
523 errs() << "- operand " << MONum << ": ";
524 MO->print(errs(), MOVRegType, TRI);
525 errs() << "\n";
526}
527
528void MachineVerifier::report(const Twine &Msg, const MachineInstr *MI) {
529 report(Msg.str().c_str(), MI);
530}
531
532void MachineVerifier::report_context(SlotIndex Pos) const {
533 errs() << "- at: " << Pos << '\n';
534}
535
536void MachineVerifier::report_context(const LiveInterval &LI) const {
537 errs() << "- interval: " << LI << '\n';
538}
539
540void MachineVerifier::report_context(const LiveRange &LR, Register VRegUnit,
541 LaneBitmask LaneMask) const {
542 report_context_liverange(LR);
543 report_context_vreg_regunit(VRegUnit);
544 if (LaneMask.any())
545 report_context_lanemask(LaneMask);
546}
547
548void MachineVerifier::report_context(const LiveRange::Segment &S) const {
549 errs() << "- segment: " << S << '\n';
550}
551
552void MachineVerifier::report_context(const VNInfo &VNI) const {
553 errs() << "- ValNo: " << VNI.id << " (def " << VNI.def << ")\n";
554}
555
556void MachineVerifier::report_context_liverange(const LiveRange &LR) const {
557 errs() << "- liverange: " << LR << '\n';
558}
559
560void MachineVerifier::report_context(MCPhysReg PReg) const {
561 errs() << "- p. register: " << printReg(PReg, TRI) << '\n';
562}
563
564void MachineVerifier::report_context_vreg(Register VReg) const {
565 errs() << "- v. register: " << printReg(VReg, TRI) << '\n';
566}
567
568void MachineVerifier::report_context_vreg_regunit(Register VRegOrUnit) const {
569 if (VRegOrUnit.isVirtual()) {
570 report_context_vreg(VRegOrUnit);
571 } else {
572 errs() << "- regunit: " << printRegUnit(VRegOrUnit, TRI) << '\n';
573 }
574}
575
576void MachineVerifier::report_context_lanemask(LaneBitmask LaneMask) const {
577 errs() << "- lanemask: " << PrintLaneMask(LaneMask) << '\n';
578}
579
580void MachineVerifier::markReachable(const MachineBasicBlock *MBB) {
581 BBInfo &MInfo = MBBInfoMap[MBB];
582 if (!MInfo.reachable) {
583 MInfo.reachable = true;
584 for (const MachineBasicBlock *Succ : MBB->successors())
585 markReachable(Succ);
586 }
587}
588
589void MachineVerifier::visitMachineFunctionBefore() {
590 lastIndex = SlotIndex();
591 regsReserved = MRI->reservedRegsFrozen() ? MRI->getReservedRegs()
592 : TRI->getReservedRegs(*MF);
593
594 if (!MF->empty())
595 markReachable(&MF->front());
596
597 // Build a set of the basic blocks in the function.
598 FunctionBlocks.clear();
599 for (const auto &MBB : *MF) {
600 FunctionBlocks.insert(&MBB);
601 BBInfo &MInfo = MBBInfoMap[&MBB];
602
603 MInfo.Preds.insert(MBB.pred_begin(), MBB.pred_end());
604 if (MInfo.Preds.size() != MBB.pred_size())
605 report("MBB has duplicate entries in its predecessor list.", &MBB);
606
607 MInfo.Succs.insert(MBB.succ_begin(), MBB.succ_end());
608 if (MInfo.Succs.size() != MBB.succ_size())
609 report("MBB has duplicate entries in its successor list.", &MBB);
610 }
611
612 // Check that the register use lists are sane.
613 MRI->verifyUseLists();
614
615 if (!MF->empty())
616 verifyStackFrame();
617}
618
// Per-block checks: live-in sanity, CFG consistency against MBBInfoMap,
// landing-pad successor counts, and agreement between analyzeBranch's answer
// and the actual successor list; finally seeds regsLive from live-ins and
// pristine registers.
// NOTE(review): this excerpt is missing original lines 631, 641, 648, 677,
// 684 and 765 (parts of conditions and the LandingPadSuccs/Cond/MBBI local
// declarations) -- confirm against the upstream source.
619void
620MachineVerifier::visitMachineBasicBlockBefore(const MachineBasicBlock *MBB) {
621  FirstTerminator = nullptr;
622  FirstNonPHI = nullptr;
623
624  if (!MF->getProperties().hasProperty(
625      MachineFunctionProperties::Property::NoPHIs) && MRI->tracksLiveness()) {
626    // If this block has allocatable physical registers live-in, check that
627    // it is an entry block or landing pad.
628    for (const auto &LI : MBB->liveins()) {
629      if (isAllocatable(LI.PhysReg) && !MBB->isEHPad() &&
630          MBB->getIterator() != MBB->getParent()->begin() &&
632        report("MBB has allocatable live-in, but isn't entry, landing-pad, or "
633               "inlineasm-br-indirect-target.",
634               MBB);
635        report_context(LI.PhysReg);
636      }
637    }
638  }
639
640  if (MBB->isIRBlockAddressTaken()) {
642    report("ir-block-address-taken is associated with basic block not used by "
643           "a blockaddress.",
644           MBB);
645  }
646
647  // Count the number of landing pad successors.
649  for (const auto *succ : MBB->successors()) {
650    if (succ->isEHPad())
651      LandingPadSuccs.insert(succ);
652    if (!FunctionBlocks.count(succ))
653      report("MBB has successor that isn't part of the function.", MBB);
654    if (!MBBInfoMap[succ].Preds.count(MBB)) {
655      report("Inconsistent CFG", MBB);
656      errs() << "MBB is not in the predecessor list of the successor "
657             << printMBBReference(*succ) << ".\n";
658    }
659  }
660
661  // Check the predecessor list.
662  for (const MachineBasicBlock *Pred : MBB->predecessors()) {
663    if (!FunctionBlocks.count(Pred))
664      report("MBB has predecessor that isn't part of the function.", MBB);
665    if (!MBBInfoMap[Pred].Succs.count(MBB)) {
666      report("Inconsistent CFG", MBB);
667      errs() << "MBB is not in the successor list of the predecessor "
668             << printMBBReference(*Pred) << ".\n";
669    }
670  }
671
672  const MCAsmInfo *AsmInfo = TM->getMCAsmInfo();
673  const BasicBlock *BB = MBB->getBasicBlock();
674  const Function &F = MF->getFunction();
675  if (LandingPadSuccs.size() > 1 &&
676      !(AsmInfo &&
678        BB && isa<SwitchInst>(BB->getTerminator())) &&
679      !isScopedEHPersonality(classifyEHPersonality(F.getPersonalityFn())))
680    report("MBB has more than one landing pad successor", MBB);
681
682  // Call analyzeBranch. If it succeeds, there several more conditions to check.
683  MachineBasicBlock *TBB = nullptr, *FBB = nullptr;
685  if (!TII->analyzeBranch(*const_cast<MachineBasicBlock *>(MBB), TBB, FBB,
686                          Cond)) {
687    // Ok, analyzeBranch thinks it knows what's going on with this block. Let's
688    // check whether its answers match up with reality.
689    if (!TBB && !FBB) {
690      // Block falls through to its successor.
691      if (!MBB->empty() && MBB->back().isBarrier() &&
692          !TII->isPredicated(MBB->back())) {
693        report("MBB exits via unconditional fall-through but ends with a "
694               "barrier instruction!", MBB);
695      }
696      if (!Cond.empty()) {
697        report("MBB exits via unconditional fall-through but has a condition!",
698               MBB);
699      }
700    } else if (TBB && !FBB && Cond.empty()) {
701      // Block unconditionally branches somewhere.
702      if (MBB->empty()) {
703        report("MBB exits via unconditional branch but doesn't contain "
704               "any instructions!", MBB);
705      } else if (!MBB->back().isBarrier()) {
706        report("MBB exits via unconditional branch but doesn't end with a "
707               "barrier instruction!", MBB);
708      } else if (!MBB->back().isTerminator()) {
709        report("MBB exits via unconditional branch but the branch isn't a "
710               "terminator instruction!", MBB);
711      }
712    } else if (TBB && !FBB && !Cond.empty()) {
713      // Block conditionally branches somewhere, otherwise falls through.
714      if (MBB->empty()) {
715        report("MBB exits via conditional branch/fall-through but doesn't "
716               "contain any instructions!", MBB);
717      } else if (MBB->back().isBarrier()) {
718        report("MBB exits via conditional branch/fall-through but ends with a "
719               "barrier instruction!", MBB);
720      } else if (!MBB->back().isTerminator()) {
721        report("MBB exits via conditional branch/fall-through but the branch "
722               "isn't a terminator instruction!", MBB);
723      }
724    } else if (TBB && FBB) {
725      // Block conditionally branches somewhere, otherwise branches
726      // somewhere else.
727      if (MBB->empty()) {
728        report("MBB exits via conditional branch/branch but doesn't "
729               "contain any instructions!", MBB);
730      } else if (!MBB->back().isBarrier()) {
731        report("MBB exits via conditional branch/branch but doesn't end with a "
732               "barrier instruction!", MBB);
733      } else if (!MBB->back().isTerminator()) {
734        report("MBB exits via conditional branch/branch but the branch "
735               "isn't a terminator instruction!", MBB);
736      }
737      if (Cond.empty()) {
738        report("MBB exits via conditional branch/branch but there's no "
739               "condition!", MBB);
740      }
741    } else {
742      report("analyzeBranch returned invalid data!", MBB);
743    }
744
745    // Now check that the successors match up with the answers reported by
746    // analyzeBranch.
747    if (TBB && !MBB->isSuccessor(TBB))
748      report("MBB exits via jump or conditional branch, but its target isn't a "
749             "CFG successor!",
750             MBB);
751    if (FBB && !MBB->isSuccessor(FBB))
752      report("MBB exits via conditional branch, but its target isn't a CFG "
753             "successor!",
754             MBB);
755
756    // There might be a fallthrough to the next block if there's either no
757    // unconditional true branch, or if there's a condition, and one of the
758    // branches is missing.
759    bool Fallthrough = !TBB || (!Cond.empty() && !FBB);
760
761    // A conditional fallthrough must be an actual CFG successor, not
762    // unreachable. (Conversely, an unconditional fallthrough might not really
763    // be a successor, because the block might end in unreachable.)
764    if (!Cond.empty() && !FBB) {
766      if (MBBI == MF->end()) {
767        report("MBB conditionally falls through out of function!", MBB);
768      } else if (!MBB->isSuccessor(&*MBBI))
769        report("MBB exits via conditional branch/fall-through but the CFG "
770               "successors don't match the actual successors!",
771               MBB);
772    }
773
774    // Verify that there aren't any extra un-accounted-for successors.
775    for (const MachineBasicBlock *SuccMBB : MBB->successors()) {
776      // If this successor is one of the branch targets, it's okay.
777      if (SuccMBB == TBB || SuccMBB == FBB)
778        continue;
779      // If we might have a fallthrough, and the successor is the fallthrough
780      // block, that's also ok.
781      if (Fallthrough && SuccMBB == MBB->getNextNode())
782        continue;
783      // Also accept successors which are for exception-handling or might be
784      // inlineasm_br targets.
785      if (SuccMBB->isEHPad() || SuccMBB->isInlineAsmBrIndirectTarget())
786        continue;
787      report("MBB has unexpected successors which are not branch targets, "
788             "fallthrough, EHPads, or inlineasm_br targets.",
789             MBB);
790    }
791  }
792
  // Seed the per-block live register set from the live-in list.
793  regsLive.clear();
794  if (MRI->tracksLiveness()) {
795    for (const auto &LI : MBB->liveins()) {
796      if (!Register::isPhysicalRegister(LI.PhysReg)) {
797        report("MBB live-in list contains non-physical register", MBB);
798        continue;
799      }
800      for (const MCPhysReg &SubReg : TRI->subregs_inclusive(LI.PhysReg))
801        regsLive.insert(SubReg);
802    }
803  }
804
  // Pristine (callee-saved, never-touched) registers are implicitly live too.
805  const MachineFrameInfo &MFI = MF->getFrameInfo();
806  BitVector PR = MFI.getPristineRegs(*MF);
807  for (unsigned I : PR.set_bits()) {
808    for (const MCPhysReg &SubReg : TRI->subregs_inclusive(I))
809      regsLive.insert(SubReg);
810  }
811
812  regsKilled.clear();
813  regsDefined.clear();
814
815  if (Indexes)
816    lastIndex = Indexes->getMBBStartIdx(MBB);
817}
818
819// This function gets called for all bundle headers, including normal
820// stand-alone unbundled instructions.
821void MachineVerifier::visitMachineBundleBefore(const MachineInstr *MI) {
822 if (Indexes && Indexes->hasIndex(*MI)) {
823 SlotIndex idx = Indexes->getInstructionIndex(*MI);
824 if (!(idx > lastIndex)) {
825 report("Instruction index out of order", MI);
826 errs() << "Last instruction was at " << lastIndex << '\n';
827 }
828 lastIndex = idx;
829 }
830
831 // Ensure non-terminators don't follow terminators.
832 if (MI->isTerminator()) {
833 if (!FirstTerminator)
834 FirstTerminator = MI;
835 } else if (FirstTerminator) {
836 // For GlobalISel, G_INVOKE_REGION_START is a terminator that we allow to
837 // precede non-terminators.
838 if (FirstTerminator->getOpcode() != TargetOpcode::G_INVOKE_REGION_START) {
839 report("Non-terminator instruction after the first terminator", MI);
840 errs() << "First terminator was:\t" << *FirstTerminator;
841 }
842 }
843}
844
845// The operands on an INLINEASM instruction must follow a template.
846// Verify that the flag operands make sense.
847void MachineVerifier::verifyInlineAsm(const MachineInstr *MI) {
848  // The first two operands on INLINEASM are the asm string and global flags.
849  if (MI->getNumOperands() < 2) {
850    report("Too few operands on inline asm", MI);
851    return;
852  }
853  if (!MI->getOperand(0).isSymbol())
854    report("Asm string must be an external symbol", MI);
855  if (!MI->getOperand(1).isImm())
856    report("Asm flags must be an immediate", MI);
857  // Allowed flags are Extra_HasSideEffects = 1, Extra_IsAlignStack = 2,
858  // Extra_AsmDialect = 4, Extra_MayLoad = 8, and Extra_MayStore = 16,
859  // and Extra_IsConvergent = 32.
860  if (!isUInt<6>(MI->getOperand(1).getImm()))
861    report("Unknown asm flags", &MI->getOperand(1), 1);
862
863  static_assert(InlineAsm::MIOp_FirstOperand == 2, "Asm format changed");
864
  // Walk the operand groups: each group starts with an immediate flag word
  // that encodes how many register operands follow it.
865  unsigned OpNo = InlineAsm::MIOp_FirstOperand;
866  unsigned NumOps;
867  for (unsigned e = MI->getNumOperands(); OpNo < e; OpNo += NumOps) {
868    const MachineOperand &MO = MI->getOperand(OpNo);
869    // There may be implicit ops after the fixed operands.
870    if (!MO.isImm())
871      break;
872    NumOps = 1 + InlineAsm::getNumOperandRegisters(MO.getImm());
873  }
874
  // If the last group's flag word promised more operands than exist, OpNo
  // stepped past the end.
875  if (OpNo > MI->getNumOperands())
876    report("Missing operands in last group", MI);
877
878  // An optional MDNode follows the groups.
879  if (OpNo < MI->getNumOperands() && MI->getOperand(OpNo).isMetadata())
880    ++OpNo;
881
882  // All trailing operands must be implicit registers.
883  for (unsigned e = MI->getNumOperands(); OpNo < e; ++OpNo) {
884    const MachineOperand &MO = MI->getOperand(OpNo);
885    if (!MO.isReg() || !MO.isImplicit())
886      report("Expected implicit register after groups", &MO, OpNo);
887  }
888
  // INLINEASM_BR additionally carries MBB operands for its indirect targets;
  // those must agree with the CFG successor/predecessor lists.
889  if (MI->getOpcode() == TargetOpcode::INLINEASM_BR) {
890    const MachineBasicBlock *MBB = MI->getParent();
891
892    for (unsigned i = InlineAsm::MIOp_FirstOperand, e = MI->getNumOperands();
893         i != e; ++i) {
894      const MachineOperand &MO = MI->getOperand(i);
895
896      if (!MO.isMBB())
897        continue;
898
899      // Check the successor & predecessor lists look ok, assume they are
900      // not. Find the indirect target without going through the successors.
901      const MachineBasicBlock *IndirectTargetMBB = MO.getMBB();
902      if (!IndirectTargetMBB) {
903        report("INLINEASM_BR indirect target does not exist", &MO, i);
904        break;
905      }
906
907      if (!MBB->isSuccessor(IndirectTargetMBB))
908        report("INLINEASM_BR indirect target missing from successor list", &MO,
909               i);
910
911      if (!IndirectTargetMBB->isPredecessor(MBB))
912        report("INLINEASM_BR indirect target predecessor list missing parent",
913               &MO, i);
914    }
915  }
916}
917
918bool MachineVerifier::verifyAllRegOpsScalar(const MachineInstr &MI,
919 const MachineRegisterInfo &MRI) {
920 if (none_of(MI.explicit_operands(), [&MRI](const MachineOperand &Op) {
921 if (!Op.isReg())
922 return false;
923 const auto Reg = Op.getReg();
924 if (Reg.isPhysical())
925 return false;
926 return !MRI.getType(Reg).isScalar();
927 }))
928 return true;
929 report("All register operands must have scalar types", &MI);
930 return false;
931}
932
933/// Check that types are consistent when two operands need to have the same
934/// number of vector elements.
935/// \return true if the types are valid.
936bool MachineVerifier::verifyVectorElementMatch(LLT Ty0, LLT Ty1,
937 const MachineInstr *MI) {
938 if (Ty0.isVector() != Ty1.isVector()) {
939 report("operand types must be all-vector or all-scalar", MI);
940 // Generally we try to report as many issues as possible at once, but in
941 // this case it's not clear what should we be comparing the size of the
942 // scalar with: the size of the whole vector or its lane. Instead of
943 // making an arbitrary choice and emitting not so helpful message, let's
944 // avoid the extra noise and stop here.
945 return false;
946 }
947
948 if (Ty0.isVector() && Ty0.getNumElements() != Ty1.getNumElements()) {
949 report("operand types must preserve number of vector elements", MI);
950 return false;
951 }
952
953 return true;
954}
955
956void MachineVerifier::verifyPreISelGenericInstruction(const MachineInstr *MI) {
957 if (isFunctionSelected)
958 report("Unexpected generic instruction in a Selected function", MI);
959
960 const MCInstrDesc &MCID = MI->getDesc();
961 unsigned NumOps = MI->getNumOperands();
962
963 // Branches must reference a basic block if they are not indirect
964 if (MI->isBranch() && !MI->isIndirectBranch()) {
965 bool HasMBB = false;
966 for (const MachineOperand &Op : MI->operands()) {
967 if (Op.isMBB()) {
968 HasMBB = true;
969 break;
970 }
971 }
972
973 if (!HasMBB) {
974 report("Branch instruction is missing a basic block operand or "
975 "isIndirectBranch property",
976 MI);
977 }
978 }
979
980 // Check types.
982 for (unsigned I = 0, E = std::min(MCID.getNumOperands(), NumOps);
983 I != E; ++I) {
984 if (!MCID.operands()[I].isGenericType())
985 continue;
986 // Generic instructions specify type equality constraints between some of
987 // their operands. Make sure these are consistent.
988 size_t TypeIdx = MCID.operands()[I].getGenericTypeIndex();
989 Types.resize(std::max(TypeIdx + 1, Types.size()));
990
991 const MachineOperand *MO = &MI->getOperand(I);
992 if (!MO->isReg()) {
993 report("generic instruction must use register operands", MI);
994 continue;
995 }
996
997 LLT OpTy = MRI->getType(MO->getReg());
998 // Don't report a type mismatch if there is no actual mismatch, only a
999 // type missing, to reduce noise:
1000 if (OpTy.isValid()) {
1001 // Only the first valid type for a type index will be printed: don't
1002 // overwrite it later so it's always clear which type was expected:
1003 if (!Types[TypeIdx].isValid())
1004 Types[TypeIdx] = OpTy;
1005 else if (Types[TypeIdx] != OpTy)
1006 report("Type mismatch in generic instruction", MO, I, OpTy);
1007 } else {
1008 // Generic instructions must have types attached to their operands.
1009 report("Generic instruction is missing a virtual register type", MO, I);
1010 }
1011 }
1012
1013 // Generic opcodes must not have physical register operands.
1014 for (unsigned I = 0; I < MI->getNumOperands(); ++I) {
1015 const MachineOperand *MO = &MI->getOperand(I);
1016 if (MO->isReg() && MO->getReg().isPhysical())
1017 report("Generic instruction cannot have physical register", MO, I);
1018 }
1019
1020 // Avoid out of bounds in checks below. This was already reported earlier.
1021 if (MI->getNumOperands() < MCID.getNumOperands())
1022 return;
1023
1025 if (!TII->verifyInstruction(*MI, ErrorInfo))
1026 report(ErrorInfo.data(), MI);
1027
1028 // Verify properties of various specific instruction types
1029 unsigned Opc = MI->getOpcode();
1030 switch (Opc) {
1031 case TargetOpcode::G_ASSERT_SEXT:
1032 case TargetOpcode::G_ASSERT_ZEXT: {
1033 std::string OpcName =
1034 Opc == TargetOpcode::G_ASSERT_ZEXT ? "G_ASSERT_ZEXT" : "G_ASSERT_SEXT";
1035 if (!MI->getOperand(2).isImm()) {
1036 report(Twine(OpcName, " expects an immediate operand #2"), MI);
1037 break;
1038 }
1039
1040 Register Dst = MI->getOperand(0).getReg();
1041 Register Src = MI->getOperand(1).getReg();
1042 LLT SrcTy = MRI->getType(Src);
1043 int64_t Imm = MI->getOperand(2).getImm();
1044 if (Imm <= 0) {
1045 report(Twine(OpcName, " size must be >= 1"), MI);
1046 break;
1047 }
1048
1049 if (Imm >= SrcTy.getScalarSizeInBits()) {
1050 report(Twine(OpcName, " size must be less than source bit width"), MI);
1051 break;
1052 }
1053
1054 const RegisterBank *SrcRB = RBI->getRegBank(Src, *MRI, *TRI);
1055 const RegisterBank *DstRB = RBI->getRegBank(Dst, *MRI, *TRI);
1056
1057 // Allow only the source bank to be set.
1058 if ((SrcRB && DstRB && SrcRB != DstRB) || (DstRB && !SrcRB)) {
1059 report(Twine(OpcName, " cannot change register bank"), MI);
1060 break;
1061 }
1062
1063 // Don't allow a class change. Do allow member class->regbank.
1064 const TargetRegisterClass *DstRC = MRI->getRegClassOrNull(Dst);
1065 if (DstRC && DstRC != MRI->getRegClassOrNull(Src)) {
1066 report(
1067 Twine(OpcName, " source and destination register classes must match"),
1068 MI);
1069 break;
1070 }
1071
1072 break;
1073 }
1074
1075 case TargetOpcode::G_CONSTANT:
1076 case TargetOpcode::G_FCONSTANT: {
1077 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1078 if (DstTy.isVector())
1079 report("Instruction cannot use a vector result type", MI);
1080
1081 if (MI->getOpcode() == TargetOpcode::G_CONSTANT) {
1082 if (!MI->getOperand(1).isCImm()) {
1083 report("G_CONSTANT operand must be cimm", MI);
1084 break;
1085 }
1086
1087 const ConstantInt *CI = MI->getOperand(1).getCImm();
1088 if (CI->getBitWidth() != DstTy.getSizeInBits())
1089 report("inconsistent constant size", MI);
1090 } else {
1091 if (!MI->getOperand(1).isFPImm()) {
1092 report("G_FCONSTANT operand must be fpimm", MI);
1093 break;
1094 }
1095 const ConstantFP *CF = MI->getOperand(1).getFPImm();
1096
1098 DstTy.getSizeInBits()) {
1099 report("inconsistent constant size", MI);
1100 }
1101 }
1102
1103 break;
1104 }
1105 case TargetOpcode::G_LOAD:
1106 case TargetOpcode::G_STORE:
1107 case TargetOpcode::G_ZEXTLOAD:
1108 case TargetOpcode::G_SEXTLOAD: {
1109 LLT ValTy = MRI->getType(MI->getOperand(0).getReg());
1110 LLT PtrTy = MRI->getType(MI->getOperand(1).getReg());
1111 if (!PtrTy.isPointer())
1112 report("Generic memory instruction must access a pointer", MI);
1113
1114 // Generic loads and stores must have a single MachineMemOperand
1115 // describing that access.
1116 if (!MI->hasOneMemOperand()) {
1117 report("Generic instruction accessing memory must have one mem operand",
1118 MI);
1119 } else {
1120 const MachineMemOperand &MMO = **MI->memoperands_begin();
1121 if (MI->getOpcode() == TargetOpcode::G_ZEXTLOAD ||
1122 MI->getOpcode() == TargetOpcode::G_SEXTLOAD) {
1123 if (MMO.getSizeInBits() >= ValTy.getSizeInBits())
1124 report("Generic extload must have a narrower memory type", MI);
1125 } else if (MI->getOpcode() == TargetOpcode::G_LOAD) {
1126 if (MMO.getSize() > ValTy.getSizeInBytes())
1127 report("load memory size cannot exceed result size", MI);
1128 } else if (MI->getOpcode() == TargetOpcode::G_STORE) {
1129 if (ValTy.getSizeInBytes() < MMO.getSize())
1130 report("store memory size cannot exceed value size", MI);
1131 }
1132
1133 const AtomicOrdering Order = MMO.getSuccessOrdering();
1134 if (Opc == TargetOpcode::G_STORE) {
1135 if (Order == AtomicOrdering::Acquire ||
1137 report("atomic store cannot use acquire ordering", MI);
1138
1139 } else {
1140 if (Order == AtomicOrdering::Release ||
1142 report("atomic load cannot use release ordering", MI);
1143 }
1144 }
1145
1146 break;
1147 }
1148 case TargetOpcode::G_PHI: {
1149 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1150 if (!DstTy.isValid() || !all_of(drop_begin(MI->operands()),
1151 [this, &DstTy](const MachineOperand &MO) {
1152 if (!MO.isReg())
1153 return true;
1154 LLT Ty = MRI->getType(MO.getReg());
1155 if (!Ty.isValid() || (Ty != DstTy))
1156 return false;
1157 return true;
1158 }))
1159 report("Generic Instruction G_PHI has operands with incompatible/missing "
1160 "types",
1161 MI);
1162 break;
1163 }
1164 case TargetOpcode::G_BITCAST: {
1165 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1166 LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
1167 if (!DstTy.isValid() || !SrcTy.isValid())
1168 break;
1169
1170 if (SrcTy.isPointer() != DstTy.isPointer())
1171 report("bitcast cannot convert between pointers and other types", MI);
1172
1173 if (SrcTy.getSizeInBits() != DstTy.getSizeInBits())
1174 report("bitcast sizes must match", MI);
1175
1176 if (SrcTy == DstTy)
1177 report("bitcast must change the type", MI);
1178
1179 break;
1180 }
1181 case TargetOpcode::G_INTTOPTR:
1182 case TargetOpcode::G_PTRTOINT:
1183 case TargetOpcode::G_ADDRSPACE_CAST: {
1184 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1185 LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
1186 if (!DstTy.isValid() || !SrcTy.isValid())
1187 break;
1188
1189 verifyVectorElementMatch(DstTy, SrcTy, MI);
1190
1191 DstTy = DstTy.getScalarType();
1192 SrcTy = SrcTy.getScalarType();
1193
1194 if (MI->getOpcode() == TargetOpcode::G_INTTOPTR) {
1195 if (!DstTy.isPointer())
1196 report("inttoptr result type must be a pointer", MI);
1197 if (SrcTy.isPointer())
1198 report("inttoptr source type must not be a pointer", MI);
1199 } else if (MI->getOpcode() == TargetOpcode::G_PTRTOINT) {
1200 if (!SrcTy.isPointer())
1201 report("ptrtoint source type must be a pointer", MI);
1202 if (DstTy.isPointer())
1203 report("ptrtoint result type must not be a pointer", MI);
1204 } else {
1205 assert(MI->getOpcode() == TargetOpcode::G_ADDRSPACE_CAST);
1206 if (!SrcTy.isPointer() || !DstTy.isPointer())
1207 report("addrspacecast types must be pointers", MI);
1208 else {
1209 if (SrcTy.getAddressSpace() == DstTy.getAddressSpace())
1210 report("addrspacecast must convert different address spaces", MI);
1211 }
1212 }
1213
1214 break;
1215 }
1216 case TargetOpcode::G_PTR_ADD: {
1217 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1218 LLT PtrTy = MRI->getType(MI->getOperand(1).getReg());
1219 LLT OffsetTy = MRI->getType(MI->getOperand(2).getReg());
1220 if (!DstTy.isValid() || !PtrTy.isValid() || !OffsetTy.isValid())
1221 break;
1222
1223 if (!PtrTy.getScalarType().isPointer())
1224 report("gep first operand must be a pointer", MI);
1225
1226 if (OffsetTy.getScalarType().isPointer())
1227 report("gep offset operand must not be a pointer", MI);
1228
1229 // TODO: Is the offset allowed to be a scalar with a vector?
1230 break;
1231 }
1232 case TargetOpcode::G_PTRMASK: {
1233 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1234 LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
1235 LLT MaskTy = MRI->getType(MI->getOperand(2).getReg());
1236 if (!DstTy.isValid() || !SrcTy.isValid() || !MaskTy.isValid())
1237 break;
1238
1239 if (!DstTy.getScalarType().isPointer())
1240 report("ptrmask result type must be a pointer", MI);
1241
1242 if (!MaskTy.getScalarType().isScalar())
1243 report("ptrmask mask type must be an integer", MI);
1244
1245 verifyVectorElementMatch(DstTy, MaskTy, MI);
1246 break;
1247 }
1248 case TargetOpcode::G_SEXT:
1249 case TargetOpcode::G_ZEXT:
1250 case TargetOpcode::G_ANYEXT:
1251 case TargetOpcode::G_TRUNC:
1252 case TargetOpcode::G_FPEXT:
1253 case TargetOpcode::G_FPTRUNC: {
1254 // Number of operands and presense of types is already checked (and
1255 // reported in case of any issues), so no need to report them again. As
1256 // we're trying to report as many issues as possible at once, however, the
1257 // instructions aren't guaranteed to have the right number of operands or
1258 // types attached to them at this point
1259 assert(MCID.getNumOperands() == 2 && "Expected 2 operands G_*{EXT,TRUNC}");
1260 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1261 LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
1262 if (!DstTy.isValid() || !SrcTy.isValid())
1263 break;
1264
1265 LLT DstElTy = DstTy.getScalarType();
1266 LLT SrcElTy = SrcTy.getScalarType();
1267 if (DstElTy.isPointer() || SrcElTy.isPointer())
1268 report("Generic extend/truncate can not operate on pointers", MI);
1269
1270 verifyVectorElementMatch(DstTy, SrcTy, MI);
1271
1272 unsigned DstSize = DstElTy.getSizeInBits();
1273 unsigned SrcSize = SrcElTy.getSizeInBits();
1274 switch (MI->getOpcode()) {
1275 default:
1276 if (DstSize <= SrcSize)
1277 report("Generic extend has destination type no larger than source", MI);
1278 break;
1279 case TargetOpcode::G_TRUNC:
1280 case TargetOpcode::G_FPTRUNC:
1281 if (DstSize >= SrcSize)
1282 report("Generic truncate has destination type no smaller than source",
1283 MI);
1284 break;
1285 }
1286 break;
1287 }
1288 case TargetOpcode::G_SELECT: {
1289 LLT SelTy = MRI->getType(MI->getOperand(0).getReg());
1290 LLT CondTy = MRI->getType(MI->getOperand(1).getReg());
1291 if (!SelTy.isValid() || !CondTy.isValid())
1292 break;
1293
1294 // Scalar condition select on a vector is valid.
1295 if (CondTy.isVector())
1296 verifyVectorElementMatch(SelTy, CondTy, MI);
1297 break;
1298 }
1299 case TargetOpcode::G_MERGE_VALUES: {
1300 // G_MERGE_VALUES should only be used to merge scalars into a larger scalar,
1301 // e.g. s2N = MERGE sN, sN
1302 // Merging multiple scalars into a vector is not allowed, should use
1303 // G_BUILD_VECTOR for that.
1304 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1305 LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
1306 if (DstTy.isVector() || SrcTy.isVector())
1307 report("G_MERGE_VALUES cannot operate on vectors", MI);
1308
1309 const unsigned NumOps = MI->getNumOperands();
1310 if (DstTy.getSizeInBits() != SrcTy.getSizeInBits() * (NumOps - 1))
1311 report("G_MERGE_VALUES result size is inconsistent", MI);
1312
1313 for (unsigned I = 2; I != NumOps; ++I) {
1314 if (MRI->getType(MI->getOperand(I).getReg()) != SrcTy)
1315 report("G_MERGE_VALUES source types do not match", MI);
1316 }
1317
1318 break;
1319 }
1320 case TargetOpcode::G_UNMERGE_VALUES: {
1321 unsigned NumDsts = MI->getNumOperands() - 1;
1322 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1323 for (unsigned i = 1; i < NumDsts; ++i) {
1324 if (MRI->getType(MI->getOperand(i).getReg()) != DstTy) {
1325 report("G_UNMERGE_VALUES destination types do not match", MI);
1326 break;
1327 }
1328 }
1329
1330 LLT SrcTy = MRI->getType(MI->getOperand(NumDsts).getReg());
1331 if (DstTy.isVector()) {
1332 // This case is the converse of G_CONCAT_VECTORS.
1333 if (!SrcTy.isVector() || SrcTy.getScalarType() != DstTy.getScalarType() ||
1334 SrcTy.getNumElements() != NumDsts * DstTy.getNumElements())
1335 report("G_UNMERGE_VALUES source operand does not match vector "
1336 "destination operands",
1337 MI);
1338 } else if (SrcTy.isVector()) {
1339 // This case is the converse of G_BUILD_VECTOR, but relaxed to allow
1340 // mismatched types as long as the total size matches:
1341 // %0:_(s64), %1:_(s64) = G_UNMERGE_VALUES %2:_(<4 x s32>)
1342 if (SrcTy.getSizeInBits() != NumDsts * DstTy.getSizeInBits())
1343 report("G_UNMERGE_VALUES vector source operand does not match scalar "
1344 "destination operands",
1345 MI);
1346 } else {
1347 // This case is the converse of G_MERGE_VALUES.
1348 if (SrcTy.getSizeInBits() != NumDsts * DstTy.getSizeInBits()) {
1349 report("G_UNMERGE_VALUES scalar source operand does not match scalar "
1350 "destination operands",
1351 MI);
1352 }
1353 }
1354 break;
1355 }
1356 case TargetOpcode::G_BUILD_VECTOR: {
1357 // Source types must be scalars, dest type a vector. Total size of scalars
1358 // must match the dest vector size.
1359 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1360 LLT SrcEltTy = MRI->getType(MI->getOperand(1).getReg());
1361 if (!DstTy.isVector() || SrcEltTy.isVector()) {
1362 report("G_BUILD_VECTOR must produce a vector from scalar operands", MI);
1363 break;
1364 }
1365
1366 if (DstTy.getElementType() != SrcEltTy)
1367 report("G_BUILD_VECTOR result element type must match source type", MI);
1368
1369 if (DstTy.getNumElements() != MI->getNumOperands() - 1)
1370 report("G_BUILD_VECTOR must have an operand for each elemement", MI);
1371
1372 for (const MachineOperand &MO : llvm::drop_begin(MI->operands(), 2))
1373 if (MRI->getType(MI->getOperand(1).getReg()) != MRI->getType(MO.getReg()))
1374 report("G_BUILD_VECTOR source operand types are not homogeneous", MI);
1375
1376 break;
1377 }
1378 case TargetOpcode::G_BUILD_VECTOR_TRUNC: {
1379 // Source types must be scalars, dest type a vector. Scalar types must be
1380 // larger than the dest vector elt type, as this is a truncating operation.
1381 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1382 LLT SrcEltTy = MRI->getType(MI->getOperand(1).getReg());
1383 if (!DstTy.isVector() || SrcEltTy.isVector())
1384 report("G_BUILD_VECTOR_TRUNC must produce a vector from scalar operands",
1385 MI);
1386 for (const MachineOperand &MO : llvm::drop_begin(MI->operands(), 2))
1387 if (MRI->getType(MI->getOperand(1).getReg()) != MRI->getType(MO.getReg()))
1388 report("G_BUILD_VECTOR_TRUNC source operand types are not homogeneous",
1389 MI);
1390 if (SrcEltTy.getSizeInBits() <= DstTy.getElementType().getSizeInBits())
1391 report("G_BUILD_VECTOR_TRUNC source operand types are not larger than "
1392 "dest elt type",
1393 MI);
1394 break;
1395 }
1396 case TargetOpcode::G_CONCAT_VECTORS: {
1397 // Source types should be vectors, and total size should match the dest
1398 // vector size.
1399 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1400 LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
1401 if (!DstTy.isVector() || !SrcTy.isVector())
1402 report("G_CONCAT_VECTOR requires vector source and destination operands",
1403 MI);
1404
1405 if (MI->getNumOperands() < 3)
1406 report("G_CONCAT_VECTOR requires at least 2 source operands", MI);
1407
1408 for (const MachineOperand &MO : llvm::drop_begin(MI->operands(), 2))
1409 if (MRI->getType(MI->getOperand(1).getReg()) != MRI->getType(MO.getReg()))
1410 report("G_CONCAT_VECTOR source operand types are not homogeneous", MI);
1411 if (DstTy.getNumElements() !=
1412 SrcTy.getNumElements() * (MI->getNumOperands() - 1))
1413 report("G_CONCAT_VECTOR num dest and source elements should match", MI);
1414 break;
1415 }
1416 case TargetOpcode::G_ICMP:
1417 case TargetOpcode::G_FCMP: {
1418 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1419 LLT SrcTy = MRI->getType(MI->getOperand(2).getReg());
1420
1421 if ((DstTy.isVector() != SrcTy.isVector()) ||
1422 (DstTy.isVector() && DstTy.getNumElements() != SrcTy.getNumElements()))
1423 report("Generic vector icmp/fcmp must preserve number of lanes", MI);
1424
1425 break;
1426 }
1427 case TargetOpcode::G_EXTRACT: {
1428 const MachineOperand &SrcOp = MI->getOperand(1);
1429 if (!SrcOp.isReg()) {
1430 report("extract source must be a register", MI);
1431 break;
1432 }
1433
1434 const MachineOperand &OffsetOp = MI->getOperand(2);
1435 if (!OffsetOp.isImm()) {
1436 report("extract offset must be a constant", MI);
1437 break;
1438 }
1439
1440 unsigned DstSize = MRI->getType(MI->getOperand(0).getReg()).getSizeInBits();
1441 unsigned SrcSize = MRI->getType(SrcOp.getReg()).getSizeInBits();
1442 if (SrcSize == DstSize)
1443 report("extract source must be larger than result", MI);
1444
1445 if (DstSize + OffsetOp.getImm() > SrcSize)
1446 report("extract reads past end of register", MI);
1447 break;
1448 }
1449 case TargetOpcode::G_INSERT: {
1450 const MachineOperand &SrcOp = MI->getOperand(2);
1451 if (!SrcOp.isReg()) {
1452 report("insert source must be a register", MI);
1453 break;
1454 }
1455
1456 const MachineOperand &OffsetOp = MI->getOperand(3);
1457 if (!OffsetOp.isImm()) {
1458 report("insert offset must be a constant", MI);
1459 break;
1460 }
1461
1462 unsigned DstSize = MRI->getType(MI->getOperand(0).getReg()).getSizeInBits();
1463 unsigned SrcSize = MRI->getType(SrcOp.getReg()).getSizeInBits();
1464
1465 if (DstSize <= SrcSize)
1466 report("inserted size must be smaller than total register", MI);
1467
1468 if (SrcSize + OffsetOp.getImm() > DstSize)
1469 report("insert writes past end of register", MI);
1470
1471 break;
1472 }
1473 case TargetOpcode::G_JUMP_TABLE: {
1474 if (!MI->getOperand(1).isJTI())
1475 report("G_JUMP_TABLE source operand must be a jump table index", MI);
1476 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1477 if (!DstTy.isPointer())
1478 report("G_JUMP_TABLE dest operand must have a pointer type", MI);
1479 break;
1480 }
1481 case TargetOpcode::G_BRJT: {
1482 if (!MRI->getType(MI->getOperand(0).getReg()).isPointer())
1483 report("G_BRJT src operand 0 must be a pointer type", MI);
1484
1485 if (!MI->getOperand(1).isJTI())
1486 report("G_BRJT src operand 1 must be a jump table index", MI);
1487
1488 const auto &IdxOp = MI->getOperand(2);
1489 if (!IdxOp.isReg() || MRI->getType(IdxOp.getReg()).isPointer())
1490 report("G_BRJT src operand 2 must be a scalar reg type", MI);
1491 break;
1492 }
1493 case TargetOpcode::G_INTRINSIC:
1494 case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS: {
1495 // TODO: Should verify number of def and use operands, but the current
1496 // interface requires passing in IR types for mangling.
1497 const MachineOperand &IntrIDOp = MI->getOperand(MI->getNumExplicitDefs());
1498 if (!IntrIDOp.isIntrinsicID()) {
1499 report("G_INTRINSIC first src operand must be an intrinsic ID", MI);
1500 break;
1501 }
1502
1503 bool NoSideEffects = MI->getOpcode() == TargetOpcode::G_INTRINSIC;
1504 unsigned IntrID = IntrIDOp.getIntrinsicID();
1505 if (IntrID != 0 && IntrID < Intrinsic::num_intrinsics) {
1507 MF->getFunction().getContext(), static_cast<Intrinsic::ID>(IntrID));
1508 bool DeclHasSideEffects = !Attrs.getMemoryEffects().doesNotAccessMemory();
1509 if (NoSideEffects && DeclHasSideEffects) {
1510 report("G_INTRINSIC used with intrinsic that accesses memory", MI);
1511 break;
1512 }
1513 if (!NoSideEffects && !DeclHasSideEffects) {
1514 report("G_INTRINSIC_W_SIDE_EFFECTS used with readnone intrinsic", MI);
1515 break;
1516 }
1517 }
1518
1519 break;
1520 }
1521 case TargetOpcode::G_SEXT_INREG: {
1522 if (!MI->getOperand(2).isImm()) {
1523 report("G_SEXT_INREG expects an immediate operand #2", MI);
1524 break;
1525 }
1526
1527 LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
1528 int64_t Imm = MI->getOperand(2).getImm();
1529 if (Imm <= 0)
1530 report("G_SEXT_INREG size must be >= 1", MI);
1531 if (Imm >= SrcTy.getScalarSizeInBits())
1532 report("G_SEXT_INREG size must be less than source bit width", MI);
1533 break;
1534 }
1535 case TargetOpcode::G_SHUFFLE_VECTOR: {
1536 const MachineOperand &MaskOp = MI->getOperand(3);
1537 if (!MaskOp.isShuffleMask()) {
1538 report("Incorrect mask operand type for G_SHUFFLE_VECTOR", MI);
1539 break;
1540 }
1541
1542 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1543 LLT Src0Ty = MRI->getType(MI->getOperand(1).getReg());
1544 LLT Src1Ty = MRI->getType(MI->getOperand(2).getReg());
1545
1546 if (Src0Ty != Src1Ty)
1547 report("Source operands must be the same type", MI);
1548
1549 if (Src0Ty.getScalarType() != DstTy.getScalarType())
1550 report("G_SHUFFLE_VECTOR cannot change element type", MI);
1551
1552 // Don't check that all operands are vector because scalars are used in
1553 // place of 1 element vectors.
1554 int SrcNumElts = Src0Ty.isVector() ? Src0Ty.getNumElements() : 1;
1555 int DstNumElts = DstTy.isVector() ? DstTy.getNumElements() : 1;
1556
1557 ArrayRef<int> MaskIdxes = MaskOp.getShuffleMask();
1558
1559 if (static_cast<int>(MaskIdxes.size()) != DstNumElts)
1560 report("Wrong result type for shufflemask", MI);
1561
1562 for (int Idx : MaskIdxes) {
1563 if (Idx < 0)
1564 continue;
1565
1566 if (Idx >= 2 * SrcNumElts)
1567 report("Out of bounds shuffle index", MI);
1568 }
1569
1570 break;
1571 }
1572 case TargetOpcode::G_DYN_STACKALLOC: {
1573 const MachineOperand &DstOp = MI->getOperand(0);
1574 const MachineOperand &AllocOp = MI->getOperand(1);
1575 const MachineOperand &AlignOp = MI->getOperand(2);
1576
1577 if (!DstOp.isReg() || !MRI->getType(DstOp.getReg()).isPointer()) {
1578 report("dst operand 0 must be a pointer type", MI);
1579 break;
1580 }
1581
1582 if (!AllocOp.isReg() || !MRI->getType(AllocOp.getReg()).isScalar()) {
1583 report("src operand 1 must be a scalar reg type", MI);
1584 break;
1585 }
1586
1587 if (!AlignOp.isImm()) {
1588 report("src operand 2 must be an immediate type", MI);
1589 break;
1590 }
1591 break;
1592 }
1593 case TargetOpcode::G_MEMCPY_INLINE:
1594 case TargetOpcode::G_MEMCPY:
1595 case TargetOpcode::G_MEMMOVE: {
1596 ArrayRef<MachineMemOperand *> MMOs = MI->memoperands();
1597 if (MMOs.size() != 2) {
1598 report("memcpy/memmove must have 2 memory operands", MI);
1599 break;
1600 }
1601
1602 if ((!MMOs[0]->isStore() || MMOs[0]->isLoad()) ||
1603 (MMOs[1]->isStore() || !MMOs[1]->isLoad())) {
1604 report("wrong memory operand types", MI);
1605 break;
1606 }
1607
1608 if (MMOs[0]->getSize() != MMOs[1]->getSize())
1609 report("inconsistent memory operand sizes", MI);
1610
1611 LLT DstPtrTy = MRI->getType(MI->getOperand(0).getReg());
1612 LLT SrcPtrTy = MRI->getType(MI->getOperand(1).getReg());
1613
1614 if (!DstPtrTy.isPointer() || !SrcPtrTy.isPointer()) {
1615 report("memory instruction operand must be a pointer", MI);
1616 break;
1617 }
1618
1619 if (DstPtrTy.getAddressSpace() != MMOs[0]->getAddrSpace())
1620 report("inconsistent store address space", MI);
1621 if (SrcPtrTy.getAddressSpace() != MMOs[1]->getAddrSpace())
1622 report("inconsistent load address space", MI);
1623
1624 if (Opc != TargetOpcode::G_MEMCPY_INLINE)
1625 if (!MI->getOperand(3).isImm() || (MI->getOperand(3).getImm() & ~1LL))
1626 report("'tail' flag (operand 3) must be an immediate 0 or 1", MI);
1627
1628 break;
1629 }
1630 case TargetOpcode::G_BZERO:
1631 case TargetOpcode::G_MEMSET: {
1632 ArrayRef<MachineMemOperand *> MMOs = MI->memoperands();
1633 std::string Name = Opc == TargetOpcode::G_MEMSET ? "memset" : "bzero";
1634 if (MMOs.size() != 1) {
1635 report(Twine(Name, " must have 1 memory operand"), MI);
1636 break;
1637 }
1638
1639 if ((!MMOs[0]->isStore() || MMOs[0]->isLoad())) {
1640 report(Twine(Name, " memory operand must be a store"), MI);
1641 break;
1642 }
1643
1644 LLT DstPtrTy = MRI->getType(MI->getOperand(0).getReg());
1645 if (!DstPtrTy.isPointer()) {
1646 report(Twine(Name, " operand must be a pointer"), MI);
1647 break;
1648 }
1649
1650 if (DstPtrTy.getAddressSpace() != MMOs[0]->getAddrSpace())
1651 report("inconsistent " + Twine(Name, " address space"), MI);
1652
1653 if (!MI->getOperand(MI->getNumOperands() - 1).isImm() ||
1654 (MI->getOperand(MI->getNumOperands() - 1).getImm() & ~1LL))
1655 report("'tail' flag (last operand) must be an immediate 0 or 1", MI);
1656
1657 break;
1658 }
1659 case TargetOpcode::G_VECREDUCE_SEQ_FADD:
1660 case TargetOpcode::G_VECREDUCE_SEQ_FMUL: {
1661 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1662 LLT Src1Ty = MRI->getType(MI->getOperand(1).getReg());
1663 LLT Src2Ty = MRI->getType(MI->getOperand(2).getReg());
1664 if (!DstTy.isScalar())
1665 report("Vector reduction requires a scalar destination type", MI);
1666 if (!Src1Ty.isScalar())
1667 report("Sequential FADD/FMUL vector reduction requires a scalar 1st operand", MI);
1668 if (!Src2Ty.isVector())
1669 report("Sequential FADD/FMUL vector reduction must have a vector 2nd operand", MI);
1670 break;
1671 }
1672 case TargetOpcode::G_VECREDUCE_FADD:
1673 case TargetOpcode::G_VECREDUCE_FMUL:
1674 case TargetOpcode::G_VECREDUCE_FMAX:
1675 case TargetOpcode::G_VECREDUCE_FMIN:
1676 case TargetOpcode::G_VECREDUCE_ADD:
1677 case TargetOpcode::G_VECREDUCE_MUL:
1678 case TargetOpcode::G_VECREDUCE_AND:
1679 case TargetOpcode::G_VECREDUCE_OR:
1680 case TargetOpcode::G_VECREDUCE_XOR:
1681 case TargetOpcode::G_VECREDUCE_SMAX:
1682 case TargetOpcode::G_VECREDUCE_SMIN:
1683 case TargetOpcode::G_VECREDUCE_UMAX:
1684 case TargetOpcode::G_VECREDUCE_UMIN: {
1685 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1686 if (!DstTy.isScalar())
1687 report("Vector reduction requires a scalar destination type", MI);
1688 break;
1689 }
1690
1691 case TargetOpcode::G_SBFX:
1692 case TargetOpcode::G_UBFX: {
1693 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1694 if (DstTy.isVector()) {
1695 report("Bitfield extraction is not supported on vectors", MI);
1696 break;
1697 }
1698 break;
1699 }
1700 case TargetOpcode::G_SHL:
1701 case TargetOpcode::G_LSHR:
1702 case TargetOpcode::G_ASHR:
1703 case TargetOpcode::G_ROTR:
1704 case TargetOpcode::G_ROTL: {
1705 LLT Src1Ty = MRI->getType(MI->getOperand(1).getReg());
1706 LLT Src2Ty = MRI->getType(MI->getOperand(2).getReg());
1707 if (Src1Ty.isVector() != Src2Ty.isVector()) {
1708 report("Shifts and rotates require operands to be either all scalars or "
1709 "all vectors",
1710 MI);
1711 break;
1712 }
1713 break;
1714 }
1715 case TargetOpcode::G_LLROUND:
1716 case TargetOpcode::G_LROUND: {
1717 verifyAllRegOpsScalar(*MI, *MRI);
1718 break;
1719 }
1720 case TargetOpcode::G_IS_FPCLASS: {
1721 LLT DestTy = MRI->getType(MI->getOperand(0).getReg());
1722 LLT DestEltTy = DestTy.getScalarType();
1723 if (!DestEltTy.isScalar()) {
1724 report("Destination must be a scalar or vector of scalars", MI);
1725 break;
1726 }
1727 LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
1728 LLT SrcEltTy = SrcTy.getScalarType();
1729 if (!SrcEltTy.isScalar()) {
1730 report("Source must be a scalar or vector of scalars", MI);
1731 break;
1732 }
1733 if (!verifyVectorElementMatch(DestTy, SrcTy, MI))
1734 break;
1735 const MachineOperand &TestMO = MI->getOperand(2);
1736 if (!TestMO.isImm()) {
1737 report("floating-point class set (operand 2) must be an immediate", MI);
1738 break;
1739 }
1740 int64_t Test = TestMO.getImm();
1741 if (Test < 0 || Test > fcAllFlags) {
1742 report("Incorrect floating-point class set (operand 2)", MI);
1743 break;
1744 }
1745 break;
1746 }
1747 case TargetOpcode::G_ASSERT_ALIGN: {
1748 if (MI->getOperand(2).getImm() < 1)
1749 report("alignment immediate must be >= 1", MI);
1750 break;
1751 }
1752 case TargetOpcode::G_CONSTANT_POOL: {
1753 if (!MI->getOperand(1).isCPI())
1754 report("Src operand 1 must be a constant pool index", MI);
1755 if (!MRI->getType(MI->getOperand(0).getReg()).isPointer())
1756 report("Dst operand 0 must be a pointer", MI);
1757 break;
1758 }
1759 default:
1760 break;
1761 }
1762}
1763
1764void MachineVerifier::visitMachineInstrBefore(const MachineInstr *MI) {
1765 const MCInstrDesc &MCID = MI->getDesc();
1766 if (MI->getNumOperands() < MCID.getNumOperands()) {
1767 report("Too few operands", MI);
1768 errs() << MCID.getNumOperands() << " operands expected, but "
1769 << MI->getNumOperands() << " given.\n";
1770 }
1771
1772 if (MI->isPHI()) {
1773 if (MF->getProperties().hasProperty(
1775 report("Found PHI instruction with NoPHIs property set", MI);
1776
1777 if (FirstNonPHI)
1778 report("Found PHI instruction after non-PHI", MI);
1779 } else if (FirstNonPHI == nullptr)
1780 FirstNonPHI = MI;
1781
1782 // Check the tied operands.
1783 if (MI->isInlineAsm())
1784 verifyInlineAsm(MI);
1785
1786 // Check that unspillable terminators define a reg and have at most one use.
1787 if (TII->isUnspillableTerminator(MI)) {
1788 if (!MI->getOperand(0).isReg() || !MI->getOperand(0).isDef())
1789 report("Unspillable Terminator does not define a reg", MI);
1790 Register Def = MI->getOperand(0).getReg();
1791 if (Def.isVirtual() &&
1792 !MF->getProperties().hasProperty(
1794 std::distance(MRI->use_nodbg_begin(Def), MRI->use_nodbg_end()) > 1)
1795 report("Unspillable Terminator expected to have at most one use!", MI);
1796 }
1797
1798 // A fully-formed DBG_VALUE must have a location. Ignore partially formed
1799 // DBG_VALUEs: these are convenient to use in tests, but should never get
1800 // generated.
1801 if (MI->isDebugValue() && MI->getNumOperands() == 4)
1802 if (!MI->getDebugLoc())
1803 report("Missing DebugLoc for debug instruction", MI);
1804
1805 // Meta instructions should never be the subject of debug value tracking,
1806 // they don't create a value in the output program at all.
1807 if (MI->isMetaInstruction() && MI->peekDebugInstrNum())
1808 report("Metadata instruction should not have a value tracking number", MI);
1809
1810 // Check the MachineMemOperands for basic consistency.
1811 for (MachineMemOperand *Op : MI->memoperands()) {
1812 if (Op->isLoad() && !MI->mayLoad())
1813 report("Missing mayLoad flag", MI);
1814 if (Op->isStore() && !MI->mayStore())
1815 report("Missing mayStore flag", MI);
1816 }
1817
1818 // Debug values must not have a slot index.
1819 // Other instructions must have one, unless they are inside a bundle.
1820 if (LiveInts) {
1821 bool mapped = !LiveInts->isNotInMIMap(*MI);
1822 if (MI->isDebugOrPseudoInstr()) {
1823 if (mapped)
1824 report("Debug instruction has a slot index", MI);
1825 } else if (MI->isInsideBundle()) {
1826 if (mapped)
1827 report("Instruction inside bundle has a slot index", MI);
1828 } else {
1829 if (!mapped)
1830 report("Missing slot index", MI);
1831 }
1832 }
1833
1834 unsigned Opc = MCID.getOpcode();
1836 verifyPreISelGenericInstruction(MI);
1837 return;
1838 }
1839
1841 if (!TII->verifyInstruction(*MI, ErrorInfo))
1842 report(ErrorInfo.data(), MI);
1843
1844 // Verify properties of various specific instruction types
1845 switch (MI->getOpcode()) {
1846 case TargetOpcode::COPY: {
1847 const MachineOperand &DstOp = MI->getOperand(0);
1848 const MachineOperand &SrcOp = MI->getOperand(1);
1849 const Register SrcReg = SrcOp.getReg();
1850 const Register DstReg = DstOp.getReg();
1851
1852 LLT DstTy = MRI->getType(DstReg);
1853 LLT SrcTy = MRI->getType(SrcReg);
1854 if (SrcTy.isValid() && DstTy.isValid()) {
1855 // If both types are valid, check that the types are the same.
1856 if (SrcTy != DstTy) {
1857 report("Copy Instruction is illegal with mismatching types", MI);
1858 errs() << "Def = " << DstTy << ", Src = " << SrcTy << "\n";
1859 }
1860
1861 break;
1862 }
1863
1864 if (!SrcTy.isValid() && !DstTy.isValid())
1865 break;
1866
1867 // If we have only one valid type, this is likely a copy between a virtual
1868 // and physical register.
1869 unsigned SrcSize = 0;
1870 unsigned DstSize = 0;
1871 if (SrcReg.isPhysical() && DstTy.isValid()) {
1872 const TargetRegisterClass *SrcRC =
1873 TRI->getMinimalPhysRegClassLLT(SrcReg, DstTy);
1874 if (SrcRC)
1875 SrcSize = TRI->getRegSizeInBits(*SrcRC);
1876 }
1877
1878 if (SrcSize == 0)
1879 SrcSize = TRI->getRegSizeInBits(SrcReg, *MRI);
1880
1881 if (DstReg.isPhysical() && SrcTy.isValid()) {
1882 const TargetRegisterClass *DstRC =
1883 TRI->getMinimalPhysRegClassLLT(DstReg, SrcTy);
1884 if (DstRC)
1885 DstSize = TRI->getRegSizeInBits(*DstRC);
1886 }
1887
1888 if (DstSize == 0)
1889 DstSize = TRI->getRegSizeInBits(DstReg, *MRI);
1890
1891 if (SrcSize != 0 && DstSize != 0 && SrcSize != DstSize) {
1892 if (!DstOp.getSubReg() && !SrcOp.getSubReg()) {
1893 report("Copy Instruction is illegal with mismatching sizes", MI);
1894 errs() << "Def Size = " << DstSize << ", Src Size = " << SrcSize
1895 << "\n";
1896 }
1897 }
1898 break;
1899 }
1900 case TargetOpcode::STATEPOINT: {
1901 StatepointOpers SO(MI);
1902 if (!MI->getOperand(SO.getIDPos()).isImm() ||
1903 !MI->getOperand(SO.getNBytesPos()).isImm() ||
1904 !MI->getOperand(SO.getNCallArgsPos()).isImm()) {
1905 report("meta operands to STATEPOINT not constant!", MI);
1906 break;
1907 }
1908
1909 auto VerifyStackMapConstant = [&](unsigned Offset) {
1910 if (Offset >= MI->getNumOperands()) {
1911 report("stack map constant to STATEPOINT is out of range!", MI);
1912 return;
1913 }
1914 if (!MI->getOperand(Offset - 1).isImm() ||
1915 MI->getOperand(Offset - 1).getImm() != StackMaps::ConstantOp ||
1916 !MI->getOperand(Offset).isImm())
1917 report("stack map constant to STATEPOINT not well formed!", MI);
1918 };
1919 VerifyStackMapConstant(SO.getCCIdx());
1920 VerifyStackMapConstant(SO.getFlagsIdx());
1921 VerifyStackMapConstant(SO.getNumDeoptArgsIdx());
1922 VerifyStackMapConstant(SO.getNumGCPtrIdx());
1923 VerifyStackMapConstant(SO.getNumAllocaIdx());
1924 VerifyStackMapConstant(SO.getNumGcMapEntriesIdx());
1925
1926 // Verify that all explicit statepoint defs are tied to gc operands as
1927 // they are expected to be a relocation of gc operands.
1928 unsigned FirstGCPtrIdx = SO.getFirstGCPtrIdx();
1929 unsigned LastGCPtrIdx = SO.getNumAllocaIdx() - 2;
1930 for (unsigned Idx = 0; Idx < MI->getNumDefs(); Idx++) {
1931 unsigned UseOpIdx;
1932 if (!MI->isRegTiedToUseOperand(Idx, &UseOpIdx)) {
1933 report("STATEPOINT defs expected to be tied", MI);
1934 break;
1935 }
1936 if (UseOpIdx < FirstGCPtrIdx || UseOpIdx > LastGCPtrIdx) {
1937 report("STATEPOINT def tied to non-gc operand", MI);
1938 break;
1939 }
1940 }
1941
1942 // TODO: verify we have properly encoded deopt arguments
1943 } break;
1944 case TargetOpcode::INSERT_SUBREG: {
1945 unsigned InsertedSize;
1946 if (unsigned SubIdx = MI->getOperand(2).getSubReg())
1947 InsertedSize = TRI->getSubRegIdxSize(SubIdx);
1948 else
1949 InsertedSize = TRI->getRegSizeInBits(MI->getOperand(2).getReg(), *MRI);
1950 unsigned SubRegSize = TRI->getSubRegIdxSize(MI->getOperand(3).getImm());
1951 if (SubRegSize < InsertedSize) {
1952 report("INSERT_SUBREG expected inserted value to have equal or lesser "
1953 "size than the subreg it was inserted into", MI);
1954 break;
1955 }
1956 } break;
1957 case TargetOpcode::REG_SEQUENCE: {
1958 unsigned NumOps = MI->getNumOperands();
1959 if (!(NumOps & 1)) {
1960 report("Invalid number of operands for REG_SEQUENCE", MI);
1961 break;
1962 }
1963
1964 for (unsigned I = 1; I != NumOps; I += 2) {
1965 const MachineOperand &RegOp = MI->getOperand(I);
1966 const MachineOperand &SubRegOp = MI->getOperand(I + 1);
1967
1968 if (!RegOp.isReg())
1969 report("Invalid register operand for REG_SEQUENCE", &RegOp, I);
1970
1971 if (!SubRegOp.isImm() || SubRegOp.getImm() == 0 ||
1972 SubRegOp.getImm() >= TRI->getNumSubRegIndices()) {
1973 report("Invalid subregister index operand for REG_SEQUENCE",
1974 &SubRegOp, I + 1);
1975 }
1976 }
1977
1978 Register DstReg = MI->getOperand(0).getReg();
1979 if (DstReg.isPhysical())
1980 report("REG_SEQUENCE does not support physical register results", MI);
1981
1982 if (MI->getOperand(0).getSubReg())
1983 report("Invalid subreg result for REG_SEQUENCE", MI);
1984
1985 break;
1986 }
1987 }
1988}
1989
/// Verify a single machine operand against the instruction descriptor and the
/// available analyses (liveness, register classes, tied-operand constraints).
/// \p MONum is the operand's index within its parent instruction.
// NOTE(review): this listing comes from a doxygen scrape and several source
// lines are missing (case labels and condition fragments are absent below);
// each gap is flagged where it occurs. Compare against upstream before edits.
1990void
1991MachineVerifier::visitMachineOperand(const MachineOperand *MO, unsigned MONum) {
1992  const MachineInstr *MI = MO->getParent();
1993  const MCInstrDesc &MCID = MI->getDesc();
1994  unsigned NumDefs = MCID.getNumDefs();
  // PATCHPOINT's defs are variadic: only operand 0 (when it is a register)
  // is treated as an explicit def.
1995  if (MCID.getOpcode() == TargetOpcode::PATCHPOINT)
1996    NumDefs = (MONum == 0 && MO->isReg()) ? NumDefs : 0;
1997
1998  // The first MCID.NumDefs operands must be explicit register defines
1999  if (MONum < NumDefs) {
2000    const MCOperandInfo &MCOI = MCID.operands()[MONum];
2001    if (!MO->isReg())
2002      report("Explicit definition must be a register", MO, MONum);
2003    else if (!MO->isDef() && !MCOI.isOptionalDef())
2004      report("Explicit definition marked as use", MO, MONum);
2005    else if (MO->isImplicit())
2006      report("Explicit definition marked as implicit", MO, MONum);
2007  } else if (MONum < MCID.getNumOperands()) {
2008    const MCOperandInfo &MCOI = MCID.operands()[MONum];
2009    // Don't check if it's the last operand in a variadic instruction. See,
2010    // e.g., LDM_RET in the arm back end. Check non-variadic operands only.
2011    bool IsOptional = MI->isVariadic() && MONum == MCID.getNumOperands() - 1;
2012    if (!IsOptional) {
2013      if (MO->isReg()) {
2014        if (MO->isDef() && !MCOI.isOptionalDef() && !MCID.variadicOpsAreDefs())
2015          report("Explicit operand marked as def", MO, MONum);
2016        if (MO->isImplicit())
2017          report("Explicit operand marked as implicit", MO, MONum);
2018      }
2019
2020      // Check that an instruction has register operands only as expected.
2021      if (MCOI.OperandType == MCOI::OPERAND_REGISTER &&
2022          !MO->isReg() && !MO->isFI())
2023        report("Expected a register operand.", MO, MONum);
2024      if (MO->isReg()) {
        // NOTE(review): the listing is missing the opening of this condition
        // (source lines 2025-2026); only its tail is visible below.
2027            !TII->isPCRelRegisterOperandLegal(*MO)))
2028          report("Expected a non-register operand.", MO, MONum);
2029      }
2030    }
2031
    // Cross-check the descriptor's TIED_TO constraint against the operand's
    // own tie flags.
2032    int TiedTo = MCID.getOperandConstraint(MONum, MCOI::TIED_TO);
2033    if (TiedTo != -1) {
2034      if (!MO->isReg())
2035        report("Tied use must be a register", MO, MONum);
2036      else if (!MO->isTied())
2037        report("Operand should be tied", MO, MONum);
2038      else if (unsigned(TiedTo) != MI->findTiedOperandIdx(MONum))
2039        report("Tied def doesn't match MCInstrDesc", MO, MONum);
2040      else if (MO->getReg().isPhysical()) {
2041        const MachineOperand &MOTied = MI->getOperand(TiedTo);
2042        if (!MOTied.isReg())
2043          report("Tied counterpart must be a register", &MOTied, TiedTo);
2044        else if (MOTied.getReg().isPhysical() &&
2045                 MO->getReg() != MOTied.getReg())
2046          report("Tied physical registers must match.", &MOTied, TiedTo);
2047      }
2048    } else if (MO->isReg() && MO->isTied())
2049      report("Explicit operand should not be tied", MO, MONum);
2050  } else {
2051    // ARM adds %reg0 operands to indicate predicates. We'll allow that.
2052    if (MO->isReg() && !MO->isImplicit() && !MI->isVariadic() && MO->getReg())
2053      report("Extra explicit operand on non-variadic instruction", MO, MONum);
2054  }
2055
2056  switch (MO->getType()) {
  // NOTE(review): the `case MachineOperand::MO_Register:` label (source line
  // 2057) is missing from this listing; the block below is the register case.
2058    // Verify debug flag on debug instructions. Check this first because reg0
2059    // indicates an undefined debug value.
2060    if (MI->isDebugInstr() && MO->isUse()) {
2061      if (!MO->isDebug())
2062        report("Register operand must be marked debug", MO, MONum);
2063    } else if (MO->isDebug()) {
2064      report("Register operand must not be marked debug", MO, MONum);
2065    }
2066
2067    const Register Reg = MO->getReg();
    // %noreg operands get no further checking.
2068    if (!Reg)
2069      return;
2070    if (MRI->tracksLiveness() && !MI->isDebugInstr())
2071      checkLiveness(MO, MONum);
2072
2073    if (MO->isDef() && MO->isUndef() && !MO->getSubReg() &&
2074        MO->getReg().isVirtual()) // TODO: Apply to physregs too
2075      report("Undef virtual register def operands require a subregister", MO, MONum);
2076
2077    // Verify the consistency of tied operands.
2078    if (MO->isTied()) {
2079      unsigned OtherIdx = MI->findTiedOperandIdx(MONum);
2080      const MachineOperand &OtherMO = MI->getOperand(OtherIdx);
2081      if (!OtherMO.isReg())
2082        report("Must be tied to a register", MO, MONum);
2083      if (!OtherMO.isTied())
2084        report("Missing tie flags on tied operand", MO, MONum);
      // Tie links must be symmetric: the counterpart must point back here.
2085      if (MI->findTiedOperandIdx(OtherIdx) != MONum)
2086        report("Inconsistent tie links", MO, MONum);
2087      if (MONum < MCID.getNumDefs()) {
2088        if (OtherIdx < MCID.getNumOperands()) {
2089          if (-1 == MCID.getOperandConstraint(OtherIdx, MCOI::TIED_TO))
2090            report("Explicit def tied to explicit use without tie constraint",
2091                   MO, MONum);
2092        } else {
2093          if (!OtherMO.isImplicit())
2094            report("Explicit def should be tied to implicit use", MO, MONum);
2095        }
2096      }
2097    }
2098
2099    // Verify two-address constraints after the twoaddressinstruction pass.
2100    // Both twoaddressinstruction pass and phi-node-elimination pass call
2101    // MRI->leaveSSA() to set MF as NoSSA, we should do the verification after
2102    // twoaddressinstruction pass not after phi-node-elimination pass. So we
2103    // shouldn't use the NoSSA as the condition, we should based on
2104    // TiedOpsRewritten property to verify two-address constraints, this
2105    // property will be set in twoaddressinstruction pass.
2106    unsigned DefIdx;
2107    if (MF->getProperties().hasProperty(
        // NOTE(review): the property argument (source line 2108, presumably
        // MachineFunctionProperties::Property::TiedOpsRewritten) is missing
        // from this listing.
2109        MO->isUse() && MI->isRegTiedToDefOperand(MONum, &DefIdx) &&
2110        Reg != MI->getOperand(DefIdx).getReg())
2111      report("Two-address instruction operands must be identical", MO, MONum);
2112
2113    // Check register classes.
2114    unsigned SubIdx = MO->getSubReg();
2115
2116    if (Reg.isPhysical()) {
2117      if (SubIdx) {
2118        report("Illegal subregister index for physical register", MO, MONum);
2119        return;
2120      }
      // A physical register must belong to the class the descriptor demands.
2121      if (MONum < MCID.getNumOperands()) {
2122        if (const TargetRegisterClass *DRC =
2123              TII->getRegClass(MCID, MONum, TRI, *MF)) {
2124          if (!DRC->contains(Reg)) {
2125            report("Illegal physical register for instruction", MO, MONum);
2126            errs() << printReg(Reg, TRI) << " is not a "
2127                   << TRI->getRegClassName(DRC) << " register.\n";
2128          }
2129        }
2130      }
2131      if (MO->isRenamable()) {
2132        if (MRI->isReserved(Reg)) {
2133          report("isRenamable set on reserved register", MO, MONum);
2134          return;
2135        }
2136      }
2137    } else {
2138      // Virtual register.
2139      const TargetRegisterClass *RC = MRI->getRegClassOrNull(Reg);
2140      if (!RC) {
2141        // This is a generic virtual register.
2142
2143        // Do not allow undef uses for generic virtual registers. This ensures
2144        // getVRegDef can never fail and return null on a generic register.
2145        //
2146        // FIXME: This restriction should probably be broadened to all SSA
2147        // MIR. However, DetectDeadLanes/ProcessImplicitDefs technically still
2148        // run on the SSA function just before phi elimination.
2149        if (MO->isUndef())
2150          report("Generic virtual register use cannot be undef", MO, MONum);
2151
2152        // Debug value instruction is permitted to use undefined vregs.
2153        // This is a performance measure to skip the overhead of immediately
2154        // pruning unused debug operands. The final undef substitution occurs
2155        // when debug values are allocated in LDVImpl::handleDebugValue, so
2156        // these verifications always apply after this pass.
2157        if (isFunctionTracksDebugUserValues || !MO->isUse() ||
2158            !MI->isDebugValue() || !MRI->def_empty(Reg)) {
2159          // If we're post-Select, we can't have gvregs anymore.
2160          if (isFunctionSelected) {
2161            report("Generic virtual register invalid in a Selected function",
2162                   MO, MONum);
2163            return;
2164          }
2165
2166          // The gvreg must have a type and it must not have a SubIdx.
2167          LLT Ty = MRI->getType(Reg);
2168          if (!Ty.isValid()) {
2169            report("Generic virtual register must have a valid type", MO,
2170                   MONum);
2171            return;
2172          }
2173
2174          const RegisterBank *RegBank = MRI->getRegBankOrNull(Reg);
2175
2176          // If we're post-RegBankSelect, the gvreg must have a bank.
2177          if (!RegBank && isFunctionRegBankSelected) {
2178            report("Generic virtual register must have a bank in a "
2179                   "RegBankSelected function",
2180                   MO, MONum);
2181            return;
2182          }
2183
2184          // Make sure the register fits into its register bank if any.
2185          if (RegBank && Ty.isValid() &&
2186              RegBank->getSize() < Ty.getSizeInBits()) {
2187            report("Register bank is too small for virtual register", MO,
2188                   MONum);
2189            errs() << "Register bank " << RegBank->getName() << " too small("
2190                   << RegBank->getSize() << ") to fit " << Ty.getSizeInBits()
2191                   << "-bits\n";
2192            return;
2193          }
2194        }
2195
2196        if (SubIdx) {
2197          report("Generic virtual register does not allow subregister index", MO,
2198                 MONum);
2199          return;
2200        }
2201
2202        // If this is a target specific instruction and this operand
2203        // has register class constraint, the virtual register must
2204        // comply to it.
2205        if (!isPreISelGenericOpcode(MCID.getOpcode()) &&
2206            MONum < MCID.getNumOperands() &&
2207            TII->getRegClass(MCID, MONum, TRI, *MF)) {
2208          report("Virtual register does not match instruction constraint", MO,
2209                 MONum);
2210          errs() << "Expect register class "
2211                 << TRI->getRegClassName(
2212                        TII->getRegClass(MCID, MONum, TRI, *MF))
2213                 << " but got nothing\n";
2214          return;
2215        }
2216
2217        break;
2218      }
      // A subregister index must be supported by the vreg's register class.
2219      if (SubIdx) {
2220        const TargetRegisterClass *SRC =
2221          TRI->getSubClassWithSubReg(RC, SubIdx);
2222        if (!SRC) {
2223          report("Invalid subregister index for virtual register", MO, MONum);
2224          errs() << "Register class " << TRI->getRegClassName(RC)
2225                 << " does not support subreg index " << SubIdx << "\n";
2226          return;
2227        }
2228        if (RC != SRC) {
2229          report("Invalid register class for subregister index", MO, MONum);
2230          errs() << "Register class " << TRI->getRegClassName(RC)
2231                 << " does not fully support subreg index " << SubIdx << "\n";
2232          return;
2233        }
2234      }
2235      if (MONum < MCID.getNumOperands()) {
2236        if (const TargetRegisterClass *DRC =
2237              TII->getRegClass(MCID, MONum, TRI, *MF)) {
2238          if (SubIdx) {
            // With a subreg index the constraint applies to the super
            // register; translate DRC accordingly before comparing.
2239            const TargetRegisterClass *SuperRC =
2240                TRI->getLargestLegalSuperClass(RC, *MF);
2241            if (!SuperRC) {
2242              report("No largest legal super class exists.", MO, MONum);
2243              return;
2244            }
2245            DRC = TRI->getMatchingSuperRegClass(SuperRC, DRC, SubIdx);
2246            if (!DRC) {
2247              report("No matching super-reg register class.", MO, MONum);
2248              return;
2249            }
2250          }
2251          if (!RC->hasSuperClassEq(DRC)) {
2252            report("Illegal virtual register for instruction", MO, MONum);
2253            errs() << "Expected a " << TRI->getRegClassName(DRC)
2254                   << " register, but got a " << TRI->getRegClassName(RC)
2255                   << " register\n";
2256          }
2257        }
2258      }
2259    }
2260    break;
2261  }
2262
  // NOTE(review): `case MachineOperand::MO_RegisterMask:` label (source line
  // 2263) is missing from this listing.
2264    regMasks.push_back(MO->getRegMask());
2265    break;
2266
  // NOTE(review): `case MachineOperand::MO_MachineBasicBlock:` label (source
  // line 2267) is missing from this listing.
2268    if (MI->isPHI() && !MO->getMBB()->isSuccessor(MI->getParent()))
2269      report("PHI operand is not in the CFG", MO, MONum);
2270    break;
2271
  // NOTE(review): `case MachineOperand::MO_FrameIndex:` label (source line
  // 2272) is missing from this listing.
2273    if (LiveStks && LiveStks->hasInterval(MO->getIndex()) &&
2274        LiveInts && !LiveInts->isNotInMIMap(*MI)) {
2275      int FI = MO->getIndex();
2276      LiveInterval &LI = LiveStks->getInterval(FI);
2277      SlotIndex Idx = LiveInts->getInstructionIndex(*MI);
2278
2279      bool stores = MI->mayStore();
2280      bool loads = MI->mayLoad();
2281      // For a memory-to-memory move, we need to check if the frame
2282      // index is used for storing or loading, by inspecting the
2283      // memory operands.
2284      if (stores && loads) {
2285        for (auto *MMO : MI->memoperands()) {
2286          const PseudoSourceValue *PSV = MMO->getPseudoValue();
2287          if (PSV == nullptr) continue;
          // NOTE(review): the declaration opening this statement (source line
          // 2288, `const FixedStackPseudoSourceValue *Value =`) is missing
          // from this listing.
2289              dyn_cast<FixedStackPseudoSourceValue>(PSV);
2290          if (Value == nullptr) continue;
2291          if (Value->getFrameIndex() != FI) continue;
2292
2293          if (MMO->isStore())
2294            loads = false;
2295          else
2296            stores = false;
2297          break;
2298        }
        // Neither direction resolved: the memoperand list is incomplete.
2299        if (loads == stores)
2300          report("Missing fixed stack memoperand.", MI);
2301      }
2302      if (loads && !LI.liveAt(Idx.getRegSlot(true))) {
2303        report("Instruction loads from dead spill slot", MO, MONum);
2304        errs() << "Live stack: " << LI << '\n';
2305      }
2306      if (stores && !LI.liveAt(Idx.getRegSlot())) {
2307        report("Instruction stores to dead spill slot", MO, MONum);
2308        errs() << "Live stack: " << LI << '\n';
2309      }
2310    }
2311    break;
2312
  // NOTE(review): `case MachineOperand::MO_CFIIndex:` label (source line
  // 2313) is missing from this listing.
2314    if (MO->getCFIIndex() >= MF->getFrameInstructions().size())
2315      report("CFI instruction has invalid index", MO, MONum);
2316    break;
2317
2318  default:
2319    break;
2320  }
2321}
2322
2323void MachineVerifier::checkLivenessAtUse(const MachineOperand *MO,
2324 unsigned MONum, SlotIndex UseIdx,
2325 const LiveRange &LR,
2326 Register VRegOrUnit,
2327 LaneBitmask LaneMask) {
2328 LiveQueryResult LRQ = LR.Query(UseIdx);
2329 // Check if we have a segment at the use, note however that we only need one
2330 // live subregister range, the others may be dead.
2331 if (!LRQ.valueIn() && LaneMask.none()) {
2332 report("No live segment at use", MO, MONum);
2333 report_context_liverange(LR);
2334 report_context_vreg_regunit(VRegOrUnit);
2335 report_context(UseIdx);
2336 }
2337 if (MO->isKill() && !LRQ.isKill()) {
2338 report("Live range continues after kill flag", MO, MONum);
2339 report_context_liverange(LR);
2340 report_context_vreg_regunit(VRegOrUnit);
2341 if (LaneMask.any())
2342 report_context_lanemask(LaneMask);
2343 report_context(UseIdx);
2344 }
2345}
2346
/// Verify liveness information for a register def against live range \p LR.
/// \p DefIdx is the def's slot index; \p SubRangeCheck is true when \p LR is
/// a subregister subrange (with \p LaneMask naming its lanes).
2347void MachineVerifier::checkLivenessAtDef(const MachineOperand *MO,
2348                                         unsigned MONum, SlotIndex DefIdx,
2349                                         const LiveRange &LR,
2350                                         Register VRegOrUnit,
2351                                         bool SubRangeCheck,
2352                                         LaneBitmask LaneMask) {
2353  if (const VNInfo *VNI = LR.getVNInfoAt(DefIdx)) {
2354    // The LR can correspond to the whole reg and its def slot is not obliged
2355    // to be the same as the MO' def slot. E.g. when we check here "normal"
2356    // subreg MO but there is other EC subreg MO in the same instruction so the
2357    // whole reg has EC def slot and differs from the currently checked MO' def
2358    // slot. For example:
2359    // %0 [16e,32r:0) 0@16e  L..3 [16e,32r:0) 0@16e  L..C [16r,32r:0) 0@16r
2360    // Check that there is an early-clobber def of the same superregister
2361    // somewhere is performed in visitMachineFunctionAfter()
    // NOTE(review): this listing comes from a doxygen scrape — verify the
    // grouping/parenthesization of this condition against upstream before
    // modifying it.
2362    if (((SubRangeCheck || MO->getSubReg() == 0) && VNI->def != DefIdx) ||
2363        !SlotIndex::isSameInstr(VNI->def, DefIdx) ||
2364        (VNI->def != DefIdx &&
2365         (!VNI->def.isEarlyClobber() || !DefIdx.isRegister()))) {
2366      report("Inconsistent valno->def", MO, MONum);
2367      report_context_liverange(LR);
2368      report_context_vreg_regunit(VRegOrUnit);
2369      if (LaneMask.any())
2370        report_context_lanemask(LaneMask);
2371      report_context(*VNI);
2372      report_context(DefIdx);
2373    }
2374  } else {
2375    report("No live segment at def", MO, MONum);
2376    report_context_liverange(LR);
2377    report_context_vreg_regunit(VRegOrUnit);
2378    if (LaneMask.any())
2379      report_context_lanemask(LaneMask);
2380    report_context(DefIdx);
2381  }
2382  // Check that, if the dead def flag is present, LiveInts agree.
2383  if (MO->isDead()) {
2384    LiveQueryResult LRQ = LR.Query(DefIdx);
2385    if (!LRQ.isDeadDef()) {
2386      assert(VRegOrUnit.isVirtual() && "Expecting a virtual register.");
2387      // A dead subreg def only tells us that the specific subreg is dead. There
2388      // could be other non-dead defs of other subregs, or we could have other
2389      // parts of the register being live through the instruction. So unless we
2390      // are checking liveness for a subrange it is ok for the live range to
2391      // continue, given that we have a dead def of a subregister.
2392      if (SubRangeCheck || MO->getSubReg() == 0) {
2393        report("Live range continues after dead def flag", MO, MONum);
2394        report_context_liverange(LR);
2395        report_context_vreg_regunit(VRegOrUnit);
2396        if (LaneMask.any())
2397          report_context_lanemask(LaneMask);
2398      }
2399    }
2400  }
2401}
2402
/// Check liveness-related invariants for a single register operand: kill
/// flags vs LiveVariables, live segments vs LiveIntervals (whole interval
/// and subranges), use-of-dead-register tracking, and SSA single-def form.
2403void MachineVerifier::checkLiveness(const MachineOperand *MO, unsigned MONum) {
2404  const MachineInstr *MI = MO->getParent();
2405  const Register Reg = MO->getReg();
2406  const unsigned SubRegIdx = MO->getSubReg();
2407
  // Resolve the vreg's live interval up front; stays null for physregs or
  // when LiveIntervals is unavailable.
  // NOTE(review): if the interval is missing, LI stays null and the later
  // `*LI` dereferences are only reached via the Reg.isVirtual() paths —
  // the "no live interval" report above them flags the broken input first.
2408  const LiveInterval *LI = nullptr;
2409  if (LiveInts && Reg.isVirtual()) {
2410    if (LiveInts->hasInterval(Reg)) {
2411      LI = &LiveInts->getInterval(Reg);
2412      if (SubRegIdx != 0 && (MO->isDef() || !MO->isUndef()) && !LI->empty() &&
2413          !LI->hasSubRanges() && MRI->shouldTrackSubRegLiveness(Reg))
2414        report("Live interval for subreg operand has no subranges", MO, MONum);
2415    } else {
2416      report("Virtual register has no live interval", MO, MONum);
2417    }
2418  }
2419
2420  // Both use and def operands can read a register.
2421  if (MO->readsReg()) {
2422    if (MO->isKill())
2423      addRegWithSubRegs(regsKilled, Reg);
2424
2425    // Check that LiveVars knows this kill (unless we are inside a bundle, in
2426    // which case we have already checked that LiveVars knows any kills on the
2427    // bundle header instead).
2428    if (LiveVars && Reg.isVirtual() && MO->isKill() &&
2429        !MI->isBundledWithPred()) {
2430      LiveVariables::VarInfo &VI = LiveVars->getVarInfo(Reg);
2431      if (!is_contained(VI.Kills, MI))
2432        report("Kill missing from LiveVariables", MO, MONum);
2433    }
2434
2435    // Check LiveInts liveness and kill.
2436    if (LiveInts && !LiveInts->isNotInMIMap(*MI)) {
2437      SlotIndex UseIdx = LiveInts->getInstructionIndex(*MI);
2438      // Check the cached regunit intervals.
2439      if (Reg.isPhysical() && !isReserved(Reg)) {
2440        for (MCRegUnitIterator Units(Reg.asMCReg(), TRI); Units.isValid();
2441             ++Units) {
2442          if (MRI->isReservedRegUnit(*Units))
2443            continue;
2444          if (const LiveRange *LR = LiveInts->getCachedRegUnit(*Units))
2445            checkLivenessAtUse(MO, MONum, UseIdx, *LR, *Units);
2446        }
2447      }
2448
2449      if (Reg.isVirtual()) {
2450        // This is a virtual register interval.
2451        checkLivenessAtUse(MO, MONum, UseIdx, *LI, Reg);
2452
2453        if (LI->hasSubRanges() && !MO->isDef()) {
          // A subreg operand only reads the lanes of its subreg index; a
          // full-reg operand reads every lane the vreg can have.
2454          LaneBitmask MOMask = SubRegIdx != 0
2455                             ? TRI->getSubRegIndexLaneMask(SubRegIdx)
2456                             : MRI->getMaxLaneMaskForVReg(Reg);
2457          LaneBitmask LiveInMask;
2458          for (const LiveInterval::SubRange &SR : LI->subranges()) {
2459            if ((MOMask & SR.LaneMask).none())
2460              continue;
2461            checkLivenessAtUse(MO, MONum, UseIdx, SR, Reg, SR.LaneMask);
2462            LiveQueryResult LRQ = SR.Query(UseIdx);
2463            if (LRQ.valueIn())
2464              LiveInMask |= SR.LaneMask;
2465          }
2466          // At least parts of the register has to be live at the use.
2467          if ((LiveInMask & MOMask).none()) {
2468            report("No live subrange at use", MO, MONum);
2469            report_context(*LI);
2470            report_context(UseIdx);
2471          }
2472        }
2473      }
2474    }
2475
2476    // Use of a dead register.
2477    if (!regsLive.count(Reg)) {
2478      if (Reg.isPhysical()) {
2479        // Reserved registers may be used even when 'dead'.
2480        bool Bad = !isReserved(Reg);
2481        // We are fine if just any subregister has a defined value.
2482        if (Bad) {
2483
2484          for (const MCPhysReg &SubReg : TRI->subregs(Reg)) {
2485            if (regsLive.count(SubReg)) {
2486              Bad = false;
2487              break;
2488            }
2489          }
2490        }
2491        // If there is an additional implicit-use of a super register we stop
2492        // here. By definition we are fine if the super register is not
2493        // (completely) dead, if the complete super register is dead we will
2494        // get a report for its operand.
2495        if (Bad) {
2496          for (const MachineOperand &MOP : MI->uses()) {
2497            if (!MOP.isReg() || !MOP.isImplicit())
2498              continue;
2499
2500            if (!MOP.getReg().isPhysical())
2501              continue;
2502
2503            if (llvm::is_contained(TRI->subregs(MOP.getReg()), Reg))
2504              Bad = false;
2505          }
2506        }
2507        if (Bad)
2508          report("Using an undefined physical register", MO, MONum);
2509      } else if (MRI->def_empty(Reg)) {
2510        report("Reading virtual register without a def", MO, MONum);
2511      } else {
2512        BBInfo &MInfo = MBBInfoMap[MI->getParent()];
2513        // We don't know which virtual registers are live in, so only complain
2514        // if vreg was killed in this MBB. Otherwise keep track of vregs that
2515        // must be live in. PHI instructions are handled separately.
2516        if (MInfo.regsKilled.count(Reg))
2517          report("Using a killed virtual register", MO, MONum);
2518        else if (!MI->isPHI())
2519          MInfo.vregsLiveIn.insert(std::make_pair(Reg, MI));
2520      }
2521    }
2522  }
2523
2524  if (MO->isDef()) {
2525    // Register defined.
2526    // TODO: verify that earlyclobber ops are not used.
2527    if (MO->isDead())
2528      addRegWithSubRegs(regsDead, Reg);
2529    else
2530      addRegWithSubRegs(regsDefined, Reg);
2531
2532    // Verify SSA form.
2533    if (MRI->isSSA() && Reg.isVirtual() &&
2534        std::next(MRI->def_begin(Reg)) != MRI->def_end())
2535      report("Multiple virtual register defs in SSA form", MO, MONum);
2536
2537    // Check LiveInts for a live segment, but only for virtual registers.
2538    if (LiveInts && !LiveInts->isNotInMIMap(*MI)) {
2539      SlotIndex DefIdx = LiveInts->getInstructionIndex(*MI);
      // Early-clobber defs occur at the early-clobber slot, normal defs at
      // the register slot.
2540      DefIdx = DefIdx.getRegSlot(MO->isEarlyClobber());
2541
2542      if (Reg.isVirtual()) {
2543        checkLivenessAtDef(MO, MONum, DefIdx, *LI, Reg);
2544
2545        if (LI->hasSubRanges()) {
2546          LaneBitmask MOMask = SubRegIdx != 0
2547                                   ? TRI->getSubRegIndexLaneMask(SubRegIdx)
2548                                   : MRI->getMaxLaneMaskForVReg(Reg);
2549          for (const LiveInterval::SubRange &SR : LI->subranges()) {
2550            if ((SR.LaneMask & MOMask).none())
2551              continue;
2552            checkLivenessAtDef(MO, MONum, DefIdx, SR, Reg, true, SR.LaneMask);
2553          }
2554        }
2555      }
2556    }
2557  }
2558}
2559
2560// This function gets called after visiting all instructions in a bundle. The
2561// argument points to the bundle header.
2562// Normal stand-alone instructions are also considered 'bundles', and this
2563// function is called for all of them.
2564void MachineVerifier::visitMachineBundleAfter(const MachineInstr *MI) {
2565 BBInfo &MInfo = MBBInfoMap[MI->getParent()];
2566 set_union(MInfo.regsKilled, regsKilled);
2567 set_subtract(regsLive, regsKilled); regsKilled.clear();
2568 // Kill any masked registers.
2569 while (!regMasks.empty()) {
2570 const uint32_t *Mask = regMasks.pop_back_val();
2571 for (Register Reg : regsLive)
2572 if (Reg.isPhysical() &&
2573 MachineOperand::clobbersPhysReg(Mask, Reg.asMCReg()))
2574 regsDead.push_back(Reg);
2575 }
2576 set_subtract(regsLive, regsDead); regsDead.clear();
2577 set_union(regsLive, regsDefined); regsDefined.clear();
2578}
2579
2580void
2581MachineVerifier::visitMachineBasicBlockAfter(const MachineBasicBlock *MBB) {
2582 MBBInfoMap[MBB].regsLiveOut = regsLive;
2583 regsLive.clear();
2584
2585 if (Indexes) {
2586 SlotIndex stop = Indexes->getMBBEndIdx(MBB);
2587 if (!(stop > lastIndex)) {
2588 report("Block ends before last instruction index", MBB);
2589 errs() << "Block ends at " << stop
2590 << " last instruction was at " << lastIndex << '\n';
2591 }
2592 lastIndex = stop;
2593 }
2594}
2595
2596namespace {
2597// This implements a set of registers that serves as a filter: can filter other
2598// sets by passing through elements not in the filter and blocking those that
2599// are. Any filter implicitly includes the full set of physical registers upon
2600// creation, thus filtering them all out. The filter itself as a set only grows,
2601// and needs to be as efficient as possible.
2602struct VRegFilter {
2603  // Add elements to the filter itself. \pre Input set \p FromRegSet must have
2604  // no duplicates. Both virtual and physical registers are fine.
2605  template <typename RegSetT> void add(const RegSetT &FromRegSet) {
2606    SmallVector<Register, 0> VRegsBuffer;
    // Discard the pass-through output; only the filter's own growth matters.
2607    filterAndAdd(FromRegSet, VRegsBuffer);
2608  }
2609  // Filter \p FromRegSet through the filter and append passed elements into \p
2610  // ToVRegs. All elements appended are then added to the filter itself.
2611  // \returns true if anything changed.
2612  template <typename RegSetT>
2613  bool filterAndAdd(const RegSetT &FromRegSet,
2614                    SmallVectorImpl<Register> &ToVRegs) {
2615    unsigned SparseUniverse = Sparse.size();
2616    unsigned NewSparseUniverse = SparseUniverse;
2617    unsigned NewDenseSize = Dense.size();
2618    size_t Begin = ToVRegs.size();
    // First pass: collect elements not already in the filter, and compute how
    // much Sparse/Dense will have to grow to hold them.
2619    for (Register Reg : FromRegSet) {
2620      if (!Reg.isVirtual())
2621        continue;
2622      unsigned Index = Register::virtReg2Index(Reg);
2623      if (Index < SparseUniverseMax) {
2624        if (Index < SparseUniverse && Sparse.test(Index))
2625          continue;
2626        NewSparseUniverse = std::max(NewSparseUniverse, Index + 1);
2627      } else {
2628        if (Dense.count(Reg))
2629          continue;
2630        ++NewDenseSize;
2631      }
2632      ToVRegs.push_back(Reg);
2633    }
2634    size_t End = ToVRegs.size();
2635    if (Begin == End)
2636      return false;
2637    // Reserving space in sets once performs better than doing so continuously
2638    // and pays easily for double look-ups (even in Dense with SparseUniverseMax
2639    // tuned all the way down) and double iteration (the second one is over a
2640    // SmallVector, which is a lot cheaper compared to DenseSet or BitVector).
2641    Sparse.resize(NewSparseUniverse);
2642    Dense.reserve(NewDenseSize);
    // Second pass: commit the newly collected elements into the filter.
2643    for (unsigned I = Begin; I < End; ++I) {
2644      Register Reg = ToVRegs[I];
2645      unsigned Index = Register::virtReg2Index(Reg);
2646      if (Index < SparseUniverseMax)
2647        Sparse.set(Index);
2648      else
2649        Dense.insert(Reg);
2650    }
2651    return true;
2652  }
2653
2654private:
2655  static constexpr unsigned SparseUniverseMax = 10 * 1024 * 8;
2656  // VRegs indexed within SparseUniverseMax are tracked by Sparse, those beyond
2657  // are tracked by Dense. The only purpose of the threshold and the Dense set
2658  // is to have a reasonably growing memory usage in pathological cases (large
2659  // number of very sparse VRegFilter instances live at the same time). In
2660  // practice even in the worst-by-execution time cases having all elements
2661  // tracked by Sparse (very large SparseUniverseMax scenario) tends to be more
2662  // space efficient than if tracked by Dense. The threshold is set to keep the
2663  // worst-case memory usage within 2x of figures determined empirically for
2664  // "all Dense" scenario in such worst-by-execution-time cases.
2665  BitVector Sparse;
  // NOTE(review): the `Dense` member declaration (source line 2666,
  // presumably `DenseSet<Register> Dense;`) is missing from this listing but
  // is used throughout filterAndAdd above — confirm against upstream.
2667};
2668
2669// Implements both a transfer function and a (binary, in-place) join operator
2670// for a dataflow over register sets with set union join and filtering transfer
2671// (out_b = in_b \ filter_b). filter_b is expected to be set-up ahead of time.
2672// Maintains out_b as its state, allowing for O(n) iteration over it at any
2673// time, where n is the size of the set (as opposed to O(U) where U is the
2674// universe). filter_b implicitly contains all physical registers at all times.
2675class FilteringVRegSet {
2676  VRegFilter Filter;
  // NOTE(review): the backing container member (source line 2677, presumably
  // `SmallVector<Register, 0> VRegs;`) is missing from this listing but is
  // referenced by add/begin/end/size below — confirm against upstream.
2678
2679public:
2680  // Set-up the filter_b. \pre Input register set \p RS must have no duplicates.
2681  // Both virtual and physical registers are fine.
2682  template <typename RegSetT> void addToFilter(const RegSetT &RS) {
2683    Filter.add(RS);
2684  }
2685  // Passes \p RS through the filter_b (transfer function) and adds what's left
2686  // to itself (out_b).
2687  template <typename RegSetT> bool add(const RegSetT &RS) {
2688    // Double-duty the Filter: to maintain VRegs a set (and the join operation
2689    // a set union) just add everything being added here to the Filter as well.
2690    return Filter.filterAndAdd(RS, VRegs);
2691  }
  // Read-only iteration over the accumulated (filtered) registers.
2692  using const_iterator = decltype(VRegs)::const_iterator;
2693  const_iterator begin() const { return VRegs.begin(); }
2694  const_iterator end() const { return VRegs.end(); }
2695  size_t size() const { return VRegs.size(); }
2696};
2697} // namespace
2698
2699// Calculate the largest possible vregsPassed sets. These are the registers that
2700// can pass through an MBB live, but may not be live every time. It is assumed
2701// that all vregsPassed sets are empty before the call.
/// Compute, for every reachable block, the largest possible set of virtual
/// registers that can pass through it live (vregsPassed). Assumes all
/// vregsPassed sets are empty on entry; filled in one reverse-post-order
/// sweep using each block's kill/live-out sets as the filter.
2702void MachineVerifier::calcRegsPassed() {
2703  if (MF->empty())
2704    // ReversePostOrderTraversal doesn't handle empty functions.
2705    return;
2706
2707  for (const MachineBasicBlock *MB :
      // NOTE(review): the range expression (source line 2708, presumably a
      // ReversePostOrderTraversal over MF) is missing from this listing.
2709    FilteringVRegSet VRegs;
2710    BBInfo &Info = MBBInfoMap[MB];
    // RPO only visits reachable blocks.
2711    assert(Info.reachable);
2712
    // Registers killed in or live-out of this block are filtered out: they
    // cannot "pass through" unobserved.
2713    VRegs.addToFilter(Info.regsKilled);
2714    VRegs.addToFilter(Info.regsLiveOut);
2715    for (const MachineBasicBlock *Pred : MB->predecessors()) {
2716      const BBInfo &PredInfo = MBBInfoMap[Pred];
2717      if (!PredInfo.reachable)
2718        continue;
2719
      // Anything live out of, or passing through, a predecessor may flow in.
2720      VRegs.add(PredInfo.regsLiveOut);
2721      VRegs.add(PredInfo.vregsPassed);
2722    }
2723    Info.vregsPassed.reserve(VRegs.size());
2724    Info.vregsPassed.insert(VRegs.begin(), VRegs.end());
2725  }
2726}
2727
2728// Calculate the set of virtual registers that must be passed through each basic
2729// block in order to satisfy the requirements of successor blocks. This is very
2730// similar to calcRegsPassed, only backwards.
2731void MachineVerifier::calcRegsRequired() {
2732 // First push live-in regs to predecessors' vregsRequired.
2734 for (const auto &MBB : *MF) {
2735 BBInfo &MInfo = MBBInfoMap[&MBB];
2736 for (const MachineBasicBlock *Pred : MBB.predecessors()) {
2737 BBInfo &PInfo = MBBInfoMap[Pred];
2738 if (PInfo.addRequired(MInfo.vregsLiveIn))
2739 todo.insert(Pred);
2740 }
2741
2742 // Handle the PHI node.
2743 for (const MachineInstr &MI : MBB.phis()) {
2744 for (unsigned i = 1, e = MI.getNumOperands(); i != e; i += 2) {
2745 // Skip those Operands which are undef regs or not regs.
2746 if (!MI.getOperand(i).isReg() || !MI.getOperand(i).readsReg())
2747 continue;
2748
2749 // Get register and predecessor for one PHI edge.
2750 Register Reg = MI.getOperand(i).getReg();
2751 const MachineBasicBlock *Pred = MI.getOperand(i + 1).getMBB();
2752
2753 BBInfo &PInfo = MBBInfoMap[Pred];
2754 if (PInfo.addRequired(Reg))
2755 todo.insert(Pred);
2756 }
2757 }
2758 }
2759
2760 // Iteratively push vregsRequired to predecessors. This will converge to the
2761 // same final state regardless of DenseSet iteration order.
2762 while (!todo.empty()) {
2763 const MachineBasicBlock *MBB = *todo.begin();
2764 todo.erase(MBB);
2765 BBInfo &MInfo = MBBInfoMap[MBB];
2766 for (const MachineBasicBlock *Pred : MBB->predecessors()) {
2767 if (Pred == MBB)
2768 continue;
2769 BBInfo &SInfo = MBBInfoMap[Pred];
2770 if (SInfo.addRequired(MInfo.vregsRequired))
2771 todo.insert(Pred);
2772 }
2773 }
2774}
2775
2776// Check PHI instructions at the beginning of MBB. It is assumed that
2777// calcRegsPassed has been run so BBInfo::isLiveOut is valid.
2778void MachineVerifier::checkPHIOps(const MachineBasicBlock &MBB) {
2779 BBInfo &MInfo = MBBInfoMap[&MBB];
2780
2782 for (const MachineInstr &Phi : MBB) {
2783 if (!Phi.isPHI())
2784 break;
2785 seen.clear();
2786
2787 const MachineOperand &MODef = Phi.getOperand(0);
2788 if (!MODef.isReg() || !MODef.isDef()) {
2789 report("Expected first PHI operand to be a register def", &MODef, 0);
2790 continue;
2791 }
2792 if (MODef.isTied() || MODef.isImplicit() || MODef.isInternalRead() ||
2793 MODef.isEarlyClobber() || MODef.isDebug())
2794 report("Unexpected flag on PHI operand", &MODef, 0);
2795 Register DefReg = MODef.getReg();
2796 if (!DefReg.isVirtual())
2797 report("Expected first PHI operand to be a virtual register", &MODef, 0);
2798
2799 for (unsigned I = 1, E = Phi.getNumOperands(); I != E; I += 2) {
2800 const MachineOperand &MO0 = Phi.getOperand(I);
2801 if (!MO0.isReg()) {
2802 report("Expected PHI operand to be a register", &MO0, I);
2803 continue;
2804 }
2805 if (MO0.isImplicit() || MO0.isInternalRead() || MO0.isEarlyClobber() ||
2806 MO0.isDebug() || MO0.isTied())
2807 report("Unexpected flag on PHI operand", &MO0, I);
2808
2809 const MachineOperand &MO1 = Phi.getOperand(I + 1);
2810 if (!MO1.isMBB()) {
2811 report("Expected PHI operand to be a basic block", &MO1, I + 1);
2812 continue;
2813 }
2814
2815 const MachineBasicBlock &Pre = *MO1.getMBB();
2816 if (!Pre.isSuccessor(&MBB)) {
2817 report("PHI input is not a predecessor block", &MO1, I + 1);
2818 continue;
2819 }
2820
2821 if (MInfo.reachable) {
2822 seen.insert(&Pre);
2823 BBInfo &PrInfo = MBBInfoMap[&Pre];
2824 if (!MO0.isUndef() && PrInfo.reachable &&
2825 !PrInfo.isLiveOut(MO0.getReg()))
2826 report("PHI operand is not live-out from predecessor", &MO0, I);
2827 }
2828 }
2829
2830 // Did we see all predecessors?
2831 if (MInfo.reachable) {
2832 for (MachineBasicBlock *Pred : MBB.predecessors()) {
2833 if (!seen.count(Pred)) {
2834 report("Missing PHI operand", &Phi);
2835 errs() << printMBBReference(*Pred)
2836 << " is a predecessor according to the CFG.\n";
2837 }
2838 }
2839 }
2840 }
2841}
2842
2843void MachineVerifier::visitMachineFunctionAfter() {
2844 calcRegsPassed();
2845
2846 for (const MachineBasicBlock &MBB : *MF)
2847 checkPHIOps(MBB);
2848
2849 // Now check liveness info if available
2850 calcRegsRequired();
2851
2852 // Check for killed virtual registers that should be live out.
2853 for (const auto &MBB : *MF) {
2854 BBInfo &MInfo = MBBInfoMap[&MBB];
2855 for (Register VReg : MInfo.vregsRequired)
2856 if (MInfo.regsKilled.count(VReg)) {
2857 report("Virtual register killed in block, but needed live out.", &MBB);
2858 errs() << "Virtual register " << printReg(VReg)
2859 << " is used after the block.\n";
2860 }
2861 }
2862
2863 if (!MF->empty()) {
2864 BBInfo &MInfo = MBBInfoMap[&MF->front()];
2865 for (Register VReg : MInfo.vregsRequired) {
2866 report("Virtual register defs don't dominate all uses.", MF);
2867 report_context_vreg(VReg);
2868 }
2869 }
2870
2871 if (LiveVars)
2872 verifyLiveVariables();
2873 if (LiveInts)
2874 verifyLiveIntervals();
2875
2876 // Check live-in list of each MBB. If a register is live into MBB, check
2877 // that the register is in regsLiveOut of each predecessor block. Since
2878 // this must come from a definition in the predecesssor or its live-in
2879 // list, this will catch a live-through case where the predecessor does not
2880 // have the register in its live-in list. This currently only checks
2881 // registers that have no aliases, are not allocatable and are not
2882 // reserved, which could mean a condition code register for instance.
2883 if (MRI->tracksLiveness())
2884 for (const auto &MBB : *MF)
2886 MCPhysReg LiveInReg = P.PhysReg;
2887 bool hasAliases = MCRegAliasIterator(LiveInReg, TRI, false).isValid();
2888 if (hasAliases || isAllocatable(LiveInReg) || isReserved(LiveInReg))
2889 continue;
2890 for (const MachineBasicBlock *Pred : MBB.predecessors()) {
2891 BBInfo &PInfo = MBBInfoMap[Pred];
2892 if (!PInfo.regsLiveOut.count(LiveInReg)) {
2893 report("Live in register not found to be live out from predecessor.",
2894 &MBB);
2895 errs() << TRI->getName(LiveInReg)
2896 << " not found to be live out from "
2897 << printMBBReference(*Pred) << "\n";
2898 }
2899 }
2900 }
2901
2902 for (auto CSInfo : MF->getCallSitesInfo())
2903 if (!CSInfo.first->isCall())
2904 report("Call site info referencing instruction that is not call", MF);
2905
2906 // If there's debug-info, check that we don't have any duplicate value
2907 // tracking numbers.
2908 if (MF->getFunction().getSubprogram()) {
2909 DenseSet<unsigned> SeenNumbers;
2910 for (const auto &MBB : *MF) {
2911 for (const auto &MI : MBB) {
2912 if (auto Num = MI.peekDebugInstrNum()) {
2913 auto Result = SeenNumbers.insert((unsigned)Num);
2914 if (!Result.second)
2915 report("Instruction has a duplicated value tracking number", &MI);
2916 }
2917 }
2918 }
2919 }
2920}
2921
2922void MachineVerifier::verifyLiveVariables() {
2923 assert(LiveVars && "Don't call verifyLiveVariables without LiveVars");
2924 for (unsigned I = 0, E = MRI->getNumVirtRegs(); I != E; ++I) {
2926 LiveVariables::VarInfo &VI = LiveVars->getVarInfo(Reg);
2927 for (const auto &MBB : *MF) {
2928 BBInfo &MInfo = MBBInfoMap[&MBB];
2929
2930 // Our vregsRequired should be identical to LiveVariables' AliveBlocks
2931 if (MInfo.vregsRequired.count(Reg)) {
2932 if (!VI.AliveBlocks.test(MBB.getNumber())) {
2933 report("LiveVariables: Block missing from AliveBlocks", &MBB);
2934 errs() << "Virtual register " << printReg(Reg)
2935 << " must be live through the block.\n";
2936 }
2937 } else {
2938 if (VI.AliveBlocks.test(MBB.getNumber())) {
2939 report("LiveVariables: Block should not be in AliveBlocks", &MBB);
2940 errs() << "Virtual register " << printReg(Reg)
2941 << " is not needed live through the block.\n";
2942 }
2943 }
2944 }
2945 }
2946}
2947
2948void MachineVerifier::verifyLiveIntervals() {
2949 assert(LiveInts && "Don't call verifyLiveIntervals without LiveInts");
2950 for (unsigned I = 0, E = MRI->getNumVirtRegs(); I != E; ++I) {
2952
2953 // Spilling and splitting may leave unused registers around. Skip them.
2954 if (MRI->reg_nodbg_empty(Reg))
2955 continue;
2956
2957 if (!LiveInts->hasInterval(Reg)) {
2958 report("Missing live interval for virtual register", MF);
2959 errs() << printReg(Reg, TRI) << " still has defs or uses\n";
2960 continue;
2961 }
2962
2963 const LiveInterval &LI = LiveInts->getInterval(Reg);
2964 assert(Reg == LI.reg() && "Invalid reg to interval mapping");
2965 verifyLiveInterval(LI);
2966 }
2967
2968 // Verify all the cached regunit intervals.
2969 for (unsigned i = 0, e = TRI->getNumRegUnits(); i != e; ++i)
2970 if (const LiveRange *LR = LiveInts->getCachedRegUnit(i))
2971 verifyLiveRange(*LR, i);
2972}
2973
// Verify one value number (VNInfo) of live range \p LR: the value must be
// live and map to itself at its own def slot, a PHI value must be defined at
// its block's start index, and a non-PHI value must be defined by an
// instruction operand that actually writes \p Reg (at the right kind of
// slot). \p LaneMask restricts the check to a subregister lane set when
// non-empty.
void MachineVerifier::verifyLiveRangeValue(const LiveRange &LR,
                                           const VNInfo *VNI, Register Reg,
                                           LaneBitmask LaneMask) {
  // Unused value numbers carry no liveness; nothing to verify.
  if (VNI->isUnused())
    return;

  // The range must be live at the value's def, and the live value there must
  // be this very VNInfo.
  const VNInfo *DefVNI = LR.getVNInfoAt(VNI->def);

  if (!DefVNI) {
    report("Value not live at VNInfo def and not marked unused", MF);
    report_context(LR, Reg, LaneMask);
    report_context(*VNI);
    return;
  }

  if (DefVNI != VNI) {
    report("Live segment at def has different VNInfo", MF);
    report_context(LR, Reg, LaneMask);
    report_context(*VNI);
    return;
  }

  const MachineBasicBlock *MBB = LiveInts->getMBBFromIndex(VNI->def);
  if (!MBB) {
    report("Invalid VNInfo definition index", MF);
    report_context(LR, Reg, LaneMask);
    report_context(*VNI);
    return;
  }

  // PHI values are defined at the very start of their block; no defining
  // instruction exists, so checks end here.
  if (VNI->isPHIDef()) {
    if (VNI->def != LiveInts->getMBBStartIdx(MBB)) {
      report("PHIDef VNInfo is not defined at MBB start", MBB);
      report_context(LR, Reg, LaneMask);
      report_context(*VNI);
    }
    return;
  }

  // Non-PHI def.
  const MachineInstr *MI = LiveInts->getInstructionFromIndex(VNI->def);
  if (!MI) {
    report("No instruction at VNInfo def index", MBB);
    report_context(LR, Reg, LaneMask);
    report_context(*VNI);
    return;
  }

  // When a concrete register (vreg) or regunit is given, scan the defining
  // instruction's (bundled) operands for a def that matches it.
  if (Reg != 0) {
    bool hasDef = false;
    bool isEarlyClobber = false;
    for (ConstMIBundleOperands MOI(*MI); MOI.isValid(); ++MOI) {
      if (!MOI->isReg() || !MOI->isDef())
        continue;
      if (Reg.isVirtual()) {
        if (MOI->getReg() != Reg)
          continue;
      } else {
        // For a regunit range, any physreg def covering that unit counts.
        if (!MOI->getReg().isPhysical() || !TRI->hasRegUnit(MOI->getReg(), Reg))
          continue;
      }
      // For subranges, the def must touch at least one lane of LaneMask.
      if (LaneMask.any() &&
          (TRI->getSubRegIndexLaneMask(MOI->getSubReg()) & LaneMask).none())
        continue;
      hasDef = true;
      if (MOI->isEarlyClobber())
        isEarlyClobber = true;
    }

    if (!hasDef) {
      report("Defining instruction does not modify register", MI);
      report_context(LR, Reg, LaneMask);
      report_context(*VNI);
    }

    // Early clobber defs begin at USE slots, but other defs must begin at
    // DEF slots.
    if (isEarlyClobber) {
      if (!VNI->def.isEarlyClobber()) {
        report("Early clobber def must be at an early-clobber slot", MBB);
        report_context(LR, Reg, LaneMask);
        report_context(*VNI);
      }
    } else if (!VNI->def.isRegister()) {
      report("Non-PHI, non-early clobber def must be at a register slot", MBB);
      report_context(LR, Reg, LaneMask);
      report_context(*VNI);
    }
  }
}
3064
3065void MachineVerifier::verifyLiveRangeSegment(const LiveRange &LR,
3067 Register Reg,
3068 LaneBitmask LaneMask) {
3069 const LiveRange::Segment &S = *I;
3070 const VNInfo *VNI = S.valno;
3071 assert(VNI && "Live segment has no valno");
3072
3073 if (VNI->id >= LR.getNumValNums() || VNI != LR.getValNumInfo(VNI->id)) {
3074 report("Foreign valno in live segment", MF);
3075 report_context(LR, Reg, LaneMask);
3076 report_context(S);
3077 report_context(*VNI);
3078 }
3079
3080 if (VNI->isUnused()) {
3081 report("Live segment valno is marked unused", MF);
3082 report_context(LR, Reg, LaneMask);
3083 report_context(S);
3084 }
3085
3086 const MachineBasicBlock *MBB = LiveInts->getMBBFromIndex(S.start);
3087 if (!MBB) {
3088 report("Bad start of live segment, no basic block", MF);
3089 report_context(LR, Reg, LaneMask);
3090 report_context(S);
3091 return;
3092 }
3093 SlotIndex MBBStartIdx = LiveInts->getMBBStartIdx(MBB);
3094 if (S.start != MBBStartIdx && S.start != VNI->def) {
3095 report("Live segment must begin at MBB entry or valno def", MBB);
3096 report_context(LR, Reg, LaneMask);
3097 report_context(S);
3098 }
3099
3100 const MachineBasicBlock *EndMBB =
3101 LiveInts->getMBBFromIndex(S.end.getPrevSlot());
3102 if (!EndMBB) {
3103 report("Bad end of live segment, no basic block", MF);
3104 report_context(LR, Reg, LaneMask);
3105 report_context(S);
3106 return;
3107 }
3108
3109 // No more checks for live-out segments.
3110 if (S.end == LiveInts->getMBBEndIdx(EndMBB))
3111 return;
3112
3113 // RegUnit intervals are allowed dead phis.
3114 if (!Reg.isVirtual() && VNI->isPHIDef() && S.start == VNI->def &&
3115 S.end == VNI->def.getDeadSlot())
3116 return;
3117
3118 // The live segment is ending inside EndMBB
3119 const MachineInstr *MI =
3121 if (!MI) {
3122 report("Live segment doesn't end at a valid instruction", EndMBB);
3123 report_context(LR, Reg, LaneMask);
3124 report_context(S);
3125 return;
3126 }
3127
3128 // The block slot must refer to a basic block boundary.
3129 if (S.end.isBlock()) {
3130 report("Live segment ends at B slot of an instruction", EndMBB);
3131 report_context(LR, Reg, LaneMask);
3132 report_context(S);
3133 }
3134
3135 if (S.end.isDead()) {
3136 // Segment ends on the dead slot.
3137 // That means there must be a dead def.
3138 if (!SlotIndex::isSameInstr(S.start, S.end)) {
3139 report("Live segment ending at dead slot spans instructions", EndMBB);
3140 report_context(LR, Reg, LaneMask);
3141 report_context(S);
3142 }
3143 }
3144
3145 // After tied operands are rewritten, a live segment can only end at an
3146 // early-clobber slot if it is being redefined by an early-clobber def.
3147 // TODO: Before tied operands are rewritten, a live segment can only end at an
3148 // early-clobber slot if the last use is tied to an early-clobber def.
3149 if (MF->getProperties().hasProperty(
3151 S.end.isEarlyClobber()) {
3152 if (I+1 == LR.end() || (I+1)->start != S.end) {
3153 report("Live segment ending at early clobber slot must be "
3154 "redefined by an EC def in the same instruction", EndMBB);
3155 report_context(LR, Reg, LaneMask);
3156 report_context(S);
3157 }
3158 }
3159
3160 // The following checks only apply to virtual registers. Physreg liveness
3161 // is too weird to check.
3162 if (Reg.isVirtual()) {
3163 // A live segment can end with either a redefinition, a kill flag on a
3164 // use, or a dead flag on a def.
3165 bool hasRead = false;
3166 bool hasSubRegDef = false;
3167 bool hasDeadDef = false;
3168 for (ConstMIBundleOperands MOI(*MI); MOI.isValid(); ++MOI) {
3169 if (!MOI->isReg() || MOI->getReg() != Reg)
3170 continue;
3171 unsigned Sub = MOI->getSubReg();
3172 LaneBitmask SLM = Sub != 0 ? TRI->getSubRegIndexLaneMask(Sub)
3174 if (MOI->isDef()) {
3175 if (Sub != 0) {
3176 hasSubRegDef = true;
3177 // An operand %0:sub0 reads %0:sub1..n. Invert the lane
3178 // mask for subregister defs. Read-undef defs will be handled by
3179 // readsReg below.
3180 SLM = ~SLM;
3181 }
3182 if (MOI->isDead())
3183 hasDeadDef = true;
3184 }
3185 if (LaneMask.any() && (LaneMask & SLM).none())
3186 continue;
3187 if (MOI->readsReg())
3188 hasRead = true;
3189 }
3190 if (S.end.isDead()) {
3191 // Make sure that the corresponding machine operand for a "dead" live
3192 // range has the dead flag. We cannot perform this check for subregister
3193 // liveranges as partially dead values are allowed.
3194 if (LaneMask.none() && !hasDeadDef) {
3195 report("Instruction ending live segment on dead slot has no dead flag",
3196 MI);
3197 report_context(LR, Reg, LaneMask);
3198 report_context(S);
3199 }
3200 } else {
3201 if (!hasRead) {
3202 // When tracking subregister liveness, the main range must start new
3203 // values on partial register writes, even if there is no read.
3204 if (!MRI->shouldTrackSubRegLiveness(Reg) || LaneMask.any() ||
3205 !hasSubRegDef) {
3206 report("Instruction ending live segment doesn't read the register",
3207 MI);
3208 report_context(LR, Reg, LaneMask);
3209 report_context(S);
3210 }
3211 }
3212 }
3213 }
3214
3215 // Now check all the basic blocks in this live segment.
3217 // Is this live segment the beginning of a non-PHIDef VN?
3218 if (S.start == VNI->def && !VNI->isPHIDef()) {
3219 // Not live-in to any blocks.
3220 if (MBB == EndMBB)
3221 return;
3222 // Skip this block.
3223 ++MFI;
3224 }
3225
3227 if (LaneMask.any()) {
3228 LiveInterval &OwnerLI = LiveInts->getInterval(Reg);
3229 OwnerLI.computeSubRangeUndefs(Undefs, LaneMask, *MRI, *Indexes);
3230 }
3231
3232 while (true) {
3233 assert(LiveInts->isLiveInToMBB(LR, &*MFI));
3234 // We don't know how to track physregs into a landing pad.
3235 if (!Reg.isVirtual() && MFI->isEHPad()) {
3236 if (&*MFI == EndMBB)
3237 break;
3238 ++MFI;
3239 continue;
3240 }
3241
3242 // Is VNI a PHI-def in the current block?
3243 bool IsPHI = VNI->isPHIDef() &&
3244 VNI->def == LiveInts->getMBBStartIdx(&*MFI);
3245
3246 // Check that VNI is live-out of all predecessors.
3247 for (const MachineBasicBlock *Pred : MFI->predecessors()) {
3248 SlotIndex PEnd = LiveInts->getMBBEndIdx(Pred);
3249 // Predecessor of landing pad live-out on last call.
3250 if (MFI->isEHPad()) {
3251 for (const MachineInstr &MI : llvm::reverse(*Pred)) {
3252 if (MI.isCall()) {
3253 PEnd = Indexes->getInstructionIndex(MI).getBoundaryIndex();
3254 break;
3255 }
3256 }
3257 }
3258 const VNInfo *PVNI = LR.getVNInfoBefore(PEnd);
3259
3260 // All predecessors must have a live-out value. However for a phi
3261 // instruction with subregister intervals
3262 // only one of the subregisters (not necessarily the current one) needs to
3263 // be defined.
3264 if (!PVNI && (LaneMask.none() || !IsPHI)) {
3265 if (LiveRangeCalc::isJointlyDominated(Pred, Undefs, *Indexes))
3266 continue;
3267 report("Register not marked live out of predecessor", Pred);
3268 report_context(LR, Reg, LaneMask);
3269 report_context(*VNI);
3270 errs() << " live into " << printMBBReference(*MFI) << '@'
3271 << LiveInts->getMBBStartIdx(&*MFI) << ", not live before "
3272 << PEnd << '\n';
3273 continue;
3274 }
3275
3276 // Only PHI-defs can take different predecessor values.
3277 if (!IsPHI && PVNI != VNI) {
3278 report("Different value live out of predecessor", Pred);
3279 report_context(LR, Reg, LaneMask);
3280 errs() << "Valno #" << PVNI->id << " live out of "
3281 << printMBBReference(*Pred) << '@' << PEnd << "\nValno #"
3282 << VNI->id << " live into " << printMBBReference(*MFI) << '@'
3283 << LiveInts->getMBBStartIdx(&*MFI) << '\n';
3284 }
3285 }
3286 if (&*MFI == EndMBB)
3287 break;
3288 ++MFI;
3289 }
3290}
3291
3292void MachineVerifier::verifyLiveRange(const LiveRange &LR, Register Reg,
3293 LaneBitmask LaneMask) {
3294 for (const VNInfo *VNI : LR.valnos)
3295 verifyLiveRangeValue(LR, VNI, Reg, LaneMask);
3296
3297 for (LiveRange::const_iterator I = LR.begin(), E = LR.end(); I != E; ++I)
3298 verifyLiveRangeSegment(LR, I, Reg, LaneMask);
3299}
3300
3301void MachineVerifier::verifyLiveInterval(const LiveInterval &LI) {
3302 Register Reg = LI.reg();
3303 assert(Reg.isVirtual());
3304 verifyLiveRange(LI, Reg);
3305
3307 LaneBitmask MaxMask = MRI->getMaxLaneMaskForVReg(Reg);
3308 for (const LiveInterval::SubRange &SR : LI.subranges()) {
3309 if ((Mask & SR.LaneMask).any()) {
3310 report("Lane masks of sub ranges overlap in live interval", MF);
3311 report_context(LI);
3312 }
3313 if ((SR.LaneMask & ~MaxMask).any()) {
3314 report("Subrange lanemask is invalid", MF);
3315 report_context(LI);
3316 }
3317 if (SR.empty()) {
3318 report("Subrange must not be empty", MF);
3319 report_context(SR, LI.reg(), SR.LaneMask);
3320 }
3321 Mask |= SR.LaneMask;
3322 verifyLiveRange(SR, LI.reg(), SR.LaneMask);
3323 if (!LI.covers(SR)) {
3324 report("A Subrange is not covered by the main range", MF);
3325 report_context(LI);
3326 }
3327 }
3328
3329 // Check the LI only has one connected component.
3330 ConnectedVNInfoEqClasses ConEQ(*LiveInts);
3331 unsigned NumComp = ConEQ.Classify(LI);
3332 if (NumComp > 1) {
3333 report("Multiple connected components in live interval", MF);
3334 report_context(LI);
3335 for (unsigned comp = 0; comp != NumComp; ++comp) {
3336 errs() << comp << ": valnos";
3337 for (const VNInfo *I : LI.valnos)
3338 if (comp == ConEQ.getEqClass(I))
3339 errs() << ' ' << I->id;
3340 errs() << '\n';
3341 }
3342 }
3343}
3344
3345namespace {
3346
3347 // FrameSetup and FrameDestroy can have zero adjustment, so using a single
3348 // integer, we can't tell whether it is a FrameSetup or FrameDestroy if the
3349 // value is zero.
3350 // We use a bool plus an integer to capture the stack state.
3351 struct StackStateOfBB {
3352 StackStateOfBB() = default;
3353 StackStateOfBB(int EntryVal, int ExitVal, bool EntrySetup, bool ExitSetup) :
3354 EntryValue(EntryVal), ExitValue(ExitVal), EntryIsSetup(EntrySetup),
3355 ExitIsSetup(ExitSetup) {}
3356
3357 // Can be negative, which means we are setting up a frame.
3358 int EntryValue = 0;
3359 int ExitValue = 0;
3360 bool EntryIsSetup = false;
3361 bool ExitIsSetup = false;
3362 };
3363
3364} // end anonymous namespace
3365
3366/// Make sure on every path through the CFG, a FrameSetup <n> is always followed
3367/// by a FrameDestroy <n>, stack adjustments are identical on all
3368/// CFG edges to a merge point, and frame is destroyed at end of a return block.
3369void MachineVerifier::verifyStackFrame() {
3370 unsigned FrameSetupOpcode = TII->getCallFrameSetupOpcode();
3371 unsigned FrameDestroyOpcode = TII->getCallFrameDestroyOpcode();
3372 if (FrameSetupOpcode == ~0u && FrameDestroyOpcode == ~0u)
3373 return;
3374
3376 SPState.resize(MF->getNumBlockIDs());
3378
3379 // Visit the MBBs in DFS order.
3380 for (df_ext_iterator<const MachineFunction *,
3382 DFI = df_ext_begin(MF, Reachable), DFE = df_ext_end(MF, Reachable);
3383 DFI != DFE; ++DFI) {
3384 const MachineBasicBlock *MBB = *DFI;
3385
3386 StackStateOfBB BBState;
3387 // Check the exit state of the DFS stack predecessor.
3388 if (DFI.getPathLength() >= 2) {
3389 const MachineBasicBlock *StackPred = DFI.getPath(DFI.getPathLength() - 2);
3390 assert(Reachable.count(StackPred) &&
3391 "DFS stack predecessor is already visited.\n");
3392 BBState.EntryValue = SPState[StackPred->getNumber()].ExitValue;
3393 BBState.EntryIsSetup = SPState[StackPred->getNumber()].ExitIsSetup;
3394 BBState.ExitValue = BBState.EntryValue;
3395 BBState.ExitIsSetup = BBState.EntryIsSetup;
3396 }
3397
3398 // Update stack state by checking contents of MBB.
3399 for (const auto &I : *MBB) {
3400 if (I.getOpcode() == FrameSetupOpcode) {
3401 if (BBState.ExitIsSetup)
3402 report("FrameSetup is after another FrameSetup", &I);
3403 BBState.ExitValue -= TII->getFrameTotalSize(I);
3404 BBState.ExitIsSetup = true;
3405 }
3406
3407 if (I.getOpcode() == FrameDestroyOpcode) {
3408 int Size = TII->getFrameTotalSize(I);
3409 if (!BBState.ExitIsSetup)
3410 report("FrameDestroy is not after a FrameSetup", &I);
3411 int AbsSPAdj = BBState.ExitValue < 0 ? -BBState.ExitValue :
3412 BBState.ExitValue;
3413 if (BBState.ExitIsSetup && AbsSPAdj != Size) {
3414 report("FrameDestroy <n> is after FrameSetup <m>", &I);
3415 errs() << "FrameDestroy <" << Size << "> is after FrameSetup <"
3416 << AbsSPAdj << ">.\n";
3417 }
3418 BBState.ExitValue += Size;
3419 BBState.ExitIsSetup = false;
3420 }
3421 }
3422 SPState[MBB->getNumber()] = BBState;
3423
3424 // Make sure the exit state of any predecessor is consistent with the entry
3425 // state.
3426 for (const MachineBasicBlock *Pred : MBB->predecessors()) {
3427 if (Reachable.count(Pred) &&
3428 (SPState[Pred->getNumber()].ExitValue != BBState.EntryValue ||
3429 SPState[Pred->getNumber()].ExitIsSetup != BBState.EntryIsSetup)) {
3430 report("The exit stack state of a predecessor is inconsistent.", MBB);
3431 errs() << "Predecessor " << printMBBReference(*Pred)
3432 << " has exit state (" << SPState[Pred->getNumber()].ExitValue
3433 << ", " << SPState[Pred->getNumber()].ExitIsSetup << "), while "
3434 << printMBBReference(*MBB) << " has entry state ("
3435 << BBState.EntryValue << ", " << BBState.EntryIsSetup << ").\n";
3436 }
3437 }
3438
3439 // Make sure the entry state of any successor is consistent with the exit
3440 // state.
3441 for (const MachineBasicBlock *Succ : MBB->successors()) {
3442 if (Reachable.count(Succ) &&
3443 (SPState[Succ->getNumber()].EntryValue != BBState.ExitValue ||
3444 SPState[Succ->getNumber()].EntryIsSetup != BBState.ExitIsSetup)) {
3445 report("The entry stack state of a successor is inconsistent.", MBB);
3446 errs() << "Successor " << printMBBReference(*Succ)
3447 << " has entry state (" << SPState[Succ->getNumber()].EntryValue
3448 << ", " << SPState[Succ->getNumber()].EntryIsSetup << "), while "
3449 << printMBBReference(*MBB) << " has exit state ("
3450 << BBState.ExitValue << ", " << BBState.ExitIsSetup << ").\n";
3451 }
3452 }
3453
3454 // Make sure a basic block with return ends with zero stack adjustment.
3455 if (!MBB->empty() && MBB->back().isReturn()) {
3456 if (BBState.ExitIsSetup)
3457 report("A return block ends with a FrameSetup.", MBB);
3458 if (BBState.ExitValue)
3459 report("A return block ends with a nonzero stack adjustment.", MBB);
3460 }
3461 }
3462}
unsigned SubReg
unsigned const MachineRegisterInfo * MRI
aarch64 promote const
MachineBasicBlock & MBB
MachineBasicBlock MachineBasicBlock::iterator MBBI
static bool isLoad(int Opcode)
static bool isStore(int Opcode)
SmallVector< MachineOperand, 4 > Cond
This file implements the BitVector class.
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
Analysis containing CSE Info
Definition: CSEInfo.cpp:27
This file contains the declarations for the subclasses of Constant, which represent the different fla...
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
This file defines the DenseMap class.
This file defines the DenseSet and SmallDenseSet classes.
This file builds on the ADT/GraphTraits.h file to build generic depth first graph iterator.
std::string Name
uint64_t Size
const HexagonInstrInfo * TII
hexagon widen stores
IRTranslator LLVM IR MI
A common definition of LaneBitmask for use in TableGen and CodeGen.
Implement a low-level type suitable for MachineInstr level instruction selection.
#define F(x, y, z)
Definition: MD5.cpp:55
#define I(x, y, z)
Definition: MD5.cpp:58
unsigned const TargetRegisterInfo * TRI
unsigned Reg
modulo schedule Modulo Schedule test pass
return ToRemove size() > 0
#define P(N)
ppc ctr loops verify
const char LLVMTargetMachineRef TM
#define INITIALIZE_PASS(passName, arg, name, cfg, analysis)
Definition: PassSupport.h:38
This file builds on the ADT/GraphTraits.h file to build a generic graph post order iterator.
unsigned UseOpIdx
const SmallVectorImpl< MachineOperand > MachineBasicBlock * TBB
static bool isValid(const char C)
Returns true if C is a valid mangled character: <0-9a-zA-Z_>.
@ VI
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
static bool isLiveOut(const MachineBasicBlock &MBB, unsigned Reg)
This file contains some templates that are useful if you are working with the STL at all.
This file defines generic set operations that may be used on set's of different types,...
This file defines the SmallPtrSet class.
This file defines the SmallVector class.
static unsigned getSize(unsigned Kind)
const fltSemantics & getSemantics() const
Definition: APFloat.h:1267
Represent the analysis usage information of a pass.
AnalysisUsage & addUsedIfAvailable()
Add the specified Pass class to the set of analyses used by this pass.
void setPreservesAll()
Set by analyses that do not transform their input at all.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition: ArrayRef.h:41
size_t size() const
size - Get the array size.
Definition: ArrayRef.h:163
LLVM Basic Block Representation.
Definition: BasicBlock.h:56
bool hasAddressTaken() const
Returns true if there are any uses of this basic block other than direct branches,...
Definition: BasicBlock.h:495
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
Definition: BasicBlock.h:127
bool test(unsigned Idx) const
Definition: BitVector.h:454
void clear()
clear - Removes all bits from the bitvector.
Definition: BitVector.h:328
iterator_range< const_set_bits_iterator > set_bits() const
Definition: BitVector.h:133
size_type size() const
size - Returns the number of bits in this bitvector.
Definition: BitVector.h:152
ConnectedVNInfoEqClasses - Helper class that can divide VNInfos in a LiveInterval into equivalence cl...
Definition: LiveInterval.h:998
ConstMIBundleOperands - Iterate over all operands in a const bundle of machine instructions.
ConstantFP - Floating Point Values [float, double].
Definition: Constants.h:256
const APFloat & getValueAPF() const
Definition: Constants.h:297
This is the shared class of boolean and integer constants.
Definition: Constants.h:78
unsigned getBitWidth() const
getBitWidth - Return the bitwidth of this constant.
Definition: Constants.h:135
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Definition: DenseMap.h:220
Implements a dense probed hash-table based set.
Definition: DenseSet.h:271
Register getReg() const
Base class for user error types.
Definition: Error.h:348
A specialized PseudoSourceValue for holding FixedStack values, which must include a frame index.
FunctionPass class - This class is used to implement most global optimizations.
Definition: Pass.h:308
bool isPredicated(const MachineInstr &MI) const override
Returns true if the instruction is already predicated.
bool analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB, MachineBasicBlock *&FBB, SmallVectorImpl< MachineOperand > &Cond, bool AllowModify) const override
Analyze the branching code at the end of MBB, returning true if it cannot be understood (e....
static unsigned getNumOperandRegisters(unsigned Flag)
getNumOperandRegisters - Extract the number of registers field from the inline asm operand flag.
Definition: InlineAsm.h:363
constexpr unsigned getScalarSizeInBits() const
constexpr bool isScalar() const
constexpr bool isValid() const
constexpr uint16_t getNumElements() const
Returns the number of elements in a vector LLT.
constexpr bool isVector() const
constexpr TypeSize getSizeInBits() const
Returns the total size of the type. Must only be called on sized types.
constexpr bool isPointer() const
constexpr LLT getElementType() const
Returns the vector's element type. Only valid for vector types.
constexpr unsigned getAddressSpace() const
constexpr LLT getScalarType() const
constexpr TypeSize getSizeInBytes() const
Returns the total size of the type in bytes, i.e.
A live range for subregisters.
Definition: LiveInterval.h:693
LiveInterval - This class represents the liveness of a register, or stack slot.
Definition: LiveInterval.h:686
Register reg() const
Definition: LiveInterval.h:717
bool hasSubRanges() const
Returns true if subregister liveness information is available.
Definition: LiveInterval.h:803
iterator_range< subrange_iterator > subranges()
Definition: LiveInterval.h:775
void computeSubRangeUndefs(SmallVectorImpl< SlotIndex > &Undefs, LaneBitmask LaneMask, const MachineRegisterInfo &MRI, const SlotIndexes &Indexes) const
For a given lane mask LaneMask, compute indexes at which the lane is marked undefined by subregister ...
bool hasInterval(Register Reg) const
SlotIndex getMBBStartIdx(const MachineBasicBlock *mbb) const
Return the first index in the given basic block.
MachineInstr * getInstructionFromIndex(SlotIndex index) const
Returns the instruction associated with the given index.
SlotIndex getInstructionIndex(const MachineInstr &Instr) const
Returns the base index of the given instruction.
SlotIndex getMBBEndIdx(const MachineBasicBlock *mbb) const
Return the last index in the given basic block.
LiveRange * getCachedRegUnit(unsigned Unit)
Return the live range for register unit Unit if it has already been computed, or nullptr if it hasn't...
LiveInterval & getInterval(Register Reg)
bool isNotInMIMap(const MachineInstr &Instr) const
Returns true if the specified machine instr has been removed or was never entered in the map.
MachineBasicBlock * getMBBFromIndex(SlotIndex index) const
bool isLiveInToMBB(const LiveRange &LR, const MachineBasicBlock *mbb) const
void print(raw_ostream &O, const Module *=nullptr) const override
Implement the dump method.
Result of a LiveRange query.
Definition: LiveInterval.h:90
bool isDeadDef() const
Return true if this instruction has a dead def.
Definition: LiveInterval.h:117
VNInfo * valueIn() const
Return the value that is live-in to the instruction.
Definition: LiveInterval.h:105
bool isKill() const
Return true if the live-in value is killed by this instruction.
Definition: LiveInterval.h:112
static LLVM_ATTRIBUTE_UNUSED bool isJointlyDominated(const MachineBasicBlock *MBB, ArrayRef< SlotIndex > Defs, const SlotIndexes &Indexes)
A diagnostic function to check if the end of the block MBB is jointly dominated by the blocks corresp...
This class represents the liveness of a register, stack slot, etc.
Definition: LiveInterval.h:157
VNInfo * getValNumInfo(unsigned ValNo)
getValNumInfo - Returns pointer to the specified val#.
Definition: LiveInterval.h:317
bool liveAt(SlotIndex index) const
Definition: LiveInterval.h:401
bool covers(const LiveRange &Other) const
Returns true if all segments of the Other live range are completely covered by this live range.
bool empty() const
Definition: LiveInterval.h:382
LiveQueryResult Query(SlotIndex Idx) const
Query Liveness at Idx.
Definition: LiveInterval.h:541
iterator end()
Definition: LiveInterval.h:216
VNInfo * getVNInfoBefore(SlotIndex Idx) const
getVNInfoBefore - Return the VNInfo that is live up to but not necessarilly including Idx,...
Definition: LiveInterval.h:429
unsigned getNumValNums() const
Definition: LiveInterval.h:313
iterator begin()
Definition: LiveInterval.h:215
VNInfoList valnos
Definition: LiveInterval.h:204
VNInfo * getVNInfoAt(SlotIndex Idx) const
getVNInfoAt - Return the VNInfo that is live at Idx, or NULL.
Definition: LiveInterval.h:421
LiveInterval & getInterval(int Slot)
Definition: LiveStacks.h:68
bool hasInterval(int Slot) const
Definition: LiveStacks.h:82
VarInfo & getVarInfo(Register Reg)
getVarInfo - Return the VarInfo structure for the specified VIRTUAL register.
This class is intended to be used as a base class for asm properties and features specific to the tar...
Definition: MCAsmInfo.h:56
ExceptionHandling getExceptionHandlingType() const
Definition: MCAsmInfo.h:781
Describe properties that are true of each instruction in the target description file.
Definition: MCInstrDesc.h:198
unsigned getNumOperands() const
Return the number of declared MachineOperands for this MachineInstruction.
Definition: MCInstrDesc.h:237
ArrayRef< MCOperandInfo > operands() const
Definition: MCInstrDesc.h:239
unsigned getNumDefs() const
Return the number of MachineOperands that are register definitions.
Definition: MCInstrDesc.h:247
bool variadicOpsAreDefs() const
Return true if variadic operands of this instruction are definitions.
Definition: MCInstrDesc.h:417
int getOperandConstraint(unsigned OpNum, MCOI::OperandConstraint Constraint) const
Returns the value of the specified operand constraint if it is present.
Definition: MCInstrDesc.h:219
unsigned getOpcode() const
Return the opcode number for this descriptor.
Definition: MCInstrDesc.h:230
This holds information about one operand of a machine instruction, indicating the register class for ...
Definition: MCInstrDesc.h:85
bool isOptionalDef() const
Set if this operand is a optional def.
Definition: MCInstrDesc.h:113
uint8_t OperandType
Information about the type of the operand.
Definition: MCInstrDesc.h:97
MCRegAliasIterator enumerates all registers aliasing Reg.
bool isValid() const
isValid - returns true if this iterator is not yet at the end.
bool isValid() const
isValid - Returns true until all the operands have been visited.
bool isInlineAsmBrIndirectTarget() const
Returns true if this is the indirect dest of an INLINEASM_BR.
unsigned pred_size() const
bool isEHPad() const
Returns true if the block is a landing pad.
iterator_range< livein_iterator > liveins() const
iterator_range< iterator > phis()
Returns a range that iterates over the phis in the basic block.
int getNumber() const
MachineBasicBlocks are uniquely numbered at the function level, unless they're not in a MachineFuncti...
const BasicBlock * getBasicBlock() const
Return the LLVM basic block that this instance corresponded to originally.
bool isIRBlockAddressTaken() const
Test whether this block is the target of an IR BlockAddress.
unsigned succ_size() const
BasicBlock * getAddressTakenIRBlock() const
Retrieves the BasicBlock which corresponds to this MachineBasicBlock.
bool isPredecessor(const MachineBasicBlock *MBB) const
Return true if the specified MBB is a predecessor of this block.
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
iterator_range< succ_iterator > successors()
bool isSuccessor(const MachineBasicBlock *MBB) const
Return true if the specified MBB is a successor of this block.
iterator_range< pred_iterator > predecessors()
StringRef getName() const
Return the name of the corresponding LLVM basic block, or an empty string.
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
BitVector getPristineRegs(const MachineFunction &MF) const
Return a set of physical registers that are pristine.
An AnalysisManager<MachineFunction> that also exposes IR analysis results.
MachineFunctionPass - This class adapts the FunctionPass interface to allow convenient creation of pa...
void getAnalysisUsage(AnalysisUsage &AU) const override
getAnalysisUsage - Subclasses that override getAnalysisUsage must call this.
virtual bool runOnMachineFunction(MachineFunction &MF)=0
runOnMachineFunction - This method must be overloaded to perform the desired machine code transformat...
bool hasProperty(Property P) const
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
StringRef getName() const
getName - Return the name of the corresponding LLVM function.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
const LLVMTargetMachine & getTarget() const
getTarget - Return the target machine this machine code is compiled with
const MachineFunctionProperties & getProperties() const
Get the function properties.
const MachineBasicBlock & front() const
bool verify(Pass *p=nullptr, const char *Banner=nullptr, bool AbortOnError=true) const
Run the current MachineFunction through the machine code verifier, useful for debugger use.
void print(raw_ostream &OS, const SlotIndexes *=nullptr) const
print - Print out the MachineFunction in a format suitable for debugging to the specified stream.
Representation of each machine instruction.
Definition: MachineInstr.h:68
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
Definition: MachineInstr.h:516
bool isReturn(QueryType Type=AnyInBundle) const
Definition: MachineInstr.h:862
bool isTerminator(QueryType Type=AnyInBundle) const
Returns true if this instruction part of the terminator for a basic block.
Definition: MachineInstr.h:896
bool isBarrier(QueryType Type=AnyInBundle) const
Returns true if the specified instruction stops control flow from executing the instruction immediate...
Definition: MachineInstr.h:887
A description of a memory reference used in the backend.
const PseudoSourceValue * getPseudoValue() const
uint64_t getSize() const
Return the size in bytes of the memory reference.
AtomicOrdering getSuccessOrdering() const
Return the atomic ordering requirements for this memory operation.
uint64_t getSizeInBits() const
Return the size in bits of the memory reference.
MachineOperand class - Representation of each machine instruction operand.
unsigned getSubReg() const
int64_t getImm() const
bool readsReg() const
readsReg - Returns true if this operand reads the previous value of its register.
bool isImplicit() const
bool isIntrinsicID() const
bool isReg() const
isReg - Tests if this is a MO_Register operand.
MachineBasicBlock * getMBB() const
ArrayRef< int > getShuffleMask() const
bool isImm() const
isImm - Tests if this is a MO_Immediate operand.
bool isShuffleMask() const
unsigned getCFIIndex() const
bool isRenamable() const
isRenamable - Returns true if this register may be renamed, i.e.
MachineInstr * getParent()
getParent - Return the instruction that this operand belongs to.
MachineOperandType getType() const
getType - Returns the MachineOperandType for this operand.
bool isEarlyClobber() const
Register getReg() const
getReg - Returns the register number.
Intrinsic::ID getIntrinsicID() const
bool isInternalRead() const
bool isFI() const
isFI - Tests if this is a MO_FrameIndex operand.
static bool clobbersPhysReg(const uint32_t *RegMask, MCRegister PhysReg)
clobbersPhysReg - Returns true if this RegMask clobbers PhysReg.
const uint32_t * getRegMask() const
getRegMask - Returns a bit mask of registers preserved by this RegMask operand.
void print(raw_ostream &os, const TargetRegisterInfo *TRI=nullptr, const TargetIntrinsicInfo *IntrinsicInfo=nullptr) const
Print the MachineOperand to os.
@ MO_CFIIndex
MCCFIInstruction index.
@ MO_RegisterMask
Mask of preserved registers.
@ MO_MachineBasicBlock
MachineBasicBlock reference.
@ MO_FrameIndex
Abstract Stack Frame Index.
@ MO_Register
Register operand.
bool isMBB() const
isMBB - Tests if this is a MO_MachineBasicBlock operand.
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
static PassRegistry * getPassRegistry()
getPassRegistry - Access the global registry object, which is automatically initialized at applicatio...
Pass interface - Implemented by all 'passes'.
Definition: Pass.h:91
virtual void print(raw_ostream &OS, const Module *M) const
print - Print out the internal state of the pass.
Definition: Pass.cpp:130
AnalysisType * getAnalysisIfAvailable() const
getAnalysisIfAvailable<AnalysisType>() - Subclasses use this function to get analysis information tha...
Special value supplied for machine level alias analysis.
Holds all the information related to register banks.
RegisterBank & getRegBank(unsigned ID)
Get the register bank identified by ID.
This class implements the register bank concept.
Definition: RegisterBank.h:28
unsigned getSize() const
Get the maximal size in bits that fits in this register bank.
Definition: RegisterBank.h:54
const char * getName() const
Get a user friendly name of this register bank.
Definition: RegisterBank.h:51
Wrapper class representing virtual and physical registers.
Definition: Register.h:19
bool isPhysical() const
Return true if the specified register number is in the physical register namespace.
Definition: Register.h:97
static Register index2VirtReg(unsigned Index)
Convert a 0-based index to a virtual register number.
Definition: Register.h:84
static unsigned virtReg2Index(Register Reg)
Convert a virtual register number to a 0-based index.
Definition: Register.h:77
bool isVirtual() const
Return true if the specified register number is in the virtual register namespace.
Definition: Register.h:91
static bool isPhysicalRegister(unsigned Reg)
Return true if the specified register number is in the physical register namespace.
Definition: Register.h:65
SlotIndex - An opaque wrapper around machine indexes.
Definition: SlotIndexes.h:82
static bool isSameInstr(SlotIndex A, SlotIndex B)
isSameInstr - Return true if A and B refer to the same instruction.
Definition: SlotIndexes.h:198
bool isBlock() const
isBlock - Returns true if this is a block boundary slot.
Definition: SlotIndexes.h:231
SlotIndex getDeadSlot() const
Returns the dead def kill slot for the current instruction.
Definition: SlotIndexes.h:264
bool isEarlyClobber() const
isEarlyClobber - Returns true if this is an early-clobber slot.
Definition: SlotIndexes.h:234
bool isRegister() const
isRegister - Returns true if this is a normal register use/def slot.
Definition: SlotIndexes.h:238
SlotIndex getBoundaryIndex() const
Returns the boundary index for associated with this index.
Definition: SlotIndexes.h:253
SlotIndex getPrevSlot() const
Returns the previous slot in the index list.
Definition: SlotIndexes.h:294
SlotIndex getRegSlot(bool EC=false) const
Returns the register use/def slot in the current instruction for a normal or early-clobber def.
Definition: SlotIndexes.h:259
bool isDead() const
isDead - Returns true if this is a dead def kill slot.
Definition: SlotIndexes.h:241
SlotIndexes pass.
Definition: SlotIndexes.h:319
SlotIndex getMBBEndIdx(unsigned Num) const
Returns the last index in the given basic block number.
Definition: SlotIndexes.h:481
MBBIndexIterator MBBIndexBegin() const
Returns an iterator for the begin of the idx2MBBMap.
Definition: SlotIndexes.h:509
MBBIndexIterator MBBIndexEnd() const
Return an iterator for the end of the idx2MBBMap.
Definition: SlotIndexes.h:514
SmallVectorImpl< IdxMBBPair >::const_iterator MBBIndexIterator
Iterator over the idx2MBBMap (sorted pairs of slot index of basic block begin and basic block)
Definition: SlotIndexes.h:492
SlotIndex getInstructionIndex(const MachineInstr &MI, bool IgnoreBundle=false) const
Returns the base index for the given instruction.
Definition: SlotIndexes.h:390
SlotIndex getMBBStartIdx(unsigned Num) const
Returns the first index in the given basic block number.
Definition: SlotIndexes.h:471
bool hasIndex(const MachineInstr &instr) const
Returns true if the given machine instr is mapped to an index, otherwise returns false.
Definition: SlotIndexes.h:385
size_type size() const
Definition: SmallPtrSet.h:93
bool erase(PtrType Ptr)
erase - If the set contains the specified pointer, remove it and return true, otherwise return false.
Definition: SmallPtrSet.h:379
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
Definition: SmallPtrSet.h:383
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
Definition: SmallPtrSet.h:365
iterator begin() const
Definition: SmallPtrSet.h:403
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
Definition: SmallPtrSet.h:450
size_t size() const
Definition: SmallVector.h:91
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: SmallVector.h:577
void resize(size_type N)
Definition: SmallVector.h:642
void push_back(const T &Elt)
Definition: SmallVector.h:416
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1200
Register getReg() const
MI-level Statepoint operands.
Definition: StackMaps.h:158
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:50
TargetInstrInfo - Interface to description of machine instruction set.
Primary interface to the complete machine description for the target machine.
Definition: TargetMachine.h:78
bool hasSuperClassEq(const TargetRegisterClass *RC) const
Returns true if RC is a super-class of or equal to this class.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
virtual const TargetRegisterInfo * getRegisterInfo() const
getRegisterInfo - If register information is available, return it.
virtual const RegisterBankInfo * getRegBankInfo() const
If the information for the register banks is available, return it.
virtual const TargetInstrInfo * getInstrInfo() const
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition: Twine.h:81
VNInfo - Value Number Information.
Definition: LiveInterval.h:53
bool isUnused() const
Returns true if this value is unused.
Definition: LiveInterval.h:81
unsigned id
The ID number of this value.
Definition: LiveInterval.h:58
SlotIndex def
The index of the defining instruction.
Definition: LiveInterval.h:61
bool isPHIDef() const
Returns true if this value is defined by a PHI instruction (or was, PHI instructions may have been el...
Definition: LiveInterval.h:78
LLVM Value Representation.
Definition: Value.h:74
std::pair< iterator, bool > insert(const ValueT &V)
Definition: DenseSet.h:206
Iterator for intrusive lists based on ilist_node.
self_iterator getIterator()
Definition: ilist_node.h:82
NodeTy * getNextNode()
Get the next node, or nullptr for the list tail.
Definition: ilist_node.h:289
constexpr char Attrs[]
Key for Kernel::Metadata::mAttrs.
const CustomOperand< const MCSubtargetInfo & > Msg[]
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
Definition: BitmaskEnum.h:119
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
Definition: CallingConv.h:24
AttributeList getAttributes(LLVMContext &C, ID id)
Return the attributes for an intrinsic.
@ OPERAND_REGISTER
Definition: MCInstrDesc.h:61
@ OPERAND_IMMEDIATE
Definition: MCInstrDesc.h:60
Reg
All possible values of the reg field in the ModR/M byte.
constexpr double e
Definition: MathExtras.h:31
const_iterator begin(StringRef path, Style style=Style::native)
Get begin iterator over path.
Definition: Path.cpp:226
const_iterator end(StringRef path)
Get end iterator over path.
Definition: Path.cpp:235
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
Definition: STLExtras.h:397
@ Offset
Definition: DWP.cpp:406
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1782
@ SjLj
setjmp/longjmp based exceptions
bool isPreISelGenericOpcode(unsigned Opcode)
Check whether the given Opcode is a generic opcode that is not supposed to appear after ISel.
Definition: TargetOpcodes.h:30
Printable printRegUnit(unsigned Unit, const TargetRegisterInfo *TRI)
Create Printable object to print register units on a raw_ostream.
void append_range(Container &C, Range &&R)
Wrapper function to append a range to a container.
Definition: STLExtras.h:2092
void set_subtract(S1Ty &S1, const S2Ty &S2)
set_subtract(A, B) - Compute A := A - B
Definition: SetOperations.h:82
Printable PrintLaneMask(LaneBitmask LaneMask)
Create Printable object to print LaneBitmasks on a raw_ostream.
Definition: LaneBitmask.h:92
bool isPreISelGenericOptimizationHint(unsigned Opcode)
Definition: TargetOpcodes.h:42
bool isScopedEHPersonality(EHPersonality Pers)
Returns true if this personality uses scope-style EH IR instructions: catchswitch,...
void initializeMachineVerifierPassPass(PassRegistry &)
auto reverse(ContainerTy &&C)
Definition: STLExtras.h:495
df_ext_iterator< T, SetTy > df_ext_begin(const T &G, SetTy &S)
bool none_of(R &&Range, UnaryPredicate P)
Provide wrappers to std::none_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1796
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
Definition: Error.cpp:145
bool set_union(S1Ty &S1, const S2Ty &S2)
set_union(A, B) - Compute A := A u B, return whether A changed.
Definition: SetOperations.h:23
EHPersonality classifyEHPersonality(const Value *Pers)
See if the given exception handling personality function is one that we understand.
raw_fd_ostream & errs()
This returns a reference to a raw_ostream for standard error.
AtomicOrdering
Atomic ordering for LLVM's memory model.
void verifyMachineFunction(MachineFunctionAnalysisManager *, const std::string &Banner, const MachineFunction &MF)
OutputIt move(R &&Range, OutputIt Out)
Provide wrappers to std::move which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1909
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Definition: STLExtras.h:1939
df_ext_iterator< T, SetTy > df_ext_end(const T &G, SetTy &S)
Printable printReg(Register Reg, const TargetRegisterInfo *TRI=nullptr, unsigned SubIdx=0, const MachineRegisterInfo *MRI=nullptr)
Prints virtual and physical registers with or without a TRI instance.
Printable printMBBReference(const MachineBasicBlock &MBB)
Prints a machine basic block reference.
FunctionPass * createMachineVerifierPass(const std::string &Banner)
createMachineVerifierPass - This pass verifies cenerated machine code instructions for correctness.
Definition: BitVector.h:851
static unsigned getSizeInBits(const fltSemantics &Sem)
Returns the size of the floating point number (in bits) in the given semantics.
Definition: APFloat.cpp:314
static constexpr LaneBitmask getAll()
Definition: LaneBitmask.h:82
constexpr bool none() const
Definition: LaneBitmask.h:52
constexpr bool any() const
Definition: LaneBitmask.h:53
static constexpr LaneBitmask getNone()
Definition: LaneBitmask.h:81
This represents a simple continuous liveness interval for a value.
Definition: LiveInterval.h:162
VarInfo - This represents the regions where a virtual register is live in the program.
Definition: LiveVariables.h:80
Pair of physical register and lane mask.