// Source: llvm/lib/CodeGen/MachineVerifier.cpp (LLVM 18.0.0git).
// Note: this file was extracted from a doxygen listing; page chrome removed.
1//===- MachineVerifier.cpp - Machine Code Verifier ------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// Pass to verify generated machine code. The following is checked:
10//
11// Operand counts: All explicit operands must be present.
12//
13// Register classes: All physical and virtual register operands must be
14// compatible with the register class required by the instruction descriptor.
15//
16// Register live intervals: Registers must be defined only once, and must be
17// defined before use.
18//
19// The machine code verifier is enabled with the command-line option
20// -verify-machineinstrs.
21//===----------------------------------------------------------------------===//
22
23#include "llvm/ADT/BitVector.h"
24#include "llvm/ADT/DenseMap.h"
25#include "llvm/ADT/DenseSet.h"
28#include "llvm/ADT/STLExtras.h"
32#include "llvm/ADT/StringRef.h"
33#include "llvm/ADT/Twine.h"
60#include "llvm/IR/BasicBlock.h"
61#include "llvm/IR/Constants.h"
63#include "llvm/IR/Function.h"
64#include "llvm/IR/InlineAsm.h"
67#include "llvm/MC/LaneBitmask.h"
68#include "llvm/MC/MCAsmInfo.h"
69#include "llvm/MC/MCDwarf.h"
70#include "llvm/MC/MCInstrDesc.h"
73#include "llvm/Pass.h"
77#include "llvm/Support/ModRef.h"
80#include <algorithm>
81#include <cassert>
82#include <cstddef>
83#include <cstdint>
84#include <iterator>
85#include <string>
86#include <utility>
87
88using namespace llvm;
89
90namespace {
91
92 struct MachineVerifier {
93 MachineVerifier(Pass *pass, const char *b) : PASS(pass), Banner(b) {}
94
95 MachineVerifier(const char *b, LiveVariables *LiveVars,
96 LiveIntervals *LiveInts, LiveStacks *LiveStks,
97 SlotIndexes *Indexes)
98 : Banner(b), LiveVars(LiveVars), LiveInts(LiveInts), LiveStks(LiveStks),
99 Indexes(Indexes) {}
100
101 unsigned verify(const MachineFunction &MF);
102
103 Pass *const PASS = nullptr;
104 const char *Banner;
105 const MachineFunction *MF = nullptr;
106 const TargetMachine *TM = nullptr;
107 const TargetInstrInfo *TII = nullptr;
108 const TargetRegisterInfo *TRI = nullptr;
109 const MachineRegisterInfo *MRI = nullptr;
110 const RegisterBankInfo *RBI = nullptr;
111
112 unsigned foundErrors = 0;
113
114 // Avoid querying the MachineFunctionProperties for each operand.
115 bool isFunctionRegBankSelected = false;
116 bool isFunctionSelected = false;
117 bool isFunctionTracksDebugUserValues = false;
118
119 using RegVector = SmallVector<Register, 16>;
120 using RegMaskVector = SmallVector<const uint32_t *, 4>;
121 using RegSet = DenseSet<Register>;
124
125 const MachineInstr *FirstNonPHI = nullptr;
126 const MachineInstr *FirstTerminator = nullptr;
127 BlockSet FunctionBlocks;
128
129 BitVector regsReserved;
130 RegSet regsLive;
131 RegVector regsDefined, regsDead, regsKilled;
132 RegMaskVector regMasks;
133
134 SlotIndex lastIndex;
135
136 // Add Reg and any sub-registers to RV
137 void addRegWithSubRegs(RegVector &RV, Register Reg) {
138 RV.push_back(Reg);
139 if (Reg.isPhysical())
140 append_range(RV, TRI->subregs(Reg.asMCReg()));
141 }
142
143 struct BBInfo {
144 // Is this MBB reachable from the MF entry point?
145 bool reachable = false;
146
147 // Vregs that must be live in because they are used without being
148 // defined. Map value is the user. vregsLiveIn doesn't include regs
149 // that only are used by PHI nodes.
150 RegMap vregsLiveIn;
151
152 // Regs killed in MBB. They may be defined again, and will then be in both
153 // regsKilled and regsLiveOut.
154 RegSet regsKilled;
155
156 // Regs defined in MBB and live out. Note that vregs passing through may
157 // be live out without being mentioned here.
158 RegSet regsLiveOut;
159
160 // Vregs that pass through MBB untouched. This set is disjoint from
161 // regsKilled and regsLiveOut.
162 RegSet vregsPassed;
163
164 // Vregs that must pass through MBB because they are needed by a successor
165 // block. This set is disjoint from regsLiveOut.
166 RegSet vregsRequired;
167
168 // Set versions of block's predecessor and successor lists.
169 BlockSet Preds, Succs;
170
171 BBInfo() = default;
172
173 // Add register to vregsRequired if it belongs there. Return true if
174 // anything changed.
175 bool addRequired(Register Reg) {
176 if (!Reg.isVirtual())
177 return false;
178 if (regsLiveOut.count(Reg))
179 return false;
180 return vregsRequired.insert(Reg).second;
181 }
182
183 // Same for a full set.
184 bool addRequired(const RegSet &RS) {
185 bool Changed = false;
186 for (Register Reg : RS)
187 Changed |= addRequired(Reg);
188 return Changed;
189 }
190
191 // Same for a full map.
192 bool addRequired(const RegMap &RM) {
193 bool Changed = false;
194 for (const auto &I : RM)
195 Changed |= addRequired(I.first);
196 return Changed;
197 }
198
199 // Live-out registers are either in regsLiveOut or vregsPassed.
200 bool isLiveOut(Register Reg) const {
201 return regsLiveOut.count(Reg) || vregsPassed.count(Reg);
202 }
203 };
204
205 // Extra register info per MBB.
207
208 bool isReserved(Register Reg) {
209 return Reg.id() < regsReserved.size() && regsReserved.test(Reg.id());
210 }
211
212 bool isAllocatable(Register Reg) const {
213 return Reg.id() < TRI->getNumRegs() && TRI->isInAllocatableClass(Reg) &&
214 !regsReserved.test(Reg.id());
215 }
216
217 // Analysis information if available
218 LiveVariables *LiveVars = nullptr;
219 LiveIntervals *LiveInts = nullptr;
220 LiveStacks *LiveStks = nullptr;
221 SlotIndexes *Indexes = nullptr;
222
223 void visitMachineFunctionBefore();
224 void visitMachineBasicBlockBefore(const MachineBasicBlock *MBB);
225 void visitMachineBundleBefore(const MachineInstr *MI);
226
227 /// Verify that all of \p MI's virtual register operands are scalars.
228 /// \returns True if all virtual register operands are scalar. False
229 /// otherwise.
230 bool verifyAllRegOpsScalar(const MachineInstr &MI,
231 const MachineRegisterInfo &MRI);
232 bool verifyVectorElementMatch(LLT Ty0, LLT Ty1, const MachineInstr *MI);
233
234 bool verifyGIntrinsicSideEffects(const MachineInstr *MI);
235 bool verifyGIntrinsicConvergence(const MachineInstr *MI);
236 void verifyPreISelGenericInstruction(const MachineInstr *MI);
237
238 void visitMachineInstrBefore(const MachineInstr *MI);
239 void visitMachineOperand(const MachineOperand *MO, unsigned MONum);
240 void visitMachineBundleAfter(const MachineInstr *MI);
241 void visitMachineBasicBlockAfter(const MachineBasicBlock *MBB);
242 void visitMachineFunctionAfter();
243
244 void report(const char *msg, const MachineFunction *MF);
245 void report(const char *msg, const MachineBasicBlock *MBB);
246 void report(const char *msg, const MachineInstr *MI);
247 void report(const char *msg, const MachineOperand *MO, unsigned MONum,
248 LLT MOVRegType = LLT{});
249 void report(const Twine &Msg, const MachineInstr *MI);
250
251 void report_context(const LiveInterval &LI) const;
252 void report_context(const LiveRange &LR, Register VRegUnit,
253 LaneBitmask LaneMask) const;
254 void report_context(const LiveRange::Segment &S) const;
255 void report_context(const VNInfo &VNI) const;
256 void report_context(SlotIndex Pos) const;
257 void report_context(MCPhysReg PhysReg) const;
258 void report_context_liverange(const LiveRange &LR) const;
259 void report_context_lanemask(LaneBitmask LaneMask) const;
260 void report_context_vreg(Register VReg) const;
261 void report_context_vreg_regunit(Register VRegOrUnit) const;
262
263 void verifyInlineAsm(const MachineInstr *MI);
264
265 void checkLiveness(const MachineOperand *MO, unsigned MONum);
266 void checkLivenessAtUse(const MachineOperand *MO, unsigned MONum,
267 SlotIndex UseIdx, const LiveRange &LR,
268 Register VRegOrUnit,
269 LaneBitmask LaneMask = LaneBitmask::getNone());
270 void checkLivenessAtDef(const MachineOperand *MO, unsigned MONum,
271 SlotIndex DefIdx, const LiveRange &LR,
272 Register VRegOrUnit, bool SubRangeCheck = false,
273 LaneBitmask LaneMask = LaneBitmask::getNone());
274
275 void markReachable(const MachineBasicBlock *MBB);
276 void calcRegsPassed();
277 void checkPHIOps(const MachineBasicBlock &MBB);
278
279 void calcRegsRequired();
280 void verifyLiveVariables();
281 void verifyLiveIntervals();
282 void verifyLiveInterval(const LiveInterval&);
283 void verifyLiveRangeValue(const LiveRange &, const VNInfo *, Register,
285 void verifyLiveRangeSegment(const LiveRange &,
288 void verifyLiveRange(const LiveRange &, Register,
289 LaneBitmask LaneMask = LaneBitmask::getNone());
290
291 void verifyStackFrame();
292
293 void verifySlotIndexes() const;
294 void verifyProperties(const MachineFunction &MF);
295 };
296
297 struct MachineVerifierPass : public MachineFunctionPass {
298 static char ID; // Pass ID, replacement for typeid
299
300 const std::string Banner;
301
302 MachineVerifierPass(std::string banner = std::string())
303 : MachineFunctionPass(ID), Banner(std::move(banner)) {
305 }
306
307 void getAnalysisUsage(AnalysisUsage &AU) const override {
312 AU.setPreservesAll();
314 }
315
316 bool runOnMachineFunction(MachineFunction &MF) override {
317 // Skip functions that have known verification problems.
318 // FIXME: Remove this mechanism when all problematic passes have been
319 // fixed.
320 if (MF.getProperties().hasProperty(
321 MachineFunctionProperties::Property::FailsVerification))
322 return false;
323
324 unsigned FoundErrors = MachineVerifier(this, Banner.c_str()).verify(MF);
325 if (FoundErrors)
326 report_fatal_error("Found "+Twine(FoundErrors)+" machine code errors.");
327 return false;
328 }
329 };
330
331} // end anonymous namespace
332
333char MachineVerifierPass::ID = 0;
334
335INITIALIZE_PASS(MachineVerifierPass, "machineverifier",
336 "Verify generated machine code", false, false)
337
339 return new MachineVerifierPass(Banner);
340}
341
343 const std::string &Banner,
344 const MachineFunction &MF) {
345 // TODO: Use MFAM after porting below analyses.
346 // LiveVariables *LiveVars;
347 // LiveIntervals *LiveInts;
348 // LiveStacks *LiveStks;
349 // SlotIndexes *Indexes;
350 unsigned FoundErrors = MachineVerifier(nullptr, Banner.c_str()).verify(MF);
351 if (FoundErrors)
352 report_fatal_error("Found " + Twine(FoundErrors) + " machine code errors.");
353}
354
355bool MachineFunction::verify(Pass *p, const char *Banner, bool AbortOnErrors)
356 const {
357 MachineFunction &MF = const_cast<MachineFunction&>(*this);
358 unsigned FoundErrors = MachineVerifier(p, Banner).verify(MF);
359 if (AbortOnErrors && FoundErrors)
360 report_fatal_error("Found "+Twine(FoundErrors)+" machine code errors.");
361 return FoundErrors == 0;
362}
363
365 const char *Banner, bool AbortOnErrors) const {
366 MachineFunction &MF = const_cast<MachineFunction &>(*this);
367 unsigned FoundErrors =
368 MachineVerifier(Banner, nullptr, LiveInts, nullptr, Indexes).verify(MF);
369 if (AbortOnErrors && FoundErrors)
370 report_fatal_error("Found " + Twine(FoundErrors) + " machine code errors.");
371 return FoundErrors == 0;
372}
373
374void MachineVerifier::verifySlotIndexes() const {
375 if (Indexes == nullptr)
376 return;
377
378 // Ensure the IdxMBB list is sorted by slot indexes.
381 E = Indexes->MBBIndexEnd(); I != E; ++I) {
382 assert(!Last.isValid() || I->first > Last);
383 Last = I->first;
384 }
385}
386
387void MachineVerifier::verifyProperties(const MachineFunction &MF) {
388 // If a pass has introduced virtual registers without clearing the
389 // NoVRegs property (or set it without allocating the vregs)
390 // then report an error.
391 if (MF.getProperties().hasProperty(
393 MRI->getNumVirtRegs())
394 report("Function has NoVRegs property but there are VReg operands", &MF);
395}
396
397unsigned MachineVerifier::verify(const MachineFunction &MF) {
398 foundErrors = 0;
399
400 this->MF = &MF;
401 TM = &MF.getTarget();
404 RBI = MF.getSubtarget().getRegBankInfo();
405 MRI = &MF.getRegInfo();
406
407 const bool isFunctionFailedISel = MF.getProperties().hasProperty(
409
410 // If we're mid-GlobalISel and we already triggered the fallback path then
411 // it's expected that the MIR is somewhat broken but that's ok since we'll
412 // reset it and clear the FailedISel attribute in ResetMachineFunctions.
413 if (isFunctionFailedISel)
414 return foundErrors;
415
416 isFunctionRegBankSelected = MF.getProperties().hasProperty(
418 isFunctionSelected = MF.getProperties().hasProperty(
420 isFunctionTracksDebugUserValues = MF.getProperties().hasProperty(
422
423 if (PASS) {
424 LiveInts = PASS->getAnalysisIfAvailable<LiveIntervals>();
425 // We don't want to verify LiveVariables if LiveIntervals is available.
426 if (!LiveInts)
427 LiveVars = PASS->getAnalysisIfAvailable<LiveVariables>();
428 LiveStks = PASS->getAnalysisIfAvailable<LiveStacks>();
429 Indexes = PASS->getAnalysisIfAvailable<SlotIndexes>();
430 }
431
432 verifySlotIndexes();
433
434 verifyProperties(MF);
435
436 visitMachineFunctionBefore();
437 for (const MachineBasicBlock &MBB : MF) {
438 visitMachineBasicBlockBefore(&MBB);
439 // Keep track of the current bundle header.
440 const MachineInstr *CurBundle = nullptr;
441 // Do we expect the next instruction to be part of the same bundle?
442 bool InBundle = false;
443
444 for (const MachineInstr &MI : MBB.instrs()) {
445 if (MI.getParent() != &MBB) {
446 report("Bad instruction parent pointer", &MBB);
447 errs() << "Instruction: " << MI;
448 continue;
449 }
450
451 // Check for consistent bundle flags.
452 if (InBundle && !MI.isBundledWithPred())
453 report("Missing BundledPred flag, "
454 "BundledSucc was set on predecessor",
455 &MI);
456 if (!InBundle && MI.isBundledWithPred())
457 report("BundledPred flag is set, "
458 "but BundledSucc not set on predecessor",
459 &MI);
460
461 // Is this a bundle header?
462 if (!MI.isInsideBundle()) {
463 if (CurBundle)
464 visitMachineBundleAfter(CurBundle);
465 CurBundle = &MI;
466 visitMachineBundleBefore(CurBundle);
467 } else if (!CurBundle)
468 report("No bundle header", &MI);
469 visitMachineInstrBefore(&MI);
470 for (unsigned I = 0, E = MI.getNumOperands(); I != E; ++I) {
471 const MachineOperand &Op = MI.getOperand(I);
472 if (Op.getParent() != &MI) {
473 // Make sure to use correct addOperand / removeOperand / ChangeTo
474 // functions when replacing operands of a MachineInstr.
475 report("Instruction has operand with wrong parent set", &MI);
476 }
477
478 visitMachineOperand(&Op, I);
479 }
480
481 // Was this the last bundled instruction?
482 InBundle = MI.isBundledWithSucc();
483 }
484 if (CurBundle)
485 visitMachineBundleAfter(CurBundle);
486 if (InBundle)
487 report("BundledSucc flag set on last instruction in block", &MBB.back());
488 visitMachineBasicBlockAfter(&MBB);
489 }
490 visitMachineFunctionAfter();
491
492 // Clean up.
493 regsLive.clear();
494 regsDefined.clear();
495 regsDead.clear();
496 regsKilled.clear();
497 regMasks.clear();
498 MBBInfoMap.clear();
499
500 return foundErrors;
501}
502
503void MachineVerifier::report(const char *msg, const MachineFunction *MF) {
504 assert(MF);
505 errs() << '\n';
506 if (!foundErrors++) {
507 if (Banner)
508 errs() << "# " << Banner << '\n';
509 if (LiveInts != nullptr)
510 LiveInts->print(errs());
511 else
512 MF->print(errs(), Indexes);
513 }
514 errs() << "*** Bad machine code: " << msg << " ***\n"
515 << "- function: " << MF->getName() << "\n";
516}
517
518void MachineVerifier::report(const char *msg, const MachineBasicBlock *MBB) {
519 assert(MBB);
520 report(msg, MBB->getParent());
521 errs() << "- basic block: " << printMBBReference(*MBB) << ' '
522 << MBB->getName() << " (" << (const void *)MBB << ')';
523 if (Indexes)
524 errs() << " [" << Indexes->getMBBStartIdx(MBB)
525 << ';' << Indexes->getMBBEndIdx(MBB) << ')';
526 errs() << '\n';
527}
528
529void MachineVerifier::report(const char *msg, const MachineInstr *MI) {
530 assert(MI);
531 report(msg, MI->getParent());
532 errs() << "- instruction: ";
533 if (Indexes && Indexes->hasIndex(*MI))
534 errs() << Indexes->getInstructionIndex(*MI) << '\t';
535 MI->print(errs(), /*IsStandalone=*/true);
536}
537
538void MachineVerifier::report(const char *msg, const MachineOperand *MO,
539 unsigned MONum, LLT MOVRegType) {
540 assert(MO);
541 report(msg, MO->getParent());
542 errs() << "- operand " << MONum << ": ";
543 MO->print(errs(), MOVRegType, TRI);
544 errs() << "\n";
545}
546
547void MachineVerifier::report(const Twine &Msg, const MachineInstr *MI) {
548 report(Msg.str().c_str(), MI);
549}
550
551void MachineVerifier::report_context(SlotIndex Pos) const {
552 errs() << "- at: " << Pos << '\n';
553}
554
555void MachineVerifier::report_context(const LiveInterval &LI) const {
556 errs() << "- interval: " << LI << '\n';
557}
558
559void MachineVerifier::report_context(const LiveRange &LR, Register VRegUnit,
560 LaneBitmask LaneMask) const {
561 report_context_liverange(LR);
562 report_context_vreg_regunit(VRegUnit);
563 if (LaneMask.any())
564 report_context_lanemask(LaneMask);
565}
566
567void MachineVerifier::report_context(const LiveRange::Segment &S) const {
568 errs() << "- segment: " << S << '\n';
569}
570
571void MachineVerifier::report_context(const VNInfo &VNI) const {
572 errs() << "- ValNo: " << VNI.id << " (def " << VNI.def << ")\n";
573}
574
575void MachineVerifier::report_context_liverange(const LiveRange &LR) const {
576 errs() << "- liverange: " << LR << '\n';
577}
578
579void MachineVerifier::report_context(MCPhysReg PReg) const {
580 errs() << "- p. register: " << printReg(PReg, TRI) << '\n';
581}
582
583void MachineVerifier::report_context_vreg(Register VReg) const {
584 errs() << "- v. register: " << printReg(VReg, TRI) << '\n';
585}
586
587void MachineVerifier::report_context_vreg_regunit(Register VRegOrUnit) const {
588 if (VRegOrUnit.isVirtual()) {
589 report_context_vreg(VRegOrUnit);
590 } else {
591 errs() << "- regunit: " << printRegUnit(VRegOrUnit, TRI) << '\n';
592 }
593}
594
595void MachineVerifier::report_context_lanemask(LaneBitmask LaneMask) const {
596 errs() << "- lanemask: " << PrintLaneMask(LaneMask) << '\n';
597}
598
599void MachineVerifier::markReachable(const MachineBasicBlock *MBB) {
600 BBInfo &MInfo = MBBInfoMap[MBB];
601 if (!MInfo.reachable) {
602 MInfo.reachable = true;
603 for (const MachineBasicBlock *Succ : MBB->successors())
604 markReachable(Succ);
605 }
606}
607
608void MachineVerifier::visitMachineFunctionBefore() {
609 lastIndex = SlotIndex();
610 regsReserved = MRI->reservedRegsFrozen() ? MRI->getReservedRegs()
611 : TRI->getReservedRegs(*MF);
612
613 if (!MF->empty())
614 markReachable(&MF->front());
615
616 // Build a set of the basic blocks in the function.
617 FunctionBlocks.clear();
618 for (const auto &MBB : *MF) {
619 FunctionBlocks.insert(&MBB);
620 BBInfo &MInfo = MBBInfoMap[&MBB];
621
622 MInfo.Preds.insert(MBB.pred_begin(), MBB.pred_end());
623 if (MInfo.Preds.size() != MBB.pred_size())
624 report("MBB has duplicate entries in its predecessor list.", &MBB);
625
626 MInfo.Succs.insert(MBB.succ_begin(), MBB.succ_end());
627 if (MInfo.Succs.size() != MBB.succ_size())
628 report("MBB has duplicate entries in its successor list.", &MBB);
629 }
630
631 // Check that the register use lists are sane.
632 MRI->verifyUseLists();
633
634 if (!MF->empty())
635 verifyStackFrame();
636}
637
638void
639MachineVerifier::visitMachineBasicBlockBefore(const MachineBasicBlock *MBB) {
640 FirstTerminator = nullptr;
641 FirstNonPHI = nullptr;
642
643 if (!MF->getProperties().hasProperty(
644 MachineFunctionProperties::Property::NoPHIs) && MRI->tracksLiveness()) {
645 // If this block has allocatable physical registers live-in, check that
646 // it is an entry block or landing pad.
647 for (const auto &LI : MBB->liveins()) {
648 if (isAllocatable(LI.PhysReg) && !MBB->isEHPad() &&
649 MBB->getIterator() != MBB->getParent()->begin() &&
651 report("MBB has allocatable live-in, but isn't entry, landing-pad, or "
652 "inlineasm-br-indirect-target.",
653 MBB);
654 report_context(LI.PhysReg);
655 }
656 }
657 }
658
659 if (MBB->isIRBlockAddressTaken()) {
661 report("ir-block-address-taken is associated with basic block not used by "
662 "a blockaddress.",
663 MBB);
664 }
665
666 // Count the number of landing pad successors.
668 for (const auto *succ : MBB->successors()) {
669 if (succ->isEHPad())
670 LandingPadSuccs.insert(succ);
671 if (!FunctionBlocks.count(succ))
672 report("MBB has successor that isn't part of the function.", MBB);
673 if (!MBBInfoMap[succ].Preds.count(MBB)) {
674 report("Inconsistent CFG", MBB);
675 errs() << "MBB is not in the predecessor list of the successor "
676 << printMBBReference(*succ) << ".\n";
677 }
678 }
679
680 // Check the predecessor list.
681 for (const MachineBasicBlock *Pred : MBB->predecessors()) {
682 if (!FunctionBlocks.count(Pred))
683 report("MBB has predecessor that isn't part of the function.", MBB);
684 if (!MBBInfoMap[Pred].Succs.count(MBB)) {
685 report("Inconsistent CFG", MBB);
686 errs() << "MBB is not in the successor list of the predecessor "
687 << printMBBReference(*Pred) << ".\n";
688 }
689 }
690
691 const MCAsmInfo *AsmInfo = TM->getMCAsmInfo();
692 const BasicBlock *BB = MBB->getBasicBlock();
693 const Function &F = MF->getFunction();
694 if (LandingPadSuccs.size() > 1 &&
695 !(AsmInfo &&
697 BB && isa<SwitchInst>(BB->getTerminator())) &&
698 !isScopedEHPersonality(classifyEHPersonality(F.getPersonalityFn())))
699 report("MBB has more than one landing pad successor", MBB);
700
701 // Call analyzeBranch. If it succeeds, there several more conditions to check.
702 MachineBasicBlock *TBB = nullptr, *FBB = nullptr;
704 if (!TII->analyzeBranch(*const_cast<MachineBasicBlock *>(MBB), TBB, FBB,
705 Cond)) {
706 // Ok, analyzeBranch thinks it knows what's going on with this block. Let's
707 // check whether its answers match up with reality.
708 if (!TBB && !FBB) {
709 // Block falls through to its successor.
710 if (!MBB->empty() && MBB->back().isBarrier() &&
711 !TII->isPredicated(MBB->back())) {
712 report("MBB exits via unconditional fall-through but ends with a "
713 "barrier instruction!", MBB);
714 }
715 if (!Cond.empty()) {
716 report("MBB exits via unconditional fall-through but has a condition!",
717 MBB);
718 }
719 } else if (TBB && !FBB && Cond.empty()) {
720 // Block unconditionally branches somewhere.
721 if (MBB->empty()) {
722 report("MBB exits via unconditional branch but doesn't contain "
723 "any instructions!", MBB);
724 } else if (!MBB->back().isBarrier()) {
725 report("MBB exits via unconditional branch but doesn't end with a "
726 "barrier instruction!", MBB);
727 } else if (!MBB->back().isTerminator()) {
728 report("MBB exits via unconditional branch but the branch isn't a "
729 "terminator instruction!", MBB);
730 }
731 } else if (TBB && !FBB && !Cond.empty()) {
732 // Block conditionally branches somewhere, otherwise falls through.
733 if (MBB->empty()) {
734 report("MBB exits via conditional branch/fall-through but doesn't "
735 "contain any instructions!", MBB);
736 } else if (MBB->back().isBarrier()) {
737 report("MBB exits via conditional branch/fall-through but ends with a "
738 "barrier instruction!", MBB);
739 } else if (!MBB->back().isTerminator()) {
740 report("MBB exits via conditional branch/fall-through but the branch "
741 "isn't a terminator instruction!", MBB);
742 }
743 } else if (TBB && FBB) {
744 // Block conditionally branches somewhere, otherwise branches
745 // somewhere else.
746 if (MBB->empty()) {
747 report("MBB exits via conditional branch/branch but doesn't "
748 "contain any instructions!", MBB);
749 } else if (!MBB->back().isBarrier()) {
750 report("MBB exits via conditional branch/branch but doesn't end with a "
751 "barrier instruction!", MBB);
752 } else if (!MBB->back().isTerminator()) {
753 report("MBB exits via conditional branch/branch but the branch "
754 "isn't a terminator instruction!", MBB);
755 }
756 if (Cond.empty()) {
757 report("MBB exits via conditional branch/branch but there's no "
758 "condition!", MBB);
759 }
760 } else {
761 report("analyzeBranch returned invalid data!", MBB);
762 }
763
764 // Now check that the successors match up with the answers reported by
765 // analyzeBranch.
766 if (TBB && !MBB->isSuccessor(TBB))
767 report("MBB exits via jump or conditional branch, but its target isn't a "
768 "CFG successor!",
769 MBB);
770 if (FBB && !MBB->isSuccessor(FBB))
771 report("MBB exits via conditional branch, but its target isn't a CFG "
772 "successor!",
773 MBB);
774
775 // There might be a fallthrough to the next block if there's either no
776 // unconditional true branch, or if there's a condition, and one of the
777 // branches is missing.
778 bool Fallthrough = !TBB || (!Cond.empty() && !FBB);
779
780 // A conditional fallthrough must be an actual CFG successor, not
781 // unreachable. (Conversely, an unconditional fallthrough might not really
782 // be a successor, because the block might end in unreachable.)
783 if (!Cond.empty() && !FBB) {
785 if (MBBI == MF->end()) {
786 report("MBB conditionally falls through out of function!", MBB);
787 } else if (!MBB->isSuccessor(&*MBBI))
788 report("MBB exits via conditional branch/fall-through but the CFG "
789 "successors don't match the actual successors!",
790 MBB);
791 }
792
793 // Verify that there aren't any extra un-accounted-for successors.
794 for (const MachineBasicBlock *SuccMBB : MBB->successors()) {
795 // If this successor is one of the branch targets, it's okay.
796 if (SuccMBB == TBB || SuccMBB == FBB)
797 continue;
798 // If we might have a fallthrough, and the successor is the fallthrough
799 // block, that's also ok.
800 if (Fallthrough && SuccMBB == MBB->getNextNode())
801 continue;
802 // Also accept successors which are for exception-handling or might be
803 // inlineasm_br targets.
804 if (SuccMBB->isEHPad() || SuccMBB->isInlineAsmBrIndirectTarget())
805 continue;
806 report("MBB has unexpected successors which are not branch targets, "
807 "fallthrough, EHPads, or inlineasm_br targets.",
808 MBB);
809 }
810 }
811
812 regsLive.clear();
813 if (MRI->tracksLiveness()) {
814 for (const auto &LI : MBB->liveins()) {
815 if (!Register::isPhysicalRegister(LI.PhysReg)) {
816 report("MBB live-in list contains non-physical register", MBB);
817 continue;
818 }
819 for (const MCPhysReg &SubReg : TRI->subregs_inclusive(LI.PhysReg))
820 regsLive.insert(SubReg);
821 }
822 }
823
824 const MachineFrameInfo &MFI = MF->getFrameInfo();
825 BitVector PR = MFI.getPristineRegs(*MF);
826 for (unsigned I : PR.set_bits()) {
827 for (const MCPhysReg &SubReg : TRI->subregs_inclusive(I))
828 regsLive.insert(SubReg);
829 }
830
831 regsKilled.clear();
832 regsDefined.clear();
833
834 if (Indexes)
835 lastIndex = Indexes->getMBBStartIdx(MBB);
836}
837
838// This function gets called for all bundle headers, including normal
839// stand-alone unbundled instructions.
840void MachineVerifier::visitMachineBundleBefore(const MachineInstr *MI) {
841 if (Indexes && Indexes->hasIndex(*MI)) {
842 SlotIndex idx = Indexes->getInstructionIndex(*MI);
843 if (!(idx > lastIndex)) {
844 report("Instruction index out of order", MI);
845 errs() << "Last instruction was at " << lastIndex << '\n';
846 }
847 lastIndex = idx;
848 }
849
850 // Ensure non-terminators don't follow terminators.
851 if (MI->isTerminator()) {
852 if (!FirstTerminator)
853 FirstTerminator = MI;
854 } else if (FirstTerminator) {
855 // For GlobalISel, G_INVOKE_REGION_START is a terminator that we allow to
856 // precede non-terminators.
857 if (FirstTerminator->getOpcode() != TargetOpcode::G_INVOKE_REGION_START) {
858 report("Non-terminator instruction after the first terminator", MI);
859 errs() << "First terminator was:\t" << *FirstTerminator;
860 }
861 }
862}
863
864// The operands on an INLINEASM instruction must follow a template.
865// Verify that the flag operands make sense.
866void MachineVerifier::verifyInlineAsm(const MachineInstr *MI) {
867 // The first two operands on INLINEASM are the asm string and global flags.
868 if (MI->getNumOperands() < 2) {
869 report("Too few operands on inline asm", MI);
870 return;
871 }
872 if (!MI->getOperand(0).isSymbol())
873 report("Asm string must be an external symbol", MI);
874 if (!MI->getOperand(1).isImm())
875 report("Asm flags must be an immediate", MI);
876 // Allowed flags are Extra_HasSideEffects = 1, Extra_IsAlignStack = 2,
877 // Extra_AsmDialect = 4, Extra_MayLoad = 8, and Extra_MayStore = 16,
878 // and Extra_IsConvergent = 32.
879 if (!isUInt<6>(MI->getOperand(1).getImm()))
880 report("Unknown asm flags", &MI->getOperand(1), 1);
881
882 static_assert(InlineAsm::MIOp_FirstOperand == 2, "Asm format changed");
883
884 unsigned OpNo = InlineAsm::MIOp_FirstOperand;
885 unsigned NumOps;
886 for (unsigned e = MI->getNumOperands(); OpNo < e; OpNo += NumOps) {
887 const MachineOperand &MO = MI->getOperand(OpNo);
888 // There may be implicit ops after the fixed operands.
889 if (!MO.isImm())
890 break;
891 const InlineAsm::Flag F(MO.getImm());
892 NumOps = 1 + F.getNumOperandRegisters();
893 }
894
895 if (OpNo > MI->getNumOperands())
896 report("Missing operands in last group", MI);
897
898 // An optional MDNode follows the groups.
899 if (OpNo < MI->getNumOperands() && MI->getOperand(OpNo).isMetadata())
900 ++OpNo;
901
902 // All trailing operands must be implicit registers.
903 for (unsigned e = MI->getNumOperands(); OpNo < e; ++OpNo) {
904 const MachineOperand &MO = MI->getOperand(OpNo);
905 if (!MO.isReg() || !MO.isImplicit())
906 report("Expected implicit register after groups", &MO, OpNo);
907 }
908
909 if (MI->getOpcode() == TargetOpcode::INLINEASM_BR) {
910 const MachineBasicBlock *MBB = MI->getParent();
911
912 for (unsigned i = InlineAsm::MIOp_FirstOperand, e = MI->getNumOperands();
913 i != e; ++i) {
914 const MachineOperand &MO = MI->getOperand(i);
915
916 if (!MO.isMBB())
917 continue;
918
919 // Check the successor & predecessor lists look ok, assume they are
920 // not. Find the indirect target without going through the successors.
921 const MachineBasicBlock *IndirectTargetMBB = MO.getMBB();
922 if (!IndirectTargetMBB) {
923 report("INLINEASM_BR indirect target does not exist", &MO, i);
924 break;
925 }
926
927 if (!MBB->isSuccessor(IndirectTargetMBB))
928 report("INLINEASM_BR indirect target missing from successor list", &MO,
929 i);
930
931 if (!IndirectTargetMBB->isPredecessor(MBB))
932 report("INLINEASM_BR indirect target predecessor list missing parent",
933 &MO, i);
934 }
935 }
936}
937
938bool MachineVerifier::verifyAllRegOpsScalar(const MachineInstr &MI,
939 const MachineRegisterInfo &MRI) {
940 if (none_of(MI.explicit_operands(), [&MRI](const MachineOperand &Op) {
941 if (!Op.isReg())
942 return false;
943 const auto Reg = Op.getReg();
944 if (Reg.isPhysical())
945 return false;
946 return !MRI.getType(Reg).isScalar();
947 }))
948 return true;
949 report("All register operands must have scalar types", &MI);
950 return false;
951}
952
953/// Check that types are consistent when two operands need to have the same
954/// number of vector elements.
955/// \return true if the types are valid.
956bool MachineVerifier::verifyVectorElementMatch(LLT Ty0, LLT Ty1,
957 const MachineInstr *MI) {
958 if (Ty0.isVector() != Ty1.isVector()) {
959 report("operand types must be all-vector or all-scalar", MI);
960 // Generally we try to report as many issues as possible at once, but in
961 // this case it's not clear what should we be comparing the size of the
962 // scalar with: the size of the whole vector or its lane. Instead of
963 // making an arbitrary choice and emitting not so helpful message, let's
964 // avoid the extra noise and stop here.
965 return false;
966 }
967
968 if (Ty0.isVector() && Ty0.getNumElements() != Ty1.getNumElements()) {
969 report("operand types must preserve number of vector elements", MI);
970 return false;
971 }
972
973 return true;
974}
975
976bool MachineVerifier::verifyGIntrinsicSideEffects(const MachineInstr *MI) {
977 auto Opcode = MI->getOpcode();
978 bool NoSideEffects = Opcode == TargetOpcode::G_INTRINSIC ||
979 Opcode == TargetOpcode::G_INTRINSIC_CONVERGENT;
980 unsigned IntrID = cast<GIntrinsic>(MI)->getIntrinsicID();
981 if (IntrID != 0 && IntrID < Intrinsic::num_intrinsics) {
983 MF->getFunction().getContext(), static_cast<Intrinsic::ID>(IntrID));
984 bool DeclHasSideEffects = !Attrs.getMemoryEffects().doesNotAccessMemory();
985 if (NoSideEffects && DeclHasSideEffects) {
986 report(Twine(TII->getName(Opcode),
987 " used with intrinsic that accesses memory"),
988 MI);
989 return false;
990 }
991 if (!NoSideEffects && !DeclHasSideEffects) {
992 report(Twine(TII->getName(Opcode), " used with readnone intrinsic"), MI);
993 return false;
994 }
995 }
996
997 return true;
998}
999
1000bool MachineVerifier::verifyGIntrinsicConvergence(const MachineInstr *MI) {
1001 auto Opcode = MI->getOpcode();
1002 bool NotConvergent = Opcode == TargetOpcode::G_INTRINSIC ||
1003 Opcode == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS;
1004 unsigned IntrID = cast<GIntrinsic>(MI)->getIntrinsicID();
1005 if (IntrID != 0 && IntrID < Intrinsic::num_intrinsics) {
1007 MF->getFunction().getContext(), static_cast<Intrinsic::ID>(IntrID));
1008 bool DeclIsConvergent = Attrs.hasFnAttr(Attribute::Convergent);
1009 if (NotConvergent && DeclIsConvergent) {
1010 report(Twine(TII->getName(Opcode), " used with a convergent intrinsic"),
1011 MI);
1012 return false;
1013 }
1014 if (!NotConvergent && !DeclIsConvergent) {
1015 report(
1016 Twine(TII->getName(Opcode), " used with a non-convergent intrinsic"),
1017 MI);
1018 return false;
1019 }
1020 }
1021
1022 return true;
1023}
1024
1025void MachineVerifier::verifyPreISelGenericInstruction(const MachineInstr *MI) {
1026 if (isFunctionSelected)
1027 report("Unexpected generic instruction in a Selected function", MI);
1028
1029 const MCInstrDesc &MCID = MI->getDesc();
1030 unsigned NumOps = MI->getNumOperands();
1031
1032 // Branches must reference a basic block if they are not indirect
1033 if (MI->isBranch() && !MI->isIndirectBranch()) {
1034 bool HasMBB = false;
1035 for (const MachineOperand &Op : MI->operands()) {
1036 if (Op.isMBB()) {
1037 HasMBB = true;
1038 break;
1039 }
1040 }
1041
1042 if (!HasMBB) {
1043 report("Branch instruction is missing a basic block operand or "
1044 "isIndirectBranch property",
1045 MI);
1046 }
1047 }
1048
1049 // Check types.
1051 for (unsigned I = 0, E = std::min(MCID.getNumOperands(), NumOps);
1052 I != E; ++I) {
1053 if (!MCID.operands()[I].isGenericType())
1054 continue;
1055 // Generic instructions specify type equality constraints between some of
1056 // their operands. Make sure these are consistent.
1057 size_t TypeIdx = MCID.operands()[I].getGenericTypeIndex();
1058 Types.resize(std::max(TypeIdx + 1, Types.size()));
1059
1060 const MachineOperand *MO = &MI->getOperand(I);
1061 if (!MO->isReg()) {
1062 report("generic instruction must use register operands", MI);
1063 continue;
1064 }
1065
1066 LLT OpTy = MRI->getType(MO->getReg());
1067 // Don't report a type mismatch if there is no actual mismatch, only a
1068 // type missing, to reduce noise:
1069 if (OpTy.isValid()) {
1070 // Only the first valid type for a type index will be printed: don't
1071 // overwrite it later so it's always clear which type was expected:
1072 if (!Types[TypeIdx].isValid())
1073 Types[TypeIdx] = OpTy;
1074 else if (Types[TypeIdx] != OpTy)
1075 report("Type mismatch in generic instruction", MO, I, OpTy);
1076 } else {
1077 // Generic instructions must have types attached to their operands.
1078 report("Generic instruction is missing a virtual register type", MO, I);
1079 }
1080 }
1081
1082 // Generic opcodes must not have physical register operands.
1083 for (unsigned I = 0; I < MI->getNumOperands(); ++I) {
1084 const MachineOperand *MO = &MI->getOperand(I);
1085 if (MO->isReg() && MO->getReg().isPhysical())
1086 report("Generic instruction cannot have physical register", MO, I);
1087 }
1088
1089 // Avoid out of bounds in checks below. This was already reported earlier.
1090 if (MI->getNumOperands() < MCID.getNumOperands())
1091 return;
1092
1094 if (!TII->verifyInstruction(*MI, ErrorInfo))
1095 report(ErrorInfo.data(), MI);
1096
1097 // Verify properties of various specific instruction types
1098 unsigned Opc = MI->getOpcode();
1099 switch (Opc) {
1100 case TargetOpcode::G_ASSERT_SEXT:
1101 case TargetOpcode::G_ASSERT_ZEXT: {
1102 std::string OpcName =
1103 Opc == TargetOpcode::G_ASSERT_ZEXT ? "G_ASSERT_ZEXT" : "G_ASSERT_SEXT";
1104 if (!MI->getOperand(2).isImm()) {
1105 report(Twine(OpcName, " expects an immediate operand #2"), MI);
1106 break;
1107 }
1108
1109 Register Dst = MI->getOperand(0).getReg();
1110 Register Src = MI->getOperand(1).getReg();
1111 LLT SrcTy = MRI->getType(Src);
1112 int64_t Imm = MI->getOperand(2).getImm();
1113 if (Imm <= 0) {
1114 report(Twine(OpcName, " size must be >= 1"), MI);
1115 break;
1116 }
1117
1118 if (Imm >= SrcTy.getScalarSizeInBits()) {
1119 report(Twine(OpcName, " size must be less than source bit width"), MI);
1120 break;
1121 }
1122
1123 const RegisterBank *SrcRB = RBI->getRegBank(Src, *MRI, *TRI);
1124 const RegisterBank *DstRB = RBI->getRegBank(Dst, *MRI, *TRI);
1125
1126 // Allow only the source bank to be set.
1127 if ((SrcRB && DstRB && SrcRB != DstRB) || (DstRB && !SrcRB)) {
1128 report(Twine(OpcName, " cannot change register bank"), MI);
1129 break;
1130 }
1131
1132 // Don't allow a class change. Do allow member class->regbank.
1133 const TargetRegisterClass *DstRC = MRI->getRegClassOrNull(Dst);
1134 if (DstRC && DstRC != MRI->getRegClassOrNull(Src)) {
1135 report(
1136 Twine(OpcName, " source and destination register classes must match"),
1137 MI);
1138 break;
1139 }
1140
1141 break;
1142 }
1143
1144 case TargetOpcode::G_CONSTANT:
1145 case TargetOpcode::G_FCONSTANT: {
1146 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1147 if (DstTy.isVector())
1148 report("Instruction cannot use a vector result type", MI);
1149
1150 if (MI->getOpcode() == TargetOpcode::G_CONSTANT) {
1151 if (!MI->getOperand(1).isCImm()) {
1152 report("G_CONSTANT operand must be cimm", MI);
1153 break;
1154 }
1155
1156 const ConstantInt *CI = MI->getOperand(1).getCImm();
1157 if (CI->getBitWidth() != DstTy.getSizeInBits())
1158 report("inconsistent constant size", MI);
1159 } else {
1160 if (!MI->getOperand(1).isFPImm()) {
1161 report("G_FCONSTANT operand must be fpimm", MI);
1162 break;
1163 }
1164 const ConstantFP *CF = MI->getOperand(1).getFPImm();
1165
1167 DstTy.getSizeInBits()) {
1168 report("inconsistent constant size", MI);
1169 }
1170 }
1171
1172 break;
1173 }
1174 case TargetOpcode::G_LOAD:
1175 case TargetOpcode::G_STORE:
1176 case TargetOpcode::G_ZEXTLOAD:
1177 case TargetOpcode::G_SEXTLOAD: {
1178 LLT ValTy = MRI->getType(MI->getOperand(0).getReg());
1179 LLT PtrTy = MRI->getType(MI->getOperand(1).getReg());
1180 if (!PtrTy.isPointer())
1181 report("Generic memory instruction must access a pointer", MI);
1182
1183 // Generic loads and stores must have a single MachineMemOperand
1184 // describing that access.
1185 if (!MI->hasOneMemOperand()) {
1186 report("Generic instruction accessing memory must have one mem operand",
1187 MI);
1188 } else {
1189 const MachineMemOperand &MMO = **MI->memoperands_begin();
1190 if (MI->getOpcode() == TargetOpcode::G_ZEXTLOAD ||
1191 MI->getOpcode() == TargetOpcode::G_SEXTLOAD) {
1192 if (MMO.getSizeInBits() >= ValTy.getSizeInBits())
1193 report("Generic extload must have a narrower memory type", MI);
1194 } else if (MI->getOpcode() == TargetOpcode::G_LOAD) {
1195 if (MMO.getSize() > ValTy.getSizeInBytes())
1196 report("load memory size cannot exceed result size", MI);
1197 } else if (MI->getOpcode() == TargetOpcode::G_STORE) {
1198 if (ValTy.getSizeInBytes() < MMO.getSize())
1199 report("store memory size cannot exceed value size", MI);
1200 }
1201
1202 const AtomicOrdering Order = MMO.getSuccessOrdering();
1203 if (Opc == TargetOpcode::G_STORE) {
1204 if (Order == AtomicOrdering::Acquire ||
1206 report("atomic store cannot use acquire ordering", MI);
1207
1208 } else {
1209 if (Order == AtomicOrdering::Release ||
1211 report("atomic load cannot use release ordering", MI);
1212 }
1213 }
1214
1215 break;
1216 }
1217 case TargetOpcode::G_PHI: {
1218 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1219 if (!DstTy.isValid() || !all_of(drop_begin(MI->operands()),
1220 [this, &DstTy](const MachineOperand &MO) {
1221 if (!MO.isReg())
1222 return true;
1223 LLT Ty = MRI->getType(MO.getReg());
1224 if (!Ty.isValid() || (Ty != DstTy))
1225 return false;
1226 return true;
1227 }))
1228 report("Generic Instruction G_PHI has operands with incompatible/missing "
1229 "types",
1230 MI);
1231 break;
1232 }
1233 case TargetOpcode::G_BITCAST: {
1234 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1235 LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
1236 if (!DstTy.isValid() || !SrcTy.isValid())
1237 break;
1238
1239 if (SrcTy.isPointer() != DstTy.isPointer())
1240 report("bitcast cannot convert between pointers and other types", MI);
1241
1242 if (SrcTy.getSizeInBits() != DstTy.getSizeInBits())
1243 report("bitcast sizes must match", MI);
1244
1245 if (SrcTy == DstTy)
1246 report("bitcast must change the type", MI);
1247
1248 break;
1249 }
1250 case TargetOpcode::G_INTTOPTR:
1251 case TargetOpcode::G_PTRTOINT:
1252 case TargetOpcode::G_ADDRSPACE_CAST: {
1253 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1254 LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
1255 if (!DstTy.isValid() || !SrcTy.isValid())
1256 break;
1257
1258 verifyVectorElementMatch(DstTy, SrcTy, MI);
1259
1260 DstTy = DstTy.getScalarType();
1261 SrcTy = SrcTy.getScalarType();
1262
1263 if (MI->getOpcode() == TargetOpcode::G_INTTOPTR) {
1264 if (!DstTy.isPointer())
1265 report("inttoptr result type must be a pointer", MI);
1266 if (SrcTy.isPointer())
1267 report("inttoptr source type must not be a pointer", MI);
1268 } else if (MI->getOpcode() == TargetOpcode::G_PTRTOINT) {
1269 if (!SrcTy.isPointer())
1270 report("ptrtoint source type must be a pointer", MI);
1271 if (DstTy.isPointer())
1272 report("ptrtoint result type must not be a pointer", MI);
1273 } else {
1274 assert(MI->getOpcode() == TargetOpcode::G_ADDRSPACE_CAST);
1275 if (!SrcTy.isPointer() || !DstTy.isPointer())
1276 report("addrspacecast types must be pointers", MI);
1277 else {
1278 if (SrcTy.getAddressSpace() == DstTy.getAddressSpace())
1279 report("addrspacecast must convert different address spaces", MI);
1280 }
1281 }
1282
1283 break;
1284 }
1285 case TargetOpcode::G_PTR_ADD: {
1286 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1287 LLT PtrTy = MRI->getType(MI->getOperand(1).getReg());
1288 LLT OffsetTy = MRI->getType(MI->getOperand(2).getReg());
1289 if (!DstTy.isValid() || !PtrTy.isValid() || !OffsetTy.isValid())
1290 break;
1291
1292 if (!PtrTy.getScalarType().isPointer())
1293 report("gep first operand must be a pointer", MI);
1294
1295 if (OffsetTy.getScalarType().isPointer())
1296 report("gep offset operand must not be a pointer", MI);
1297
1298 // TODO: Is the offset allowed to be a scalar with a vector?
1299 break;
1300 }
1301 case TargetOpcode::G_PTRMASK: {
1302 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1303 LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
1304 LLT MaskTy = MRI->getType(MI->getOperand(2).getReg());
1305 if (!DstTy.isValid() || !SrcTy.isValid() || !MaskTy.isValid())
1306 break;
1307
1308 if (!DstTy.getScalarType().isPointer())
1309 report("ptrmask result type must be a pointer", MI);
1310
1311 if (!MaskTy.getScalarType().isScalar())
1312 report("ptrmask mask type must be an integer", MI);
1313
1314 verifyVectorElementMatch(DstTy, MaskTy, MI);
1315 break;
1316 }
1317 case TargetOpcode::G_SEXT:
1318 case TargetOpcode::G_ZEXT:
1319 case TargetOpcode::G_ANYEXT:
1320 case TargetOpcode::G_TRUNC:
1321 case TargetOpcode::G_FPEXT:
1322 case TargetOpcode::G_FPTRUNC: {
1323 // Number of operands and presense of types is already checked (and
1324 // reported in case of any issues), so no need to report them again. As
1325 // we're trying to report as many issues as possible at once, however, the
1326 // instructions aren't guaranteed to have the right number of operands or
1327 // types attached to them at this point
1328 assert(MCID.getNumOperands() == 2 && "Expected 2 operands G_*{EXT,TRUNC}");
1329 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1330 LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
1331 if (!DstTy.isValid() || !SrcTy.isValid())
1332 break;
1333
1334 LLT DstElTy = DstTy.getScalarType();
1335 LLT SrcElTy = SrcTy.getScalarType();
1336 if (DstElTy.isPointer() || SrcElTy.isPointer())
1337 report("Generic extend/truncate can not operate on pointers", MI);
1338
1339 verifyVectorElementMatch(DstTy, SrcTy, MI);
1340
1341 unsigned DstSize = DstElTy.getSizeInBits();
1342 unsigned SrcSize = SrcElTy.getSizeInBits();
1343 switch (MI->getOpcode()) {
1344 default:
1345 if (DstSize <= SrcSize)
1346 report("Generic extend has destination type no larger than source", MI);
1347 break;
1348 case TargetOpcode::G_TRUNC:
1349 case TargetOpcode::G_FPTRUNC:
1350 if (DstSize >= SrcSize)
1351 report("Generic truncate has destination type no smaller than source",
1352 MI);
1353 break;
1354 }
1355 break;
1356 }
1357 case TargetOpcode::G_SELECT: {
1358 LLT SelTy = MRI->getType(MI->getOperand(0).getReg());
1359 LLT CondTy = MRI->getType(MI->getOperand(1).getReg());
1360 if (!SelTy.isValid() || !CondTy.isValid())
1361 break;
1362
1363 // Scalar condition select on a vector is valid.
1364 if (CondTy.isVector())
1365 verifyVectorElementMatch(SelTy, CondTy, MI);
1366 break;
1367 }
1368 case TargetOpcode::G_MERGE_VALUES: {
1369 // G_MERGE_VALUES should only be used to merge scalars into a larger scalar,
1370 // e.g. s2N = MERGE sN, sN
1371 // Merging multiple scalars into a vector is not allowed, should use
1372 // G_BUILD_VECTOR for that.
1373 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1374 LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
1375 if (DstTy.isVector() || SrcTy.isVector())
1376 report("G_MERGE_VALUES cannot operate on vectors", MI);
1377
1378 const unsigned NumOps = MI->getNumOperands();
1379 if (DstTy.getSizeInBits() != SrcTy.getSizeInBits() * (NumOps - 1))
1380 report("G_MERGE_VALUES result size is inconsistent", MI);
1381
1382 for (unsigned I = 2; I != NumOps; ++I) {
1383 if (MRI->getType(MI->getOperand(I).getReg()) != SrcTy)
1384 report("G_MERGE_VALUES source types do not match", MI);
1385 }
1386
1387 break;
1388 }
1389 case TargetOpcode::G_UNMERGE_VALUES: {
1390 unsigned NumDsts = MI->getNumOperands() - 1;
1391 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1392 for (unsigned i = 1; i < NumDsts; ++i) {
1393 if (MRI->getType(MI->getOperand(i).getReg()) != DstTy) {
1394 report("G_UNMERGE_VALUES destination types do not match", MI);
1395 break;
1396 }
1397 }
1398
1399 LLT SrcTy = MRI->getType(MI->getOperand(NumDsts).getReg());
1400 if (DstTy.isVector()) {
1401 // This case is the converse of G_CONCAT_VECTORS.
1402 if (!SrcTy.isVector() || SrcTy.getScalarType() != DstTy.getScalarType() ||
1403 SrcTy.getNumElements() != NumDsts * DstTy.getNumElements())
1404 report("G_UNMERGE_VALUES source operand does not match vector "
1405 "destination operands",
1406 MI);
1407 } else if (SrcTy.isVector()) {
1408 // This case is the converse of G_BUILD_VECTOR, but relaxed to allow
1409 // mismatched types as long as the total size matches:
1410 // %0:_(s64), %1:_(s64) = G_UNMERGE_VALUES %2:_(<4 x s32>)
1411 if (SrcTy.getSizeInBits() != NumDsts * DstTy.getSizeInBits())
1412 report("G_UNMERGE_VALUES vector source operand does not match scalar "
1413 "destination operands",
1414 MI);
1415 } else {
1416 // This case is the converse of G_MERGE_VALUES.
1417 if (SrcTy.getSizeInBits() != NumDsts * DstTy.getSizeInBits()) {
1418 report("G_UNMERGE_VALUES scalar source operand does not match scalar "
1419 "destination operands",
1420 MI);
1421 }
1422 }
1423 break;
1424 }
1425 case TargetOpcode::G_BUILD_VECTOR: {
1426 // Source types must be scalars, dest type a vector. Total size of scalars
1427 // must match the dest vector size.
1428 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1429 LLT SrcEltTy = MRI->getType(MI->getOperand(1).getReg());
1430 if (!DstTy.isVector() || SrcEltTy.isVector()) {
1431 report("G_BUILD_VECTOR must produce a vector from scalar operands", MI);
1432 break;
1433 }
1434
1435 if (DstTy.getElementType() != SrcEltTy)
1436 report("G_BUILD_VECTOR result element type must match source type", MI);
1437
1438 if (DstTy.getNumElements() != MI->getNumOperands() - 1)
1439 report("G_BUILD_VECTOR must have an operand for each elemement", MI);
1440
1441 for (const MachineOperand &MO : llvm::drop_begin(MI->operands(), 2))
1442 if (MRI->getType(MI->getOperand(1).getReg()) != MRI->getType(MO.getReg()))
1443 report("G_BUILD_VECTOR source operand types are not homogeneous", MI);
1444
1445 break;
1446 }
1447 case TargetOpcode::G_BUILD_VECTOR_TRUNC: {
1448 // Source types must be scalars, dest type a vector. Scalar types must be
1449 // larger than the dest vector elt type, as this is a truncating operation.
1450 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1451 LLT SrcEltTy = MRI->getType(MI->getOperand(1).getReg());
1452 if (!DstTy.isVector() || SrcEltTy.isVector())
1453 report("G_BUILD_VECTOR_TRUNC must produce a vector from scalar operands",
1454 MI);
1455 for (const MachineOperand &MO : llvm::drop_begin(MI->operands(), 2))
1456 if (MRI->getType(MI->getOperand(1).getReg()) != MRI->getType(MO.getReg()))
1457 report("G_BUILD_VECTOR_TRUNC source operand types are not homogeneous",
1458 MI);
1459 if (SrcEltTy.getSizeInBits() <= DstTy.getElementType().getSizeInBits())
1460 report("G_BUILD_VECTOR_TRUNC source operand types are not larger than "
1461 "dest elt type",
1462 MI);
1463 break;
1464 }
1465 case TargetOpcode::G_CONCAT_VECTORS: {
1466 // Source types should be vectors, and total size should match the dest
1467 // vector size.
1468 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1469 LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
1470 if (!DstTy.isVector() || !SrcTy.isVector())
1471 report("G_CONCAT_VECTOR requires vector source and destination operands",
1472 MI);
1473
1474 if (MI->getNumOperands() < 3)
1475 report("G_CONCAT_VECTOR requires at least 2 source operands", MI);
1476
1477 for (const MachineOperand &MO : llvm::drop_begin(MI->operands(), 2))
1478 if (MRI->getType(MI->getOperand(1).getReg()) != MRI->getType(MO.getReg()))
1479 report("G_CONCAT_VECTOR source operand types are not homogeneous", MI);
1480 if (DstTy.getNumElements() !=
1481 SrcTy.getNumElements() * (MI->getNumOperands() - 1))
1482 report("G_CONCAT_VECTOR num dest and source elements should match", MI);
1483 break;
1484 }
1485 case TargetOpcode::G_ICMP:
1486 case TargetOpcode::G_FCMP: {
1487 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1488 LLT SrcTy = MRI->getType(MI->getOperand(2).getReg());
1489
1490 if ((DstTy.isVector() != SrcTy.isVector()) ||
1491 (DstTy.isVector() && DstTy.getNumElements() != SrcTy.getNumElements()))
1492 report("Generic vector icmp/fcmp must preserve number of lanes", MI);
1493
1494 break;
1495 }
1496 case TargetOpcode::G_EXTRACT: {
1497 const MachineOperand &SrcOp = MI->getOperand(1);
1498 if (!SrcOp.isReg()) {
1499 report("extract source must be a register", MI);
1500 break;
1501 }
1502
1503 const MachineOperand &OffsetOp = MI->getOperand(2);
1504 if (!OffsetOp.isImm()) {
1505 report("extract offset must be a constant", MI);
1506 break;
1507 }
1508
1509 unsigned DstSize = MRI->getType(MI->getOperand(0).getReg()).getSizeInBits();
1510 unsigned SrcSize = MRI->getType(SrcOp.getReg()).getSizeInBits();
1511 if (SrcSize == DstSize)
1512 report("extract source must be larger than result", MI);
1513
1514 if (DstSize + OffsetOp.getImm() > SrcSize)
1515 report("extract reads past end of register", MI);
1516 break;
1517 }
1518 case TargetOpcode::G_INSERT: {
1519 const MachineOperand &SrcOp = MI->getOperand(2);
1520 if (!SrcOp.isReg()) {
1521 report("insert source must be a register", MI);
1522 break;
1523 }
1524
1525 const MachineOperand &OffsetOp = MI->getOperand(3);
1526 if (!OffsetOp.isImm()) {
1527 report("insert offset must be a constant", MI);
1528 break;
1529 }
1530
1531 unsigned DstSize = MRI->getType(MI->getOperand(0).getReg()).getSizeInBits();
1532 unsigned SrcSize = MRI->getType(SrcOp.getReg()).getSizeInBits();
1533
1534 if (DstSize <= SrcSize)
1535 report("inserted size must be smaller than total register", MI);
1536
1537 if (SrcSize + OffsetOp.getImm() > DstSize)
1538 report("insert writes past end of register", MI);
1539
1540 break;
1541 }
1542 case TargetOpcode::G_JUMP_TABLE: {
1543 if (!MI->getOperand(1).isJTI())
1544 report("G_JUMP_TABLE source operand must be a jump table index", MI);
1545 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1546 if (!DstTy.isPointer())
1547 report("G_JUMP_TABLE dest operand must have a pointer type", MI);
1548 break;
1549 }
1550 case TargetOpcode::G_BRJT: {
1551 if (!MRI->getType(MI->getOperand(0).getReg()).isPointer())
1552 report("G_BRJT src operand 0 must be a pointer type", MI);
1553
1554 if (!MI->getOperand(1).isJTI())
1555 report("G_BRJT src operand 1 must be a jump table index", MI);
1556
1557 const auto &IdxOp = MI->getOperand(2);
1558 if (!IdxOp.isReg() || MRI->getType(IdxOp.getReg()).isPointer())
1559 report("G_BRJT src operand 2 must be a scalar reg type", MI);
1560 break;
1561 }
1562 case TargetOpcode::G_INTRINSIC:
1563 case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
1564 case TargetOpcode::G_INTRINSIC_CONVERGENT:
1565 case TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS: {
1566 // TODO: Should verify number of def and use operands, but the current
1567 // interface requires passing in IR types for mangling.
1568 const MachineOperand &IntrIDOp = MI->getOperand(MI->getNumExplicitDefs());
1569 if (!IntrIDOp.isIntrinsicID()) {
1570 report("G_INTRINSIC first src operand must be an intrinsic ID", MI);
1571 break;
1572 }
1573
1574 if (!verifyGIntrinsicSideEffects(MI))
1575 break;
1576 if (!verifyGIntrinsicConvergence(MI))
1577 break;
1578
1579 break;
1580 }
1581 case TargetOpcode::G_SEXT_INREG: {
1582 if (!MI->getOperand(2).isImm()) {
1583 report("G_SEXT_INREG expects an immediate operand #2", MI);
1584 break;
1585 }
1586
1587 LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
1588 int64_t Imm = MI->getOperand(2).getImm();
1589 if (Imm <= 0)
1590 report("G_SEXT_INREG size must be >= 1", MI);
1591 if (Imm >= SrcTy.getScalarSizeInBits())
1592 report("G_SEXT_INREG size must be less than source bit width", MI);
1593 break;
1594 }
1595 case TargetOpcode::G_SHUFFLE_VECTOR: {
1596 const MachineOperand &MaskOp = MI->getOperand(3);
1597 if (!MaskOp.isShuffleMask()) {
1598 report("Incorrect mask operand type for G_SHUFFLE_VECTOR", MI);
1599 break;
1600 }
1601
1602 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1603 LLT Src0Ty = MRI->getType(MI->getOperand(1).getReg());
1604 LLT Src1Ty = MRI->getType(MI->getOperand(2).getReg());
1605
1606 if (Src0Ty != Src1Ty)
1607 report("Source operands must be the same type", MI);
1608
1609 if (Src0Ty.getScalarType() != DstTy.getScalarType())
1610 report("G_SHUFFLE_VECTOR cannot change element type", MI);
1611
1612 // Don't check that all operands are vector because scalars are used in
1613 // place of 1 element vectors.
1614 int SrcNumElts = Src0Ty.isVector() ? Src0Ty.getNumElements() : 1;
1615 int DstNumElts = DstTy.isVector() ? DstTy.getNumElements() : 1;
1616
1617 ArrayRef<int> MaskIdxes = MaskOp.getShuffleMask();
1618
1619 if (static_cast<int>(MaskIdxes.size()) != DstNumElts)
1620 report("Wrong result type for shufflemask", MI);
1621
1622 for (int Idx : MaskIdxes) {
1623 if (Idx < 0)
1624 continue;
1625
1626 if (Idx >= 2 * SrcNumElts)
1627 report("Out of bounds shuffle index", MI);
1628 }
1629
1630 break;
1631 }
1632 case TargetOpcode::G_DYN_STACKALLOC: {
1633 const MachineOperand &DstOp = MI->getOperand(0);
1634 const MachineOperand &AllocOp = MI->getOperand(1);
1635 const MachineOperand &AlignOp = MI->getOperand(2);
1636
1637 if (!DstOp.isReg() || !MRI->getType(DstOp.getReg()).isPointer()) {
1638 report("dst operand 0 must be a pointer type", MI);
1639 break;
1640 }
1641
1642 if (!AllocOp.isReg() || !MRI->getType(AllocOp.getReg()).isScalar()) {
1643 report("src operand 1 must be a scalar reg type", MI);
1644 break;
1645 }
1646
1647 if (!AlignOp.isImm()) {
1648 report("src operand 2 must be an immediate type", MI);
1649 break;
1650 }
1651 break;
1652 }
1653 case TargetOpcode::G_MEMCPY_INLINE:
1654 case TargetOpcode::G_MEMCPY:
1655 case TargetOpcode::G_MEMMOVE: {
1656 ArrayRef<MachineMemOperand *> MMOs = MI->memoperands();
1657 if (MMOs.size() != 2) {
1658 report("memcpy/memmove must have 2 memory operands", MI);
1659 break;
1660 }
1661
1662 if ((!MMOs[0]->isStore() || MMOs[0]->isLoad()) ||
1663 (MMOs[1]->isStore() || !MMOs[1]->isLoad())) {
1664 report("wrong memory operand types", MI);
1665 break;
1666 }
1667
1668 if (MMOs[0]->getSize() != MMOs[1]->getSize())
1669 report("inconsistent memory operand sizes", MI);
1670
1671 LLT DstPtrTy = MRI->getType(MI->getOperand(0).getReg());
1672 LLT SrcPtrTy = MRI->getType(MI->getOperand(1).getReg());
1673
1674 if (!DstPtrTy.isPointer() || !SrcPtrTy.isPointer()) {
1675 report("memory instruction operand must be a pointer", MI);
1676 break;
1677 }
1678
1679 if (DstPtrTy.getAddressSpace() != MMOs[0]->getAddrSpace())
1680 report("inconsistent store address space", MI);
1681 if (SrcPtrTy.getAddressSpace() != MMOs[1]->getAddrSpace())
1682 report("inconsistent load address space", MI);
1683
1684 if (Opc != TargetOpcode::G_MEMCPY_INLINE)
1685 if (!MI->getOperand(3).isImm() || (MI->getOperand(3).getImm() & ~1LL))
1686 report("'tail' flag (operand 3) must be an immediate 0 or 1", MI);
1687
1688 break;
1689 }
1690 case TargetOpcode::G_BZERO:
1691 case TargetOpcode::G_MEMSET: {
1692 ArrayRef<MachineMemOperand *> MMOs = MI->memoperands();
1693 std::string Name = Opc == TargetOpcode::G_MEMSET ? "memset" : "bzero";
1694 if (MMOs.size() != 1) {
1695 report(Twine(Name, " must have 1 memory operand"), MI);
1696 break;
1697 }
1698
1699 if ((!MMOs[0]->isStore() || MMOs[0]->isLoad())) {
1700 report(Twine(Name, " memory operand must be a store"), MI);
1701 break;
1702 }
1703
1704 LLT DstPtrTy = MRI->getType(MI->getOperand(0).getReg());
1705 if (!DstPtrTy.isPointer()) {
1706 report(Twine(Name, " operand must be a pointer"), MI);
1707 break;
1708 }
1709
1710 if (DstPtrTy.getAddressSpace() != MMOs[0]->getAddrSpace())
1711 report("inconsistent " + Twine(Name, " address space"), MI);
1712
1713 if (!MI->getOperand(MI->getNumOperands() - 1).isImm() ||
1714 (MI->getOperand(MI->getNumOperands() - 1).getImm() & ~1LL))
1715 report("'tail' flag (last operand) must be an immediate 0 or 1", MI);
1716
1717 break;
1718 }
1719 case TargetOpcode::G_VECREDUCE_SEQ_FADD:
1720 case TargetOpcode::G_VECREDUCE_SEQ_FMUL: {
1721 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1722 LLT Src1Ty = MRI->getType(MI->getOperand(1).getReg());
1723 LLT Src2Ty = MRI->getType(MI->getOperand(2).getReg());
1724 if (!DstTy.isScalar())
1725 report("Vector reduction requires a scalar destination type", MI);
1726 if (!Src1Ty.isScalar())
1727 report("Sequential FADD/FMUL vector reduction requires a scalar 1st operand", MI);
1728 if (!Src2Ty.isVector())
1729 report("Sequential FADD/FMUL vector reduction must have a vector 2nd operand", MI);
1730 break;
1731 }
1732 case TargetOpcode::G_VECREDUCE_FADD:
1733 case TargetOpcode::G_VECREDUCE_FMUL:
1734 case TargetOpcode::G_VECREDUCE_FMAX:
1735 case TargetOpcode::G_VECREDUCE_FMIN:
1736 case TargetOpcode::G_VECREDUCE_FMAXIMUM:
1737 case TargetOpcode::G_VECREDUCE_FMINIMUM:
1738 case TargetOpcode::G_VECREDUCE_ADD:
1739 case TargetOpcode::G_VECREDUCE_MUL:
1740 case TargetOpcode::G_VECREDUCE_AND:
1741 case TargetOpcode::G_VECREDUCE_OR:
1742 case TargetOpcode::G_VECREDUCE_XOR:
1743 case TargetOpcode::G_VECREDUCE_SMAX:
1744 case TargetOpcode::G_VECREDUCE_SMIN:
1745 case TargetOpcode::G_VECREDUCE_UMAX:
1746 case TargetOpcode::G_VECREDUCE_UMIN: {
1747 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1748 if (!DstTy.isScalar())
1749 report("Vector reduction requires a scalar destination type", MI);
1750 break;
1751 }
1752
1753 case TargetOpcode::G_SBFX:
1754 case TargetOpcode::G_UBFX: {
1755 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1756 if (DstTy.isVector()) {
1757 report("Bitfield extraction is not supported on vectors", MI);
1758 break;
1759 }
1760 break;
1761 }
1762 case TargetOpcode::G_SHL:
1763 case TargetOpcode::G_LSHR:
1764 case TargetOpcode::G_ASHR:
1765 case TargetOpcode::G_ROTR:
1766 case TargetOpcode::G_ROTL: {
1767 LLT Src1Ty = MRI->getType(MI->getOperand(1).getReg());
1768 LLT Src2Ty = MRI->getType(MI->getOperand(2).getReg());
1769 if (Src1Ty.isVector() != Src2Ty.isVector()) {
1770 report("Shifts and rotates require operands to be either all scalars or "
1771 "all vectors",
1772 MI);
1773 break;
1774 }
1775 break;
1776 }
1777 case TargetOpcode::G_LLROUND:
1778 case TargetOpcode::G_LROUND: {
1779 verifyAllRegOpsScalar(*MI, *MRI);
1780 break;
1781 }
1782 case TargetOpcode::G_IS_FPCLASS: {
1783 LLT DestTy = MRI->getType(MI->getOperand(0).getReg());
1784 LLT DestEltTy = DestTy.getScalarType();
1785 if (!DestEltTy.isScalar()) {
1786 report("Destination must be a scalar or vector of scalars", MI);
1787 break;
1788 }
1789 LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
1790 LLT SrcEltTy = SrcTy.getScalarType();
1791 if (!SrcEltTy.isScalar()) {
1792 report("Source must be a scalar or vector of scalars", MI);
1793 break;
1794 }
1795 if (!verifyVectorElementMatch(DestTy, SrcTy, MI))
1796 break;
1797 const MachineOperand &TestMO = MI->getOperand(2);
1798 if (!TestMO.isImm()) {
1799 report("floating-point class set (operand 2) must be an immediate", MI);
1800 break;
1801 }
1802 int64_t Test = TestMO.getImm();
1803 if (Test < 0 || Test > fcAllFlags) {
1804 report("Incorrect floating-point class set (operand 2)", MI);
1805 break;
1806 }
1807 break;
1808 }
1809 case TargetOpcode::G_ASSERT_ALIGN: {
1810 if (MI->getOperand(2).getImm() < 1)
1811 report("alignment immediate must be >= 1", MI);
1812 break;
1813 }
1814 case TargetOpcode::G_CONSTANT_POOL: {
1815 if (!MI->getOperand(1).isCPI())
1816 report("Src operand 1 must be a constant pool index", MI);
1817 if (!MRI->getType(MI->getOperand(0).getReg()).isPointer())
1818 report("Dst operand 0 must be a pointer", MI);
1819 break;
1820 }
1821 default:
1822 break;
1823 }
1824}
1825
1826void MachineVerifier::visitMachineInstrBefore(const MachineInstr *MI) {
1827 const MCInstrDesc &MCID = MI->getDesc();
1828 if (MI->getNumOperands() < MCID.getNumOperands()) {
1829 report("Too few operands", MI);
1830 errs() << MCID.getNumOperands() << " operands expected, but "
1831 << MI->getNumOperands() << " given.\n";
1832 }
1833
1834 if (MI->getFlag(MachineInstr::NoConvergent) && !MCID.isConvergent())
1835 report("NoConvergent flag expected only on convergent instructions.", MI);
1836
1837 if (MI->isPHI()) {
1838 if (MF->getProperties().hasProperty(
1840 report("Found PHI instruction with NoPHIs property set", MI);
1841
1842 if (FirstNonPHI)
1843 report("Found PHI instruction after non-PHI", MI);
1844 } else if (FirstNonPHI == nullptr)
1845 FirstNonPHI = MI;
1846
1847 // Check the tied operands.
1848 if (MI->isInlineAsm())
1849 verifyInlineAsm(MI);
1850
1851 // Check that unspillable terminators define a reg and have at most one use.
1852 if (TII->isUnspillableTerminator(MI)) {
1853 if (!MI->getOperand(0).isReg() || !MI->getOperand(0).isDef())
1854 report("Unspillable Terminator does not define a reg", MI);
1855 Register Def = MI->getOperand(0).getReg();
1856 if (Def.isVirtual() &&
1857 !MF->getProperties().hasProperty(
1859 std::distance(MRI->use_nodbg_begin(Def), MRI->use_nodbg_end()) > 1)
1860 report("Unspillable Terminator expected to have at most one use!", MI);
1861 }
1862
1863 // A fully-formed DBG_VALUE must have a location. Ignore partially formed
1864 // DBG_VALUEs: these are convenient to use in tests, but should never get
1865 // generated.
1866 if (MI->isDebugValue() && MI->getNumOperands() == 4)
1867 if (!MI->getDebugLoc())
1868 report("Missing DebugLoc for debug instruction", MI);
1869
1870 // Meta instructions should never be the subject of debug value tracking,
1871 // they don't create a value in the output program at all.
1872 if (MI->isMetaInstruction() && MI->peekDebugInstrNum())
1873 report("Metadata instruction should not have a value tracking number", MI);
1874
1875 // Check the MachineMemOperands for basic consistency.
1876 for (MachineMemOperand *Op : MI->memoperands()) {
1877 if (Op->isLoad() && !MI->mayLoad())
1878 report("Missing mayLoad flag", MI);
1879 if (Op->isStore() && !MI->mayStore())
1880 report("Missing mayStore flag", MI);
1881 }
1882
1883 // Debug values must not have a slot index.
1884 // Other instructions must have one, unless they are inside a bundle.
1885 if (LiveInts) {
1886 bool mapped = !LiveInts->isNotInMIMap(*MI);
1887 if (MI->isDebugOrPseudoInstr()) {
1888 if (mapped)
1889 report("Debug instruction has a slot index", MI);
1890 } else if (MI->isInsideBundle()) {
1891 if (mapped)
1892 report("Instruction inside bundle has a slot index", MI);
1893 } else {
1894 if (!mapped)
1895 report("Missing slot index", MI);
1896 }
1897 }
1898
1899 unsigned Opc = MCID.getOpcode();
1901 verifyPreISelGenericInstruction(MI);
1902 return;
1903 }
1904
1906 if (!TII->verifyInstruction(*MI, ErrorInfo))
1907 report(ErrorInfo.data(), MI);
1908
1909 // Verify properties of various specific instruction types
1910 switch (MI->getOpcode()) {
1911 case TargetOpcode::COPY: {
1912 const MachineOperand &DstOp = MI->getOperand(0);
1913 const MachineOperand &SrcOp = MI->getOperand(1);
1914 const Register SrcReg = SrcOp.getReg();
1915 const Register DstReg = DstOp.getReg();
1916
1917 LLT DstTy = MRI->getType(DstReg);
1918 LLT SrcTy = MRI->getType(SrcReg);
1919 if (SrcTy.isValid() && DstTy.isValid()) {
1920 // If both types are valid, check that the types are the same.
1921 if (SrcTy != DstTy) {
1922 report("Copy Instruction is illegal with mismatching types", MI);
1923 errs() << "Def = " << DstTy << ", Src = " << SrcTy << "\n";
1924 }
1925
1926 break;
1927 }
1928
1929 if (!SrcTy.isValid() && !DstTy.isValid())
1930 break;
1931
1932 // If we have only one valid type, this is likely a copy between a virtual
1933 // and physical register.
1934 unsigned SrcSize = 0;
1935 unsigned DstSize = 0;
1936 if (SrcReg.isPhysical() && DstTy.isValid()) {
1937 const TargetRegisterClass *SrcRC =
1938 TRI->getMinimalPhysRegClassLLT(SrcReg, DstTy);
1939 if (SrcRC)
1940 SrcSize = TRI->getRegSizeInBits(*SrcRC);
1941 }
1942
1943 if (SrcSize == 0)
1944 SrcSize = TRI->getRegSizeInBits(SrcReg, *MRI);
1945
1946 if (DstReg.isPhysical() && SrcTy.isValid()) {
1947 const TargetRegisterClass *DstRC =
1948 TRI->getMinimalPhysRegClassLLT(DstReg, SrcTy);
1949 if (DstRC)
1950 DstSize = TRI->getRegSizeInBits(*DstRC);
1951 }
1952
1953 if (DstSize == 0)
1954 DstSize = TRI->getRegSizeInBits(DstReg, *MRI);
1955
1956 if (SrcSize != 0 && DstSize != 0 && SrcSize != DstSize) {
1957 if (!DstOp.getSubReg() && !SrcOp.getSubReg()) {
1958 report("Copy Instruction is illegal with mismatching sizes", MI);
1959 errs() << "Def Size = " << DstSize << ", Src Size = " << SrcSize
1960 << "\n";
1961 }
1962 }
1963 break;
1964 }
1965 case TargetOpcode::STATEPOINT: {
1966 StatepointOpers SO(MI);
1967 if (!MI->getOperand(SO.getIDPos()).isImm() ||
1968 !MI->getOperand(SO.getNBytesPos()).isImm() ||
1969 !MI->getOperand(SO.getNCallArgsPos()).isImm()) {
1970 report("meta operands to STATEPOINT not constant!", MI);
1971 break;
1972 }
1973
1974 auto VerifyStackMapConstant = [&](unsigned Offset) {
1975 if (Offset >= MI->getNumOperands()) {
1976 report("stack map constant to STATEPOINT is out of range!", MI);
1977 return;
1978 }
1979 if (!MI->getOperand(Offset - 1).isImm() ||
1980 MI->getOperand(Offset - 1).getImm() != StackMaps::ConstantOp ||
1981 !MI->getOperand(Offset).isImm())
1982 report("stack map constant to STATEPOINT not well formed!", MI);
1983 };
1984 VerifyStackMapConstant(SO.getCCIdx());
1985 VerifyStackMapConstant(SO.getFlagsIdx());
1986 VerifyStackMapConstant(SO.getNumDeoptArgsIdx());
1987 VerifyStackMapConstant(SO.getNumGCPtrIdx());
1988 VerifyStackMapConstant(SO.getNumAllocaIdx());
1989 VerifyStackMapConstant(SO.getNumGcMapEntriesIdx());
1990
1991 // Verify that all explicit statepoint defs are tied to gc operands as
1992 // they are expected to be a relocation of gc operands.
1993 unsigned FirstGCPtrIdx = SO.getFirstGCPtrIdx();
1994 unsigned LastGCPtrIdx = SO.getNumAllocaIdx() - 2;
1995 for (unsigned Idx = 0; Idx < MI->getNumDefs(); Idx++) {
1996 unsigned UseOpIdx;
1997 if (!MI->isRegTiedToUseOperand(Idx, &UseOpIdx)) {
1998 report("STATEPOINT defs expected to be tied", MI);
1999 break;
2000 }
2001 if (UseOpIdx < FirstGCPtrIdx || UseOpIdx > LastGCPtrIdx) {
2002 report("STATEPOINT def tied to non-gc operand", MI);
2003 break;
2004 }
2005 }
2006
2007 // TODO: verify we have properly encoded deopt arguments
2008 } break;
2009 case TargetOpcode::INSERT_SUBREG: {
2010 unsigned InsertedSize;
2011 if (unsigned SubIdx = MI->getOperand(2).getSubReg())
2012 InsertedSize = TRI->getSubRegIdxSize(SubIdx);
2013 else
2014 InsertedSize = TRI->getRegSizeInBits(MI->getOperand(2).getReg(), *MRI);
2015 unsigned SubRegSize = TRI->getSubRegIdxSize(MI->getOperand(3).getImm());
2016 if (SubRegSize < InsertedSize) {
2017 report("INSERT_SUBREG expected inserted value to have equal or lesser "
2018 "size than the subreg it was inserted into", MI);
2019 break;
2020 }
2021 } break;
2022 case TargetOpcode::REG_SEQUENCE: {
2023 unsigned NumOps = MI->getNumOperands();
2024 if (!(NumOps & 1)) {
2025 report("Invalid number of operands for REG_SEQUENCE", MI);
2026 break;
2027 }
2028
2029 for (unsigned I = 1; I != NumOps; I += 2) {
2030 const MachineOperand &RegOp = MI->getOperand(I);
2031 const MachineOperand &SubRegOp = MI->getOperand(I + 1);
2032
2033 if (!RegOp.isReg())
2034 report("Invalid register operand for REG_SEQUENCE", &RegOp, I);
2035
2036 if (!SubRegOp.isImm() || SubRegOp.getImm() == 0 ||
2037 SubRegOp.getImm() >= TRI->getNumSubRegIndices()) {
2038 report("Invalid subregister index operand for REG_SEQUENCE",
2039 &SubRegOp, I + 1);
2040 }
2041 }
2042
2043 Register DstReg = MI->getOperand(0).getReg();
2044 if (DstReg.isPhysical())
2045 report("REG_SEQUENCE does not support physical register results", MI);
2046
2047 if (MI->getOperand(0).getSubReg())
2048 report("Invalid subreg result for REG_SEQUENCE", MI);
2049
2050 break;
2051 }
2052 }
2053}
2054
2055void
2056MachineVerifier::visitMachineOperand(const MachineOperand *MO, unsigned MONum) {
2057 const MachineInstr *MI = MO->getParent();
2058 const MCInstrDesc &MCID = MI->getDesc();
2059 unsigned NumDefs = MCID.getNumDefs();
2060 if (MCID.getOpcode() == TargetOpcode::PATCHPOINT)
2061 NumDefs = (MONum == 0 && MO->isReg()) ? NumDefs : 0;
2062
2063 // The first MCID.NumDefs operands must be explicit register defines
2064 if (MONum < NumDefs) {
2065 const MCOperandInfo &MCOI = MCID.operands()[MONum];
2066 if (!MO->isReg())
2067 report("Explicit definition must be a register", MO, MONum);
2068 else if (!MO->isDef() && !MCOI.isOptionalDef())
2069 report("Explicit definition marked as use", MO, MONum);
2070 else if (MO->isImplicit())
2071 report("Explicit definition marked as implicit", MO, MONum);
2072 } else if (MONum < MCID.getNumOperands()) {
2073 const MCOperandInfo &MCOI = MCID.operands()[MONum];
2074 // Don't check if it's the last operand in a variadic instruction. See,
2075 // e.g., LDM_RET in the arm back end. Check non-variadic operands only.
2076 bool IsOptional = MI->isVariadic() && MONum == MCID.getNumOperands() - 1;
2077 if (!IsOptional) {
2078 if (MO->isReg()) {
2079 if (MO->isDef() && !MCOI.isOptionalDef() && !MCID.variadicOpsAreDefs())
2080 report("Explicit operand marked as def", MO, MONum);
2081 if (MO->isImplicit())
2082 report("Explicit operand marked as implicit", MO, MONum);
2083 }
2084
2085 // Check that an instruction has register operands only as expected.
2086 if (MCOI.OperandType == MCOI::OPERAND_REGISTER &&
2087 !MO->isReg() && !MO->isFI())
2088 report("Expected a register operand.", MO, MONum);
2089 if (MO->isReg()) {
2092 !TII->isPCRelRegisterOperandLegal(*MO)))
2093 report("Expected a non-register operand.", MO, MONum);
2094 }
2095 }
2096
2097 int TiedTo = MCID.getOperandConstraint(MONum, MCOI::TIED_TO);
2098 if (TiedTo != -1) {
2099 if (!MO->isReg())
2100 report("Tied use must be a register", MO, MONum);
2101 else if (!MO->isTied())
2102 report("Operand should be tied", MO, MONum);
2103 else if (unsigned(TiedTo) != MI->findTiedOperandIdx(MONum))
2104 report("Tied def doesn't match MCInstrDesc", MO, MONum);
2105 else if (MO->getReg().isPhysical()) {
2106 const MachineOperand &MOTied = MI->getOperand(TiedTo);
2107 if (!MOTied.isReg())
2108 report("Tied counterpart must be a register", &MOTied, TiedTo);
2109 else if (MOTied.getReg().isPhysical() &&
2110 MO->getReg() != MOTied.getReg())
2111 report("Tied physical registers must match.", &MOTied, TiedTo);
2112 }
2113 } else if (MO->isReg() && MO->isTied())
2114 report("Explicit operand should not be tied", MO, MONum);
2115 } else {
2116 // ARM adds %reg0 operands to indicate predicates. We'll allow that.
2117 if (MO->isReg() && !MO->isImplicit() && !MI->isVariadic() && MO->getReg())
2118 report("Extra explicit operand on non-variadic instruction", MO, MONum);
2119 }
2120
2121 switch (MO->getType()) {
2123 // Verify debug flag on debug instructions. Check this first because reg0
2124 // indicates an undefined debug value.
2125 if (MI->isDebugInstr() && MO->isUse()) {
2126 if (!MO->isDebug())
2127 report("Register operand must be marked debug", MO, MONum);
2128 } else if (MO->isDebug()) {
2129 report("Register operand must not be marked debug", MO, MONum);
2130 }
2131
2132 const Register Reg = MO->getReg();
2133 if (!Reg)
2134 return;
2135 if (MRI->tracksLiveness() && !MI->isDebugInstr())
2136 checkLiveness(MO, MONum);
2137
2138 if (MO->isDef() && MO->isUndef() && !MO->getSubReg() &&
2139 MO->getReg().isVirtual()) // TODO: Apply to physregs too
2140 report("Undef virtual register def operands require a subregister", MO, MONum);
2141
2142 // Verify the consistency of tied operands.
2143 if (MO->isTied()) {
2144 unsigned OtherIdx = MI->findTiedOperandIdx(MONum);
2145 const MachineOperand &OtherMO = MI->getOperand(OtherIdx);
2146 if (!OtherMO.isReg())
2147 report("Must be tied to a register", MO, MONum);
2148 if (!OtherMO.isTied())
2149 report("Missing tie flags on tied operand", MO, MONum);
2150 if (MI->findTiedOperandIdx(OtherIdx) != MONum)
2151 report("Inconsistent tie links", MO, MONum);
2152 if (MONum < MCID.getNumDefs()) {
2153 if (OtherIdx < MCID.getNumOperands()) {
2154 if (-1 == MCID.getOperandConstraint(OtherIdx, MCOI::TIED_TO))
2155 report("Explicit def tied to explicit use without tie constraint",
2156 MO, MONum);
2157 } else {
2158 if (!OtherMO.isImplicit())
2159 report("Explicit def should be tied to implicit use", MO, MONum);
2160 }
2161 }
2162 }
2163
2164 // Verify two-address constraints after the twoaddressinstruction pass.
2165 // Both twoaddressinstruction pass and phi-node-elimination pass call
2166 // MRI->leaveSSA() to set MF as NoSSA, we should do the verification after
2167 // twoaddressinstruction pass not after phi-node-elimination pass. So we
2168 // shouldn't use the NoSSA as the condition, we should based on
2169 // TiedOpsRewritten property to verify two-address constraints, this
2170 // property will be set in twoaddressinstruction pass.
2171 unsigned DefIdx;
2172 if (MF->getProperties().hasProperty(
2174 MO->isUse() && MI->isRegTiedToDefOperand(MONum, &DefIdx) &&
2175 Reg != MI->getOperand(DefIdx).getReg())
2176 report("Two-address instruction operands must be identical", MO, MONum);
2177
2178 // Check register classes.
2179 unsigned SubIdx = MO->getSubReg();
2180
2181 if (Reg.isPhysical()) {
2182 if (SubIdx) {
2183 report("Illegal subregister index for physical register", MO, MONum);
2184 return;
2185 }
2186 if (MONum < MCID.getNumOperands()) {
2187 if (const TargetRegisterClass *DRC =
2188 TII->getRegClass(MCID, MONum, TRI, *MF)) {
2189 if (!DRC->contains(Reg)) {
2190 report("Illegal physical register for instruction", MO, MONum);
2191 errs() << printReg(Reg, TRI) << " is not a "
2192 << TRI->getRegClassName(DRC) << " register.\n";
2193 }
2194 }
2195 }
2196 if (MO->isRenamable()) {
2197 if (MRI->isReserved(Reg)) {
2198 report("isRenamable set on reserved register", MO, MONum);
2199 return;
2200 }
2201 }
2202 } else {
2203 // Virtual register.
2204 const TargetRegisterClass *RC = MRI->getRegClassOrNull(Reg);
2205 if (!RC) {
2206 // This is a generic virtual register.
2207
2208 // Do not allow undef uses for generic virtual registers. This ensures
2209 // getVRegDef can never fail and return null on a generic register.
2210 //
2211 // FIXME: This restriction should probably be broadened to all SSA
2212 // MIR. However, DetectDeadLanes/ProcessImplicitDefs technically still
2213 // run on the SSA function just before phi elimination.
2214 if (MO->isUndef())
2215 report("Generic virtual register use cannot be undef", MO, MONum);
2216
2217 // Debug value instruction is permitted to use undefined vregs.
2218 // This is a performance measure to skip the overhead of immediately
2219 // pruning unused debug operands. The final undef substitution occurs
2220 // when debug values are allocated in LDVImpl::handleDebugValue, so
2221 // these verifications always apply after this pass.
2222 if (isFunctionTracksDebugUserValues || !MO->isUse() ||
2223 !MI->isDebugValue() || !MRI->def_empty(Reg)) {
2224 // If we're post-Select, we can't have gvregs anymore.
2225 if (isFunctionSelected) {
2226 report("Generic virtual register invalid in a Selected function",
2227 MO, MONum);
2228 return;
2229 }
2230
2231 // The gvreg must have a type and it must not have a SubIdx.
2232 LLT Ty = MRI->getType(Reg);
2233 if (!Ty.isValid()) {
2234 report("Generic virtual register must have a valid type", MO,
2235 MONum);
2236 return;
2237 }
2238
2239 const RegisterBank *RegBank = MRI->getRegBankOrNull(Reg);
2240 const RegisterBankInfo *RBI = MF->getSubtarget().getRegBankInfo();
2241
2242 // If we're post-RegBankSelect, the gvreg must have a bank.
2243 if (!RegBank && isFunctionRegBankSelected) {
2244 report("Generic virtual register must have a bank in a "
2245 "RegBankSelected function",
2246 MO, MONum);
2247 return;
2248 }
2249
2250 // Make sure the register fits into its register bank if any.
2251 if (RegBank && Ty.isValid() &&
2252 RBI->getMaximumSize(RegBank->getID()) < Ty.getSizeInBits()) {
2253 report("Register bank is too small for virtual register", MO,
2254 MONum);
2255 errs() << "Register bank " << RegBank->getName() << " too small("
2256 << RBI->getMaximumSize(RegBank->getID()) << ") to fit "
2257 << Ty.getSizeInBits() << "-bits\n";
2258 return;
2259 }
2260 }
2261
2262 if (SubIdx) {
2263 report("Generic virtual register does not allow subregister index", MO,
2264 MONum);
2265 return;
2266 }
2267
2268 // If this is a target specific instruction and this operand
2269 // has register class constraint, the virtual register must
2270 // comply to it.
2271 if (!isPreISelGenericOpcode(MCID.getOpcode()) &&
2272 MONum < MCID.getNumOperands() &&
2273 TII->getRegClass(MCID, MONum, TRI, *MF)) {
2274 report("Virtual register does not match instruction constraint", MO,
2275 MONum);
2276 errs() << "Expect register class "
2277 << TRI->getRegClassName(
2278 TII->getRegClass(MCID, MONum, TRI, *MF))
2279 << " but got nothing\n";
2280 return;
2281 }
2282
2283 break;
2284 }
2285 if (SubIdx) {
2286 const TargetRegisterClass *SRC =
2287 TRI->getSubClassWithSubReg(RC, SubIdx);
2288 if (!SRC) {
2289 report("Invalid subregister index for virtual register", MO, MONum);
2290 errs() << "Register class " << TRI->getRegClassName(RC)
2291 << " does not support subreg index " << SubIdx << "\n";
2292 return;
2293 }
2294 if (RC != SRC) {
2295 report("Invalid register class for subregister index", MO, MONum);
2296 errs() << "Register class " << TRI->getRegClassName(RC)
2297 << " does not fully support subreg index " << SubIdx << "\n";
2298 return;
2299 }
2300 }
2301 if (MONum < MCID.getNumOperands()) {
2302 if (const TargetRegisterClass *DRC =
2303 TII->getRegClass(MCID, MONum, TRI, *MF)) {
2304 if (SubIdx) {
2305 const TargetRegisterClass *SuperRC =
2306 TRI->getLargestLegalSuperClass(RC, *MF);
2307 if (!SuperRC) {
2308 report("No largest legal super class exists.", MO, MONum);
2309 return;
2310 }
2311 DRC = TRI->getMatchingSuperRegClass(SuperRC, DRC, SubIdx);
2312 if (!DRC) {
2313 report("No matching super-reg register class.", MO, MONum);
2314 return;
2315 }
2316 }
2317 if (!RC->hasSuperClassEq(DRC)) {
2318 report("Illegal virtual register for instruction", MO, MONum);
2319 errs() << "Expected a " << TRI->getRegClassName(DRC)
2320 << " register, but got a " << TRI->getRegClassName(RC)
2321 << " register\n";
2322 }
2323 }
2324 }
2325 }
2326 break;
2327 }
2328
2330 regMasks.push_back(MO->getRegMask());
2331 break;
2332
2334 if (MI->isPHI() && !MO->getMBB()->isSuccessor(MI->getParent()))
2335 report("PHI operand is not in the CFG", MO, MONum);
2336 break;
2337
2339 if (LiveStks && LiveStks->hasInterval(MO->getIndex()) &&
2340 LiveInts && !LiveInts->isNotInMIMap(*MI)) {
2341 int FI = MO->getIndex();
2342 LiveInterval &LI = LiveStks->getInterval(FI);
2343 SlotIndex Idx = LiveInts->getInstructionIndex(*MI);
2344
2345 bool stores = MI->mayStore();
2346 bool loads = MI->mayLoad();
2347 // For a memory-to-memory move, we need to check if the frame
2348 // index is used for storing or loading, by inspecting the
2349 // memory operands.
2350 if (stores && loads) {
2351 for (auto *MMO : MI->memoperands()) {
2352 const PseudoSourceValue *PSV = MMO->getPseudoValue();
2353 if (PSV == nullptr) continue;
2355 dyn_cast<FixedStackPseudoSourceValue>(PSV);
2356 if (Value == nullptr) continue;
2357 if (Value->getFrameIndex() != FI) continue;
2358
2359 if (MMO->isStore())
2360 loads = false;
2361 else
2362 stores = false;
2363 break;
2364 }
2365 if (loads == stores)
2366 report("Missing fixed stack memoperand.", MI);
2367 }
2368 if (loads && !LI.liveAt(Idx.getRegSlot(true))) {
2369 report("Instruction loads from dead spill slot", MO, MONum);
2370 errs() << "Live stack: " << LI << '\n';
2371 }
2372 if (stores && !LI.liveAt(Idx.getRegSlot())) {
2373 report("Instruction stores to dead spill slot", MO, MONum);
2374 errs() << "Live stack: " << LI << '\n';
2375 }
2376 }
2377 break;
2378
2380 if (MO->getCFIIndex() >= MF->getFrameInstructions().size())
2381 report("CFI instruction has invalid index", MO, MONum);
2382 break;
2383
2384 default:
2385 break;
2386 }
2387}
2388
2389void MachineVerifier::checkLivenessAtUse(const MachineOperand *MO,
2390 unsigned MONum, SlotIndex UseIdx,
2391 const LiveRange &LR,
2392 Register VRegOrUnit,
2393 LaneBitmask LaneMask) {
2394 const MachineInstr *MI = MO->getParent();
2395 LiveQueryResult LRQ = LR.Query(UseIdx);
2396 bool HasValue = LRQ.valueIn() || (MI->isPHI() && LRQ.valueOut());
2397 // Check if we have a segment at the use, note however that we only need one
2398 // live subregister range, the others may be dead.
2399 if (!HasValue && LaneMask.none()) {
2400 report("No live segment at use", MO, MONum);
2401 report_context_liverange(LR);
2402 report_context_vreg_regunit(VRegOrUnit);
2403 report_context(UseIdx);
2404 }
2405 if (MO->isKill() && !LRQ.isKill()) {
2406 report("Live range continues after kill flag", MO, MONum);
2407 report_context_liverange(LR);
2408 report_context_vreg_regunit(VRegOrUnit);
2409 if (LaneMask.any())
2410 report_context_lanemask(LaneMask);
2411 report_context(UseIdx);
2412 }
2413}
2414
// Verify that live range LR has a value number defined at DefIdx matching the
// def operand MO, and that a dead flag on MO agrees with LR actually ending
// at this def. VRegOrUnit and LaneMask are used only for diagnostic context;
// SubRangeCheck is true when LR is a subregister range of a virtual register
// (callers pass true for subranges, false for the main range).
void MachineVerifier::checkLivenessAtDef(const MachineOperand *MO,
                                         unsigned MONum, SlotIndex DefIdx,
                                         const LiveRange &LR,
                                         Register VRegOrUnit,
                                         bool SubRangeCheck,
                                         LaneBitmask LaneMask) {
  if (const VNInfo *VNI = LR.getVNInfoAt(DefIdx)) {
    // The LR can correspond to the whole reg and its def slot is not obliged
    // to be the same as the MO' def slot. E.g. when we check here "normal"
    // subreg MO but there is other EC subreg MO in the same instruction so the
    // whole reg has EC def slot and differs from the currently checked MO' def
    // slot. For example:
    // %0 [16e,32r:0) 0@16e L..3 [16e,32r:0) 0@16e L..C [16r,32r:0) 0@16r
    // Check that there is an early-clobber def of the same superregister
    // somewhere is performed in visitMachineFunctionAfter()
    if (((SubRangeCheck || MO->getSubReg() == 0) && VNI->def != DefIdx) ||
        !SlotIndex::isSameInstr(VNI->def, DefIdx) ||
        (VNI->def != DefIdx &&
         (!VNI->def.isEarlyClobber() || !DefIdx.isRegister()))) {
      report("Inconsistent valno->def", MO, MONum);
      report_context_liverange(LR);
      report_context_vreg_regunit(VRegOrUnit);
      if (LaneMask.any())
        report_context_lanemask(LaneMask);
      report_context(*VNI);
      report_context(DefIdx);
    }
  } else {
    // No value number at all at the def slot: the range is missing a segment.
    report("No live segment at def", MO, MONum);
    report_context_liverange(LR);
    report_context_vreg_regunit(VRegOrUnit);
    if (LaneMask.any())
      report_context_lanemask(LaneMask);
    report_context(DefIdx);
  }
  // Check that, if the dead def flag is present, LiveInts agree.
  if (MO->isDead()) {
    LiveQueryResult LRQ = LR.Query(DefIdx);
    if (!LRQ.isDeadDef()) {
      assert(VRegOrUnit.isVirtual() && "Expecting a virtual register.");
      // A dead subreg def only tells us that the specific subreg is dead. There
      // could be other non-dead defs of other subregs, or we could have other
      // parts of the register being live through the instruction. So unless we
      // are checking liveness for a subrange it is ok for the live range to
      // continue, given that we have a dead def of a subregister.
      if (SubRangeCheck || MO->getSubReg() == 0) {
        report("Live range continues after dead def flag", MO, MONum);
        report_context_liverange(LR);
        report_context_vreg_regunit(VRegOrUnit);
        if (LaneMask.any())
          report_context_lanemask(LaneMask);
      }
    }
  }
}
2470
// Verify the liveness-related invariants for register operand MO (operand
// index MONum): agreement with LiveVariables kill information, with the
// LiveIntervals main range and subranges at the operand's use/def slot, with
// the verifier's running regsLive set, and with SSA single-def form.
void MachineVerifier::checkLiveness(const MachineOperand *MO, unsigned MONum) {
  const MachineInstr *MI = MO->getParent();
  const Register Reg = MO->getReg();
  const unsigned SubRegIdx = MO->getSubReg();

  // Look up the operand's live interval (virtual registers only); the checks
  // below use it for both the use and the def paths.
  const LiveInterval *LI = nullptr;
  if (LiveInts && Reg.isVirtual()) {
    if (LiveInts->hasInterval(Reg)) {
      LI = &LiveInts->getInterval(Reg);
      // Subreg operands (except undef uses, which read nothing) need
      // subranges when subreg liveness tracking is enabled.
      if (SubRegIdx != 0 && (MO->isDef() || !MO->isUndef()) && !LI->empty() &&
          !LI->hasSubRanges() && MRI->shouldTrackSubRegLiveness(Reg))
        report("Live interval for subreg operand has no subranges", MO, MONum);
    } else {
      report("Virtual register has no live interval", MO, MONum);
    }
  }

  // Both use and def operands can read a register.
  if (MO->readsReg()) {
    if (MO->isKill())
      addRegWithSubRegs(regsKilled, Reg);

    // Check that LiveVars knows this kill (unless we are inside a bundle, in
    // which case we have already checked that LiveVars knows any kills on the
    // bundle header instead).
    if (LiveVars && Reg.isVirtual() && MO->isKill() &&
        !MI->isBundledWithPred()) {
      LiveVariables::VarInfo &VI = LiveVars->getVarInfo(Reg);
      if (!is_contained(VI.Kills, MI))
        report("Kill missing from LiveVariables", MO, MONum);
    }

    // Check LiveInts liveness and kill.
    if (LiveInts && !LiveInts->isNotInMIMap(*MI)) {
      SlotIndex UseIdx;
      if (MI->isPHI()) {
        // PHI use occurs on the edge, so check for live out here instead.
        UseIdx = LiveInts->getMBBEndIdx(
          MI->getOperand(MONum + 1).getMBB()).getPrevSlot();
      } else {
        UseIdx = LiveInts->getInstructionIndex(*MI);
      }
      // Check the cached regunit intervals.
      if (Reg.isPhysical() && !isReserved(Reg)) {
        for (MCRegUnit Unit : TRI->regunits(Reg.asMCReg())) {
          if (MRI->isReservedRegUnit(Unit))
            continue;
          if (const LiveRange *LR = LiveInts->getCachedRegUnit(Unit))
            checkLivenessAtUse(MO, MONum, UseIdx, *LR, Unit);
        }
      }

      if (Reg.isVirtual()) {
        // This is a virtual register interval.
        checkLivenessAtUse(MO, MONum, UseIdx, *LI, Reg);

        if (LI->hasSubRanges() && !MO->isDef()) {
          // Operand lanes: the subreg's lanes, or all lanes of the vreg when
          // there is no subreg index.
          LaneBitmask MOMask = SubRegIdx != 0
                             ? TRI->getSubRegIndexLaneMask(SubRegIdx)
                             : MRI->getMaxLaneMaskForVReg(Reg);
          LaneBitmask LiveInMask;
          for (const LiveInterval::SubRange &SR : LI->subranges()) {
            if ((MOMask & SR.LaneMask).none())
              continue;
            checkLivenessAtUse(MO, MONum, UseIdx, SR, Reg, SR.LaneMask);
            LiveQueryResult LRQ = SR.Query(UseIdx);
            if (LRQ.valueIn() || (MI->isPHI() && LRQ.valueOut()))
              LiveInMask |= SR.LaneMask;
          }
          // At least parts of the register has to be live at the use.
          if ((LiveInMask & MOMask).none()) {
            report("No live subrange at use", MO, MONum);
            report_context(*LI);
            report_context(UseIdx);
          }
          // For PHIs all lanes should be live
          if (MI->isPHI() && LiveInMask != MOMask) {
            report("Not all lanes of PHI source live at use", MO, MONum);
            report_context(*LI);
            report_context(UseIdx);
          }
        }
      }
    }

    // Use of a dead register.
    if (!regsLive.count(Reg)) {
      if (Reg.isPhysical()) {
        // Reserved registers may be used even when 'dead'.
        bool Bad = !isReserved(Reg);
        // We are fine if just any subregister has a defined value.
        if (Bad) {

          for (const MCPhysReg &SubReg : TRI->subregs(Reg)) {
            if (regsLive.count(SubReg)) {
              Bad = false;
              break;
            }
          }
        }
        // If there is an additional implicit-use of a super register we stop
        // here. By definition we are fine if the super register is not
        // (completely) dead, if the complete super register is dead we will
        // get a report for its operand.
        if (Bad) {
          for (const MachineOperand &MOP : MI->uses()) {
            if (!MOP.isReg() || !MOP.isImplicit())
              continue;

            if (!MOP.getReg().isPhysical())
              continue;

            if (llvm::is_contained(TRI->subregs(MOP.getReg()), Reg))
              Bad = false;
          }
        }
        if (Bad)
          report("Using an undefined physical register", MO, MONum);
      } else if (MRI->def_empty(Reg)) {
        report("Reading virtual register without a def", MO, MONum);
      } else {
        BBInfo &MInfo = MBBInfoMap[MI->getParent()];
        // We don't know which virtual registers are live in, so only complain
        // if vreg was killed in this MBB. Otherwise keep track of vregs that
        // must be live in. PHI instructions are handled separately.
        if (MInfo.regsKilled.count(Reg))
          report("Using a killed virtual register", MO, MONum);
        else if (!MI->isPHI())
          MInfo.vregsLiveIn.insert(std::make_pair(Reg, MI));
      }
    }
  }

  if (MO->isDef()) {
    // Register defined.
    // TODO: verify that earlyclobber ops are not used.
    if (MO->isDead())
      addRegWithSubRegs(regsDead, Reg);
    else
      addRegWithSubRegs(regsDefined, Reg);

    // Verify SSA form.
    if (MRI->isSSA() && Reg.isVirtual() &&
        std::next(MRI->def_begin(Reg)) != MRI->def_end())
      report("Multiple virtual register defs in SSA form", MO, MONum);

    // Check LiveInts for a live segment, but only for virtual registers.
    if (LiveInts && !LiveInts->isNotInMIMap(*MI)) {
      SlotIndex DefIdx = LiveInts->getInstructionIndex(*MI);
      DefIdx = DefIdx.getRegSlot(MO->isEarlyClobber());

      if (Reg.isVirtual()) {
        checkLivenessAtDef(MO, MONum, DefIdx, *LI, Reg);

        if (LI->hasSubRanges()) {
          LaneBitmask MOMask = SubRegIdx != 0
                             ? TRI->getSubRegIndexLaneMask(SubRegIdx)
                             : MRI->getMaxLaneMaskForVReg(Reg);
          for (const LiveInterval::SubRange &SR : LI->subranges()) {
            if ((SR.LaneMask & MOMask).none())
              continue;
            checkLivenessAtDef(MO, MONum, DefIdx, SR, Reg, true, SR.LaneMask);
          }
        }
      }
    }
  }
}
2639
2640// This function gets called after visiting all instructions in a bundle. The
2641// argument points to the bundle header.
2642// Normal stand-alone instructions are also considered 'bundles', and this
2643// function is called for all of them.
2644void MachineVerifier::visitMachineBundleAfter(const MachineInstr *MI) {
2645 BBInfo &MInfo = MBBInfoMap[MI->getParent()];
2646 set_union(MInfo.regsKilled, regsKilled);
2647 set_subtract(regsLive, regsKilled); regsKilled.clear();
2648 // Kill any masked registers.
2649 while (!regMasks.empty()) {
2650 const uint32_t *Mask = regMasks.pop_back_val();
2651 for (Register Reg : regsLive)
2652 if (Reg.isPhysical() &&
2653 MachineOperand::clobbersPhysReg(Mask, Reg.asMCReg()))
2654 regsDead.push_back(Reg);
2655 }
2656 set_subtract(regsLive, regsDead); regsDead.clear();
2657 set_union(regsLive, regsDefined); regsDefined.clear();
2658}
2659
2660void
2661MachineVerifier::visitMachineBasicBlockAfter(const MachineBasicBlock *MBB) {
2662 MBBInfoMap[MBB].regsLiveOut = regsLive;
2663 regsLive.clear();
2664
2665 if (Indexes) {
2666 SlotIndex stop = Indexes->getMBBEndIdx(MBB);
2667 if (!(stop > lastIndex)) {
2668 report("Block ends before last instruction index", MBB);
2669 errs() << "Block ends at " << stop
2670 << " last instruction was at " << lastIndex << '\n';
2671 }
2672 lastIndex = stop;
2673 }
2674}
2675
2676namespace {
2677// This implements a set of registers that serves as a filter: can filter other
2678// sets by passing through elements not in the filter and blocking those that
2679// are. Any filter implicitly includes the full set of physical registers upon
2680// creation, thus filtering them all out. The filter itself as a set only grows,
2681// and needs to be as efficient as possible.
2682struct VRegFilter {
2683 // Add elements to the filter itself. \pre Input set \p FromRegSet must have
2684 // no duplicates. Both virtual and physical registers are fine.
2685 template <typename RegSetT> void add(const RegSetT &FromRegSet) {
2686 SmallVector<Register, 0> VRegsBuffer;
2687 filterAndAdd(FromRegSet, VRegsBuffer);
2688 }
2689 // Filter \p FromRegSet through the filter and append passed elements into \p
2690 // ToVRegs. All elements appended are then added to the filter itself.
2691 // \returns true if anything changed.
2692 template <typename RegSetT>
2693 bool filterAndAdd(const RegSetT &FromRegSet,
2694 SmallVectorImpl<Register> &ToVRegs) {
2695 unsigned SparseUniverse = Sparse.size();
2696 unsigned NewSparseUniverse = SparseUniverse;
2697 unsigned NewDenseSize = Dense.size();
2698 size_t Begin = ToVRegs.size();
2699 for (Register Reg : FromRegSet) {
2700 if (!Reg.isVirtual())
2701 continue;
2702 unsigned Index = Register::virtReg2Index(Reg);
2703 if (Index < SparseUniverseMax) {
2704 if (Index < SparseUniverse && Sparse.test(Index))
2705 continue;
2706 NewSparseUniverse = std::max(NewSparseUniverse, Index + 1);
2707 } else {
2708 if (Dense.count(Reg))
2709 continue;
2710 ++NewDenseSize;
2711 }
2712 ToVRegs.push_back(Reg);
2713 }
2714 size_t End = ToVRegs.size();
2715 if (Begin == End)
2716 return false;
2717 // Reserving space in sets once performs better than doing so continuously
2718 // and pays easily for double look-ups (even in Dense with SparseUniverseMax
2719 // tuned all the way down) and double iteration (the second one is over a
2720 // SmallVector, which is a lot cheaper compared to DenseSet or BitVector).
2721 Sparse.resize(NewSparseUniverse);
2722 Dense.reserve(NewDenseSize);
2723 for (unsigned I = Begin; I < End; ++I) {
2724 Register Reg = ToVRegs[I];
2725 unsigned Index = Register::virtReg2Index(Reg);
2726 if (Index < SparseUniverseMax)
2727 Sparse.set(Index);
2728 else
2729 Dense.insert(Reg);
2730 }
2731 return true;
2732 }
2733
2734private:
2735 static constexpr unsigned SparseUniverseMax = 10 * 1024 * 8;
2736 // VRegs indexed within SparseUniverseMax are tracked by Sparse, those beyound
2737 // are tracked by Dense. The only purpose of the threashold and the Dense set
2738 // is to have a reasonably growing memory usage in pathological cases (large
2739 // number of very sparse VRegFilter instances live at the same time). In
2740 // practice even in the worst-by-execution time cases having all elements
2741 // tracked by Sparse (very large SparseUniverseMax scenario) tends to be more
2742 // space efficient than if tracked by Dense. The threashold is set to keep the
2743 // worst-case memory usage within 2x of figures determined empirically for
2744 // "all Dense" scenario in such worst-by-execution-time cases.
2745 BitVector Sparse;
2747};
2748
2749// Implements both a transfer function and a (binary, in-place) join operator
2750// for a dataflow over register sets with set union join and filtering transfer
2751// (out_b = in_b \ filter_b). filter_b is expected to be set-up ahead of time.
2752// Maintains out_b as its state, allowing for O(n) iteration over it at any
2753// time, where n is the size of the set (as opposed to O(U) where U is the
2754// universe). filter_b implicitly contains all physical registers at all times.
2755class FilteringVRegSet {
2756 VRegFilter Filter;
2758
2759public:
2760 // Set-up the filter_b. \pre Input register set \p RS must have no duplicates.
2761 // Both virtual and physical registers are fine.
2762 template <typename RegSetT> void addToFilter(const RegSetT &RS) {
2763 Filter.add(RS);
2764 }
2765 // Passes \p RS through the filter_b (transfer function) and adds what's left
2766 // to itself (out_b).
2767 template <typename RegSetT> bool add(const RegSetT &RS) {
2768 // Double-duty the Filter: to maintain VRegs a set (and the join operation
2769 // a set union) just add everything being added here to the Filter as well.
2770 return Filter.filterAndAdd(RS, VRegs);
2771 }
2772 using const_iterator = decltype(VRegs)::const_iterator;
2773 const_iterator begin() const { return VRegs.begin(); }
2774 const_iterator end() const { return VRegs.end(); }
2775 size_t size() const { return VRegs.size(); }
2776};
2777} // namespace
2778
2779// Calculate the largest possible vregsPassed sets. These are the registers that
2780// can pass through an MBB live, but may not be live every time. It is assumed
2781// that all vregsPassed sets are empty before the call.
void MachineVerifier::calcRegsPassed() {
  if (MF->empty())
    // ReversePostOrderTraversal doesn't handle empty functions.
    return;

  // NOTE(review): the range expression of this loop appears truncated in this
  // text (expected a reverse post-order traversal of MF) -- confirm against
  // upstream before relying on this copy.
  for (const MachineBasicBlock *MB :
    FilteringVRegSet VRegs;
    BBInfo &Info = MBBInfoMap[MB];
    assert(Info.reachable);

    // Registers killed or live-out in this block form the filter: they do not
    // pass through unchanged.
    VRegs.addToFilter(Info.regsKilled);
    VRegs.addToFilter(Info.regsLiveOut);
    for (const MachineBasicBlock *Pred : MB->predecessors()) {
      const BBInfo &PredInfo = MBBInfoMap[Pred];
      if (!PredInfo.reachable)
        continue;

      // Anything live out of a reachable predecessor, or passing through it,
      // may also pass through this block (set-union join).
      VRegs.add(PredInfo.regsLiveOut);
      VRegs.add(PredInfo.vregsPassed);
    }
    Info.vregsPassed.reserve(VRegs.size());
    Info.vregsPassed.insert(VRegs.begin(), VRegs.end());
  }
}
2807
2808// Calculate the set of virtual registers that must be passed through each basic
2809// block in order to satisfy the requirements of successor blocks. This is very
2810// similar to calcRegsPassed, only backwards.
void MachineVerifier::calcRegsRequired() {
  // First push live-in regs to predecessors' vregsRequired.
  // NOTE(review): the declaration of the 'todo' worklist (a set of basic
  // blocks, used below with insert/erase/begin/empty) appears to be missing
  // from this text -- confirm against upstream.
  for (const auto &MBB : *MF) {
    BBInfo &MInfo = MBBInfoMap[&MBB];
    for (const MachineBasicBlock *Pred : MBB.predecessors()) {
      BBInfo &PInfo = MBBInfoMap[Pred];
      // If this added new requirements to the predecessor, reprocess it so
      // the requirement keeps propagating backwards.
      if (PInfo.addRequired(MInfo.vregsLiveIn))
        todo.insert(Pred);
    }

    // Handle the PHI node.
    for (const MachineInstr &MI : MBB.phis()) {
      // PHI operands come in (value, predecessor-block) pairs after the def.
      for (unsigned i = 1, e = MI.getNumOperands(); i != e; i += 2) {
        // Skip those Operands which are undef regs or not regs.
        if (!MI.getOperand(i).isReg() || !MI.getOperand(i).readsReg())
          continue;

        // Get register and predecessor for one PHI edge.
        Register Reg = MI.getOperand(i).getReg();
        const MachineBasicBlock *Pred = MI.getOperand(i + 1).getMBB();

        BBInfo &PInfo = MBBInfoMap[Pred];
        if (PInfo.addRequired(Reg))
          todo.insert(Pred);
      }
    }
  }

  // Iteratively push vregsRequired to predecessors. This will converge to the
  // same final state regardless of DenseSet iteration order.
  while (!todo.empty()) {
    const MachineBasicBlock *MBB = *todo.begin();
    todo.erase(MBB);
    BBInfo &MInfo = MBBInfoMap[MBB];
    for (const MachineBasicBlock *Pred : MBB->predecessors()) {
      // A self-loop cannot introduce new requirements.
      if (Pred == MBB)
        continue;
      BBInfo &SInfo = MBBInfoMap[Pred];
      if (SInfo.addRequired(MInfo.vregsRequired))
        todo.insert(Pred);
    }
  }
}
2855
2856// Check PHI instructions at the beginning of MBB. It is assumed that
2857// calcRegsPassed has been run so BBInfo::isLiveOut is valid.
void MachineVerifier::checkPHIOps(const MachineBasicBlock &MBB) {
  BBInfo &MInfo = MBBInfoMap[&MBB];

  // NOTE(review): the declaration of 'seen' (a set of predecessor blocks,
  // used below with clear/insert/count) appears to be missing from this text
  // -- confirm against upstream.
  for (const MachineInstr &Phi : MBB) {
    // PHIs are grouped at the top of a block; stop at the first non-PHI.
    if (!Phi.isPHI())
      break;
    seen.clear();

    // Operand 0 must be a plain virtual register def.
    const MachineOperand &MODef = Phi.getOperand(0);
    if (!MODef.isReg() || !MODef.isDef()) {
      report("Expected first PHI operand to be a register def", &MODef, 0);
      continue;
    }
    if (MODef.isTied() || MODef.isImplicit() || MODef.isInternalRead() ||
        MODef.isEarlyClobber() || MODef.isDebug())
      report("Unexpected flag on PHI operand", &MODef, 0);
    Register DefReg = MODef.getReg();
    if (!DefReg.isVirtual())
      report("Expected first PHI operand to be a virtual register", &MODef, 0);

    // Remaining operands come in (value, predecessor-block) pairs.
    for (unsigned I = 1, E = Phi.getNumOperands(); I != E; I += 2) {
      const MachineOperand &MO0 = Phi.getOperand(I);
      if (!MO0.isReg()) {
        report("Expected PHI operand to be a register", &MO0, I);
        continue;
      }
      if (MO0.isImplicit() || MO0.isInternalRead() || MO0.isEarlyClobber() ||
          MO0.isDebug() || MO0.isTied())
        report("Unexpected flag on PHI operand", &MO0, I);

      const MachineOperand &MO1 = Phi.getOperand(I + 1);
      if (!MO1.isMBB()) {
        report("Expected PHI operand to be a basic block", &MO1, I + 1);
        continue;
      }

      const MachineBasicBlock &Pre = *MO1.getMBB();
      if (!Pre.isSuccessor(&MBB)) {
        report("PHI input is not a predecessor block", &MO1, I + 1);
        continue;
      }

      if (MInfo.reachable) {
        seen.insert(&Pre);
        BBInfo &PrInfo = MBBInfoMap[&Pre];
        // Unless explicitly undef, the incoming value must be live out of the
        // reachable predecessor supplying it.
        if (!MO0.isUndef() && PrInfo.reachable &&
            !PrInfo.isLiveOut(MO0.getReg()))
          report("PHI operand is not live-out from predecessor", &MO0, I);
      }
    }

    // Did we see all predecessors?
    if (MInfo.reachable) {
      for (MachineBasicBlock *Pred : MBB.predecessors()) {
        if (!seen.count(Pred)) {
          report("Missing PHI operand", &Phi);
          errs() << printMBBReference(*Pred)
                 << " is a predecessor according to the CFG.\n";
        }
      }
    }
  }
}
2922
// Whole-function checks performed after all blocks and instructions have been
// visited: PHI consistency, dataflow-derived liveness checks, live-in lists,
// call site info, and debug value-tracking numbers.
void MachineVerifier::visitMachineFunctionAfter() {
  calcRegsPassed();

  for (const MachineBasicBlock &MBB : *MF)
    checkPHIOps(MBB);

  // Now check liveness info if available
  calcRegsRequired();

  // Check for killed virtual registers that should be live out.
  for (const auto &MBB : *MF) {
    BBInfo &MInfo = MBBInfoMap[&MBB];
    for (Register VReg : MInfo.vregsRequired)
      if (MInfo.regsKilled.count(VReg)) {
        report("Virtual register killed in block, but needed live out.", &MBB);
        errs() << "Virtual register " << printReg(VReg)
               << " is used after the block.\n";
      }
  }

  // A register still required on entry to the entry block has some use not
  // dominated by any def.
  if (!MF->empty()) {
    BBInfo &MInfo = MBBInfoMap[&MF->front()];
    for (Register VReg : MInfo.vregsRequired) {
      report("Virtual register defs don't dominate all uses.", MF);
      report_context_vreg(VReg);
    }
  }

  if (LiveVars)
    verifyLiveVariables();
  if (LiveInts)
    verifyLiveIntervals();

  // Check live-in list of each MBB. If a register is live into MBB, check
  // that the register is in regsLiveOut of each predecessor block. Since
  // this must come from a definition in the predecesssor or its live-in
  // list, this will catch a live-through case where the predecessor does not
  // have the register in its live-in list. This currently only checks
  // registers that have no aliases, are not allocatable and are not
  // reserved, which could mean a condition code register for instance.
  // NOTE(review): the inner loop header iterating over MBB's live-in entries
  // (which binds 'P') appears to be missing from this text -- confirm
  // against upstream.
  if (MRI->tracksLiveness())
    for (const auto &MBB : *MF)
      MCPhysReg LiveInReg = P.PhysReg;
      bool hasAliases = MCRegAliasIterator(LiveInReg, TRI, false).isValid();
      if (hasAliases || isAllocatable(LiveInReg) || isReserved(LiveInReg))
        continue;
      for (const MachineBasicBlock *Pred : MBB.predecessors()) {
        BBInfo &PInfo = MBBInfoMap[Pred];
        if (!PInfo.regsLiveOut.count(LiveInReg)) {
          report("Live in register not found to be live out from predecessor.",
                 &MBB);
          errs() << TRI->getName(LiveInReg)
                 << " not found to be live out from "
                 << printMBBReference(*Pred) << "\n";
        }
      }
    }

  // Every instruction referenced by call site info must actually be a call.
  for (auto CSInfo : MF->getCallSitesInfo())
    if (!CSInfo.first->isCall())
      report("Call site info referencing instruction that is not call", MF);

  // If there's debug-info, check that we don't have any duplicate value
  // tracking numbers.
  if (MF->getFunction().getSubprogram()) {
    DenseSet<unsigned> SeenNumbers;
    for (const auto &MBB : *MF) {
      for (const auto &MI : MBB) {
        if (auto Num = MI.peekDebugInstrNum()) {
          auto Result = SeenNumbers.insert((unsigned)Num);
          if (!Result.second)
            report("Instruction has a duplicated value tracking number", &MI);
        }
      }
    }
  }
}
3001
// Cross-check the LiveVariables analysis against the verifier's own
// vregsRequired dataflow: a block is in a vreg's AliveBlocks iff the vreg must
// be live through that block.
void MachineVerifier::verifyLiveVariables() {
  assert(LiveVars && "Don't call verifyLiveVariables without LiveVars");
  for (unsigned I = 0, E = MRI->getNumVirtRegs(); I != E; ++I) {
    // NOTE(review): the statement materializing 'Reg' from the virtual
    // register index 'I' appears to be missing from this text -- confirm
    // against upstream.
    LiveVariables::VarInfo &VI = LiveVars->getVarInfo(Reg);
    for (const auto &MBB : *MF) {
      BBInfo &MInfo = MBBInfoMap[&MBB];

      // Our vregsRequired should be identical to LiveVariables' AliveBlocks
      if (MInfo.vregsRequired.count(Reg)) {
        if (!VI.AliveBlocks.test(MBB.getNumber())) {
          report("LiveVariables: Block missing from AliveBlocks", &MBB);
          errs() << "Virtual register " << printReg(Reg)
                 << " must be live through the block.\n";
        }
      } else {
        if (VI.AliveBlocks.test(MBB.getNumber())) {
          report("LiveVariables: Block should not be in AliveBlocks", &MBB);
          errs() << "Virtual register " << printReg(Reg)
                 << " is not needed live through the block.\n";
        }
      }
    }
  }
}
3027
// Verify every virtual-register live interval plus all cached register-unit
// live ranges held by the LiveIntervals analysis.
void MachineVerifier::verifyLiveIntervals() {
  assert(LiveInts && "Don't call verifyLiveIntervals without LiveInts");
  for (unsigned I = 0, E = MRI->getNumVirtRegs(); I != E; ++I) {
    // NOTE(review): the statement materializing 'Reg' from the virtual
    // register index 'I' appears to be missing from this text -- confirm
    // against upstream.

    // Spilling and splitting may leave unused registers around. Skip them.
    if (MRI->reg_nodbg_empty(Reg))
      continue;

    // Any vreg with remaining non-debug defs/uses must have an interval.
    if (!LiveInts->hasInterval(Reg)) {
      report("Missing live interval for virtual register", MF);
      errs() << printReg(Reg, TRI) << " still has defs or uses\n";
      continue;
    }

    const LiveInterval &LI = LiveInts->getInterval(Reg);
    assert(Reg == LI.reg() && "Invalid reg to interval mapping");
    verifyLiveInterval(LI);
  }

  // Verify all the cached regunit intervals.
  for (unsigned i = 0, e = TRI->getNumRegUnits(); i != e; ++i)
    if (const LiveRange *LR = LiveInts->getCachedRegUnit(i))
      verifyLiveRange(*LR, i);
}
3053
// Verify one value number of a live range: it must be live at its own def
// slot, defined inside a real block, and (unless a PHI-def) defined by an
// instruction that actually writes the verified register at a slot of the
// correct kind.
void MachineVerifier::verifyLiveRangeValue(const LiveRange &LR,
                                           const VNInfo *VNI, Register Reg,
                                           LaneBitmask LaneMask) {
  // Unused value numbers carry no constraints.
  if (VNI->isUnused())
    return;

  const VNInfo *DefVNI = LR.getVNInfoAt(VNI->def);

  if (!DefVNI) {
    report("Value not live at VNInfo def and not marked unused", MF);
    report_context(LR, Reg, LaneMask);
    report_context(*VNI);
    return;
  }

  // The segment covering the def slot must belong to this very value number.
  if (DefVNI != VNI) {
    report("Live segment at def has different VNInfo", MF);
    report_context(LR, Reg, LaneMask);
    report_context(*VNI);
    return;
  }

  const MachineBasicBlock *MBB = LiveInts->getMBBFromIndex(VNI->def);
  if (!MBB) {
    report("Invalid VNInfo definition index", MF);
    report_context(LR, Reg, LaneMask);
    report_context(*VNI);
    return;
  }

  if (VNI->isPHIDef()) {
    // PHI-defs are only valid at the very start of their block.
    if (VNI->def != LiveInts->getMBBStartIdx(MBB)) {
      report("PHIDef VNInfo is not defined at MBB start", MBB);
      report_context(LR, Reg, LaneMask);
      report_context(*VNI);
    }
    return;
  }

  // Non-PHI def.
  const MachineInstr *MI = LiveInts->getInstructionFromIndex(VNI->def);
  if (!MI) {
    report("No instruction at VNInfo def index", MBB);
    report_context(LR, Reg, LaneMask);
    report_context(*VNI);
    return;
  }

  // Reg == 0 means no specific register is being verified, so the operand
  // checks below do not apply.
  if (Reg != 0) {
    bool hasDef = false;
    bool isEarlyClobber = false;
    for (ConstMIBundleOperands MOI(*MI); MOI.isValid(); ++MOI) {
      if (!MOI->isReg() || !MOI->isDef())
        continue;
      if (Reg.isVirtual()) {
        if (MOI->getReg() != Reg)
          continue;
      } else {
        // For a register-unit range, any def of a physreg containing the
        // unit counts as a def of the unit.
        if (!MOI->getReg().isPhysical() || !TRI->hasRegUnit(MOI->getReg(), Reg))
          continue;
      }
      // When verifying a subrange, only defs that touch the verified lanes
      // are relevant.
      if (LaneMask.any() &&
          (TRI->getSubRegIndexLaneMask(MOI->getSubReg()) & LaneMask).none())
        continue;
      hasDef = true;
      if (MOI->isEarlyClobber())
        isEarlyClobber = true;
    }

    if (!hasDef) {
      report("Defining instruction does not modify register", MI);
      report_context(LR, Reg, LaneMask);
      report_context(*VNI);
    }

    // Early clobber defs begin at USE slots, but other defs must begin at
    // DEF slots.
    if (isEarlyClobber) {
      if (!VNI->def.isEarlyClobber()) {
        report("Early clobber def must be at an early-clobber slot", MBB);
        report_context(LR, Reg, LaneMask);
        report_context(*VNI);
      }
    } else if (!VNI->def.isRegister()) {
      report("Non-PHI, non-early clobber def must be at a register slot", MBB);
      report_context(LR, Reg, LaneMask);
      report_context(*VNI);
    }
  }
}
3144
// Verify one segment of a live range: valno sanity, legal start/end points,
// ending instruction semantics, and live-in/live-out consistency across every
// block the segment spans.
// NOTE(review): the iterator parameter (a LiveRange::const_iterator, used
// below as 'I') appears to be missing from this signature in this text --
// confirm against upstream.
void MachineVerifier::verifyLiveRangeSegment(const LiveRange &LR,
                                             Register Reg,
                                             LaneBitmask LaneMask) {
  const LiveRange::Segment &S = *I;
  const VNInfo *VNI = S.valno;
  assert(VNI && "Live segment has no valno");

  // The segment's valno must actually be owned by this live range.
  if (VNI->id >= LR.getNumValNums() || VNI != LR.getValNumInfo(VNI->id)) {
    report("Foreign valno in live segment", MF);
    report_context(LR, Reg, LaneMask);
    report_context(S);
    report_context(*VNI);
  }

  if (VNI->isUnused()) {
    report("Live segment valno is marked unused", MF);
    report_context(LR, Reg, LaneMask);
    report_context(S);
  }

  const MachineBasicBlock *MBB = LiveInts->getMBBFromIndex(S.start);
  if (!MBB) {
    report("Bad start of live segment, no basic block", MF);
    report_context(LR, Reg, LaneMask);
    report_context(S);
    return;
  }
  // A segment may only start where its value is defined, or at a block entry
  // when the value flows in from a predecessor.
  SlotIndex MBBStartIdx = LiveInts->getMBBStartIdx(MBB);
  if (S.start != MBBStartIdx && S.start != VNI->def) {
    report("Live segment must begin at MBB entry or valno def", MBB);
    report_context(LR, Reg, LaneMask);
    report_context(S);
  }

  const MachineBasicBlock *EndMBB =
      LiveInts->getMBBFromIndex(S.end.getPrevSlot());
  if (!EndMBB) {
    report("Bad end of live segment, no basic block", MF);
    report_context(LR, Reg, LaneMask);
    report_context(S);
    return;
  }

  // Checks for non-live-out segments.
  if (S.end != LiveInts->getMBBEndIdx(EndMBB)) {
    // RegUnit intervals are allowed dead phis.
    if (!Reg.isVirtual() && VNI->isPHIDef() && S.start == VNI->def &&
        S.end == VNI->def.getDeadSlot())
      return;

    // The live segment is ending inside EndMBB
    // NOTE(review): the initializer of 'MI' (presumably looking up the
    // instruction at the slot just before S.end) appears truncated in this
    // text -- confirm against upstream.
    const MachineInstr *MI =
    if (!MI) {
      report("Live segment doesn't end at a valid instruction", EndMBB);
      report_context(LR, Reg, LaneMask);
      report_context(S);
      return;
    }

    // The block slot must refer to a basic block boundary.
    if (S.end.isBlock()) {
      report("Live segment ends at B slot of an instruction", EndMBB);
      report_context(LR, Reg, LaneMask);
      report_context(S);
    }

    if (S.end.isDead()) {
      // Segment ends on the dead slot.
      // That means there must be a dead def.
      if (!SlotIndex::isSameInstr(S.start, S.end)) {
        report("Live segment ending at dead slot spans instructions", EndMBB);
        report_context(LR, Reg, LaneMask);
        report_context(S);
      }
    }

    // After tied operands are rewritten, a live segment can only end at an
    // early-clobber slot if it is being redefined by an early-clobber def.
    // TODO: Before tied operands are rewritten, a live segment can only end at
    // an early-clobber slot if the last use is tied to an early-clobber def.
    // NOTE(review): the property argument of hasProperty() appears truncated
    // in this text (expected the tied-operands-rewritten property) --
    // confirm against upstream.
    if (MF->getProperties().hasProperty(
        S.end.isEarlyClobber()) {
      if (I + 1 == LR.end() || (I + 1)->start != S.end) {
        report("Live segment ending at early clobber slot must be "
               "redefined by an EC def in the same instruction",
               EndMBB);
        report_context(LR, Reg, LaneMask);
        report_context(S);
      }
    }

    // The following checks only apply to virtual registers. Physreg liveness
    // is too weird to check.
    if (Reg.isVirtual()) {
      // A live segment can end with either a redefinition, a kill flag on a
      // use, or a dead flag on a def.
      bool hasRead = false;
      bool hasSubRegDef = false;
      bool hasDeadDef = false;
      for (ConstMIBundleOperands MOI(*MI); MOI.isValid(); ++MOI) {
        if (!MOI->isReg() || MOI->getReg() != Reg)
          continue;
        unsigned Sub = MOI->getSubReg();
        LaneBitmask SLM =
            Sub != 0 ? TRI->getSubRegIndexLaneMask(Sub) : LaneBitmask::getAll();
        if (MOI->isDef()) {
          if (Sub != 0) {
            hasSubRegDef = true;
            // An operand %0:sub0 reads %0:sub1..n. Invert the lane
            // mask for subregister defs. Read-undef defs will be handled by
            // readsReg below.
            SLM = ~SLM;
          }
          if (MOI->isDead())
            hasDeadDef = true;
        }
        if (LaneMask.any() && (LaneMask & SLM).none())
          continue;
        if (MOI->readsReg())
          hasRead = true;
      }
      if (S.end.isDead()) {
        // Make sure that the corresponding machine operand for a "dead" live
        // range has the dead flag. We cannot perform this check for subregister
        // liveranges as partially dead values are allowed.
        if (LaneMask.none() && !hasDeadDef) {
          report(
              "Instruction ending live segment on dead slot has no dead flag",
              MI);
          report_context(LR, Reg, LaneMask);
          report_context(S);
        }
      } else {
        if (!hasRead) {
          // When tracking subregister liveness, the main range must start new
          // values on partial register writes, even if there is no read.
          if (!MRI->shouldTrackSubRegLiveness(Reg) || LaneMask.any() ||
              !hasSubRegDef) {
            report("Instruction ending live segment doesn't read the register",
                   MI);
            report_context(LR, Reg, LaneMask);
            report_context(S);
          }
        }
      }
    }
  }

  // Now check all the basic blocks in this live segment.
  // NOTE(review): the declaration/initialization of the block iterator 'MFI'
  // appears to be missing from this text -- confirm against upstream.
  // Is this live segment the beginning of a non-PHIDef VN?
  if (S.start == VNI->def && !VNI->isPHIDef()) {
    // Not live-in to any blocks.
    if (MBB == EndMBB)
      return;
    // Skip this block.
    ++MFI;
  }

  // NOTE(review): the declaration of 'Undefs' (the container filled by
  // computeSubRangeUndefs below) appears to be missing from this text --
  // confirm against upstream.
  if (LaneMask.any()) {
    LiveInterval &OwnerLI = LiveInts->getInterval(Reg);
    OwnerLI.computeSubRangeUndefs(Undefs, LaneMask, *MRI, *Indexes);
  }

  while (true) {
    assert(LiveInts->isLiveInToMBB(LR, &*MFI));
    // We don't know how to track physregs into a landing pad.
    if (!Reg.isVirtual() && MFI->isEHPad()) {
      if (&*MFI == EndMBB)
        break;
      ++MFI;
      continue;
    }

    // Is VNI a PHI-def in the current block?
    bool IsPHI = VNI->isPHIDef() &&
      VNI->def == LiveInts->getMBBStartIdx(&*MFI);

    // Check that VNI is live-out of all predecessors.
    for (const MachineBasicBlock *Pred : MFI->predecessors()) {
      SlotIndex PEnd = LiveInts->getMBBEndIdx(Pred);
      // Predecessor of landing pad live-out on last call.
      if (MFI->isEHPad()) {
        for (const MachineInstr &MI : llvm::reverse(*Pred)) {
          if (MI.isCall()) {
            PEnd = Indexes->getInstructionIndex(MI).getBoundaryIndex();
            break;
          }
        }
      }
      const VNInfo *PVNI = LR.getVNInfoBefore(PEnd);

      // All predecessors must have a live-out value. However for a phi
      // instruction with subregister intervals
      // only one of the subregisters (not necessarily the current one) needs to
      // be defined.
      if (!PVNI && (LaneMask.none() || !IsPHI)) {
        if (LiveRangeCalc::isJointlyDominated(Pred, Undefs, *Indexes))
          continue;
        report("Register not marked live out of predecessor", Pred);
        report_context(LR, Reg, LaneMask);
        report_context(*VNI);
        errs() << " live into " << printMBBReference(*MFI) << '@'
               << LiveInts->getMBBStartIdx(&*MFI) << ", not live before "
               << PEnd << '\n';
        continue;
      }

      // Only PHI-defs can take different predecessor values.
      if (!IsPHI && PVNI != VNI) {
        report("Different value live out of predecessor", Pred);
        report_context(LR, Reg, LaneMask);
        errs() << "Valno #" << PVNI->id << " live out of "
               << printMBBReference(*Pred) << '@' << PEnd << "\nValno #"
               << VNI->id << " live into " << printMBBReference(*MFI) << '@'
               << LiveInts->getMBBStartIdx(&*MFI) << '\n';
      }
    }
    if (&*MFI == EndMBB)
      break;
    ++MFI;
  }
}
3372
3373void MachineVerifier::verifyLiveRange(const LiveRange &LR, Register Reg,
3374 LaneBitmask LaneMask) {
3375 for (const VNInfo *VNI : LR.valnos)
3376 verifyLiveRangeValue(LR, VNI, Reg, LaneMask);
3377
3378 for (LiveRange::const_iterator I = LR.begin(), E = LR.end(); I != E; ++I)
3379 verifyLiveRangeSegment(LR, I, Reg, LaneMask);
3380}
3381
// Verify a virtual register's live interval: its main range, all subranges
// (disjoint, valid lane masks, non-empty, covered by the main range), and
// that the interval forms a single connected component.
void MachineVerifier::verifyLiveInterval(const LiveInterval &LI) {
  Register Reg = LI.reg();
  assert(Reg.isVirtual());
  verifyLiveRange(LI, Reg);

  if (LI.hasSubRanges()) {
    // NOTE(review): the declaration of the accumulated lane mask 'Mask'
    // appears to be missing from this text -- confirm against upstream.
    LaneBitmask MaxMask = MRI->getMaxLaneMaskForVReg(Reg);
    for (const LiveInterval::SubRange &SR : LI.subranges()) {
      // Subrange lane masks must be pairwise disjoint...
      if ((Mask & SR.LaneMask).any()) {
        report("Lane masks of sub ranges overlap in live interval", MF);
        report_context(LI);
      }
      // ...and contained within the register's maximum lane mask.
      if ((SR.LaneMask & ~MaxMask).any()) {
        report("Subrange lanemask is invalid", MF);
        report_context(LI);
      }
      if (SR.empty()) {
        report("Subrange must not be empty", MF);
        report_context(SR, LI.reg(), SR.LaneMask);
      }
      Mask |= SR.LaneMask;
      verifyLiveRange(SR, LI.reg(), SR.LaneMask);
      // Every subrange must be covered by the main live range.
      if (!LI.covers(SR)) {
        report("A Subrange is not covered by the main range", MF);
        report_context(LI);
      }
    }
  }

  // Check the LI only has one connected component.
  ConnectedVNInfoEqClasses ConEQ(*LiveInts);
  unsigned NumComp = ConEQ.Classify(LI);
  if (NumComp > 1) {
    report("Multiple connected components in live interval", MF);
    report_context(LI);
    for (unsigned comp = 0; comp != NumComp; ++comp) {
      errs() << comp << ": valnos";
      for (const VNInfo *I : LI.valnos)
        if (comp == ConEQ.getEqClass(I))
          errs() << ' ' << I->id;
      errs() << '\n';
    }
  }
}
3427
3428namespace {
3429
3430 // FrameSetup and FrameDestroy can have zero adjustment, so using a single
3431 // integer, we can't tell whether it is a FrameSetup or FrameDestroy if the
3432 // value is zero.
3433 // We use a bool plus an integer to capture the stack state.
3434 struct StackStateOfBB {
3435 StackStateOfBB() = default;
3436 StackStateOfBB(int EntryVal, int ExitVal, bool EntrySetup, bool ExitSetup) :
3437 EntryValue(EntryVal), ExitValue(ExitVal), EntryIsSetup(EntrySetup),
3438 ExitIsSetup(ExitSetup) {}
3439
3440 // Can be negative, which means we are setting up a frame.
3441 int EntryValue = 0;
3442 int ExitValue = 0;
3443 bool EntryIsSetup = false;
3444 bool ExitIsSetup = false;
3445 };
3446
3447} // end anonymous namespace
3448
3449/// Make sure on every path through the CFG, a FrameSetup <n> is always followed
3450/// by a FrameDestroy <n>, stack adjustments are identical on all
3451/// CFG edges to a merge point, and frame is destroyed at end of a return block.
void MachineVerifier::verifyStackFrame() {
  unsigned FrameSetupOpcode = TII->getCallFrameSetupOpcode();
  unsigned FrameDestroyOpcode = TII->getCallFrameDestroyOpcode();
  // Nothing to verify on targets without call-frame pseudo instructions.
  if (FrameSetupOpcode == ~0u && FrameDestroyOpcode == ~0u)
    return;

  // NOTE(review): the declarations of 'SPState' (per-block StackStateOfBB,
  // indexed by block number) and 'Reachable' (the visited-block set used by
  // the df_ext iteration) appear to be missing from this text -- confirm
  // against upstream.
  SPState.resize(MF->getNumBlockIDs());

  // Visit the MBBs in DFS order.
  // NOTE(review): the second template argument of df_ext_iterator appears to
  // be truncated in this text -- confirm against upstream.
  for (df_ext_iterator<const MachineFunction *,
       DFI = df_ext_begin(MF, Reachable), DFE = df_ext_end(MF, Reachable);
       DFI != DFE; ++DFI) {
    const MachineBasicBlock *MBB = *DFI;

    StackStateOfBB BBState;
    // Check the exit state of the DFS stack predecessor.
    if (DFI.getPathLength() >= 2) {
      const MachineBasicBlock *StackPred = DFI.getPath(DFI.getPathLength() - 2);
      assert(Reachable.count(StackPred) &&
             "DFS stack predecessor is already visited.\n");
      // Entry state is inherited from the DFS-stack predecessor's exit state.
      BBState.EntryValue = SPState[StackPred->getNumber()].ExitValue;
      BBState.EntryIsSetup = SPState[StackPred->getNumber()].ExitIsSetup;
      BBState.ExitValue = BBState.EntryValue;
      BBState.ExitIsSetup = BBState.EntryIsSetup;
    }

    // Cached call frame size must agree with the computed adjustment (which
    // is negative while a frame is being set up).
    if ((int)MBB->getCallFrameSize() != -BBState.EntryValue) {
      report("Call frame size on entry does not match value computed from "
             "predecessor",
             MBB);
      errs() << "Call frame size on entry " << MBB->getCallFrameSize()
             << " does not match value computed from predecessor "
             << -BBState.EntryValue << '\n';
    }

    // Update stack state by checking contents of MBB.
    for (const auto &I : *MBB) {
      if (I.getOpcode() == FrameSetupOpcode) {
        if (BBState.ExitIsSetup)
          report("FrameSetup is after another FrameSetup", &I);
        BBState.ExitValue -= TII->getFrameTotalSize(I);
        BBState.ExitIsSetup = true;
      }

      if (I.getOpcode() == FrameDestroyOpcode) {
        int Size = TII->getFrameTotalSize(I);
        if (!BBState.ExitIsSetup)
          report("FrameDestroy is not after a FrameSetup", &I);
        int AbsSPAdj = BBState.ExitValue < 0 ? -BBState.ExitValue :
          BBState.ExitValue;
        // The destroy amount must match the pending setup amount.
        if (BBState.ExitIsSetup && AbsSPAdj != Size) {
          report("FrameDestroy <n> is after FrameSetup <m>", &I);
          errs() << "FrameDestroy <" << Size << "> is after FrameSetup <"
                 << AbsSPAdj << ">.\n";
        }
        BBState.ExitValue += Size;
        BBState.ExitIsSetup = false;
      }
    }
    SPState[MBB->getNumber()] = BBState;

    // Make sure the exit state of any predecessor is consistent with the entry
    // state.
    for (const MachineBasicBlock *Pred : MBB->predecessors()) {
      if (Reachable.count(Pred) &&
          (SPState[Pred->getNumber()].ExitValue != BBState.EntryValue ||
           SPState[Pred->getNumber()].ExitIsSetup != BBState.EntryIsSetup)) {
        report("The exit stack state of a predecessor is inconsistent.", MBB);
        errs() << "Predecessor " << printMBBReference(*Pred)
               << " has exit state (" << SPState[Pred->getNumber()].ExitValue
               << ", " << SPState[Pred->getNumber()].ExitIsSetup << "), while "
               << printMBBReference(*MBB) << " has entry state ("
               << BBState.EntryValue << ", " << BBState.EntryIsSetup << ").\n";
      }
    }

    // Make sure the entry state of any successor is consistent with the exit
    // state.
    for (const MachineBasicBlock *Succ : MBB->successors()) {
      if (Reachable.count(Succ) &&
          (SPState[Succ->getNumber()].EntryValue != BBState.ExitValue ||
           SPState[Succ->getNumber()].EntryIsSetup != BBState.ExitIsSetup)) {
        report("The entry stack state of a successor is inconsistent.", MBB);
        errs() << "Successor " << printMBBReference(*Succ)
               << " has entry state (" << SPState[Succ->getNumber()].EntryValue
               << ", " << SPState[Succ->getNumber()].EntryIsSetup << "), while "
               << printMBBReference(*MBB) << " has exit state ("
               << BBState.ExitValue << ", " << BBState.ExitIsSetup << ").\n";
      }
    }

    // Make sure a basic block with return ends with zero stack adjustment.
    if (!MBB->empty() && MBB->back().isReturn()) {
      if (BBState.ExitIsSetup)
        report("A return block ends with a FrameSetup.", MBB);
      if (BBState.ExitValue)
        report("A return block ends with a nonzero stack adjustment.", MBB);
    }
  }
}
unsigned SubReg
unsigned const MachineRegisterInfo * MRI
aarch64 promote const
MachineBasicBlock & MBB
MachineBasicBlock MachineBasicBlock::iterator MBBI
static bool isLoad(int Opcode)
static bool isStore(int Opcode)
This file implements the BitVector class.
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
Analysis containing CSE Info
Definition: CSEInfo.cpp:27
This file contains the declarations for the subclasses of Constant, which represent the different fla...
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
This file defines the DenseMap class.
This file defines the DenseSet and SmallDenseSet classes.
This file builds on the ADT/GraphTraits.h file to build generic depth first graph iterator.
std::string Name
uint64_t Size
bool End
Definition: ELF_riscv.cpp:469
Declares convenience wrapper classes for interpreting MachineInstr instances as specific generic oper...
const HexagonInstrInfo * TII
hexagon widen stores
IRTranslator LLVM IR MI
A common definition of LaneBitmask for use in TableGen and CodeGen.
Implement a low-level type suitable for MachineInstr level instruction selection.
#define F(x, y, z)
Definition: MD5.cpp:55
#define I(x, y, z)
Definition: MD5.cpp:58
unsigned const TargetRegisterInfo * TRI
unsigned Reg
modulo schedule Modulo Schedule test pass
#define P(N)
ppc ctr loops verify
const char LLVMTargetMachineRef TM
#define INITIALIZE_PASS(passName, arg, name, cfg, analysis)
Definition: PassSupport.h:38
This file builds on the ADT/GraphTraits.h file to build a generic graph post order iterator.
const SmallVectorImpl< MachineOperand > MachineBasicBlock * TBB
const SmallVectorImpl< MachineOperand > & Cond
static bool isValid(const char C)
Returns true if C is a valid mangled character: <0-9a-zA-Z_>.
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
static bool isLiveOut(const MachineBasicBlock &MBB, unsigned Reg)
This file contains some templates that are useful if you are working with the STL at all.
This file defines generic set operations that may be used on set's of different types,...
This file defines the SmallPtrSet class.
This file defines the SmallVector class.
static unsigned getSize(unsigned Kind)
const fltSemantics & getSemantics() const
Definition: APFloat.h:1301
Represent the analysis usage information of a pass.
AnalysisUsage & addUsedIfAvailable()
Add the specified Pass class to the set of analyses used by this pass.
void setPreservesAll()
Set by analyses that do not transform their input at all.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition: ArrayRef.h:41
size_t size() const
size - Get the array size.
Definition: ArrayRef.h:165
LLVM Basic Block Representation.
Definition: BasicBlock.h:56
bool hasAddressTaken() const
Returns true if there are any uses of this basic block other than direct branches,...
Definition: BasicBlock.h:516
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
Definition: BasicBlock.h:127
bool test(unsigned Idx) const
Definition: BitVector.h:461
void clear()
clear - Removes all bits from the bitvector.
Definition: BitVector.h:335
iterator_range< const_set_bits_iterator > set_bits() const
Definition: BitVector.h:140
size_type size() const
size - Returns the number of bits in this bitvector.
Definition: BitVector.h:159
ConnectedVNInfoEqClasses - Helper class that can divide VNInfos in a LiveInterval into equivalence cl...
Definition: LiveInterval.h:998
ConstMIBundleOperands - Iterate over all operands in a const bundle of machine instructions.
ConstantFP - Floating Point Values [float, double].
Definition: Constants.h:260
const APFloat & getValueAPF() const
Definition: Constants.h:296
This is the shared class of boolean and integer constants.
Definition: Constants.h:78
unsigned getBitWidth() const
getBitWidth - Return the bitwidth of this constant.
Definition: Constants.h:139
This class represents an Operation in the Expression.
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Definition: DenseMap.h:220
Implements a dense probed hash-table based set.
Definition: DenseSet.h:271
Register getReg() const
Base class for user error types.
Definition: Error.h:352
A specialized PseudoSourceValue for holding FixedStack values, which must include a frame index.
FunctionPass class - This class is used to implement most global optimizations.
Definition: Pass.h:311
bool isPredicated(const MachineInstr &MI) const override
Returns true if the instruction is already predicated.
bool analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB, MachineBasicBlock *&FBB, SmallVectorImpl< MachineOperand > &Cond, bool AllowModify) const override
Analyze the branching code at the end of MBB, returning true if it cannot be understood (e....
constexpr unsigned getScalarSizeInBits() const
Definition: LowLevelType.h:249
constexpr bool isScalar() const
Definition: LowLevelType.h:139
constexpr bool isValid() const
Definition: LowLevelType.h:137
constexpr uint16_t getNumElements() const
Returns the number of elements in a vector LLT.
Definition: LowLevelType.h:149
constexpr bool isVector() const
Definition: LowLevelType.h:145
constexpr TypeSize getSizeInBits() const
Returns the total size of the type. Must only be called on sized types.
Definition: LowLevelType.h:175
constexpr bool isPointer() const
Definition: LowLevelType.h:141
constexpr LLT getElementType() const
Returns the vector's element type. Only valid for vector types.
Definition: LowLevelType.h:272
constexpr unsigned getAddressSpace() const
Definition: LowLevelType.h:262
constexpr LLT getScalarType() const
Definition: LowLevelType.h:190
constexpr TypeSize getSizeInBytes() const
Returns the total size of the type in bytes, i.e.
Definition: LowLevelType.h:185
A live range for subregisters.
Definition: LiveInterval.h:693
LiveInterval - This class represents the liveness of a register, or stack slot.
Definition: LiveInterval.h:686
Register reg() const
Definition: LiveInterval.h:717
bool hasSubRanges() const
Returns true if subregister liveness information is available.
Definition: LiveInterval.h:803
iterator_range< subrange_iterator > subranges()
Definition: LiveInterval.h:775
void computeSubRangeUndefs(SmallVectorImpl< SlotIndex > &Undefs, LaneBitmask LaneMask, const MachineRegisterInfo &MRI, const SlotIndexes &Indexes) const
For a given lane mask LaneMask, compute indexes at which the lane is marked undefined by subregister ...
bool hasInterval(Register Reg) const
SlotIndex getMBBStartIdx(const MachineBasicBlock *mbb) const
Return the first index in the given basic block.
MachineInstr * getInstructionFromIndex(SlotIndex index) const
Returns the instruction associated with the given index.
SlotIndex getInstructionIndex(const MachineInstr &Instr) const
Returns the base index of the given instruction.
SlotIndex getMBBEndIdx(const MachineBasicBlock *mbb) const
Return the last index in the given basic block.
LiveRange * getCachedRegUnit(unsigned Unit)
Return the live range for register unit Unit if it has already been computed, or nullptr if it hasn't...
LiveInterval & getInterval(Register Reg)
bool isNotInMIMap(const MachineInstr &Instr) const
Returns true if the specified machine instr has been removed or was never entered in the map.
MachineBasicBlock * getMBBFromIndex(SlotIndex index) const
bool isLiveInToMBB(const LiveRange &LR, const MachineBasicBlock *mbb) const
void print(raw_ostream &O, const Module *=nullptr) const override
Implement the dump method.
Result of a LiveRange query.
Definition: LiveInterval.h:90
bool isDeadDef() const
Return true if this instruction has a dead def.
Definition: LiveInterval.h:117
VNInfo * valueIn() const
Return the value that is live-in to the instruction.
Definition: LiveInterval.h:105
VNInfo * valueOut() const
Return the value leaving the instruction, if any.
Definition: LiveInterval.h:123
bool isKill() const
Return true if the live-in value is killed by this instruction.
Definition: LiveInterval.h:112
static LLVM_ATTRIBUTE_UNUSED bool isJointlyDominated(const MachineBasicBlock *MBB, ArrayRef< SlotIndex > Defs, const SlotIndexes &Indexes)
A diagnostic function to check if the end of the block MBB is jointly dominated by the blocks corresp...
This class represents the liveness of a register, stack slot, etc.
Definition: LiveInterval.h:157
VNInfo * getValNumInfo(unsigned ValNo)
getValNumInfo - Returns pointer to the specified val#.
Definition: LiveInterval.h:317
bool liveAt(SlotIndex index) const
Definition: LiveInterval.h:401
bool covers(const LiveRange &Other) const
Returns true if all segments of the Other live range are completely covered by this live range.
bool empty() const
Definition: LiveInterval.h:382
LiveQueryResult Query(SlotIndex Idx) const
Query Liveness at Idx.
Definition: LiveInterval.h:541
iterator end()
Definition: LiveInterval.h:216
VNInfo * getVNInfoBefore(SlotIndex Idx) const
getVNInfoBefore - Return the VNInfo that is live up to but not necessarily including Idx,...
Definition: LiveInterval.h:429
unsigned getNumValNums() const
Definition: LiveInterval.h:313
iterator begin()
Definition: LiveInterval.h:215
VNInfoList valnos
Definition: LiveInterval.h:204
VNInfo * getVNInfoAt(SlotIndex Idx) const
getVNInfoAt - Return the VNInfo that is live at Idx, or NULL.
Definition: LiveInterval.h:421
LiveInterval & getInterval(int Slot)
Definition: LiveStacks.h:68
bool hasInterval(int Slot) const
Definition: LiveStacks.h:82
VarInfo & getVarInfo(Register Reg)
getVarInfo - Return the VarInfo structure for the specified VIRTUAL register.
This class is intended to be used as a base class for asm properties and features specific to the tar...
Definition: MCAsmInfo.h:56
ExceptionHandling getExceptionHandlingType() const
Definition: MCAsmInfo.h:781
Describe properties that are true of each instruction in the target description file.
Definition: MCInstrDesc.h:198
unsigned getNumOperands() const
Return the number of declared MachineOperands for this MachineInstruction.
Definition: MCInstrDesc.h:237
ArrayRef< MCOperandInfo > operands() const
Definition: MCInstrDesc.h:239
unsigned getNumDefs() const
Return the number of MachineOperands that are register definitions.
Definition: MCInstrDesc.h:248
bool isConvergent() const
Return true if this instruction is convergent.
Definition: MCInstrDesc.h:415
bool variadicOpsAreDefs() const
Return true if variadic operands of this instruction are definitions.
Definition: MCInstrDesc.h:418
int getOperandConstraint(unsigned OpNum, MCOI::OperandConstraint Constraint) const
Returns the value of the specified operand constraint if it is present.
Definition: MCInstrDesc.h:219
unsigned getOpcode() const
Return the opcode number for this descriptor.
Definition: MCInstrDesc.h:230
This holds information about one operand of a machine instruction, indicating the register class for ...
Definition: MCInstrDesc.h:85
bool isOptionalDef() const
Set if this operand is an optional def.
Definition: MCInstrDesc.h:113
uint8_t OperandType
Information about the type of the operand.
Definition: MCInstrDesc.h:97
MCRegAliasIterator enumerates all registers aliasing Reg.
bool isValid() const
isValid - Returns true until all the operands have been visited.
bool isInlineAsmBrIndirectTarget() const
Returns true if this is the indirect dest of an INLINEASM_BR.
unsigned pred_size() const
bool isEHPad() const
Returns true if the block is a landing pad.
iterator_range< livein_iterator > liveins() const
iterator_range< iterator > phis()
Returns a range that iterates over the phis in the basic block.
int getNumber() const
MachineBasicBlocks are uniquely numbered at the function level, unless they're not in a MachineFuncti...
const BasicBlock * getBasicBlock() const
Return the LLVM basic block that this instance corresponded to originally.
bool isIRBlockAddressTaken() const
Test whether this block is the target of an IR BlockAddress.
unsigned succ_size() const
BasicBlock * getAddressTakenIRBlock() const
Retrieves the BasicBlock which corresponds to this MachineBasicBlock.
bool isPredecessor(const MachineBasicBlock *MBB) const
Return true if the specified MBB is a predecessor of this block.
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
unsigned getCallFrameSize() const
Return the call frame size on entry to this basic block.
iterator_range< succ_iterator > successors()
bool isSuccessor(const MachineBasicBlock *MBB) const
Return true if the specified MBB is a successor of this block.
iterator_range< pred_iterator > predecessors()
StringRef getName() const
Return the name of the corresponding LLVM basic block, or an empty string.
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
BitVector getPristineRegs(const MachineFunction &MF) const
Return a set of physical registers that are pristine.
An AnalysisManager<MachineFunction> that also exposes IR analysis results.
MachineFunctionPass - This class adapts the FunctionPass interface to allow convenient creation of pa...
void getAnalysisUsage(AnalysisUsage &AU) const override
getAnalysisUsage - Subclasses that override getAnalysisUsage must call this.
virtual bool runOnMachineFunction(MachineFunction &MF)=0
runOnMachineFunction - This method must be overloaded to perform the desired machine code transformat...
bool hasProperty(Property P) const
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
StringRef getName() const
getName - Return the name of the corresponding LLVM function.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
const LLVMTargetMachine & getTarget() const
getTarget - Return the target machine this machine code is compiled with
const MachineFunctionProperties & getProperties() const
Get the function properties.
const MachineBasicBlock & front() const
bool verify(Pass *p=nullptr, const char *Banner=nullptr, bool AbortOnError=true) const
Run the current MachineFunction through the machine code verifier, useful for debugger use.
void print(raw_ostream &OS, const SlotIndexes *=nullptr) const
print - Print out the MachineFunction in a format suitable for debugging to the specified stream.
Representation of each machine instruction.
Definition: MachineInstr.h:68
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
Definition: MachineInstr.h:543
bool isReturn(QueryType Type=AnyInBundle) const
Definition: MachineInstr.h:905
bool isTerminator(QueryType Type=AnyInBundle) const
Returns true if this instruction part of the terminator for a basic block.
Definition: MachineInstr.h:939
bool isBarrier(QueryType Type=AnyInBundle) const
Returns true if the specified instruction stops control flow from executing the instruction immediate...
Definition: MachineInstr.h:930
A description of a memory reference used in the backend.
const PseudoSourceValue * getPseudoValue() const
uint64_t getSize() const
Return the size in bytes of the memory reference.
AtomicOrdering getSuccessOrdering() const
Return the atomic ordering requirements for this memory operation.
uint64_t getSizeInBits() const
Return the size in bits of the memory reference.
MachineOperand class - Representation of each machine instruction operand.
unsigned getSubReg() const
int64_t getImm() const
bool readsReg() const
readsReg - Returns true if this operand reads the previous value of its register.
bool isImplicit() const
bool isIntrinsicID() const
bool isReg() const
isReg - Tests if this is a MO_Register operand.
MachineBasicBlock * getMBB() const
ArrayRef< int > getShuffleMask() const
bool isImm() const
isImm - Tests if this is a MO_Immediate operand.
bool isShuffleMask() const
unsigned getCFIIndex() const
bool isRenamable() const
isRenamable - Returns true if this register may be renamed, i.e.
MachineInstr * getParent()
getParent - Return the instruction that this operand belongs to.
MachineOperandType getType() const
getType - Returns the MachineOperandType for this operand.
bool isEarlyClobber() const
Register getReg() const
getReg - Returns the register number.
bool isInternalRead() const
bool isFI() const
isFI - Tests if this is a MO_FrameIndex operand.
static bool clobbersPhysReg(const uint32_t *RegMask, MCRegister PhysReg)
clobbersPhysReg - Returns true if this RegMask clobbers PhysReg.
const uint32_t * getRegMask() const
getRegMask - Returns a bit mask of registers preserved by this RegMask operand.
void print(raw_ostream &os, const TargetRegisterInfo *TRI=nullptr, const TargetIntrinsicInfo *IntrinsicInfo=nullptr) const
Print the MachineOperand to os.
@ MO_CFIIndex
MCCFIInstruction index.
@ MO_RegisterMask
Mask of preserved registers.
@ MO_MachineBasicBlock
MachineBasicBlock reference.
@ MO_FrameIndex
Abstract Stack Frame Index.
@ MO_Register
Register operand.
bool isMBB() const
isMBB - Tests if this is a MO_MachineBasicBlock operand.
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
static PassRegistry * getPassRegistry()
getPassRegistry - Access the global registry object, which is automatically initialized at applicatio...
Pass interface - Implemented by all 'passes'.
Definition: Pass.h:94
virtual void print(raw_ostream &OS, const Module *M) const
print - Print out the internal state of the pass.
Definition: Pass.cpp:130
AnalysisType * getAnalysisIfAvailable() const
getAnalysisIfAvailable<AnalysisType>() - Subclasses use this function to get analysis information tha...
Special value supplied for machine level alias analysis.
Holds all the information related to register banks.
const RegisterBank & getRegBank(unsigned ID)
Get the register bank identified by ID.
unsigned getMaximumSize(unsigned RegBankID) const
Get the maximum size in bits that fits in the given register bank.
This class implements the register bank concept.
Definition: RegisterBank.h:28
const char * getName() const
Get a user friendly name of this register bank.
Definition: RegisterBank.h:50
unsigned getID() const
Get the identifier of this register bank.
Definition: RegisterBank.h:46
Wrapper class representing virtual and physical registers.
Definition: Register.h:19
static Register index2VirtReg(unsigned Index)
Convert a 0-based index to a virtual register number.
Definition: Register.h:84
static unsigned virtReg2Index(Register Reg)
Convert a virtual register number to a 0-based index.
Definition: Register.h:77
constexpr bool isVirtual() const
Return true if the specified register number is in the virtual register namespace.
Definition: Register.h:91
static constexpr bool isPhysicalRegister(unsigned Reg)
Return true if the specified register number is in the physical register namespace.
Definition: Register.h:65
constexpr bool isPhysical() const
Return true if the specified register number is in the physical register namespace.
Definition: Register.h:95
SlotIndex - An opaque wrapper around machine indexes.
Definition: SlotIndexes.h:68
static bool isSameInstr(SlotIndex A, SlotIndex B)
isSameInstr - Return true if A and B refer to the same instruction.
Definition: SlotIndexes.h:180
bool isBlock() const
isBlock - Returns true if this is a block boundary slot.
Definition: SlotIndexes.h:213
SlotIndex getDeadSlot() const
Returns the dead def kill slot for the current instruction.
Definition: SlotIndexes.h:246
bool isEarlyClobber() const
isEarlyClobber - Returns true if this is an early-clobber slot.
Definition: SlotIndexes.h:216
bool isRegister() const
isRegister - Returns true if this is a normal register use/def slot.
Definition: SlotIndexes.h:220
SlotIndex getBoundaryIndex() const
Returns the boundary index for associated with this index.
Definition: SlotIndexes.h:235
SlotIndex getPrevSlot() const
Returns the previous slot in the index list.
Definition: SlotIndexes.h:276
SlotIndex getRegSlot(bool EC=false) const
Returns the register use/def slot in the current instruction for a normal or early-clobber def.
Definition: SlotIndexes.h:241
bool isDead() const
isDead - Returns true if this is a dead def kill slot.
Definition: SlotIndexes.h:223
SlotIndexes pass.
Definition: SlotIndexes.h:301
SlotIndex getMBBEndIdx(unsigned Num) const
Returns the last index in the given basic block number.
Definition: SlotIndexes.h:463
MBBIndexIterator MBBIndexBegin() const
Returns an iterator for the begin of the idx2MBBMap.
Definition: SlotIndexes.h:491
MBBIndexIterator MBBIndexEnd() const
Return an iterator for the end of the idx2MBBMap.
Definition: SlotIndexes.h:496
SmallVectorImpl< IdxMBBPair >::const_iterator MBBIndexIterator
Iterator over the idx2MBBMap (sorted pairs of slot index of basic block begin and basic block)
Definition: SlotIndexes.h:474
SlotIndex getInstructionIndex(const MachineInstr &MI, bool IgnoreBundle=false) const
Returns the base index for the given instruction.
Definition: SlotIndexes.h:372
SlotIndex getMBBStartIdx(unsigned Num) const
Returns the first index in the given basic block number.
Definition: SlotIndexes.h:453
bool hasIndex(const MachineInstr &instr) const
Returns true if the given machine instr is mapped to an index, otherwise returns false.
Definition: SlotIndexes.h:367
size_type size() const
Definition: SmallPtrSet.h:93
bool erase(PtrType Ptr)
erase - If the set contains the specified pointer, remove it and return true, otherwise return false.
Definition: SmallPtrSet.h:380
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
Definition: SmallPtrSet.h:384
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
Definition: SmallPtrSet.h:366
iterator begin() const
Definition: SmallPtrSet.h:404
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
Definition: SmallPtrSet.h:451
size_t size() const
Definition: SmallVector.h:91
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: SmallVector.h:577
void resize(size_type N)
Definition: SmallVector.h:642
void push_back(const T &Elt)
Definition: SmallVector.h:416
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1200
Register getReg() const
MI-level Statepoint operands.
Definition: StackMaps.h:158
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:50
TargetInstrInfo - Interface to description of machine instruction set.
Primary interface to the complete machine description for the target machine.
Definition: TargetMachine.h:78
bool hasSuperClassEq(const TargetRegisterClass *RC) const
Returns true if RC is a super-class of or equal to this class.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
virtual const TargetRegisterInfo * getRegisterInfo() const
getRegisterInfo - If register information is available, return it.
virtual const RegisterBankInfo * getRegBankInfo() const
If the information for the register banks is available, return it.
virtual const TargetInstrInfo * getInstrInfo() const
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition: Twine.h:81
VNInfo - Value Number Information.
Definition: LiveInterval.h:53
bool isUnused() const
Returns true if this value is unused.
Definition: LiveInterval.h:81
unsigned id
The ID number of this value.
Definition: LiveInterval.h:58
SlotIndex def
The index of the defining instruction.
Definition: LiveInterval.h:61
bool isPHIDef() const
Returns true if this value is defined by a PHI instruction (or was, PHI instructions may have been el...
Definition: LiveInterval.h:78
LLVM Value Representation.
Definition: Value.h:74
std::pair< iterator, bool > insert(const ValueT &V)
Definition: DenseSet.h:206
Iterator for intrusive lists based on ilist_node.
self_iterator getIterator()
Definition: ilist_node.h:82
NodeTy * getNextNode()
Get the next node, or nullptr for the list tail.
Definition: ilist_node.h:289
constexpr char Attrs[]
Key for Kernel::Metadata::mAttrs.
const CustomOperand< const MCSubtargetInfo & > Msg[]
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
Definition: BitmaskEnum.h:119
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
Definition: CallingConv.h:24
AttributeList getAttributes(LLVMContext &C, ID id)
Return the attributes for an intrinsic.
@ OPERAND_REGISTER
Definition: MCInstrDesc.h:61
@ OPERAND_IMMEDIATE
Definition: MCInstrDesc.h:60
Reg
All possible values of the reg field in the ModR/M byte.
constexpr double e
Definition: MathExtras.h:31
NodeAddr< PhiNode * > Phi
Definition: RDFGraph.h:390
NodeAddr< DefNode * > Def
Definition: RDFGraph.h:384
const_iterator begin(StringRef path, Style style=Style::native)
Get begin iterator over path.
Definition: Path.cpp:228
const_iterator end(StringRef path)
Get end iterator over path.
Definition: Path.cpp:237
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
Definition: STLExtras.h:330
@ Offset
Definition: DWP.cpp:440
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1727
auto size(R &&Range, std::enable_if_t< std::is_base_of< std::random_access_iterator_tag, typename std::iterator_traits< decltype(Range.begin())>::iterator_category >::value, void > *=nullptr)
Get the size of a range.
Definition: STLExtras.h:1685
@ SjLj
setjmp/longjmp based exceptions
bool isPreISelGenericOpcode(unsigned Opcode)
Check whether the given Opcode is a generic opcode that is not supposed to appear after ISel.
Definition: TargetOpcodes.h:30
Printable printRegUnit(unsigned Unit, const TargetRegisterInfo *TRI)
Create Printable object to print register units on a raw_ostream.
void append_range(Container &C, Range &&R)
Wrapper function to append a range to a container.
Definition: STLExtras.h:2037
void set_subtract(S1Ty &S1, const S2Ty &S2)
set_subtract(A, B) - Compute A := A - B
Definition: SetOperations.h:82
Printable PrintLaneMask(LaneBitmask LaneMask)
Create Printable object to print LaneBitmasks on a raw_ostream.
Definition: LaneBitmask.h:92
bool isPreISelGenericOptimizationHint(unsigned Opcode)
Definition: TargetOpcodes.h:42
bool isScopedEHPersonality(EHPersonality Pers)
Returns true if this personality uses scope-style EH IR instructions: catchswitch,...
void initializeMachineVerifierPassPass(PassRegistry &)
auto reverse(ContainerTy &&C)
Definition: STLExtras.h:429
detail::ValueMatchesPoly< M > HasValue(M Matcher)
Definition: Error.h:221
df_ext_iterator< T, SetTy > df_ext_begin(const T &G, SetTy &S)
bool none_of(R &&Range, UnaryPredicate P)
Provide wrappers to std::none_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1741
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
Definition: Error.cpp:156
bool set_union(S1Ty &S1, const S2Ty &S2)
set_union(A, B) - Compute A := A u B, return whether A changed.
Definition: SetOperations.h:23
EHPersonality classifyEHPersonality(const Value *Pers)
See if the given exception handling personality function is one that we understand.
raw_fd_ostream & errs()
This returns a reference to a raw_ostream for standard error.
AtomicOrdering
Atomic ordering for LLVM's memory model.
void verifyMachineFunction(MachineFunctionAnalysisManager *, const std::string &Banner, const MachineFunction &MF)
OutputIt move(R &&Range, OutputIt Out)
Provide wrappers to std::move which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1854
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Definition: STLExtras.h:1884
df_ext_iterator< T, SetTy > df_ext_end(const T &G, SetTy &S)
Printable printReg(Register Reg, const TargetRegisterInfo *TRI=nullptr, unsigned SubIdx=0, const MachineRegisterInfo *MRI=nullptr)
Prints virtual and physical registers with or without a TRI instance.
Printable printMBBReference(const MachineBasicBlock &MBB)
Prints a machine basic block reference.
FunctionPass * createMachineVerifierPass(const std::string &Banner)
createMachineVerifierPass - This pass verifies generated machine code instructions for correctness.
Implement std::hash so that hash_code can be used in STL containers.
Definition: BitVector.h:858
static unsigned getSizeInBits(const fltSemantics &Sem)
Returns the size of the floating point number (in bits) in the given semantics.
Definition: APFloat.cpp:331
static constexpr LaneBitmask getAll()
Definition: LaneBitmask.h:82
constexpr bool none() const
Definition: LaneBitmask.h:52
constexpr bool any() const
Definition: LaneBitmask.h:53
static constexpr LaneBitmask getNone()
Definition: LaneBitmask.h:81
This represents a simple continuous liveness interval for a value.
Definition: LiveInterval.h:162
VarInfo - This represents the regions where a virtual register is live in the program.
Definition: LiveVariables.h:80
Pair of physical register and lane mask.