LLVM 19.0.0git
MachineInstr.cpp
Go to the documentation of this file.
1//===- lib/CodeGen/MachineInstr.cpp ---------------------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// Methods common to all machine instructions.
10//
11//===----------------------------------------------------------------------===//
12
14#include "llvm/ADT/ArrayRef.h"
15#include "llvm/ADT/Hashing.h"
16#include "llvm/ADT/STLExtras.h"
37#include "llvm/IR/Constants.h"
39#include "llvm/IR/DebugLoc.h"
40#include "llvm/IR/Function.h"
41#include "llvm/IR/InlineAsm.h"
42#include "llvm/IR/LLVMContext.h"
43#include "llvm/IR/Metadata.h"
44#include "llvm/IR/Module.h"
46#include "llvm/IR/Operator.h"
47#include "llvm/MC/MCInstrDesc.h"
51#include "llvm/Support/Debug.h"
56#include <algorithm>
57#include <cassert>
58#include <cstdint>
59#include <cstring>
60#include <utility>
61
62using namespace llvm;
63
65 if (const MachineBasicBlock *MBB = MI.getParent())
66 if (const MachineFunction *MF = MBB->getParent())
67 return MF;
68 return nullptr;
69}
70
71// Try to crawl up to the machine function and get TRI and IntrinsicInfo from
72// it.
74 const TargetRegisterInfo *&TRI,
76 const TargetIntrinsicInfo *&IntrinsicInfo,
77 const TargetInstrInfo *&TII) {
78
79 if (const MachineFunction *MF = getMFIfAvailable(MI)) {
80 TRI = MF->getSubtarget().getRegisterInfo();
81 MRI = &MF->getRegInfo();
82 IntrinsicInfo = MF->getTarget().getIntrinsicInfo();
83 TII = MF->getSubtarget().getInstrInfo();
84 }
85}
86
88 for (MCPhysReg ImpDef : MCID->implicit_defs())
89 addOperand(MF, MachineOperand::CreateReg(ImpDef, true, true));
90 for (MCPhysReg ImpUse : MCID->implicit_uses())
91 addOperand(MF, MachineOperand::CreateReg(ImpUse, false, true));
92}
93
94/// MachineInstr ctor - This constructor creates a MachineInstr and adds the
95/// implicit operands. It reserves space for the number of operands specified by
96/// the MCInstrDesc.
97MachineInstr::MachineInstr(MachineFunction &MF, const MCInstrDesc &TID,
98 DebugLoc DL, bool NoImp)
99 : MCID(&TID), NumOperands(0), Flags(0), AsmPrinterFlags(0),
100 DbgLoc(std::move(DL)), DebugInstrNum(0) {
101 assert(DbgLoc.hasTrivialDestructor() && "Expected trivial destructor");
102
103 // Reserve space for the expected number of operands.
104 if (unsigned NumOps = MCID->getNumOperands() + MCID->implicit_defs().size() +
105 MCID->implicit_uses().size()) {
106 CapOperands = OperandCapacity::get(NumOps);
107 Operands = MF.allocateOperandArray(CapOperands);
108 }
109
110 if (!NoImp)
112}
113
114/// MachineInstr ctor - Copies MachineInstr arg exactly.
115/// Does not copy the number from debug instruction numbering, to preserve
116/// uniqueness.
117MachineInstr::MachineInstr(MachineFunction &MF, const MachineInstr &MI)
118 : MCID(&MI.getDesc()), NumOperands(0), Flags(0), AsmPrinterFlags(0),
119 Info(MI.Info), DbgLoc(MI.getDebugLoc()), DebugInstrNum(0) {
120 assert(DbgLoc.hasTrivialDestructor() && "Expected trivial destructor");
121
122 CapOperands = OperandCapacity::get(MI.getNumOperands());
123 Operands = MF.allocateOperandArray(CapOperands);
124
125 // Copy operands.
126 for (const MachineOperand &MO : MI.operands())
127 addOperand(MF, MO);
128
129 // Replicate ties between the operands, which addOperand was not
130 // able to do reliably.
131 for (unsigned i = 0, e = getNumOperands(); i < e; ++i) {
132 MachineOperand &NewMO = getOperand(i);
133 const MachineOperand &OrigMO = MI.getOperand(i);
134 NewMO.TiedTo = OrigMO.TiedTo;
135 }
136
137 // Copy all the sensible flags.
138 setFlags(MI.Flags);
139}
140
142 if (getParent())
143 getMF()->handleChangeDesc(*this, TID);
144 MCID = &TID;
145}
146
148 MovePos->getParent()->splice(MovePos, getParent(), getIterator());
149}
150
151/// getRegInfo - If this instruction is embedded into a MachineFunction,
152/// return the MachineRegisterInfo object for the current function, otherwise
153/// return null.
154MachineRegisterInfo *MachineInstr::getRegInfo() {
156 return &MBB->getParent()->getRegInfo();
157 return nullptr;
158}
159
160const MachineRegisterInfo *MachineInstr::getRegInfo() const {
161 if (const MachineBasicBlock *MBB = getParent())
162 return &MBB->getParent()->getRegInfo();
163 return nullptr;
164}
165
166void MachineInstr::removeRegOperandsFromUseLists(MachineRegisterInfo &MRI) {
167 for (MachineOperand &MO : operands())
168 if (MO.isReg())
169 MRI.removeRegOperandFromUseList(&MO);
170}
171
172void MachineInstr::addRegOperandsToUseLists(MachineRegisterInfo &MRI) {
173 for (MachineOperand &MO : operands())
174 if (MO.isReg())
175 MRI.addRegOperandToUseList(&MO);
176}
177
180 assert(MBB && "Use MachineInstrBuilder to add operands to dangling instrs");
182 assert(MF && "Use MachineInstrBuilder to add operands to dangling instrs");
183 addOperand(*MF, Op);
184}
185
186/// Move NumOps MachineOperands from Src to Dst, with support for overlapping
187/// ranges. If MRI is non-null also update use-def chains.
189 unsigned NumOps, MachineRegisterInfo *MRI) {
190 if (MRI)
191 return MRI->moveOperands(Dst, Src, NumOps);
192 // MachineOperand is a trivially copyable type so we can just use memmove.
193 assert(Dst && Src && "Unknown operands");
194 std::memmove(Dst, Src, NumOps * sizeof(MachineOperand));
195}
196
197/// addOperand - Add the specified operand to the instruction. If it is an
198/// implicit operand, it is added to the end of the operand list. If it is
199/// an explicit operand it is added at the end of the explicit operand list
200/// (before the first implicit operand).
202 assert(isUInt<LLVM_MI_NUMOPERANDS_BITS>(NumOperands + 1) &&
203 "Cannot add more operands.");
204 assert(MCID && "Cannot add operands before providing an instr descriptor");
205
206 // Check if we're adding one of our existing operands.
207 if (&Op >= Operands && &Op < Operands + NumOperands) {
208 // This is unusual: MI->addOperand(MI->getOperand(i)).
209 // If adding Op requires reallocating or moving existing operands around,
210 // the Op reference could go stale. Support it by copying Op.
211 MachineOperand CopyOp(Op);
212 return addOperand(MF, CopyOp);
213 }
214
215 // Find the insert location for the new operand. Implicit registers go at
216 // the end, everything else goes before the implicit regs.
217 //
218 // FIXME: Allow mixed explicit and implicit operands on inline asm.
219 // InstrEmitter::EmitSpecialNode() is marking inline asm clobbers as
220 // implicit-defs, but they must not be moved around. See the FIXME in
221 // InstrEmitter.cpp.
222 unsigned OpNo = getNumOperands();
223 bool isImpReg = Op.isReg() && Op.isImplicit();
224 if (!isImpReg && !isInlineAsm()) {
225 while (OpNo && Operands[OpNo-1].isReg() && Operands[OpNo-1].isImplicit()) {
226 --OpNo;
227 assert(!Operands[OpNo].isTied() && "Cannot move tied operands");
228 }
229 }
230
231 // OpNo now points as the desired insertion point. Unless this is a variadic
232 // instruction, only implicit regs are allowed beyond MCID->getNumOperands().
233 // RegMask operands go between the explicit and implicit operands.
234 MachineRegisterInfo *MRI = getRegInfo();
235
236 // Determine if the Operands array needs to be reallocated.
237 // Save the old capacity and operand array.
238 OperandCapacity OldCap = CapOperands;
239 MachineOperand *OldOperands = Operands;
240 if (!OldOperands || OldCap.getSize() == getNumOperands()) {
241 CapOperands = OldOperands ? OldCap.getNext() : OldCap.get(1);
242 Operands = MF.allocateOperandArray(CapOperands);
243 // Move the operands before the insertion point.
244 if (OpNo)
245 moveOperands(Operands, OldOperands, OpNo, MRI);
246 }
247
248 // Move the operands following the insertion point.
249 if (OpNo != NumOperands)
250 moveOperands(Operands + OpNo + 1, OldOperands + OpNo, NumOperands - OpNo,
251 MRI);
252 ++NumOperands;
253
254 // Deallocate the old operand array.
255 if (OldOperands != Operands && OldOperands)
256 MF.deallocateOperandArray(OldCap, OldOperands);
257
258 // Copy Op into place. It still needs to be inserted into the MRI use lists.
259 MachineOperand *NewMO = new (Operands + OpNo) MachineOperand(Op);
260 NewMO->ParentMI = this;
261
262 // When adding a register operand, tell MRI about it.
263 if (NewMO->isReg()) {
264 // Ensure isOnRegUseList() returns false, regardless of Op's status.
265 NewMO->Contents.Reg.Prev = nullptr;
266 // Ignore existing ties. This is not a property that can be copied.
267 NewMO->TiedTo = 0;
268 // Add the new operand to MRI, but only for instructions in an MBB.
269 if (MRI)
270 MRI->addRegOperandToUseList(NewMO);
271 // The MCID operand information isn't accurate until we start adding
272 // explicit operands. The implicit operands are added first, then the
273 // explicits are inserted before them.
274 if (!isImpReg) {
275 // Tie uses to defs as indicated in MCInstrDesc.
276 if (NewMO->isUse()) {
277 int DefIdx = MCID->getOperandConstraint(OpNo, MCOI::TIED_TO);
278 if (DefIdx != -1)
279 tieOperands(DefIdx, OpNo);
280 }
281 // If the register operand is flagged as early, mark the operand as such.
282 if (MCID->getOperandConstraint(OpNo, MCOI::EARLY_CLOBBER) != -1)
283 NewMO->setIsEarlyClobber(true);
284 }
285 // Ensure debug instructions set debug flag on register uses.
286 if (NewMO->isUse() && isDebugInstr())
287 NewMO->setIsDebug();
288 }
289}
290
/// Remove the operand at index OpNo, shifting every later operand down by one.
/// The operand must not be tied, and no operand after it may be tied either,
/// since compacting the array would invalidate tie indices.
void MachineInstr::removeOperand(unsigned OpNo) {
  assert(OpNo < getNumOperands() && "Invalid operand number");
  // Break any tie involving OpNo itself before removing it.
  untieRegOperand(OpNo);

#ifndef NDEBUG
  // Moving tied operands would break the ties.
  for (unsigned i = OpNo + 1, e = getNumOperands(); i != e; ++i)
    if (Operands[i].isReg())
      assert(!Operands[i].isTied() && "Cannot move tied operands");
#endif

  // If the instruction is in a function, take the operand off its register's
  // use-def chain before the array slot is reused.
  MachineRegisterInfo *MRI = getRegInfo();
  if (MRI && Operands[OpNo].isReg())
    MRI->removeRegOperandFromUseList(Operands + OpNo);

  // Don't call the MachineOperand destructor. A lot of this code depends on
  // MachineOperand having a trivial destructor anyway, and adding a call here
  // wouldn't make it 'destructor-correct'.

  // Compact the tail of the array over the removed slot (N may be 0 when
  // removing the last operand).
  if (unsigned N = NumOperands - 1 - OpNo)
    moveOperands(Operands + OpNo, Operands + OpNo + 1, N, MRI);
  --NumOperands;
}
314
315void MachineInstr::setExtraInfo(MachineFunction &MF,
317 MCSymbol *PreInstrSymbol,
318 MCSymbol *PostInstrSymbol,
319 MDNode *HeapAllocMarker, MDNode *PCSections,
320 uint32_t CFIType) {
321 bool HasPreInstrSymbol = PreInstrSymbol != nullptr;
322 bool HasPostInstrSymbol = PostInstrSymbol != nullptr;
323 bool HasHeapAllocMarker = HeapAllocMarker != nullptr;
324 bool HasPCSections = PCSections != nullptr;
325 bool HasCFIType = CFIType != 0;
326 int NumPointers = MMOs.size() + HasPreInstrSymbol + HasPostInstrSymbol +
327 HasHeapAllocMarker + HasPCSections + HasCFIType;
328
329 // Drop all extra info if there is none.
330 if (NumPointers <= 0) {
331 Info.clear();
332 return;
333 }
334
335 // If more than one pointer, then store out of line. Store heap alloc markers
336 // out of line because PointerSumType cannot hold more than 4 tag types with
337 // 32-bit pointers.
338 // FIXME: Maybe we should make the symbols in the extra info mutable?
339 else if (NumPointers > 1 || HasHeapAllocMarker || HasPCSections ||
340 HasCFIType) {
341 Info.set<EIIK_OutOfLine>(
342 MF.createMIExtraInfo(MMOs, PreInstrSymbol, PostInstrSymbol,
343 HeapAllocMarker, PCSections, CFIType));
344 return;
345 }
346
347 // Otherwise store the single pointer inline.
348 if (HasPreInstrSymbol)
349 Info.set<EIIK_PreInstrSymbol>(PreInstrSymbol);
350 else if (HasPostInstrSymbol)
351 Info.set<EIIK_PostInstrSymbol>(PostInstrSymbol);
352 else
353 Info.set<EIIK_MMO>(MMOs[0]);
354}
355
357 if (memoperands_empty())
358 return;
359
360 setExtraInfo(MF, {}, getPreInstrSymbol(), getPostInstrSymbol(),
362}
363
366 if (MMOs.empty()) {
367 dropMemRefs(MF);
368 return;
369 }
370
371 setExtraInfo(MF, MMOs, getPreInstrSymbol(), getPostInstrSymbol(),
373}
374
376 MachineMemOperand *MO) {
379 MMOs.push_back(MO);
380 setMemRefs(MF, MMOs);
381}
382
384 if (this == &MI)
385 // Nothing to do for a self-clone!
386 return;
387
388 assert(&MF == MI.getMF() &&
389 "Invalid machine functions when cloning memory refrences!");
390 // See if we can just steal the extra info already allocated for the
391 // instruction. We can do this whenever the pre- and post-instruction symbols
392 // are the same (including null).
393 if (getPreInstrSymbol() == MI.getPreInstrSymbol() &&
394 getPostInstrSymbol() == MI.getPostInstrSymbol() &&
395 getHeapAllocMarker() == MI.getHeapAllocMarker() &&
396 getPCSections() == MI.getPCSections()) {
397 Info = MI.Info;
398 return;
399 }
400
401 // Otherwise, fall back on a copy-based clone.
402 setMemRefs(MF, MI.memoperands());
403}
404
405/// Check to see if the MMOs pointed to by the two MemRefs arrays are
406/// identical.
409 if (LHS.size() != RHS.size())
410 return false;
411
412 auto LHSPointees = make_pointee_range(LHS);
413 auto RHSPointees = make_pointee_range(RHS);
414 return std::equal(LHSPointees.begin(), LHSPointees.end(),
415 RHSPointees.begin());
416}
417
420 // Try handling easy numbers of MIs with simpler mechanisms.
421 if (MIs.empty()) {
422 dropMemRefs(MF);
423 return;
424 }
425 if (MIs.size() == 1) {
426 cloneMemRefs(MF, *MIs[0]);
427 return;
428 }
429 // Because an empty memoperands list provides *no* information and must be
430 // handled conservatively (assuming the instruction can do anything), the only
431 // way to merge with it is to drop all other memoperands.
432 if (MIs[0]->memoperands_empty()) {
433 dropMemRefs(MF);
434 return;
435 }
436
437 // Handle the general case.
439 // Start with the first instruction.
440 assert(&MF == MIs[0]->getMF() &&
441 "Invalid machine functions when cloning memory references!");
442 MergedMMOs.append(MIs[0]->memoperands_begin(), MIs[0]->memoperands_end());
443 // Now walk all the other instructions and accumulate any different MMOs.
444 for (const MachineInstr &MI : make_pointee_range(MIs.slice(1))) {
445 assert(&MF == MI.getMF() &&
446 "Invalid machine functions when cloning memory references!");
447
448 // Skip MIs with identical operands to the first. This is a somewhat
449 // arbitrary hack but will catch common cases without being quadratic.
450 // TODO: We could fully implement merge semantics here if needed.
451 if (hasIdenticalMMOs(MIs[0]->memoperands(), MI.memoperands()))
452 continue;
453
454 // Because an empty memoperands list provides *no* information and must be
455 // handled conservatively (assuming the instruction can do anything), the
456 // only way to merge with it is to drop all other memoperands.
457 if (MI.memoperands_empty()) {
458 dropMemRefs(MF);
459 return;
460 }
461
462 // Otherwise accumulate these into our temporary buffer of the merged state.
463 MergedMMOs.append(MI.memoperands_begin(), MI.memoperands_end());
464 }
465
466 setMemRefs(MF, MergedMMOs);
467}
468
470 // Do nothing if old and new symbols are the same.
471 if (Symbol == getPreInstrSymbol())
472 return;
473
474 // If there was only one symbol and we're removing it, just clear info.
475 if (!Symbol && Info.is<EIIK_PreInstrSymbol>()) {
476 Info.clear();
477 return;
478 }
479
480 setExtraInfo(MF, memoperands(), Symbol, getPostInstrSymbol(),
482}
483
485 // Do nothing if old and new symbols are the same.
486 if (Symbol == getPostInstrSymbol())
487 return;
488
489 // If there was only one symbol and we're removing it, just clear info.
490 if (!Symbol && Info.is<EIIK_PostInstrSymbol>()) {
491 Info.clear();
492 return;
493 }
494
495 setExtraInfo(MF, memoperands(), getPreInstrSymbol(), Symbol,
497}
498
500 // Do nothing if old and new symbols are the same.
501 if (Marker == getHeapAllocMarker())
502 return;
503
504 setExtraInfo(MF, memoperands(), getPreInstrSymbol(), getPostInstrSymbol(),
505 Marker, getPCSections(), getCFIType());
506}
507
509 // Do nothing if old and new symbols are the same.
510 if (PCSections == getPCSections())
511 return;
512
513 setExtraInfo(MF, memoperands(), getPreInstrSymbol(), getPostInstrSymbol(),
514 getHeapAllocMarker(), PCSections, getCFIType());
515}
516
518 // Do nothing if old and new types are the same.
519 if (Type == getCFIType())
520 return;
521
522 setExtraInfo(MF, memoperands(), getPreInstrSymbol(), getPostInstrSymbol(),
524}
525
527 const MachineInstr &MI) {
528 if (this == &MI)
529 // Nothing to do for a self-clone!
530 return;
531
532 assert(&MF == MI.getMF() &&
533 "Invalid machine functions when cloning instruction symbols!");
534
535 setPreInstrSymbol(MF, MI.getPreInstrSymbol());
536 setPostInstrSymbol(MF, MI.getPostInstrSymbol());
537 setHeapAllocMarker(MF, MI.getHeapAllocMarker());
538 setPCSections(MF, MI.getPCSections());
539}
540
  // For now, just return the union of the flags. If the flags get more
543 // complicated over time, we might need more logic here.
544 return getFlags() | Other.getFlags();
545}
546
548 uint32_t MIFlags = 0;
549 // Copy the wrapping flags.
550 if (const OverflowingBinaryOperator *OB =
551 dyn_cast<OverflowingBinaryOperator>(&I)) {
552 if (OB->hasNoSignedWrap())
554 if (OB->hasNoUnsignedWrap())
556 }
557
558 // Copy the exact flag.
559 if (const PossiblyExactOperator *PE = dyn_cast<PossiblyExactOperator>(&I))
560 if (PE->isExact())
562
563 // Copy the fast-math flags.
564 if (const FPMathOperator *FP = dyn_cast<FPMathOperator>(&I)) {
565 const FastMathFlags Flags = FP->getFastMathFlags();
566 if (Flags.noNaNs())
568 if (Flags.noInfs())
570 if (Flags.noSignedZeros())
572 if (Flags.allowReciprocal())
574 if (Flags.allowContract())
576 if (Flags.approxFunc())
578 if (Flags.allowReassoc())
580 }
581
582 if (I.getMetadata(LLVMContext::MD_unpredictable))
584
585 return MIFlags;
586}
587
590}
591
592bool MachineInstr::hasPropertyInBundle(uint64_t Mask, QueryType Type) const {
593 assert(!isBundledWithPred() && "Must be called on bundle header");
595 if (MII->getDesc().getFlags() & Mask) {
596 if (Type == AnyInBundle)
597 return true;
598 } else {
599 if (Type == AllInBundle && !MII->isBundle())
600 return false;
601 }
602 // This was the last instruction in the bundle.
603 if (!MII->isBundledWithSucc())
604 return Type == AllInBundle;
605 }
606}
607
609 MICheckType Check) const {
610 // If opcodes or number of operands are not the same then the two
611 // instructions are obviously not identical.
612 if (Other.getOpcode() != getOpcode() ||
613 Other.getNumOperands() != getNumOperands())
614 return false;
615
616 if (isBundle()) {
617 // We have passed the test above that both instructions have the same
618 // opcode, so we know that both instructions are bundles here. Let's compare
619 // MIs inside the bundle.
620 assert(Other.isBundle() && "Expected that both instructions are bundles.");
    // Loop until we have analyzed the last instruction inside at least one of
624 // bundles.
625 while (I1->isBundledWithSucc() && I2->isBundledWithSucc()) {
626 ++I1;
627 ++I2;
628 if (!I1->isIdenticalTo(*I2, Check))
629 return false;
630 }
631 // If we've reached the end of just one of the two bundles, but not both,
632 // the instructions are not identical.
633 if (I1->isBundledWithSucc() || I2->isBundledWithSucc())
634 return false;
635 }
636
637 // Check operands to make sure they match.
638 for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
639 const MachineOperand &MO = getOperand(i);
640 const MachineOperand &OMO = Other.getOperand(i);
641 if (!MO.isReg()) {
642 if (!MO.isIdenticalTo(OMO))
643 return false;
644 continue;
645 }
646
647 // Clients may or may not want to ignore defs when testing for equality.
648 // For example, machine CSE pass only cares about finding common
649 // subexpressions, so it's safe to ignore virtual register defs.
650 if (MO.isDef()) {
651 if (Check == IgnoreDefs)
652 continue;
653 else if (Check == IgnoreVRegDefs) {
654 if (!MO.getReg().isVirtual() || !OMO.getReg().isVirtual())
655 if (!MO.isIdenticalTo(OMO))
656 return false;
657 } else {
658 if (!MO.isIdenticalTo(OMO))
659 return false;
660 if (Check == CheckKillDead && MO.isDead() != OMO.isDead())
661 return false;
662 }
663 } else {
664 if (!MO.isIdenticalTo(OMO))
665 return false;
666 if (Check == CheckKillDead && MO.isKill() != OMO.isKill())
667 return false;
668 }
669 }
670 // If DebugLoc does not match then two debug instructions are not identical.
671 if (isDebugInstr())
672 if (getDebugLoc() && Other.getDebugLoc() &&
673 getDebugLoc() != Other.getDebugLoc())
674 return false;
675 // If pre- or post-instruction symbols do not match then the two instructions
676 // are not identical.
677 if (getPreInstrSymbol() != Other.getPreInstrSymbol() ||
678 getPostInstrSymbol() != Other.getPostInstrSymbol())
679 return false;
680 // Call instructions with different CFI types are not identical.
681 if (isCall() && getCFIType() != Other.getCFIType())
682 return false;
683
684 return true;
685}
686
688 if (!isDebugValueLike() || !Other.isDebugValueLike())
689 return false;
690 if (getDebugLoc() != Other.getDebugLoc())
691 return false;
692 if (getDebugVariable() != Other.getDebugVariable())
693 return false;
694 if (getNumDebugOperands() != Other.getNumDebugOperands())
695 return false;
696 for (unsigned OpIdx = 0; OpIdx < getNumDebugOperands(); ++OpIdx)
697 if (!getDebugOperand(OpIdx).isIdenticalTo(Other.getDebugOperand(OpIdx)))
698 return false;
701 Other.getDebugExpression(), Other.isIndirectDebugValue()))
702 return false;
703 return true;
704}
705
707 return getParent()->getParent();
708}
709
711 assert(getParent() && "Not embedded in a basic block!");
712 return getParent()->remove(this);
713}
714
716 assert(getParent() && "Not embedded in a basic block!");
717 return getParent()->remove_instr(this);
718}
719
721 assert(getParent() && "Not embedded in a basic block!");
722 getParent()->erase(this);
723}
724
726 assert(getParent() && "Not embedded in a basic block!");
727 getParent()->erase_instr(this);
728}
729
731 if (!isCall(Type))
732 return false;
733 switch (getOpcode()) {
734 case TargetOpcode::PATCHPOINT:
735 case TargetOpcode::STACKMAP:
736 case TargetOpcode::STATEPOINT:
737 case TargetOpcode::FENTRY_CALL:
738 return false;
739 }
740 return true;
741}
742
744 if (isBundle())
747}
748
750 unsigned NumOperands = MCID->getNumOperands();
751 if (!MCID->isVariadic())
752 return NumOperands;
753
754 for (unsigned I = NumOperands, E = getNumOperands(); I != E; ++I) {
755 const MachineOperand &MO = getOperand(I);
756 // The operands must always be in the following order:
757 // - explicit reg defs,
758 // - other explicit operands (reg uses, immediates, etc.),
759 // - implicit reg defs
760 // - implicit reg uses
761 if (MO.isReg() && MO.isImplicit())
762 break;
763 ++NumOperands;
764 }
765 return NumOperands;
766}
767
769 unsigned NumDefs = MCID->getNumDefs();
770 if (!MCID->isVariadic())
771 return NumDefs;
772
773 for (unsigned I = NumDefs, E = getNumOperands(); I != E; ++I) {
774 const MachineOperand &MO = getOperand(I);
775 if (!MO.isReg() || !MO.isDef() || MO.isImplicit())
776 break;
777 ++NumDefs;
778 }
779 return NumDefs;
780}
781
783 assert(!isBundledWithPred() && "MI is already bundled with its predecessor");
786 --Pred;
787 assert(!Pred->isBundledWithSucc() && "Inconsistent bundle flags");
788 Pred->setFlag(BundledSucc);
789}
790
792 assert(!isBundledWithSucc() && "MI is already bundled with its successor");
795 ++Succ;
796 assert(!Succ->isBundledWithPred() && "Inconsistent bundle flags");
797 Succ->setFlag(BundledPred);
798}
799
801 assert(isBundledWithPred() && "MI isn't bundled with its predecessor");
804 --Pred;
805 assert(Pred->isBundledWithSucc() && "Inconsistent bundle flags");
806 Pred->clearFlag(BundledSucc);
807}
808
810 assert(isBundledWithSucc() && "MI isn't bundled with its successor");
813 ++Succ;
814 assert(Succ->isBundledWithPred() && "Inconsistent bundle flags");
815 Succ->clearFlag(BundledPred);
816}
817
819 if (isInlineAsm()) {
820 unsigned ExtraInfo = getOperand(InlineAsm::MIOp_ExtraInfo).getImm();
821 if (ExtraInfo & InlineAsm::Extra_IsAlignStack)
822 return true;
823 }
824 return false;
825}
826
828 assert(isInlineAsm() && "getInlineAsmDialect() only works for inline asms!");
829 unsigned ExtraInfo = getOperand(InlineAsm::MIOp_ExtraInfo).getImm();
830 return InlineAsm::AsmDialect((ExtraInfo & InlineAsm::Extra_AsmDialect) != 0);
831}
832
834 unsigned *GroupNo) const {
835 assert(isInlineAsm() && "Expected an inline asm instruction");
836 assert(OpIdx < getNumOperands() && "OpIdx out of range");
837
838 // Ignore queries about the initial operands.
840 return -1;
841
842 unsigned Group = 0;
843 unsigned NumOps;
844 for (unsigned i = InlineAsm::MIOp_FirstOperand, e = getNumOperands(); i < e;
845 i += NumOps) {
846 const MachineOperand &FlagMO = getOperand(i);
847 // If we reach the implicit register operands, stop looking.
848 if (!FlagMO.isImm())
849 return -1;
850 const InlineAsm::Flag F(FlagMO.getImm());
851 NumOps = 1 + F.getNumOperandRegisters();
852 if (i + NumOps > OpIdx) {
853 if (GroupNo)
854 *GroupNo = Group;
855 return i;
856 }
857 ++Group;
858 }
859 return -1;
860}
861
863 assert(isDebugLabel() && "not a DBG_LABEL");
864 return cast<DILabel>(getOperand(0).getMetadata());
865}
866
868 assert((isDebugValueLike()) && "not a DBG_VALUE*");
869 unsigned VariableOp = isNonListDebugValue() ? 2 : 0;
870 return getOperand(VariableOp);
871}
872
874 assert((isDebugValueLike()) && "not a DBG_VALUE*");
875 unsigned VariableOp = isNonListDebugValue() ? 2 : 0;
876 return getOperand(VariableOp);
877}
878
880 return cast<DILocalVariable>(getDebugVariableOp().getMetadata());
881}
882
884 assert((isDebugValueLike()) && "not a DBG_VALUE*");
885 unsigned ExpressionOp = isNonListDebugValue() ? 3 : 1;
886 return getOperand(ExpressionOp);
887}
888
890 assert((isDebugValueLike()) && "not a DBG_VALUE*");
891 unsigned ExpressionOp = isNonListDebugValue() ? 3 : 1;
892 return getOperand(ExpressionOp);
893}
894
896 return cast<DIExpression>(getDebugExpressionOp().getMetadata());
897}
898
901}
902
905 const TargetInstrInfo *TII,
906 const TargetRegisterInfo *TRI) const {
907 assert(getParent() && "Can't have an MBB reference here!");
908 assert(getMF() && "Can't have an MF reference here!");
909 const MachineFunction &MF = *getMF();
910
911 // Most opcodes have fixed constraints in their MCInstrDesc.
912 if (!isInlineAsm())
913 return TII->getRegClass(getDesc(), OpIdx, TRI, MF);
914
915 if (!getOperand(OpIdx).isReg())
916 return nullptr;
917
918 // For tied uses on inline asm, get the constraint from the def.
919 unsigned DefIdx;
920 if (getOperand(OpIdx).isUse() && isRegTiedToDefOperand(OpIdx, &DefIdx))
921 OpIdx = DefIdx;
922
923 // Inline asm stores register class constraints in the flag word.
924 int FlagIdx = findInlineAsmFlagIdx(OpIdx);
925 if (FlagIdx < 0)
926 return nullptr;
927
928 const InlineAsm::Flag F(getOperand(FlagIdx).getImm());
929 unsigned RCID;
930 if ((F.isRegUseKind() || F.isRegDefKind() || F.isRegDefEarlyClobberKind()) &&
931 F.hasRegClassConstraint(RCID))
932 return TRI->getRegClass(RCID);
933
934 // Assume that all registers in a memory operand are pointers.
935 if (F.isMemKind())
936 return TRI->getPointerRegClass(MF);
937
938 return nullptr;
939}
940
942 Register Reg, const TargetRegisterClass *CurRC, const TargetInstrInfo *TII,
943 const TargetRegisterInfo *TRI, bool ExploreBundle) const {
944 // Check every operands inside the bundle if we have
945 // been asked to.
946 if (ExploreBundle)
947 for (ConstMIBundleOperands OpndIt(*this); OpndIt.isValid() && CurRC;
948 ++OpndIt)
949 CurRC = OpndIt->getParent()->getRegClassConstraintEffectForVRegImpl(
950 OpndIt.getOperandNo(), Reg, CurRC, TII, TRI);
951 else
952 // Otherwise, just check the current operands.
953 for (unsigned i = 0, e = NumOperands; i < e && CurRC; ++i)
954 CurRC = getRegClassConstraintEffectForVRegImpl(i, Reg, CurRC, TII, TRI);
955 return CurRC;
956}
957
958const TargetRegisterClass *MachineInstr::getRegClassConstraintEffectForVRegImpl(
959 unsigned OpIdx, Register Reg, const TargetRegisterClass *CurRC,
960 const TargetInstrInfo *TII, const TargetRegisterInfo *TRI) const {
961 assert(CurRC && "Invalid initial register class");
962 // Check if Reg is constrained by some of its use/def from MI.
963 const MachineOperand &MO = getOperand(OpIdx);
964 if (!MO.isReg() || MO.getReg() != Reg)
965 return CurRC;
966 // If yes, accumulate the constraints through the operand.
967 return getRegClassConstraintEffect(OpIdx, CurRC, TII, TRI);
968}
969
971 unsigned OpIdx, const TargetRegisterClass *CurRC,
972 const TargetInstrInfo *TII, const TargetRegisterInfo *TRI) const {
973 const TargetRegisterClass *OpRC = getRegClassConstraint(OpIdx, TII, TRI);
974 const MachineOperand &MO = getOperand(OpIdx);
975 assert(MO.isReg() &&
976 "Cannot get register constraints for non-register operand");
977 assert(CurRC && "Invalid initial register class");
978 if (unsigned SubIdx = MO.getSubReg()) {
979 if (OpRC)
980 CurRC = TRI->getMatchingSuperRegClass(CurRC, OpRC, SubIdx);
981 else
982 CurRC = TRI->getSubClassWithSubReg(CurRC, SubIdx);
983 } else if (OpRC)
984 CurRC = TRI->getCommonSubClass(CurRC, OpRC);
985 return CurRC;
986}
987
988/// Return the number of instructions inside the MI bundle, not counting the
989/// header instruction.
992 unsigned Size = 0;
993 while (I->isBundledWithSucc()) {
994 ++Size;
995 ++I;
996 }
997 return Size;
998}
999
1000/// Returns true if the MachineInstr has an implicit-use operand of exactly
1001/// the given register (not considering sub/super-registers).
1003 for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
1004 const MachineOperand &MO = getOperand(i);
1005 if (MO.isReg() && MO.isUse() && MO.isImplicit() && MO.getReg() == Reg)
1006 return true;
1007 }
1008 return false;
1009}
1010
1011/// findRegisterUseOperandIdx() - Returns the MachineOperand that is a use of
1012/// the specific register or -1 if it is not found. It further tightens
1013/// the search criteria to a use that kills the register if isKill is true.
1015 Register Reg, bool isKill, const TargetRegisterInfo *TRI) const {
1016 for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
1017 const MachineOperand &MO = getOperand(i);
1018 if (!MO.isReg() || !MO.isUse())
1019 continue;
1020 Register MOReg = MO.getReg();
1021 if (!MOReg)
1022 continue;
1023 if (MOReg == Reg || (TRI && Reg && MOReg && TRI->regsOverlap(MOReg, Reg)))
1024 if (!isKill || MO.isKill())
1025 return i;
1026 }
1027 return -1;
1028}
1029
1030/// readsWritesVirtualRegister - Return a pair of bools (reads, writes)
1031/// indicating if this instruction reads or writes Reg. This also considers
1032/// partial defines.
1033std::pair<bool,bool>
1035 SmallVectorImpl<unsigned> *Ops) const {
1036 bool PartDef = false; // Partial redefine.
1037 bool FullDef = false; // Full define.
1038 bool Use = false;
1039
1040 for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
1041 const MachineOperand &MO = getOperand(i);
1042 if (!MO.isReg() || MO.getReg() != Reg)
1043 continue;
1044 if (Ops)
1045 Ops->push_back(i);
1046 if (MO.isUse())
1047 Use |= !MO.isUndef();
1048 else if (MO.getSubReg() && !MO.isUndef())
1049 // A partial def undef doesn't count as reading the register.
1050 PartDef = true;
1051 else
1052 FullDef = true;
1053 }
1054 // A partial redefine uses Reg unless there is also a full define.
1055 return std::make_pair(Use || (PartDef && !FullDef), PartDef || FullDef);
1056}
1057
1058/// findRegisterDefOperandIdx() - Returns the operand index that is a def of
1059/// the specified register or -1 if it is not found. If isDead is true, defs
1060/// that are not dead are skipped. If TargetRegisterInfo is non-null, then it
1061/// also checks if there is a def of a super-register.
1062int
// NOTE(review): the parameter-list line naming Reg/isDead/Overlap is missing
// from this extract; the flags are used in the body below.
1064 const TargetRegisterInfo *TRI) const {
1065 bool isPhys = Reg.isPhysical();
1066 for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
1067 const MachineOperand &MO = getOperand(i);
1068 // Accept regmask operands when Overlap is set.
1069 // Ignore them when looking for a specific def operand (Overlap == false).
1070 if (isPhys && Overlap && MO.isRegMask() && MO.clobbersPhysReg(Reg))
1071 return i;
1072 if (!MO.isReg() || !MO.isDef())
1073 continue;
1074 Register MOReg = MO.getReg();
1075 bool Found = (MOReg == Reg);
// For physical registers, optionally widen the match to overlapping or
// super-registers depending on the Overlap flag.
1076 if (!Found && TRI && isPhys && MOReg.isPhysical()) {
1077 if (Overlap)
1078 Found = TRI->regsOverlap(MOReg, Reg);
1079 else
1080 Found = TRI->isSubRegister(MOReg, Reg);
1081 }
// When isDead is requested, only defs carrying the dead flag qualify.
1082 if (Found && (!isDead || MO.isDead()))
1083 return i;
1084 }
1085 return -1;
1086}
1087
1088/// findFirstPredOperandIdx() - Find the index of the first operand in the
1089/// operand list that is used to represent the predicate. It returns -1 if
1090/// none is found.
// NOTE(review): the `findFirstPredOperandIdx()` signature line is not visible
// in this extract; the body below scans the current operand list directly.
1092 // Don't call MCID.findFirstPredOperandIdx() because this variant
1093 // is sometimes called on an instruction that's not yet complete, and
1094 // so the number of operands is less than the MCID indicates. In
1095 // particular, the PTX target does this.
1096 const MCInstrDesc &MCID = getDesc();
1097 if (MCID.isPredicable()) {
1098 for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
// Consult the static operand descriptions for the predicate marker.
1099 if (MCID.operands()[i].isPredicate())
1100 return i;
1101 }
1102
1103 return -1;
1104}
1105
1106// MachineOperand::TiedTo is 4 bits wide.
1107const unsigned TiedMax = 15;
1108
1109/// tieOperands - Mark operands at DefIdx and UseIdx as tied to each other.
1110///
1111/// Use and def operands can be tied together, indicated by a non-zero TiedTo
1112/// field. TiedTo can have these values:
1113///
1114/// 0: Operand is not tied to anything.
1115/// 1 to TiedMax-1: Tied to getOperand(TiedTo-1).
1116/// TiedMax: Tied to an operand >= TiedMax-1.
1117///
1118/// The tied def must be one of the first TiedMax operands on a normal
1119/// instruction. INLINEASM instructions allow more tied defs.
1120///
1121void MachineInstr::tieOperands(unsigned DefIdx, unsigned UseIdx) {
1122 MachineOperand &DefMO = getOperand(DefIdx);
1123 MachineOperand &UseMO = getOperand(UseIdx);
1124 assert(DefMO.isDef() && "DefIdx must be a def operand");
1125 assert(UseMO.isUse() && "UseIdx must be a use operand");
1126 assert(!DefMO.isTied() && "Def is already tied to another use");
1127 assert(!UseMO.isTied() && "Use is already tied to another def");
1128
1129 if (DefIdx < TiedMax)
1130 UseMO.TiedTo = DefIdx + 1;
1131 else {
1132 // Inline asm can use the group descriptors to find tied operands,
1133 // statepoint tied operands are trivial to match (1-1 reg def with reg use),
1134 // but on normal instruction, the tied def must be within the first TiedMax
1135 // operands.
1136 assert((isInlineAsm() || getOpcode() == TargetOpcode::STATEPOINT) &&
1137 "DefIdx out of range");
1138 UseMO.TiedTo = TiedMax;
1139 }
1140
1141 // UseIdx can be out of range, we'll search for it in findTiedOperandIdx().
1142 DefMO.TiedTo = std::min(UseIdx + 1, TiedMax);
1143}
1144
1145/// Given the index of a tied register operand, find the operand it is tied to.
1146/// Defs are tied to uses and vice versa. Returns the index of the tied operand
1147/// which must exist.
1148unsigned MachineInstr::findTiedOperandIdx(unsigned OpIdx) const {
1149 const MachineOperand &MO = getOperand(OpIdx);
1150 assert(MO.isTied() && "Operand isn't tied");
1151
1152 // Normally TiedTo is in range.
1153 if (MO.TiedTo < TiedMax)
1154 return MO.TiedTo - 1;
1155
1156 // Uses on normal instructions can be out of range.
1157 if (!isInlineAsm() && getOpcode() != TargetOpcode::STATEPOINT) {
1158 // Normal tied defs must be in the 0..TiedMax-1 range.
1159 if (MO.isUse())
1160 return TiedMax - 1;
1161 // MO is a def. Search for the tied use.
// Uses tied past the representable range start at index TiedMax-1.
1162 for (unsigned i = TiedMax - 1, e = getNumOperands(); i != e; ++i) {
1163 const MachineOperand &UseMO = getOperand(i);
// A use whose TiedTo encodes OpIdx+1 is the partner of this def.
1164 if (UseMO.isReg() && UseMO.isUse() && UseMO.TiedTo == OpIdx + 1)
1165 return i;
1166 }
1167 llvm_unreachable("Can't find tied use");
1168 }
1169
1170 if (getOpcode() == TargetOpcode::STATEPOINT) {
1171 // In STATEPOINT defs correspond 1-1 to GC pointer operands passed
1172 // on registers.
1173 StatepointOpers SO(this);
1174 unsigned CurUseIdx = SO.getFirstGCPtrIdx();
1175 assert(CurUseIdx != -1U && "only gc pointer statepoint operands can be tied");
1176 unsigned NumDefs = getNumDefs();
// Walk defs and GC pointer register operands in lock-step; either side of
// the queried index returns its partner.
1177 for (unsigned CurDefIdx = 0; CurDefIdx < NumDefs; ++CurDefIdx) {
// Skip non-register meta arguments to reach the next register operand.
1178 while (!getOperand(CurUseIdx).isReg())
1179 CurUseIdx = StackMaps::getNextMetaArgIdx(this, CurUseIdx);
1180 if (OpIdx == CurDefIdx)
1181 return CurUseIdx;
1182 if (OpIdx == CurUseIdx)
1183 return CurDefIdx;
1184 CurUseIdx = StackMaps::getNextMetaArgIdx(this, CurUseIdx);
1185 }
1186 llvm_unreachable("Can't find tied use");
1187 }
1188
1189 // Now deal with inline asm by parsing the operand group descriptor flags.
1190 // Find the beginning of each operand group.
1191 SmallVector<unsigned, 8> GroupIdx;
1192 unsigned OpIdxGroup = ~0u;
1193 unsigned NumOps;
1194 for (unsigned i = InlineAsm::MIOp_FirstOperand, e = getNumOperands(); i < e;
1195 i += NumOps) {
1196 const MachineOperand &FlagMO = getOperand(i);
1197 assert(FlagMO.isImm() && "Invalid tied operand on inline asm");
1198 unsigned CurGroup = GroupIdx.size();
1199 GroupIdx.push_back(i);
1200 const InlineAsm::Flag F(FlagMO.getImm());
// Each group is one flag immediate followed by its register operands.
1201 NumOps = 1 + F.getNumOperandRegisters();
1202 // OpIdx belongs to this operand group.
1203 if (OpIdx > i && OpIdx < i + NumOps)
1204 OpIdxGroup = CurGroup;
1205 unsigned TiedGroup;
1206 if (!F.isUseOperandTiedToDef(TiedGroup))
1207 continue;
1208 // Operands in this group are tied to operands in TiedGroup which must be
1209 // earlier. Find the number of operands between the two groups.
1210 unsigned Delta = i - GroupIdx[TiedGroup];
1211
1212 // OpIdx is a use tied to TiedGroup.
1213 if (OpIdxGroup == CurGroup)
1214 return OpIdx - Delta;
1215
1216 // OpIdx is a def tied to this use group.
1217 if (OpIdxGroup == TiedGroup)
1218 return OpIdx + Delta;
1219 }
1220 llvm_unreachable("Invalid tied operand on inline asm");
1221}
1222
1223/// clearKillInfo - Clears kill flags on all operands.
1224///
// NOTE(review): the `clearKillInfo()` signature line is not visible in this
// extract; the body below resets the kill flag on every register use.
1226 for (MachineOperand &MO : operands()) {
1227 if (MO.isReg() && MO.isUse())
1228 MO.setIsKill(false);
1229 }
1230}
1231
// Replace every operand referencing FromReg with ToReg (applying SubIdx),
// dispatching to the physical- or virtual-register substitution helper.
// NOTE(review): the leading signature line naming FromReg/ToReg is missing
// from this extract.
1233 unsigned SubIdx,
1234 const TargetRegisterInfo &RegInfo) {
1235 if (ToReg.isPhysical()) {
// For a physical target register, fold the subregister index up front.
1236 if (SubIdx)
1237 ToReg = RegInfo.getSubReg(ToReg, SubIdx);
1238 for (MachineOperand &MO : operands()) {
1239 if (!MO.isReg() || MO.getReg() != FromReg)
1240 continue;
1241 MO.substPhysReg(ToReg, RegInfo);
1242 }
1243 } else {
// Virtual target register: let substVirtReg compose subregister indices.
1244 for (MachineOperand &MO : operands()) {
1245 if (!MO.isReg() || MO.getReg() != FromReg)
1246 continue;
1247 MO.substVirtReg(ToReg, SubIdx, RegInfo);
1248 }
1249 }
1250}
1251
1252/// isSafeToMove - Return true if it is safe to move this instruction. If
1253/// SawStore is set to true, it means that there is a store (or call) between
1254/// the instruction's location and its intended destination.
1255bool MachineInstr::isSafeToMove(AAResults *AA, bool &SawStore) const {
1256 // Ignore stuff that we obviously can't move.
1257 //
1258 // Treat volatile loads as stores. This is not strictly necessary for
1259 // volatiles, but it is required for atomic loads. It is not allowed to move
1260 // a load across an atomic load with Ordering > Monotonic.
1261 if (mayStore() || isCall() || isPHI() ||
1262 (mayLoad() && hasOrderedMemoryRef())) {
// Record the store-like barrier for the caller before refusing.
1263 SawStore = true;
1264 return false;
1265 }
1266
// NOTE(review): the continuation of this condition (original lines 1268-1269)
// is missing from this extract; the `return false` below belongs to it.
1267 if (isPosition() || isDebugInstr() || isTerminator() ||
1270 return false;
1271
1272 // See if this instruction does a load. If so, we have to guarantee that the
1273 // loaded value doesn't change between the load and the its intended
1274 // destination. The check for isInvariantLoad gives the target the chance to
1275 // classify the load as always returning a constant, e.g. a constant pool
1276 // load.
// NOTE(review): the guarding `if` (original line 1277) is missing from this
// extract; the `return !SawStore` below is its body.
1278 // Otherwise, this is a real load. If there is a store between the load and
1279 // end of block, we can't move it.
1280 return !SawStore;
1281
1282 return true;
1283}
1284
// Decide whether two memory operands may reference overlapping memory,
// first by local reasoning on values/offsets/widths, then via alias analysis.
// NOTE(review): the leading signature line (naming MFI and AA) is missing
// from this extract.
1286 bool UseTBAA, const MachineMemOperand *MMOa,
1287 const MachineMemOperand *MMOb) {
1288 // The following interface to AA is fashioned after DAGCombiner::isAlias and
1289 // operates with MachineMemOperand offset with some important assumptions:
1290 // - LLVM fundamentally assumes flat address spaces.
1291 // - MachineOperand offset can *only* result from legalization and cannot
1292 // affect queries other than the trivial case of overlap checking.
1293 // - These offsets never wrap and never step outside of allocated objects.
1294 // - There should never be any negative offsets here.
1295 //
1296 // FIXME: Modify API to hide this math from "user"
1297 // Even before we go to AA we can reason locally about some memory objects. It
1298 // can save compile time, and possibly catch some corner cases not currently
1299 // covered.
1300
1301 int64_t OffsetA = MMOa->getOffset();
1302 int64_t OffsetB = MMOb->getOffset();
1303 int64_t MinOffset = std::min(OffsetA, OffsetB);
1304
1305 uint64_t WidthA = MMOa->getSize();
1306 uint64_t WidthB = MMOb->getSize();
// An unknown width forces conservative answers below.
1307 bool KnownWidthA = WidthA != MemoryLocation::UnknownSize;
1308 bool KnownWidthB = WidthB != MemoryLocation::UnknownSize;
1309
1310 const Value *ValA = MMOa->getValue();
1311 const Value *ValB = MMOb->getValue();
1312 bool SameVal = (ValA && ValB && (ValA == ValB));
1313 if (!SameVal) {
// Pseudo source values (stack slots, constants, ...) can prove disjointness.
1314 const PseudoSourceValue *PSVa = MMOa->getPseudoValue();
1315 const PseudoSourceValue *PSVb = MMOb->getPseudoValue();
1316 if (PSVa && ValB && !PSVa->mayAlias(&MFI))
1317 return false;
1318 if (PSVb && ValA && !PSVb->mayAlias(&MFI))
1319 return false;
1320 if (PSVa && PSVb && (PSVa == PSVb))
1321 SameVal = true;
1322 }
1323
1324 if (SameVal) {
1325 if (!KnownWidthA || !KnownWidthB)
1326 return true;
// Same underlying object: overlap iff the lower access reaches the upper.
1327 int64_t MaxOffset = std::max(OffsetA, OffsetB);
1328 int64_t LowWidth = (MinOffset == OffsetA) ? WidthA : WidthB;
1329 return (MinOffset + LowWidth > MaxOffset);
1330 }
1331
// Without alias analysis or IR values, conservatively assume aliasing.
1332 if (!AA)
1333 return true;
1334
1335 if (!ValA || !ValB)
1336 return true;
1337
1338 assert((OffsetA >= 0) && "Negative MachineMemOperand offset");
1339 assert((OffsetB >= 0) && "Negative MachineMemOperand offset");
1340
// Extend each access size so both locations are measured from MinOffset.
1341 int64_t OverlapA =
1342 KnownWidthA ? WidthA + OffsetA - MinOffset : MemoryLocation::UnknownSize;
1343 int64_t OverlapB =
1344 KnownWidthB ? WidthB + OffsetB - MinOffset : MemoryLocation::UnknownSize;
1345
1346 return !AA->isNoAlias(
1347 MemoryLocation(ValA, OverlapA, UseTBAA ? MMOa->getAAInfo() : AAMDNodes()),
1348 MemoryLocation(ValB, OverlapB,
1349 UseTBAA ? MMOb->getAAInfo() : AAMDNodes()));
1350}
1351
// Conservatively decide whether this instruction and Other may access
// overlapping memory, checking every pair of memory operands.
// NOTE(review): the leading signature line (naming AA and Other) is missing
// from this extract, as is original line 1355 between the next two lines.
1353 bool UseTBAA) const {
1354 const MachineFunction *MF = getMF();
1356 const MachineFrameInfo &MFI = MF->getFrameInfo();
1357
1358 // Exclude call instruction which may alter the memory but can not be handled
1359 // by this function.
1360 if (isCall() || Other.isCall())
1361 return true;
1362
1363 // If neither instruction stores to memory, they can't alias in any
1364 // meaningful way, even if they read from the same address.
1365 if (!mayStore() && !Other.mayStore())
1366 return false;
1367
1368 // Both instructions must be memory operations to be able to alias.
1369 if (!mayLoadOrStore() || !Other.mayLoadOrStore())
1370 return false;
1371
1372 // Let the target decide if memory accesses cannot possibly overlap.
// NOTE(review): the target hook call guarding this early exit (original line
// 1373) is missing from this extract.
1374 return false;
1375
1376 // Memory operations without memory operands may access anything. Be
1377 // conservative and assume `MayAlias`.
1378 if (memoperands_empty() || Other.memoperands_empty())
1379 return true;
1380
1381 // Skip if there are too many memory operands.
1382 auto NumChecks = getNumMemOperands() * Other.getNumMemOperands();
1383 if (NumChecks > TII->getMemOperandAACheckLimit())
1384 return true;
1385
1386 // Check each pair of memory operands from both instructions, which can't
1387 // alias only if all pairs won't alias.
1388 for (auto *MMOa : memoperands())
1389 for (auto *MMOb : Other.memoperands())
1390 if (MemOperandsHaveAlias(MFI, AA, UseTBAA, MMOa, MMOb))
1391 return true;
1392
1393 return false;
1394}
1395
1396/// hasOrderedMemoryRef - Return true if this instruction may have an ordered
1397/// or volatile memory reference, or if the information describing the memory
1398/// reference is not available. Return false if it is known to have no ordered
1399/// memory references.
// NOTE(review): the `hasOrderedMemoryRef()` signature line and the final
// clause of the condition below (original lines 1400 and 1405) are missing
// from this extract.
1401 // An instruction known never to access memory won't have a volatile access.
1402 if (!mayStore() &&
1403 !mayLoad() &&
1404 !isCall() &&
1406 return false;
1407
1408 // Otherwise, if the instruction has no memory reference information,
1409 // conservatively assume it wasn't preserved.
1410 if (memoperands_empty())
1411 return true;
1412
1413 // Check if any of our memory operands are ordered.
1414 return llvm::any_of(memoperands(), [](const MachineMemOperand *MMO) {
1415 return !MMO->isUnordered();
1416 });
1417}
1418
1419/// isDereferenceableInvariantLoad - Return true if this instruction will never
1420/// trap and is loading from a location whose value is invariant across a run of
1421/// this function.
// NOTE(review): the `isDereferenceableInvariantLoad()` signature line is not
// visible in this extract.
1423 // If the instruction doesn't load at all, it isn't an invariant load.
1424 if (!mayLoad())
1425 return false;
1426
1427 // If the instruction has lost its memoperands, conservatively assume that
1428 // it may not be an invariant load.
1429 if (memoperands_empty())
1430 return false;
1431
1432 const MachineFrameInfo &MFI = getParent()->getParent()->getFrameInfo();
1433
// Every memory operand must independently be shown invariant and safe.
1434 for (MachineMemOperand *MMO : memoperands()) {
1435 if (!MMO->isUnordered())
1436 // If the memory operand has ordering side effects, we can't move the
1437 // instruction. Such an instruction is technically an invariant load,
1438 // but the caller code would need updated to expect that.
1439 return false;
1440 if (MMO->isStore()) return false;
1441 if (MMO->isInvariant() && MMO->isDereferenceable())
1442 continue;
1443
1444 // A load from a constant PseudoSourceValue is invariant.
1445 if (const PseudoSourceValue *PSV = MMO->getPseudoValue()) {
1446 if (PSV->isConstant(&MFI))
1447 continue;
1448 }
1449
1450 // Otherwise assume conservatively.
1451 return false;
1452 }
1453
1454 // Everything checks out.
1455 return true;
1456}
1457
1458/// isConstantValuePHI - If the specified instruction is a PHI that always
1459/// merges together the same virtual register, return the register, otherwise
1460/// return 0.
// NOTE(review): the `isConstantValuePHI()` signature line is not visible in
// this extract.
1462 if (!isPHI())
1463 return 0;
1464 assert(getNumOperands() >= 3 &&
1465 "It's illegal to have a PHI without source operands");
1466
// PHI operands come in (value, block) pairs starting at index 1; compare
// every incoming value register against the first.
1467 Register Reg = getOperand(1).getReg();
1468 for (unsigned i = 3, e = getNumOperands(); i < e; i += 2)
1469 if (getOperand(i).getReg() != Reg)
1470 return 0;
1471 return Reg;
1472}
1473
// NOTE(review): the opening of hasUnmodeledSideEffects() (original lines
// 1474-1475, the signature and the MCID property check) is missing from this
// extract; the `return true` below is the body of that missing check.
1476 return true;
// Inline asm can additionally declare side effects via its extra-info
// immediate operand.
1477 if (isInlineAsm()) {
1478 unsigned ExtraInfo = getOperand(InlineAsm::MIOp_ExtraInfo).getImm();
1479 if (ExtraInfo & InlineAsm::Extra_HasSideEffects)
1480 return true;
1481 }
1482
1483 return false;
1484}
1485
// NOTE(review): the signature of the next function (original line 1486) and
// the final clause of its return expression (original line 1488) are missing
// from this extract.
1487 return mayStore() || isCall() ||
1489}
1490
1491/// allDefsAreDead - Return true if all the defs of this instruction are dead.
1492///
// NOTE(review): the `allDefsAreDead()` signature line is not visible in this
// extract; the body below scans every def operand for the dead flag.
1494 for (const MachineOperand &MO : operands()) {
1495 if (!MO.isReg() || MO.isUse())
1496 continue;
1497 if (!MO.isDead())
1498 return false;
1499 }
1500 return true;
1501}
1502
// Return true if every implicit def operand carries the dead flag.
// NOTE(review): the signature line is not visible in this extract; the scan
// below covers implicit operands only.
1503 for (const MachineOperand &MO : implicit_operands()) {
1504 if (!MO.isReg() || MO.isUse())
1505 continue;
1506 if (!MO.isDead())
1507 return false;
1508 }
1509 return true;
1510}
1512
1513/// copyImplicitOps - Copy implicit register operands from specified
1514/// instruction to this instruction.
// NOTE(review): the leading signature line (naming MF) is missing from this
// extract.
1516 const MachineInstr &MI) {
// Everything past the statically described operands is a candidate; keep
// implicit register operands and regmasks.
1517 for (const MachineOperand &MO :
1518 llvm::drop_begin(MI.operands(), MI.getDesc().getNumOperands()))
1519 if ((MO.isReg() && MO.isImplicit()) || MO.isRegMask())
1520 addOperand(MF, MO);
1521}
1522
// Return true when the instruction's actual tied-operand pairing does not
// match the static MCInstrDesc constraints (so ties must be printed
// explicitly).
// NOTE(review): the `hasComplexRegisterTies()` signature line is not visible
// in this extract.
1524 const MCInstrDesc &MCID = getDesc();
// STATEPOINT ties are never statically described; always treat as complex.
1525 if (MCID.Opcode == TargetOpcode::STATEPOINT)
1526 return true;
1527 for (unsigned I = 0, E = getNumOperands(); I < E; ++I) {
1528 const auto &Operand = getOperand(I);
1529 if (!Operand.isReg() || Operand.isDef())
1530 // Ignore the defined registers as MCID marks only the uses as tied.
1531 continue;
1532 int ExpectedTiedIdx = MCID.getOperandConstraint(I, MCOI::TIED_TO);
1533 int TiedIdx = Operand.isTied() ? int(findTiedOperandIdx(I)) : -1;
1534 if (ExpectedTiedIdx != TiedIdx)
1535 return true;
1536 }
1537 return false;
1538}
1539
// Determine the LLT to print for operand OpIdx, returning an invalid LLT when
// nothing should be printed. PrintedTypes tracks generic type indices already
// emitted so each is printed at most once.
// NOTE(review): the leading signature line (naming OpIdx and PrintedTypes) is
// missing from this extract.
1541 const MachineRegisterInfo &MRI) const {
1542 const MachineOperand &Op = getOperand(OpIdx);
1543 if (!Op.isReg())
1544 return LLT{};
1545
// Variadic/implicit operands have no static description; print their type.
1546 if (isVariadic() || OpIdx >= getNumExplicitOperands())
1547 return MRI.getType(Op.getReg());
1548
1549 auto &OpInfo = getDesc().operands()[OpIdx];
1550 if (!OpInfo.isGenericType())
1551 return MRI.getType(Op.getReg());
1552
// Type for this generic index already printed earlier; suppress it here.
1553 if (PrintedTypes[OpInfo.getGenericTypeIndex()])
1554 return LLT{};
1555
1556 LLT TypeToPrint = MRI.getType(Op.getReg());
1557 // Don't mark the type index printed if it wasn't actually printed: maybe
1558 // another operand with the same type index has an actual type attached:
1559 if (TypeToPrint.isValid())
1560 PrintedTypes.set(OpInfo.getGenericTypeIndex());
1561 return TypeToPrint;
1562}
1563
1564#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
// Debug-build-only dump helpers follow.
// NOTE(review): the `dump()` signature line is not visible in this extract;
// the body below prints the instruction to the debug stream.
1566 dbgs() << " ";
1567 print(dbgs());
1568}
1569
1570LLVM_DUMP_METHOD void MachineInstr::dumprImpl(
1571 const MachineRegisterInfo &MRI, unsigned Depth, unsigned MaxDepth,
1572 SmallPtrSetImpl<const MachineInstr *> &AlreadySeenInstrs) const {
1573 if (Depth >= MaxDepth)
1574 return;
1575 if (!AlreadySeenInstrs.insert(this).second)
1576 return;
1577 // PadToColumn always inserts at least one space.
1578 // Don't mess up the alignment if we don't want any space.
1579 if (Depth)
1580 fdbgs().PadToColumn(Depth * 2);
1581 print(fdbgs());
1582 for (const MachineOperand &MO : operands()) {
1583 if (!MO.isReg() || MO.isDef())
1584 continue;
1585 Register Reg = MO.getReg();
1586 if (Reg.isPhysical())
1587 continue;
1588 const MachineInstr *NewMI = MRI.getUniqueVRegDef(Reg);
1589 if (NewMI == nullptr)
1590 continue;
1591 NewMI->dumprImpl(MRI, Depth + 1, MaxDepth, AlreadySeenInstrs);
1592 }
1593}
1594
// Public entry point for the recursive dump: seeds the visited set and starts
// dumprImpl at depth 0.
// NOTE(review): the leading signature line (naming MRI) is missing from this
// extract.
1596 unsigned MaxDepth) const {
1597 SmallPtrSet<const MachineInstr *, 16> AlreadySeenInstrs;
1598 dumprImpl(MRI, 0, MaxDepth, AlreadySeenInstrs);
1599}
1600#endif
1601
1602void MachineInstr::print(raw_ostream &OS, bool IsStandalone, bool SkipOpers,
1603 bool SkipDebugLoc, bool AddNewLine,
1604 const TargetInstrInfo *TII) const {
1605 const Module *M = nullptr;
1606 const Function *F = nullptr;
1607 if (const MachineFunction *MF = getMFIfAvailable(*this)) {
1608 F = &MF->getFunction();
1609 M = F->getParent();
1610 if (!TII)
1611 TII = MF->getSubtarget().getInstrInfo();
1612 }
1613
1614 ModuleSlotTracker MST(M);
1615 if (F)
1616 MST.incorporateFunction(*F);
1617 print(OS, MST, IsStandalone, SkipOpers, SkipDebugLoc, AddNewLine, TII);
1618}
1619
// Core MIR-style printer: defs, flags, opcode, operands (with inline-asm and
// debug-instruction pretty printing), attached symbols/metadata, memory
// operands, and debug location.
// NOTE(review): the leading signature line (raw_ostream &OS, const
// ModuleSlotTracker &MST, original line 1620) is missing from this extract,
// as are several single-line flag checks marked below.
1621 bool IsStandalone, bool SkipOpers, bool SkipDebugLoc,
1622 bool AddNewLine, const TargetInstrInfo *TII) const {
1623 // We can be a bit tidier if we know the MachineFunction.
1624 const TargetRegisterInfo *TRI = nullptr;
1625 const MachineRegisterInfo *MRI = nullptr;
1626 const TargetIntrinsicInfo *IntrinsicInfo = nullptr;
1627 tryToGetTargetInfo(*this, TRI, MRI, IntrinsicInfo, TII);
1628
1629 if (isCFIInstruction())
1630 assert(getNumOperands() == 1 && "Expected 1 operand in CFI instruction");
1631
1632 SmallBitVector PrintedTypes(8);
1633 bool ShouldPrintRegisterTies = IsStandalone || hasComplexRegisterTies();
// Resolve the tied partner index only when ties are printed at all.
1634 auto getTiedOperandIdx = [&](unsigned OpIdx) {
1635 if (!ShouldPrintRegisterTies)
1636 return 0U;
1637 const MachineOperand &MO = getOperand(OpIdx);
1638 if (MO.isReg() && MO.isTied() && !MO.isDef())
1639 return findTiedOperandIdx(OpIdx);
1640 return 0U;
1641 };
1642 unsigned StartOp = 0;
1643 unsigned e = getNumOperands();
1644
1645 // Print explicitly defined operands on the left of an assignment syntax.
1646 while (StartOp < e) {
1647 const MachineOperand &MO = getOperand(StartOp);
1648 if (!MO.isReg() || !MO.isDef() || MO.isImplicit())
1649 break;
1650
1651 if (StartOp != 0)
1652 OS << ", ";
1653
1654 LLT TypeToPrint = MRI ? getTypeToPrint(StartOp, PrintedTypes, *MRI) : LLT{};
1655 unsigned TiedOperandIdx = getTiedOperandIdx(StartOp);
1656 MO.print(OS, MST, TypeToPrint, StartOp, /*PrintDef=*/false, IsStandalone,
1657 ShouldPrintRegisterTies, TiedOperandIdx, TRI, IntrinsicInfo);
1658 ++StartOp;
1659 }
1660
1661 if (StartOp != 0)
1662 OS << " = ";
1663
// NOTE(review): each of the flag prints below was guarded by a one-line
// `if (getFlag(...))` style check; those guard lines (original even-numbered
// lines 1664-1690) are missing from this extract.
1665 OS << "frame-setup ";
1667 OS << "frame-destroy ";
1669 OS << "nnan ";
1671 OS << "ninf ";
1673 OS << "nsz ";
1675 OS << "arcp ";
1677 OS << "contract ";
1679 OS << "afn ";
1681 OS << "reassoc ";
1683 OS << "nuw ";
1685 OS << "nsw ";
1687 OS << "exact ";
1689 OS << "nofpexcept ";
1691 OS << "nomerge ";
1692
1693 // Print the opcode name.
1694 if (TII)
1695 OS << TII->getName(getOpcode());
1696 else
1697 OS << "UNKNOWN";
1698
1699 if (SkipOpers)
1700 return;
1701
1702 // Print the rest of the operands.
1703 bool FirstOp = true;
1704 unsigned AsmDescOp = ~0u;
1705 unsigned AsmOpCount = 0;
1706
// NOTE(review): the inline-asm guard opening this branch (original line 1707)
// is missing from this extract.
1708 // Print asm string.
1709 OS << " ";
1710 const unsigned OpIdx = InlineAsm::MIOp_AsmString;
1711 LLT TypeToPrint = MRI ? getTypeToPrint(OpIdx, PrintedTypes, *MRI) : LLT{};
1712 unsigned TiedOperandIdx = getTiedOperandIdx(OpIdx);
1713 getOperand(OpIdx).print(OS, MST, TypeToPrint, OpIdx, /*PrintDef=*/true, IsStandalone,
1714 ShouldPrintRegisterTies, TiedOperandIdx, TRI,
1715 IntrinsicInfo);
1716
1717 // Print HasSideEffects, MayLoad, MayStore, IsAlignStack
1718 unsigned ExtraInfo = getOperand(InlineAsm::MIOp_ExtraInfo).getImm();
1719 if (ExtraInfo & InlineAsm::Extra_HasSideEffects)
1720 OS << " [sideeffect]";
1721 if (ExtraInfo & InlineAsm::Extra_MayLoad)
1722 OS << " [mayload]";
1723 if (ExtraInfo & InlineAsm::Extra_MayStore)
1724 OS << " [maystore]";
1725 if (ExtraInfo & InlineAsm::Extra_IsConvergent)
1726 OS << " [isconvergent]";
1727 if (ExtraInfo & InlineAsm::Extra_IsAlignStack)
1728 OS << " [alignstack]";
// NOTE(review): the dialect condition lines (original 1729 and 1731) are
// missing from this extract.
1730 OS << " [attdialect]";
1732 OS << " [inteldialect]";
1733
1734 StartOp = AsmDescOp = InlineAsm::MIOp_FirstOperand;
1735 FirstOp = false;
1736 }
1737
1738 for (unsigned i = StartOp, e = getNumOperands(); i != e; ++i) {
1739 const MachineOperand &MO = getOperand(i);
1740
1741 if (FirstOp) FirstOp = false; else OS << ",";
1742 OS << " ";
1743
1744 if (isDebugValueLike() && MO.isMetadata()) {
1745 // Pretty print DBG_VALUE* instructions.
1746 auto *DIV = dyn_cast<DILocalVariable>(MO.getMetadata());
1747 if (DIV && !DIV->getName().empty())
1748 OS << "!\"" << DIV->getName() << '\"';
1749 else {
1750 LLT TypeToPrint = MRI ? getTypeToPrint(i, PrintedTypes, *MRI) : LLT{};
1751 unsigned TiedOperandIdx = getTiedOperandIdx(i);
1752 MO.print(OS, MST, TypeToPrint, i, /*PrintDef=*/true, IsStandalone,
1753 ShouldPrintRegisterTies, TiedOperandIdx, TRI, IntrinsicInfo);
1754 }
1755 } else if (isDebugLabel() && MO.isMetadata()) {
1756 // Pretty print DBG_LABEL instructions.
1757 auto *DIL = dyn_cast<DILabel>(MO.getMetadata());
1758 if (DIL && !DIL->getName().empty())
1759 OS << "\"" << DIL->getName() << '\"';
1760 else {
1761 LLT TypeToPrint = MRI ? getTypeToPrint(i, PrintedTypes, *MRI) : LLT{};
1762 unsigned TiedOperandIdx = getTiedOperandIdx(i);
1763 MO.print(OS, MST, TypeToPrint, i, /*PrintDef=*/true, IsStandalone,
1764 ShouldPrintRegisterTies, TiedOperandIdx, TRI, IntrinsicInfo);
1765 }
1766 } else if (i == AsmDescOp && MO.isImm()) {
1767 // Pretty print the inline asm operand descriptor.
1768 OS << '$' << AsmOpCount++;
1769 unsigned Flag = MO.getImm();
1770 const InlineAsm::Flag F(Flag);
1771 OS << ":[";
1772 OS << F.getKindName();
1773
1774 unsigned RCID;
1775 if (!F.isImmKind() && !F.isMemKind() && F.hasRegClassConstraint(RCID)) {
1776 if (TRI) {
1777 OS << ':' << TRI->getRegClassName(TRI->getRegClass(RCID));
1778 } else
1779 OS << ":RC" << RCID;
1780 }
1781
1782 if (F.isMemKind()) {
1783 const InlineAsm::ConstraintCode MCID = F.getMemoryConstraintID();
1784 OS << ":" << InlineAsm::getMemConstraintName(MCID);
1785 }
1786
1787 unsigned TiedTo;
1788 if (F.isUseOperandTiedToDef(TiedTo))
1789 OS << " tiedto:$" << TiedTo;
1790
1791 if ((F.isRegDefKind() || F.isRegDefEarlyClobberKind() ||
1792 F.isRegUseKind()) &&
1793 F.getRegMayBeFolded()) {
1794 OS << " foldable";
1795 }
1796
1797 OS << ']';
1798
1799 // Compute the index of the next operand descriptor.
1800 AsmDescOp += 1 + F.getNumOperandRegisters();
1801 } else {
1802 LLT TypeToPrint = MRI ? getTypeToPrint(i, PrintedTypes, *MRI) : LLT{};
1803 unsigned TiedOperandIdx = getTiedOperandIdx(i);
// NOTE(review): the subreg-index print statement (original line 1805) is
// missing from this extract.
1804 if (MO.isImm() && isOperandSubregIdx(i))
1806 else
1807 MO.print(OS, MST, TypeToPrint, i, /*PrintDef=*/true, IsStandalone,
1808 ShouldPrintRegisterTies, TiedOperandIdx, TRI, IntrinsicInfo);
1809 }
1810 }
1811
1812 // Print any optional symbols attached to this instruction as-if they were
1813 // operands.
1814 if (MCSymbol *PreInstrSymbol = getPreInstrSymbol()) {
1815 if (!FirstOp) {
1816 FirstOp = false;
1817 OS << ',';
1818 }
1819 OS << " pre-instr-symbol ";
1820 MachineOperand::printSymbol(OS, *PreInstrSymbol);
1821 }
1822 if (MCSymbol *PostInstrSymbol = getPostInstrSymbol()) {
1823 if (!FirstOp) {
1824 FirstOp = false;
1825 OS << ',';
1826 }
1827 OS << " post-instr-symbol ";
1828 MachineOperand::printSymbol(OS, *PostInstrSymbol);
1829 }
1830 if (MDNode *HeapAllocMarker = getHeapAllocMarker()) {
1831 if (!FirstOp) {
1832 FirstOp = false;
1833 OS << ',';
1834 }
1835 OS << " heap-alloc-marker ";
1836 HeapAllocMarker->printAsOperand(OS, MST);
1837 }
1838 if (MDNode *PCSections = getPCSections()) {
1839 if (!FirstOp) {
1840 FirstOp = false;
1841 OS << ',';
1842 }
1843 OS << " pcsections ";
1844 PCSections->printAsOperand(OS, MST);
1845 }
1846 if (uint32_t CFIType = getCFIType()) {
1847 if (!FirstOp)
1848 OS << ',';
1849 OS << " cfi-type " << CFIType;
1850 }
1851
1852 if (DebugInstrNum) {
1853 if (!FirstOp)
1854 OS << ",";
1855 OS << " debug-instr-number " << DebugInstrNum;
1856 }
1857
1858 if (!SkipDebugLoc) {
1859 if (const DebugLoc &DL = getDebugLoc()) {
1860 if (!FirstOp)
1861 OS << ',';
1862 OS << " debug-location ";
1863 DL->printAsOperand(OS, MST);
1864 }
1865 }
1866
1867 if (!memoperands_empty()) {
// NOTE(review): a declaration line (original 1868, providing SSNs used below)
// is missing from this extract.
1869 const LLVMContext *Context = nullptr;
1870 std::unique_ptr<LLVMContext> CtxPtr;
1871 const MachineFrameInfo *MFI = nullptr;
1872 if (const MachineFunction *MF = getMFIfAvailable(*this)) {
1873 MFI = &MF->getFrameInfo();
1874 Context = &MF->getFunction().getContext();
1875 } else {
// No owning function: synthesize a temporary context for printing.
1876 CtxPtr = std::make_unique<LLVMContext>();
1877 Context = CtxPtr.get();
1878 }
1879
1880 OS << " :: ";
1881 bool NeedComma = false;
1882 for (const MachineMemOperand *Op : memoperands()) {
1883 if (NeedComma)
1884 OS << ", ";
1885 Op->print(OS, MST, SSNs, *Context, MFI, TII);
1886 NeedComma = true;
1887 }
1888 }
1889
1890 if (SkipDebugLoc)
1891 return;
1892
1893 bool HaveSemi = false;
1894
1895 // Print debug location information.
1896 if (const DebugLoc &DL = getDebugLoc()) {
1897 if (!HaveSemi) {
1898 OS << ';';
1899 HaveSemi = true;
1900 }
1901 OS << ' ';
1902 DL.print(OS);
1903 }
1904
1905 // Print extra comments for DEBUG_VALUE and friends if they are well-formed.
1906 if ((isNonListDebugValue() && getNumOperands() >= 4) ||
1907 (isDebugValueList() && getNumOperands() >= 2) ||
1908 (isDebugRef() && getNumOperands() >= 3)) {
1909 if (getDebugVariableOp().isMetadata()) {
1910 if (!HaveSemi) {
1911 OS << ";";
1912 HaveSemi = true;
1913 }
1914 auto *DV = getDebugVariable();
1915 OS << " line no:" << DV->getLine();
// NOTE(review): the condition guarding the indirect annotation (original line
// 1916) is missing from this extract.
1917 OS << " indirect";
1918 }
1919 }
1920 // TODO: DBG_LABEL
1921
1922 if (AddNewLine)
1923 OS << '\n';
1924}
1925
// Mark IncomingReg as killed by this instruction, trimming redundant sub-
// register kill flags and optionally adding an implicit kill operand when no
// existing use matches.
// NOTE(review): the leading signature line (naming IncomingReg) and the
// DeadOps vector declaration (original lines 1926 and 1933) are missing from
// this extract.
1927 const TargetRegisterInfo *RegInfo,
1928 bool AddIfNotFound) {
1929 bool isPhysReg = IncomingReg.isPhysical();
1930 bool hasAliases = isPhysReg &&
1931 MCRegAliasIterator(IncomingReg, RegInfo, false).isValid();
1932 bool Found = false;
1934 for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
1935 MachineOperand &MO = getOperand(i);
1936 if (!MO.isReg() || !MO.isUse() || MO.isUndef())
1937 continue;
1938
1939 // DEBUG_VALUE nodes do not contribute to code generation and should
1940 // always be ignored. Failure to do so may result in trying to modify
1941 // KILL flags on DEBUG_VALUE nodes.
1942 if (MO.isDebug())
1943 continue;
1944
1945 Register Reg = MO.getReg();
1946 if (!Reg)
1947 continue;
1948
1949 if (Reg == IncomingReg) {
1950 if (!Found) {
1951 if (MO.isKill())
1952 // The register is already marked kill.
1953 return true;
1954 if (isPhysReg && isRegTiedToDefOperand(i))
1955 // Two-address uses of physregs must not be marked kill.
1956 return true;
1957 MO.setIsKill();
1958 Found = true;
1959 }
1960 } else if (hasAliases && MO.isKill() && Reg.isPhysical()) {
1961 // A super-register kill already exists.
1962 if (RegInfo->isSuperRegister(IncomingReg, Reg))
1963 return true;
// Sub-register kills become redundant; queue them for removal below.
1964 if (RegInfo->isSubRegister(IncomingReg, Reg))
1965 DeadOps.push_back(i);
1966 }
1967 }
1968
1969 // Trim unneeded kill operands.
1970 while (!DeadOps.empty()) {
1971 unsigned OpIdx = DeadOps.back();
// Drop removable implicit operands entirely; otherwise just clear the flag.
1972 if (getOperand(OpIdx).isImplicit() &&
1973 (!isInlineAsm() || findInlineAsmFlagIdx(OpIdx) < 0))
1974 removeOperand(OpIdx);
1975 else
1976 getOperand(OpIdx).setIsKill(false);
1977 DeadOps.pop_back();
1978 }
1979
1980 // If not found, this means an alias of one of the operands is killed. Add a
1981 // new implicit operand if required.
// NOTE(review): the addOperand(...CreateReg(IncomingReg, ...)) call line
// (original line 1983) is missing from this extract; the flag arguments
// below belong to it.
1982 if (!Found && AddIfNotFound) {
1984 false /*IsDef*/,
1985 true /*IsImp*/,
1986 true /*IsKill*/));
1987 return true;
1988 }
1989 return Found;
1990}
1991
// Clear the kill flag from every use of Reg (and, for physical registers,
// of any overlapping register).
// NOTE(review): the leading signature line (naming Reg) is missing from this
// extract.
1993 const TargetRegisterInfo *RegInfo) {
// Overlap checks only make sense for physical registers.
1994 if (!Reg.isPhysical())
1995 RegInfo = nullptr;
1996 for (MachineOperand &MO : operands()) {
1997 if (!MO.isReg() || !MO.isUse() || !MO.isKill())
1998 continue;
1999 Register OpReg = MO.getReg();
2000 if ((RegInfo && RegInfo->regsOverlap(Reg, OpReg)) || Reg == OpReg)
2001 MO.setIsKill(false);
2002 }
2003}
2004
// Mark Reg as dead in this instruction's defs, trimming redundant sub-
// register dead flags and optionally adding an implicit dead def when no
// existing def matches.
// NOTE(review): the leading signature line (naming Reg) and the DeadOps
// vector declaration (original lines 2005 and 2012) are missing from this
// extract.
2006 const TargetRegisterInfo *RegInfo,
2007 bool AddIfNotFound) {
2008 bool isPhysReg = Reg.isPhysical();
2009 bool hasAliases = isPhysReg &&
2010 MCRegAliasIterator(Reg, RegInfo, false).isValid();
2011 bool Found = false;
2013 for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
2014 MachineOperand &MO = getOperand(i);
2015 if (!MO.isReg() || !MO.isDef())
2016 continue;
2017 Register MOReg = MO.getReg();
2018 if (!MOReg)
2019 continue;
2020
2021 if (MOReg == Reg) {
2022 MO.setIsDead();
2023 Found = true;
2024 } else if (hasAliases && MO.isDead() && MOReg.isPhysical()) {
2025 // There exists a super-register that's marked dead.
2026 if (RegInfo->isSuperRegister(Reg, MOReg))
2027 return true;
// Sub-register dead flags become redundant; queue for trimming below.
2028 if (RegInfo->isSubRegister(Reg, MOReg))
2029 DeadOps.push_back(i);
2030 }
2031 }
2032
2033 // Trim unneeded dead operands.
2034 while (!DeadOps.empty()) {
2035 unsigned OpIdx = DeadOps.back();
// Drop removable implicit operands entirely; otherwise just clear the flag.
2036 if (getOperand(OpIdx).isImplicit() &&
2037 (!isInlineAsm() || findInlineAsmFlagIdx(OpIdx) < 0))
2038 removeOperand(OpIdx);
2039 else
2040 getOperand(OpIdx).setIsDead(false);
2041 DeadOps.pop_back();
2042 }
2043
2044 // If not found, this means an alias of one of the operands is dead. Add a
2045 // new implicit operand if required.
2046 if (Found || !AddIfNotFound)
2047 return Found;
2048
// NOTE(review): the addOperand(...CreateReg(Reg, ...)) call line (original
// line 2049) is missing from this extract; the flag arguments below belong
// to it.
2050 true /*IsDef*/,
2051 true /*IsImp*/,
2052 false /*IsKill*/,
2053 true /*IsDead*/));
2054 return true;
2055}
2056
// Clear the dead flag from every def of Reg in this instruction.
// NOTE(review): the signature line is not visible in this extract.
2058 for (MachineOperand &MO : operands()) {
2059 if (!MO.isReg() || !MO.isDef() || MO.getReg() != Reg)
2060 continue;
2061 MO.setIsDead(false);
2062 }
2063}
2064
// Set or clear the undef flag on every subregister def of Reg (full defs,
// i.e. SubReg == 0, are deliberately left untouched).
// NOTE(review): the signature line (naming Reg and IsUndef) is not visible in
// this extract.
2066 for (MachineOperand &MO : operands()) {
2067 if (!MO.isReg() || !MO.isDef() || MO.getReg() != Reg || MO.getSubReg() == 0)
2068 continue;
2069 MO.setIsUndef(IsUndef);
2070 }
2071}
2072
// Ensure this instruction has a def of Reg, adding an implicit def operand
// when no suitable existing def is found.
// NOTE(review): the leading signature line (naming Reg) and the
// addOperand(...CreateReg(Reg, ...)) call line (original lines 2073 and 2086)
// are missing from this extract.
2074 const TargetRegisterInfo *RegInfo) {
2075 if (Reg.isPhysical()) {
// Any existing (possibly overlapping) physical def suffices.
2076 MachineOperand *MO = findRegisterDefOperand(Reg, false, false, RegInfo);
2077 if (MO)
2078 return;
2079 } else {
// For virtual registers, only a full (SubReg == 0) def counts.
2080 for (const MachineOperand &MO : operands()) {
2081 if (MO.isReg() && MO.getReg() == Reg && MO.isDef() &&
2082 MO.getSubReg() == 0)
2083 return;
2084 }
2085 }
2087 true /*IsDef*/,
2088 true /*IsImp*/));
2089}
2090
// NOTE(review): opening declarator (original line 2091) missing from this
// extract; from the body this marks every physical-register def dead unless
// it overlaps a register in UsedRegs (setPhysRegsDeadExcept — confirm).
2092 const TargetRegisterInfo &TRI) {
2093 bool HasRegMask = false;
2094 for (MachineOperand &MO : operands()) {
// Remember whether a regmask operand exists; handled after the loop.
2095 if (MO.isRegMask()) {
2096 HasRegMask = true;
2097 continue;
2098 }
2099 if (!MO.isReg() || !MO.isDef()) continue;
2100 Register Reg = MO.getReg();
2101 if (!Reg.isPhysical())
2102 continue;
2103 // If there are no uses, including partial uses, the def is dead.
2104 if (llvm::none_of(UsedRegs,
2105 [&](MCRegister Use) { return TRI.regsOverlap(Use, Reg); }))
2106 MO.setIsDead();
2107 }
2108
2109 // This is a call with a register mask operand.
2110 // Mask clobbers are always dead, so add defs for the non-dead defines.
2111 if (HasRegMask)
2112 for (const Register &UsedReg : UsedRegs)
2113 addRegisterDefined(UsedReg, &TRI);
2114}
2115
2116unsigned
// NOTE(review): the declarator line (original 2117,
// MachineInstrExpressionTrait::getHashValue per HEAD's index) is missing
// from this extract. Hashes the opcode plus all operands except virtual
// register defs, so instructions computing the same expression into
// different vregs hash equal.
2118 // Build up a buffer of hash code components.
2119 SmallVector<size_t, 16> HashComponents;
2120 HashComponents.reserve(MI->getNumOperands() + 1);
2121 HashComponents.push_back(MI->getOpcode());
2122 for (const MachineOperand &MO : MI->operands()) {
2123 if (MO.isReg() && MO.isDef() && MO.getReg().isVirtual())
2124 continue; // Skip virtual register defs.
2125
2126 HashComponents.push_back(hash_value(MO));
2127 }
2128 return hash_combine_range(HashComponents.begin(), HashComponents.end());
2129}
2130
// NOTE(review): opening declarator (original line 2131) missing from this
// extract; the body emits Msg through the LLVMContext error handler when a
// parent MachineFunction is reachable, otherwise aborts via
// report_fatal_error (MachineInstr::emitError — confirm).
2132 // Find the source location cookie.
2133 uint64_t LocCookie = 0;
2134 const MDNode *LocMD = nullptr;
// Scan operands last-to-first for a metadata operand carrying a
// ConstantInt cookie as its first element.
2135 for (unsigned i = getNumOperands(); i != 0; --i) {
2136 if (getOperand(i-1).isMetadata() &&
2137 (LocMD = getOperand(i-1).getMetadata()) &&
2138 LocMD->getNumOperands() != 0) {
2139 if (const ConstantInt *CI =
2140 mdconst::dyn_extract<ConstantInt>(LocMD->getOperand(0))) {
2141 LocCookie = CI->getZExtValue();
2142 break;
2143 }
2144 }
2145 }
2146
2147 if (const MachineBasicBlock *MBB = getParent())
2148 if (const MachineFunction *MF = MBB->getParent())
2149 return MF->getMMI().getModule()->getContext().emitError(LocCookie, Msg);
// No parent function: there is no context to report through.
2150 report_fatal_error(Msg);
2151}
2152
// NOTE(review): opening declarator (original line 2153) missing from this
// extract; from the body this is the llvm::BuildMI overload creating a
// DBG_VALUE-style instruction for a single register location: Reg, then the
// indirection slot, then Variable and Expr metadata.
2154 const MCInstrDesc &MCID, bool IsIndirect,
2155 Register Reg, const MDNode *Variable,
2156 const MDNode *Expr) {
2157 assert(isa<DILocalVariable>(Variable) && "not a variable");
2158 assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
2159 assert(cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(DL) &&
2160 "Expected inlined-at fields to agree");
2161 auto MIB = BuildMI(MF, DL, MCID).addReg(Reg);
// Second operand: immediate 0 for indirect locations, reg 0 otherwise.
2162 if (IsIndirect)
2163 MIB.addImm(0U);
2164 else
2165 MIB.addReg(0U);
2166 return MIB.addMetadata(Variable).addMetadata(Expr);
2167}
2168
// NOTE(review): opening declarator (original line 2169) missing from this
// extract; from the body this is the llvm::BuildMI overload taking a list of
// debug operands. DBG_VALUE keeps the classic 4-operand layout; other
// (variadic, e.g. DBG_VALUE_LIST) opcodes put metadata first, locations after.
2170 const MCInstrDesc &MCID, bool IsIndirect,
2171 ArrayRef<MachineOperand> DebugOps,
2172 const MDNode *Variable, const MDNode *Expr) {
2173 assert(isa<DILocalVariable>(Variable) && "not a variable");
2174 assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
2175 assert(cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(DL) &&
2176 "Expected inlined-at fields to agree");
2177 if (MCID.Opcode == TargetOpcode::DBG_VALUE) {
2178 assert(DebugOps.size() == 1 &&
2179 "DBG_VALUE must contain exactly one debug operand");
2180 MachineOperand DebugOp = DebugOps[0];
// Register operands reuse the single-register overload above.
2181 if (DebugOp.isReg())
2182 return BuildMI(MF, DL, MCID, IsIndirect, DebugOp.getReg(), Variable,
2183 Expr);
2184
2185 auto MIB = BuildMI(MF, DL, MCID).add(DebugOp);
2186 if (IsIndirect)
2187 MIB.addImm(0U);
2188 else
2189 MIB.addReg(0U);
2190 return MIB.addMetadata(Variable).addMetadata(Expr);
2191 }
2192
// Variadic form: Variable, Expression, then all locations.
2193 auto MIB = BuildMI(MF, DL, MCID);
2194 MIB.addMetadata(Variable).addMetadata(Expr);
2195 for (const MachineOperand &DebugOp : DebugOps)
2196 if (DebugOp.isReg())
2197 MIB.addReg(DebugOp.getReg());
2198 else
2199 MIB.add(DebugOp);
2200 return MIB;
2201}
2202
// NOTE(review): opening declarator (original lines 2203-2204) missing from
// this extract; from the body this BuildMI overload builds the debug-value
// instruction via the MachineFunction overload and inserts it into BB
// before iterator I.
2205 const DebugLoc &DL, const MCInstrDesc &MCID,
2206 bool IsIndirect, Register Reg,
2207 const MDNode *Variable, const MDNode *Expr) {
2208 MachineFunction &MF = *BB.getParent();
2209 MachineInstr *MI = BuildMI(MF, DL, MCID, IsIndirect, Reg, Variable, Expr);
2210 BB.insert(I, MI);
2211 return MachineInstrBuilder(MF, MI);
2212}
2213
// NOTE(review): opening declarator (original lines 2214-2215) missing from
// this extract; same as the overload above but forwarding a list of debug
// operands instead of a single register.
2216 const DebugLoc &DL, const MCInstrDesc &MCID,
2217 bool IsIndirect,
2218 ArrayRef<MachineOperand> DebugOps,
2219 const MDNode *Variable, const MDNode *Expr) {
2220 MachineFunction &MF = *BB.getParent();
2221 MachineInstr *MI =
2222 BuildMI(MF, DL, MCID, IsIndirect, DebugOps, Variable, Expr);
2223 BB.insert(I, MI);
2224 return MachineInstrBuilder(MF, *MI);
2225}
2226
2227/// Compute the new DIExpression to use with a DBG_VALUE for a spill slot.
2228/// This prepends DW_OP_deref when spilling an indirect DBG_VALUE.
2229static const DIExpression *
// NOTE(review): the parameter line naming the MachineInstr argument
// (original line 2230) and the DIExpression::prepend call for the indirect
// case (line 2239) are missing from this extract — the indirect branch body
// is not visible here; confirm against the full source.
2231 SmallVectorImpl<const MachineOperand *> &SpilledOperands) {
2232 assert(MI.getDebugVariable()->isValidLocationForIntrinsic(MI.getDebugLoc()) &&
2233 "Expected inlined-at fields to agree");
2234
2235 const DIExpression *Expr = MI.getDebugExpression();
2236 if (MI.isIndirectDebugValue()) {
2237 assert(MI.getDebugOffset().getImm() == 0 &&
2238 "DBG_VALUE with nonzero offset");
2240 } else if (MI.isDebugValueList()) {
2241 // We will replace the spilled register with a frame index, so
2242 // immediately deref all references to the spilled register.
2243 std::array<uint64_t, 1> Ops{{dwarf::DW_OP_deref}};
2244 for (const MachineOperand *Op : SpilledOperands) {
2245 unsigned OpIdx = MI.getDebugOperandIndex(Op);
2246 Expr = DIExpression::appendOpsToArg(Expr, Ops, OpIdx);
2247 }
2248 }
2249 return Expr;
2250}
// NOTE(review): the declarator (original line 2251) and the SpillOperands
// local declaration (line 2254) are missing from this extract; from the body
// this is the computeExprForSpill convenience overload that collects all
// debug operands referring to SpillReg and delegates to the list overload.
2252 Register SpillReg) {
2253 assert(MI.hasDebugOperandForReg(SpillReg) && "Spill Reg is not used in MI.");
2255 for (const MachineOperand &Op : MI.getDebugOperandsForReg(SpillReg))
2256 SpillOperands.push_back(&Op);
2257 return computeExprForSpill(MI, SpillOperands);
2258}
2259
// NOTE(review): opening declarator (original lines 2260-2261) missing from
// this extract; from the body this builds a copy of debug-value Orig whose
// SpillReg locations are replaced by FrameIndex
// (llvm::buildDbgValueForSpill — confirm).
2262 const MachineInstr &Orig,
2263 int FrameIndex, Register SpillReg) {
2264 assert(!Orig.isDebugRef() &&
2265 "DBG_INSTR_REF should not reference a virtual register.");
2266 const DIExpression *Expr = computeExprForSpill(Orig, SpillReg);
2267 MachineInstrBuilder NewMI =
2268 BuildMI(BB, I, Orig.getDebugLoc(), Orig.getDesc());
2269 // Non-Variadic Operands: Location, Offset, Variable, Expression
2270 // Variadic Operands: Variable, Expression, Locations...
2271 if (Orig.isNonListDebugValue())
2272 NewMI.addFrameIndex(FrameIndex).addImm(0U);
2273 NewMI.addMetadata(Orig.getDebugVariable()).addMetadata(Expr);
2274 if (Orig.isDebugValueList()) {
// Copy each location, substituting the spilled register with the slot.
2275 for (const MachineOperand &Op : Orig.debug_operands())
2276 if (Op.isReg() && Op.getReg() == SpillReg)
2277 NewMI.addFrameIndex(FrameIndex);
2278 else
2279 NewMI.add(MachineOperand(Op));
2280 }
2281 return NewMI;
2282}
// NOTE(review): opening declarator (original lines 2283-2284) missing from
// this extract; same as the overload above but the caller supplies the exact
// operand pointers to replace instead of a register.
2285 const MachineInstr &Orig, int FrameIndex,
2286 SmallVectorImpl<const MachineOperand *> &SpilledOperands) {
2287 const DIExpression *Expr = computeExprForSpill(Orig, SpilledOperands);
2288 MachineInstrBuilder NewMI =
2289 BuildMI(BB, I, Orig.getDebugLoc(), Orig.getDesc());
2290 // Non-Variadic Operands: Location, Offset, Variable, Expression
2291 // Variadic Operands: Variable, Expression, Locations...
2292 if (Orig.isNonListDebugValue())
2293 NewMI.addFrameIndex(FrameIndex).addImm(0U);
2294 NewMI.addMetadata(Orig.getDebugVariable()).addMetadata(Expr);
2295 if (Orig.isDebugValueList()) {
// Only operands explicitly listed in SpilledOperands are replaced.
2296 for (const MachineOperand &Op : Orig.debug_operands())
2297 if (is_contained(SpilledOperands, &Op))
2298 NewMI.addFrameIndex(FrameIndex);
2299 else
2300 NewMI.add(MachineOperand(Op));
2301 }
2302 return NewMI;
2303}
2304
// NOTE(review): the declarator (original lines 2304-2305) and the non-list
// branch body (line 2309, the statement guarded by isNonListDebugValue())
// are missing from this extract; from the visible body this rewrites Orig
// in place, pointing its Reg debug operands at FrameIndex and installing
// the spill expression (llvm::updateDbgValueForSpill — confirm).
2306 Register Reg) {
2307 const DIExpression *Expr = computeExprForSpill(Orig, Reg);
2308 if (Orig.isNonListDebugValue())
2310 for (MachineOperand &Op : Orig.getDebugOperandsForReg(Reg))
2311 Op.ChangeToFrameIndex(FrameIndex);
2312 Orig.getDebugExpressionOp().setMetadata(Expr);
2313}
2314
// NOTE(review): the declarator (original lines 2315-2316) and the iterator
// initialization (line 2321, which sets DI to the instruction after MI) are
// missing from this extract; per HEAD's index this is
// MachineInstr::collectDebugValues — it gathers DBG_VALUEs immediately
// following this instruction that use its first def register, stopping at
// the first non-debug instruction.
2317 MachineInstr &MI = *this;
2318 if (!MI.getOperand(0).isReg())
2319 return;
2320
2322 for (MachineBasicBlock::iterator DE = MI.getParent()->end();
2323 DI != DE; ++DI) {
// Stop scanning as soon as a non-DBG_VALUE instruction is reached.
2324 if (!DI->isDebugValue())
2325 return;
2326 if (DI->hasDebugOperandForReg(MI.getOperand(0).getReg()))
2327 DbgValues.push_back(&*DI);
2328 }
2329}
2330
// NOTE(review): the declarator (original lines 2330-2331) and the DbgValues
// local declaration (line 2334) are missing from this extract; from the body
// this retargets every debug-value use of this instruction's first def
// register to Reg (changeDebugValuesDefReg — confirm).
2332 // Collect matching debug values.
2334
2335 if (!getOperand(0).isReg())
2336 return;
2337
2338 Register DefReg = getOperand(0).getReg();
// Walk all uses of the def through MachineRegisterInfo rather than
// scanning the block: catches debug values anywhere in the function.
2339 auto *MRI = getRegInfo();
2340 for (auto &MO : MRI->use_operands(DefReg)) {
2341 auto *DI = MO.getParent();
2342 if (!DI->isDebugValue())
2343 continue;
2344 if (DI->hasDebugOperandForReg(DefReg)) {
2345 DbgValues.push_back(DI);
2346 }
2347 }
2348
2349 // Propagate Reg to debug value instructions.
2350 for (auto *DBI : DbgValues)
2351 for (MachineOperand &Op : DBI->getDebugOperandsForReg(DefReg))
2352 Op.setReg(Reg);
2353}
2354
2356
2357static unsigned getSpillSlotSize(const MMOList &Accesses,
2358 const MachineFrameInfo &MFI) {
2359 unsigned Size = 0;
2360 for (const auto *A : Accesses)
2361 if (MFI.isSpillSlotObjectIndex(
2362 cast<FixedStackPseudoSourceValue>(A->getPseudoValue())
2363 ->getFrameIndex()))
2364 Size += A->getSize();
2365 return Size;
2366}
2367
2368std::optional<unsigned>
// NOTE(review): the declarator line (original 2369,
// MachineInstr::getSpillSize per HEAD's index) is missing from this extract.
// Returns the size of the first memory operand when this instruction is a
// post-frame-elimination store to a spill slot, std::nullopt otherwise.
2370 int FI;
2371 if (TII->isStoreToStackSlotPostFE(*this, FI)) {
2372 const MachineFrameInfo &MFI = getMF()->getFrameInfo();
2373 if (MFI.isSpillSlotObjectIndex(FI))
2374 return (*memoperands_begin())->getSize();
2375 }
2376 return std::nullopt;
2377}
2378
2379std::optional<unsigned>
// NOTE(review): declarator line (original 2380, getFoldedSpillSize per
// HEAD's index) missing from this extract. Sums spill-slot store sizes via
// getSpillSlotSize when TII reports stores to stack slots.
2381 MMOList Accesses;
2382 if (TII->hasStoreToStackSlot(*this, Accesses))
2383 return getSpillSlotSize(Accesses, getMF()->getFrameInfo());
2384 return std::nullopt;
2385}
2386
2387std::optional<unsigned>
// NOTE(review): declarator line (original 2388, getRestoreSize per HEAD's
// index) missing from this extract. Mirror of getSpillSize for loads: size
// of the first memory operand when this is a post-FE load from a spill slot.
2390 int FI;
2391 if (TII->isLoadFromStackSlotPostFE(*this, FI)) {
2392 const MachineFrameInfo &MFI = getMF()->getFrameInfo();
2393 if (MFI.isSpillSlotObjectIndex(FI))
2394 return (*memoperands_begin())->getSize();
2395 }
2396 return std::nullopt;
2397}
2397
2398std::optional<unsigned>
// NOTE(review): declarator line (original 2399, getFoldedRestoreSize per
// HEAD's index) missing from this extract. Sums spill-slot load sizes via
// getSpillSlotSize when TII reports loads from stack slots.
2401 MMOList Accesses;
2402 if (TII->hasLoadFromStackSlot(*this, Accesses))
2403 return getSpillSlotSize(Accesses, getMF()->getFrameInfo());
2404 return std::nullopt;
2405}
2405
// NOTE(review): declarator line (original 2406) missing from this extract;
// from the body this is getDebugInstrNum(): lazily assigns a fresh debug
// instruction number from the parent MachineFunction on first request
// (0 means "not yet assigned").
2407 if (DebugInstrNum == 0)
2408 DebugInstrNum = getParent()->getParent()->getNewDebugInstrNum();
2409 return DebugInstrNum;
2410}
2411
// NOTE(review): declarator line (original 2412) missing from this extract;
// same lazy numbering as above, but the caller supplies the MachineFunction
// explicitly instead of reaching it through the parent block.
2413 if (DebugInstrNum == 0)
2414 DebugInstrNum = MF.getNewDebugInstrNum();
2415 return DebugInstrNum;
2416}
2417
2418std::tuple<LLT, LLT> MachineInstr::getFirst2LLTs() const {
2419 return std::tuple(getRegInfo()->getType(getOperand(0).getReg()),
2420 getRegInfo()->getType(getOperand(1).getReg()));
2421}
2422
2423std::tuple<LLT, LLT, LLT> MachineInstr::getFirst3LLTs() const {
2424 return std::tuple(getRegInfo()->getType(getOperand(0).getReg()),
2425 getRegInfo()->getType(getOperand(1).getReg()),
2426 getRegInfo()->getType(getOperand(2).getReg()));
2427}
2428
2429std::tuple<LLT, LLT, LLT, LLT> MachineInstr::getFirst4LLTs() const {
2430 return std::tuple(getRegInfo()->getType(getOperand(0).getReg()),
2431 getRegInfo()->getType(getOperand(1).getReg()),
2432 getRegInfo()->getType(getOperand(2).getReg()),
2433 getRegInfo()->getType(getOperand(3).getReg()));
2434}
2435
2436std::tuple<LLT, LLT, LLT, LLT, LLT> MachineInstr::getFirst5LLTs() const {
2437 return std::tuple(getRegInfo()->getType(getOperand(0).getReg()),
2438 getRegInfo()->getType(getOperand(1).getReg()),
2439 getRegInfo()->getType(getOperand(2).getReg()),
2440 getRegInfo()->getType(getOperand(3).getReg()),
2441 getRegInfo()->getType(getOperand(4).getReg()));
2442}
2443
2444std::tuple<Register, LLT, Register, LLT>
// NOTE(review): the declarator line (original 2445,
// MachineInstr::getFirst2RegLLTs per HEAD's index) is missing from this
// extract. Returns the first two register operands paired with their LLTs.
2446 Register Reg0 = getOperand(0).getReg();
2447 Register Reg1 = getOperand(1).getReg();
2448 return std::tuple(Reg0, getRegInfo()->getType(Reg0), Reg1,
2449 getRegInfo()->getType(Reg1));
2450}
2451
2452std::tuple<Register, LLT, Register, LLT, Register, LLT>
// NOTE(review): declarator line (original 2453, getFirst3RegLLTs per HEAD's
// index) missing from this extract. First three register operands with LLTs.
2454 Register Reg0 = getOperand(0).getReg();
2455 Register Reg1 = getOperand(1).getReg();
2456 Register Reg2 = getOperand(2).getReg();
2457 return std::tuple(Reg0, getRegInfo()->getType(Reg0), Reg1,
2458 getRegInfo()->getType(Reg1), Reg2,
2459 getRegInfo()->getType(Reg2));
2460}
2461
2462std::tuple<Register, LLT, Register, LLT, Register, LLT, Register, LLT>
// NOTE(review): declarator line (original 2463, getFirst4RegLLTs per HEAD's
// index) missing from this extract. First four register operands with LLTs.
2464 Register Reg0 = getOperand(0).getReg();
2465 Register Reg1 = getOperand(1).getReg();
2466 Register Reg2 = getOperand(2).getReg();
2467 Register Reg3 = getOperand(3).getReg();
2468 return std::tuple(
2469 Reg0, getRegInfo()->getType(Reg0), Reg1, getRegInfo()->getType(Reg1),
2470 Reg2, getRegInfo()->getType(Reg2), Reg3, getRegInfo()->getType(Reg3));
2471}
2472
// NOTE(review): the first line of the return type and the declarator
// (original lines 2472 and 2475, getFirst5RegLLTs per HEAD's index) are
// missing from this extract. First five register operands with their LLTs.
2473 LLT>
2476 Register Reg0 = getOperand(0).getReg();
2477 Register Reg1 = getOperand(1).getReg();
2478 Register Reg2 = getOperand(2).getReg();
2479 Register Reg3 = getOperand(3).getReg();
2480 Register Reg4 = getOperand(4).getReg();
2481 return std::tuple(
2482 Reg0, getRegInfo()->getType(Reg0), Reg1, getRegInfo()->getType(Reg1),
2483 Reg2, getRegInfo()->getType(Reg2), Reg3, getRegInfo()->getType(Reg3),
2484 Reg4, getRegInfo()->getType(Reg4));
2485}
2486
// NOTE(review): the declarator (original lines 2487-2488, MachineInstr's
// operand-insert method) and two local declarations are missing from this
// extract: line 2496 (TiedOpIndices, a map of tied operand index pairs) and
// line 2510 (MovingOps, the temporary holding displaced operands). From the
// body: inserts Ops before InsertBefore, preserving tied-operand pairings.
2489 assert(InsertBefore != nullptr && "invalid iterator");
2490 assert(InsertBefore->getParent() == this &&
2491 "iterator points to operand of other inst");
2492 if (Ops.empty())
2493 return;
2494
2495 // Do one pass to untie operands.
// Ties are index-based, so they must be dissolved before indices shift.
2497 for (const MachineOperand &MO : operands()) {
2498 if (MO.isReg() && MO.isTied()) {
2499 unsigned OpNo = getOperandNo(&MO);
2500 unsigned TiedTo = findTiedOperandIdx(OpNo);
2501 TiedOpIndices[OpNo] = TiedTo;
2502 untieRegOperand(OpNo);
2503 }
2504 }
2505
2506 unsigned OpIdx = getOperandNo(InsertBefore);
2507 unsigned NumOperands = getNumOperands();
2508 unsigned OpsToMove = NumOperands - OpIdx;
2509
2511 MovingOps.reserve(OpsToMove);
2512
// Pop everything at/after the insertion point, append the new operands,
// then re-append the displaced tail.
2513 for (unsigned I = 0; I < OpsToMove; ++I) {
2514 MovingOps.emplace_back(getOperand(OpIdx));
2515 removeOperand(OpIdx);
2516 }
2517 for (const MachineOperand &MO : Ops)
2518 addOperand(MO);
2519 for (const MachineOperand &OpMoved : MovingOps)
2520 addOperand(OpMoved);
2521
2522 // Re-tie operands.
// Indices at or past the insertion point were shifted by Ops.size().
2523 for (auto [Tie1, Tie2] : TiedOpIndices) {
2524 if (Tie1 >= OpIdx)
2525 Tie1 += Ops.size();
2526 if (Tie2 >= OpIdx)
2527 Tie2 += Ops.size();
2528 tieOperands(Tie1, Tie2);
2529 }
2530}
2531
2532bool MachineInstr::mayFoldInlineAsmRegOp(unsigned OpId) const {
2533 assert(OpId && "expected non-zero operand id");
2534 assert(isInlineAsm() && "should only be used on inline asm");
2535
2536 if (!getOperand(OpId).isReg())
2537 return false;
2538
2539 const MachineOperand &MD = getOperand(OpId - 1);
2540 if (!MD.isImm())
2541 return false;
2542
2543 InlineAsm::Flag F(MD.getImm());
2544 if (F.isRegUseKind() || F.isRegDefKind() || F.isRegDefEarlyClobberKind())
2545 return F.getRegMayBeFolded();
2546 return false;
2547}
unsigned const MachineRegisterInfo * MRI
MachineBasicBlock & MBB
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
Analysis containing CSE Info
Definition: CSEInfo.cpp:27
#define LLVM_DUMP_METHOD
Mark debug helper function definitions like dump() that should not be stripped from debug builds.
Definition: Compiler.h:529
This file contains the declarations for the subclasses of Constant, which represent the different fla...
uint64_t Size
#define Check(C,...)
const HexagonInstrInfo * TII
IRTranslator LLVM IR MI
static const unsigned MaxDepth
Implement a low-level type suitable for MachineInstr level instruction selection.
#define F(x, y, z)
Definition: MD5.cpp:55
#define I(x, y, z)
Definition: MD5.cpp:58
static DebugLoc getDebugLoc(MachineBasicBlock::instr_iterator FirstMI, MachineBasicBlock::instr_iterator LastMI)
Return the first found DebugLoc that has a DILocation, given a range of instructions.
const unsigned TiedMax
static void moveOperands(MachineOperand *Dst, MachineOperand *Src, unsigned NumOps, MachineRegisterInfo *MRI)
Move NumOps MachineOperands from Src to Dst, with support for overlapping ranges.
static unsigned getSpillSlotSize(const MMOList &Accesses, const MachineFrameInfo &MFI)
static bool MemOperandsHaveAlias(const MachineFrameInfo &MFI, AAResults *AA, bool UseTBAA, const MachineMemOperand *MMOa, const MachineMemOperand *MMOb)
static void tryToGetTargetInfo(const MachineInstr &MI, const TargetRegisterInfo *&TRI, const MachineRegisterInfo *&MRI, const TargetIntrinsicInfo *&IntrinsicInfo, const TargetInstrInfo *&TII)
static const DIExpression * computeExprForSpill(const MachineInstr &MI, SmallVectorImpl< const MachineOperand * > &SpilledOperands)
Compute the new DIExpression to use with a DBG_VALUE for a spill slot.
static const MachineFunction * getMFIfAvailable(const MachineInstr &MI)
static bool hasIdenticalMMOs(ArrayRef< MachineMemOperand * > LHS, ArrayRef< MachineMemOperand * > RHS)
Check to see if the MMOs pointed to by the two MemRefs arrays are identical.
unsigned const TargetRegisterInfo * TRI
This file provides utility analysis objects describing memory locations.
This file contains the declarations for metadata subclasses.
static unsigned getReg(const MCDisassembler *D, unsigned RC, unsigned RegNo)
static bool isReg(const MCInst &MI, unsigned OpNo)
Module.h This file contains the declarations for the Module class.
LLVMContext & Context
static bool isValid(const char C)
Returns true if C is a valid mangled character: <0-9a-zA-Z_>.
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
This file contains some templates that are useful if you are working with the STL at all.
raw_pwrite_stream & OS
static cl::opt< bool > UseTBAA("use-tbaa-in-sched-mi", cl::Hidden, cl::init(true), cl::desc("Enable use of TBAA during MI DAG construction"))
This file implements the SmallBitVector class.
This file defines the SmallVector class.
static SymbolRef::Type getType(const Symbol *Sym)
Definition: TapiFile.cpp:40
Value * RHS
Value * LHS
bool isNoAlias(const MemoryLocation &LocA, const MemoryLocation &LocB)
A trivial helper function to check to see if the specified pointers are no-alias.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition: ArrayRef.h:41
size_t size() const
size - Get the array size.
Definition: ArrayRef.h:165
bool empty() const
empty - Check if the array is empty.
Definition: ArrayRef.h:160
ArrayRef< T > slice(size_t N, size_t M) const
slice(n, m) - Chop off the first N elements of the array, and keep M elements in the array.
Definition: ArrayRef.h:195
ConstMIBundleOperands - Iterate over all operands in a const bundle of machine instructions.
This is the shared class of boolean and integer constants.
Definition: Constants.h:79
DWARF expression.
bool isEntryValue() const
Check if the expression consists of exactly one entry value operand.
static bool isEqualExpression(const DIExpression *FirstExpr, bool FirstIndirect, const DIExpression *SecondExpr, bool SecondIndirect)
Determines whether two debug values should produce equivalent DWARF expressions, using their DIExpres...
static DIExpression * appendOpsToArg(const DIExpression *Expr, ArrayRef< uint64_t > Ops, unsigned ArgNo, bool StackValue=false)
Create a copy of Expr by appending the given list of Ops to each instance of the operand DW_OP_LLVM_a...
static DIExpression * prepend(const DIExpression *Expr, uint8_t Flags, int64_t Offset=0)
Prepend DIExpr with a deref and offset operation and optionally turn it into a stack value or/and an ...
This class represents an Operation in the Expression.
bool print(raw_ostream &OS, DIDumpOptions DumpOpts, const DWARFExpression *Expr, DWARFUnit *U) const
A debug info location.
Definition: DebugLoc.h:33
bool hasTrivialDestructor() const
Check whether this has a trivial destructor.
Definition: DebugLoc.h:69
Utility class for floating point operations which can have information about relaxed accuracy require...
Definition: Operator.h:188
Convenience struct for specifying and reasoning about fast-math flags.
Definition: FMF.h:20
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function.
Definition: Function.cpp:342
bool hasStoreToStackSlot(const MachineInstr &MI, SmallVectorImpl< const MachineMemOperand * > &Accesses) const override
Check if the instruction or the bundle of instructions has store to stack slots.
bool areMemAccessesTriviallyDisjoint(const MachineInstr &MIa, const MachineInstr &MIb) const override
bool hasLoadFromStackSlot(const MachineInstr &MI, SmallVectorImpl< const MachineMemOperand * > &Accesses) const override
Check if the instruction or the bundle of instructions has load from stack slots.
static StringRef getMemConstraintName(ConstraintCode C)
Definition: InlineAsm.h:467
constexpr bool isValid() const
Definition: LowLevelType.h:137
This is an important class for using LLVM in a threaded context.
Definition: LLVMContext.h:67
void emitError(uint64_t LocCookie, const Twine &ErrorStr)
emitError - Emit an error message to the currently installed error handler with optional location inf...
Describe properties that are true of each instruction in the target description file.
Definition: MCInstrDesc.h:198
unsigned getNumOperands() const
Return the number of declared MachineOperands for this MachineInstruction.
Definition: MCInstrDesc.h:237
ArrayRef< MCOperandInfo > operands() const
Definition: MCInstrDesc.h:239
unsigned getNumDefs() const
Return the number of MachineOperands that are register definitions.
Definition: MCInstrDesc.h:248
int getOperandConstraint(unsigned OpNum, MCOI::OperandConstraint Constraint) const
Returns the value of the specified operand constraint if it is present.
Definition: MCInstrDesc.h:219
ArrayRef< MCPhysReg > implicit_defs() const
Return a list of registers that are potentially written by any instance of this machine instruction.
Definition: MCInstrDesc.h:579
bool isPredicable() const
Return true if this instruction has a predicate operand that controls execution.
Definition: MCInstrDesc.h:338
unsigned short Opcode
Definition: MCInstrDesc.h:205
bool isVariadic() const
Return true if this instruction can have a variable number of operands.
Definition: MCInstrDesc.h:261
ArrayRef< MCPhysReg > implicit_uses() const
Return a list of registers that are potentially read by any instance of this machine instruction.
Definition: MCInstrDesc.h:565
MCRegAliasIterator enumerates all registers aliasing Reg.
bool isSubRegister(MCRegister RegA, MCRegister RegB) const
Returns true if RegB is a sub-register of RegA.
bool isSuperRegister(MCRegister RegA, MCRegister RegB) const
Returns true if RegB is a super-register of RegA.
Wrapper class representing physical registers. Should be passed by value.
Definition: MCRegister.h:33
MCSymbol - Instances of this class represent a symbol name in the MC file, and MCSymbols are created ...
Definition: MCSymbol.h:40
Metadata node.
Definition: Metadata.h:1067
const MDOperand & getOperand(unsigned I) const
Definition: Metadata.h:1428
unsigned getNumOperands() const
Return number of MDNode operands.
Definition: Metadata.h:1434
bool isValid() const
isValid - Returns true until all the operands have been visited.
MachineInstr * remove_instr(MachineInstr *I)
Remove the possibly bundled instruction from the instruction list without deleting it.
instr_iterator insert(instr_iterator I, MachineInstr *M)
Insert MI into the instruction list before I, possibly inside a bundle.
instr_iterator erase_instr(MachineInstr *I)
Remove an instruction from the instruction list and delete it.
void printAsOperand(raw_ostream &OS, bool PrintType=true) const
MachineInstr * remove(MachineInstr *I)
Remove the unbundled instruction from the instruction list without deleting it.
void print(raw_ostream &OS, const SlotIndexes *=nullptr, bool IsStandalone=true) const
Instructions::iterator instr_iterator
Instructions::const_iterator const_instr_iterator
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
instr_iterator erase(instr_iterator I)
Remove an instruction from the instruction list and delete it.
void splice(iterator Where, MachineBasicBlock *Other, iterator From)
Take an instruction from MBB 'Other' at the position From, and insert it into this MBB right before '...
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
bool isSpillSlotObjectIndex(int ObjectIdx) const
Returns true if the specified index corresponds to a spill slot.
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
void deallocateOperandArray(OperandCapacity Cap, MachineOperand *Array)
Dellocate an array of MachineOperands and recycle the memory.
MachineOperand * allocateOperandArray(OperandCapacity Cap)
Allocate an array of MachineOperands.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
void handleChangeDesc(MachineInstr &MI, const MCInstrDesc &TID)
Function & getFunction()
Return the LLVM function that this machine code represents.
MachineModuleInfo & getMMI() const
MachineInstr::ExtraInfo * createMIExtraInfo(ArrayRef< MachineMemOperand * > MMOs, MCSymbol *PreInstrSymbol=nullptr, MCSymbol *PostInstrSymbol=nullptr, MDNode *HeapAllocMarker=nullptr, MDNode *PCSections=nullptr, uint32_t CFIType=0)
Allocate and construct an extra info structure for a MachineInstr.
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & add(const MachineOperand &MO) const
const MachineInstrBuilder & addMetadata(const MDNode *MD) const
const MachineInstrBuilder & addFrameIndex(int Idx) const
const MachineInstrBuilder & addReg(Register RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
Representation of each machine instruction.
Definition: MachineInstr.h:68
int findRegisterUseOperandIdx(Register Reg, bool isKill=false, const TargetRegisterInfo *TRI=nullptr) const
Returns the operand index that is a use of the specific register or -1 if it is not found.
bool mayRaiseFPException() const
Return true if this instruction could possibly raise a floating-point exception.
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
Definition: MachineInstr.h:543
void setRegisterDefReadUndef(Register Reg, bool IsUndef=true)
Mark all subregister defs of register Reg with the undef flag.
static iterator_range< filter_iterator< Operand *, std::function< bool(Operand &Op)> > > getDebugOperandsForReg(Instruction *MI, Register Reg)
Returns a range of all of the operands that correspond to a debug use of Reg.
Definition: MachineInstr.h:584
bool isDebugValueList() const
void bundleWithPred()
Bundle this instruction with its predecessor.
bool isPosition() const
bool isSafeToMove(AAResults *AA, bool &SawStore) const
Return true if it is safe to move this instruction.
bool isTerminator(QueryType Type=AnyInBundle) const
Returns true if this instruction part of the terminator for a basic block.
Definition: MachineInstr.h:939
std::tuple< Register, LLT, Register, LLT, Register, LLT, Register, LLT, Register, LLT > getFirst5RegLLTs() const
bool mayLoadOrStore(QueryType Type=AnyInBundle) const
Return true if this instruction could possibly read or modify memory.
void setCFIType(MachineFunction &MF, uint32_t Type)
Set the CFI type for the instruction.
MachineInstr * removeFromParent()
Unlink 'this' from the containing basic block, and return it without deleting it.
iterator_range< mop_iterator > debug_operands()
Returns a range over all operands that are used to determine the variable location for this DBG_VALUE...
Definition: MachineInstr.h:681
const MachineBasicBlock * getParent() const
Definition: MachineInstr.h:326
void bundleWithSucc()
Bundle this instruction with its successor.
uint32_t getCFIType() const
Helper to extract a CFI type hash if one has been added.
Definition: MachineInstr.h:839
int findRegisterDefOperandIdx(Register Reg, bool isDead=false, bool Overlap=false, const TargetRegisterInfo *TRI=nullptr) const
Returns the operand index that is a def of the specified register or -1 if it is not found.
bool isDebugLabel() const
void setPreInstrSymbol(MachineFunction &MF, MCSymbol *Symbol)
Set a symbol that will be emitted just prior to the instruction itself.
bool hasProperty(unsigned MCFlag, QueryType Type=AnyInBundle) const
Return true if the instruction (or in the case of a bundle, the instructions inside the bundle) has t...
Definition: MachineInstr.h:862
bool isDereferenceableInvariantLoad() const
Return true if this load instruction never traps and points to a memory location whose value doesn't ...
void setFlags(unsigned flags)
Definition: MachineInstr.h:389
QueryType
API for querying MachineInstr properties.
Definition: MachineInstr.h:851
void addImplicitDefUseOperands(MachineFunction &MF)
Add all implicit def and use operands to this instruction.
std::tuple< LLT, LLT, LLT, LLT, LLT > getFirst5LLTs() const
bool isCall(QueryType Type=AnyInBundle) const
Definition: MachineInstr.h:915
std::tuple< Register, LLT, Register, LLT, Register, LLT > getFirst3RegLLTs() const
bool getFlag(MIFlag Flag) const
Return whether an MI flag is set.
Definition: MachineInstr.h:376
uint32_t mergeFlagsWith(const MachineInstr &Other) const
Return the MIFlags which represent both MachineInstrs.
const MachineOperand & getDebugExpressionOp() const
Return the operand for the complex address expression referenced by this DBG_VALUE instruction.
std::pair< bool, bool > readsWritesVirtualRegister(Register Reg, SmallVectorImpl< unsigned > *Ops=nullptr) const
Return a pair of bools (reads, writes) indicating if this instruction reads or writes Reg.
bool isRegTiedToDefOperand(unsigned UseOpIdx, unsigned *DefOpIdx=nullptr) const
Return true if the use operand of the specified index is tied to a def operand.
bool allImplicitDefsAreDead() const
Return true if all the implicit defs of this instruction are dead.
void cloneMemRefs(MachineFunction &MF, const MachineInstr &MI)
Clone another MachineInstr's memory reference descriptor list and replace ours with it.
const TargetRegisterClass * getRegClassConstraintEffectForVReg(Register Reg, const TargetRegisterClass *CurRC, const TargetInstrInfo *TII, const TargetRegisterInfo *TRI, bool ExploreBundle=false) const
Applies the constraints (def/use) implied by this MI on Reg to the given CurRC.
bool isBundle() const
bool isDebugInstr() const
unsigned getNumDebugOperands() const
Returns the total number of operands which are debug locations.
Definition: MachineInstr.h:549
unsigned getNumOperands() const
Retuns the total number of operands.
Definition: MachineInstr.h:546
void addOperand(MachineFunction &MF, const MachineOperand &Op)
Add the specified operand to the instruction.
MachineInstr * removeFromBundle()
Unlink this instruction from its basic block and return it without deleting it.
void dumpr(const MachineRegisterInfo &MRI, unsigned MaxDepth=UINT_MAX) const
Print on dbgs() the current instruction and the instructions defining its operands and so on until we...
void copyIRFlags(const Instruction &I)
Copy all flags to MachineInst MIFlags.
bool isDebugValueLike() const
bool isInlineAsm() const
bool memoperands_empty() const
Return true if we don't have any memory operands which described the memory access done by this instr...
Definition: MachineInstr.h:786
mmo_iterator memoperands_end() const
Access to memory operands of the instruction.
Definition: MachineInstr.h:781
bool isDebugRef() const
void collectDebugValues(SmallVectorImpl< MachineInstr * > &DbgValues)
Scan instructions immediately following MI and collect any matching DBG_VALUEs.
unsigned getOperandNo(const_mop_iterator I) const
Returns the number of the operand iterator I points to.
Definition: MachineInstr.h:749
bool mayAlias(AAResults *AA, const MachineInstr &Other, bool UseTBAA) const
Returns true if this instruction's memory access aliases the memory access of Other.
unsigned getNumExplicitOperands() const
Returns the number of non-implicit operands.
void setMemRefs(MachineFunction &MF, ArrayRef< MachineMemOperand * > MemRefs)
Assign this MachineInstr's memory reference descriptor list.
bool isBundledWithPred() const
Return true if this instruction is part of a bundle, and it is not the first instruction in the bundl...
Definition: MachineInstr.h:451
std::optional< unsigned > getFoldedRestoreSize(const TargetInstrInfo *TII) const
Return a valid size if the instruction is a folded restore instruction.
std::tuple< LLT, LLT > getFirst2LLTs() const
void unbundleFromPred()
Break bundle above this instruction.
void copyImplicitOps(MachineFunction &MF, const MachineInstr &MI)
Copy implicit register operands from specified instruction to this instruction.
bool mayLoad(QueryType Type=AnyInBundle) const
Return true if this instruction could possibly read memory.
bool isStackAligningInlineAsm() const
void dropMemRefs(MachineFunction &MF)
Clear this MachineInstr's memory reference descriptor list.
bool shouldUpdateCallSiteInfo() const
Return true if copying, moving, or erasing this instruction requires updating Call Site Info (see cop...
MDNode * getPCSections() const
Helper to extract PCSections metadata target sections.
Definition: MachineInstr.h:829
bool isCFIInstruction() const
int findFirstPredOperandIdx() const
Find the index of the first operand in the operand list that is used to represent the predicate.
const MCInstrDesc & getDesc() const
Returns the target instruction descriptor of this MachineInstr.
Definition: MachineInstr.h:540
unsigned getBundleSize() const
Return the number of instructions inside the MI bundle, excluding the bundle header.
void cloneMergedMemRefs(MachineFunction &MF, ArrayRef< const MachineInstr * > MIs)
Clone the merge of multiple MachineInstrs' memory reference descriptors list and replace ours with it...
std::tuple< Register, LLT, Register, LLT, Register, LLT, Register, LLT > getFirst4RegLLTs() const
std::tuple< Register, LLT, Register, LLT > getFirst2RegLLTs() const
unsigned getNumMemOperands() const
Return the number of memory operands.
Definition: MachineInstr.h:792
void clearFlag(MIFlag Flag)
clearFlag - Clear a MI flag.
Definition: MachineInstr.h:398
unsigned isConstantValuePHI() const
If the specified instruction is a PHI that always merges together the same virtual register,...
const TargetRegisterClass * getRegClassConstraintEffect(unsigned OpIdx, const TargetRegisterClass *CurRC, const TargetInstrInfo *TII, const TargetRegisterInfo *TRI) const
Applies the constraints (def/use) implied by the OpIdx operand to the given CurRC.
bool isOperandSubregIdx(unsigned OpIdx) const
Return true if operand OpIdx is a subregister index.
Definition: MachineInstr.h:630
InlineAsm::AsmDialect getInlineAsmDialect() const
bool hasUnmodeledSideEffects() const
Return true if this instruction has side effects that are not modeled by mayLoad / mayStore,...
bool isEquivalentDbgInstr(const MachineInstr &Other) const
Returns true if this instruction is a debug instruction that represents an identical debug value to O...
const DILabel * getDebugLabel() const
Return the debug label referenced by this DBG_LABEL instruction.
void untieRegOperand(unsigned OpIdx)
Break any tie involving OpIdx.
static uint32_t copyFlagsFromInstruction(const Instruction &I)
void insert(mop_iterator InsertBefore, ArrayRef< MachineOperand > Ops)
Inserts Ops BEFORE InsertBefore. Can untie/retie tied operands.
void setDesc(const MCInstrDesc &TID)
Replace the instruction descriptor (thus opcode) of the current instruction with a new one.
bool isJumpTableDebugInfo() const
unsigned getNumExplicitDefs() const
Returns the number of non-implicit definitions.
void eraseFromBundle()
Unlink 'this' from its basic block and delete it.
iterator_range< mop_iterator > operands()
Definition: MachineInstr.h:659
void setHeapAllocMarker(MachineFunction &MF, MDNode *MD)
Set a marker on instructions that denotes where we should create and emit heap alloc site labels.
const DILocalVariable * getDebugVariable() const
Return the debug variable referenced by this DBG_VALUE instruction.
bool hasComplexRegisterTies() const
Return true when an instruction has tied register that can't be determined by the instruction's descr...
LLT getTypeToPrint(unsigned OpIdx, SmallBitVector &PrintedTypes, const MachineRegisterInfo &MRI) const
Debugging support: determine the generic type to be printed (if needed) on uses and defs.
void substituteRegister(Register FromReg, Register ToReg, unsigned SubIdx, const TargetRegisterInfo &RegInfo)
Replace all occurrences of FromReg with ToReg:SubIdx, properly composing subreg indices where necessa...
unsigned findTiedOperandIdx(unsigned OpIdx) const
Given the index of a tied register operand, find the operand it is tied to.
void tieOperands(unsigned DefIdx, unsigned UseIdx)
Add a tie between the register operands at DefIdx and UseIdx.
mmo_iterator memoperands_begin() const
Access to memory operands of the instruction.
Definition: MachineInstr.h:774
void cloneInstrSymbols(MachineFunction &MF, const MachineInstr &MI)
Clone another MachineInstr's pre- and post- instruction symbols and replace ours with it.
void changeDebugValuesDefReg(Register Reg)
Find all DBG_VALUEs that point to the register def in this instruction and point them to Reg instead.
bool isIdenticalTo(const MachineInstr &Other, MICheckType Check=CheckDefs) const
Return true if this instruction is identical to Other.
bool hasOrderedMemoryRef() const
Return true if this instruction may have an ordered or volatile memory reference, or if the informati...
const MachineFunction * getMF() const
Return the function that contains the basic block that this instruction belongs to.
std::optional< unsigned > getRestoreSize(const TargetInstrInfo *TII) const
Return a valid size if the instruction is a restore instruction.
std::optional< unsigned > getFoldedSpillSize(const TargetInstrInfo *TII) const
Return a valid size if the instruction is a folded spill instruction.
const DIExpression * getDebugExpression() const
Return the complex address expression referenced by this DBG_VALUE instruction.
ArrayRef< MachineMemOperand * > memoperands() const
Access to memory operands of the instruction.
Definition: MachineInstr.h:756
void print(raw_ostream &OS, bool IsStandalone=true, bool SkipOpers=false, bool SkipDebugLoc=false, bool AddNewLine=true, const TargetInstrInfo *TII=nullptr) const
Print this MI to OS.
bool isNonListDebugValue() const
std::optional< unsigned > getSpillSize(const TargetInstrInfo *TII) const
Return a valid size if the instruction is a spill instruction.
bool isLoadFoldBarrier() const
Returns true if it is illegal to fold a load across this instruction.
bool mayStore(QueryType Type=AnyInBundle) const
Return true if this instruction could possibly modify memory.
void setFlag(MIFlag Flag)
Set a MI flag.
Definition: MachineInstr.h:383
const DebugLoc & getDebugLoc() const
Returns the debug location id of this MachineInstr.
Definition: MachineInstr.h:472
std::tuple< LLT, LLT, LLT > getFirst3LLTs() const
const MachineOperand & getDebugVariableOp() const
Return the operand for the debug variable referenced by this DBG_VALUE instruction.
void eraseFromParent()
Unlink 'this' from the containing basic block and delete it.
void setPhysRegsDeadExcept(ArrayRef< Register > UsedRegs, const TargetRegisterInfo &TRI)
Mark every physreg used by this instruction as dead except those in the UsedRegs list.
bool isCandidateForCallSiteEntry(QueryType Type=IgnoreBundle) const
Return true if this is a call instruction that may have an associated call site entry in the debug in...
void removeOperand(unsigned OpNo)
Erase an operand from an instruction, leaving it with one fewer operand than it started with.
MCSymbol * getPreInstrSymbol() const
Helper to extract a pre-instruction symbol if one has been added.
Definition: MachineInstr.h:795
bool addRegisterKilled(Register IncomingReg, const TargetRegisterInfo *RegInfo, bool AddIfNotFound=false)
We have determined MI kills a register.
void setPostInstrSymbol(MachineFunction &MF, MCSymbol *Symbol)
Set a symbol that will be emitted just after the instruction itself.
bool isDebugValue() const
const MachineOperand & getDebugOffset() const
Return the operand containing the offset to be used if this DBG_VALUE instruction is indirect; will b...
Definition: MachineInstr.h:477
MachineOperand & getDebugOperand(unsigned Index)
Definition: MachineInstr.h:562
iterator_range< mop_iterator > implicit_operands()
Definition: MachineInstr.h:673
bool isBundledWithSucc() const
Return true if this instruction is part of a bundle, and it is not the last instruction in the bundle...
Definition: MachineInstr.h:455
void addRegisterDefined(Register Reg, const TargetRegisterInfo *RegInfo=nullptr)
We have determined MI defines a register.
MDNode * getHeapAllocMarker() const
Helper to extract a heap alloc marker if one has been added.
Definition: MachineInstr.h:819
unsigned getDebugInstrNum()
Fetch the instruction number of this MachineInstr.
std::tuple< LLT, LLT, LLT, LLT > getFirst4LLTs() const
bool isPHI() const
void clearRegisterDeads(Register Reg)
Clear all dead flags on operands defining register Reg.
void clearRegisterKills(Register Reg, const TargetRegisterInfo *RegInfo)
Clear all kill flags affecting Reg.
const MachineOperand & getOperand(unsigned i) const
Definition: MachineInstr.h:553
uint32_t getFlags() const
Return the MI flags bitvector.
Definition: MachineInstr.h:371
bool isPseudoProbe() const
bool hasRegisterImplicitUseOperand(Register Reg) const
Returns true if the MachineInstr has an implicit-use operand of exactly the given register (not consi...
MCSymbol * getPostInstrSymbol() const
Helper to extract a post-instruction symbol if one has been added.
Definition: MachineInstr.h:807
void unbundleFromSucc()
Break bundle below this instruction.
void clearKillInfo()
Clears kill flags on all operands.
bool isDebugEntryValue() const
A DBG_VALUE is an entry value iff its debug expression contains the DW_OP_LLVM_entry_value operation.
bool isIndirectDebugValue() const
A DBG_VALUE is indirect iff the location operand is a register and the offset operand is an immediate...
unsigned getNumDefs() const
Returns the total number of definitions.
Definition: MachineInstr.h:612
void emitError(StringRef Msg) const
Emit an error referring to the source location of this instruction.
void setPCSections(MachineFunction &MF, MDNode *MD)
bool isKill() const
MachineOperand * findRegisterDefOperand(Register Reg, bool isDead=false, bool Overlap=false, const TargetRegisterInfo *TRI=nullptr)
Wrapper for findRegisterDefOperandIdx, it returns a pointer to the MachineOperand rather than an inde...
bool isVariadic(QueryType Type=IgnoreBundle) const
Return true if this instruction can have a variable number of operands.
Definition: MachineInstr.h:883
int findInlineAsmFlagIdx(unsigned OpIdx, unsigned *GroupNo=nullptr) const
Find the index of the flag word operand that corresponds to operand OpIdx on an inline asm instructio...
bool allDefsAreDead() const
Return true if all the defs of this instruction are dead.
const TargetRegisterClass * getRegClassConstraint(unsigned OpIdx, const TargetInstrInfo *TII, const TargetRegisterInfo *TRI) const
Compute the static register class constraint for operand OpIdx.
void moveBefore(MachineInstr *MovePos)
Move the instruction before MovePos.
void addMemOperand(MachineFunction &MF, MachineMemOperand *MO)
Add a MachineMemOperand to the machine instruction.
bool addRegisterDead(Register Reg, const TargetRegisterInfo *RegInfo, bool AddIfNotFound=false)
We have determined MI defined a register without a use.
bool mayFoldInlineAsmRegOp(unsigned OpId) const
Returns true if the register operand can be folded with a load or store into a frame index.
A description of a memory reference used in the backend.
const PseudoSourceValue * getPseudoValue() const
bool isUnordered() const
Returns true if this memory operation doesn't have any ordering constraints other than normal aliasin...
uint64_t getSize() const
Return the size in bytes of the memory reference.
AAMDNodes getAAInfo() const
Return the AA tags for the memory reference.
const Value * getValue() const
Return the base address of the memory access.
int64_t getOffset() const
For normal values, this is a byte offset added to the base address.
const Module * getModule() const
MachineOperand class - Representation of each machine instruction operand.
unsigned getSubReg() const
void substVirtReg(Register Reg, unsigned SubIdx, const TargetRegisterInfo &)
substVirtReg - Substitute the current register with the virtual subregister Reg:SubReg.
static void printSubRegIdx(raw_ostream &OS, uint64_t Index, const TargetRegisterInfo *TRI)
Print a subreg index operand.
int64_t getImm() const
bool isImplicit() const
bool isReg() const
isReg - Tests if this is a MO_Register operand.
bool isRegMask() const
isRegMask - Tests if this is a MO_RegisterMask operand.
const MDNode * getMetadata() const
void setIsDead(bool Val=true)
void setMetadata(const MDNode *MD)
bool isImm() const
isImm - Tests if this is a MO_Immediate operand.
void ChangeToImmediate(int64_t ImmVal, unsigned TargetFlags=0)
ChangeToImmediate - Replace this operand with a new immediate operand of the specified value.
bool isMetadata() const
isMetadata - Tests if this is a MO_Metadata operand.
void setIsKill(bool Val=true)
MachineInstr * getParent()
getParent - Return the instruction that this operand belongs to.
void substPhysReg(MCRegister Reg, const TargetRegisterInfo &)
substPhysReg - Substitute the current register with the physical register Reg, taking any existing Su...
void setIsEarlyClobber(bool Val=true)
void setIsUndef(bool Val=true)
void setIsDebug(bool Val=true)
Register getReg() const
getReg - Returns the register number.
bool isIdenticalTo(const MachineOperand &Other) const
Returns true if this operand is identical to the specified operand except for liveness related flags ...
static bool clobbersPhysReg(const uint32_t *RegMask, MCRegister PhysReg)
clobbersPhysReg - Returns true if this RegMask clobbers PhysReg.
void print(raw_ostream &os, const TargetRegisterInfo *TRI=nullptr, const TargetIntrinsicInfo *IntrinsicInfo=nullptr) const
Print the MachineOperand to os.
static void printSymbol(raw_ostream &OS, MCSymbol &Sym)
Print a MCSymbol as an operand.
static MachineOperand CreateReg(Register Reg, bool isDef, bool isImp=false, bool isKill=false, bool isDead=false, bool isUndef=false, bool isEarlyClobber=false, unsigned SubReg=0, bool isDebug=false, bool isInternalRead=false, bool isRenamable=false)
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
Representation for a specific memory location.
void printAsOperand(raw_ostream &OS, const Module *M=nullptr) const
Print as operand.
Definition: AsmWriter.cpp:5158
Manage lifetime of a slot tracker for printing IR.
void incorporateFunction(const Function &F)
Incorporate the given function.
Definition: AsmWriter.cpp:896
A Module instance is used to store all the information related to an LLVM module.
Definition: Module.h:65
LLVMContext & getContext() const
Get the global data context.
Definition: Module.h:283
Utility class for integer operators which may exhibit overflow - Add, Sub, Mul, and Shl.
Definition: Operator.h:75
A udiv or sdiv instruction, which can be marked as "exact", indicating that no bits are destroyed.
Definition: Operator.h:138
Special value supplied for machine level alias analysis.
virtual bool mayAlias(const MachineFrameInfo *) const
Return true if the memory pointed to by this PseudoSourceValue can ever alias an LLVM IR Value.
Wrapper class representing virtual and physical registers.
Definition: Register.h:19
constexpr bool isVirtual() const
Return true if the specified register number is in the virtual register namespace.
Definition: Register.h:91
constexpr bool isPhysical() const
Return true if the specified register number is in the physical register namespace.
Definition: Register.h:95
This is a 'bitvector' (really, a variable-sized bit array), optimized for the case when the array is ...
SmallBitVector & set()
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
Definition: SmallPtrSet.h:321
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
Definition: SmallPtrSet.h:342
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
Definition: SmallPtrSet.h:427
bool empty() const
Definition: SmallVector.h:94
size_t size() const
Definition: SmallVector.h:91
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: SmallVector.h:586
reference emplace_back(ArgTypes &&... Args)
Definition: SmallVector.h:950
void reserve(size_type N)
Definition: SmallVector.h:676
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
Definition: SmallVector.h:696
void push_back(const T &Elt)
Definition: SmallVector.h:426
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1209
static unsigned getNextMetaArgIdx(const MachineInstr *MI, unsigned CurIdx)
Get index of next meta operand.
Definition: StackMaps.cpp:171
MI-level Statepoint operands.
Definition: StackMaps.h:158
int getFirstGCPtrIdx()
Get index of first GC pointer operand, or -1 if there are none.
Definition: StackMaps.cpp:125
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:50
TargetInstrInfo - Interface to description of machine instruction set.
TargetIntrinsicInfo - Interface to description of target intrinsic functions.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
MCRegister getSubReg(MCRegister Reg, unsigned Idx) const
Returns the physical register number of sub-register "Index" for physical register RegNo.
bool regsOverlap(Register RegA, Register RegB) const
Returns true if the two registers are equal or alias each other.
virtual const TargetInstrInfo * getInstrInfo() const
The instances of the Type class are immutable: once they are created, they are never changed.
Definition: Type.h:45
A Use represents the edge between a Value definition and its users.
Definition: Use.h:43
LLVM Value Representation.
Definition: Value.h:74
formatted_raw_ostream & PadToColumn(unsigned NewCol)
PadToColumn - Align the output to some column number.
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition: raw_ostream.h:52
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
MCInstrDesc const & getDesc(MCInstrInfo const &MCII, MCInst const &MCI)
@ UnmodeledSideEffects
Definition: MCInstrDesc.h:173
constexpr double e
Definition: MathExtras.h:31
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
Definition: STLExtras.h:329
hash_code hash_value(const FixedPointSemantics &Val)
Definition: APFixedPoint.h:128
formatted_raw_ostream & fdbgs()
fdbgs() - This returns a reference to a formatted_raw_ostream for debug output.
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
void updateDbgValueForSpill(MachineInstr &Orig, int FrameIndex, Register Reg)
Update a DBG_VALUE whose value has been spilled to FrameIndex.
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1738
iterator_range< pointee_iterator< WrappedIteratorT > > make_pointee_range(RangeT &&Range)
Definition: iterator.h:336
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:163
bool none_of(R &&Range, UnaryPredicate P)
Provide wrappers to std::none_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1745
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
Definition: Error.cpp:156
@ Other
Any other memory.
OutputIt move(R &&Range, OutputIt Out)
Provide wrappers to std::move which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1858
MachineInstr * buildDbgValueForSpill(MachineBasicBlock &BB, MachineBasicBlock::iterator I, const MachineInstr &Orig, int FrameIndex, Register SpillReg)
Clone a DBG_VALUE whose value has been spilled to FrameIndex.
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Definition: STLExtras.h:1888
hash_code hash_combine_range(InputIteratorT first, InputIteratorT last)
Compute a hash_code for a sequence of values.
Definition: Hashing.h:491
Implement std::hash so that hash_code can be used in STL containers.
Definition: BitVector.h:858
#define N
A collection of metadata nodes that might be associated with a memory access used by the alias-analys...
Definition: Metadata.h:760
static unsigned getHashValue(const MachineInstr *const &MI)