//===-- TargetInstrInfo.cpp - Target Instruction Information --------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the TargetInstrInfo class.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/BinaryFormat/Dwarf.h"
#include "llvm/CodeGen/MachineCombinerPattern.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/MachineScheduler.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/CodeGen/ScoreboardHazardRecognizer.h"
#include "llvm/CodeGen/StackMaps.h"
#include "llvm/CodeGen/TargetFrameLowering.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSchedule.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCInstrItineraries.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"

using namespace llvm;

static cl::opt<bool> DisableHazardRecognizer(
    "disable-sched-hazard", cl::Hidden, cl::init(false),
    cl::desc("Disable hazard detection during preRA scheduling"));

TargetInstrInfo::~TargetInstrInfo() = default;

const TargetRegisterClass *
TargetInstrInfo::getRegClass(const MCInstrDesc &MCID, unsigned OpNum,
                             const TargetRegisterInfo *TRI,
                             const MachineFunction &MF) const {
  if (OpNum >= MCID.getNumOperands())
    return nullptr;

  short RegClass = MCID.operands()[OpNum].RegClass;
  if (MCID.operands()[OpNum].isLookupPtrRegClass())
    return TRI->getPointerRegClass(MF, RegClass);

  // Instructions like INSERT_SUBREG do not have fixed register classes.
  if (RegClass < 0)
    return nullptr;

  // Otherwise just look it up normally.
  return TRI->getRegClass(RegClass);
}

/// insertNoop - Insert a noop into the instruction stream at the specified
/// point.
void TargetInstrInfo::insertNoop(MachineBasicBlock &MBB,
                                 MachineBasicBlock::iterator MI) const {
  llvm_unreachable("Target didn't implement insertNoop!");
}

/// insertNoops - Insert noops into the instruction stream at the specified
/// point.
void TargetInstrInfo::insertNoops(MachineBasicBlock &MBB,
                                  MachineBasicBlock::iterator MI,
                                  unsigned Quantity) const {
  for (unsigned i = 0; i < Quantity; ++i)
    insertNoop(MBB, MI);
}

static bool isAsmComment(const char *Str, const MCAsmInfo &MAI) {
  return strncmp(Str, MAI.getCommentString().data(),
                 MAI.getCommentString().size()) == 0;
}

/// Measure the specified inline asm to determine an approximation of its
/// length.
/// Comments (which run till the next SeparatorString or newline) do not
/// count as an instruction.
/// Any other non-whitespace text is considered an instruction, with
/// multiple instructions separated by SeparatorString or newlines.
/// Variable-length instructions are not handled here; this function
/// may be overloaded in the target code to do that.
/// We implement a special case of the .space directive which takes only a
/// single integer argument in base 10 that is the size in bytes. This is a
/// restricted form of the GAS directive in that we only interpret
/// simple--i.e. not a logical or arithmetic expression--size values without
/// the optional fill value. This is primarily used for creating arbitrary
/// sized inline asm blocks for testing purposes.
unsigned TargetInstrInfo::getInlineAsmLength(
    const char *Str, const MCAsmInfo &MAI,
    const TargetSubtargetInfo *STI) const {
  // Count the number of instructions in the asm.
  bool AtInsnStart = true;
  unsigned Length = 0;
  const unsigned MaxInstLength = MAI.getMaxInstLength(STI);
  for (; *Str; ++Str) {
    if (*Str == '\n' || strncmp(Str, MAI.getSeparatorString(),
                                strlen(MAI.getSeparatorString())) == 0) {
      AtInsnStart = true;
    } else if (isAsmComment(Str, MAI)) {
      // Stop counting as an instruction after a comment until the next
      // separator.
      AtInsnStart = false;
    }

    if (AtInsnStart && !isSpace(static_cast<unsigned char>(*Str))) {
      unsigned AddLength = MaxInstLength;
      if (strncmp(Str, ".space", 6) == 0) {
        char *EStr;
        int SpaceSize;
        SpaceSize = strtol(Str + 6, &EStr, 10);
        SpaceSize = SpaceSize < 0 ? 0 : SpaceSize;
        while (*EStr != '\n' && isSpace(static_cast<unsigned char>(*EStr)))
          ++EStr;
        if (*EStr == '\0' || *EStr == '\n' ||
            isAsmComment(EStr, MAI)) // Successfully parsed .space argument
          AddLength = SpaceSize;
      }
      Length += AddLength;
      AtInsnStart = false;
    }
  }

  return Length;
}
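
// Editor's illustration (not part of the upstream file): on a hypothetical
// target whose maximum instruction length is 4 and whose separator string is
// ";", the asm string
//
//   "nop; nop # trailing comment\n.space 16\n"
//
// measures 4 + 4 + 16 = 24 bytes: each "nop" token counts as one
// maximum-length instruction, the comment contributes nothing, and the
// .space directive contributes exactly its base-10 byte argument.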

/// ReplaceTailWithBranchTo - Delete the instruction OldInst and everything
/// after it, replacing it with an unconditional branch to NewDest.
void
TargetInstrInfo::ReplaceTailWithBranchTo(MachineBasicBlock::iterator Tail,
                                         MachineBasicBlock *NewDest) const {
  MachineBasicBlock *MBB = Tail->getParent();

  // Remove all the old successors of MBB from the CFG.
  while (!MBB->succ_empty())
    MBB->removeSuccessor(MBB->succ_begin());

  // Save off the debug loc before erasing the instruction.
  DebugLoc DL = Tail->getDebugLoc();

  // Update call site info and remove all the dead instructions
  // from the end of MBB.
  while (Tail != MBB->end()) {
    auto MI = Tail++;
    if (MI->shouldUpdateCallSiteInfo())
      MBB->getParent()->eraseCallSiteInfo(&*MI);
    MBB->erase(MI);
  }

  // If NewDest isn't immediately after MBB in the function layout, insert an
  // unconditional branch to it.
  if (++MachineFunction::iterator(MBB) != MachineFunction::iterator(NewDest))
    insertBranch(*MBB, NewDest, nullptr, SmallVector<MachineOperand, 0>(), DL);
  MBB->addSuccessor(NewDest);
}

MachineInstr *TargetInstrInfo::commuteInstructionImpl(MachineInstr &MI,
                                                      bool NewMI, unsigned Idx1,
                                                      unsigned Idx2) const {
  const MCInstrDesc &MCID = MI.getDesc();
  bool HasDef = MCID.getNumDefs();
  if (HasDef && !MI.getOperand(0).isReg())
    // No idea how to commute this instruction. Target should implement its own.
    return nullptr;

  unsigned CommutableOpIdx1 = Idx1; (void)CommutableOpIdx1;
  unsigned CommutableOpIdx2 = Idx2; (void)CommutableOpIdx2;
  assert(findCommutedOpIndices(MI, CommutableOpIdx1, CommutableOpIdx2) &&
         CommutableOpIdx1 == Idx1 && CommutableOpIdx2 == Idx2 &&
         "TargetInstrInfo::CommuteInstructionImpl(): not commutable operands.");
  assert(MI.getOperand(Idx1).isReg() && MI.getOperand(Idx2).isReg() &&
         "This only knows how to commute register operands so far");

  Register Reg0 = HasDef ? MI.getOperand(0).getReg() : Register();
  Register Reg1 = MI.getOperand(Idx1).getReg();
  Register Reg2 = MI.getOperand(Idx2).getReg();
  unsigned SubReg0 = HasDef ? MI.getOperand(0).getSubReg() : 0;
  unsigned SubReg1 = MI.getOperand(Idx1).getSubReg();
  unsigned SubReg2 = MI.getOperand(Idx2).getSubReg();
  bool Reg1IsKill = MI.getOperand(Idx1).isKill();
  bool Reg2IsKill = MI.getOperand(Idx2).isKill();
  bool Reg1IsUndef = MI.getOperand(Idx1).isUndef();
  bool Reg2IsUndef = MI.getOperand(Idx2).isUndef();
  bool Reg1IsInternal = MI.getOperand(Idx1).isInternalRead();
  bool Reg2IsInternal = MI.getOperand(Idx2).isInternalRead();
  // Avoid calling isRenamable for virtual registers since we assert that
  // renamable property is only queried/set for physical registers.
  bool Reg1IsRenamable =
      Reg1.isPhysical() ? MI.getOperand(Idx1).isRenamable() : false;
  bool Reg2IsRenamable =
      Reg2.isPhysical() ? MI.getOperand(Idx2).isRenamable() : false;
  // If the destination is tied to either of the commuted source registers, it
  // must be updated.
  if (HasDef && Reg0 == Reg1 &&
      MI.getDesc().getOperandConstraint(Idx1, MCOI::TIED_TO) == 0) {
    Reg2IsKill = false;
    Reg0 = Reg2;
    SubReg0 = SubReg2;
  } else if (HasDef && Reg0 == Reg2 &&
             MI.getDesc().getOperandConstraint(Idx2, MCOI::TIED_TO) == 0) {
    Reg1IsKill = false;
    Reg0 = Reg1;
    SubReg0 = SubReg1;
  }

  MachineInstr *CommutedMI = nullptr;
  if (NewMI) {
    // Create a new instruction.
    MachineFunction &MF = *MI.getMF();
    CommutedMI = MF.CloneMachineInstr(&MI);
  } else {
    CommutedMI = &MI;
  }

  if (HasDef) {
    CommutedMI->getOperand(0).setReg(Reg0);
    CommutedMI->getOperand(0).setSubReg(SubReg0);
  }
  CommutedMI->getOperand(Idx2).setReg(Reg1);
  CommutedMI->getOperand(Idx1).setReg(Reg2);
  CommutedMI->getOperand(Idx2).setSubReg(SubReg1);
  CommutedMI->getOperand(Idx1).setSubReg(SubReg2);
  CommutedMI->getOperand(Idx2).setIsKill(Reg1IsKill);
  CommutedMI->getOperand(Idx1).setIsKill(Reg2IsKill);
  CommutedMI->getOperand(Idx2).setIsUndef(Reg1IsUndef);
  CommutedMI->getOperand(Idx1).setIsUndef(Reg2IsUndef);
  CommutedMI->getOperand(Idx2).setIsInternalRead(Reg1IsInternal);
  CommutedMI->getOperand(Idx1).setIsInternalRead(Reg2IsInternal);
  // Avoid calling setIsRenamable for virtual registers since we assert that
  // renamable property is only queried/set for physical registers.
  if (Reg1.isPhysical())
    CommutedMI->getOperand(Idx2).setIsRenamable(Reg1IsRenamable);
  if (Reg2.isPhysical())
    CommutedMI->getOperand(Idx1).setIsRenamable(Reg2IsRenamable);
  return CommutedMI;
}

MachineInstr *TargetInstrInfo::commuteInstruction(MachineInstr &MI, bool NewMI,
                                                  unsigned OpIdx1,
                                                  unsigned OpIdx2) const {
  // If OpIdx1 or OpIdx2 is not specified, then this method is free to choose
  // any commutable operand, which is done in the findCommutedOpIndices()
  // method called below.
  if ((OpIdx1 == CommuteAnyOperandIndex || OpIdx2 == CommuteAnyOperandIndex) &&
      !findCommutedOpIndices(MI, OpIdx1, OpIdx2)) {
    assert(MI.isCommutable() &&
           "Precondition violation: MI must be commutable.");
    return nullptr;
  }
  return commuteInstructionImpl(MI, NewMI, OpIdx1, OpIdx2);
}
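
// A minimal usage sketch (editor's note, not part of the upstream file):
// callers that don't care which operands get swapped pass the wildcard index
// and let findCommutedOpIndices() pick a commutable pair, e.g.:
//
//   MachineInstr *NewMI = TII->commuteInstruction(
//       MI, /*NewMI=*/false, TargetInstrInfo::CommuteAnyOperandIndex,
//       TargetInstrInfo::CommuteAnyOperandIndex);
//   if (!NewMI) {
//     // MI was not commutable; it is left unchanged.
//   }
//
// With NewMI == false the instruction is commuted in place and the returned
// pointer equals &MI.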

bool TargetInstrInfo::fixCommutedOpIndices(unsigned &ResultIdx1,
                                           unsigned &ResultIdx2,
                                           unsigned CommutableOpIdx1,
                                           unsigned CommutableOpIdx2) {
  if (ResultIdx1 == CommuteAnyOperandIndex &&
      ResultIdx2 == CommuteAnyOperandIndex) {
    ResultIdx1 = CommutableOpIdx1;
    ResultIdx2 = CommutableOpIdx2;
  } else if (ResultIdx1 == CommuteAnyOperandIndex) {
    if (ResultIdx2 == CommutableOpIdx1)
      ResultIdx1 = CommutableOpIdx2;
    else if (ResultIdx2 == CommutableOpIdx2)
      ResultIdx1 = CommutableOpIdx1;
    else
      return false;
  } else if (ResultIdx2 == CommuteAnyOperandIndex) {
    if (ResultIdx1 == CommutableOpIdx1)
      ResultIdx2 = CommutableOpIdx2;
    else if (ResultIdx1 == CommutableOpIdx2)
      ResultIdx2 = CommutableOpIdx1;
    else
      return false;
  } else
    // Check that the result operand indices match the given commutable
    // operand indices.
    return (ResultIdx1 == CommutableOpIdx1 && ResultIdx2 == CommutableOpIdx2) ||
           (ResultIdx1 == CommutableOpIdx2 && ResultIdx2 == CommutableOpIdx1);

  return true;
}
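
// Worked example (editor's note, not part of the upstream file): suppose an
// instruction's commutable operands are at indices 1 and 2
// (CommutableOpIdx1/2), and the caller asked for
// ResultIdx1 = CommuteAnyOperandIndex, ResultIdx2 = 1. The second branch
// above resolves the wildcard to the partner of the fixed index, yielding
// ResultIdx1 = 2, ResultIdx2 = 1, and returns true. Had the caller passed
// ResultIdx2 = 3, no consistent assignment would exist and the function
// would return false.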

bool TargetInstrInfo::findCommutedOpIndices(const MachineInstr &MI,
                                            unsigned &SrcOpIdx1,
                                            unsigned &SrcOpIdx2) const {
  assert(!MI.isBundle() &&
         "TargetInstrInfo::findCommutedOpIndices() can't handle bundles");

  const MCInstrDesc &MCID = MI.getDesc();
  if (!MCID.isCommutable())
    return false;

  // This assumes v0 = op v1, v2 and commuting would swap v1 and v2. If this
  // is not true, then the target must implement this.
  unsigned CommutableOpIdx1 = MCID.getNumDefs();
  unsigned CommutableOpIdx2 = CommutableOpIdx1 + 1;
  if (!fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2,
                            CommutableOpIdx1, CommutableOpIdx2))
    return false;

  if (!MI.getOperand(SrcOpIdx1).isReg() || !MI.getOperand(SrcOpIdx2).isReg())
    // No idea.
    return false;
  return true;
}

bool TargetInstrInfo::isUnpredicatedTerminator(const MachineInstr &MI) const {
  if (!MI.isTerminator()) return false;

  // Conditional branch is a special case.
  if (MI.isBranch() && !MI.isBarrier())
    return true;
  if (!MI.isPredicable())
    return true;
  return !isPredicated(MI);
}

bool TargetInstrInfo::PredicateInstruction(
    MachineInstr &MI, ArrayRef<MachineOperand> Pred) const {
  bool MadeChange = false;

  assert(!MI.isBundle() &&
         "TargetInstrInfo::PredicateInstruction() can't handle bundles");

  const MCInstrDesc &MCID = MI.getDesc();
  if (!MI.isPredicable())
    return false;

  for (unsigned j = 0, i = 0, e = MI.getNumOperands(); i != e; ++i) {
    if (MCID.operands()[i].isPredicate()) {
      MachineOperand &MO = MI.getOperand(i);
      if (MO.isReg()) {
        MO.setReg(Pred[j].getReg());
        MadeChange = true;
      } else if (MO.isImm()) {
        MO.setImm(Pred[j].getImm());
        MadeChange = true;
      } else if (MO.isMBB()) {
        MO.setMBB(Pred[j].getMBB());
        MadeChange = true;
      }
      ++j;
    }
  }
  return MadeChange;
}

bool TargetInstrInfo::hasLoadFromStackSlot(
    const MachineInstr &MI,
    SmallVectorImpl<const MachineMemOperand *> &Accesses) const {
  size_t StartSize = Accesses.size();
  for (MachineInstr::mmo_iterator o = MI.memoperands_begin(),
                                  oe = MI.memoperands_end();
       o != oe; ++o) {
    if ((*o)->isLoad() &&
        isa_and_nonnull<FixedStackPseudoSourceValue>((*o)->getPseudoValue()))
      Accesses.push_back(*o);
  }
  return Accesses.size() != StartSize;
}

bool TargetInstrInfo::hasStoreToStackSlot(
    const MachineInstr &MI,
    SmallVectorImpl<const MachineMemOperand *> &Accesses) const {
  size_t StartSize = Accesses.size();
  for (MachineInstr::mmo_iterator o = MI.memoperands_begin(),
                                  oe = MI.memoperands_end();
       o != oe; ++o) {
    if ((*o)->isStore() &&
        isa_and_nonnull<FixedStackPseudoSourceValue>((*o)->getPseudoValue()))
      Accesses.push_back(*o);
  }
  return Accesses.size() != StartSize;
}

bool TargetInstrInfo::getStackSlotRange(const TargetRegisterClass *RC,
                                        unsigned SubIdx, unsigned &Size,
                                        unsigned &Offset,
                                        const MachineFunction &MF) const {
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
  if (!SubIdx) {
    Size = TRI->getSpillSize(*RC);
    Offset = 0;
    return true;
  }
  unsigned BitSize = TRI->getSubRegIdxSize(SubIdx);
  // Convert bit size to byte size.
  if (BitSize % 8)
    return false;

  int BitOffset = TRI->getSubRegIdxOffset(SubIdx);
  if (BitOffset < 0 || BitOffset % 8)
    return false;

  Size = BitSize / 8;
  Offset = (unsigned)BitOffset / 8;

  assert(TRI->getSpillSize(*RC) >= (Offset + Size) && "bad subregister range");

  if (!MF.getDataLayout().isLittleEndian()) {
    Offset = TRI->getSpillSize(*RC) - (Offset + Size);
  }
  return true;
}
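
// Worked example (editor's note, not part of the upstream file): for a
// register class whose spill size is 16 bytes and a sub-register index
// covering bits [64, 128), BitSize = 64 and BitOffset = 64, so Size = 8 and
// Offset = 8 on a little-endian target. On a big-endian target the offset is
// mirrored within the slot: Offset = 16 - (8 + 8) = 0.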

void TargetInstrInfo::reMaterialize(MachineBasicBlock &MBB,
                                    MachineBasicBlock::iterator I,
                                    Register DestReg, unsigned SubIdx,
                                    const MachineInstr &Orig,
                                    const TargetRegisterInfo &TRI) const {
  MachineInstr *MI = MBB.getParent()->CloneMachineInstr(&Orig);
  MI->substituteRegister(MI->getOperand(0).getReg(), DestReg, SubIdx, TRI);
  MBB.insert(I, MI);
}

bool TargetInstrInfo::produceSameValue(const MachineInstr &MI0,
                                       const MachineInstr &MI1,
                                       const MachineRegisterInfo *MRI) const {
  return MI0.isIdenticalTo(MI1, MachineInstr::IgnoreVRegDefs);
}

MachineInstr &TargetInstrInfo::duplicate(MachineBasicBlock &MBB,
    MachineBasicBlock::iterator InsertBefore, const MachineInstr &Orig) const {
  assert(!Orig.isNotDuplicable() && "Instruction cannot be duplicated");
  MachineFunction &MF = *MBB.getParent();
  return MF.cloneMachineInstrBundle(MBB, InsertBefore, Orig);
}

// If the COPY instruction in MI can be folded to a stack operation, return
// the register class to use.
static const TargetRegisterClass *canFoldCopy(const MachineInstr &MI,
                                              const TargetInstrInfo &TII,
                                              unsigned FoldIdx) {
  assert(TII.isCopyInstr(MI) && "MI must be a COPY instruction");
  if (MI.getNumOperands() != 2)
    return nullptr;
  assert(FoldIdx < 2 && "FoldIdx refers to a nonexistent operand");

  const MachineOperand &FoldOp = MI.getOperand(FoldIdx);
  const MachineOperand &LiveOp = MI.getOperand(1 - FoldIdx);

  if (FoldOp.getSubReg() || LiveOp.getSubReg())
    return nullptr;

  Register FoldReg = FoldOp.getReg();
  Register LiveReg = LiveOp.getReg();

  assert(FoldReg.isVirtual() && "Cannot fold physregs");

  const MachineRegisterInfo &MRI = MI.getMF()->getRegInfo();
  const TargetRegisterClass *RC = MRI.getRegClass(FoldReg);

  if (LiveOp.getReg().isPhysical())
    return RC->contains(LiveOp.getReg()) ? RC : nullptr;

  if (RC->hasSubClassEq(MRI.getRegClass(LiveReg)))
    return RC;

  // FIXME: Allow folding when register classes are memory compatible.
  return nullptr;
}

MCInst TargetInstrInfo::getNop() const { llvm_unreachable("Not implemented"); }

std::pair<unsigned, unsigned>
TargetInstrInfo::getPatchpointUnfoldableRange(const MachineInstr &MI) const {
  switch (MI.getOpcode()) {
  case TargetOpcode::STACKMAP:
    // StackMapLiveValues are foldable.
    return std::make_pair(0, StackMapOpers(&MI).getVarIdx());
  case TargetOpcode::PATCHPOINT:
    // For PatchPoint, the call args are not foldable (even if reported in the
    // stackmap e.g. via anyregcc).
    return std::make_pair(0, PatchPointOpers(&MI).getVarIdx());
  case TargetOpcode::STATEPOINT:
    // For statepoints, fold deopt and gc arguments, but not call arguments.
    return std::make_pair(MI.getNumDefs(), StatepointOpers(&MI).getVarIdx());
  default:
    llvm_unreachable("unexpected stackmap opcode");
  }
}

static MachineInstr *foldPatchpoint(MachineFunction &MF, MachineInstr &MI,
                                    ArrayRef<unsigned> Ops, int FrameIndex,
                                    const TargetInstrInfo &TII) {
  unsigned StartIdx = 0;
  unsigned NumDefs = 0;
  // getPatchpointUnfoldableRange hits llvm_unreachable if MI is not a
  // stackmap, patchpoint, or statepoint.
  std::tie(NumDefs, StartIdx) = TII.getPatchpointUnfoldableRange(MI);

  unsigned DefToFoldIdx = MI.getNumOperands();

  // Return nullptr if any operands requested for folding are not foldable
  // (not part of the stackmap's live values).
  for (unsigned Op : Ops) {
    if (Op < NumDefs) {
      assert(DefToFoldIdx == MI.getNumOperands() && "Folding multiple defs");
      DefToFoldIdx = Op;
    } else if (Op < StartIdx) {
      return nullptr;
    }
    if (MI.getOperand(Op).isTied())
      return nullptr;
  }

  MachineInstr *NewMI =
      MF.CreateMachineInstr(TII.get(MI.getOpcode()), MI.getDebugLoc(), true);
  MachineInstrBuilder MIB(MF, NewMI);

  // No need to fold the return value, the metadata, or the function arguments.
  for (unsigned i = 0; i < StartIdx; ++i)
    if (i != DefToFoldIdx)
      MIB.add(MI.getOperand(i));

  for (unsigned i = StartIdx, e = MI.getNumOperands(); i < e; ++i) {
    MachineOperand &MO = MI.getOperand(i);
    unsigned TiedTo = e;
    (void)MI.isRegTiedToDefOperand(i, &TiedTo);

    if (is_contained(Ops, i)) {
      assert(TiedTo == e && "Cannot fold tied operands");
      unsigned SpillSize;
      unsigned SpillOffset;
      // Compute the spill slot size and offset.
      const TargetRegisterClass *RC =
          MF.getRegInfo().getRegClass(MO.getReg());
      bool Valid =
          TII.getStackSlotRange(RC, MO.getSubReg(), SpillSize, SpillOffset, MF);
      if (!Valid)
        report_fatal_error("cannot spill patchpoint subregister operand");
      MIB.addImm(StackMaps::IndirectMemRefOp);
      MIB.addImm(SpillSize);
      MIB.addFrameIndex(FrameIndex);
      MIB.addImm(SpillOffset);
    } else {
      MIB.add(MO);
      if (TiedTo < e) {
        assert(TiedTo < NumDefs && "Bad tied operand");
        if (TiedTo > DefToFoldIdx)
          --TiedTo;
        NewMI->tieOperands(TiedTo, NewMI->getNumOperands() - 1);
      }
    }
  }
  return NewMI;
}

MachineInstr *TargetInstrInfo::foldMemoryOperand(MachineInstr &MI,
                                                 ArrayRef<unsigned> Ops, int FI,
                                                 LiveIntervals *LIS,
                                                 VirtRegMap *VRM) const {
  auto Flags = MachineMemOperand::MONone;
  for (unsigned OpIdx : Ops)
    Flags |= MI.getOperand(OpIdx).isDef() ? MachineMemOperand::MOStore
                                          : MachineMemOperand::MOLoad;

  MachineBasicBlock *MBB = MI.getParent();
  assert(MBB && "foldMemoryOperand needs an inserted instruction");
  MachineFunction &MF = *MBB->getParent();

  // If we're not folding a load into a subreg, the size of the load is the
  // size of the spill slot. But if we are, we need to figure out what the
  // actual load size is.
  int64_t MemSize = 0;
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();

  if (Flags & MachineMemOperand::MOStore) {
    MemSize = MFI.getObjectSize(FI);
  } else {
    for (unsigned OpIdx : Ops) {
      int64_t OpSize = MFI.getObjectSize(FI);

      if (auto SubReg = MI.getOperand(OpIdx).getSubReg()) {
        unsigned SubRegSize = TRI->getSubRegIdxSize(SubReg);
        if (SubRegSize > 0 && !(SubRegSize % 8))
          OpSize = SubRegSize / 8;
      }

      MemSize = std::max(MemSize, OpSize);
    }
  }

  assert(MemSize && "Did not expect a zero-sized stack slot");

  MachineInstr *NewMI = nullptr;

  if (MI.getOpcode() == TargetOpcode::STACKMAP ||
      MI.getOpcode() == TargetOpcode::PATCHPOINT ||
      MI.getOpcode() == TargetOpcode::STATEPOINT) {
    // Fold stackmap/patchpoint.
    NewMI = foldPatchpoint(MF, MI, Ops, FI, *this);
    if (NewMI)
      MBB->insert(MI, NewMI);
  } else {
    // Ask the target to do the actual folding.
    NewMI = foldMemoryOperandImpl(MF, MI, Ops, MI, FI, LIS, VRM);
  }

  if (NewMI) {
    NewMI->setMemRefs(MF, MI.memoperands());
    // Add a memory operand, foldMemoryOperandImpl doesn't do that.
    assert((!(Flags & MachineMemOperand::MOStore) ||
            NewMI->mayStore()) &&
           "Folded a def to a non-store!");
    assert((!(Flags & MachineMemOperand::MOLoad) ||
            NewMI->mayLoad()) &&
           "Folded a use to a non-load!");
    assert(MFI.getObjectOffset(FI) != -1);
    MachineMemOperand *MMO =
        MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(MF, FI),
                                Flags, MemSize, MFI.getObjectAlign(FI));
    NewMI->addMemOperand(MF, MMO);

    // The pass "x86 speculative load hardening" always attaches symbols to
    // call instructions. We need to copy it from the old instruction.
    NewMI->cloneInstrSymbols(MF, MI);

    return NewMI;
  }

  // Straight COPY may fold as load/store.
  if (!isCopyInstr(MI) || Ops.size() != 1)
    return nullptr;

  const TargetRegisterClass *RC = canFoldCopy(MI, *this, Ops[0]);
  if (!RC)
    return nullptr;

  const MachineOperand &MO = MI.getOperand(1 - Ops[0]);
  MachineBasicBlock::iterator Pos = MI;

  if (Flags == MachineMemOperand::MOStore)
    storeRegToStackSlot(*MBB, Pos, MO.getReg(), MO.isKill(), FI, RC, TRI,
                        Register());
  else
    loadRegFromStackSlot(*MBB, Pos, MO.getReg(), FI, RC, TRI, Register());
  return &*--Pos;
}
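
// Editor's illustration (not part of the upstream file): when the register
// allocator asks to fold operand 0 of a plain copy such as
//
//   %1:gr64 = COPY %0:gr64
//
// into a spill slot, no target hook is needed: the fallback above rewrites
// the fold as storeRegToStackSlot of the source register when the folded
// operand is the def (a store into the slot), or as loadRegFromStackSlot
// into the destination when the folded operand is the use. The register
// class name here is an x86-flavored example, not a requirement.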

MachineInstr *TargetInstrInfo::foldMemoryOperand(MachineInstr &MI,
                                                 ArrayRef<unsigned> Ops,
                                                 MachineInstr &LoadMI,
                                                 LiveIntervals *LIS) const {
  assert(LoadMI.canFoldAsLoad() && "LoadMI isn't foldable!");
#ifndef NDEBUG
  for (unsigned OpIdx : Ops)
    assert(MI.getOperand(OpIdx).isUse() && "Folding load into def!");
#endif

  MachineBasicBlock &MBB = *MI.getParent();
  MachineFunction &MF = *MBB.getParent();

  // Ask the target to do the actual folding.
  MachineInstr *NewMI = nullptr;
  int FrameIndex = 0;

  if ((MI.getOpcode() == TargetOpcode::STACKMAP ||
       MI.getOpcode() == TargetOpcode::PATCHPOINT ||
       MI.getOpcode() == TargetOpcode::STATEPOINT) &&
      isLoadFromStackSlot(LoadMI, FrameIndex)) {
    // Fold stackmap/patchpoint.
    NewMI = foldPatchpoint(MF, MI, Ops, FrameIndex, *this);
    if (NewMI)
      NewMI = &*MBB.insert(MI, NewMI);
  } else {
    // Ask the target to do the actual folding.
    NewMI = foldMemoryOperandImpl(MF, MI, Ops, MI, LoadMI, LIS);
  }

  if (!NewMI)
    return nullptr;

  // Copy the memoperands from the load to the folded instruction.
  if (MI.memoperands_empty()) {
    NewMI->setMemRefs(MF, LoadMI.memoperands());
  } else {
    // Handle the rare case of folding multiple loads.
    NewMI->setMemRefs(MF, MI.memoperands());
    for (MachineInstr::mmo_iterator I = LoadMI.memoperands_begin(),
                                    E = LoadMI.memoperands_end();
         I != E; ++I) {
      NewMI->addMemOperand(MF, *I);
    }
  }
  return NewMI;
}

/// transferImplicitOperands - MI is a pseudo-instruction, and the lowered
/// replacement instructions immediately precede it. Copy any implicit
/// operands from MI to the replacement instruction.
static void transferImplicitOperands(MachineInstr *MI,
                                     const TargetRegisterInfo *TRI) {
  MachineBasicBlock::iterator CopyMI = MI;
  --CopyMI;

  Register DstReg = MI->getOperand(0).getReg();
  for (const MachineOperand &MO : MI->implicit_operands()) {
    CopyMI->addOperand(MO);

    // Be conservative about preserving kills when subregister defs are
    // involved. If there was an implicit kill of a super-register overlapping
    // the copy result, we would kill the subregisters that previous copies
    // defined.
    if (MO.isKill() && TRI->regsOverlap(DstReg, MO.getReg()))
      CopyMI->getOperand(CopyMI->getNumOperands() - 1).setIsKill(false);
  }
}

void TargetInstrInfo::lowerCopy(MachineInstr *MI,
                                const TargetRegisterInfo *TRI) const {
  if (MI->allDefsAreDead()) {
    MI->setDesc(get(TargetOpcode::KILL));
    return;
  }

  MachineOperand &DstMO = MI->getOperand(0);
  MachineOperand &SrcMO = MI->getOperand(1);

  bool IdentityCopy = (SrcMO.getReg() == DstMO.getReg());
  if (IdentityCopy || SrcMO.isUndef()) {
    // No need to insert an identity copy instruction, but replace with a KILL
    // if liveness is changed.
    if (SrcMO.isUndef() || MI->getNumOperands() > 2) {
      // We must make sure the super-register gets killed. Replace the
      // instruction with KILL.
      MI->setDesc(get(TargetOpcode::KILL));
      return;
    }
    // Vanilla identity copy.
    MI->eraseFromParent();
    return;
  }

  copyPhysReg(*MI->getParent(), MI, MI->getDebugLoc(), DstMO.getReg(),
              SrcMO.getReg(), SrcMO.isKill());

  if (MI->getNumOperands() > 2)
    transferImplicitOperands(MI, TRI);
  MI->eraseFromParent();
}

bool TargetInstrInfo::hasReassociableOperands(
    const MachineInstr &Inst, const MachineBasicBlock *MBB) const {
  const MachineOperand &Op1 = Inst.getOperand(1);
  const MachineOperand &Op2 = Inst.getOperand(2);
  const MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();

  // We need virtual register definitions for the operands that we will
  // reassociate.
  MachineInstr *MI1 = nullptr;
  MachineInstr *MI2 = nullptr;
  if (Op1.isReg() && Op1.getReg().isVirtual())
    MI1 = MRI.getUniqueVRegDef(Op1.getReg());
  if (Op2.isReg() && Op2.getReg().isVirtual())
    MI2 = MRI.getUniqueVRegDef(Op2.getReg());

  // And at least one operand must be defined in MBB.
  return MI1 && MI2 && (MI1->getParent() == MBB || MI2->getParent() == MBB);
}

bool TargetInstrInfo::areOpcodesEqualOrInverse(unsigned Opcode1,
                                               unsigned Opcode2) const {
  return Opcode1 == Opcode2 || getInverseOpcode(Opcode1) == Opcode2;
}

bool TargetInstrInfo::hasReassociableSibling(const MachineInstr &Inst,
                                             bool &Commuted) const {
  const MachineBasicBlock *MBB = Inst.getParent();
  const MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
  MachineInstr *MI1 = MRI.getUniqueVRegDef(Inst.getOperand(1).getReg());
  MachineInstr *MI2 = MRI.getUniqueVRegDef(Inst.getOperand(2).getReg());
  unsigned Opcode = Inst.getOpcode();

  // If only one operand has the same or inverse opcode and it's the second
  // source operand, the operands must be commuted.
  Commuted = !areOpcodesEqualOrInverse(Opcode, MI1->getOpcode()) &&
             areOpcodesEqualOrInverse(Opcode, MI2->getOpcode());
  if (Commuted)
    std::swap(MI1, MI2);

  // 1. The previous instruction must be the same type as Inst.
  // 2. The previous instruction must also be associative/commutative or be the
  //    inverse of such an operation (this can be different even for
  //    instructions with the same opcode if traits like fast-math-flags are
  //    included).
  // 3. The previous instruction must have virtual register definitions for its
  //    operands in the same basic block as Inst.
  // 4. The previous instruction's result must only be used by Inst.
  return areOpcodesEqualOrInverse(Opcode, MI1->getOpcode()) &&
         (isAssociativeAndCommutative(*MI1) ||
          isAssociativeAndCommutative(*MI1, /* Invert */ true)) &&
         hasReassociableOperands(*MI1, MBB) &&
         MRI.hasOneNonDBGUse(MI1->getOperand(0).getReg());
}

// 1. The operation must be associative and commutative, or be the inverse of
//    such an operation.
// 2. The instruction must have virtual register definitions for its
//    operands in the same basic block.
// 3. The instruction must have a reassociable sibling.
bool TargetInstrInfo::isReassociationCandidate(const MachineInstr &Inst,
                                               bool &Commuted) const {
  return (isAssociativeAndCommutative(Inst) ||
          isAssociativeAndCommutative(Inst, /* Invert */ true)) &&
         hasReassociableOperands(Inst, Inst.getParent()) &&
         hasReassociableSibling(Inst, Commuted);
}

// The concept of the reassociation pass is that these operations can benefit
// from this kind of transformation:
//
// A = ? op ?
// B = A op X (Prev)
// C = B op Y (Root)
// -->
// A = ? op ?
// B = X op Y
// C = A op B
//
// breaking the dependency between A and B, allowing them to be executed in
// parallel (or back-to-back in a pipeline) instead of depending on each other.

// FIXME: This has the potential to be expensive (compile time) while not
// improving the code at all. Some ways to limit the overhead:
// 1. Track successful transforms; bail out if hit rate gets too low.
// 2. Only enable at -O3 or some other non-default optimization level.
// 3. Pre-screen pattern candidates here: if an operand of the previous
//    instruction is known to not increase the critical path, then don't match
//    that pattern.
bool TargetInstrInfo::getMachineCombinerPatterns(
    MachineInstr &Root, SmallVectorImpl<MachineCombinerPattern> &Patterns,
    bool DoRegPressureReduce) const {
  bool Commute;
  if (isReassociationCandidate(Root, Commute)) {
    // We found a sequence of instructions that may be suitable for a
    // reassociation of operands to increase ILP. Specify each commutation
    // possibility for the Prev instruction in the sequence and let the
    // machine combiner decide if changing the operands is worthwhile.
    if (Commute) {
      Patterns.push_back(MachineCombinerPattern::REASSOC_AX_YB);
      Patterns.push_back(MachineCombinerPattern::REASSOC_XA_YB);
    } else {
      Patterns.push_back(MachineCombinerPattern::REASSOC_AX_BY);
      Patterns.push_back(MachineCombinerPattern::REASSOC_XA_BY);
    }
    return true;
  }

  return false;
}

/// Return true when a code sequence can improve loop throughput.
bool
TargetInstrInfo::isThroughputPattern(MachineCombinerPattern Pattern) const {
  return false;
}

std::pair<unsigned, unsigned>
TargetInstrInfo::getReassociationOpcodes(MachineCombinerPattern Pattern,
                                         const MachineInstr &Root,
                                         const MachineInstr &Prev) const {
  bool AssocCommutRoot = isAssociativeAndCommutative(Root);
  bool AssocCommutPrev = isAssociativeAndCommutative(Prev);

  // Early exit if both opcodes are associative and commutative. It's a trivial
  // reassociation when we only change the order of operands. In this case
  // opcodes are not required to have inverse versions.
  if (AssocCommutRoot && AssocCommutPrev) {
    assert(Root.getOpcode() == Prev.getOpcode() && "Expected to be equal");
    return std::make_pair(Root.getOpcode(), Root.getOpcode());
  }

  // At least one instruction is not associative or commutative.
  // Since we have matched one of the reassociation patterns, we expect that
  // the instructions' opcodes are equal or one of them is the inversion of
  // the other.
  assert(areOpcodesEqualOrInverse(Root.getOpcode(), Prev.getOpcode()) &&
         "Incorrectly matched pattern");
  unsigned AssocCommutOpcode = Root.getOpcode();
  unsigned InverseOpcode = *getInverseOpcode(Root.getOpcode());
  if (!AssocCommutRoot)
    std::swap(AssocCommutOpcode, InverseOpcode);

  // The transformation rule (`+` is any associative and commutative binary
  // operation, `-` is the inverse):
  // REASSOC_AX_BY:
  //   (A + X) + Y => A + (X + Y)
  //   (A + X) - Y => A + (X - Y)
  //   (A - X) + Y => A - (X - Y)
  //   (A - X) - Y => A - (X + Y)
  // REASSOC_XA_BY:
  //   (X + A) + Y => (X + Y) + A
  //   (X + A) - Y => (X - Y) + A
  //   (X - A) + Y => (X + Y) - A
  //   (X - A) - Y => (X - Y) - A
  // REASSOC_AX_YB:
  //   Y + (A + X) => (Y + X) + A
  //   Y - (A + X) => (Y - X) - A
  //   Y + (A - X) => (Y - X) + A
  //   Y - (A - X) => (Y + X) - A
  // REASSOC_XA_YB:
  //   Y + (X + A) => (Y + X) + A
  //   Y - (X + A) => (Y - X) - A
  //   Y + (X - A) => (Y + X) - A
  //   Y - (X - A) => (Y - X) + A
  switch (Pattern) {
  default:
    llvm_unreachable("Unexpected pattern");
  case MachineCombinerPattern::REASSOC_AX_BY:
    if (!AssocCommutRoot && AssocCommutPrev)
      return {AssocCommutOpcode, InverseOpcode};
    if (AssocCommutRoot && !AssocCommutPrev)
      return {InverseOpcode, InverseOpcode};
    if (!AssocCommutRoot && !AssocCommutPrev)
      return {InverseOpcode, AssocCommutOpcode};
    break;
  case MachineCombinerPattern::REASSOC_XA_BY:
    if (!AssocCommutRoot && AssocCommutPrev)
      return {AssocCommutOpcode, InverseOpcode};
    if (AssocCommutRoot && !AssocCommutPrev)
      return {InverseOpcode, AssocCommutOpcode};
    if (!AssocCommutRoot && !AssocCommutPrev)
      return {InverseOpcode, InverseOpcode};
    break;
  case MachineCombinerPattern::REASSOC_AX_YB:
    if (!AssocCommutRoot && AssocCommutPrev)
      return {InverseOpcode, InverseOpcode};
    if (AssocCommutRoot && !AssocCommutPrev)
      return {AssocCommutOpcode, InverseOpcode};
    if (!AssocCommutRoot && !AssocCommutPrev)
      return {InverseOpcode, AssocCommutOpcode};
    break;
  case MachineCombinerPattern::REASSOC_XA_YB:
    if (!AssocCommutRoot && AssocCommutPrev)
      return {InverseOpcode, InverseOpcode};
    if (AssocCommutRoot && !AssocCommutPrev)
      return {InverseOpcode, AssocCommutOpcode};
    if (!AssocCommutRoot && !AssocCommutPrev)
      return {AssocCommutOpcode, InverseOpcode};
    break;
  }
  llvm_unreachable("Unhandled combination");
}

// Return a pair of boolean flags showing if the new root and new prev operands
// must be swapped. See visual example of the rule in
// TargetInstrInfo::getReassociationOpcodes.
static std::pair<bool, bool> mustSwapOperands(MachineCombinerPattern Pattern) {
  switch (Pattern) {
  default:
    llvm_unreachable("Unexpected pattern");
  case MachineCombinerPattern::REASSOC_AX_BY:
    return {false, false};
  case MachineCombinerPattern::REASSOC_XA_BY:
    return {true, false};
  case MachineCombinerPattern::REASSOC_AX_YB:
    return {true, true};
  case MachineCombinerPattern::REASSOC_XA_YB:
    return {true, true};
  }
}

/// Attempt the reassociation transformation to reduce critical path length.
/// See the above comments before getMachineCombinerPatterns().
void TargetInstrInfo::reassociateOps(
    MachineInstr &Root, MachineInstr &Prev,
    MachineCombinerPattern Pattern,
    SmallVectorImpl<MachineInstr *> &InsInstrs,
    SmallVectorImpl<MachineInstr *> &DelInstrs,
    DenseMap<unsigned, unsigned> &InstrIdxForVirtReg) const {
  MachineFunction *MF = Root.getMF();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
  const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo();
  const TargetRegisterClass *RC = Root.getRegClassConstraint(0, TII, TRI);

  // This array encodes the operand index for each parameter because the
  // operands may be commuted. Each row corresponds to a pattern value,
  // and each column specifies the index of A, B, X, Y.
  unsigned OpIdx[4][4] = {
    { 1, 1, 2, 2 },
    { 1, 2, 2, 1 },
    { 2, 1, 1, 2 },
    { 2, 2, 1, 1 }
  };

  int Row;
  switch (Pattern) {
  case MachineCombinerPattern::REASSOC_AX_BY: Row = 0; break;
  case MachineCombinerPattern::REASSOC_AX_YB: Row = 1; break;
  case MachineCombinerPattern::REASSOC_XA_BY: Row = 2; break;
  case MachineCombinerPattern::REASSOC_XA_YB: Row = 3; break;
  default: llvm_unreachable("unexpected MachineCombinerPattern");
  }

  MachineOperand &OpA = Prev.getOperand(OpIdx[Row][0]);
  MachineOperand &OpB = Root.getOperand(OpIdx[Row][1]);
  MachineOperand &OpX = Prev.getOperand(OpIdx[Row][2]);
  MachineOperand &OpY = Root.getOperand(OpIdx[Row][3]);
  MachineOperand &OpC = Root.getOperand(0);

  Register RegA = OpA.getReg();
  Register RegB = OpB.getReg();
  Register RegX = OpX.getReg();
  Register RegY = OpY.getReg();
  Register RegC = OpC.getReg();

  if (RegA.isVirtual())
    MRI.constrainRegClass(RegA, RC);
  if (RegB.isVirtual())
    MRI.constrainRegClass(RegB, RC);
  if (RegX.isVirtual())
    MRI.constrainRegClass(RegX, RC);
  if (RegY.isVirtual())
    MRI.constrainRegClass(RegY, RC);
  if (RegC.isVirtual())
    MRI.constrainRegClass(RegC, RC);

  // Create a new virtual register for the result of (X op Y) instead of
  // recycling RegB because the MachineCombiner's computation of the critical
  // path requires a new register definition rather than an existing one.
  Register NewVR = MRI.createVirtualRegister(RC);
  InstrIdxForVirtReg.insert(std::make_pair(NewVR, 0));

  auto [NewRootOpc, NewPrevOpc] = getReassociationOpcodes(Pattern, Root, Prev);
  bool KillA = OpA.isKill();
  bool KillX = OpX.isKill();
  bool KillY = OpY.isKill();
  bool KillNewVR = true;

  auto [SwapRootOperands, SwapPrevOperands] = mustSwapOperands(Pattern);

  if (SwapPrevOperands) {
    std::swap(RegX, RegY);
    std::swap(KillX, KillY);
  }

  // Create new instructions for insertion.
  MachineInstrBuilder MIB1 =
      BuildMI(*MF, MIMetadata(Prev), TII->get(NewPrevOpc), NewVR)
          .addReg(RegX, getKillRegState(KillX))
          .addReg(RegY, getKillRegState(KillY))
          .setMIFlags(Prev.getFlags());

  if (SwapRootOperands) {
    std::swap(RegA, NewVR);
    std::swap(KillA, KillNewVR);
  }

  MachineInstrBuilder MIB2 =
      BuildMI(*MF, MIMetadata(Root), TII->get(NewRootOpc), RegC)
          .addReg(RegA, getKillRegState(KillA))
          .addReg(NewVR, getKillRegState(KillNewVR))
          .setMIFlags(Root.getFlags());

  setSpecialOperandAttr(Root, Prev, *MIB1, *MIB2);

  // Record new instructions for insertion and old instructions for deletion.
  InsInstrs.push_back(MIB1);
  InsInstrs.push_back(MIB2);
  DelInstrs.push_back(&Prev);
  DelInstrs.push_back(&Root);

  // We transformed:
  // B = A op X (Prev)
  // C = B op Y (Root)
  // Into:
  // B = X op Y (MIB1)
  // C = A op B (MIB2)
  // C has the same value as before, B doesn't; as such, keep the debug number
  // of C but not of B.
  if (unsigned OldRootNum = Root.peekDebugInstrNum())
    MIB2.getInstr()->setDebugInstrNum(OldRootNum);
}

void TargetInstrInfo::genAlternativeCodeSequence(
    MachineInstr &Root, MachineCombinerPattern Pattern,
    SmallVectorImpl<MachineInstr *> &InsInstrs,
    SmallVectorImpl<MachineInstr *> &DelInstrs,
    DenseMap<unsigned, unsigned> &InstIdxForVirtReg) const {
  MachineRegisterInfo &MRI = Root.getMF()->getRegInfo();

  // Select the previous instruction in the sequence based on the input
  // pattern.
  MachineInstr *Prev = nullptr;
  switch (Pattern) {
  case MachineCombinerPattern::REASSOC_AX_BY:
  case MachineCombinerPattern::REASSOC_XA_BY:
    Prev = MRI.getUniqueVRegDef(Root.getOperand(1).getReg());
    break;
  case MachineCombinerPattern::REASSOC_AX_YB:
  case MachineCombinerPattern::REASSOC_XA_YB:
    Prev = MRI.getUniqueVRegDef(Root.getOperand(2).getReg());
    break;
  default:
    llvm_unreachable("Unknown pattern for machine combiner");
  }

  // Don't reassociate if Prev and Root are in different blocks.
  if (Prev->getParent() != Root.getParent())
    return;

  reassociateOps(Root, *Prev, Pattern, InsInstrs, DelInstrs, InstIdxForVirtReg);
}

MachineTraceStrategy TargetInstrInfo::getMachineCombinerTraceStrategy() const {
  return MachineTraceStrategy::TS_MinInstrCount;
}

bool TargetInstrInfo::isReallyTriviallyReMaterializable(
    const MachineInstr &MI) const {
  const MachineFunction &MF = *MI.getMF();
  const MachineRegisterInfo &MRI = MF.getRegInfo();

  // Remat clients assume operand 0 is the defined register.
  if (!MI.getNumOperands() || !MI.getOperand(0).isReg())
    return false;
  Register DefReg = MI.getOperand(0).getReg();

  // A sub-register definition can only be rematerialized if the instruction
  // doesn't read the other parts of the register. Otherwise it is really a
  // read-modify-write operation on the full virtual register which cannot be
  // moved safely.
  if (DefReg.isVirtual() && MI.getOperand(0).getSubReg() &&
      MI.readsVirtualRegister(DefReg))
    return false;

  // A load from a fixed stack slot can be rematerialized. This may be
  // redundant with subsequent checks, but it's target-independent,
  // simple, and a common case.
  int FrameIdx = 0;
  if (isLoadFromStackSlot(MI, FrameIdx) &&
      MF.getFrameInfo().isImmutableObjectIndex(FrameIdx))
    return true;

  // Avoid instructions obviously unsafe for remat.
  if (MI.isNotDuplicable() || MI.mayStore() || MI.mayRaiseFPException() ||
      MI.hasUnmodeledSideEffects())
    return false;

  // Don't remat inline asm. We have no idea how expensive it is
  // even if it's side effect free.
  if (MI.isInlineAsm())
    return false;

  // Avoid instructions which load from potentially varying memory.
  if (MI.mayLoad() && !MI.isDereferenceableInvariantLoad())
    return false;

  // If any of the registers accessed are non-constant, conservatively assume
  // the instruction is not rematerializable.
  for (const MachineOperand &MO : MI.operands()) {
    if (!MO.isReg()) continue;
    Register Reg = MO.getReg();
    if (Reg == 0)
      continue;

    // Check for a well-behaved physical register.
    if (Reg.isPhysical()) {
      if (MO.isUse()) {
        // If the physreg has no defs anywhere, it's just an ambient register
        // and we can freely move its uses. Alternatively, if it's allocatable,
        // it could get allocated to something with a def during allocation.
        if (!MRI.isConstantPhysReg(Reg))
          return false;
      } else {
        // A physreg def. We can't remat it.
        return false;
      }
      continue;
    }

    // Only allow one virtual-register def. There may be multiple defs of the
    // same virtual register, though.
    if (MO.isDef() && Reg != DefReg)
      return false;

    // Don't allow any virtual-register uses. Rematting an instruction with
    // virtual register uses would lengthen the live ranges of the uses, which
    // is not necessarily a good idea, certainly not "trivial".
    if (MO.isUse())
      return false;
  }

  // Everything checked out.
  return true;
}
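
// Editor's illustration (not part of the upstream file): under these rules an
// immediate materialization like
//
//   %0:gr32 = MOV32ri 42
//
// is trivially rematerializable (no side effects, no register uses), while
//
//   %1:gr32 = ADD32rr %2, %3
//
// is not, because rematerializing it would lengthen the live ranges of the
// virtual-register uses %2 and %3. Opcode and class names are x86-flavored
// examples, not requirements.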

int TargetInstrInfo::getSPAdjust(const MachineInstr &MI) const {
  const MachineFunction *MF = MI.getMF();
  const TargetFrameLowering *TFI = MF->getSubtarget().getFrameLowering();
  bool StackGrowsDown =
      TFI->getStackGrowthDirection() == TargetFrameLowering::StackGrowsDown;

  unsigned FrameSetupOpcode = getCallFrameSetupOpcode();
  unsigned FrameDestroyOpcode = getCallFrameDestroyOpcode();

  if (!isFrameInstr(MI))
    return 0;

  int SPAdj = TFI->alignSPAdjust(getFrameSize(MI));

  if ((!StackGrowsDown && MI.getOpcode() == FrameSetupOpcode) ||
      (StackGrowsDown && MI.getOpcode() == FrameDestroyOpcode))
    SPAdj = -SPAdj;

  return SPAdj;
}
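
// Worked example (editor's note, not part of the upstream file): on a target
// whose stack grows down, a call frame setup of 16 bytes yields SPAdj = +16
// and the matching call frame destroy yields SPAdj = -16; on an
// upward-growing stack the signs are flipped by the negation above.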

/// isSchedulingBoundary - Test if the given instruction should be
/// considered a scheduling boundary. This primarily includes labels
/// and terminators.
bool TargetInstrInfo::isSchedulingBoundary(const MachineInstr &MI,
                                           const MachineBasicBlock *MBB,
                                           const MachineFunction &MF) const {
  // Terminators and labels can't be scheduled around.
  if (MI.isTerminator() || MI.isPosition())
    return true;

  // INLINEASM_BR can jump to another block.
  if (MI.getOpcode() == TargetOpcode::INLINEASM_BR)
    return true;

  // Don't attempt to schedule around any instruction that defines
  // a stack-oriented pointer, as it's unlikely to be profitable. This
  // saves compile time, because it doesn't require every single
  // stack slot reference to depend on the instruction that does the
  // modification.
  const TargetLowering &TLI = *MF.getSubtarget().getTargetLowering();
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
  return MI.modifiesRegister(TLI.getStackPointerRegisterToSaveRestore(), TRI);
}

// Provide a global flag for disabling the PreRA hazard recognizer that targets
// may choose to honor.
bool TargetInstrInfo::usePreRAHazardRecognizer() const {
  return !DisableHazardRecognizer;
}

// Default implementation of CreateTargetRAHazardRecognizer.
ScheduleHazardRecognizer *TargetInstrInfo::
CreateTargetHazardRecognizer(const TargetSubtargetInfo *STI,
                             const ScheduleDAG *DAG) const {
  // Dummy hazard recognizer allows all instructions to issue.
  return new ScheduleHazardRecognizer();
}

// Default implementation of CreateTargetMIHazardRecognizer.
ScheduleHazardRecognizer *TargetInstrInfo::CreateTargetMIHazardRecognizer(
    const InstrItineraryData *II, const ScheduleDAGMI *DAG) const {
  return new ScoreboardHazardRecognizer(II, DAG, "machine-scheduler");
}

// Default implementation of CreateTargetPostRAHazardRecognizer.
ScheduleHazardRecognizer *TargetInstrInfo::
CreateTargetPostRAHazardRecognizer(const InstrItineraryData *II,
                                   const ScheduleDAG *DAG) const {
  return new ScoreboardHazardRecognizer(II, DAG, "post-RA-sched");
}

// Default implementation of getMemOperandWithOffset.
bool TargetInstrInfo::getMemOperandWithOffset(
    const MachineInstr &MI, const MachineOperand *&BaseOp, int64_t &Offset,
    bool &OffsetIsScalable, const TargetRegisterInfo *TRI) const {
  SmallVector<const MachineOperand *, 4> BaseOps;
  unsigned Width;
  if (!getMemOperandsWithOffsetWidth(MI, BaseOps, Offset, OffsetIsScalable,
                                     Width, TRI) ||
      BaseOps.size() != 1)
    return false;
  BaseOp = BaseOps.front();
  return true;
}

//===----------------------------------------------------------------------===//
// SelectionDAG latency interface.
//===----------------------------------------------------------------------===//

int
TargetInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
                                   SDNode *DefNode, unsigned DefIdx,
                                   SDNode *UseNode, unsigned UseIdx) const {
  if (!ItinData || ItinData->isEmpty())
    return -1;

  if (!DefNode->isMachineOpcode())
    return -1;

  unsigned DefClass = get(DefNode->getMachineOpcode()).getSchedClass();
  if (!UseNode->isMachineOpcode())
    return ItinData->getOperandCycle(DefClass, DefIdx);
  unsigned UseClass = get(UseNode->getMachineOpcode()).getSchedClass();
  return ItinData->getOperandLatency(DefClass, DefIdx, UseClass, UseIdx);
}

int TargetInstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
                                     SDNode *N) const {
  if (!ItinData || ItinData->isEmpty())
    return 1;

  if (!N->isMachineOpcode())
    return 1;

  return ItinData->getStageLatency(get(N->getMachineOpcode()).getSchedClass());
}

//===----------------------------------------------------------------------===//
// MachineInstr latency interface.
//===----------------------------------------------------------------------===//

unsigned TargetInstrInfo::getNumMicroOps(const InstrItineraryData *ItinData,
                                         const MachineInstr &MI) const {
  if (!ItinData || ItinData->isEmpty())
    return 1;

  unsigned Class = MI.getDesc().getSchedClass();
  int UOps = ItinData->Itineraries[Class].NumMicroOps;
  if (UOps >= 0)
    return UOps;

  // The # of u-ops is dynamically determined. The specific target should
  // override this function to return the right number.
  return 1;
}

/// Return the default expected latency for a def based on its opcode.
unsigned TargetInstrInfo::defaultDefLatency(const MCSchedModel &SchedModel,
                                            const MachineInstr &DefMI) const {
  if (DefMI.isTransient())
    return 0;
  if (DefMI.mayLoad())
    return SchedModel.LoadLatency;
  if (isHighLatencyDef(DefMI.getOpcode()))
    return SchedModel.HighLatency;
  return 1;
}

unsigned TargetInstrInfo::getPredicationCost(const MachineInstr &) const {
  return 0;
}

unsigned TargetInstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
                                          const MachineInstr &MI,
                                          unsigned *PredCost) const {
  // Default to one cycle for no itinerary. However, an "empty" itinerary may
  // still have a MinLatency property, which getStageLatency checks.
  if (!ItinData)
    return MI.mayLoad() ? 2 : 1;

  return ItinData->getStageLatency(MI.getDesc().getSchedClass());
}

bool TargetInstrInfo::hasLowDefLatency(const TargetSchedModel &SchedModel,
                                       const MachineInstr &DefMI,
                                       unsigned DefIdx) const {
  const InstrItineraryData *ItinData = SchedModel.getInstrItineraries();
  if (!ItinData || ItinData->isEmpty())
    return false;

  unsigned DefClass = DefMI.getDesc().getSchedClass();
  int DefCycle = ItinData->getOperandCycle(DefClass, DefIdx);
  return (DefCycle != -1 && DefCycle <= 1);
}

bool TargetInstrInfo::isFunctionSafeToSplit(const MachineFunction &MF) const {
  // TODO: We don't split functions where a section attribute has been set
  // since the split part may not be placed in a contiguous region. It may also
  // be more beneficial to augment the linker to ensure contiguous layout of
  // split functions within the same section as specified by the attribute.
  if (MF.getFunction().hasSection() ||
      MF.getFunction().hasFnAttribute("implicit-section-name"))
    return false;

  // We don't want to proceed further for cold functions
  // or functions of unknown hotness. Lukewarm functions have no prefix.
  std::optional<StringRef> SectionPrefix = MF.getFunction().getSectionPrefix();
  if (SectionPrefix &&
      (*SectionPrefix == "unlikely" || *SectionPrefix == "unknown")) {
    return false;
  }

  return true;
}

std::optional<ParamLoadedValue>
TargetInstrInfo::describeLoadedValue(const MachineInstr &MI,
                                     Register Reg) const {
  const MachineFunction *MF = MI.getMF();
  const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo();
  DIExpression *Expr = DIExpression::get(MF->getFunction().getContext(), {});
  int64_t Offset;
  bool OffsetIsScalable;

  // To simplify the sub-register handling, verify that we only need to
  // consider physical registers.
  assert(MF->getProperties().hasProperty(
      MachineFunctionProperties::Property::NoVRegs));

  if (auto DestSrc = isCopyInstr(MI)) {
    Register DestReg = DestSrc->Destination->getReg();

    // If the copy destination is the forwarding reg, describe the forwarding
    // reg using the copy source as the backup location. Example:
    //
    //   x0 = MOV x7
    //   call callee(x0)      ; x0 described as x7
    if (Reg == DestReg)
      return ParamLoadedValue(*DestSrc->Source, Expr);

    // If the target's hook couldn't describe this copy, give up.
    return std::nullopt;
  } else if (auto RegImm = isAddImmediate(MI, Reg)) {
    Register SrcReg = RegImm->Reg;
    Offset = RegImm->Imm;
    Expr = DIExpression::prepend(Expr, DIExpression::ApplyOffset, Offset);
    return ParamLoadedValue(MachineOperand::CreateReg(SrcReg, false), Expr);
  } else if (MI.hasOneMemOperand()) {
    // Only describe memory which provably does not escape the function. As
    // described in llvm.org/PR43343, escaped memory may be clobbered by the
    // callee (or by another thread).
    const auto &TII = MF->getSubtarget().getInstrInfo();
    const MachineFrameInfo &MFI = MF->getFrameInfo();
    const MachineMemOperand *MMO = MI.memoperands()[0];
    const PseudoSourceValue *PSV = MMO->getPseudoValue();

    // If the address points to "special" memory (e.g. a spill slot), it's
    // sufficient to check that it isn't aliased by any high-level IR value.
    if (!PSV || PSV->mayAlias(&MFI))
      return std::nullopt;

    const MachineOperand *BaseOp;
    if (!TII->getMemOperandWithOffset(MI, BaseOp, Offset, OffsetIsScalable,
                                      TRI))
      return std::nullopt;

    // FIXME: Scalable offsets are not yet handled in the offset code below.
    if (OffsetIsScalable)
      return std::nullopt;

    // TODO: Can currently only handle mem instructions with a single define.
    // An example from the x86 target:
    //   ...
    //   DIV64m $rsp, 1, $noreg, 24, $noreg, implicit-def dead $rax, implicit-def $rdx
    //   ...
    //
    if (MI.getNumExplicitDefs() != 1)
      return std::nullopt;

    // TODO: In what way do we need to take Reg into consideration here?

    SmallVector<uint64_t, 8> Ops;
    DIExpression::appendOffset(Ops, Offset);
    Ops.push_back(dwarf::DW_OP_deref_size);
    Ops.push_back(MMO->getSize());
    Expr = DIExpression::prependOpcodes(Expr, Ops);
    return ParamLoadedValue(*BaseOp, Expr);
  }

  return std::nullopt;
}

// Get the call frame size just before MI.
unsigned TargetInstrInfo::getCallFrameSizeAt(MachineInstr &MI) const {
  // Search backwards from MI for the most recent call frame instruction.
  MachineBasicBlock *MBB = MI.getParent();
  for (auto &AdjI : reverse(make_range(MBB->instr_begin(), MI.getIterator()))) {
    if (AdjI.getOpcode() == getCallFrameSetupOpcode())
      return getFrameTotalSize(AdjI);
    if (AdjI.getOpcode() == getCallFrameDestroyOpcode())
      return 0;
  }

  // If none was found, use the call frame size from the start of the basic
  // block.
  return MBB->getCallFrameSize();
}
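
// Editor's illustration (not part of the upstream file): in a block like
//
//   ADJCALLSTACKDOWN 32, ...   ; call frame setup
//   CALL @f
//   ADJCALLSTACKUP 32, ...     ; call frame destroy
//   %r = ...                   ; <- query here
//
// a query at the last instruction walks backwards, hits the destroy first,
// and returns 0; a query between setup and destroy returns 32. The pseudo
// opcode names are target-specific (these are x86-style).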

/// Both DefMI and UseMI must be valid. By default, call directly to the
/// itinerary. This may be overridden by the target.
unsigned TargetInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
                                            const MachineInstr &DefMI,
                                            unsigned DefIdx,
                                            const MachineInstr &UseMI,
                                            unsigned UseIdx) const {
  unsigned DefClass = DefMI.getDesc().getSchedClass();
  unsigned UseClass = UseMI.getDesc().getSchedClass();
  return ItinData->getOperandLatency(DefClass, DefIdx, UseClass, UseIdx);
}

bool TargetInstrInfo::getRegSequenceInputs(
    const MachineInstr &MI, unsigned DefIdx,
    SmallVectorImpl<RegSubRegPairAndIdx> &InputRegs) const {
  assert((MI.isRegSequence() ||
          MI.isRegSequenceLike()) && "Instruction does not have the proper type");

  if (!MI.isRegSequence())
    return getRegSequenceLikeInputs(MI, DefIdx, InputRegs);

  // We are looking at:
  //   Def = REG_SEQUENCE v0, sub0, v1, sub1, ...
  assert(DefIdx == 0 && "REG_SEQUENCE only has one def");
  for (unsigned OpIdx = 1, EndOpIdx = MI.getNumOperands(); OpIdx != EndOpIdx;
       OpIdx += 2) {
    const MachineOperand &MOReg = MI.getOperand(OpIdx);
    if (MOReg.isUndef())
      continue;
    const MachineOperand &MOSubIdx = MI.getOperand(OpIdx + 1);
    assert(MOSubIdx.isImm() &&
           "One of the subindices of the reg_sequence is not an immediate");
    // Record Reg:SubReg, SubIdx.
    InputRegs.push_back(RegSubRegPairAndIdx(MOReg.getReg(), MOReg.getSubReg(),
                                            (unsigned)MOSubIdx.getImm()));
  }
  return true;
}

bool TargetInstrInfo::getExtractSubregInputs(
    const MachineInstr &MI, unsigned DefIdx,
    RegSubRegPairAndIdx &InputReg) const {
  assert((MI.isExtractSubreg() ||
          MI.isExtractSubregLike()) && "Instruction does not have the proper type");

  if (!MI.isExtractSubreg())
    return getExtractSubregLikeInputs(MI, DefIdx, InputReg);

  // We are looking at:
  //   Def = EXTRACT_SUBREG v0.sub1, sub0.
  assert(DefIdx == 0 && "EXTRACT_SUBREG only has one def");
  const MachineOperand &MOReg = MI.getOperand(1);
  if (MOReg.isUndef())
    return false;
  const MachineOperand &MOSubIdx = MI.getOperand(2);
  assert(MOSubIdx.isImm() &&
         "The subindex of the extract_subreg is not an immediate");

  InputReg.Reg = MOReg.getReg();
  InputReg.SubReg = MOReg.getSubReg();
  InputReg.SubIdx = (unsigned)MOSubIdx.getImm();
  return true;
}

bool TargetInstrInfo::getInsertSubregInputs(
    const MachineInstr &MI, unsigned DefIdx,
    RegSubRegPair &BaseReg, RegSubRegPairAndIdx &InsertedReg) const {
  assert((MI.isInsertSubreg() ||
          MI.isInsertSubregLike()) && "Instruction does not have the proper type");

  if (!MI.isInsertSubreg())
    return getInsertSubregLikeInputs(MI, DefIdx, BaseReg, InsertedReg);

  // We are looking at:
  //   Def = INSERT_SUBREG v0, v1, sub0.
  assert(DefIdx == 0 && "INSERT_SUBREG only has one def");
  const MachineOperand &MOBaseReg = MI.getOperand(1);
  const MachineOperand &MOInsertedReg = MI.getOperand(2);
  if (MOInsertedReg.isUndef())
    return false;
  const MachineOperand &MOSubIdx = MI.getOperand(3);
  assert(MOSubIdx.isImm() &&
         "The subindex of the insert_subreg is not an immediate");
  BaseReg.Reg = MOBaseReg.getReg();
  BaseReg.SubReg = MOBaseReg.getSubReg();

  InsertedReg.Reg = MOInsertedReg.getReg();
  InsertedReg.SubReg = MOInsertedReg.getSubReg();
  InsertedReg.SubIdx = (unsigned)MOSubIdx.getImm();
  return true;
}

// Returns a MIRPrinter comment for this machine operand.
std::string TargetInstrInfo::createMIROperandComment(
    const MachineInstr &MI, const MachineOperand &Op, unsigned OpIdx,
    const TargetRegisterInfo *TRI) const {

  if (!MI.isInlineAsm())
    return "";

  std::string Flags;
  raw_string_ostream OS(Flags);

  if (OpIdx == InlineAsm::MIOp_ExtraInfo) {
    // Print HasSideEffects, MayLoad, MayStore, IsAlignStack
    unsigned ExtraInfo = Op.getImm();
    bool First = true;
    for (StringRef Info : InlineAsm::getExtraInfoNames(ExtraInfo)) {
      if (!First)
        OS << " ";
      First = false;
      OS << Info;
    }

    return OS.str();
  }

  int FlagIdx = MI.findInlineAsmFlagIdx(OpIdx);
  if (FlagIdx < 0 || (unsigned)FlagIdx != OpIdx)
    return "";

  assert(Op.isImm() && "Expected flag operand to be an immediate");
  // Pretty print the inline asm operand descriptor.
  unsigned Flag = Op.getImm();
  const InlineAsm::Flag F(Flag);
  OS << F.getKindName();

  unsigned RCID;
  if (!F.isImmKind() && !F.isMemKind() && F.hasRegClassConstraint(RCID)) {
    if (TRI) {
      OS << ':' << TRI->getRegClassName(TRI->getRegClass(RCID));
    } else
      OS << ":RC" << RCID;
  }

  if (F.isMemKind()) {
    InlineAsm::ConstraintCode MCID = F.getMemoryConstraintID();
    OS << ":" << InlineAsm::getMemConstraintName(MCID);
  }

  unsigned TiedTo;
  if (F.isUseOperandTiedToDef(TiedTo))
    OS << " tiedto:$" << TiedTo;

  return OS.str();
}
1637
1639 Function &F, std::vector<outliner::Candidate> &Candidates) const {
1640 // Include target features from an arbitrary candidate for the outlined
1641 // function. This makes sure the outlined function knows what kinds of
1642 // instructions are going into it. This is fine, since all parent functions
1643 // must necessarily support the instructions that are in the outlined region.
1644 outliner::Candidate &FirstCand = Candidates.front();
1645 const Function &ParentFn = FirstCand.getMF()->getFunction();
1646 if (ParentFn.hasFnAttribute("target-features"))
1647 F.addFnAttr(ParentFn.getFnAttribute("target-features"));
1648 if (ParentFn.hasFnAttribute("target-cpu"))
1649 F.addFnAttr(ParentFn.getFnAttribute("target-cpu"));
1650
1651 // Set nounwind, so we don't generate eh_frame.
1652 if (llvm::all_of(Candidates, [](const outliner::Candidate &C) {
1653 return C.getMF()->getFunction().hasFnAttribute(Attribute::NoUnwind);
1654 }))
1655 F.addFnAttr(Attribute::NoUnwind);
1656}

outliner::InstrType
TargetInstrInfo::getOutliningType(MachineBasicBlock::iterator &MIT,
                                  unsigned Flags) const {
  MachineInstr &MI = *MIT;

  // NOTE: MI.isMetaInstruction() will match CFI_INSTRUCTION, but some targets
  // have support for outlining those. Special-case that here.
  if (MI.isCFIInstruction())
    // Just go right to the target implementation.
    return getOutliningTypeImpl(MIT, Flags);

  // Be conservative about inline assembly.
  if (MI.isInlineAsm())
    return outliner::InstrType::Illegal;

  // Labels generally can't safely be outlined.
  if (MI.isLabel())
    return outliner::InstrType::Illegal;

  // Don't let debug instructions impact analysis.
  if (MI.isDebugInstr())
    return outliner::InstrType::Invisible;

  // Some other special cases.
  switch (MI.getOpcode()) {
  case TargetOpcode::IMPLICIT_DEF:
  case TargetOpcode::KILL:
  case TargetOpcode::LIFETIME_START:
  case TargetOpcode::LIFETIME_END:
    return outliner::InstrType::Invisible;
  default:
    break;
  }

  // Is this a terminator for a basic block?
  if (MI.isTerminator()) {
    // If this is a branch to another block, we can't outline it.
    if (!MI.getParent()->succ_empty())
      return outliner::InstrType::Illegal;

    // Don't outline if the branch is not unconditional.
    if (isPredicated(MI))
      return outliner::InstrType::Illegal;
  }

  // Make sure none of the operands of this instruction do anything that
  // might break if they're moved outside their current function.
  // This includes MachineBasicBlock references, BlockAddresses,
  // constant pool indices, and jump table indices.
  //
  // A quick note on MO_TargetIndex:
  // This doesn't seem to be used in any of the architectures that the
  // MachineOutliner supports, but it was still filtered out in all of them.
  // There was one exception (RISC-V), but MO_TargetIndex also isn't used
  // there. As such, this check is removed both here and in the
  // target-specific implementations. Instead, we assert to make sure this
  // doesn't catch anyone off-guard somewhere down the line.
  for (const MachineOperand &MOP : MI.operands()) {
    // If you hit this assertion, please remove it and adjust
    // `getOutliningTypeImpl` for your target appropriately if necessary.
    // Adding the assertion back to other supported architectures
    // would be nice too :)
    assert(!MOP.isTargetIndex() && "This isn't used quite yet!");

    // CFI instructions should already have been filtered out at this point.
    assert(!MOP.isCFIIndex() && "CFI instructions handled elsewhere!");

    // PrologEpilogInserter should've already run at this point.
    assert(!MOP.isFI() && "FrameIndex instructions should be gone by now!");

    if (MOP.isMBB() || MOP.isBlockAddress() || MOP.isCPI() || MOP.isJTI())
      return outliner::InstrType::Illegal;
  }

  // If we don't know, delegate to the target-specific hook.
  return getOutliningTypeImpl(MIT, Flags);
}
1734
1736 unsigned &Flags) const {
1737 // Some instrumentations create special TargetOpcode at the start which
1738 // expands to special code sequences which must be present.
1740 if (First == MBB.end())
1741 return true;
1742
1743 if (First->getOpcode() == TargetOpcode::FENTRY_CALL ||
1744 First->getOpcode() == TargetOpcode::PATCHABLE_FUNCTION_ENTER)
1745 return false;
1746
1747 // Some instrumentations create special pseudo-instructions at or just before
1748 // the end that must be present.
1749 auto Last = MBB.getLastNonDebugInstr();
1750 if (Last->getOpcode() == TargetOpcode::PATCHABLE_RET ||
1751 Last->getOpcode() == TargetOpcode::PATCHABLE_TAIL_CALL)
1752 return false;
1753
1754 if (Last != First && Last->isReturn()) {
1755 --Last;
1756 if (Last->getOpcode() == TargetOpcode::PATCHABLE_FUNCTION_EXIT ||
1757 Last->getOpcode() == TargetOpcode::PATCHABLE_TAIL_CALL)
1758 return false;
1759 }
1760 return true;
1761}
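A target override typically keeps these generic instrumentation checks and layers its own constraints on top. A hedged sketch (hypothetical MyTargetInstrInfo; the call-frame condition is an invented example, not a rule this file imposes):

outliner-mbb example:
bool MyTargetInstrInfo::isMBBSafeToOutlineFrom(MachineBasicBlock &MBB,
                                               unsigned &Flags) const {
  // Keep the generic instrumentation checks above.
  if (!TargetInstrInfo::isMBBSafeToOutlineFrom(MBB, Flags))
    return false;
  // Hypothetical extra constraint: refuse a block entered in the middle of
  // a call frame sequence, since the inserted outlined call would land
  // inside the pending stack adjustment.
  return MBB.getCallFrameSize() == 0;
}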