LLVM 10.0.0svn
TargetInstrInfo.cpp
1 //===-- TargetInstrInfo.cpp - Target Instruction Information --------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file implements the TargetInstrInfo class.
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #include "llvm/CodeGen/TargetInstrInfo.h"
14 #include "llvm/CodeGen/MachineFrameInfo.h"
15 #include "llvm/CodeGen/MachineInstrBuilder.h"
16 #include "llvm/CodeGen/MachineMemOperand.h"
17 #include "llvm/CodeGen/MachineRegisterInfo.h"
18 #include "llvm/CodeGen/PseudoSourceValue.h"
19 #include "llvm/CodeGen/ScoreboardHazardRecognizer.h"
20 #include "llvm/CodeGen/StackMaps.h"
21 #include "llvm/CodeGen/TargetFrameLowering.h"
22 #include "llvm/CodeGen/TargetLowering.h"
23 #include "llvm/CodeGen/TargetRegisterInfo.h"
24 #include "llvm/CodeGen/TargetSchedule.h"
25 #include "llvm/IR/DataLayout.h"
26 #include "llvm/IR/DebugInfoMetadata.h"
27 #include "llvm/MC/MCAsmInfo.h"
28 #include "llvm/MC/MCInstrItineraries.h"
29 #include "llvm/Support/CommandLine.h"
30 #include "llvm/Support/ErrorHandling.h"
31 #include "llvm/Support/raw_ostream.h"
32 #include "llvm/Target/TargetMachine.h"
33 #include <cctype>
34 
35 using namespace llvm;
36 
37 static cl::opt<bool> DisableHazardRecognizer(
38  "disable-sched-hazard", cl::Hidden, cl::init(false),
39  cl::desc("Disable hazard detection during preRA scheduling"));
40 
41 TargetInstrInfo::~TargetInstrInfo() {
42 }
43 
44 const TargetRegisterClass*
45 TargetInstrInfo::getRegClass(const MCInstrDesc &MCID, unsigned OpNum,
46  const TargetRegisterInfo *TRI,
47  const MachineFunction &MF) const {
48  if (OpNum >= MCID.getNumOperands())
49  return nullptr;
50 
51  short RegClass = MCID.OpInfo[OpNum].RegClass;
52  if (MCID.OpInfo[OpNum].isLookupPtrRegClass())
53  return TRI->getPointerRegClass(MF, RegClass);
54 
55  // Instructions like INSERT_SUBREG do not have fixed register classes.
56  if (RegClass < 0)
57  return nullptr;
58 
59  // Otherwise just look it up normally.
60  return TRI->getRegClass(RegClass);
61 }
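// A minimal illustrative sketch (hypothetical helper, not a hook of this
// class): a typical client queries the class constraint for an operand and
// then constrains the corresponding virtual register to it.
static void constrainDefOperandExample(const TargetInstrInfo &TII,
                                       MachineInstr &MI,
                                       const TargetRegisterInfo *TRI,
                                       MachineFunction &MF,
                                       MachineRegisterInfo &MRI) {
  const TargetRegisterClass *OpRC =
      TII.getRegClass(MI.getDesc(), /*OpNum=*/0, TRI, MF);
  if (OpRC && MI.getOperand(0).isReg())
    MRI.constrainRegClass(MI.getOperand(0).getReg(), OpRC);
}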
62 
63 /// insertNoop - Insert a noop into the instruction stream at the specified
64 /// point.
65 void TargetInstrInfo::insertNoop(MachineBasicBlock &MBB,
66  MachineBasicBlock::iterator MI) const {
67  llvm_unreachable("Target didn't implement insertNoop!");
68 }
69 
70 static bool isAsmComment(const char *Str, const MCAsmInfo &MAI) {
71  return strncmp(Str, MAI.getCommentString().data(),
72  MAI.getCommentString().size()) == 0;
73 }
74 
75 /// Measure the specified inline asm to determine an approximation of its
76 /// length.
77 /// Comments (which run till the next SeparatorString or newline) do not
78 /// count as an instruction.
79 /// Any other non-whitespace text is considered an instruction, with
80 /// multiple instructions separated by SeparatorString or newlines.
81 /// Variable-length instructions are not handled here; this function
82 /// may be overridden by the target to do that.
83 /// We implement a special case of the .space directive which takes only a
84 /// single integer argument in base 10 that is the size in bytes. This is a
85 /// restricted form of the GAS directive in that we only interpret
86 /// simple--i.e. not a logical or arithmetic expression--size values without
87 /// the optional fill value. This is primarily used for creating arbitrary
88 /// sized inline asm blocks for testing purposes.
89 unsigned TargetInstrInfo::getInlineAsmLength(
90  const char *Str,
91  const MCAsmInfo &MAI, const TargetSubtargetInfo *STI) const {
92  // Count the number of instructions in the asm.
93  bool AtInsnStart = true;
94  unsigned Length = 0;
95  const unsigned MaxInstLength = MAI.getMaxInstLength(STI);
96  for (; *Str; ++Str) {
97  if (*Str == '\n' || strncmp(Str, MAI.getSeparatorString(),
98  strlen(MAI.getSeparatorString())) == 0) {
99  AtInsnStart = true;
100  } else if (isAsmComment(Str, MAI)) {
101  // Stop counting as an instruction after a comment until the next
102  // separator.
103  AtInsnStart = false;
104  }
105 
106  if (AtInsnStart && !std::isspace(static_cast<unsigned char>(*Str))) {
107  unsigned AddLength = MaxInstLength;
108  if (strncmp(Str, ".space", 6) == 0) {
109  char *EStr;
110  int SpaceSize;
111  SpaceSize = strtol(Str + 6, &EStr, 10);
112  SpaceSize = SpaceSize < 0 ? 0 : SpaceSize;
113  while (*EStr != '\n' && std::isspace(static_cast<unsigned char>(*EStr)))
114  ++EStr;
115  if (*EStr == '\0' || *EStr == '\n' ||
116  isAsmComment(EStr, MAI)) // Successfully parsed .space argument
117  AddLength = SpaceSize;
118  }
119  Length += AddLength;
120  AtInsnStart = false;
121  }
122  }
123 
124  return Length;
125 }
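// A minimal illustrative sketch (hypothetical helper): assuming a target whose
// maximum instruction length is 4 bytes and whose asm comment string is "#",
// the estimate below is 4 (for "nop") plus 16 (for ".space 16"), with the
// comment line contributing nothing, i.e. 20 bytes.
static unsigned inlineAsmLengthExample(const TargetInstrInfo &TII,
                                       const MCAsmInfo &MAI,
                                       const TargetSubtargetInfo *STI) {
  return TII.getInlineAsmLength("nop\n.space 16\n# just a comment", MAI, STI);
}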
126 
127 /// ReplaceTailWithBranchTo - Delete the instruction OldInst and everything
128 /// after it, replacing it with an unconditional branch to NewDest.
129 void
130 TargetInstrInfo::ReplaceTailWithBranchTo(MachineBasicBlock::iterator Tail,
131  MachineBasicBlock *NewDest) const {
132  MachineBasicBlock *MBB = Tail->getParent();
133 
134  // Remove all the old successors of MBB from the CFG.
135  while (!MBB->succ_empty())
136  MBB->removeSuccessor(MBB->succ_begin());
137 
138  // Save off the debug loc before erasing the instruction.
139  DebugLoc DL = Tail->getDebugLoc();
140 
141  // Update call site info and remove all the dead instructions
142  // from the end of MBB.
143  while (Tail != MBB->end()) {
144  auto MI = Tail++;
145  if (MI->isCall())
146  MBB->getParent()->updateCallSiteInfo(&*MI);
147  MBB->erase(MI);
148  }
149 
150  // If MBB isn't immediately before NewDest, insert an unconditional branch to it.
151  if (++MachineFunction::iterator(MBB) != MachineFunction::iterator(NewDest))
152  insertBranch(*MBB, NewDest, nullptr, SmallVector<MachineOperand, 0>(), DL);
153  MBB->addSuccessor(NewDest);
154 }
155 
156 MachineInstr *TargetInstrInfo::commuteInstructionImpl(MachineInstr &MI,
157  bool NewMI, unsigned Idx1,
158  unsigned Idx2) const {
159  const MCInstrDesc &MCID = MI.getDesc();
160  bool HasDef = MCID.getNumDefs();
161  if (HasDef && !MI.getOperand(0).isReg())
162  // No idea how to commute this instruction. Target should implement its own.
163  return nullptr;
164 
165  unsigned CommutableOpIdx1 = Idx1; (void)CommutableOpIdx1;
166  unsigned CommutableOpIdx2 = Idx2; (void)CommutableOpIdx2;
167  assert(findCommutedOpIndices(MI, CommutableOpIdx1, CommutableOpIdx2) &&
168  CommutableOpIdx1 == Idx1 && CommutableOpIdx2 == Idx2 &&
169  "TargetInstrInfo::CommuteInstructionImpl(): not commutable operands.");
170  assert(MI.getOperand(Idx1).isReg() && MI.getOperand(Idx2).isReg() &&
171  "This only knows how to commute register operands so far");
172 
173  Register Reg0 = HasDef ? MI.getOperand(0).getReg() : Register();
174  Register Reg1 = MI.getOperand(Idx1).getReg();
175  Register Reg2 = MI.getOperand(Idx2).getReg();
176  unsigned SubReg0 = HasDef ? MI.getOperand(0).getSubReg() : 0;
177  unsigned SubReg1 = MI.getOperand(Idx1).getSubReg();
178  unsigned SubReg2 = MI.getOperand(Idx2).getSubReg();
179  bool Reg1IsKill = MI.getOperand(Idx1).isKill();
180  bool Reg2IsKill = MI.getOperand(Idx2).isKill();
181  bool Reg1IsUndef = MI.getOperand(Idx1).isUndef();
182  bool Reg2IsUndef = MI.getOperand(Idx2).isUndef();
183  bool Reg1IsInternal = MI.getOperand(Idx1).isInternalRead();
184  bool Reg2IsInternal = MI.getOperand(Idx2).isInternalRead();
185  // Avoid calling isRenamable for virtual registers since we assert that
186  // renamable property is only queried/set for physical registers.
187  bool Reg1IsRenamable = Register::isPhysicalRegister(Reg1)
188  ? MI.getOperand(Idx1).isRenamable()
189  : false;
190  bool Reg2IsRenamable = Register::isPhysicalRegister(Reg2)
191  ? MI.getOperand(Idx2).isRenamable()
192  : false;
193  // If the destination is tied to either of the commuted source registers, then
194  // it must be updated.
195  if (HasDef && Reg0 == Reg1 &&
196  MI.getDesc().getOperandConstraint(Idx1, MCOI::TIED_TO) == 0) {
197  Reg2IsKill = false;
198  Reg0 = Reg2;
199  SubReg0 = SubReg2;
200  } else if (HasDef && Reg0 == Reg2 &&
201  MI.getDesc().getOperandConstraint(Idx2, MCOI::TIED_TO) == 0) {
202  Reg1IsKill = false;
203  Reg0 = Reg1;
204  SubReg0 = SubReg1;
205  }
206 
207  MachineInstr *CommutedMI = nullptr;
208  if (NewMI) {
209  // Create a new instruction.
210  MachineFunction &MF = *MI.getMF();
211  CommutedMI = MF.CloneMachineInstr(&MI);
212  } else {
213  CommutedMI = &MI;
214  }
215 
216  if (HasDef) {
217  CommutedMI->getOperand(0).setReg(Reg0);
218  CommutedMI->getOperand(0).setSubReg(SubReg0);
219  }
220  CommutedMI->getOperand(Idx2).setReg(Reg1);
221  CommutedMI->getOperand(Idx1).setReg(Reg2);
222  CommutedMI->getOperand(Idx2).setSubReg(SubReg1);
223  CommutedMI->getOperand(Idx1).setSubReg(SubReg2);
224  CommutedMI->getOperand(Idx2).setIsKill(Reg1IsKill);
225  CommutedMI->getOperand(Idx1).setIsKill(Reg2IsKill);
226  CommutedMI->getOperand(Idx2).setIsUndef(Reg1IsUndef);
227  CommutedMI->getOperand(Idx1).setIsUndef(Reg2IsUndef);
228  CommutedMI->getOperand(Idx2).setIsInternalRead(Reg1IsInternal);
229  CommutedMI->getOperand(Idx1).setIsInternalRead(Reg2IsInternal);
230  // Avoid calling setIsRenamable for virtual registers since we assert that
231  // renamable property is only queried/set for physical registers.
232  if (Register::isPhysicalRegister(Reg1))
233  CommutedMI->getOperand(Idx2).setIsRenamable(Reg1IsRenamable);
234  if (Register::isPhysicalRegister(Reg2))
235  CommutedMI->getOperand(Idx1).setIsRenamable(Reg2IsRenamable);
236  return CommutedMI;
237 }
238 
239 MachineInstr *TargetInstrInfo::commuteInstruction(MachineInstr &MI, bool NewMI,
240  unsigned OpIdx1,
241  unsigned OpIdx2) const {
242  // If OpIdx1 or OpIdx2 is not specified, then this method is free to choose
243  // any commutable operand, which is done in findCommutedOpIndices() method
244  // called below.
245  if ((OpIdx1 == CommuteAnyOperandIndex || OpIdx2 == CommuteAnyOperandIndex) &&
246  !findCommutedOpIndices(MI, OpIdx1, OpIdx2)) {
247  assert(MI.isCommutable() &&
248  "Precondition violation: MI must be commutable.");
249  return nullptr;
250  }
251  return commuteInstructionImpl(MI, NewMI, OpIdx1, OpIdx2);
252 }
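// A minimal illustrative sketch (hypothetical helper): letting the target pick
// any pair of commutable operands; the call returns &MI with the chosen
// operands swapped in place, or nullptr if MI cannot be commuted.
static MachineInstr *commuteAnyOperandsExample(const TargetInstrInfo &TII,
                                               MachineInstr &MI) {
  return TII.commuteInstruction(MI, /*NewMI=*/false,
                                TargetInstrInfo::CommuteAnyOperandIndex,
                                TargetInstrInfo::CommuteAnyOperandIndex);
}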
253 
254 bool TargetInstrInfo::fixCommutedOpIndices(unsigned &ResultIdx1,
255  unsigned &ResultIdx2,
256  unsigned CommutableOpIdx1,
257  unsigned CommutableOpIdx2) {
258  if (ResultIdx1 == CommuteAnyOperandIndex &&
259  ResultIdx2 == CommuteAnyOperandIndex) {
260  ResultIdx1 = CommutableOpIdx1;
261  ResultIdx2 = CommutableOpIdx2;
262  } else if (ResultIdx1 == CommuteAnyOperandIndex) {
263  if (ResultIdx2 == CommutableOpIdx1)
264  ResultIdx1 = CommutableOpIdx2;
265  else if (ResultIdx2 == CommutableOpIdx2)
266  ResultIdx1 = CommutableOpIdx1;
267  else
268  return false;
269  } else if (ResultIdx2 == CommuteAnyOperandIndex) {
270  if (ResultIdx1 == CommutableOpIdx1)
271  ResultIdx2 = CommutableOpIdx2;
272  else if (ResultIdx1 == CommutableOpIdx2)
273  ResultIdx2 = CommutableOpIdx1;
274  else
275  return false;
276  } else
277  // Check that the result operand indices match the given commutable
278  // operand indices.
279  return (ResultIdx1 == CommutableOpIdx1 && ResultIdx2 == CommutableOpIdx2) ||
280  (ResultIdx1 == CommutableOpIdx2 && ResultIdx2 == CommutableOpIdx1);
281 
282  return true;
283 }
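// A minimal illustrative sketch of how a target's findCommutedOpIndices
// override typically uses this helper; MyTargetInstrInfo and the operand
// positions 2 and 3 are hypothetical:
//
//   bool MyTargetInstrInfo::findCommutedOpIndices(const MachineInstr &MI,
//                                                 unsigned &SrcOpIdx1,
//                                                 unsigned &SrcOpIdx2) const {
//     // Map CommuteAnyOperandIndex requests onto the two commutable source
//     // operands of this opcode, or check that explicit indices match them.
//     return fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 2, 3);
//   }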
284 
285 bool TargetInstrInfo::findCommutedOpIndices(const MachineInstr &MI,
286  unsigned &SrcOpIdx1,
287  unsigned &SrcOpIdx2) const {
288  assert(!MI.isBundle() &&
289  "TargetInstrInfo::findCommutedOpIndices() can't handle bundles");
290 
291  const MCInstrDesc &MCID = MI.getDesc();
292  if (!MCID.isCommutable())
293  return false;
294 
295  // This assumes v0 = op v1, v2 and commuting would swap v1 and v2. If this
296  // is not true, then the target must implement this.
297  unsigned CommutableOpIdx1 = MCID.getNumDefs();
298  unsigned CommutableOpIdx2 = CommutableOpIdx1 + 1;
299  if (!fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2,
300  CommutableOpIdx1, CommutableOpIdx2))
301  return false;
302 
303  if (!MI.getOperand(SrcOpIdx1).isReg() || !MI.getOperand(SrcOpIdx2).isReg())
304  // No idea.
305  return false;
306  return true;
307 }
308 
309 bool TargetInstrInfo::isUnpredicatedTerminator(const MachineInstr &MI) const {
310  if (!MI.isTerminator()) return false;
311 
312  // Conditional branch is a special case.
313  if (MI.isBranch() && !MI.isBarrier())
314  return true;
315  if (!MI.isPredicable())
316  return true;
317  return !isPredicated(MI);
318 }
319 
320 bool TargetInstrInfo::PredicateInstruction(MachineInstr &MI,
321  ArrayRef<MachineOperand> Pred) const {
322  bool MadeChange = false;
323 
324  assert(!MI.isBundle() &&
325  "TargetInstrInfo::PredicateInstruction() can't handle bundles");
326 
327  const MCInstrDesc &MCID = MI.getDesc();
328  if (!MI.isPredicable())
329  return false;
330 
331  for (unsigned j = 0, i = 0, e = MI.getNumOperands(); i != e; ++i) {
332  if (MCID.OpInfo[i].isPredicate()) {
333  MachineOperand &MO = MI.getOperand(i);
334  if (MO.isReg()) {
335  MO.setReg(Pred[j].getReg());
336  MadeChange = true;
337  } else if (MO.isImm()) {
338  MO.setImm(Pred[j].getImm());
339  MadeChange = true;
340  } else if (MO.isMBB()) {
341  MO.setMBB(Pred[j].getMBB());
342  MadeChange = true;
343  }
344  ++j;
345  }
346  }
347  return MadeChange;
348 }
349 
350 bool TargetInstrInfo::hasLoadFromStackSlot(
351  const MachineInstr &MI,
352  SmallVectorImpl<const MachineMemOperand *> &Accesses) const {
353  size_t StartSize = Accesses.size();
354  for (MachineInstr::mmo_iterator o = MI.memoperands_begin(),
355  oe = MI.memoperands_end();
356  o != oe; ++o) {
357  if ((*o)->isLoad() &&
358  dyn_cast_or_null<FixedStackPseudoSourceValue>((*o)->getPseudoValue()))
359  Accesses.push_back(*o);
360  }
361  return Accesses.size() != StartSize;
362 }
363 
364 bool TargetInstrInfo::hasStoreToStackSlot(
365  const MachineInstr &MI,
366  SmallVectorImpl<const MachineMemOperand *> &Accesses) const {
367  size_t StartSize = Accesses.size();
368  for (MachineInstr::mmo_iterator o = MI.memoperands_begin(),
369  oe = MI.memoperands_end();
370  o != oe; ++o) {
371  if ((*o)->isStore() &&
372  dyn_cast_or_null<FixedStackPseudoSourceValue>((*o)->getPseudoValue()))
373  Accesses.push_back(*o);
374  }
375  return Accesses.size() != StartSize;
376 }
377 
378 bool TargetInstrInfo::getStackSlotRange(const TargetRegisterClass *RC,
379  unsigned SubIdx, unsigned &Size,
380  unsigned &Offset,
381  const MachineFunction &MF) const {
382  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
383  if (!SubIdx) {
384  Size = TRI->getSpillSize(*RC);
385  Offset = 0;
386  return true;
387  }
388  unsigned BitSize = TRI->getSubRegIdxSize(SubIdx);
389  // Convert bit size to byte size.
390  if (BitSize % 8)
391  return false;
392 
393  int BitOffset = TRI->getSubRegIdxOffset(SubIdx);
394  if (BitOffset < 0 || BitOffset % 8)
395  return false;
396 
397  Size = BitSize /= 8;
398  Offset = (unsigned)BitOffset / 8;
399 
400  assert(TRI->getSpillSize(*RC) >= (Offset + Size) && "bad subregister range");
401 
402  if (!MF.getDataLayout().isLittleEndian()) {
403  Offset = TRI->getSpillSize(*RC) - (Offset + Size);
404  }
405  return true;
406 }
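// A minimal illustrative sketch (hypothetical helper): for a 16-byte register
// class whose sub-register index covers bits [64, 128), the call yields
// Size == 8 and Offset == 8 on a little-endian target, and Offset == 0 after
// the flip on a big-endian one.
static bool stackSlotRangeExample(const TargetInstrInfo &TII,
                                  const TargetRegisterClass *RC,
                                  unsigned SubIdx, const MachineFunction &MF) {
  unsigned Size = 0, Offset = 0;
  return TII.getStackSlotRange(RC, SubIdx, Size, Offset, MF);
}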
407 
408 void TargetInstrInfo::reMaterialize(MachineBasicBlock &MBB,
409  MachineBasicBlock::iterator I,
410  unsigned DestReg, unsigned SubIdx,
411  const MachineInstr &Orig,
412  const TargetRegisterInfo &TRI) const {
413  MachineInstr *MI = MBB.getParent()->CloneMachineInstr(&Orig);
414  MI->substituteRegister(MI->getOperand(0).getReg(), DestReg, SubIdx, TRI);
415  MBB.insert(I, MI);
416 }
417 
418 bool TargetInstrInfo::produceSameValue(const MachineInstr &MI0,
419  const MachineInstr &MI1,
420  const MachineRegisterInfo *MRI) const {
421  return MI0.isIdenticalTo(MI1, MachineInstr::IgnoreVRegDefs);
422 }
423 
424 MachineInstr &TargetInstrInfo::duplicate(MachineBasicBlock &MBB,
425  MachineBasicBlock::iterator InsertBefore, const MachineInstr &Orig) const {
426  assert(!Orig.isNotDuplicable() && "Instruction cannot be duplicated");
427  MachineFunction &MF = *MBB.getParent();
428  return MF.CloneMachineInstrBundle(MBB, InsertBefore, Orig);
429 }
430 
431 // If the COPY instruction in MI can be folded to a stack operation, return
432 // the register class to use.
433 static const TargetRegisterClass *canFoldCopy(const MachineInstr &MI,
434  unsigned FoldIdx) {
435  assert(MI.isCopy() && "MI must be a COPY instruction");
436  if (MI.getNumOperands() != 2)
437  return nullptr;
438  assert(FoldIdx < 2 && "FoldIdx refers to a nonexistent operand");
439 
440  const MachineOperand &FoldOp = MI.getOperand(FoldIdx);
441  const MachineOperand &LiveOp = MI.getOperand(1 - FoldIdx);
442 
443  if (FoldOp.getSubReg() || LiveOp.getSubReg())
444  return nullptr;
445 
446  Register FoldReg = FoldOp.getReg();
447  Register LiveReg = LiveOp.getReg();
448 
449  assert(Register::isVirtualRegister(FoldReg) && "Cannot fold physregs");
450 
451  const MachineRegisterInfo &MRI = MI.getMF()->getRegInfo();
452  const TargetRegisterClass *RC = MRI.getRegClass(FoldReg);
453 
454  if (Register::isPhysicalRegister(LiveOp.getReg()))
455  return RC->contains(LiveOp.getReg()) ? RC : nullptr;
456 
457  if (RC->hasSubClassEq(MRI.getRegClass(LiveReg)))
458  return RC;
459 
460  // FIXME: Allow folding when register classes are memory compatible.
461  return nullptr;
462 }
463 
464 void TargetInstrInfo::getNoop(MCInst &NopInst) const {
465  llvm_unreachable("Not implemented");
466 }
467 
468 static MachineInstr *foldPatchpoint(MachineFunction &MF, MachineInstr &MI,
469  ArrayRef<unsigned> Ops, int FrameIndex,
470  const TargetInstrInfo &TII) {
471  unsigned StartIdx = 0;
472  switch (MI.getOpcode()) {
473  case TargetOpcode::STACKMAP: {
474  // StackMapLiveValues are foldable
475  StartIdx = StackMapOpers(&MI).getVarIdx();
476  break;
477  }
478  case TargetOpcode::PATCHPOINT: {
479  // For PatchPoint, the call args are not foldable (even if reported in the
480  // stackmap e.g. via anyregcc).
481  StartIdx = PatchPointOpers(&MI).getVarIdx();
482  break;
483  }
484  case TargetOpcode::STATEPOINT: {
485  // For statepoints, fold deopt and gc arguments, but not call arguments.
486  StartIdx = StatepointOpers(&MI).getVarIdx();
487  break;
488  }
489  default:
490  llvm_unreachable("unexpected stackmap opcode");
491  }
492 
493  // Return false if any operands requested for folding are not foldable (not
494  // part of the stackmap's live values).
495  for (unsigned Op : Ops) {
496  if (Op < StartIdx)
497  return nullptr;
498  }
499 
500  MachineInstr *NewMI =
501  MF.CreateMachineInstr(TII.get(MI.getOpcode()), MI.getDebugLoc(), true);
502  MachineInstrBuilder MIB(MF, NewMI);
503 
504  // No need to fold the return value, the metadata, or the function arguments.
505  for (unsigned i = 0; i < StartIdx; ++i)
506  MIB.add(MI.getOperand(i));
507 
508  for (unsigned i = StartIdx; i < MI.getNumOperands(); ++i) {
509  MachineOperand &MO = MI.getOperand(i);
510  if (is_contained(Ops, i)) {
511  unsigned SpillSize;
512  unsigned SpillOffset;
513  // Compute the spill slot size and offset.
514  const TargetRegisterClass *RC =
515  MF.getRegInfo().getRegClass(MO.getReg());
516  bool Valid =
517  TII.getStackSlotRange(RC, MO.getSubReg(), SpillSize, SpillOffset, MF);
518  if (!Valid)
519  report_fatal_error("cannot spill patchpoint subregister operand");
520  MIB.addImm(StackMaps::IndirectMemRefOp);
521  MIB.addImm(SpillSize);
522  MIB.addFrameIndex(FrameIndex);
523  MIB.addImm(SpillOffset);
524  }
525  else
526  MIB.add(MO);
527  }
528  return NewMI;
529 }
530 
531 MachineInstr *TargetInstrInfo::foldMemoryOperand(MachineInstr &MI,
532  ArrayRef<unsigned> Ops, int FI,
533  LiveIntervals *LIS,
534  VirtRegMap *VRM) const {
535  auto Flags = MachineMemOperand::MONone;
536  for (unsigned OpIdx : Ops)
537  Flags |= MI.getOperand(OpIdx).isDef() ? MachineMemOperand::MOStore
538  : MachineMemOperand::MOLoad;
539 
540  MachineBasicBlock *MBB = MI.getParent();
541  assert(MBB && "foldMemoryOperand needs an inserted instruction");
542  MachineFunction &MF = *MBB->getParent();
543 
544  // If we're not folding a load into a subreg, the size of the load is the
545  // size of the spill slot. But if we are, we need to figure out what the
546  // actual load size is.
547  int64_t MemSize = 0;
548  const MachineFrameInfo &MFI = MF.getFrameInfo();
549  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
550 
551  if (Flags & MachineMemOperand::MOStore) {
552  MemSize = MFI.getObjectSize(FI);
553  } else {
554  for (unsigned OpIdx : Ops) {
555  int64_t OpSize = MFI.getObjectSize(FI);
556 
557  if (auto SubReg = MI.getOperand(OpIdx).getSubReg()) {
558  unsigned SubRegSize = TRI->getSubRegIdxSize(SubReg);
559  if (SubRegSize > 0 && !(SubRegSize % 8))
560  OpSize = SubRegSize / 8;
561  }
562 
563  MemSize = std::max(MemSize, OpSize);
564  }
565  }
566 
567  assert(MemSize && "Did not expect a zero-sized stack slot");
568 
569  MachineInstr *NewMI = nullptr;
570 
571  if (MI.getOpcode() == TargetOpcode::STACKMAP ||
572  MI.getOpcode() == TargetOpcode::PATCHPOINT ||
573  MI.getOpcode() == TargetOpcode::STATEPOINT) {
574  // Fold stackmap/patchpoint.
575  NewMI = foldPatchpoint(MF, MI, Ops, FI, *this);
576  if (NewMI)
577  MBB->insert(MI, NewMI);
578  } else {
579  // Ask the target to do the actual folding.
580  NewMI = foldMemoryOperandImpl(MF, MI, Ops, MI, FI, LIS, VRM);
581  }
582 
583  if (NewMI) {
584  NewMI->setMemRefs(MF, MI.memoperands());
585  // Add a memory operand, foldMemoryOperandImpl doesn't do that.
586  assert((!(Flags & MachineMemOperand::MOStore) ||
587  NewMI->mayStore()) &&
588  "Folded a def to a non-store!");
589  assert((!(Flags & MachineMemOperand::MOLoad) ||
590  NewMI->mayLoad()) &&
591  "Folded a use to a non-load!");
592  assert(MFI.getObjectOffset(FI) != -1);
593  MachineMemOperand *MMO = MF.getMachineMemOperand(
594  MachinePointerInfo::getFixedStack(MF, FI), Flags, MemSize,
595  MFI.getObjectAlignment(FI));
596  NewMI->addMemOperand(MF, MMO);
597 
598  return NewMI;
599  }
600 
601  // Straight COPY may fold as load/store.
602  if (!MI.isCopy() || Ops.size() != 1)
603  return nullptr;
604 
605  const TargetRegisterClass *RC = canFoldCopy(MI, Ops[0]);
606  if (!RC)
607  return nullptr;
608 
609  const MachineOperand &MO = MI.getOperand(1 - Ops[0]);
610  MachineBasicBlock::iterator Pos = MI;
611 
612  if (Flags == MachineMemOperand::MOStore)
613  storeRegToStackSlot(*MBB, Pos, MO.getReg(), MO.isKill(), FI, RC, TRI);
614  else
615  loadRegFromStackSlot(*MBB, Pos, MO.getReg(), FI, RC, TRI);
616  return &*--Pos;
617 }
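// A minimal illustrative sketch (hypothetical helper): trying to fold a reload
// from spill slot FI directly into operand 1 of its user; on success the
// caller would erase the original instruction.
static MachineInstr *foldReloadExample(const TargetInstrInfo &TII,
                                       MachineInstr &MI, int FI,
                                       LiveIntervals *LIS) {
  const unsigned Ops[] = {1};
  return TII.foldMemoryOperand(MI, Ops, FI, LIS);
}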
618 
619 MachineInstr *TargetInstrInfo::foldMemoryOperand(MachineInstr &MI,
620  ArrayRef<unsigned> Ops,
621  MachineInstr &LoadMI,
622  LiveIntervals *LIS) const {
623  assert(LoadMI.canFoldAsLoad() && "LoadMI isn't foldable!");
624 #ifndef NDEBUG
625  for (unsigned OpIdx : Ops)
626  assert(MI.getOperand(OpIdx).isUse() && "Folding load into def!");
627 #endif
628 
629  MachineBasicBlock &MBB = *MI.getParent();
630  MachineFunction &MF = *MBB.getParent();
631 
632  // Ask the target to do the actual folding.
633  MachineInstr *NewMI = nullptr;
634  int FrameIndex = 0;
635 
636  if ((MI.getOpcode() == TargetOpcode::STACKMAP ||
637  MI.getOpcode() == TargetOpcode::PATCHPOINT ||
638  MI.getOpcode() == TargetOpcode::STATEPOINT) &&
639  isLoadFromStackSlot(LoadMI, FrameIndex)) {
640  // Fold stackmap/patchpoint.
641  NewMI = foldPatchpoint(MF, MI, Ops, FrameIndex, *this);
642  if (NewMI)
643  NewMI = &*MBB.insert(MI, NewMI);
644  } else {
645  // Ask the target to do the actual folding.
646  NewMI = foldMemoryOperandImpl(MF, MI, Ops, MI, LoadMI, LIS);
647  }
648 
649  if (!NewMI)
650  return nullptr;
651 
652  // Copy the memoperands from the load to the folded instruction.
653  if (MI.memoperands_empty()) {
654  NewMI->setMemRefs(MF, LoadMI.memoperands());
655  } else {
656  // Handle the rare case of folding multiple loads.
657  NewMI->setMemRefs(MF, MI.memoperands());
658  for (MachineInstr::mmo_iterator I = LoadMI.memoperands_begin(),
659  E = LoadMI.memoperands_end();
660  I != E; ++I) {
661  NewMI->addMemOperand(MF, *I);
662  }
663  }
664  return NewMI;
665 }
666 
667 bool TargetInstrInfo::hasReassociableOperands(
668  const MachineInstr &Inst, const MachineBasicBlock *MBB) const {
669  const MachineOperand &Op1 = Inst.getOperand(1);
670  const MachineOperand &Op2 = Inst.getOperand(2);
671  const MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
672 
673  // We need virtual register definitions for the operands that we will
674  // reassociate.
675  MachineInstr *MI1 = nullptr;
676  MachineInstr *MI2 = nullptr;
677  if (Op1.isReg() && Register::isVirtualRegister(Op1.getReg()))
678  MI1 = MRI.getUniqueVRegDef(Op1.getReg());
679  if (Op2.isReg() && Register::isVirtualRegister(Op2.getReg()))
680  MI2 = MRI.getUniqueVRegDef(Op2.getReg());
681 
682  // And they need to be in the trace (otherwise, they won't have a depth).
683  return MI1 && MI2 && MI1->getParent() == MBB && MI2->getParent() == MBB;
684 }
685 
686 bool TargetInstrInfo::hasReassociableSibling(const MachineInstr &Inst,
687  bool &Commuted) const {
688  const MachineBasicBlock *MBB = Inst.getParent();
689  const MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
690  MachineInstr *MI1 = MRI.getUniqueVRegDef(Inst.getOperand(1).getReg());
691  MachineInstr *MI2 = MRI.getUniqueVRegDef(Inst.getOperand(2).getReg());
692  unsigned AssocOpcode = Inst.getOpcode();
693 
694  // If only one operand has the same opcode and it's the second source operand,
695  // the operands must be commuted.
696  Commuted = MI1->getOpcode() != AssocOpcode && MI2->getOpcode() == AssocOpcode;
697  if (Commuted)
698  std::swap(MI1, MI2);
699 
700  // 1. The previous instruction must be the same type as Inst.
701  // 2. The previous instruction must have virtual register definitions for its
702  // operands in the same basic block as Inst.
703  // 3. The previous instruction's result must only be used by Inst.
704  return MI1->getOpcode() == AssocOpcode &&
705  hasReassociableOperands(*MI1, MBB) &&
706  MRI.hasOneNonDBGUse(MI1->getOperand(0).getReg());
707 }
708 
709 // 1. The operation must be associative and commutative.
710 // 2. The instruction must have virtual register definitions for its
711 // operands in the same basic block.
712 // 3. The instruction must have a reassociable sibling.
713 bool TargetInstrInfo::isReassociationCandidate(const MachineInstr &Inst,
714  bool &Commuted) const {
715  return isAssociativeAndCommutative(Inst) &&
716  hasReassociableOperands(Inst, Inst.getParent()) &&
717  hasReassociableSibling(Inst, Commuted);
718 }
719 
720 // The concept of the reassociation pass is that these operations can benefit
721 // from this kind of transformation:
722 //
723 // A = ? op ?
724 // B = A op X (Prev)
725 // C = B op Y (Root)
726 // -->
727 // A = ? op ?
728 // B = X op Y
729 // C = A op B
730 //
731 // breaking the dependency between A and B, allowing them to be executed in
732 // parallel (or back-to-back in a pipeline) instead of depending on each other.
733 
734 // FIXME: This has the potential to be expensive (compile time) while not
735 // improving the code at all. Some ways to limit the overhead:
736 // 1. Track successful transforms; bail out if hit rate gets too low.
737 // 2. Only enable at -O3 or some other non-default optimization level.
738 // 3. Pre-screen pattern candidates here: if an operand of the previous
739 // instruction is known to not increase the critical path, then don't match
740 // that pattern.
741 bool TargetInstrInfo::getMachineCombinerPatterns(
742  MachineInstr &Root,
743  SmallVectorImpl<MachineCombinerPattern> &Patterns) const {
744  bool Commute;
745  if (isReassociationCandidate(Root, Commute)) {
746  // We found a sequence of instructions that may be suitable for a
747  // reassociation of operands to increase ILP. Specify each commutation
748  // possibility for the Prev instruction in the sequence and let the
749  // machine combiner decide if changing the operands is worthwhile.
750  if (Commute) {
751  Patterns.push_back(MachineCombinerPattern::REASSOC_AX_YB);
752  Patterns.push_back(MachineCombinerPattern::REASSOC_XA_YB);
753  } else {
754  Patterns.push_back(MachineCombinerPattern::REASSOC_AX_BY);
755  Patterns.push_back(MachineCombinerPattern::REASSOC_XA_BY);
756  }
757  return true;
758  }
759 
760  return false;
761 }
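// A minimal illustrative sketch of a target override deferring to this base
// implementation; MyTargetInstrInfo and getFMAPatterns are hypothetical:
//
//   bool MyTargetInstrInfo::getMachineCombinerPatterns(
//       MachineInstr &Root,
//       SmallVectorImpl<MachineCombinerPattern> &Patterns) const {
//     if (getFMAPatterns(Root, Patterns)) // target-specific patterns first
//       return true;
//     return TargetInstrInfo::getMachineCombinerPatterns(Root, Patterns);
//   }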
762 
763 /// Return true when a code sequence can improve loop throughput.
764 bool
765 TargetInstrInfo::isThroughputPattern(MachineCombinerPattern Pattern) const {
766  return false;
767 }
768 
769 /// Attempt the reassociation transformation to reduce critical path length.
770 /// See the above comments before getMachineCombinerPatterns().
771 void TargetInstrInfo::reassociateOps(
772  MachineInstr &Root, MachineInstr &Prev,
773  MachineCombinerPattern Pattern,
774  SmallVectorImpl<MachineInstr *> &InsInstrs,
775  SmallVectorImpl<MachineInstr *> &DelInstrs,
776  DenseMap<unsigned, unsigned> &InstrIdxForVirtReg) const {
777  MachineFunction *MF = Root.getMF();
778  MachineRegisterInfo &MRI = MF->getRegInfo();
779  const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
780  const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo();
781  const TargetRegisterClass *RC = Root.getRegClassConstraint(0, TII, TRI);
782 
783  // This array encodes the operand index for each parameter because the
784  // operands may be commuted. Each row corresponds to a pattern value,
785  // and each column specifies the index of A, B, X, Y.
786  unsigned OpIdx[4][4] = {
787  { 1, 1, 2, 2 },
788  { 1, 2, 2, 1 },
789  { 2, 1, 1, 2 },
790  { 2, 2, 1, 1 }
791  };
792 
793  int Row;
794  switch (Pattern) {
795  case MachineCombinerPattern::REASSOC_AX_BY: Row = 0; break;
796  case MachineCombinerPattern::REASSOC_AX_YB: Row = 1; break;
797  case MachineCombinerPattern::REASSOC_XA_BY: Row = 2; break;
798  case MachineCombinerPattern::REASSOC_XA_YB: Row = 3; break;
799  default: llvm_unreachable("unexpected MachineCombinerPattern");
800  }
801 
802  MachineOperand &OpA = Prev.getOperand(OpIdx[Row][0]);
803  MachineOperand &OpB = Root.getOperand(OpIdx[Row][1]);
804  MachineOperand &OpX = Prev.getOperand(OpIdx[Row][2]);
805  MachineOperand &OpY = Root.getOperand(OpIdx[Row][3]);
806  MachineOperand &OpC = Root.getOperand(0);
807 
808  Register RegA = OpA.getReg();
809  Register RegB = OpB.getReg();
810  Register RegX = OpX.getReg();
811  Register RegY = OpY.getReg();
812  Register RegC = OpC.getReg();
813 
814  if (Register::isVirtualRegister(RegA))
815  MRI.constrainRegClass(RegA, RC);
816  if (Register::isVirtualRegister(RegB))
817  MRI.constrainRegClass(RegB, RC);
818  if (Register::isVirtualRegister(RegX))
819  MRI.constrainRegClass(RegX, RC);
820  if (Register::isVirtualRegister(RegY))
821  MRI.constrainRegClass(RegY, RC);
822  if (Register::isVirtualRegister(RegC))
823  MRI.constrainRegClass(RegC, RC);
824 
825  // Create a new virtual register for the result of (X op Y) instead of
826  // recycling RegB because the MachineCombiner's computation of the critical
827  // path requires a new register definition rather than an existing one.
828  Register NewVR = MRI.createVirtualRegister(RC);
829  InstrIdxForVirtReg.insert(std::make_pair(NewVR, 0));
830 
831  unsigned Opcode = Root.getOpcode();
832  bool KillA = OpA.isKill();
833  bool KillX = OpX.isKill();
834  bool KillY = OpY.isKill();
835 
836  // Create new instructions for insertion.
837  MachineInstrBuilder MIB1 =
838  BuildMI(*MF, Prev.getDebugLoc(), TII->get(Opcode), NewVR)
839  .addReg(RegX, getKillRegState(KillX))
840  .addReg(RegY, getKillRegState(KillY));
841  MachineInstrBuilder MIB2 =
842  BuildMI(*MF, Root.getDebugLoc(), TII->get(Opcode), RegC)
843  .addReg(RegA, getKillRegState(KillA))
844  .addReg(NewVR, getKillRegState(true));
845 
846  setSpecialOperandAttr(Root, Prev, *MIB1, *MIB2);
847 
848  // Record new instructions for insertion and old instructions for deletion.
849  InsInstrs.push_back(MIB1);
850  InsInstrs.push_back(MIB2);
851  DelInstrs.push_back(&Prev);
852  DelInstrs.push_back(&Root);
853 }
854 
855 void TargetInstrInfo::genAlternativeCodeSequence(
856  MachineInstr &Root, MachineCombinerPattern Pattern,
857  SmallVectorImpl<MachineInstr *> &InsInstrs,
858  SmallVectorImpl<MachineInstr *> &DelInstrs,
859  DenseMap<unsigned, unsigned> &InstIdxForVirtReg) const {
860  MachineRegisterInfo &MRI = Root.getMF()->getRegInfo();
861 
862  // Select the previous instruction in the sequence based on the input pattern.
863  MachineInstr *Prev = nullptr;
864  switch (Pattern) {
865  case MachineCombinerPattern::REASSOC_AX_BY:
866  case MachineCombinerPattern::REASSOC_XA_BY:
867  Prev = MRI.getUniqueVRegDef(Root.getOperand(1).getReg());
868  break;
869  case MachineCombinerPattern::REASSOC_AX_YB:
870  case MachineCombinerPattern::REASSOC_XA_YB:
871  Prev = MRI.getUniqueVRegDef(Root.getOperand(2).getReg());
872  break;
873  default:
874  break;
875  }
876 
877  assert(Prev && "Unknown pattern for machine combiner");
878 
879  reassociateOps(Root, *Prev, Pattern, InsInstrs, DelInstrs, InstIdxForVirtReg);
880 }
881 
882 bool TargetInstrInfo::isReallyTriviallyReMaterializableGeneric(
883  const MachineInstr &MI, AliasAnalysis *AA) const {
884  const MachineFunction &MF = *MI.getMF();
885  const MachineRegisterInfo &MRI = MF.getRegInfo();
886 
887  // Remat clients assume operand 0 is the defined register.
888  if (!MI.getNumOperands() || !MI.getOperand(0).isReg())
889  return false;
890  Register DefReg = MI.getOperand(0).getReg();
891 
892  // A sub-register definition can only be rematerialized if the instruction
893  // doesn't read the other parts of the register. Otherwise it is really a
894  // read-modify-write operation on the full virtual register which cannot be
895  // moved safely.
896  if (Register::isVirtualRegister(DefReg) && MI.getOperand(0).getSubReg() &&
897  MI.readsVirtualRegister(DefReg))
898  return false;
899 
900  // A load from a fixed stack slot can be rematerialized. This may be
901  // redundant with subsequent checks, but it's target-independent,
902  // simple, and a common case.
903  int FrameIdx = 0;
904  if (isLoadFromStackSlot(MI, FrameIdx) &&
905  MF.getFrameInfo().isImmutableObjectIndex(FrameIdx))
906  return true;
907 
908  // Avoid instructions obviously unsafe for remat.
909  if (MI.isNotDuplicable() || MI.mayStore() || MI.mayRaiseFPException() ||
910  MI.hasUnmodeledSideEffects())
911  return false;
912 
913  // Don't remat inline asm. We have no idea how expensive it is
914  // even if it's side effect free.
915  if (MI.isInlineAsm())
916  return false;
917 
918  // Avoid instructions which load from potentially varying memory.
919  if (MI.mayLoad() && !MI.isDereferenceableInvariantLoad(AA))
920  return false;
921 
922  // If any of the registers accessed are non-constant, conservatively assume
923  // the instruction is not rematerializable.
924  for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
925  const MachineOperand &MO = MI.getOperand(i);
926  if (!MO.isReg()) continue;
927  Register Reg = MO.getReg();
928  if (Reg == 0)
929  continue;
930 
931  // Check for a well-behaved physical register.
932  if (Register::isPhysicalRegister(Reg)) {
933  if (MO.isUse()) {
934  // If the physreg has no defs anywhere, it's just an ambient register
935  // and we can freely move its uses. Alternatively, if it's allocatable,
936  // it could get allocated to something with a def during allocation.
937  if (!MRI.isConstantPhysReg(Reg))
938  return false;
939  } else {
940  // A physreg def. We can't remat it.
941  return false;
942  }
943  continue;
944  }
945 
946  // Only allow one virtual-register def. There may be multiple defs of the
947  // same virtual register, though.
948  if (MO.isDef() && Reg != DefReg)
949  return false;
950 
951  // Don't allow any virtual-register uses. Rematting an instruction with
952  // virtual register uses would lengthen the live ranges of the uses, which
953  // is not necessarily a good idea, certainly not "trivial".
954  if (MO.isUse())
955  return false;
956  }
957 
958  // Everything checked out.
959  return true;
960 }
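// A minimal illustrative sketch (hypothetical helper): the public entry point
// isTriviallyReMaterializable() funnels into the generic checks above plus the
// target hook; a side-effect-free constant materialization passes, while a
// load from a mutable stack slot does not.
static bool canRematExample(const TargetInstrInfo &TII, const MachineInstr &MI,
                            AliasAnalysis *AA) {
  return TII.isTriviallyReMaterializable(MI, AA);
}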
961 
962 int TargetInstrInfo::getSPAdjust(const MachineInstr &MI) const {
963  const MachineFunction *MF = MI.getMF();
964  const TargetFrameLowering *TFI = MF->getSubtarget().getFrameLowering();
965  bool StackGrowsDown =
966  TFI->getStackGrowthDirection() == TargetFrameLowering::StackGrowsDown;
967 
968  unsigned FrameSetupOpcode = getCallFrameSetupOpcode();
969  unsigned FrameDestroyOpcode = getCallFrameDestroyOpcode();
970 
971  if (!isFrameInstr(MI))
972  return 0;
973 
974  int SPAdj = TFI->alignSPAdjust(getFrameSize(MI));
975 
976  if ((!StackGrowsDown && MI.getOpcode() == FrameSetupOpcode) ||
977  (StackGrowsDown && MI.getOpcode() == FrameDestroyOpcode))
978  SPAdj = -SPAdj;
979 
980  return SPAdj;
981 }
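// A minimal illustrative sketch (hypothetical helper): on a target whose stack
// grows down, a 16-byte call-frame setup yields +16 and the matching
// frame-destroy yields -16, so summing over a block tracks the running SP
// adjustment.
static int trackSPAdjustmentExample(const TargetInstrInfo &TII,
                                    const MachineBasicBlock &MBB) {
  int SPOffset = 0;
  for (const MachineInstr &MI : MBB)
    SPOffset += TII.getSPAdjust(MI);
  return SPOffset;
}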
982 
983 /// isSchedulingBoundary - Test if the given instruction should be
984 /// considered a scheduling boundary. This primarily includes labels
985 /// and terminators.
986 bool TargetInstrInfo::isSchedulingBoundary(const MachineInstr &MI,
987  const MachineBasicBlock *MBB,
988  const MachineFunction &MF) const {
989  // Terminators and labels can't be scheduled around.
990  if (MI.isTerminator() || MI.isPosition())
991  return true;
992 
993  // Don't attempt to schedule around any instruction that defines
994  // a stack-oriented pointer, as it's unlikely to be profitable. This
995  // saves compile time, because it doesn't require every single
996  // stack slot reference to depend on the instruction that does the
997  // modification.
998  const TargetLowering &TLI = *MF.getSubtarget().getTargetLowering();
999  return MI.modifiesRegister(TLI.getStackPointerRegisterToSaveRestore(),
1000  MF.getSubtarget().getRegisterInfo());
1001 }
1002 
1003 // Provide a global flag for disabling the PreRA hazard recognizer that targets
1004 // may choose to honor.
1005 bool TargetInstrInfo::usePreRAHazardRecognizer() const {
1006  return !DisableHazardRecognizer;
1007 }
1008 
1009 // Default implementation of CreateTargetRAHazardRecognizer.
1010 ScheduleHazardRecognizer *TargetInstrInfo::
1011 CreateTargetHazardRecognizer(const TargetSubtargetInfo *STI,
1012  const ScheduleDAG *DAG) const {
1013  // Dummy hazard recognizer allows all instructions to issue.
1014  return new ScheduleHazardRecognizer();
1015 }
1016 
1017 // Default implementation of CreateTargetMIHazardRecognizer.
1018 ScheduleHazardRecognizer *TargetInstrInfo::
1019 CreateTargetMIHazardRecognizer(const InstrItineraryData *II,
1020  const ScheduleDAG *DAG) const {
1021  return (ScheduleHazardRecognizer *)
1022  new ScoreboardHazardRecognizer(II, DAG, "machine-scheduler");
1023 }
1024 
1025 // Default implementation of CreateTargetPostRAHazardRecognizer.
1026 ScheduleHazardRecognizer *TargetInstrInfo::
1027 CreateTargetPostRAHazardRecognizer(const InstrItineraryData *II,
1028  const ScheduleDAG *DAG) const {
1029  return (ScheduleHazardRecognizer *)
1030  new ScoreboardHazardRecognizer(II, DAG, "post-RA-sched");
1031 }
1032 
1033 //===----------------------------------------------------------------------===//
1034 // SelectionDAG latency interface.
1035 //===----------------------------------------------------------------------===//
1036 
1037 int
1038 TargetInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
1039  SDNode *DefNode, unsigned DefIdx,
1040  SDNode *UseNode, unsigned UseIdx) const {
1041  if (!ItinData || ItinData->isEmpty())
1042  return -1;
1043 
1044  if (!DefNode->isMachineOpcode())
1045  return -1;
1046 
1047  unsigned DefClass = get(DefNode->getMachineOpcode()).getSchedClass();
1048  if (!UseNode->isMachineOpcode())
1049  return ItinData->getOperandCycle(DefClass, DefIdx);
1050  unsigned UseClass = get(UseNode->getMachineOpcode()).getSchedClass();
1051  return ItinData->getOperandLatency(DefClass, DefIdx, UseClass, UseIdx);
1052 }
1053 
1054 int TargetInstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
1055  SDNode *N) const {
1056  if (!ItinData || ItinData->isEmpty())
1057  return 1;
1058 
1059  if (!N->isMachineOpcode())
1060  return 1;
1061 
1062  return ItinData->getStageLatency(get(N->getMachineOpcode()).getSchedClass());
1063 }
1064 
1065 //===----------------------------------------------------------------------===//
1066 // MachineInstr latency interface.
1067 //===----------------------------------------------------------------------===//
1068 
1069 unsigned TargetInstrInfo::getNumMicroOps(const InstrItineraryData *ItinData,
1070  const MachineInstr &MI) const {
1071  if (!ItinData || ItinData->isEmpty())
1072  return 1;
1073 
1074  unsigned Class = MI.getDesc().getSchedClass();
1075  int UOps = ItinData->Itineraries[Class].NumMicroOps;
1076  if (UOps >= 0)
1077  return UOps;
1078 
1079  // The # of u-ops is dynamically determined. The specific target should
1080  // override this function to return the right number.
1081  return 1;
1082 }
1083 
1084 /// Return the default expected latency for a def based on its opcode.
1085 unsigned TargetInstrInfo::defaultDefLatency(const MCSchedModel &SchedModel,
1086  const MachineInstr &DefMI) const {
1087  if (DefMI.isTransient())
1088  return 0;
1089  if (DefMI.mayLoad())
1090  return SchedModel.LoadLatency;
1091  if (isHighLatencyDef(DefMI.getOpcode()))
1092  return SchedModel.HighLatency;
1093  return 1;
1094 }
1095 
1096 unsigned TargetInstrInfo::getPredicationCost(const MachineInstr &MI) const {
1097  return 0;
1098 }
1099 
1100 unsigned TargetInstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
1101  const MachineInstr &MI,
1102  unsigned *PredCost) const {
1103  // Default to one cycle for no itinerary. However, an "empty" itinerary may
1104  // still have a MinLatency property, which getStageLatency checks.
1105  if (!ItinData)
1106  return MI.mayLoad() ? 2 : 1;
1107 
1108  return ItinData->getStageLatency(MI.getDesc().getSchedClass());
1109 }
1110 
1111 bool TargetInstrInfo::hasLowDefLatency(const TargetSchedModel &SchedModel,
1112  const MachineInstr &DefMI,
1113  unsigned DefIdx) const {
1114  const InstrItineraryData *ItinData = SchedModel.getInstrItineraries();
1115  if (!ItinData || ItinData->isEmpty())
1116  return false;
1117 
1118  unsigned DefClass = DefMI.getDesc().getSchedClass();
1119  int DefCycle = ItinData->getOperandCycle(DefClass, DefIdx);
1120  return (DefCycle != -1 && DefCycle <= 1);
1121 }
1122 
1123 Optional<ParamLoadedValue>
1124 TargetInstrInfo::describeLoadedValue(const MachineInstr &MI) const {
1125  const MachineFunction *MF = MI.getMF();
1126  const MachineOperand *Op = nullptr;
1127  DIExpression *Expr = DIExpression::get(MF->getFunction().getContext(), {});
1128  const MachineOperand *SrcRegOp, *DestRegOp;
1129 
1130  if (isCopyInstr(MI, SrcRegOp, DestRegOp)) {
1131  Op = SrcRegOp;
1132  return ParamLoadedValue(*Op, Expr);
1133  } else if (MI.isMoveImmediate()) {
1134  Op = &MI.getOperand(1);
1135  return ParamLoadedValue(*Op, Expr);
1136  } else if (MI.hasOneMemOperand()) {
1137  int64_t Offset;
1138  const auto &TRI = MF->getSubtarget().getRegisterInfo();
1139  const auto &TII = MF->getSubtarget().getInstrInfo();
1140  const MachineOperand *BaseOp;
1141 
1142  if (!TII->getMemOperandWithOffset(MI, BaseOp, Offset, TRI))
1143  return None;
1144 
1145  Expr = DIExpression::prepend(Expr, DIExpression::DerefAfter, Offset);
1146  Op = BaseOp;
1147  return ParamLoadedValue(*Op, Expr);
1148  }
1149 
1150  return None;
1151 }
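// A minimal illustrative sketch (hypothetical helper): describing the value a
// register move or move-immediate places in its destination, as used when
// emitting call-site parameter debug info.
static void describeLoadedValueExample(const TargetInstrInfo &TII,
                                       const MachineInstr &MI) {
  if (Optional<ParamLoadedValue> PLV = TII.describeLoadedValue(MI)) {
    const MachineOperand &Src = PLV->first; // source register, immediate, or base register
    const DIExpression *Expr = PLV->second; // e.g. empty, or a deref plus offset
    (void)Src;
    (void)Expr;
  }
}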
1152 
1153 /// Both DefMI and UseMI must be valid. By default, call directly to the
1154 /// itinerary. This may be overridden by the target.
1155 int TargetInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
1156  const MachineInstr &DefMI,
1157  unsigned DefIdx,
1158  const MachineInstr &UseMI,
1159  unsigned UseIdx) const {
1160  unsigned DefClass = DefMI.getDesc().getSchedClass();
1161  unsigned UseClass = UseMI.getDesc().getSchedClass();
1162  return ItinData->getOperandLatency(DefClass, DefIdx, UseClass, UseIdx);
1163 }
1164 
1165 /// If we can determine the operand latency from the def only, without itinerary
1166 /// lookup, do so. Otherwise return -1.
1167 int TargetInstrInfo::computeDefOperandLatency(
1168  const InstrItineraryData *ItinData, const MachineInstr &DefMI) const {
1169 
1170  // Let the target hook getInstrLatency handle missing itineraries.
1171  if (!ItinData)
1172  return getInstrLatency(ItinData, DefMI);
1173 
1174  if(ItinData->isEmpty())
1175  return defaultDefLatency(ItinData->SchedModel, DefMI);
1176 
1177  // ...operand lookup required
1178  return -1;
1179 }
1180 
1181 bool TargetInstrInfo::getRegSequenceInputs(
1182  const MachineInstr &MI, unsigned DefIdx,
1183  SmallVectorImpl<RegSubRegPairAndIdx> &InputRegs) const {
1184  assert((MI.isRegSequence() ||
1185  MI.isRegSequenceLike()) && "Instruction does not have the proper type");
1186 
1187  if (!MI.isRegSequence())
1188  return getRegSequenceLikeInputs(MI, DefIdx, InputRegs);
1189 
1190  // We are looking at:
1191  // Def = REG_SEQUENCE v0, sub0, v1, sub1, ...
1192  assert(DefIdx == 0 && "REG_SEQUENCE only has one def");
1193  for (unsigned OpIdx = 1, EndOpIdx = MI.getNumOperands(); OpIdx != EndOpIdx;
1194  OpIdx += 2) {
1195  const MachineOperand &MOReg = MI.getOperand(OpIdx);
1196  if (MOReg.isUndef())
1197  continue;
1198  const MachineOperand &MOSubIdx = MI.getOperand(OpIdx + 1);
1199  assert(MOSubIdx.isImm() &&
1200  "One of the subindices of the reg_sequence is not an immediate");
1201  // Record Reg:SubReg, SubIdx.
1202  InputRegs.push_back(RegSubRegPairAndIdx(MOReg.getReg(), MOReg.getSubReg(),
1203  (unsigned)MOSubIdx.getImm()));
1204  }
1205  return true;
1206 }
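// A minimal illustrative sketch (hypothetical helper): decomposing
//   %dst = REG_SEQUENCE %v1, %subreg.sub0, %v2, %subreg.sub1
// into its (register, sub-register, insertion index) inputs.
static void regSequenceInputsExample(const TargetInstrInfo &TII,
                                     const MachineInstr &MI) {
  SmallVector<TargetInstrInfo::RegSubRegPairAndIdx, 4> Inputs;
  if (TII.getRegSequenceInputs(MI, /*DefIdx=*/0, Inputs))
    for (const TargetInstrInfo::RegSubRegPairAndIdx &In : Inputs)
      (void)In; // In.Reg:In.SubReg is inserted at sub-index In.SubIdx.
}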
1207 
1208 bool TargetInstrInfo::getExtractSubregInputs(
1209  const MachineInstr &MI, unsigned DefIdx,
1210  RegSubRegPairAndIdx &InputReg) const {
1211  assert((MI.isExtractSubreg() ||
1212  MI.isExtractSubregLike()) && "Instruction does not have the proper type");
1213 
1214  if (!MI.isExtractSubreg())
1215  return getExtractSubregLikeInputs(MI, DefIdx, InputReg);
1216 
1217  // We are looking at:
1218  // Def = EXTRACT_SUBREG v0.sub1, sub0.
1219  assert(DefIdx == 0 && "EXTRACT_SUBREG only has one def");
1220  const MachineOperand &MOReg = MI.getOperand(1);
1221  if (MOReg.isUndef())
1222  return false;
1223  const MachineOperand &MOSubIdx = MI.getOperand(2);
1224  assert(MOSubIdx.isImm() &&
1225  "The subindex of the extract_subreg is not an immediate");
1226 
1227  InputReg.Reg = MOReg.getReg();
1228  InputReg.SubReg = MOReg.getSubReg();
1229  InputReg.SubIdx = (unsigned)MOSubIdx.getImm();
1230  return true;
1231 }
1232 
1233 bool TargetInstrInfo::getInsertSubregInputs(
1234  const MachineInstr &MI, unsigned DefIdx,
1235  RegSubRegPair &BaseReg, RegSubRegPairAndIdx &InsertedReg) const {
1236  assert((MI.isInsertSubreg() ||
1237  MI.isInsertSubregLike()) && "Instruction does not have the proper type");
1238 
1239  if (!MI.isInsertSubreg())
1240  return getInsertSubregLikeInputs(MI, DefIdx, BaseReg, InsertedReg);
1241 
1242  // We are looking at:
1243  // Def = INSERT_SUBREG v0, v1, sub0.
1244  assert(DefIdx == 0 && "INSERT_SUBREG only has one def");
1245  const MachineOperand &MOBaseReg = MI.getOperand(1);
1246  const MachineOperand &MOInsertedReg = MI.getOperand(2);
1247  if (MOInsertedReg.isUndef())
1248  return false;
1249  const MachineOperand &MOSubIdx = MI.getOperand(3);
1250  assert(MOSubIdx.isImm() &&
1251  "The subindex of the insert_subreg is not an immediate");
1252  BaseReg.Reg = MOBaseReg.getReg();
1253  BaseReg.SubReg = MOBaseReg.getSubReg();
1254 
1255  InsertedReg.Reg = MOInsertedReg.getReg();
1256  InsertedReg.SubReg = MOInsertedReg.getSubReg();
1257  InsertedReg.SubIdx = (unsigned)MOSubIdx.getImm();
1258  return true;
1259 }
const MachineInstrBuilder & add(const MachineOperand &MO) const
GCNRegPressure max(const GCNRegPressure &P1, const GCNRegPressure &P2)
bool contains(unsigned Reg) const
Return true if the specified register is included in this register class.
const MachineFunction * getMF() const
Return the function that contains the basic block that this instruction belongs to.
LLVM_ATTRIBUTE_NORETURN void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
Definition: Error.cpp:139
This class represents lattice values for constants.
Definition: AllocatorList.h:23
bool isLookupPtrRegClass() const
Set if this operand is a pointer value and it requires a callback to look up its register class...
Definition: MCInstrDesc.h:90
Register createVirtualRegister(const TargetRegisterClass *RegClass, StringRef Name="")
createVirtualRegister - Create and return a new virtual register in the function with the specified r...
static MachineInstr * foldPatchpoint(MachineFunction &MF, MachineInstr &MI, ArrayRef< unsigned > Ops, int FrameIndex, const TargetInstrInfo &TII)
virtual const TargetRegisterInfo * getRegisterInfo() const
getRegisterInfo - If register information is available, return it.
virtual bool isThroughputPattern(MachineCombinerPattern Pattern) const
Return true when a code sequence can improve throughput.
bool isExtractSubregLike(QueryType Type=IgnoreBundle) const
Return true if this instruction behaves the same way as the generic EXTRACT_SUBREG instructions...
Definition: MachineInstr.h:800
const DebugLoc & getDebugLoc() const
Returns the debug location id of this MachineInstr.
Definition: MachineInstr.h:385
static bool isPhysicalRegister(unsigned Reg)
Return true if the specified register number is in the physical register namespace.
Definition: Register.h:63
Describe properties that are true of each instruction in the target description file.
Definition: MCInstrDesc.h:178
void setIsUndef(bool Val=true)
unsigned Reg
virtual MachineInstr & duplicate(MachineBasicBlock &MBB, MachineBasicBlock::iterator InsertBefore, const MachineInstr &Orig) const
Clones instruction or the whole instruction bundle Orig and insert into MBB before InsertBefore...
MachineInstr * commuteInstruction(MachineInstr &MI, bool NewMI=false, unsigned OpIdx1=CommuteAnyOperandIndex, unsigned OpIdx2=CommuteAnyOperandIndex) const
This method commutes the operands of the given machine instruction MI.
unsigned getSubReg() const
virtual bool hasReassociableOperands(const MachineInstr &Inst, const MachineBasicBlock *MBB) const
Return true when Inst has reassociable operands in the same MBB.
bool isInlineAsm() const
virtual const TargetLowering * getTargetLowering() const
bool isPredicable(QueryType Type=AllInBundle) const
Return true if this instruction has a predicate operand that controls execution.
Definition: MachineInstr.h:705
virtual unsigned insertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB, MachineBasicBlock *FBB, ArrayRef< MachineOperand > Cond, const DebugLoc &DL, int *BytesAdded=nullptr) const
Insert branch code into the end of the specified MachineBasicBlock.
bool readsVirtualRegister(Register Reg) const
Return true if the MachineInstr reads the specified virtual register.
bool isRegSequence() const
static cl::opt< bool > DisableHazardRecognizer("disable-sched-hazard", cl::Hidden, cl::init(false), cl::desc("Disable hazard detection during preRA scheduling"))
bool isTransient() const
Return true if this is a transient instruction that is either very likely to be eliminated during reg...
virtual bool getExtractSubregLikeInputs(const MachineInstr &MI, unsigned DefIdx, RegSubRegPairAndIdx &InputReg) const
Target-dependent implementation of getExtractSubregInputs.
unsigned const TargetRegisterInfo * TRI
A debug info location.
Definition: DebugLoc.h:33
unsigned getCallFrameDestroyOpcode() const
virtual MachineInstr * foldMemoryOperandImpl(MachineFunction &MF, MachineInstr &MI, ArrayRef< unsigned > Ops, MachineBasicBlock::iterator InsertPt, int FrameIndex, LiveIntervals *LIS=nullptr, VirtRegMap *VRM=nullptr) const
Target-dependent implementation for foldMemoryOperand.
unsigned defaultDefLatency(const MCSchedModel &SchedModel, const MachineInstr &DefMI) const
Return the default expected latency for a def based on its opcode.
bool isImm() const
isImm - Tests if this is a MO_Immediate operand.
MachineInstr * CreateMachineInstr(const MCInstrDesc &MCID, const DebugLoc &DL, bool NoImp=false)
CreateMachineInstr - Allocate a new MachineInstr.
void setIsRenamable(bool Val=true)
unsigned getSpillSize(const TargetRegisterClass &RC) const
Return the size in bytes of the stack slot allocated to hold a spilled copy of a register from class ...
bool isInternalRead() const
virtual bool isHighLatencyDef(int opc) const
Return true if this opcode has high latency to its result.
const TargetRegisterClass * getRegClass(unsigned i) const
Returns the register class associated with the enumeration value.
bool isMoveImmediate(QueryType Type=IgnoreBundle) const
Return true if this instruction is a move immediate (including conditional moves) instruction...
Definition: MachineInstr.h:718
const TargetRegisterClass * getRegClass(Register Reg) const
Return the register class of the specified virtual register.
virtual bool isUnpredicatedTerminator(const MachineInstr &MI) const
Returns true if the instruction is a terminator instruction that has not been predicated.
static DIExpression * prepend(const DIExpression *Expr, uint8_t Flags, int64_t Offset=0)
Prepend DIExpr with a deref and offset operation and optionally turn it into a stack value or/and an ...
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Definition: DenseMap.h:195
std::pair< MachineOperand, DIExpression * > ParamLoadedValue
A description of a memory reference used in the backend.
virtual void reMaterialize(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, unsigned DestReg, unsigned SubIdx, const MachineInstr &Orig, const TargetRegisterInfo &TRI) const
Re-issue the specified &#39;original&#39; instruction at the specific location targeting a new destination re...
instr_iterator erase(instr_iterator I)
Remove an instruction from the instruction list and delete it.
unsigned getNumOperands() const
Return the number of declared MachineOperands for this MachineInstruction.
Definition: MCInstrDesc.h:225
Provide an instruction scheduling machine model to CodeGen passes.
const HexagonInstrInfo * TII
void substituteRegister(Register FromReg, Register ToReg, unsigned SubIdx, const TargetRegisterInfo &RegInfo)
Replace all occurrences of FromReg with ToReg:SubIdx, properly composing subreg indices where necessa...
unsigned getVarIdx() const
Get starting index of non call related arguments (calling convention, statepoint flags, vm state and gc state).
Definition: StackMaps.h:172
const TargetRegisterClass * getRegClassConstraint(unsigned OpIdx, const TargetInstrInfo *TII, const TargetRegisterInfo *TRI) const
Compute the static register class constraint for operand OpIdx.
unsigned getNumOperands() const
Retuns the total number of operands.
Definition: MachineInstr.h:414
virtual const TargetRegisterClass * getRegClass(const MCInstrDesc &MCID, unsigned OpNum, const TargetRegisterInfo *TRI, const MachineFunction &MF) const
Given a machine instruction descriptor, returns the register class constraint for OpNum...
const InstrItinerary * Itineraries
Array of itineraries selected.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: APFloat.h:41
unsigned SubReg
MachineInstr & CloneMachineInstrBundle(MachineBasicBlock &MBB, MachineBasicBlock::iterator InsertBefore, const MachineInstr &Orig)
Clones instruction or the whole instruction bundle Orig and insert into MBB before InsertBefore...
int64_t getObjectOffset(int ObjectIdx) const
Return the assigned stack offset of the specified object from the incoming stack pointer.
bool isTerminator(QueryType Type=AnyInBundle) const
Returns true if this instruction part of the terminator for a basic block.
Definition: MachineInstr.h:667
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted...
virtual bool hasLowDefLatency(const TargetSchedModel &SchedModel, const MachineInstr &DefMI, unsigned DefIdx) const
Compute operand latency of a def of &#39;Reg&#39;.
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
Definition: MachineInstr.h:411
const InstrItineraryData * getInstrItineraries() const
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, uint64_t s, unsigned base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
This class defines information used to lower LLVM code to legal SelectionDAG operators that the targe...
bool isImmutableObjectIndex(int ObjectIdx) const
Returns true if the specified index corresponds to an immutable object.
const MCInstrDesc & getDesc() const
Returns the target instruction descriptor of this MachineInstr.
Definition: MachineInstr.h:408
bool isBundle() const
bool mayRaiseFPException() const
Return true if this instruction could possibly raise a floating-point exception.
Definition: MachineInstr.h:858
virtual unsigned getInstrLatency(const InstrItineraryData *ItinData, const MachineInstr &MI, unsigned *PredCost=nullptr) const
Compute the instruction latency of a given instruction.
int alignSPAdjust(int SPAdj) const
alignSPAdjust - This method aligns the stack adjustment to the correct alignment. ...
static bool isAsmComment(const char *Str, const MCAsmInfo &MAI)
virtual bool getMachineCombinerPatterns(MachineInstr &Root, SmallVectorImpl< MachineCombinerPattern > &Patterns) const
Return true when there is potentially a faster code sequence for an instruction chain ending in Root...
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory)...
Definition: APInt.h:32
LLVM_NODISCARD size_t size() const
size - Get the string size.
Definition: StringRef.h:144
bool isLittleEndian() const
Layout endianness...
Definition: DataLayout.h:233
Itinerary data supplied by a subtarget to be used by a target.
void setReg(Register Reg)
Change the register this operand corresponds to.
virtual const TargetInstrInfo * getInstrInfo() const
void addMemOperand(MachineFunction &MF, MachineMemOperand *MO)
Add a MachineMemOperand to the machine instruction.
instr_iterator insert(instr_iterator I, MachineInstr *M)
Insert MI into the instruction list before I, possibly inside a bundle.
virtual unsigned getInlineAsmLength(const char *Str, const MCAsmInfo &MAI, const TargetSubtargetInfo *STI=nullptr) const
Measure the specified inline asm to determine an approximation of its length.
TargetInstrInfo::RegSubRegPairAndIdx RegSubRegPairAndIdx
int64_t getObjectSize(int ObjectIdx) const
Return the size of the specified object.
unsigned getStageLatency(unsigned ItinClassIndx) const
Return the total stage latency of the given class.
bool isDereferenceableInvariantLoad(AliasAnalysis *AA) const
Return true if this load instruction never traps and points to a memory location whose value doesn&#39;t ...
const TargetRegisterClass * constrainRegClass(unsigned Reg, const TargetRegisterClass *RC, unsigned MinNumRegs=0)
constrainRegClass - Constrain the register class of the specified virtual register to be a common sub...
bool isBranch(QueryType Type=AnyInBundle) const
Returns true if this is a conditional, unconditional, or indirect branch.
Definition: MachineInstr.h:675
Instances of this class represent a single low-level machine instruction.
Definition: MCInst.h:158
const DataLayout & getDataLayout() const
Return the DataLayout attached to the Module associated to this MF.
unsigned getKillRegState(bool B)
void updateCallSiteInfo(const MachineInstr *Old, const MachineInstr *New=nullptr)
Update call sites info by deleting entry for Old call instruction.
TargetInstrInfo - Interface to description of machine instruction set.
virtual void getNoop(MCInst &NopInst) const
Return the noop instruction to use for a noop.
This class is intended to be used as a base class for asm properties and features specific to the tar...
Definition: MCAsmInfo.h:56
bool usePreRAHazardRecognizer() const
Provide a global flag for disabling the PreRA hazard recognizer that targets may choose to honor...
static const unsigned CommuteAnyOperandIndex
MachineInstrBuilder BuildMI(MachineFunction &MF, const DebugLoc &DL, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
unsigned getSchedClass() const
Return the scheduling class for this instruction.
Definition: MCInstrDesc.h:596
unsigned getObjectAlignment(int ObjectIdx) const
Return the alignment of the specified stack object.
static MDTuple * get(LLVMContext &Context, ArrayRef< Metadata *> MDs)
Definition: Metadata.h:1165
bool mayStore(QueryType Type=AnyInBundle) const
Return true if this instruction could possibly modify memory.
Definition: MachineInstr.h:838
virtual MachineInstr * commuteInstructionImpl(MachineInstr &MI, bool NewMI, unsigned OpIdx1, unsigned OpIdx2) const
This method commutes the operands of the given machine instruction MI.
virtual void ReplaceTailWithBranchTo(MachineBasicBlock::iterator Tail, MachineBasicBlock *NewDest) const
Delete the instruction OldInst and everything after it, replacing it with an unconditional branch to ...
initializer< Ty > init(const Ty &Val)
Definition: CommandLine.h:432
unsigned LoadLatency
Definition: MCSchedule.h:285
* if(!EatIfPresent(lltok::kw_thread_local)) return false
ParseOptionalThreadLocal := /*empty.
unsigned const MachineRegisterInfo * MRI
virtual unsigned getPredicationCost(const MachineInstr &MI) const
ArrayRef< MachineMemOperand * > memoperands() const
Access to memory operands of the instruction.
Definition: MachineInstr.h:534
HazardRecognizer - This determines whether or not an instruction can be issued this cycle...
virtual bool getInsertSubregLikeInputs(const MachineInstr &MI, unsigned DefIdx, RegSubRegPair &BaseReg, RegSubRegPairAndIdx &InsertedReg) const
Target-dependent implementation of getInsertSubregInputs.
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
int getOperandLatency(unsigned DefClass, unsigned DefIdx, unsigned UseClass, unsigned UseIdx) const
Compute and return the use operand latency of a given itinerary class and operand index if the value ...
MachineInstrBuilder & UseMI
const char * getSeparatorString() const
Definition: MCAsmInfo.h:490
virtual unsigned getNumMicroOps(const InstrItineraryData *ItinData, const MachineInstr &MI) const
Return the number of u-operations the given machine instruction will be decoded to on the target cpu...
StackDirection getStackGrowthDirection() const
getStackGrowthDirection - Return the direction the stack grows
int16_t NumMicroOps
Number of micro-ops; -1 means it's variable.
void setMBB(MachineBasicBlock *MBB)
unsigned getCallFrameSetupOpcode() const
These methods return the opcode of the frame setup/destroy instructions if they exist (-1 otherwise)...
bool hasOneMemOperand() const
Return true if this instruction has exactly one MachineMemOperand.
Definition: MachineInstr.h:567
bool isReassociationCandidate(const MachineInstr &Inst, bool &Commuted) const
Return true if the input Inst is part of a chain of dependent ops that are suitable for reassociatio...
StringRef getCommentString() const
Definition: MCAsmInfo.h:496
virtual void setSpecialOperandAttr(MachineInstr &OldMI1, MachineInstr &OldMI2, MachineInstr &NewMI1, MachineInstr &NewMI2) const
This is an architecture-specific helper function of reassociateOps.
unsigned getVarIdx() const
Get the operand index of the variable list of non-argument operands.
Definition: StackMaps.h:127
void setImm(int64_t immVal)
void setIsInternalRead(bool Val=true)
MI-level patchpoint operands.
Definition: StackMaps.h:76
const MachineInstrBuilder & addFrameIndex(int Idx) const
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function. ...
Definition: Function.cpp:205
MachineCombinerPattern
These are instruction patterns matched by the machine combiner pass.
virtual bool getStackSlotRange(const TargetRegisterClass *RC, unsigned SubIdx, unsigned &Size, unsigned &Offset, const MachineFunction &MF) const
Compute the size in bytes and offset within a stack slot of a spilled register or subregister...
bool isCopy() const
virtual bool PredicateInstruction(MachineInstr &MI, ArrayRef< MachineOperand > Pred) const
Convert the instruction into a predicated instruction.
bool isInsertSubregLike(QueryType Type=IgnoreBundle) const
Return true if this instruction behaves the same way as the generic INSERT_SUBREG instructions...
Definition: MachineInstr.h:814
unsigned getSubRegIdxOffset(unsigned Idx) const
Get the offset of the bit range covered by a sub-register index.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
size_t size() const
Definition: SmallVector.h:52
virtual bool isPredicated(const MachineInstr &MI) const
Returns true if the instruction is already predicated.
bool isMachineOpcode() const
Test if this node has a post-isel opcode, directly corresponding to a MachineInstr opcode...
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
bool isConstantPhysReg(unsigned PhysReg) const
Returns true if PhysReg is unallocatable and constant throughout the function.
bool hasSubClassEq(const TargetRegisterClass *RC) const
Returns true if RC is a sub-class of or equal to this class.
void setIsKill(bool Val=true)
The memory access writes data.
int getOperandConstraint(unsigned OpNum, MCOI::OperandConstraint Constraint) const
Returns the value of the specific constraint if it is set.
Definition: MCInstrDesc.h:202
virtual bool hasLoadFromStackSlot(const MachineInstr &MI, SmallVectorImpl< const MachineMemOperand *> &Accesses) const
If the specified machine instruction has a load from a stack slot, return true along with the FrameIn...
virtual bool isSchedulingBoundary(const MachineInstr &MI, const MachineBasicBlock *MBB, const MachineFunction &MF) const
Test if the given instruction should be considered a scheduling boundary.
Iterator for intrusive lists based on ilist_node.
MachineInstr * foldMemoryOperand(MachineInstr &MI, ArrayRef< unsigned > Ops, int FI, LiveIntervals *LIS=nullptr, VirtRegMap *VRM=nullptr) const
Attempt to fold a load or store of the specified stack slot into the specified machine instruction fo...
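A hedged usage sketch (the helper name and single-operand case are assumptions): a register-allocation-time client can ask the target to fold a spill slot directly into operand OpIdx instead of emitting a separate reload. A null result means the target could not fold, and the original instruction is left untouched.
#include "llvm/CodeGen/TargetInstrInfo.h"
static llvm::MachineInstr *tryFoldStackSlot(const llvm::TargetInstrInfo *TII,
                                            llvm::MachineInstr &MI,
                                            unsigned OpIdx, int FI) {
  unsigned Ops[] = {OpIdx};
  return TII->foldMemoryOperand(MI, Ops, FI);
}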
void addSuccessor(MachineBasicBlock *Succ, BranchProbability Prob=BranchProbability::getUnknown())
Add Succ as a successor of this MachineBasicBlock.
mmo_iterator memoperands_begin() const
Access to memory operands of the instruction.
Definition: MachineInstr.h:552
void reassociateOps(MachineInstr &Root, MachineInstr &Prev, MachineCombinerPattern Pattern, SmallVectorImpl< MachineInstr *> &InsInstrs, SmallVectorImpl< MachineInstr *> &DelInstrs, DenseMap< unsigned, unsigned > &InstrIdxForVirtReg) const
Attempt to reassociate Root and Prev according to Pattern to reduce critical path length...
MachineInstr * CloneMachineInstr(const MachineInstr *Orig)
Create a new MachineInstr which is a copy of Orig, identical in all ways except the instruction has n...
MachineOperand class - Representation of each machine instruction operand.
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small...
Definition: SmallVector.h:837
bool isInsertSubreg() const
A pair composed of a register and a sub-register index.
MachineInstrBuilder & DefMI
unsigned getVarIdx() const
Get the operand index of the variable list of non-argument operands.
Definition: StackMaps.h:56
bool modifiesRegister(Register Reg, const TargetRegisterInfo *TRI) const
Return true if the MachineInstr modifies (fully define or partially define) the specified register...
bool isFrameInstr(const MachineInstr &I) const
Returns true if the argument is a frame pseudo instruction.
Information about stack frame layout on the target.
unsigned HighLatency
Definition: MCSchedule.h:292
unsigned getNumDefs() const
Return the number of MachineOperands that are register definitions.
Definition: MCInstrDesc.h:240
Represents one node in the SelectionDAG.
int64_t getFrameSize(const MachineInstr &I) const
Returns size of the frame associated with the given frame instruction.
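Illustrative sketch (not part of this file; the helper name is hypothetical): combining getCallFrameSetupOpcode and getFrameSize to total the bytes reserved by call-frame setup pseudos in a block.
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
// Sum the frame sizes of all call-frame-setup pseudos in MBB.
static int64_t callFrameSetupBytes(const llvm::MachineBasicBlock &MBB,
                                   const llvm::TargetInstrInfo *TII) {
  int64_t Bytes = 0;
  for (const llvm::MachineInstr &MI : MBB)
    if (MI.getOpcode() == TII->getCallFrameSetupOpcode())
      Bytes += TII->getFrameSize(MI);
  return Bytes;
}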
int64_t getImm() const
bool getRegSequenceInputs(const MachineInstr &MI, unsigned DefIdx, SmallVectorImpl< RegSubRegPairAndIdx > &InputRegs) const
Build the equivalent inputs of a REG_SEQUENCE for the given MI and DefIdx.
DWARF expression.
MachineInstr * getUniqueVRegDef(unsigned Reg) const
getUniqueVRegDef - Return the unique machine instr that defines the specified virtual register or nul...
const Function & getFunction() const
Return the LLVM function that this machine code represents.
virtual bool findCommutedOpIndices(MachineInstr &MI, unsigned &SrcOpIdx1, unsigned &SrcOpIdx2) const
Returns true iff the routine could find two commutable operands in the given machine instruction...
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
Definition: BitVector.h:940
static unsigned getReg(const void *D, unsigned RC, unsigned RegNo)
bool isRenamable() const
isRenamable - Returns true if this register may be renamed, i.e.
virtual void storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, unsigned SrcReg, bool isKill, int FrameIndex, const TargetRegisterClass *RC, const TargetRegisterInfo *TRI) const
Store the specified register of the given register class to the specified stack frame index...
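A minimal spill sketch, assuming the caller already created a suitably sized frame index FI (for example via MachineFrameInfo::CreateSpillStackObject); everything except the storeRegToStackSlot call itself is scaffolding.
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
// Store SrcReg into stack slot FI right before InsertPt, killing SrcReg.
static void spillToSlot(llvm::MachineBasicBlock &MBB,
                        llvm::MachineBasicBlock::iterator InsertPt,
                        llvm::Register SrcReg, int FI,
                        const llvm::TargetRegisterClass *RC) {
  llvm::MachineFunction &MF = *MBB.getParent();
  const llvm::TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo();
  const llvm::TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
  TII->storeRegToStackSlot(MBB, InsertPt, SrcReg, /*isKill=*/true, FI, RC, TRI);
}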
bool isEmpty() const
Returns true if there are no itineraries.
const MachineBasicBlock * getParent() const
Definition: MachineInstr.h:256
MachineRegisterInfo - Keep track of information for virtual and physical registers, including vreg register classes, use/def chains for registers, etc.
The memory access reads data.
TargetSubtargetInfo - Generic base class for all target subtargets.
static MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
virtual Optional< ParamLoadedValue > describeLoadedValue(const MachineInstr &MI) const
Produce the expression describing the MI loading a value into the parameter's forwarding register...
virtual void insertNoop(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI) const
Insert a noop into the instruction stream at the specified point.
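Usage sketch (the helper is a hypothetical name, not an LLVM API): padding a block with a fixed number of target noops via the insertNoop hook.
#include "llvm/CodeGen/TargetInstrInfo.h"
// Insert Count target-specific noops before InsertPt.
static void padWithNoops(llvm::MachineBasicBlock &MBB,
                         llvm::MachineBasicBlock::iterator InsertPt,
                         const llvm::TargetInstrInfo *TII, unsigned Count) {
  for (unsigned Idx = 0; Idx != Count; ++Idx)
    TII->insertNoop(MBB, InsertPt);
}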
bool isRegSequenceLike(QueryType Type=IgnoreBundle) const
Return true if this instruction behaves the same way as the generic REG_SEQUENCE instructions.
Definition: MachineInstr.h:785
Representation of each machine instruction.
Definition: MachineInstr.h:64
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
static bool fixCommutedOpIndices(unsigned &ResultIdx1, unsigned &ResultIdx2, unsigned CommutableOpIdx1, unsigned CommutableOpIdx2)
Assigns the (CommutableOpIdx1, CommutableOpIdx2) pair of commutable operand indices to (ResultIdx1...
bool isCopyInstr(const MachineInstr &MI, const MachineOperand *&Source, const MachineOperand *&Destination) const
If the specified machine instruction is an instruction that moves/copies a value from one register to ano...
virtual bool hasStoreToStackSlot(const MachineInstr &MI, SmallVectorImpl< const MachineMemOperand *> &Accesses) const
If the specified machine instruction has a store to a stack slot, return true along with the FrameInd...
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
MCSchedModel SchedModel
Basic machine properties.
virtual ScheduleHazardRecognizer * CreateTargetHazardRecognizer(const TargetSubtargetInfo *STI, const ScheduleDAG *DAG) const
Allocate and return a hazard recognizer to use for this target when scheduling the machine instructio...
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
MI-level stackmap operands.
Definition: StackMaps.h:35
virtual void genAlternativeCodeSequence(MachineInstr &Root, MachineCombinerPattern Pattern, SmallVectorImpl< MachineInstr *> &InsInstrs, SmallVectorImpl< MachineInstr *> &DelInstrs, DenseMap< unsigned, unsigned > &InstIdxForVirtReg) const
When getMachineCombinerPatterns() finds patterns, this function generates the instructions that could...
virtual unsigned isLoadFromStackSlot(const MachineInstr &MI, int &FrameIndex) const
If the specified machine instruction is a direct load from a stack slot, return the virtual or physic...
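A small sketch (assumed helper name): recognizing whether MI is a plain reload of a particular spill slot. The hook returns the destination register (0 when MI is not a simple stack load) and reports the slot through the out-parameter.
#include "llvm/CodeGen/TargetInstrInfo.h"
static bool isReloadOfSlot(const llvm::TargetInstrInfo *TII,
                           const llvm::MachineInstr &MI, int SpillFI) {
  int FI = 0;
  unsigned DestReg = TII->isLoadFromStackSlot(MI, FI);
  return DestReg != 0 && FI == SpillFI;
}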
int16_t RegClass
This specifies the register class enumeration of the operand if the operand is a register.
Definition: MCInstrDesc.h:76
bool canFoldAsLoad(QueryType Type=IgnoreBundle) const
Return true for instructions that can be folded as memory operands in other instructions.
Definition: MachineInstr.h:771
const MCInstrDesc & get(unsigned Opcode) const
Return the machine instruction descriptor that corresponds to the specified instruction opcode...
Definition: MCInstrInfo.h:44
void setSubReg(unsigned subReg)
virtual const TargetFrameLowering * getFrameLowering() const
virtual bool produceSameValue(const MachineInstr &MI0, const MachineInstr &MI1, const MachineRegisterInfo *MRI=nullptr) const
Return true if two machine instructions would produce identical values.
MI-level Statepoint operands.
Definition: StackMaps.h:154
bool hasOneNonDBGUse(unsigned RegNo) const
hasOneNonDBGUse - Return true if there is exactly one non-Debug use of the specified register...
void removeSuccessor(MachineBasicBlock *Succ, bool NormalizeSuccProbs=false)
Remove successor from the successors list of this MachineBasicBlock.
bool isReg() const
isReg - Tests if this is a MO_Register operand.
LLVM_NODISCARD const char * data() const
data - Get a pointer to the start of the string (which may not be null terminated).
Definition: StringRef.h:136
bool mayLoad(QueryType Type=AnyInBundle) const
Return true if this instruction could possibly read memory.
Definition: MachineInstr.h:825
bool memoperands_empty() const
Return true if we don't have any memory operands which describe the memory access done by this instr...
Definition: MachineInstr.h:564
bool isIdenticalTo(const MachineInstr &Other, MICheckType Check=CheckDefs) const
Return true if this instruction is identical to Other.
void setMemRefs(MachineFunction &MF, ArrayRef< MachineMemOperand *> MemRefs)
Assign this MachineInstr's memory reference descriptor list.
bool hasReassociableSibling(const MachineInstr &Inst, bool &Commuted) const
Return true when Inst has reassociable sibling.
unsigned getMachineOpcode() const
This may only be called if isMachineOpcode returns true.
virtual ScheduleHazardRecognizer * CreateTargetMIHazardRecognizer(const InstrItineraryData *, const ScheduleDAG *DAG) const
Allocate and return a hazard recognizer to use for this target when scheduling the machine instructio...
static bool isVirtualRegister(unsigned Reg)
Return true if the specified register number is in the virtual register namespace.
Definition: Register.h:69
bool isPosition() const
bool getMemOperandWithOffset(const MachineInstr &LdSt, const MachineOperand *&BaseOp, int64_t &Offset, const TargetRegisterInfo *TRI) const override
Get the base register and byte offset of a load/store instr.
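Usage sketch (hedged; the predicate and its name are assumptions): a scheduler or load/store optimizer can query the base operand and byte offset of a memory access, for example to check whether two accesses share a base register.
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
// True if the target can decompose both accesses and they use the same base.
static bool shareBaseOperand(const llvm::TargetInstrInfo *TII,
                             const llvm::TargetRegisterInfo *TRI,
                             const llvm::MachineInstr &A,
                             const llvm::MachineInstr &B) {
  const llvm::MachineOperand *BaseA = nullptr, *BaseB = nullptr;
  int64_t OffA = 0, OffB = 0;
  return TII->getMemOperandWithOffset(A, BaseA, OffA, TRI) &&
         TII->getMemOperandWithOffset(B, BaseB, OffB, TRI) &&
         BaseA->isIdenticalTo(*BaseB);
}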
unsigned getStackPointerRegisterToSaveRestore() const
If a physical register, this specifies the register that llvm.savestack/llvm.restorestack should save...
virtual bool isAssociativeAndCommutative(const MachineInstr &Inst) const
Return true when Inst is both associative and commutative.
const MCOperandInfo * OpInfo
Definition: MCInstrDesc.h:189
int getOperandCycle(unsigned ItinClassIndx, unsigned OperandIdx) const
Return the cycle for the given class and operand.
virtual unsigned getMaxInstLength(const MCSubtargetInfo *STI=nullptr) const
Returns the maximum possible encoded instruction size in bytes.
Definition: MCAsmInfo.h:484
unsigned getSubRegIdxSize(unsigned Idx) const
Get the size of the bit range covered by a sub-register index.
bool hasUnmodeledSideEffects() const
Return true if this instruction has side effects that are not modeled by mayLoad / mayStore...
bool isBarrier(QueryType Type=AnyInBundle) const
Returns true if the specified instruction stops control flow from executing the instruction immediate...
Definition: MachineInstr.h:658
bool isMBB() const
isMBB - Tests if this is a MO_MachineBasicBlock operand.
Register getReg() const
getReg - Returns the register number.
virtual bool getRegSequenceLikeInputs(const MachineInstr &MI, unsigned DefIdx, SmallVectorImpl< RegSubRegPairAndIdx > &InputRegs) const
Target-dependent implementation of getRegSequenceInputs.
bool getExtractSubregInputs(const MachineInstr &MI, unsigned DefIdx, RegSubRegPairAndIdx &InputReg) const
Build the equivalent inputs of an EXTRACT_SUBREG for the given MI and DefIdx.
const MachineOperand & getOperand(unsigned i) const
Definition: MachineInstr.h:416
static const TargetRegisterClass * canFoldCopy(const MachineInstr &MI, unsigned FoldIdx)
Machine model for scheduling, bundling, and heuristics.
Definition: MCSchedule.h:244
bool getInsertSubregInputs(const MachineInstr &MI, unsigned DefIdx, RegSubRegPair &BaseReg, RegSubRegPairAndIdx &InsertedReg) const
Build the equivalent inputs of an INSERT_SUBREG for the given MI and DefIdx.
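A hedged debugging sketch (the helper and the use of DefIdx == 0 are assumptions): decomposing an INSERT_SUBREG-like instruction into its base register and the inserted register plus sub-register index, using the RegSubRegPair/RegSubRegPairAndIdx records described above.
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/Support/Debug.h"
static void printInsertSubregInputs(const llvm::TargetInstrInfo *TII,
                                    const llvm::MachineInstr &MI) {
  llvm::TargetInstrInfo::RegSubRegPair BaseReg;
  llvm::TargetInstrInfo::RegSubRegPairAndIdx InsertedReg;
  if (TII->getInsertSubregInputs(MI, /*DefIdx=*/0, BaseReg, InsertedReg))
    llvm::dbgs() << "base " << llvm::printReg(BaseReg.Reg) << ", inserted "
                 << llvm::printReg(InsertedReg.Reg) << " at subreg index "
                 << InsertedReg.SubIdx << '\n';
}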
bool isExtractSubreg() const
virtual ScheduleHazardRecognizer * CreateTargetPostRAHazardRecognizer(const InstrItineraryData *, const ScheduleDAG *DAG) const
Allocate and return a hazard recognizer to use for this target when scheduling the machine instructio...
int computeDefOperandLatency(const InstrItineraryData *ItinData, const MachineInstr &DefMI) const
If we can determine the operand latency from the def only, without itinerary lookup, do so.
virtual int getOperandLatency(const InstrItineraryData *ItinData, SDNode *DefNode, unsigned DefIdx, SDNode *UseNode, unsigned UseIdx) const
virtual void loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, unsigned DestReg, int FrameIndex, const TargetRegisterClass *RC, const TargetRegisterInfo *TRI) const
Load the specified register of the given register class from the specified stack frame index...
bool isCommutable(QueryType Type=IgnoreBundle) const
Return true if this may be a 2- or 3-address instruction (of the form "X = op Y, Z, ..."), which produces the same result if Y and Z are exchanged.
Definition: MachineInstr.h:877
Wrapper class representing virtual and physical registers.
Definition: Register.h:19
virtual int getSPAdjust(const MachineInstr &MI) const
Returns the actual stack pointer adjustment made by an instruction as part of a call sequence...
bool isNotDuplicable(QueryType Type=AnyInBundle) const
Return true if this instruction cannot be safely duplicated.
Definition: MachineInstr.h:741
This file describes how to lower LLVM code to machine code.
mmo_iterator memoperands_end() const
Access to memory operands of the instruction.
Definition: MachineInstr.h:559
virtual const TargetRegisterClass * getPointerRegClass(const MachineFunction &MF, unsigned Kind=0) const
Returns a TargetRegisterClass used for pointer values.
A pair composed of a pair of a register and a sub-register index, and another sub-register index...
bool is_contained(R &&Range, const E &Element)
Wrapper function around std::find to detect if an element exists in a container.
Definition: STLExtras.h:1224