LLVM  9.0.0svn
ARMBaseInstrInfo.cpp
1 //===-- ARMBaseInstrInfo.cpp - ARM Instruction Information ----------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file contains the Base ARM implementation of the TargetInstrInfo class.
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #include "ARMBaseInstrInfo.h"
14 #include "ARMBaseRegisterInfo.h"
15 #include "ARMConstantPoolValue.h"
16 #include "ARMFeatures.h"
17 #include "ARMHazardRecognizer.h"
18 #include "ARMMachineFunctionInfo.h"
19 #include "ARMSubtarget.h"
20 #include "MCTargetDesc/ARMAddressingModes.h"
21 #include "MCTargetDesc/ARMBaseInfo.h"
22 #include "llvm/ADT/DenseMap.h"
23 #include "llvm/ADT/STLExtras.h"
24 #include "llvm/ADT/SmallSet.h"
25 #include "llvm/ADT/SmallVector.h"
26 #include "llvm/ADT/Triple.h"
42 #include "llvm/IR/Attributes.h"
43 #include "llvm/IR/Constants.h"
44 #include "llvm/IR/DebugLoc.h"
45 #include "llvm/IR/Function.h"
46 #include "llvm/IR/GlobalValue.h"
47 #include "llvm/MC/MCAsmInfo.h"
48 #include "llvm/MC/MCInstrDesc.h"
49 #include "llvm/MC/MCInstrItineraries.h"
50 #include "llvm/Support/BranchProbability.h"
51 #include "llvm/Support/Casting.h"
52 #include "llvm/Support/CommandLine.h"
53 #include "llvm/Support/Compiler.h"
54 #include "llvm/Support/Debug.h"
55 #include "llvm/Support/ErrorHandling.h"
56 #include "llvm/Support/raw_ostream.h"
57 #include "llvm/Target/TargetMachine.h"
58 #include <algorithm>
59 #include <cassert>
60 #include <cstdint>
61 #include <iterator>
62 #include <new>
63 #include <utility>
64 #include <vector>
65 
66 using namespace llvm;
67 
68 #define DEBUG_TYPE "arm-instrinfo"
69 
70 #define GET_INSTRINFO_CTOR_DTOR
71 #include "ARMGenInstrInfo.inc"
72 
73 static cl::opt<bool>
74 EnableARM3Addr("enable-arm-3-addr-conv", cl::Hidden,
75  cl::desc("Enable ARM 2-addr to 3-addr conv"));
76 
77 /// ARM_MLxEntry - Record information about MLA / MLS instructions.
78 struct ARM_MLxEntry {
79  uint16_t MLxOpc; // MLA / MLS opcode
80  uint16_t MulOpc; // Expanded multiplication opcode
81  uint16_t AddSubOpc; // Expanded add / sub opcode
82  bool NegAcc; // True if the acc is negated before the add / sub.
83  bool HasLane; // True if instruction has an extra "lane" operand.
84 };
85 
86 static const ARM_MLxEntry ARM_MLxTable[] = {
87  // MLxOpc, MulOpc, AddSubOpc, NegAcc, HasLane
88  // fp scalar ops
89  { ARM::VMLAS, ARM::VMULS, ARM::VADDS, false, false },
90  { ARM::VMLSS, ARM::VMULS, ARM::VSUBS, false, false },
91  { ARM::VMLAD, ARM::VMULD, ARM::VADDD, false, false },
92  { ARM::VMLSD, ARM::VMULD, ARM::VSUBD, false, false },
93  { ARM::VNMLAS, ARM::VNMULS, ARM::VSUBS, true, false },
94  { ARM::VNMLSS, ARM::VMULS, ARM::VSUBS, true, false },
95  { ARM::VNMLAD, ARM::VNMULD, ARM::VSUBD, true, false },
96  { ARM::VNMLSD, ARM::VMULD, ARM::VSUBD, true, false },
97 
98  // fp SIMD ops
99  { ARM::VMLAfd, ARM::VMULfd, ARM::VADDfd, false, false },
100  { ARM::VMLSfd, ARM::VMULfd, ARM::VSUBfd, false, false },
101  { ARM::VMLAfq, ARM::VMULfq, ARM::VADDfq, false, false },
102  { ARM::VMLSfq, ARM::VMULfq, ARM::VSUBfq, false, false },
103  { ARM::VMLAslfd, ARM::VMULslfd, ARM::VADDfd, false, true },
104  { ARM::VMLSslfd, ARM::VMULslfd, ARM::VSUBfd, false, true },
105  { ARM::VMLAslfq, ARM::VMULslfq, ARM::VADDfq, false, true },
106  { ARM::VMLSslfq, ARM::VMULslfq, ARM::VSUBfq, false, true },
107 };
108 
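// The table above is indexed by the constructor below: MLxEntryMap maps each
// MLA/MLS opcode to its row, and MLxHazardOpcodes collects the expanded
// multiply and add/sub opcodes for hazard detection.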
109 ARMBaseInstrInfo::ARMBaseInstrInfo(const ARMSubtarget& STI)
110  : ARMGenInstrInfo(ARM::ADJCALLSTACKDOWN, ARM::ADJCALLSTACKUP),
111  Subtarget(STI) {
112  for (unsigned i = 0, e = array_lengthof(ARM_MLxTable); i != e; ++i) {
113  if (!MLxEntryMap.insert(std::make_pair(ARM_MLxTable[i].MLxOpc, i)).second)
114  llvm_unreachable("Duplicated entries?");
115  MLxHazardOpcodes.insert(ARM_MLxTable[i].AddSubOpc);
116  MLxHazardOpcodes.insert(ARM_MLxTable[i].MulOpc);
117  }
118 }
119 
120 // Use a ScoreboardHazardRecognizer for prepass ARM scheduling. TargetInstrImpl
121 // currently defaults to no prepass hazard recognizer.
122 ScheduleHazardRecognizer *
123 ARMBaseInstrInfo::CreateTargetHazardRecognizer(const TargetSubtargetInfo *STI,
124  const ScheduleDAG *DAG) const {
125  if (usePreRAHazardRecognizer()) {
126  const InstrItineraryData *II =
127  static_cast<const ARMSubtarget *>(STI)->getInstrItineraryData();
128  return new ScoreboardHazardRecognizer(II, DAG, "pre-RA-sched");
129  }
130  return TargetInstrInfo::CreateTargetHazardRecognizer(STI, DAG);
131 }
132 
133 ScheduleHazardRecognizer *ARMBaseInstrInfo::
134 CreateTargetPostRAHazardRecognizer(const InstrItineraryData *II,
135  const ScheduleDAG *DAG) const {
136  if (Subtarget.isThumb2() || Subtarget.hasVFP2())
137  return (ScheduleHazardRecognizer *)new ARMHazardRecognizer(II, DAG);
138  return TargetInstrInfo::CreateTargetPostRAHazardRecognizer(II, DAG);
139 }
140 
141 MachineInstr *ARMBaseInstrInfo::convertToThreeAddress(
142  MachineFunction::iterator &MFI, MachineInstr &MI, LiveVariables *LV) const {
143  // FIXME: Thumb2 support.
144 
145  if (!EnableARM3Addr)
146  return nullptr;
147 
148  MachineFunction &MF = *MI.getParent()->getParent();
149  uint64_t TSFlags = MI.getDesc().TSFlags;
150  bool isPre = false;
151  switch ((TSFlags & ARMII::IndexModeMask) >> ARMII::IndexModeShift) {
152  default: return nullptr;
153  case ARMII::IndexModePre:
154  isPre = true;
155  break;
156  case ARMII::IndexModePost:
157  break;
158  }
159 
160  // Try splitting an indexed load/store to an un-indexed one plus an add/sub
161  // operation.
162  unsigned MemOpc = getUnindexedOpcode(MI.getOpcode());
163  if (MemOpc == 0)
164  return nullptr;
165 
166  MachineInstr *UpdateMI = nullptr;
167  MachineInstr *MemMI = nullptr;
168  unsigned AddrMode = (TSFlags & ARMII::AddrModeMask);
169  const MCInstrDesc &MCID = MI.getDesc();
170  unsigned NumOps = MCID.getNumOperands();
171  bool isLoad = !MI.mayStore();
172  const MachineOperand &WB = isLoad ? MI.getOperand(1) : MI.getOperand(0);
173  const MachineOperand &Base = MI.getOperand(2);
174  const MachineOperand &Offset = MI.getOperand(NumOps - 3);
175  unsigned WBReg = WB.getReg();
176  unsigned BaseReg = Base.getReg();
177  unsigned OffReg = Offset.getReg();
178  unsigned OffImm = MI.getOperand(NumOps - 2).getImm();
179  ARMCC::CondCodes Pred = (ARMCC::CondCodes)MI.getOperand(NumOps - 1).getImm();
180  switch (AddrMode) {
181  default: llvm_unreachable("Unknown indexed op!");
182  case ARMII::AddrMode2: {
183  bool isSub = ARM_AM::getAM2Op(OffImm) == ARM_AM::sub;
184  unsigned Amt = ARM_AM::getAM2Offset(OffImm);
185  if (OffReg == 0) {
186  if (ARM_AM::getSOImmVal(Amt) == -1)
187  // Can't encode it in a so_imm operand. This transformation will
188  // add more than 1 instruction. Abandon!
189  return nullptr;
190  UpdateMI = BuildMI(MF, MI.getDebugLoc(),
191  get(isSub ? ARM::SUBri : ARM::ADDri), WBReg)
192  .addReg(BaseReg)
193  .addImm(Amt)
194  .add(predOps(Pred))
195  .add(condCodeOp());
196  } else if (Amt != 0) {
197  ARM_AM::ShiftOpc ShOpc = ARM_AM::getAM2ShiftOpc(OffImm);
198  unsigned SOOpc = ARM_AM::getSORegOpc(ShOpc, Amt);
199  UpdateMI = BuildMI(MF, MI.getDebugLoc(),
200  get(isSub ? ARM::SUBrsi : ARM::ADDrsi), WBReg)
201  .addReg(BaseReg)
202  .addReg(OffReg)
203  .addReg(0)
204  .addImm(SOOpc)
205  .add(predOps(Pred))
206  .add(condCodeOp());
207  } else
208  UpdateMI = BuildMI(MF, MI.getDebugLoc(),
209  get(isSub ? ARM::SUBrr : ARM::ADDrr), WBReg)
210  .addReg(BaseReg)
211  .addReg(OffReg)
212  .add(predOps(Pred))
213  .add(condCodeOp());
214  break;
215  }
216  case ARMII::AddrMode3 : {
217  bool isSub = ARM_AM::getAM3Op(OffImm) == ARM_AM::sub;
218  unsigned Amt = ARM_AM::getAM3Offset(OffImm);
219  if (OffReg == 0)
220  // Immediate is 8-bits. It's guaranteed to fit in a so_imm operand.
221  UpdateMI = BuildMI(MF, MI.getDebugLoc(),
222  get(isSub ? ARM::SUBri : ARM::ADDri), WBReg)
223  .addReg(BaseReg)
224  .addImm(Amt)
225  .add(predOps(Pred))
226  .add(condCodeOp());
227  else
228  UpdateMI = BuildMI(MF, MI.getDebugLoc(),
229  get(isSub ? ARM::SUBrr : ARM::ADDrr), WBReg)
230  .addReg(BaseReg)
231  .addReg(OffReg)
232  .add(predOps(Pred))
233  .add(condCodeOp());
234  break;
235  }
236  }
237 
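// Emit the un-indexed memory access and the base-register update in the
// required order: pre-indexed forms update the base before the access,
// post-indexed forms update it afterwards.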
238  std::vector<MachineInstr*> NewMIs;
239  if (isPre) {
240  if (isLoad)
241  MemMI =
242  BuildMI(MF, MI.getDebugLoc(), get(MemOpc), MI.getOperand(0).getReg())
243  .addReg(WBReg)
244  .addImm(0)
245  .addImm(Pred);
246  else
247  MemMI = BuildMI(MF, MI.getDebugLoc(), get(MemOpc))
248  .addReg(MI.getOperand(1).getReg())
249  .addReg(WBReg)
250  .addReg(0)
251  .addImm(0)
252  .addImm(Pred);
253  NewMIs.push_back(MemMI);
254  NewMIs.push_back(UpdateMI);
255  } else {
256  if (isLoad)
257  MemMI =
258  BuildMI(MF, MI.getDebugLoc(), get(MemOpc), MI.getOperand(0).getReg())
259  .addReg(BaseReg)
260  .addImm(0)
261  .addImm(Pred);
262  else
263  MemMI = BuildMI(MF, MI.getDebugLoc(), get(MemOpc))
264  .addReg(MI.getOperand(1).getReg())
265  .addReg(BaseReg)
266  .addReg(0)
267  .addImm(0)
268  .addImm(Pred);
269  if (WB.isDead())
270  UpdateMI->getOperand(0).setIsDead();
271  NewMIs.push_back(UpdateMI);
272  NewMIs.push_back(MemMI);
273  }
274 
275  // Transfer LiveVariables states, kill / dead info.
276  if (LV) {
277  for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
278  MachineOperand &MO = MI.getOperand(i);
279  if (!MO.isReg() || !TargetRegisterInfo::isVirtualRegister(MO.getReg())) continue;
280  unsigned Reg = MO.getReg();
281 
282  LiveVariables::VarInfo &VI = LV->getVarInfo(Reg);
283  if (MO.isDef()) {
284  MachineInstr *NewMI = (Reg == WBReg) ? UpdateMI : MemMI;
285  if (MO.isDead())
286  LV->addVirtualRegisterDead(Reg, *NewMI);
287  }
288  if (MO.isUse() && MO.isKill()) {
289  for (unsigned j = 0; j < 2; ++j) {
290  // Look at the two new MI's in reverse order.
291  MachineInstr *NewMI = NewMIs[j];
292  if (!NewMI->readsRegister(Reg))
293  continue;
294  LV->addVirtualRegisterKilled(Reg, *NewMI);
295  if (VI.removeKill(MI))
296  VI.Kills.push_back(NewMI);
297  break;
298  }
299  }
300  }
301  }
302  }
303 
304  MachineBasicBlock::iterator MBBI = MI.getIterator();
305  MFI->insert(MBBI, NewMIs[1]);
306  MFI->insert(MBBI, NewMIs[0]);
307  return NewMIs[0];
308 }
309 
310 // Branch analysis.
311 bool ARMBaseInstrInfo::analyzeBranch(MachineBasicBlock &MBB,
312  MachineBasicBlock *&TBB,
313  MachineBasicBlock *&FBB,
314  SmallVectorImpl<MachineOperand> &Cond,
315  bool AllowModify) const {
316  TBB = nullptr;
317  FBB = nullptr;
318 
319  MachineBasicBlock::iterator I = MBB.end();
320  if (I == MBB.begin())
321  return false; // Empty blocks are easy.
322  --I;
323 
324  // Walk backwards from the end of the basic block until the branch is
325  // analyzed or we give up.
326  while (isPredicated(*I) || I->isTerminator() || I->isDebugValue()) {
327  // Flag to be raised on unanalyzeable instructions. This is useful in cases
328  // where we want to clean up on the end of the basic block before we bail
329  // out.
330  bool CantAnalyze = false;
331 
332  // Skip over DEBUG values and predicated nonterminators.
333  while (I->isDebugInstr() || !I->isTerminator()) {
334  if (I == MBB.begin())
335  return false;
336  --I;
337  }
338 
339  if (isIndirectBranchOpcode(I->getOpcode()) ||
340  isJumpTableBranchOpcode(I->getOpcode())) {
341  // Indirect branches and jump tables can't be analyzed, but we still want
342  // to clean up any instructions at the tail of the basic block.
343  CantAnalyze = true;
344  } else if (isUncondBranchOpcode(I->getOpcode())) {
345  TBB = I->getOperand(0).getMBB();
346  } else if (isCondBranchOpcode(I->getOpcode())) {
347  // Bail out if we encounter multiple conditional branches.
348  if (!Cond.empty())
349  return true;
350 
351  assert(!FBB && "FBB should have been null.");
352  FBB = TBB;
353  TBB = I->getOperand(0).getMBB();
354  Cond.push_back(I->getOperand(1));
355  Cond.push_back(I->getOperand(2));
356  } else if (I->isReturn()) {
357  // Returns can't be analyzed, but we should run cleanup.
358  CantAnalyze = !isPredicated(*I);
359  } else {
360  // We encountered an unrecognized terminator. Bail out immediately.
361  return true;
362  }
363 
364  // Cleanup code - to be run for unpredicated unconditional branches and
365  // returns.
366  if (!isPredicated(*I) &&
367  (isUncondBranchOpcode(I->getOpcode()) ||
368  isIndirectBranchOpcode(I->getOpcode()) ||
369  isJumpTableBranchOpcode(I->getOpcode()) ||
370  I->isReturn())) {
371  // Forget any previous conditional branch information - it no longer applies.
372  Cond.clear();
373  FBB = nullptr;
374 
375  // If we can modify the function, delete everything below this
376  // unconditional branch.
377  if (AllowModify) {
378  MachineBasicBlock::iterator DI = std::next(I);
379  while (DI != MBB.end()) {
380  MachineInstr &InstToDelete = *DI;
381  ++DI;
382  InstToDelete.eraseFromParent();
383  }
384  }
385  }
386 
387  if (CantAnalyze)
388  return true;
389 
390  if (I == MBB.begin())
391  return false;
392 
393  --I;
394  }
395 
396  // We made it past the terminators without bailing out - we must have
397  // analyzed this branch successfully.
398  return false;
399 }
400 
401 unsigned ARMBaseInstrInfo::removeBranch(MachineBasicBlock &MBB,
402  int *BytesRemoved) const {
403  assert(!BytesRemoved && "code size not handled");
404 
405  MachineBasicBlock::iterator I = MBB.getLastNonDebugInstr();
406  if (I == MBB.end())
407  return 0;
408 
409  if (!isUncondBranchOpcode(I->getOpcode()) &&
410  !isCondBranchOpcode(I->getOpcode()))
411  return 0;
412 
413  // Remove the branch.
414  I->eraseFromParent();
415 
416  I = MBB.end();
417 
418  if (I == MBB.begin()) return 1;
419  --I;
420  if (!isCondBranchOpcode(I->getOpcode()))
421  return 1;
422 
423  // Remove the branch.
424  I->eraseFromParent();
425  return 2;
426 }
427 
428 unsigned ARMBaseInstrInfo::insertBranch(MachineBasicBlock &MBB,
429  MachineBasicBlock *TBB,
430  MachineBasicBlock *FBB,
431  ArrayRef<MachineOperand> Cond,
432  const DebugLoc &DL,
433  int *BytesAdded) const {
434  assert(!BytesAdded && "code size not handled");
435  ARMFunctionInfo *AFI = MBB.getParent()->getInfo<ARMFunctionInfo>();
436  int BOpc = !AFI->isThumbFunction()
437  ? ARM::B : (AFI->isThumb2Function() ? ARM::t2B : ARM::tB);
438  int BccOpc = !AFI->isThumbFunction()
439  ? ARM::Bcc : (AFI->isThumb2Function() ? ARM::t2Bcc : ARM::tBcc);
440  bool isThumb = AFI->isThumbFunction() || AFI->isThumb2Function();
441 
442  // Shouldn't be a fall through.
443  assert(TBB && "insertBranch must not be told to insert a fallthrough");
444  assert((Cond.size() == 2 || Cond.size() == 0) &&
445  "ARM branch conditions have two components!");
446 
447  // For conditional branches, we use addOperand to preserve CPSR flags.
448 
449  if (!FBB) {
450  if (Cond.empty()) { // Unconditional branch?
451  if (isThumb)
452  BuildMI(&MBB, DL, get(BOpc)).addMBB(TBB).add(predOps(ARMCC::AL));
453  else
454  BuildMI(&MBB, DL, get(BOpc)).addMBB(TBB);
455  } else
456  BuildMI(&MBB, DL, get(BccOpc))
457  .addMBB(TBB)
458  .addImm(Cond[0].getImm())
459  .add(Cond[1]);
460  return 1;
461  }
462 
463  // Two-way conditional branch.
464  BuildMI(&MBB, DL, get(BccOpc))
465  .addMBB(TBB)
466  .addImm(Cond[0].getImm())
467  .add(Cond[1]);
468  if (isThumb)
469  BuildMI(&MBB, DL, get(BOpc)).addMBB(FBB).add(predOps(ARMCC::AL));
470  else
471  BuildMI(&MBB, DL, get(BOpc)).addMBB(FBB);
472  return 2;
473 }
474 
475 bool ARMBaseInstrInfo::
476 reverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const {
477  ARMCC::CondCodes CC = (ARMCC::CondCodes)(int)Cond[0].getImm();
478  Cond[0].setImm(ARMCC::getOppositeCondition(CC));
479  return false;
480 }
481 
482 bool ARMBaseInstrInfo::isPredicated(const MachineInstr &MI) const {
483  if (MI.isBundle()) {
484  MachineBasicBlock::const_instr_iterator I = MI.getIterator();
485  MachineBasicBlock::const_instr_iterator E = MI.getParent()->instr_end();
486  while (++I != E && I->isInsideBundle()) {
487  int PIdx = I->findFirstPredOperandIdx();
488  if (PIdx != -1 && I->getOperand(PIdx).getImm() != ARMCC::AL)
489  return true;
490  }
491  return false;
492  }
493 
494  int PIdx = MI.findFirstPredOperandIdx();
495  return PIdx != -1 && MI.getOperand(PIdx).getImm() != ARMCC::AL;
496 }
497 
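// Rewrite MI so it executes under the given predicate: unconditional branches
// are turned into conditional branches, and any existing predicate operands
// are overwritten in place.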
498 bool ARMBaseInstrInfo::PredicateInstruction(
499  MachineInstr &MI, ArrayRef<MachineOperand> Pred) const {
500  unsigned Opc = MI.getOpcode();
501  if (isUncondBranchOpcode(Opc)) {
502  MI.setDesc(get(getMatchingCondBranchOpcode(Opc)));
503  MachineInstrBuilder(*MI.getParent()->getParent(), MI)
504  .addImm(Pred[0].getImm())
505  .addReg(Pred[1].getReg());
506  return true;
507  }
508 
509  int PIdx = MI.findFirstPredOperandIdx();
510  if (PIdx != -1) {
511  MachineOperand &PMO = MI.getOperand(PIdx);
512  PMO.setImm(Pred[0].getImm());
513  MI.getOperand(PIdx+1).setReg(Pred[1].getReg());
514  return true;
515  }
516  return false;
517 }
518 
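// A predicate subsumes another when every condition accepted by the second is
// also accepted by the first, e.g. AL subsumes everything and HS subsumes HI.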
519 bool ARMBaseInstrInfo::SubsumesPredicate(ArrayRef<MachineOperand> Pred1,
520  ArrayRef<MachineOperand> Pred2) const {
521  if (Pred1.size() > 2 || Pred2.size() > 2)
522  return false;
523 
524  ARMCC::CondCodes CC1 = (ARMCC::CondCodes)Pred1[0].getImm();
525  ARMCC::CondCodes CC2 = (ARMCC::CondCodes)Pred2[0].getImm();
526  if (CC1 == CC2)
527  return true;
528 
529  switch (CC1) {
530  default:
531  return false;
532  case ARMCC::AL:
533  return true;
534  case ARMCC::HS:
535  return CC2 == ARMCC::HI;
536  case ARMCC::LS:
537  return CC2 == ARMCC::LO || CC2 == ARMCC::EQ;
538  case ARMCC::GE:
539  return CC2 == ARMCC::GT;
540  case ARMCC::LE:
541  return CC2 == ARMCC::LT;
542  }
543 }
544 
545 bool ARMBaseInstrInfo::DefinesPredicate(
546  MachineInstr &MI, std::vector<MachineOperand> &Pred) const {
547  bool Found = false;
548  for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
549  const MachineOperand &MO = MI.getOperand(i);
550  if ((MO.isRegMask() && MO.clobbersPhysReg(ARM::CPSR)) ||
551  (MO.isReg() && MO.isDef() && MO.getReg() == ARM::CPSR)) {
552  Pred.push_back(MO);
553  Found = true;
554  }
555  }
556 
557  return Found;
558 }
559 
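// True when MI has a definition of CPSR that is not marked dead, i.e. the
// instruction really sets the flags.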
560 bool ARMBaseInstrInfo::isCPSRDefined(const MachineInstr &MI) {
561  for (const auto &MO : MI.operands())
562  if (MO.isReg() && MO.getReg() == ARM::CPSR && MO.isDef() && !MO.isDead())
563  return true;
564  return false;
565 }
566 
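// The addressing-mode predicates below classify load/store forms (register
// offset, negative offset, scaled offset) for per-CPU scheduling models such
// as the Cortex-A57 model.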
567 bool ARMBaseInstrInfo::isAddrMode3OpImm(const MachineInstr &MI,
568  unsigned Op) const {
569  const MachineOperand &Offset = MI.getOperand(Op + 1);
570  return Offset.getReg() != 0;
571 }
572 
573 // Load with negative register offset requires additional 1cyc and +I unit
574 // for Cortex A57
575 bool ARMBaseInstrInfo::isAddrMode3OpMinusReg(const MachineInstr &MI,
576  unsigned Op) const {
577  const MachineOperand &Offset = MI.getOperand(Op + 1);
578  const MachineOperand &Opc = MI.getOperand(Op + 2);
579  assert(Opc.isImm());
580  assert(Offset.isReg());
581  int64_t OpcImm = Opc.getImm();
582 
583  bool isSub = ARM_AM::getAM3Op(OpcImm) == ARM_AM::sub;
584  return (isSub && Offset.getReg() != 0);
585 }
586 
587 bool ARMBaseInstrInfo::isLdstScaledReg(const MachineInstr &MI,
588  unsigned Op) const {
589  const MachineOperand &Opc = MI.getOperand(Op + 2);
590  unsigned OffImm = Opc.getImm();
591  return ARM_AM::getAM2ShiftOpc(OffImm) != ARM_AM::no_shift;
592 }
593 
594 // Load, scaled register offset, not plus LSL2
595 bool ARMBaseInstrInfo::isLdstScaledRegNotPlusLsl2(const MachineInstr &MI,
596  unsigned Op) const {
597  const MachineOperand &Opc = MI.getOperand(Op + 2);
598  unsigned OffImm = Opc.getImm();
599 
600  bool isAdd = ARM_AM::getAM2Op(OffImm) == ARM_AM::add;
601  unsigned Amt = ARM_AM::getAM2Offset(OffImm);
602  ARM_AM::ShiftOpc ShiftOpc = ARM_AM::getAM2ShiftOpc(OffImm);
603  if (ShiftOpc == ARM_AM::no_shift) return false; // not scaled
604  bool SimpleScaled = (isAdd && ShiftOpc == ARM_AM::lsl && Amt == 2);
605  return !SimpleScaled;
606 }
607 
608 // Minus reg for ldstso addr mode
609 bool ARMBaseInstrInfo::isLdstSoMinusReg(const MachineInstr &MI,
610  unsigned Op) const {
611  unsigned OffImm = MI.getOperand(Op + 2).getImm();
612  return ARM_AM::getAM2Op(OffImm) == ARM_AM::sub;
613 }
614 
615 // Load, scaled register offset
616 bool ARMBaseInstrInfo::isAm2ScaledReg(const MachineInstr &MI,
617  unsigned Op) const {
618  unsigned OffImm = MI.getOperand(Op + 2).getImm();
619  return ARM_AM::getAM2ShiftOpc(OffImm) != ARM_AM::no_shift;
620 }
621 
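// Thumb1 flag-setting instructions may only be placed in an IT block when they
// do not define CPSR; the T1/T2 encodings listed below are therefore rejected
// whenever they set the flags.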
622 static bool isEligibleForITBlock(const MachineInstr *MI) {
623  switch (MI->getOpcode()) {
624  default: return true;
625  case ARM::tADC: // ADC (register) T1
626  case ARM::tADDi3: // ADD (immediate) T1
627  case ARM::tADDi8: // ADD (immediate) T2
628  case ARM::tADDrr: // ADD (register) T1
629  case ARM::tAND: // AND (register) T1
630  case ARM::tASRri: // ASR (immediate) T1
631  case ARM::tASRrr: // ASR (register) T1
632  case ARM::tBIC: // BIC (register) T1
633  case ARM::tEOR: // EOR (register) T1
634  case ARM::tLSLri: // LSL (immediate) T1
635  case ARM::tLSLrr: // LSL (register) T1
636  case ARM::tLSRri: // LSR (immediate) T1
637  case ARM::tLSRrr: // LSR (register) T1
638  case ARM::tMUL: // MUL T1
639  case ARM::tMVN: // MVN (register) T1
640  case ARM::tORR: // ORR (register) T1
641  case ARM::tROR: // ROR (register) T1
642  case ARM::tRSB: // RSB (immediate) T1
643  case ARM::tSBC: // SBC (register) T1
644  case ARM::tSUBi3: // SUB (immediate) T1
645  case ARM::tSUBi8: // SUB (immediate) T2
646  case ARM::tSUBrr: // SUB (register) T1
647  return !ARMBaseInstrInfo::isCPSRDefined(*MI);
648  }
649 }
650 
651 /// isPredicable - Return true if the specified instruction can be predicated.
652 /// By default, this returns true for every instruction with a
653 /// PredicateOperand.
654 bool ARMBaseInstrInfo::isPredicable(const MachineInstr &MI) const {
655  if (!MI.isPredicable())
656  return false;
657 
658  if (MI.isBundle())
659  return false;
660 
661  if (!isEligibleForITBlock(&MI))
662  return false;
663 
664  const ARMFunctionInfo *AFI =
665  MI.getParent()->getParent()->getInfo<ARMFunctionInfo>();
666 
667  // Neon instructions in Thumb2 IT blocks are deprecated, see ARMARM.
668  // In their ARM encoding, they can't be encoded in a conditional form.
669  if ((MI.getDesc().TSFlags & ARMII::DomainMask) == ARMII::DomainNEON)
670  return false;
671 
672  if (AFI->isThumb2Function()) {
673  if (getSubtarget().restrictIT())
674  return isV8EligibleForIT(&MI);
675  }
676 
677  return true;
678 }
679 
680 namespace llvm {
681 
682 template <> bool IsCPSRDead<MachineInstr>(const MachineInstr *MI) {
683  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
684  const MachineOperand &MO = MI->getOperand(i);
685  if (!MO.isReg() || MO.isUndef() || MO.isUse())
686  continue;
687  if (MO.getReg() != ARM::CPSR)
688  continue;
689  if (!MO.isDead())
690  return false;
691  }
692  // all definitions of CPSR are dead
693  return true;
694 }
695 
696 } // end namespace llvm
697 
698 /// GetInstSize - Return the size of the specified MachineInstr.
699 ///
700 unsigned ARMBaseInstrInfo::getInstSizeInBytes(const MachineInstr &MI) const {
701  const MachineBasicBlock &MBB = *MI.getParent();
702  const MachineFunction *MF = MBB.getParent();
703  const MCAsmInfo *MAI = MF->getTarget().getMCAsmInfo();
704 
705  const MCInstrDesc &MCID = MI.getDesc();
706  if (MCID.getSize())
707  return MCID.getSize();
708 
709  // If this machine instr is an inline asm, measure it.
710  if (MI.getOpcode() == ARM::INLINEASM) {
711  unsigned Size = getInlineAsmLength(MI.getOperand(0).getSymbolName(), *MAI);
712  if (!MF->getInfo<ARMFunctionInfo>()->isThumbFunction())
713  Size = alignTo(Size, 4);
714  return Size;
715  }
716  unsigned Opc = MI.getOpcode();
717  switch (Opc) {
718  default:
719  // pseudo-instruction sizes are zero.
720  return 0;
721  case TargetOpcode::BUNDLE:
722  return getInstBundleLength(MI);
723  case ARM::MOVi16_ga_pcrel:
724  case ARM::MOVTi16_ga_pcrel:
725  case ARM::t2MOVi16_ga_pcrel:
726  case ARM::t2MOVTi16_ga_pcrel:
727  return 4;
728  case ARM::MOVi32imm:
729  case ARM::t2MOVi32imm:
730  return 8;
731  case ARM::CONSTPOOL_ENTRY:
732  case ARM::JUMPTABLE_INSTS:
733  case ARM::JUMPTABLE_ADDRS:
734  case ARM::JUMPTABLE_TBB:
735  case ARM::JUMPTABLE_TBH:
736  // If this machine instr is a constant pool entry, its size is recorded as
737  // operand #2.
738  return MI.getOperand(2).getImm();
739  case ARM::Int_eh_sjlj_longjmp:
740  return 16;
741  case ARM::tInt_eh_sjlj_longjmp:
742  return 10;
743  case ARM::tInt_WIN_eh_sjlj_longjmp:
744  return 12;
745  case ARM::Int_eh_sjlj_setjmp:
746  case ARM::Int_eh_sjlj_setjmp_nofp:
747  return 20;
748  case ARM::tInt_eh_sjlj_setjmp:
749  case ARM::t2Int_eh_sjlj_setjmp:
750  case ARM::t2Int_eh_sjlj_setjmp_nofp:
751  return 12;
752  case ARM::SPACE:
753  return MI.getOperand(1).getImm();
754  }
755 }
756 
757 unsigned ARMBaseInstrInfo::getInstBundleLength(const MachineInstr &MI) const {
758  unsigned Size = 0;
759  MachineBasicBlock::const_instr_iterator I = MI.getIterator();
760  MachineBasicBlock::const_instr_iterator E = MI.getParent()->instr_end();
761  while (++I != E && I->isInsideBundle()) {
762  assert(!I->isBundle() && "No nested bundle!");
763  Size += getInstSizeInBytes(*I);
764  }
765  return Size;
766 }
767 
768 void ARMBaseInstrInfo::copyFromCPSR(MachineBasicBlock &MBB,
769  MachineBasicBlock::iterator I,
770  unsigned DestReg, bool KillSrc,
771  const ARMSubtarget &Subtarget) const {
772  unsigned Opc = Subtarget.isThumb()
773  ? (Subtarget.isMClass() ? ARM::t2MRS_M : ARM::t2MRS_AR)
774  : ARM::MRS;
775 
776  MachineInstrBuilder MIB =
777  BuildMI(MBB, I, I->getDebugLoc(), get(Opc), DestReg);
778 
779  // There is only 1 A/R class MRS instruction, and it always refers to
780  // APSR. However, there are lots of other possibilities on M-class cores.
781  if (Subtarget.isMClass())
782  MIB.addImm(0x800);
783 
784  MIB.add(predOps(ARMCC::AL))
785  .addReg(ARM::CPSR, RegState::Implicit | getKillRegState(KillSrc));
786 }
787 
788 void ARMBaseInstrInfo::copyToCPSR(MachineBasicBlock &MBB,
789  MachineBasicBlock::iterator I,
790  unsigned SrcReg, bool KillSrc,
791  const ARMSubtarget &Subtarget) const {
792  unsigned Opc = Subtarget.isThumb()
793  ? (Subtarget.isMClass() ? ARM::t2MSR_M : ARM::t2MSR_AR)
794  : ARM::MSR;
795 
796  MachineInstrBuilder MIB = BuildMI(MBB, I, I->getDebugLoc(), get(Opc));
797 
798  if (Subtarget.isMClass())
799  MIB.addImm(0x800);
800  else
801  MIB.addImm(8);
802 
803  MIB.addReg(SrcReg, getKillRegState(KillSrc))
804  .add(predOps(ARMCC::AL))
805  .addReg(ARM::CPSR, RegState::Implicit | RegState::Define);
806 }
807 
808 void ARMBaseInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
809  MachineBasicBlock::iterator I,
810  const DebugLoc &DL, unsigned DestReg,
811  unsigned SrcReg, bool KillSrc) const {
812  bool GPRDest = ARM::GPRRegClass.contains(DestReg);
813  bool GPRSrc = ARM::GPRRegClass.contains(SrcReg);
814 
815  if (GPRDest && GPRSrc) {
816  BuildMI(MBB, I, DL, get(ARM::MOVr), DestReg)
817  .addReg(SrcReg, getKillRegState(KillSrc))
818  .add(predOps(ARMCC::AL))
819  .add(condCodeOp());
820  return;
821  }
822 
823  bool SPRDest = ARM::SPRRegClass.contains(DestReg);
824  bool SPRSrc = ARM::SPRRegClass.contains(SrcReg);
825 
826  unsigned Opc = 0;
827  if (SPRDest && SPRSrc)
828  Opc = ARM::VMOVS;
829  else if (GPRDest && SPRSrc)
830  Opc = ARM::VMOVRS;
831  else if (SPRDest && GPRSrc)
832  Opc = ARM::VMOVSR;
833  else if (ARM::DPRRegClass.contains(DestReg, SrcReg) && !Subtarget.isFPOnlySP())
834  Opc = ARM::VMOVD;
835  else if (ARM::QPRRegClass.contains(DestReg, SrcReg))
836  Opc = ARM::VORRq;
837 
838  if (Opc) {
839  MachineInstrBuilder MIB = BuildMI(MBB, I, DL, get(Opc), DestReg);
840  MIB.addReg(SrcReg, getKillRegState(KillSrc));
841  if (Opc == ARM::VORRq)
842  MIB.addReg(SrcReg, getKillRegState(KillSrc));
843  MIB.add(predOps(ARMCC::AL));
844  return;
845  }
846 
847  // Handle register classes that require multiple instructions.
848  unsigned BeginIdx = 0;
849  unsigned SubRegs = 0;
850  int Spacing = 1;
851 
852  // Use VORRq when possible.
853  if (ARM::QQPRRegClass.contains(DestReg, SrcReg)) {
854  Opc = ARM::VORRq;
855  BeginIdx = ARM::qsub_0;
856  SubRegs = 2;
857  } else if (ARM::QQQQPRRegClass.contains(DestReg, SrcReg)) {
858  Opc = ARM::VORRq;
859  BeginIdx = ARM::qsub_0;
860  SubRegs = 4;
861  // Fall back to VMOVD.
862  } else if (ARM::DPairRegClass.contains(DestReg, SrcReg)) {
863  Opc = ARM::VMOVD;
864  BeginIdx = ARM::dsub_0;
865  SubRegs = 2;
866  } else if (ARM::DTripleRegClass.contains(DestReg, SrcReg)) {
867  Opc = ARM::VMOVD;
868  BeginIdx = ARM::dsub_0;
869  SubRegs = 3;
870  } else if (ARM::DQuadRegClass.contains(DestReg, SrcReg)) {
871  Opc = ARM::VMOVD;
872  BeginIdx = ARM::dsub_0;
873  SubRegs = 4;
874  } else if (ARM::GPRPairRegClass.contains(DestReg, SrcReg)) {
875  Opc = Subtarget.isThumb2() ? ARM::tMOVr : ARM::MOVr;
876  BeginIdx = ARM::gsub_0;
877  SubRegs = 2;
878  } else if (ARM::DPairSpcRegClass.contains(DestReg, SrcReg)) {
879  Opc = ARM::VMOVD;
880  BeginIdx = ARM::dsub_0;
881  SubRegs = 2;
882  Spacing = 2;
883  } else if (ARM::DTripleSpcRegClass.contains(DestReg, SrcReg)) {
884  Opc = ARM::VMOVD;
885  BeginIdx = ARM::dsub_0;
886  SubRegs = 3;
887  Spacing = 2;
888  } else if (ARM::DQuadSpcRegClass.contains(DestReg, SrcReg)) {
889  Opc = ARM::VMOVD;
890  BeginIdx = ARM::dsub_0;
891  SubRegs = 4;
892  Spacing = 2;
893  } else if (ARM::DPRRegClass.contains(DestReg, SrcReg) && Subtarget.isFPOnlySP()) {
894  Opc = ARM::VMOVS;
895  BeginIdx = ARM::ssub_0;
896  SubRegs = 2;
897  } else if (SrcReg == ARM::CPSR) {
898  copyFromCPSR(MBB, I, DestReg, KillSrc, Subtarget);
899  return;
900  } else if (DestReg == ARM::CPSR) {
901  copyToCPSR(MBB, I, SrcReg, KillSrc, Subtarget);
902  return;
903  }
904 
905  assert(Opc && "Impossible reg-to-reg copy");
906 
909 
907  const TargetRegisterInfo *TRI = &getRegisterInfo();
908  MachineInstrBuilder Mov;
910  // Copy register tuples backward when the first Dest reg overlaps with SrcReg.
911  if (TRI->regsOverlap(SrcReg, TRI->getSubReg(DestReg, BeginIdx))) {
912  BeginIdx = BeginIdx + ((SubRegs - 1) * Spacing);
913  Spacing = -Spacing;
914  }
915 #ifndef NDEBUG
916  SmallSet<unsigned, 4> DstRegs;
917 #endif
918  for (unsigned i = 0; i != SubRegs; ++i) {
919  unsigned Dst = TRI->getSubReg(DestReg, BeginIdx + i * Spacing);
920  unsigned Src = TRI->getSubReg(SrcReg, BeginIdx + i * Spacing);
921  assert(Dst && Src && "Bad sub-register");
922 #ifndef NDEBUG
923  assert(!DstRegs.count(Src) && "destructive vector copy");
924  DstRegs.insert(Dst);
925 #endif
926  Mov = BuildMI(MBB, I, I->getDebugLoc(), get(Opc), Dst).addReg(Src);
927  // VORR takes two source operands.
928  if (Opc == ARM::VORRq)
929  Mov.addReg(Src);
930  Mov = Mov.add(predOps(ARMCC::AL));
931  // MOVr can set CC.
932  if (Opc == ARM::MOVr)
933  Mov = Mov.add(condCodeOp());
934  }
935  // Add implicit super-register defs and kills to the last instruction.
936  Mov->addRegisterDefined(DestReg, TRI);
937  if (KillSrc)
938  Mov->addRegisterKilled(SrcReg, TRI);
939 }
940 
941 bool ARMBaseInstrInfo::isCopyInstrImpl(const MachineInstr &MI,
942  const MachineOperand *&Src,
943  const MachineOperand *&Dest) const {
944  // VMOVRRD is also a copy instruction but it requires a
945  // special way of handling. It is a more complex copy version
946  // and so we are not considering it here. For recognition
947  // of such an instruction the isExtractSubregLike MI interface function
948  // could be used.
949  // VORRq is considered as a move only if two inputs are
950  // the same register.
951  if (!MI.isMoveReg() ||
952  (MI.getOpcode() == ARM::VORRq &&
953  MI.getOperand(1).getReg() != MI.getOperand(2).getReg()))
954  return false;
955  Dest = &MI.getOperand(0);
956  Src = &MI.getOperand(1);
957  return true;
958 }
959 
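// AddDReg - Append Reg (or the requested sub-register of it) to MIB: physical
// registers are resolved to the concrete sub-register, virtual registers keep
// the sub-register index on the operand.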
960 const MachineInstrBuilder &
961 ARMBaseInstrInfo::AddDReg(MachineInstrBuilder &MIB, unsigned Reg,
962  unsigned SubIdx, unsigned State,
963  const TargetRegisterInfo *TRI) const {
964  if (!SubIdx)
965  return MIB.addReg(Reg, State);
966 
967  if (TargetRegisterInfo::isPhysicalRegister(Reg))
968  return MIB.addReg(TRI->getSubReg(Reg, SubIdx), State);
969  return MIB.addReg(Reg, State, SubIdx);
970 }
971 
972 void ARMBaseInstrInfo::
973 storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
974  unsigned SrcReg, bool isKill, int FI,
975  const TargetRegisterClass *RC,
976  const TargetRegisterInfo *TRI) const {
977  MachineFunction &MF = *MBB.getParent();
978  MachineFrameInfo &MFI = MF.getFrameInfo();
979  unsigned Align = MFI.getObjectAlignment(FI);
980 
981  MachineMemOperand *MMO = MF.getMachineMemOperand(
982  MachinePointerInfo::getFixedStack(MF, FI), MachineMemOperand::MOStore,
983  MFI.getObjectSize(FI), Align);
984 
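// Dispatch on the spill size of the register class: single registers use plain
// store instructions, register pairs use STRD or STM, and larger NEON tuples
// use VSTM or aligned VST1 sequences.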
985  switch (TRI->getSpillSize(*RC)) {
986  case 2:
987  if (ARM::HPRRegClass.hasSubClassEq(RC)) {
988  BuildMI(MBB, I, DebugLoc(), get(ARM::VSTRH))
989  .addReg(SrcReg, getKillRegState(isKill))
990  .addFrameIndex(FI)
991  .addImm(0)
992  .addMemOperand(MMO)
993  .add(predOps(ARMCC::AL));
994  } else
995  llvm_unreachable("Unknown reg class!");
996  break;
997  case 4:
998  if (ARM::GPRRegClass.hasSubClassEq(RC)) {
999  BuildMI(MBB, I, DebugLoc(), get(ARM::STRi12))
1000  .addReg(SrcReg, getKillRegState(isKill))
1001  .addFrameIndex(FI)
1002  .addImm(0)
1003  .addMemOperand(MMO)
1004  .add(predOps(ARMCC::AL));
1005  } else if (ARM::SPRRegClass.hasSubClassEq(RC)) {
1006  BuildMI(MBB, I, DebugLoc(), get(ARM::VSTRS))
1007  .addReg(SrcReg, getKillRegState(isKill))
1008  .addFrameIndex(FI)
1009  .addImm(0)
1010  .addMemOperand(MMO)
1011  .add(predOps(ARMCC::AL));
1012  } else
1013  llvm_unreachable("Unknown reg class!");
1014  break;
1015  case 8:
1016  if (ARM::DPRRegClass.hasSubClassEq(RC)) {
1017  BuildMI(MBB, I, DebugLoc(), get(ARM::VSTRD))
1018  .addReg(SrcReg, getKillRegState(isKill))
1019  .addFrameIndex(FI)
1020  .addImm(0)
1021  .addMemOperand(MMO)
1022  .add(predOps(ARMCC::AL));
1023  } else if (ARM::GPRPairRegClass.hasSubClassEq(RC)) {
1024  if (Subtarget.hasV5TEOps()) {
1025  MachineInstrBuilder MIB = BuildMI(MBB, I, DebugLoc(), get(ARM::STRD));
1026  AddDReg(MIB, SrcReg, ARM::gsub_0, getKillRegState(isKill), TRI);
1027  AddDReg(MIB, SrcReg, ARM::gsub_1, 0, TRI);
1028  MIB.addFrameIndex(FI).addReg(0).addImm(0).addMemOperand(MMO)
1029  .add(predOps(ARMCC::AL));
1030  } else {
1031  // Fallback to STM instruction, which has existed since the dawn of
1032  // time.
1033  MachineInstrBuilder MIB = BuildMI(MBB, I, DebugLoc(), get(ARM::STMIA))
1034  .addFrameIndex(FI)
1035  .addMemOperand(MMO)
1036  .add(predOps(ARMCC::AL));
1037  AddDReg(MIB, SrcReg, ARM::gsub_0, getKillRegState(isKill), TRI);
1038  AddDReg(MIB, SrcReg, ARM::gsub_1, 0, TRI);
1039  }
1040  } else
1041  llvm_unreachable("Unknown reg class!");
1042  break;
1043  case 16:
1044  if (ARM::DPairRegClass.hasSubClassEq(RC)) {
1045  // Use aligned spills if the stack can be realigned.
1046  if (Align >= 16 && getRegisterInfo().canRealignStack(MF)) {
1047  BuildMI(MBB, I, DebugLoc(), get(ARM::VST1q64))
1048  .addFrameIndex(FI)
1049  .addImm(16)
1050  .addReg(SrcReg, getKillRegState(isKill))
1051  .addMemOperand(MMO)
1052  .add(predOps(ARMCC::AL));
1053  } else {
1054  BuildMI(MBB, I, DebugLoc(), get(ARM::VSTMQIA))
1055  .addReg(SrcReg, getKillRegState(isKill))
1056  .addFrameIndex(FI)
1057  .addMemOperand(MMO)
1058  .add(predOps(ARMCC::AL));
1059  }
1060  } else
1061  llvm_unreachable("Unknown reg class!");
1062  break;
1063  case 24:
1064  if (ARM::DTripleRegClass.hasSubClassEq(RC)) {
1065  // Use aligned spills if the stack can be realigned.
1066  if (Align >= 16 && getRegisterInfo().canRealignStack(MF)) {
1067  BuildMI(MBB, I, DebugLoc(), get(ARM::VST1d64TPseudo))
1068  .addFrameIndex(FI)
1069  .addImm(16)
1070  .addReg(SrcReg, getKillRegState(isKill))
1071  .addMemOperand(MMO)
1072  .add(predOps(ARMCC::AL));
1073  } else {
1074  MachineInstrBuilder MIB = BuildMI(MBB, I, DebugLoc(),
1075  get(ARM::VSTMDIA))
1076  .addFrameIndex(FI)
1077  .add(predOps(ARMCC::AL))
1078  .addMemOperand(MMO);
1079  MIB = AddDReg(MIB, SrcReg, ARM::dsub_0, getKillRegState(isKill), TRI);
1080  MIB = AddDReg(MIB, SrcReg, ARM::dsub_1, 0, TRI);
1081  AddDReg(MIB, SrcReg, ARM::dsub_2, 0, TRI);
1082  }
1083  } else
1084  llvm_unreachable("Unknown reg class!");
1085  break;
1086  case 32:
1087  if (ARM::QQPRRegClass.hasSubClassEq(RC) || ARM::DQuadRegClass.hasSubClassEq(RC)) {
1088  if (Align >= 16 && getRegisterInfo().canRealignStack(MF)) {
1089  // FIXME: It's possible to only store part of the QQ register if the
1090  // spilled def has a sub-register index.
1091  BuildMI(MBB, I, DebugLoc(), get(ARM::VST1d64QPseudo))
1092  .addFrameIndex(FI)
1093  .addImm(16)
1094  .addReg(SrcReg, getKillRegState(isKill))
1095  .addMemOperand(MMO)
1096  .add(predOps(ARMCC::AL));
1097  } else {
1098  MachineInstrBuilder MIB = BuildMI(MBB, I, DebugLoc(),
1099  get(ARM::VSTMDIA))
1100  .addFrameIndex(FI)
1101  .add(predOps(ARMCC::AL))
1102  .addMemOperand(MMO);
1103  MIB = AddDReg(MIB, SrcReg, ARM::dsub_0, getKillRegState(isKill), TRI);
1104  MIB = AddDReg(MIB, SrcReg, ARM::dsub_1, 0, TRI);
1105  MIB = AddDReg(MIB, SrcReg, ARM::dsub_2, 0, TRI);
1106  AddDReg(MIB, SrcReg, ARM::dsub_3, 0, TRI);
1107  }
1108  } else
1109  llvm_unreachable("Unknown reg class!");
1110  break;
1111  case 64:
1112  if (ARM::QQQQPRRegClass.hasSubClassEq(RC)) {
1113  MachineInstrBuilder MIB = BuildMI(MBB, I, DebugLoc(), get(ARM::VSTMDIA))
1114  .addFrameIndex(FI)
1115  .add(predOps(ARMCC::AL))
1116  .addMemOperand(MMO);
1117  MIB = AddDReg(MIB, SrcReg, ARM::dsub_0, getKillRegState(isKill), TRI);
1118  MIB = AddDReg(MIB, SrcReg, ARM::dsub_1, 0, TRI);
1119  MIB = AddDReg(MIB, SrcReg, ARM::dsub_2, 0, TRI);
1120  MIB = AddDReg(MIB, SrcReg, ARM::dsub_3, 0, TRI);
1121  MIB = AddDReg(MIB, SrcReg, ARM::dsub_4, 0, TRI);
1122  MIB = AddDReg(MIB, SrcReg, ARM::dsub_5, 0, TRI);
1123  MIB = AddDReg(MIB, SrcReg, ARM::dsub_6, 0, TRI);
1124  AddDReg(MIB, SrcReg, ARM::dsub_7, 0, TRI);
1125  } else
1126  llvm_unreachable("Unknown reg class!");
1127  break;
1128  default:
1129  llvm_unreachable("Unknown reg class!");
1130  }
1131 }
1132 
1133 unsigned ARMBaseInstrInfo::isStoreToStackSlot(const MachineInstr &MI,
1134  int &FrameIndex) const {
1135  switch (MI.getOpcode()) {
1136  default: break;
1137  case ARM::STRrs:
1138  case ARM::t2STRs: // FIXME: don't use t2STRs to access frame.
1139  if (MI.getOperand(1).isFI() && MI.getOperand(2).isReg() &&
1140  MI.getOperand(3).isImm() && MI.getOperand(2).getReg() == 0 &&
1141  MI.getOperand(3).getImm() == 0) {
1142  FrameIndex = MI.getOperand(1).getIndex();
1143  return MI.getOperand(0).getReg();
1144  }
1145  break;
1146  case ARM::STRi12:
1147  case ARM::t2STRi12:
1148  case ARM::tSTRspi:
1149  case ARM::VSTRD:
1150  case ARM::VSTRS:
1151  if (MI.getOperand(1).isFI() && MI.getOperand(2).isImm() &&
1152  MI.getOperand(2).getImm() == 0) {
1153  FrameIndex = MI.getOperand(1).getIndex();
1154  return MI.getOperand(0).getReg();
1155  }
1156  break;
1157  case ARM::VST1q64:
1158  case ARM::VST1d64TPseudo:
1159  case ARM::VST1d64QPseudo:
1160  if (MI.getOperand(0).isFI() && MI.getOperand(2).getSubReg() == 0) {
1161  FrameIndex = MI.getOperand(0).getIndex();
1162  return MI.getOperand(2).getReg();
1163  }
1164  break;
1165  case ARM::VSTMQIA:
1166  if (MI.getOperand(1).isFI() && MI.getOperand(0).getSubReg() == 0) {
1167  FrameIndex = MI.getOperand(1).getIndex();
1168  return MI.getOperand(0).getReg();
1169  }
1170  break;
1171  }
1172 
1173  return 0;
1174 }
1175 
1176 bool ARMBaseInstrInfo::isStoreToStackSlotPostFE(const MachineInstr &MI,
1177  int &FrameIndex) const {
1178  SmallVector<const MachineMemOperand *, 1> Accesses;
1179  if (MI.mayStore() && hasStoreToStackSlot(MI, Accesses) &&
1180  Accesses.size() == 1) {
1181  FrameIndex =
1182  cast<FixedStackPseudoSourceValue>(Accesses.front()->getPseudoValue())
1183  ->getFrameIndex();
1184  return true;
1185  }
1186  return false;
1187 }
1188 
1189 void ARMBaseInstrInfo::
1190 loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
1191  unsigned DestReg, int FI,
1192  const TargetRegisterClass *RC,
1193  const TargetRegisterInfo *TRI) const {
1194  DebugLoc DL;
1195  if (I != MBB.end()) DL = I->getDebugLoc();
1196  MachineFunction &MF = *MBB.getParent();
1197  MachineFrameInfo &MFI = MF.getFrameInfo();
1198  unsigned Align = MFI.getObjectAlignment(FI);
1199  MachineMemOperand *MMO = MF.getMachineMemOperand(
1200  MachinePointerInfo::getFixedStack(MF, FI), MachineMemOperand::MOLoad,
1201  MFI.getObjectSize(FI), Align);
1202 
1203  switch (TRI->getSpillSize(*RC)) {
1204  case 2:
1205  if (ARM::HPRRegClass.hasSubClassEq(RC)) {
1206  BuildMI(MBB, I, DL, get(ARM::VLDRH), DestReg)
1207  .addFrameIndex(FI)
1208  .addImm(0)
1209  .addMemOperand(MMO)
1210  .add(predOps(ARMCC::AL));
1211  } else
1212  llvm_unreachable("Unknown reg class!");
1213  break;
1214  case 4:
1215  if (ARM::GPRRegClass.hasSubClassEq(RC)) {
1216  BuildMI(MBB, I, DL, get(ARM::LDRi12), DestReg)
1217  .addFrameIndex(FI)
1218  .addImm(0)
1219  .addMemOperand(MMO)
1220  .add(predOps(ARMCC::AL));
1221  } else if (ARM::SPRRegClass.hasSubClassEq(RC)) {
1222  BuildMI(MBB, I, DL, get(ARM::VLDRS), DestReg)
1223  .addFrameIndex(FI)
1224  .addImm(0)
1225  .addMemOperand(MMO)
1226  .add(predOps(ARMCC::AL));
1227  } else
1228  llvm_unreachable("Unknown reg class!");
1229  break;
1230  case 8:
1231  if (ARM::DPRRegClass.hasSubClassEq(RC)) {
1232  BuildMI(MBB, I, DL, get(ARM::VLDRD), DestReg)
1233  .addFrameIndex(FI)
1234  .addImm(0)
1235  .addMemOperand(MMO)
1236  .add(predOps(ARMCC::AL));
1237  } else if (ARM::GPRPairRegClass.hasSubClassEq(RC)) {
1238  MachineInstrBuilder MIB;
1239 
1240  if (Subtarget.hasV5TEOps()) {
1241  MIB = BuildMI(MBB, I, DL, get(ARM::LDRD));
1242  AddDReg(MIB, DestReg, ARM::gsub_0, RegState::DefineNoRead, TRI);
1243  AddDReg(MIB, DestReg, ARM::gsub_1, RegState::DefineNoRead, TRI);
1244  MIB.addFrameIndex(FI).addReg(0).addImm(0).addMemOperand(MMO)
1245  .add(predOps(ARMCC::AL));
1246  } else {
1247  // Fallback to LDM instruction, which has existed since the dawn of
1248  // time.
1249  MIB = BuildMI(MBB, I, DL, get(ARM::LDMIA))
1250  .addFrameIndex(FI)
1251  .addMemOperand(MMO)
1252  .add(predOps(ARMCC::AL));
1253  MIB = AddDReg(MIB, DestReg, ARM::gsub_0, RegState::DefineNoRead, TRI);
1254  MIB = AddDReg(MIB, DestReg, ARM::gsub_1, RegState::DefineNoRead, TRI);
1255  }
1256 
1257  if (TargetRegisterInfo::isPhysicalRegister(DestReg))
1258  MIB.addReg(DestReg, RegState::ImplicitDefine);
1259  } else
1260  llvm_unreachable("Unknown reg class!");
1261  break;
1262  case 16:
1263  if (ARM::DPairRegClass.hasSubClassEq(RC)) {
1264  if (Align >= 16 && getRegisterInfo().canRealignStack(MF)) {
1265  BuildMI(MBB, I, DL, get(ARM::VLD1q64), DestReg)
1266  .addFrameIndex(FI)
1267  .addImm(16)
1268  .addMemOperand(MMO)
1269  .add(predOps(ARMCC::AL));
1270  } else {
1271  BuildMI(MBB, I, DL, get(ARM::VLDMQIA), DestReg)
1272  .addFrameIndex(FI)
1273  .addMemOperand(MMO)
1274  .add(predOps(ARMCC::AL));
1275  }
1276  } else
1277  llvm_unreachable("Unknown reg class!");
1278  break;
1279  case 24:
1280  if (ARM::DTripleRegClass.hasSubClassEq(RC)) {
1281  if (Align >= 16 && getRegisterInfo().canRealignStack(MF)) {
1282  BuildMI(MBB, I, DL, get(ARM::VLD1d64TPseudo), DestReg)
1283  .addFrameIndex(FI)
1284  .addImm(16)
1285  .addMemOperand(MMO)
1286  .add(predOps(ARMCC::AL));
1287  } else {
1288  MachineInstrBuilder MIB = BuildMI(MBB, I, DL, get(ARM::VLDMDIA))
1289  .addFrameIndex(FI)
1290  .addMemOperand(MMO)
1291  .add(predOps(ARMCC::AL));
1292  MIB = AddDReg(MIB, DestReg, ARM::dsub_0, RegState::DefineNoRead, TRI);
1293  MIB = AddDReg(MIB, DestReg, ARM::dsub_1, RegState::DefineNoRead, TRI);
1294  MIB = AddDReg(MIB, DestReg, ARM::dsub_2, RegState::DefineNoRead, TRI);
1295  if (TargetRegisterInfo::isPhysicalRegister(DestReg))
1296  MIB.addReg(DestReg, RegState::ImplicitDefine);
1297  }
1298  } else
1299  llvm_unreachable("Unknown reg class!");
1300  break;
1301  case 32:
1302  if (ARM::QQPRRegClass.hasSubClassEq(RC) || ARM::DQuadRegClass.hasSubClassEq(RC)) {
1303  if (Align >= 16 && getRegisterInfo().canRealignStack(MF)) {
1304  BuildMI(MBB, I, DL, get(ARM::VLD1d64QPseudo), DestReg)
1305  .addFrameIndex(FI)
1306  .addImm(16)
1307  .addMemOperand(MMO)
1308  .add(predOps(ARMCC::AL));
1309  } else {
1310  MachineInstrBuilder MIB = BuildMI(MBB, I, DL, get(ARM::VLDMDIA))
1311  .addFrameIndex(FI)
1312  .add(predOps(ARMCC::AL))
1313  .addMemOperand(MMO);
1314  MIB = AddDReg(MIB, DestReg, ARM::dsub_0, RegState::DefineNoRead, TRI);
1315  MIB = AddDReg(MIB, DestReg, ARM::dsub_1, RegState::DefineNoRead, TRI);
1316  MIB = AddDReg(MIB, DestReg, ARM::dsub_2, RegState::DefineNoRead, TRI);
1317  MIB = AddDReg(MIB, DestReg, ARM::dsub_3, RegState::DefineNoRead, TRI);
1318  if (TargetRegisterInfo::isPhysicalRegister(DestReg))
1319  MIB.addReg(DestReg, RegState::ImplicitDefine);
1320  }
1321  } else
1322  llvm_unreachable("Unknown reg class!");
1323  break;
1324  case 64:
1325  if (ARM::QQQQPRRegClass.hasSubClassEq(RC)) {
1326  MachineInstrBuilder MIB = BuildMI(MBB, I, DL, get(ARM::VLDMDIA))
1327  .addFrameIndex(FI)
1328  .add(predOps(ARMCC::AL))
1329  .addMemOperand(MMO);
1330  MIB = AddDReg(MIB, DestReg, ARM::dsub_0, RegState::DefineNoRead, TRI);
1331  MIB = AddDReg(MIB, DestReg, ARM::dsub_1, RegState::DefineNoRead, TRI);
1332  MIB = AddDReg(MIB, DestReg, ARM::dsub_2, RegState::DefineNoRead, TRI);
1333  MIB = AddDReg(MIB, DestReg, ARM::dsub_3, RegState::DefineNoRead, TRI);
1334  MIB = AddDReg(MIB, DestReg, ARM::dsub_4, RegState::DefineNoRead, TRI);
1335  MIB = AddDReg(MIB, DestReg, ARM::dsub_5, RegState::DefineNoRead, TRI);
1336  MIB = AddDReg(MIB, DestReg, ARM::dsub_6, RegState::DefineNoRead, TRI);
1337  MIB = AddDReg(MIB, DestReg, ARM::dsub_7, RegState::DefineNoRead, TRI);
1338  if (TargetRegisterInfo::isPhysicalRegister(DestReg))
1339  MIB.addReg(DestReg, RegState::ImplicitDefine);
1340  } else
1341  llvm_unreachable("Unknown reg class!");
1342  break;
1343  default:
1344  llvm_unreachable("Unknown regclass!");
1345  }
1346 }
1347 
1348 unsigned ARMBaseInstrInfo::isLoadFromStackSlot(const MachineInstr &MI,
1349  int &FrameIndex) const {
1350  switch (MI.getOpcode()) {
1351  default: break;
1352  case ARM::LDRrs:
1353  case ARM::t2LDRs: // FIXME: don't use t2LDRs to access frame.
1354  if (MI.getOperand(1).isFI() && MI.getOperand(2).isReg() &&
1355  MI.getOperand(3).isImm() && MI.getOperand(2).getReg() == 0 &&
1356  MI.getOperand(3).getImm() == 0) {
1357  FrameIndex = MI.getOperand(1).getIndex();
1358  return MI.getOperand(0).getReg();
1359  }
1360  break;
1361  case ARM::LDRi12:
1362  case ARM::t2LDRi12:
1363  case ARM::tLDRspi:
1364  case ARM::VLDRD:
1365  case ARM::VLDRS:
1366  if (MI.getOperand(1).isFI() && MI.getOperand(2).isImm() &&
1367  MI.getOperand(2).getImm() == 0) {
1368  FrameIndex = MI.getOperand(1).getIndex();
1369  return MI.getOperand(0).getReg();
1370  }
1371  break;
1372  case ARM::VLD1q64:
1373  case ARM::VLD1d8TPseudo:
1374  case ARM::VLD1d16TPseudo:
1375  case ARM::VLD1d32TPseudo:
1376  case ARM::VLD1d64TPseudo:
1377  case ARM::VLD1d8QPseudo:
1378  case ARM::VLD1d16QPseudo:
1379  case ARM::VLD1d32QPseudo:
1380  case ARM::VLD1d64QPseudo:
1381  if (MI.getOperand(1).isFI() && MI.getOperand(0).getSubReg() == 0) {
1382  FrameIndex = MI.getOperand(1).getIndex();
1383  return MI.getOperand(0).getReg();
1384  }
1385  break;
1386  case ARM::VLDMQIA:
1387  if (MI.getOperand(1).isFI() && MI.getOperand(0).getSubReg() == 0) {
1388  FrameIndex = MI.getOperand(1).getIndex();
1389  return MI.getOperand(0).getReg();
1390  }
1391  break;
1392  }
1393 
1394  return 0;
1395 }
1396 
1397 bool ARMBaseInstrInfo::isLoadFromStackSlotPostFE(const MachineInstr &MI,
1398  int &FrameIndex) const {
1399  SmallVector<const MachineMemOperand *, 1> Accesses;
1400  if (MI.mayLoad() && hasLoadFromStackSlot(MI, Accesses) &&
1401  Accesses.size() == 1) {
1402  FrameIndex =
1403  cast<FixedStackPseudoSourceValue>(Accesses.front()->getPseudoValue())
1404  ->getFrameIndex();
1405  return true;
1406  }
1407  return false;
1408 }
1409 
1410 /// Expands MEMCPY to either LDMIA/STMIA or LDMIA_UPD/STMIA_UPD
1411 /// depending on whether the result is used.
1412 void ARMBaseInstrInfo::expandMEMCPY(MachineBasicBlock::iterator MI) const {
1413  bool isThumb1 = Subtarget.isThumb1Only();
1414  bool isThumb2 = Subtarget.isThumb2();
1415  const ARMBaseInstrInfo *TII = Subtarget.getInstrInfo();
1416 
1417  DebugLoc dl = MI->getDebugLoc();
1418  MachineBasicBlock *BB = MI->getParent();
1419 
1420  MachineInstrBuilder LDM, STM;
1421  if (isThumb1 || !MI->getOperand(1).isDead()) {
1422  MachineOperand LDWb(MI->getOperand(1));
1423  LDM = BuildMI(*BB, MI, dl, TII->get(isThumb2 ? ARM::t2LDMIA_UPD
1424  : isThumb1 ? ARM::tLDMIA_UPD
1425  : ARM::LDMIA_UPD))
1426  .add(LDWb);
1427  } else {
1428  LDM = BuildMI(*BB, MI, dl, TII->get(isThumb2 ? ARM::t2LDMIA : ARM::LDMIA));
1429  }
1430 
1431  if (isThumb1 || !MI->getOperand(0).isDead()) {
1432  MachineOperand STWb(MI->getOperand(0));
1433  STM = BuildMI(*BB, MI, dl, TII->get(isThumb2 ? ARM::t2STMIA_UPD
1434  : isThumb1 ? ARM::tSTMIA_UPD
1435  : ARM::STMIA_UPD))
1436  .add(STWb);
1437  } else {
1438  STM = BuildMI(*BB, MI, dl, TII->get(isThumb2 ? ARM::t2STMIA : ARM::STMIA));
1439  }
1440 
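// Operand 2 of the MEMCPY pseudo is the store base and operand 3 is the load
// base; both expanded instructions are predicated with AL.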
1441  MachineOperand LDBase(MI->getOperand(3));
1442  LDM.add(LDBase).add(predOps(ARMCC::AL));
1443 
1444  MachineOperand STBase(MI->getOperand(2));
1445  STM.add(STBase).add(predOps(ARMCC::AL));
1446 
1447  // Sort the scratch registers into ascending order.
1448  const TargetRegisterInfo &TRI = getRegisterInfo();
1449  SmallVector<unsigned, 6> ScratchRegs;
1450  for(unsigned I = 5; I < MI->getNumOperands(); ++I)
1451  ScratchRegs.push_back(MI->getOperand(I).getReg());
1452  llvm::sort(ScratchRegs,
1453  [&TRI](const unsigned &Reg1, const unsigned &Reg2) -> bool {
1454  return TRI.getEncodingValue(Reg1) <
1455  TRI.getEncodingValue(Reg2);
1456  });
1457 
1458  for (const auto &Reg : ScratchRegs) {
1459  LDM.addReg(Reg, RegState::Define);
1460  STM.addReg(Reg, RegState::Kill);
1461  }
1462 
1463  BB->erase(MI);
1464 }
1465 
1466 bool ARMBaseInstrInfo::expandPostRAPseudo(MachineInstr &MI) const {
1467  if (MI.getOpcode() == TargetOpcode::LOAD_STACK_GUARD) {
1468  assert(getSubtarget().getTargetTriple().isOSBinFormatMachO() &&
1469  "LOAD_STACK_GUARD currently supported only for MachO.");
1470  expandLoadStackGuard(MI);
1471  MI.getParent()->erase(MI);
1472  return true;
1473  }
1474 
1475  if (MI.getOpcode() == ARM::MEMCPY) {
1476  expandMEMCPY(MI);
1477  return true;
1478  }
1479 
1480  // This hook gets to expand COPY instructions before they become
1481  // copyPhysReg() calls. Look for VMOVS instructions that can legally be
1482  // widened to VMOVD. We prefer the VMOVD when possible because it may be
1483  // changed into a VORR that can go down the NEON pipeline.
1484  if (!MI.isCopy() || Subtarget.dontWidenVMOVS() || Subtarget.isFPOnlySP())
1485  return false;
1486 
1487  // Look for a copy between even S-registers. That is where we keep floats
1488  // when using NEON v2f32 instructions for f32 arithmetic.
1489  unsigned DstRegS = MI.getOperand(0).getReg();
1490  unsigned SrcRegS = MI.getOperand(1).getReg();
1491  if (!ARM::SPRRegClass.contains(DstRegS, SrcRegS))
1492  return false;
1493 
1494  const TargetRegisterInfo *TRI = &getRegisterInfo();
1495  unsigned DstRegD = TRI->getMatchingSuperReg(DstRegS, ARM::ssub_0,
1496  &ARM::DPRRegClass);
1497  unsigned SrcRegD = TRI->getMatchingSuperReg(SrcRegS, ARM::ssub_0,
1498  &ARM::DPRRegClass);
1499  if (!DstRegD || !SrcRegD)
1500  return false;
1501 
1502  // We want to widen this into a DstRegD = VMOVD SrcRegD copy. This is only
1503  // legal if the COPY already defines the full DstRegD, and it isn't a
1504  // sub-register insertion.
1505  if (!MI.definesRegister(DstRegD, TRI) || MI.readsRegister(DstRegD, TRI))
1506  return false;
1507 
1508  // A dead copy shouldn't show up here, but reject it just in case.
1509  if (MI.getOperand(0).isDead())
1510  return false;
1511 
1512  // All clear, widen the COPY.
1513  LLVM_DEBUG(dbgs() << "widening: " << MI);
1514  MachineInstrBuilder MIB(*MI.getParent()->getParent(), MI);
1515 
1516  // Get rid of the old implicit-def of DstRegD. Leave it if it defines a Q-reg
1517  // or some other super-register.
1518  int ImpDefIdx = MI.findRegisterDefOperandIdx(DstRegD);
1519  if (ImpDefIdx != -1)
1520  MI.RemoveOperand(ImpDefIdx);
1521 
1522  // Change the opcode and operands.
1523  MI.setDesc(get(ARM::VMOVD));
1524  MI.getOperand(0).setReg(DstRegD);
1525  MI.getOperand(1).setReg(SrcRegD);
1526  MIB.add(predOps(ARMCC::AL));
1527 
1528  // We are now reading SrcRegD instead of SrcRegS. This may upset the
1529  // register scavenger and machine verifier, so we need to indicate that we
1530  // are reading an undefined value from SrcRegD, but a proper value from
1531  // SrcRegS.
1532  MI.getOperand(1).setIsUndef();
1533  MIB.addReg(SrcRegS, RegState::Implicit);
1534 
1535  // SrcRegD may actually contain an unrelated value in the ssub_1
1536  // sub-register. Don't kill it. Only kill the ssub_0 sub-register.
1537  if (MI.getOperand(1).isKill()) {
1538  MI.getOperand(1).setIsKill(false);
1539  MI.addRegisterKilled(SrcRegS, TRI, true);
1540  }
1541 
1542  LLVM_DEBUG(dbgs() << "replaced by: " << MI);
1543  return true;
1544 }
1545 
1546 /// Create a copy of a const pool value. Update CPI to the new index and return
1547 /// the label UID.
1548 static unsigned duplicateCPV(MachineFunction &MF, unsigned &CPI) {
1549  MachineConstantPool *MCP = MF.getConstantPool();
1550  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
1551 
1552  const MachineConstantPoolEntry &MCPE = MCP->getConstants()[CPI];
1553  assert(MCPE.isMachineConstantPoolEntry() &&
1554  "Expecting a machine constantpool entry!");
1555  ARMConstantPoolValue *ACPV =
1556  static_cast<ARMConstantPoolValue*>(MCPE.Val.MachineCPVal);
1557 
1558  unsigned PCLabelId = AFI->createPICLabelUId();
1559  ARMConstantPoolValue *NewCPV = nullptr;
1560 
1561  // FIXME: The below assumes PIC relocation model and that the function
1562  // is Thumb mode (t1 or t2). PCAdjustment would be 8 for ARM mode PIC, and
1563  // zero for non-PIC in ARM or Thumb. The callers are all of thumb LDR
1564  // instructions, so that's probably OK, but is PIC always correct when
1565  // we get here?
1566  if (ACPV->isGlobalValue())
1567  NewCPV = ARMConstantPoolConstant::Create(
1568  cast<ARMConstantPoolConstant>(ACPV)->getGV(), PCLabelId, ARMCP::CPValue,
1569  4, ACPV->getModifier(), ACPV->mustAddCurrentAddress());
1570  else if (ACPV->isExtSymbol())
1571  NewCPV = ARMConstantPoolSymbol::
1572  Create(MF.getFunction().getContext(),
1573  cast<ARMConstantPoolSymbol>(ACPV)->getSymbol(), PCLabelId, 4);
1574  else if (ACPV->isBlockAddress())
1575  NewCPV = ARMConstantPoolConstant::
1576  Create(cast<ARMConstantPoolConstant>(ACPV)->getBlockAddress(), PCLabelId,
1577  ARMCP::CPBlockAddress, 4);
1578  else if (ACPV->isLSDA())
1579  NewCPV = ARMConstantPoolConstant::Create(&MF.getFunction(), PCLabelId,
1580  ARMCP::CPLSDA, 4);
1581  else if (ACPV->isMachineBasicBlock())
1582  NewCPV = ARMConstantPoolMBB::
1583  Create(MF.getFunction().getContext(),
1584  cast<ARMConstantPoolMBB>(ACPV)->getMBB(), PCLabelId, 4);
1585  else
1586  llvm_unreachable("Unexpected ARM constantpool value type!!");
1587  CPI = MCP->getConstantPoolIndex(NewCPV, MCPE.getAlignment());
1588  return PCLabelId;
1589 }
1590 
1591 void ARMBaseInstrInfo::reMaterialize(MachineBasicBlock &MBB,
1592  MachineBasicBlock::iterator I,
1593  unsigned DestReg, unsigned SubIdx,
1594  const MachineInstr &Orig,
1595  const TargetRegisterInfo &TRI) const {
1596  unsigned Opcode = Orig.getOpcode();
1597  switch (Opcode) {
1598  default: {
1599  MachineInstr *MI = MBB.getParent()->CloneMachineInstr(&Orig);
1600  MI->substituteRegister(Orig.getOperand(0).getReg(), DestReg, SubIdx, TRI);
1601  MBB.insert(I, MI);
1602  break;
1603  }
1604  case ARM::tLDRpci_pic:
1605  case ARM::t2LDRpci_pic: {
1606  MachineFunction &MF = *MBB.getParent();
1607  unsigned CPI = Orig.getOperand(1).getIndex();
1608  unsigned PCLabelId = duplicateCPV(MF, CPI);
1609  BuildMI(MBB, I, Orig.getDebugLoc(), get(Opcode), DestReg)
1610  .addConstantPoolIndex(CPI)
1611  .addImm(PCLabelId)
1612  .cloneMemRefs(Orig);
1613  break;
1614  }
1615  }
1616 }
1617 
1618 MachineInstr &
1619 ARMBaseInstrInfo::duplicate(MachineBasicBlock &MBB,
1620  MachineBasicBlock::iterator InsertBefore,
1621  const MachineInstr &Orig) const {
1622  MachineInstr &Cloned = TargetInstrInfo::duplicate(MBB, InsertBefore, Orig);
1623  MachineBasicBlock::instr_iterator I = Cloned.getIterator();
1624  for (;;) {
1625  switch (I->getOpcode()) {
1626  case ARM::tLDRpci_pic:
1627  case ARM::t2LDRpci_pic: {
1628  MachineFunction &MF = *MBB.getParent();
1629  unsigned CPI = I->getOperand(1).getIndex();
1630  unsigned PCLabelId = duplicateCPV(MF, CPI);
1631  I->getOperand(1).setIndex(CPI);
1632  I->getOperand(2).setImm(PCLabelId);
1633  break;
1634  }
1635  }
1636  if (!I->isBundledWithSucc())
1637  break;
1638  ++I;
1639  }
1640  return Cloned;
1641 }
1642 
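// Two rematerializable instructions produce the same value if they load the
// same constant-pool entry, global, or symbol; PICLDR additionally requires
// the address operands to match (or to be defined by equivalent instructions).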
1643 bool ARMBaseInstrInfo::produceSameValue(const MachineInstr &MI0,
1644  const MachineInstr &MI1,
1645  const MachineRegisterInfo *MRI) const {
1646  unsigned Opcode = MI0.getOpcode();
1647  if (Opcode == ARM::t2LDRpci ||
1648  Opcode == ARM::t2LDRpci_pic ||
1649  Opcode == ARM::tLDRpci ||
1650  Opcode == ARM::tLDRpci_pic ||
1651  Opcode == ARM::LDRLIT_ga_pcrel ||
1652  Opcode == ARM::LDRLIT_ga_pcrel_ldr ||
1653  Opcode == ARM::tLDRLIT_ga_pcrel ||
1654  Opcode == ARM::MOV_ga_pcrel ||
1655  Opcode == ARM::MOV_ga_pcrel_ldr ||
1656  Opcode == ARM::t2MOV_ga_pcrel) {
1657  if (MI1.getOpcode() != Opcode)
1658  return false;
1659  if (MI0.getNumOperands() != MI1.getNumOperands())
1660  return false;
1661 
1662  const MachineOperand &MO0 = MI0.getOperand(1);
1663  const MachineOperand &MO1 = MI1.getOperand(1);
1664  if (MO0.getOffset() != MO1.getOffset())
1665  return false;
1666 
1667  if (Opcode == ARM::LDRLIT_ga_pcrel ||
1668  Opcode == ARM::LDRLIT_ga_pcrel_ldr ||
1669  Opcode == ARM::tLDRLIT_ga_pcrel ||
1670  Opcode == ARM::MOV_ga_pcrel ||
1671  Opcode == ARM::MOV_ga_pcrel_ldr ||
1672  Opcode == ARM::t2MOV_ga_pcrel)
1673  // Ignore the PC labels.
1674  return MO0.getGlobal() == MO1.getGlobal();
1675 
1676  const MachineFunction *MF = MI0.getParent()->getParent();
1677  const MachineConstantPool *MCP = MF->getConstantPool();
1678  int CPI0 = MO0.getIndex();
1679  int CPI1 = MO1.getIndex();
1680  const MachineConstantPoolEntry &MCPE0 = MCP->getConstants()[CPI0];
1681  const MachineConstantPoolEntry &MCPE1 = MCP->getConstants()[CPI1];
1682  bool isARMCP0 = MCPE0.isMachineConstantPoolEntry();
1683  bool isARMCP1 = MCPE1.isMachineConstantPoolEntry();
1684  if (isARMCP0 && isARMCP1) {
1685  ARMConstantPoolValue *ACPV0 =
1686  static_cast<ARMConstantPoolValue*>(MCPE0.Val.MachineCPVal);
1687  ARMConstantPoolValue *ACPV1 =
1688  static_cast<ARMConstantPoolValue*>(MCPE1.Val.MachineCPVal);
1689  return ACPV0->hasSameValue(ACPV1);
1690  } else if (!isARMCP0 && !isARMCP1) {
1691  return MCPE0.Val.ConstVal == MCPE1.Val.ConstVal;
1692  }
1693  return false;
1694  } else if (Opcode == ARM::PICLDR) {
1695  if (MI1.getOpcode() != Opcode)
1696  return false;
1697  if (MI0.getNumOperands() != MI1.getNumOperands())
1698  return false;
1699 
1700  unsigned Addr0 = MI0.getOperand(1).getReg();
1701  unsigned Addr1 = MI1.getOperand(1).getReg();
1702  if (Addr0 != Addr1) {
1703  if (!MRI ||
1704  !TargetRegisterInfo::isVirtualRegister(Addr0) ||
1705  !TargetRegisterInfo::isVirtualRegister(Addr1))
1706  return false;
1707 
1708  // This assumes SSA form.
1709  MachineInstr *Def0 = MRI->getVRegDef(Addr0);
1710  MachineInstr *Def1 = MRI->getVRegDef(Addr1);
1711  // Check if the loaded value, e.g. a constantpool of a global address, are
1712  // the same.
1713  if (!produceSameValue(*Def0, *Def1, MRI))
1714  return false;
1715  }
1716 
1717  for (unsigned i = 3, e = MI0.getNumOperands(); i != e; ++i) {
1718  // %12 = PICLDR %11, 0, 14, %noreg
1719  const MachineOperand &MO0 = MI0.getOperand(i);
1720  const MachineOperand &MO1 = MI1.getOperand(i);
1721  if (!MO0.isIdenticalTo(MO1))
1722  return false;
1723  }
1724  return true;
1725  }
1726 
1727  return MI0.isIdenticalTo(MI1, MachineInstr::IgnoreVRegDefs);
1728 }
1729 
1730 /// areLoadsFromSameBasePtr - This is used by the pre-regalloc scheduler to
1731 /// determine if two loads are loading from the same base address. It should
1732 /// only return true if the base pointers are the same and the only differences
1733 /// between the two addresses is the offset. It also returns the offsets by
1734 /// reference.
1735 ///
1736 /// FIXME: remove this in favor of the MachineInstr interface once pre-RA-sched
1737 /// is permanently disabled.
1738 bool ARMBaseInstrInfo::areLoadsFromSameBasePtr(SDNode *Load1, SDNode *Load2,
1739  int64_t &Offset1,
1740  int64_t &Offset2) const {
1741  // Don't worry about Thumb: just ARM and Thumb2.
1742  if (Subtarget.isThumb1Only()) return false;
1743 
1744  if (!Load1->isMachineOpcode() || !Load2->isMachineOpcode())
1745  return false;
1746 
1747  switch (Load1->getMachineOpcode()) {
1748  default:
1749  return false;
1750  case ARM::LDRi12:
1751  case ARM::LDRBi12:
1752  case ARM::LDRD:
1753  case ARM::LDRH:
1754  case ARM::LDRSB:
1755  case ARM::LDRSH:
1756  case ARM::VLDRD:
1757  case ARM::VLDRS:
1758  case ARM::t2LDRi8:
1759  case ARM::t2LDRBi8:
1760  case ARM::t2LDRDi8:
1761  case ARM::t2LDRSHi8:
1762  case ARM::t2LDRi12:
1763  case ARM::t2LDRBi12:
1764  case ARM::t2LDRSHi12:
1765  break;
1766  }
1767 
1768  switch (Load2->getMachineOpcode()) {
1769  default:
1770  return false;
1771  case ARM::LDRi12:
1772  case ARM::LDRBi12:
1773  case ARM::LDRD:
1774  case ARM::LDRH:
1775  case ARM::LDRSB:
1776  case ARM::LDRSH:
1777  case ARM::VLDRD:
1778  case ARM::VLDRS:
1779  case ARM::t2LDRi8:
1780  case ARM::t2LDRBi8:
1781  case ARM::t2LDRSHi8:
1782  case ARM::t2LDRi12:
1783  case ARM::t2LDRBi12:
1784  case ARM::t2LDRSHi12:
1785  break;
1786  }
1787 
1788  // Check if base addresses and chain operands match.
1789  if (Load1->getOperand(0) != Load2->getOperand(0) ||
1790  Load1->getOperand(4) != Load2->getOperand(4))
1791  return false;
1792 
1793  // Index should be Reg0.
1794  if (Load1->getOperand(3) != Load2->getOperand(3))
1795  return false;
1796 
1797  // Determine the offsets.
1798  if (isa<ConstantSDNode>(Load1->getOperand(1)) &&
1799  isa<ConstantSDNode>(Load2->getOperand(1))) {
1800  Offset1 = cast<ConstantSDNode>(Load1->getOperand(1))->getSExtValue();
1801  Offset2 = cast<ConstantSDNode>(Load2->getOperand(1))->getSExtValue();
1802  return true;
1803  }
1804 
1805  return false;
1806 }
1807 
1808 /// shouldScheduleLoadsNear - This is used by the pre-regalloc scheduler to
1809 /// determine (in conjunction with areLoadsFromSameBasePtr) if two loads should
1810 /// be scheduled together. On some targets, if two loads are loading from
1811 /// addresses in the same cache line, it's better if they are scheduled
1812 /// together. This function takes two integers that represent the load offsets
1813 /// from the common base address. It returns true if it decides it's desirable
1814 /// to schedule the two loads together. "NumLoads" is the number of loads that
1815 /// have already been scheduled after Load1.
1816 ///
1817 /// FIXME: remove this in favor of the MachineInstr interface once pre-RA-sched
1818 /// is permanently disabled.
1819 bool ARMBaseInstrInfo::shouldScheduleLoadsNear(SDNode *Load1, SDNode *Load2,
1820  int64_t Offset1, int64_t Offset2,
1821  unsigned NumLoads) const {
1822  // Don't worry about Thumb: just ARM and Thumb2.
1823  if (Subtarget.isThumb1Only()) return false;
1824 
1825  assert(Offset2 > Offset1);
1826 
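  // (Note: with the division by 8 below, loads whose offsets differ by more
  // than roughly 512 bytes are not considered candidates for clustering.)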
1827  if ((Offset2 - Offset1) / 8 > 64)
1828  return false;
1829 
1830  // Check if the machine opcodes are different. If they are different
1831  // then we consider the loads to be from different base addresses,
1832  // EXCEPT in the case of Thumb2 byte loads where one is t2LDRBi8 and the other t2LDRBi12.
1833  // In that case they are considered the same because they are merely different
1834  // encoding forms of the same basic instruction.
1835  if ((Load1->getMachineOpcode() != Load2->getMachineOpcode()) &&
1836  !((Load1->getMachineOpcode() == ARM::t2LDRBi8 &&
1837  Load2->getMachineOpcode() == ARM::t2LDRBi12) ||
1838  (Load1->getMachineOpcode() == ARM::t2LDRBi12 &&
1839  Load2->getMachineOpcode() == ARM::t2LDRBi8)))
1840  return false; // FIXME: overly conservative?
1841 
1842  // Four loads in a row should be sufficient.
1843  if (NumLoads >= 3)
1844  return false;
1845 
1846  return true;
1847 }
1848 
1849 bool ARMBaseInstrInfo::isSchedulingBoundary(const MachineInstr &MI,
1850  const MachineBasicBlock *MBB,
1851  const MachineFunction &MF) const {
1852  // Debug info is never a scheduling boundary. It's necessary to be explicit
1853  // due to the special treatment of IT instructions below, otherwise a
1854  // dbg_value followed by an IT will result in the IT instruction being
1855  // considered a scheduling hazard, which is wrong. It should be the actual
1856  // instruction preceding the dbg_value instruction(s), just like it is
1857  // when debug info is not present.
1858  if (MI.isDebugInstr())
1859  return false;
1860 
1861  // Terminators and labels can't be scheduled around.
1862  if (MI.isTerminator() || MI.isPosition())
1863  return true;
1864 
1865  // Treat the start of the IT block as a scheduling boundary, but schedule
1866  // t2IT along with all instructions following it.
1867  // FIXME: This is a big hammer. But the alternative is to add all potential
1868  // true and anti dependencies to IT block instructions as implicit operands
1869  // to the t2IT instruction. The added compile time and complexity does not
1870  // seem worth it.
1871  MachineBasicBlock::const_iterator I = MI;
1872  // Make sure to skip any debug instructions
1873  while (++I != MBB->end() && I->isDebugInstr())
1874  ;
1875  if (I != MBB->end() && I->getOpcode() == ARM::t2IT)
1876  return true;
1877 
1878  // Don't attempt to schedule around any instruction that defines
1879  // a stack-oriented pointer, as it's unlikely to be profitable. This
1880  // saves compile time, because it doesn't require every single
1881  // stack slot reference to depend on the instruction that does the
1882  // modification.
1883  // Calls don't actually change the stack pointer, even if they have imp-defs.
1884  // No ARM calling conventions change the stack pointer. (X86 calling
1885  // conventions sometimes do).
1886  if (!MI.isCall() && MI.definesRegister(ARM::SP))
1887  return true;
1888 
1889  return false;
1890 }
1891 
1892 bool ARMBaseInstrInfo::
1893 isProfitableToIfCvt(MachineBasicBlock &MBB,
1894  unsigned NumCycles, unsigned ExtraPredCycles,
1895  BranchProbability Probability) const {
1896  if (!NumCycles)
1897  return false;
1898 
1899  // If we are optimizing for size, see if the branch in the predecessor can be
1900  // lowered to cbn?z by the constant island lowering pass, and return false if
1901  // so. This results in a shorter instruction sequence.
1902  if (MBB.getParent()->getFunction().hasOptSize()) {
1903  MachineBasicBlock *Pred = *MBB.pred_begin();
1904  if (!Pred->empty()) {
1905  MachineInstr *LastMI = &*Pred->rbegin();
1906  if (LastMI->getOpcode() == ARM::t2Bcc) {
1907  const TargetRegisterInfo *TRI = &getRegisterInfo();
1908  MachineInstr *CmpMI = findCMPToFoldIntoCBZ(LastMI, TRI);
1909  if (CmpMI)
1910  return false;
1911  }
1912  }
1913  }
1914  return isProfitableToIfCvt(MBB, NumCycles, ExtraPredCycles,
1915  MBB, 0, 0, Probability);
1916 }
1917 
1918 bool ARMBaseInstrInfo::
1919 isProfitableToIfCvt(MachineBasicBlock &TBB,
1920  unsigned TCycles, unsigned TExtra,
1921  MachineBasicBlock &FBB,
1922  unsigned FCycles, unsigned FExtra,
1923  BranchProbability Probability) const {
1924  if (!TCycles)
1925  return false;
1926 
1927  // In Thumb code we often end up trading one branch for an IT block, and
1928  // if we are cloning, this can increase code size. Prevent
1929  // blocks with multiple predecessors from being if-converted to avoid this
1930  // cloning.
1931  if (Subtarget.isThumb2() && TBB.getParent()->getFunction().hasMinSize()) {
1932  if (TBB.pred_size() != 1 || FBB.pred_size() != 1)
1933  return false;
1934  }
1935 
1936  // Attempt to estimate the relative costs of predication versus branching.
1937  // Here we scale up each component of UnpredCost to avoid precision issues when
1938  // scaling TCycles/FCycles by Probability.
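  // (Illustrative: with a scale of 1024, applying a 50% probability to 3
  // cycles yields 1536 scaled units rather than truncating 1.5 cycles to 1.)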
1939  const unsigned ScalingUpFactor = 1024;
1940 
1941  unsigned PredCost = (TCycles + FCycles + TExtra + FExtra) * ScalingUpFactor;
1942  unsigned UnpredCost;
1943  if (!Subtarget.hasBranchPredictor()) {
1944  // When we don't have a branch predictor it's always cheaper to not take a
1945  // branch than take it, so we have to take that into account.
1946  unsigned NotTakenBranchCost = 1;
1947  unsigned TakenBranchCost = Subtarget.getMispredictionPenalty();
1948  unsigned TUnpredCycles, FUnpredCycles;
1949  if (!FCycles) {
1950  // Triangle: TBB is the fallthrough
1951  TUnpredCycles = TCycles + NotTakenBranchCost;
1952  FUnpredCycles = TakenBranchCost;
1953  } else {
1954  // Diamond: TBB is the block that is branched to, FBB is the fallthrough
1955  TUnpredCycles = TCycles + TakenBranchCost;
1956  FUnpredCycles = FCycles + NotTakenBranchCost;
1957  // The branch at the end of FBB will disappear when it's predicated, so
1958  // discount it from PredCost.
1959  PredCost -= 1 * ScalingUpFactor;
1960  }
1961  // The total cost is the cost of each path scaled by its probability.
1962  unsigned TUnpredCost = Probability.scale(TUnpredCycles * ScalingUpFactor);
1963  unsigned FUnpredCost = Probability.getCompl().scale(FUnpredCycles * ScalingUpFactor);
1964  UnpredCost = TUnpredCost + FUnpredCost;
1965  // When predicating, assume that the first IT can be folded away but later
1966  // ones cost one cycle each.
1967  if (Subtarget.isThumb2() && TCycles + FCycles > 4) {
1968  PredCost += ((TCycles + FCycles - 4) / 4) * ScalingUpFactor;
1969  }
1970  } else {
1971  unsigned TUnpredCost = Probability.scale(TCycles * ScalingUpFactor);
1972  unsigned FUnpredCost =
1973  Probability.getCompl().scale(FCycles * ScalingUpFactor);
1974  UnpredCost = TUnpredCost + FUnpredCost;
1975  UnpredCost += 1 * ScalingUpFactor; // The branch itself
1976  UnpredCost += Subtarget.getMispredictionPenalty() * ScalingUpFactor / 10;
1977  }
1978 
1979  return PredCost <= UnpredCost;
1980 }
1981 
1982 bool
1983 ARMBaseInstrInfo::isProfitableToUnpredicate(MachineBasicBlock &TMBB,
1984  MachineBasicBlock &FMBB) const {
1985  // Reduce false anti-dependencies to let the target's out-of-order execution
1986  // engine do its thing.
1987  return Subtarget.isProfitableToUnpredicate();
1988 }
1989 
1990 /// getInstrPredicate - If instruction is predicated, returns its predicate
1991 /// condition, otherwise returns AL. It also returns the condition code
1992 /// register by reference.
1993 ARMCC::CondCodes llvm::getInstrPredicate(const MachineInstr &MI,
1994  unsigned &PredReg) {
1995  int PIdx = MI.findFirstPredOperandIdx();
1996  if (PIdx == -1) {
1997  PredReg = 0;
1998  return ARMCC::AL;
1999  }
2000 
2001  PredReg = MI.getOperand(PIdx+1).getReg();
2002  return (ARMCC::CondCodes)MI.getOperand(PIdx).getImm();
2003 }
2004 
2005 unsigned llvm::getMatchingCondBranchOpcode(unsigned Opc) {
2006  if (Opc == ARM::B)
2007  return ARM::Bcc;
2008  if (Opc == ARM::tB)
2009  return ARM::tBcc;
2010  if (Opc == ARM::t2B)
2011  return ARM::t2Bcc;
2012 
2013  llvm_unreachable("Unknown unconditional branch opcode!");
2014 }
2015 
2016 MachineInstr *ARMBaseInstrInfo::commuteInstructionImpl(MachineInstr &MI,
2017  bool NewMI,
2018  unsigned OpIdx1,
2019  unsigned OpIdx2) const {
2020  switch (MI.getOpcode()) {
2021  case ARM::MOVCCr:
2022  case ARM::t2MOVCCr: {
2023  // MOVCC can be commuted by inverting the condition.
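  // For example (illustrative operands), "%dst = MOVCCr %a, %b, EQ, %cpsr"
  // computes the same value as "%dst = MOVCCr %b, %a, NE, %cpsr".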
2024  unsigned PredReg = 0;
2025  ARMCC::CondCodes CC = getInstrPredicate(MI, PredReg);
2026  // MOVCC AL can't be inverted. Shouldn't happen.
2027  if (CC == ARMCC::AL || PredReg != ARM::CPSR)
2028  return nullptr;
2029  MachineInstr *CommutedMI =
2030  TargetInstrInfo::commuteInstructionImpl(MI, NewMI, OpIdx1, OpIdx2);
2031  if (!CommutedMI)
2032  return nullptr;
2033  // After swapping the MOVCC operands, also invert the condition.
2034  CommutedMI->getOperand(CommutedMI->findFirstPredOperandIdx())
2035  .setImm(ARMCC::getOppositeCondition(CC));
2036  return CommutedMI;
2037  }
2038  }
2039  return TargetInstrInfo::commuteInstructionImpl(MI, NewMI, OpIdx1, OpIdx2);
2040 }
2041 
2042 /// Identify instructions that can be folded into a MOVCC instruction, and
2043 /// return the defining instruction.
2044 static MachineInstr *canFoldIntoMOVCC(unsigned Reg,
2045  const MachineRegisterInfo &MRI,
2046  const TargetInstrInfo *TII) {
2047  if (!TargetRegisterInfo::isVirtualRegister(Reg))
2048  return nullptr;
2049  if (!MRI.hasOneNonDBGUse(Reg))
2050  return nullptr;
2051  MachineInstr *MI = MRI.getVRegDef(Reg);
2052  if (!MI)
2053  return nullptr;
2054  // MI is folded into the MOVCC by predicating it.
2055  if (!MI->isPredicable())
2056  return nullptr;
2057  // Check if MI has any non-dead defs or physreg uses. This also detects
2058  // predicated instructions which will be reading CPSR.
2059  for (unsigned i = 1, e = MI->getNumOperands(); i != e; ++i) {
2060  const MachineOperand &MO = MI->getOperand(i);
2061  // Reject frame index operands, PEI can't handle the predicated pseudos.
2062  if (MO.isFI() || MO.isCPI() || MO.isJTI())
2063  return nullptr;
2064  if (!MO.isReg())
2065  continue;
2066  // MI can't have any tied operands, that would conflict with predication.
2067  if (MO.isTied())
2068  return nullptr;
2069  if (TargetRegisterInfo::isPhysicalRegister(MO.getReg()))
2070  return nullptr;
2071  if (MO.isDef() && !MO.isDead())
2072  return nullptr;
2073  }
2074  bool DontMoveAcrossStores = true;
2075  if (!MI->isSafeToMove(/* AliasAnalysis = */ nullptr, DontMoveAcrossStores))
2076  return nullptr;
2077  return MI;
2078 }
2079 
2080 bool ARMBaseInstrInfo::analyzeSelect(const MachineInstr &MI,
2081  SmallVectorImpl<MachineOperand> &Cond,
2082  unsigned &TrueOp, unsigned &FalseOp,
2083  bool &Optimizable) const {
2084  assert((MI.getOpcode() == ARM::MOVCCr || MI.getOpcode() == ARM::t2MOVCCr) &&
2085  "Unknown select instruction");
2086  // MOVCC operands:
2087  // 0: Def.
2088  // 1: True use.
2089  // 2: False use.
2090  // 3: Condition code.
2091  // 4: CPSR use.
2092  TrueOp = 1;
2093  FalseOp = 2;
2094  Cond.push_back(MI.getOperand(3));
2095  Cond.push_back(MI.getOperand(4));
2096  // We can always fold a def.
2097  Optimizable = true;
2098  return false;
2099 }
2100 
2101 MachineInstr *
2102 ARMBaseInstrInfo::optimizeSelect(MachineInstr &MI,
2103  SmallPtrSetImpl<MachineInstr *> &SeenMIs,
2104  bool PreferFalse) const {
2105  assert((MI.getOpcode() == ARM::MOVCCr || MI.getOpcode() == ARM::t2MOVCCr) &&
2106  "Unknown select instruction");
2107  MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
2108  MachineInstr *DefMI = canFoldIntoMOVCC(MI.getOperand(2).getReg(), MRI, this);
2109  bool Invert = !DefMI;
2110  if (!DefMI)
2111  DefMI = canFoldIntoMOVCC(MI.getOperand(1).getReg(), MRI, this);
2112  if (!DefMI)
2113  return nullptr;
2114 
2115  // Find new register class to use.
2116  MachineOperand FalseReg = MI.getOperand(Invert ? 2 : 1);
2117  unsigned DestReg = MI.getOperand(0).getReg();
2118  const TargetRegisterClass *PreviousClass = MRI.getRegClass(FalseReg.getReg());
2119  if (!MRI.constrainRegClass(DestReg, PreviousClass))
2120  return nullptr;
2121 
2122  // Create a new predicated version of DefMI.
2123  // Rfalse is the first use.
2124  MachineInstrBuilder NewMI =
2125  BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), DefMI->getDesc(), DestReg);
2126 
2127  // Copy all the DefMI operands, excluding its (null) predicate.
2128  const MCInstrDesc &DefDesc = DefMI->getDesc();
2129  for (unsigned i = 1, e = DefDesc.getNumOperands();
2130  i != e && !DefDesc.OpInfo[i].isPredicate(); ++i)
2131  NewMI.add(DefMI->getOperand(i));
2132 
2133  unsigned CondCode = MI.getOperand(3).getImm();
2134  if (Invert)
2135  NewMI.addImm(ARMCC::getOppositeCondition(ARMCC::CondCodes(CondCode)));
2136  else
2137  NewMI.addImm(CondCode);
2138  NewMI.add(MI.getOperand(4));
2139 
2140  // DefMI is not the -S version that sets CPSR, so add an optional %noreg.
2141  if (NewMI->hasOptionalDef())
2142  NewMI.add(condCodeOp());
2143 
2144  // The output register value when the predicate is false is an implicit
2145  // register operand tied to the first def.
2146  // The tie makes the register allocator ensure the FalseReg is allocated the
2147  // same register as operand 0.
2148  FalseReg.setImplicit();
2149  NewMI.add(FalseReg);
2150  NewMI->tieOperands(0, NewMI->getNumOperands() - 1);
2151 
2152  // Update SeenMIs set: register newly created MI and erase removed DefMI.
2153  SeenMIs.insert(NewMI);
2154  SeenMIs.erase(DefMI);
2155 
2156  // If MI is inside a loop, and DefMI is outside the loop, then kill flags on
2157  // DefMI would be invalid when transferred inside the loop. Checking for a
2158  // loop is expensive, but at least remove kill flags if they are in different
2159  // BBs.
2160  if (DefMI->getParent() != MI.getParent())
2161  NewMI->clearKillInfo();
2162 
2163  // The caller will erase MI, but not DefMI.
2164  DefMI->eraseFromParent();
2165  return NewMI;
2166 }
2167 
2168 /// Map pseudo instructions that imply an 'S' bit onto real opcodes. Whether the
2169 /// instruction is encoded with an 'S' bit is determined by the optional CPSR
2170 /// def operand.
2171 ///
2172 /// This will go away once we can teach tblgen how to set the optional CPSR def
2173 /// operand itself.
2174 struct AddSubFlagsOpcodePair {
2175  uint16_t PseudoOpc;
2176  uint16_t MachineOpc;
2177 };
2178 
2179 static const AddSubFlagsOpcodePair AddSubFlagsOpcodeMap[] = {
2180  {ARM::ADDSri, ARM::ADDri},
2181  {ARM::ADDSrr, ARM::ADDrr},
2182  {ARM::ADDSrsi, ARM::ADDrsi},
2183  {ARM::ADDSrsr, ARM::ADDrsr},
2184 
2185  {ARM::SUBSri, ARM::SUBri},
2186  {ARM::SUBSrr, ARM::SUBrr},
2187  {ARM::SUBSrsi, ARM::SUBrsi},
2188  {ARM::SUBSrsr, ARM::SUBrsr},
2189 
2190  {ARM::RSBSri, ARM::RSBri},
2191  {ARM::RSBSrsi, ARM::RSBrsi},
2192  {ARM::RSBSrsr, ARM::RSBrsr},
2193 
2194  {ARM::tADDSi3, ARM::tADDi3},
2195  {ARM::tADDSi8, ARM::tADDi8},
2196  {ARM::tADDSrr, ARM::tADDrr},
2197  {ARM::tADCS, ARM::tADC},
2198 
2199  {ARM::tSUBSi3, ARM::tSUBi3},
2200  {ARM::tSUBSi8, ARM::tSUBi8},
2201  {ARM::tSUBSrr, ARM::tSUBrr},
2202  {ARM::tSBCS, ARM::tSBC},
2203  {ARM::tRSBS, ARM::tRSB},
2204 
2205  {ARM::t2ADDSri, ARM::t2ADDri},
2206  {ARM::t2ADDSrr, ARM::t2ADDrr},
2207  {ARM::t2ADDSrs, ARM::t2ADDrs},
2208 
2209  {ARM::t2SUBSri, ARM::t2SUBri},
2210  {ARM::t2SUBSrr, ARM::t2SUBrr},
2211  {ARM::t2SUBSrs, ARM::t2SUBrs},
2212 
2213  {ARM::t2RSBSri, ARM::t2RSBri},
2214  {ARM::t2RSBSrs, ARM::t2RSBrs},
2215 };
2216 
2217 unsigned llvm::convertAddSubFlagsOpcode(unsigned OldOpc) {
2218  for (unsigned i = 0, e = array_lengthof(AddSubFlagsOpcodeMap); i != e; ++i)
2219  if (OldOpc == AddSubFlagsOpcodeMap[i].PseudoOpc)
2220  return AddSubFlagsOpcodeMap[i].MachineOpc;
2221  return 0;
2222 }
2223 
2224 void llvm::emitARMRegPlusImmediate(MachineBasicBlock &MBB,
2225  MachineBasicBlock::iterator &MBBI,
2226  const DebugLoc &dl, unsigned DestReg,
2227  unsigned BaseReg, int NumBytes,
2228  ARMCC::CondCodes Pred, unsigned PredReg,
2229  const ARMBaseInstrInfo &TII,
2230  unsigned MIFlags) {
2231  if (NumBytes == 0 && DestReg != BaseReg) {
2232  BuildMI(MBB, MBBI, dl, TII.get(ARM::MOVr), DestReg)
2233  .addReg(BaseReg, RegState::Kill)
2234  .add(predOps(Pred, PredReg))
2235  .add(condCodeOp())
2236  .setMIFlags(MIFlags);
2237  return;
2238  }
2239 
2240  bool isSub = NumBytes < 0;
2241  if (isSub) NumBytes = -NumBytes;
2242 
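  // The loop below peels NumBytes into valid ARM SO-immediate chunks. For
  // example (illustrative), 0x404 is not encodable as a single SO-immediate,
  // so it would be materialized as an add of #0x4 followed by an add of #0x400.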
2243  while (NumBytes) {
2244  unsigned RotAmt = ARM_AM::getSOImmValRotate(NumBytes);
2245  unsigned ThisVal = NumBytes & ARM_AM::rotr32(0xFF, RotAmt);
2246  assert(ThisVal && "Didn't extract field correctly");
2247 
2248  // We will handle these bits from offset, clear them.
2249  NumBytes &= ~ThisVal;
2250 
2251  assert(ARM_AM::getSOImmVal(ThisVal) != -1 && "Bit extraction didn't work?");
2252 
2253  // Build the new ADD / SUB.
2254  unsigned Opc = isSub ? ARM::SUBri : ARM::ADDri;
2255  BuildMI(MBB, MBBI, dl, TII.get(Opc), DestReg)
2256  .addReg(BaseReg, RegState::Kill)
2257  .addImm(ThisVal)
2258  .add(predOps(Pred, PredReg))
2259  .add(condCodeOp())
2260  .setMIFlags(MIFlags);
2261  BaseReg = DestReg;
2262  }
2263 }
2264 
2265 bool llvm::tryFoldSPUpdateIntoPushPop(const ARMSubtarget &Subtarget,
2266  MachineFunction &MF, MachineInstr *MI,
2267  unsigned NumBytes) {
2268  // This optimisation potentially adds lots of load and store
2269  // micro-operations, so it's really only a benefit to code size.
2270  if (!Subtarget.hasMinSize())
2271  return false;
2272 
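  // For example (illustrative), an 8-byte SP adjustment next to "push {r4, lr}"
  // can be folded by rewriting the push as "push {r2, r3, r4, lr}" with r2/r3
  // marked undef; for pops, the extra registers are added as dead defs instead.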
2273  // If only one register is pushed/popped, LLVM can use an LDR/STR
2274  // instead. We can't modify those so make sure we're dealing with an
2275  // instruction we understand.
2276  bool IsPop = isPopOpcode(MI->getOpcode());
2277  bool IsPush = isPushOpcode(MI->getOpcode());
2278  if (!IsPush && !IsPop)
2279  return false;
2280 
2281  bool IsVFPPushPop = MI->getOpcode() == ARM::VSTMDDB_UPD ||
2282  MI->getOpcode() == ARM::VLDMDIA_UPD;
2283  bool IsT1PushPop = MI->getOpcode() == ARM::tPUSH ||
2284  MI->getOpcode() == ARM::tPOP ||
2285  MI->getOpcode() == ARM::tPOP_RET;
2286 
2287  assert((IsT1PushPop || (MI->getOperand(0).getReg() == ARM::SP &&
2288  MI->getOperand(1).getReg() == ARM::SP)) &&
2289  "trying to fold sp update into non-sp-updating push/pop");
2290 
2291  // The VFP push & pop act on D-registers, so we can only correctly fold in
2292  // an adjustment that is a multiple of 8 bytes. Similarly, each rN is 4 bytes.
2293  // Don't try if this is violated.
2294  if (NumBytes % (IsVFPPushPop ? 8 : 4) != 0)
2295  return false;
2296 
2297  // ARM and Thumb2 push/pop insts have explicit "sp, sp" operands (+
2298  // pred) so the list starts at 4. Thumb1 starts after the predicate.
2299  int RegListIdx = IsT1PushPop ? 2 : 4;
2300 
2301  // Calculate the space we'll need in terms of registers.
2302  unsigned RegsNeeded;
2303  const TargetRegisterClass *RegClass;
2304  if (IsVFPPushPop) {
2305  RegsNeeded = NumBytes / 8;
2306  RegClass = &ARM::DPRRegClass;
2307  } else {
2308  RegsNeeded = NumBytes / 4;
2309  RegClass = &ARM::GPRRegClass;
2310  }
2311 
2312  // We're going to have to strip all list operands off before
2313  // re-adding them since the order matters, so save the existing ones
2314  // for later.
2315  SmallVector<MachineOperand, 4> RegList;
2316 
2317  // We're also going to need the first register transferred by this
2318  // instruction, which won't necessarily be the first register in the list.
2319  unsigned FirstRegEnc = -1;
2320 
2322  for (int i = MI->getNumOperands() - 1; i >= RegListIdx; --i) {
2323  MachineOperand &MO = MI->getOperand(i);
2324  RegList.push_back(MO);
2325 
2326  if (MO.isReg() && TRI->getEncodingValue(MO.getReg()) < FirstRegEnc)
2327  FirstRegEnc = TRI->getEncodingValue(MO.getReg());
2328  }
2329 
2330  const MCPhysReg *CSRegs = TRI->getCalleeSavedRegs(&MF);
2331 
2332  // Now try to find enough space in the reglist to allocate NumBytes.
2333  for (int CurRegEnc = FirstRegEnc - 1; CurRegEnc >= 0 && RegsNeeded;
2334  --CurRegEnc) {
2335  unsigned CurReg = RegClass->getRegister(CurRegEnc);
2336  if (IsT1PushPop && CurReg > ARM::R7)
2337  continue;
2338  if (!IsPop) {
2339  // Pushing any register is completely harmless; mark the register involved
2340  // as undef since we don't care about its value and must not restore it
2341  // during stack unwinding.
2342  RegList.push_back(MachineOperand::CreateReg(CurReg, false, false,
2343  false, false, true));
2344  --RegsNeeded;
2345  continue;
2346  }
2347 
2348  // However, we can only pop an extra register if it's not live. For
2349  // registers live within the function we might clobber a return value
2350  // register; the other way a register can be live here is if it's
2351  // callee-saved.
2352  if (isCalleeSavedRegister(CurReg, CSRegs) ||
2353  MI->getParent()->computeRegisterLiveness(TRI, CurReg, MI) !=
2355  // VFP pops don't allow holes in the register list, so any skip is fatal
2356  // for our transformation. GPR pops do, so we should just keep looking.
2357  if (IsVFPPushPop)
2358  return false;
2359  else
2360  continue;
2361  }
2362 
2363  // Mark the unimportant registers as <def,dead> in the POP.
2364  RegList.push_back(MachineOperand::CreateReg(CurReg, true, false, false,
2365  true));
2366  --RegsNeeded;
2367  }
2368 
2369  if (RegsNeeded > 0)
2370  return false;
2371 
2372  // Finally we know we can profitably perform the optimisation so go
2373  // ahead: strip all existing registers off and add them back again
2374  // in the right order.
2375  for (int i = MI->getNumOperands() - 1; i >= RegListIdx; --i)
2376  MI->RemoveOperand(i);
2377 
2378  // Add the complete list back in.
2379  MachineInstrBuilder MIB(MF, &*MI);
2380  for (int i = RegList.size() - 1; i >= 0; --i)
2381  MIB.add(RegList[i]);
2382 
2383  return true;
2384 }
2385 
2386 bool llvm::rewriteARMFrameIndex(MachineInstr &MI, unsigned FrameRegIdx,
2387  unsigned FrameReg, int &Offset,
2388  const ARMBaseInstrInfo &TII) {
2389  unsigned Opcode = MI.getOpcode();
2390  const MCInstrDesc &Desc = MI.getDesc();
2391  unsigned AddrMode = (Desc.TSFlags & ARMII::AddrModeMask);
2392  bool isSub = false;
2393 
2394  // Memory operands in inline assembly always use AddrMode2.
2395  if (Opcode == ARM::INLINEASM)
2396  AddrMode = ARMII::AddrMode2;
2397 
2398  if (Opcode == ARM::ADDri) {
2399  Offset += MI.getOperand(FrameRegIdx+1).getImm();
2400  if (Offset == 0) {
2401  // Turn it into a move.
2402  MI.setDesc(TII.get(ARM::MOVr));
2403  MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg, false);
2404  MI.RemoveOperand(FrameRegIdx+1);
2405  Offset = 0;
2406  return true;
2407  } else if (Offset < 0) {
2408  Offset = -Offset;
2409  isSub = true;
2410  MI.setDesc(TII.get(ARM::SUBri));
2411  }
2412 
2413  // Common case: small offset, fits into instruction.
2414  if (ARM_AM::getSOImmVal(Offset) != -1) {
2415  // Replace the FrameIndex with sp / fp
2416  MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg, false);
2417  MI.getOperand(FrameRegIdx+1).ChangeToImmediate(Offset);
2418  Offset = 0;
2419  return true;
2420  }
2421 
2422  // Otherwise, pull as much of the immediate into this ADDri/SUBri
2423  // as possible.
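  // (For example, an incoming Offset of 0x101 encodes #0x1 into this
  // instruction and leaves 0x100 in Offset for the caller to materialize.)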
2424  unsigned RotAmt = ARM_AM::getSOImmValRotate(Offset);
2425  unsigned ThisImmVal = Offset & ARM_AM::rotr32(0xFF, RotAmt);
2426 
2427  // We will handle these bits from offset, clear them.
2428  Offset &= ~ThisImmVal;
2429 
2430  // Get the properly encoded SOImmVal field.
2431  assert(ARM_AM::getSOImmVal(ThisImmVal) != -1 &&
2432  "Bit extraction didn't work?");
2433  MI.getOperand(FrameRegIdx+1).ChangeToImmediate(ThisImmVal);
2434  } else {
2435  unsigned ImmIdx = 0;
2436  int InstrOffs = 0;
2437  unsigned NumBits = 0;
2438  unsigned Scale = 1;
2439  switch (AddrMode) {
2440  case ARMII::AddrMode_i12:
2441  ImmIdx = FrameRegIdx + 1;
2442  InstrOffs = MI.getOperand(ImmIdx).getImm();
2443  NumBits = 12;
2444  break;
2445  case ARMII::AddrMode2:
2446  ImmIdx = FrameRegIdx+2;
2447  InstrOffs = ARM_AM::getAM2Offset(MI.getOperand(ImmIdx).getImm());
2448  if (ARM_AM::getAM2Op(MI.getOperand(ImmIdx).getImm()) == ARM_AM::sub)
2449  InstrOffs *= -1;
2450  NumBits = 12;
2451  break;
2452  case ARMII::AddrMode3:
2453  ImmIdx = FrameRegIdx+2;
2454  InstrOffs = ARM_AM::getAM3Offset(MI.getOperand(ImmIdx).getImm());
2455  if (ARM_AM::getAM3Op(MI.getOperand(ImmIdx).getImm()) == ARM_AM::sub)
2456  InstrOffs *= -1;
2457  NumBits = 8;
2458  break;
2459  case ARMII::AddrMode4:
2460  case ARMII::AddrMode6:
2461  // Can't fold any offset even if it's zero.
2462  return false;
2463  case ARMII::AddrMode5:
2464  ImmIdx = FrameRegIdx+1;
2465  InstrOffs = ARM_AM::getAM5Offset(MI.getOperand(ImmIdx).getImm());
2466  if (ARM_AM::getAM5Op(MI.getOperand(ImmIdx).getImm()) == ARM_AM::sub)
2467  InstrOffs *= -1;
2468  NumBits = 8;
2469  Scale = 4;
2470  break;
2471  case ARMII::AddrMode5FP16:
2472  ImmIdx = FrameRegIdx+1;
2473  InstrOffs = ARM_AM::getAM5Offset(MI.getOperand(ImmIdx).getImm());
2474  if (ARM_AM::getAM5Op(MI.getOperand(ImmIdx).getImm()) == ARM_AM::sub)
2475  InstrOffs *= -1;
2476  NumBits = 8;
2477  Scale = 2;
2478  break;
2479  default:
2480  llvm_unreachable("Unsupported addressing mode!");
2481  }
2482 
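  // NumBits is the width of the unsigned offset field and Scale its
  // granularity; e.g. AddrMode5 offsets are 8 bits scaled by 4, so at most
  // 255 * 4 = 1020 bytes can be folded, with the sign tracked via isSub.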
2483  Offset += InstrOffs * Scale;
2484  assert((Offset & (Scale-1)) == 0 && "Can't encode this offset!");
2485  if (Offset < 0) {
2486  Offset = -Offset;
2487  isSub = true;
2488  }
2489 
2490  // Attempt to fold the address computation if the opcode has offset bits.
2491  if (NumBits > 0) {
2492  // Common case: small offset, fits into instruction.
2493  MachineOperand &ImmOp = MI.getOperand(ImmIdx);
2494  int ImmedOffset = Offset / Scale;
2495  unsigned Mask = (1 << NumBits) - 1;
2496  if ((unsigned)Offset <= Mask * Scale) {
2497  // Replace the FrameIndex with sp
2498  MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg, false);
2499  // FIXME: When addrmode2 goes away, this will simplify (like the
2500  // T2 version), as the LDR.i12 versions don't need the encoding
2501  // tricks for the offset value.
2502  if (isSub) {
2503  if (AddrMode == ARMII::AddrMode_i12)
2504  ImmedOffset = -ImmedOffset;
2505  else
2506  ImmedOffset |= 1 << NumBits;
2507  }
2508  ImmOp.ChangeToImmediate(ImmedOffset);
2509  Offset = 0;
2510  return true;
2511  }
2512 
2513  // Otherwise, it didn't fit. Pull in what we can to simplify the immed.
2514  ImmedOffset = ImmedOffset & Mask;
2515  if (isSub) {
2516  if (AddrMode == ARMII::AddrMode_i12)
2517  ImmedOffset = -ImmedOffset;
2518  else
2519  ImmedOffset |= 1 << NumBits;
2520  }
2521  ImmOp.ChangeToImmediate(ImmedOffset);
2522  Offset &= ~(Mask*Scale);
2523  }
2524  }
2525 
2526  Offset = (isSub) ? -Offset : Offset;
2527  return Offset == 0;
2528 }
2529 
2530 /// analyzeCompare - For a comparison instruction, return the source registers
2531 /// in SrcReg and SrcReg2 if having two register operands, and the value it
2532 /// compares against in CmpValue. Return true if the comparison instruction
2533 /// can be analyzed.
2534 bool ARMBaseInstrInfo::analyzeCompare(const MachineInstr &MI, unsigned &SrcReg,
2535  unsigned &SrcReg2, int &CmpMask,
2536  int &CmpValue) const {
2537  switch (MI.getOpcode()) {
2538  default: break;
2539  case ARM::CMPri:
2540  case ARM::t2CMPri:
2541  case ARM::tCMPi8:
2542  SrcReg = MI.getOperand(0).getReg();
2543  SrcReg2 = 0;
2544  CmpMask = ~0;
2545  CmpValue = MI.getOperand(1).getImm();
2546  return true;
2547  case ARM::CMPrr:
2548  case ARM::t2CMPrr:
2549  case ARM::tCMPr:
2550  SrcReg = MI.getOperand(0).getReg();
2551  SrcReg2 = MI.getOperand(1).getReg();
2552  CmpMask = ~0;
2553  CmpValue = 0;
2554  return true;
2555  case ARM::TSTri:
2556  case ARM::t2TSTri:
2557  SrcReg = MI.getOperand(0).getReg();
2558  SrcReg2 = 0;
2559  CmpMask = MI.getOperand(1).getImm();
2560  CmpValue = 0;
2561  return true;
2562  }
2563 
2564  return false;
2565 }
2566 
2567 /// isSuitableForMask - Identify a suitable 'and' instruction that
2568 /// operates on the given source register and applies the same mask
2569 /// as a 'tst' instruction. Provide a limited look-through for copies.
2570 /// When successful, MI will hold the found instruction.
2571 static bool isSuitableForMask(MachineInstr *&MI, unsigned SrcReg,
2572  int CmpMask, bool CommonUse) {
2573  switch (MI->getOpcode()) {
2574  case ARM::ANDri:
2575  case ARM::t2ANDri:
2576  if (CmpMask != MI->getOperand(2).getImm())
2577  return false;
2578  if (SrcReg == MI->getOperand(CommonUse ? 1 : 0).getReg())
2579  return true;
2580  break;
2581  }
2582 
2583  return false;
2584 }
2585 
2586 /// getSwappedCondition - assume the flags are set by MI(a,b), return
2587 /// the condition code if we modify the instructions such that flags are
2588 /// set by MI(b,a).
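/// For example, if "CMP r0, r1" is rewritten as "CMP r1, r0", a user of the
/// GE condition must be rewritten to use LE.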
2589 inline static ARMCC::CondCodes getSwappedCondition(ARMCC::CondCodes CC) {
2590  switch (CC) {
2591  default: return ARMCC::AL;
2592  case ARMCC::EQ: return ARMCC::EQ;
2593  case ARMCC::NE: return ARMCC::NE;
2594  case ARMCC::HS: return ARMCC::LS;
2595  case ARMCC::LO: return ARMCC::HI;
2596  case ARMCC::HI: return ARMCC::LO;
2597  case ARMCC::LS: return ARMCC::HS;
2598  case ARMCC::GE: return ARMCC::LE;
2599  case ARMCC::LT: return ARMCC::GT;
2600  case ARMCC::GT: return ARMCC::LT;
2601  case ARMCC::LE: return ARMCC::GE;
2602  }
2603 }
2604 
2605 /// getCmpToAddCondition - assume the flags are set by CMP(a,b), return
2606 /// the condition code if we modify the instructions such that flags are
2607 /// set by ADD(a,b,X).
2608 inline static ARMCC::CondCodes getCmpToAddCondition(ARMCC::CondCodes CC) {
2609  switch (CC) {
2610  default: return ARMCC::AL;
2611  case ARMCC::HS: return ARMCC::LO;
2612  case ARMCC::LO: return ARMCC::HS;
2613  case ARMCC::VS: return ARMCC::VS;
2614  case ARMCC::VC: return ARMCC::VC;
2615  }
2616 }
2617 
2618 /// isRedundantFlagInstr - check whether the first instruction, whose only
2619 /// purpose is to update flags, can be made redundant.
2620 /// CMPrr can be made redundant by SUBrr if the operands are the same.
2621 /// CMPri can be made redundant by SUBri if the operands are the same.
2622 /// CMPrr(r0, r1) can be made redundant by ADDr[ri](r0, r1, X).
2623 /// This function can be extended later on.
2624 inline static bool isRedundantFlagInstr(const MachineInstr *CmpI,
2625  unsigned SrcReg, unsigned SrcReg2,
2626  int ImmValue, const MachineInstr *OI,
2627  bool &IsThumb1) {
2628  if ((CmpI->getOpcode() == ARM::CMPrr || CmpI->getOpcode() == ARM::t2CMPrr) &&
2629  (OI->getOpcode() == ARM::SUBrr || OI->getOpcode() == ARM::t2SUBrr) &&
2630  ((OI->getOperand(1).getReg() == SrcReg &&
2631  OI->getOperand(2).getReg() == SrcReg2) ||
2632  (OI->getOperand(1).getReg() == SrcReg2 &&
2633  OI->getOperand(2).getReg() == SrcReg))) {
2634  IsThumb1 = false;
2635  return true;
2636  }
2637 
2638  if (CmpI->getOpcode() == ARM::tCMPr && OI->getOpcode() == ARM::tSUBrr &&
2639  ((OI->getOperand(2).getReg() == SrcReg &&
2640  OI->getOperand(3).getReg() == SrcReg2) ||
2641  (OI->getOperand(2).getReg() == SrcReg2 &&
2642  OI->getOperand(3).getReg() == SrcReg))) {
2643  IsThumb1 = true;
2644  return true;
2645  }
2646 
2647  if ((CmpI->getOpcode() == ARM::CMPri || CmpI->getOpcode() == ARM::t2CMPri) &&
2648  (OI->getOpcode() == ARM::SUBri || OI->getOpcode() == ARM::t2SUBri) &&
2649  OI->getOperand(1).getReg() == SrcReg &&
2650  OI->getOperand(2).getImm() == ImmValue) {
2651  IsThumb1 = false;
2652  return true;
2653  }
2654 
2655  if (CmpI->getOpcode() == ARM::tCMPi8 &&
2656  (OI->getOpcode() == ARM::tSUBi8 || OI->getOpcode() == ARM::tSUBi3) &&
2657  OI->getOperand(2).getReg() == SrcReg &&
2658  OI->getOperand(3).getImm() == ImmValue) {
2659  IsThumb1 = true;
2660  return true;
2661  }
2662 
2663  if ((CmpI->getOpcode() == ARM::CMPrr || CmpI->getOpcode() == ARM::t2CMPrr) &&
2664  (OI->getOpcode() == ARM::ADDrr || OI->getOpcode() == ARM::t2ADDrr ||
2665  OI->getOpcode() == ARM::ADDri || OI->getOpcode() == ARM::t2ADDri) &&
2666  OI->getOperand(0).isReg() && OI->getOperand(1).isReg() &&
2667  OI->getOperand(0).getReg() == SrcReg &&
2668  OI->getOperand(1).getReg() == SrcReg2) {
2669  IsThumb1 = false;
2670  return true;
2671  }
2672 
2673  if (CmpI->getOpcode() == ARM::tCMPr &&
2674  (OI->getOpcode() == ARM::tADDi3 || OI->getOpcode() == ARM::tADDi8 ||
2675  OI->getOpcode() == ARM::tADDrr) &&
2676  OI->getOperand(0).getReg() == SrcReg &&
2677  OI->getOperand(2).getReg() == SrcReg2) {
2678  IsThumb1 = true;
2679  return true;
2680  }
2681 
2682  return false;
2683 }
2684 
2685 static bool isOptimizeCompareCandidate(MachineInstr *MI, bool &IsThumb1) {
2686  switch (MI->getOpcode()) {
2687  default: return false;
2688  case ARM::tLSLri:
2689  case ARM::tLSRri:
2690  case ARM::tLSLrr:
2691  case ARM::tLSRrr:
2692  case ARM::tSUBrr:
2693  case ARM::tADDrr:
2694  case ARM::tADDi3:
2695  case ARM::tADDi8:
2696  case ARM::tSUBi3:
2697  case ARM::tSUBi8:
2698  case ARM::tMUL:
2699  case ARM::tADC:
2700  case ARM::tSBC:
2701  case ARM::tRSB:
2702  case ARM::tAND:
2703  case ARM::tORR:
2704  case ARM::tEOR:
2705  case ARM::tBIC:
2706  case ARM::tMVN:
2707  case ARM::tASRri:
2708  case ARM::tASRrr:
2709  case ARM::tROR:
2710  IsThumb1 = true;
2711  LLVM_FALLTHROUGH;
2712  case ARM::RSBrr:
2713  case ARM::RSBri:
2714  case ARM::RSCrr:
2715  case ARM::RSCri:
2716  case ARM::ADDrr:
2717  case ARM::ADDri:
2718  case ARM::ADCrr:
2719  case ARM::ADCri:
2720  case ARM::SUBrr:
2721  case ARM::SUBri:
2722  case ARM::SBCrr:
2723  case ARM::SBCri:
2724  case ARM::t2RSBri:
2725  case ARM::t2ADDrr:
2726  case ARM::t2ADDri:
2727  case ARM::t2ADCrr:
2728  case ARM::t2ADCri:
2729  case ARM::t2SUBrr:
2730  case ARM::t2SUBri:
2731  case ARM::t2SBCrr:
2732  case ARM::t2SBCri:
2733  case ARM::ANDrr:
2734  case ARM::ANDri:
2735  case ARM::t2ANDrr:
2736  case ARM::t2ANDri:
2737  case ARM::ORRrr:
2738  case ARM::ORRri:
2739  case ARM::t2ORRrr:
2740  case ARM::t2ORRri:
2741  case ARM::EORrr:
2742  case ARM::EORri:
2743  case ARM::t2EORrr:
2744  case ARM::t2EORri:
2745  case ARM::t2LSRri:
2746  case ARM::t2LSRrr:
2747  case ARM::t2LSLri:
2748  case ARM::t2LSLrr:
2749  return true;
2750  }
2751 }
2752 
2753 /// optimizeCompareInstr - Convert the instruction supplying the argument to the
2754 /// comparison into one that sets the zero bit in the flags register;
2755 /// Remove a redundant Compare instruction if an earlier instruction can set the
2756 /// flags in the same way as Compare.
2757 /// E.g. SUBrr(r1,r2) and CMPrr(r1,r2). We also handle the case where two
2758 /// operands are swapped: SUBrr(r1,r2) and CMPrr(r2,r1), by updating the
2759 /// condition code of instructions which use the flags.
2760 bool ARMBaseInstrInfo::optimizeCompareInstr(
2761  MachineInstr &CmpInstr, unsigned SrcReg, unsigned SrcReg2, int CmpMask,
2762  int CmpValue, const MachineRegisterInfo *MRI) const {
2763  // Get the unique definition of SrcReg.
2764  MachineInstr *MI = MRI->getUniqueVRegDef(SrcReg);
2765  if (!MI) return false;
2766 
2767  // Masked compares sometimes use the same register as the corresponding 'and'.
2768  if (CmpMask != ~0) {
2769  if (!isSuitableForMask(MI, SrcReg, CmpMask, false) || isPredicated(*MI)) {
2770  MI = nullptr;
2771  for (MachineRegisterInfo::use_instr_iterator
2772  UI = MRI->use_instr_begin(SrcReg), UE = MRI->use_instr_end();
2773  UI != UE; ++UI) {
2774  if (UI->getParent() != CmpInstr.getParent())
2775  continue;
2776  MachineInstr *PotentialAND = &*UI;
2777  if (!isSuitableForMask(PotentialAND, SrcReg, CmpMask, true) ||
2778  isPredicated(*PotentialAND))
2779  continue;
2780  MI = PotentialAND;
2781  break;
2782  }
2783  if (!MI) return false;
2784  }
2785  }
2786 
2787  // Get ready to iterate backward from CmpInstr.
2788  MachineBasicBlock::iterator I = CmpInstr, E = MI,
2789  B = CmpInstr.getParent()->begin();
2790 
2791  // Early exit if CmpInstr is at the beginning of the BB.
2792  if (I == B) return false;
2793 
2794  // There are two possible candidates which can be changed to set CPSR:
2795  // One is MI, the other is a SUB or ADD instruction.
2796  // For CMPrr(r1,r2), we are looking for SUB(r1,r2), SUB(r2,r1), or
2797  // ADDr[ri](r1, r2, X).
2798  // For CMPri(r1, CmpValue), we are looking for SUBri(r1, CmpValue).
2799  MachineInstr *SubAdd = nullptr;
2800  if (SrcReg2 != 0)
2801  // MI is not a candidate for CMPrr.
2802  MI = nullptr;
2803  else if (MI->getParent() != CmpInstr.getParent() || CmpValue != 0) {
2804  // Conservatively refuse to convert an instruction which isn't in the same
2805  // BB as the comparison.
2806  // For CMPri w/ CmpValue != 0, a SubAdd may still be a candidate.
2807  // Thus we cannot return here.
2808  if (CmpInstr.getOpcode() == ARM::CMPri ||
2809  CmpInstr.getOpcode() == ARM::t2CMPri ||
2810  CmpInstr.getOpcode() == ARM::tCMPi8)
2811  MI = nullptr;
2812  else
2813  return false;
2814  }
2815 
2816  bool IsThumb1 = false;
2817  if (MI && !isOptimizeCompareCandidate(MI, IsThumb1))
2818  return false;
2819 
2820  // We also want to do this peephole for cases like this: if (a*b == 0),
2821  // and optimise away the CMP instruction from the generated code sequence:
2822  // MULS, MOVS, MOVS, CMP. Here the MOVS instructions load the boolean values
2823  // resulting from the select instruction, but these MOVS instructions for
2824  // Thumb1 (V6M) are flag setting and are thus preventing this optimisation.
2825  // However, if we only have MOVS instructions in between the CMP and the
2826  // other instruction (the MULS in this example), then the CPSR is dead so we
2827  // can safely reorder the sequence into: MOVS, MOVS, MULS, CMP. We do this
2828  // reordering and then continue the analysis hoping we can eliminate the
2829  // CMP. This peephole works on the vregs, so is still in SSA form. As a
2830  // consequence, the movs won't redefine/kill the MUL operands which would
2831  // make this reordering illegal.
2832  const TargetRegisterInfo *TRI = &getRegisterInfo();
2833  if (MI && IsThumb1) {
2834  --I;
2835  if (I != E && !MI->readsRegister(ARM::CPSR, TRI)) {
2836  bool CanReorder = true;
2837  for (; I != E; --I) {
2838  if (I->getOpcode() != ARM::tMOVi8) {
2839  CanReorder = false;
2840  break;
2841  }
2842  }
2843  if (CanReorder) {
2844  MI = MI->removeFromParent();
2845  E = CmpInstr;
2846  CmpInstr.getParent()->insert(E, MI);
2847  }
2848  }
2849  I = CmpInstr;
2850  E = MI;
2851  }
2852 
2853  // Check that CPSR isn't set between the comparison instruction and the one we
2854  // want to change. At the same time, search for SubAdd.
2855  bool SubAddIsThumb1 = false;
2856  do {
2857  const MachineInstr &Instr = *--I;
2858 
2859  // Check whether CmpInstr can be made redundant by the current instruction.
2860  if (isRedundantFlagInstr(&CmpInstr, SrcReg, SrcReg2, CmpValue, &Instr,
2861  SubAddIsThumb1)) {
2862  SubAdd = &*I;
2863  break;
2864  }
2865 
2866  // Allow E (which was initially MI) to be SubAdd but do not search before E.
2867  if (I == E)
2868  break;
2869 
2870  if (Instr.modifiesRegister(ARM::CPSR, TRI) ||
2871  Instr.readsRegister(ARM::CPSR, TRI))
2872  // This instruction modifies or uses CPSR after the one we want to
2873  // change. We can't do this transformation.
2874  return false;
2875 
2876  if (I == B) {
2877  // In some cases, we scan the use-list of an instruction for an AND;
2878  // that AND is in the same BB, but may not be scheduled before the
2879  // corresponding TST. In that case, bail out.
2880  //
2881  // FIXME: We could try to reschedule the AND.
2882  return false;
2883  }
2884  } while (true);
2885 
2886  // Return false if no candidates exist.
2887  if (!MI && !SubAdd)
2888  return false;
2889 
2890  // If we found a SubAdd, use it as it will be closer to the CMP
2891  if (SubAdd) {
2892  MI = SubAdd;
2893  IsThumb1 = SubAddIsThumb1;
2894  }
2895 
2896  // We can't use a predicated instruction - it doesn't always write the flags.
2897  if (isPredicated(*MI))
2898  return false;
2899 
2900  // Scan forward for the use of CPSR
2901  // When checking against MI: if it's a conditional code that requires
2902  // checking of the V bit or C bit, then this is not safe to do.
2903  // It is safe to remove CmpInstr if CPSR is redefined or killed.
2904  // If we are done with the basic block, we need to check whether CPSR is
2905  // live-out.
2906  SmallVector<std::pair<MachineOperand*, ARMCC::CondCodes>, 4>
2907  OperandsToUpdate;
2908  bool isSafe = false;
2909  I = CmpInstr;
2910  E = CmpInstr.getParent()->end();
2911  while (!isSafe && ++I != E) {
2912  const MachineInstr &Instr = *I;
2913  for (unsigned IO = 0, EO = Instr.getNumOperands();
2914  !isSafe && IO != EO; ++IO) {
2915  const MachineOperand &MO = Instr.getOperand(IO);
2916  if (MO.isRegMask() && MO.clobbersPhysReg(ARM::CPSR)) {
2917  isSafe = true;
2918  break;
2919  }
2920  if (!MO.isReg() || MO.getReg() != ARM::CPSR)
2921  continue;
2922  if (MO.isDef()) {
2923  isSafe = true;
2924  break;
2925  }
2926  // The condition code is the operand just before CPSR, except for VSELs.
2927  ARMCC::CondCodes CC;
2928  bool IsInstrVSel = true;
2929  switch (Instr.getOpcode()) {
2930  default:
2931  IsInstrVSel = false;
2932  CC = (ARMCC::CondCodes)Instr.getOperand(IO - 1).getImm();
2933  break;
2934  case ARM::VSELEQD:
2935  case ARM::VSELEQS:
2936  CC = ARMCC::EQ;
2937  break;
2938  case ARM::VSELGTD:
2939  case ARM::VSELGTS:
2940  CC = ARMCC::GT;
2941  break;
2942  case ARM::VSELGED:
2943  case ARM::VSELGES:
2944  CC = ARMCC::GE;
2945  break;
2946  case ARM::VSELVSS:
2947  case ARM::VSELVSD:
2948  CC = ARMCC::VS;
2949  break;
2950  }
2951 
2952  if (SubAdd) {
2953  // If we have SUB(r1, r2) and CMP(r2, r1), the condition code based
2954  // on CMP needs to be updated to be based on SUB.
2955  // If we have ADD(r1, r2, X) and CMP(r1, r2), the condition code also
2956  // needs to be modified.
2957  // Push the condition code operands to OperandsToUpdate.
2958  // If it is safe to remove CmpInstr, the condition code of these
2959  // operands will be modified.
2960  unsigned Opc = SubAdd->getOpcode();
2961  bool IsSub = Opc == ARM::SUBrr || Opc == ARM::t2SUBrr ||
2962  Opc == ARM::SUBri || Opc == ARM::t2SUBri ||
2963  Opc == ARM::tSUBrr || Opc == ARM::tSUBi3 ||
2964  Opc == ARM::tSUBi8;
2965  unsigned OpI = Opc != ARM::tSUBrr ? 1 : 2;
2966  if (!IsSub ||
2967  (SrcReg2 != 0 && SubAdd->getOperand(OpI).getReg() == SrcReg2 &&
2968  SubAdd->getOperand(OpI + 1).getReg() == SrcReg)) {
2969  // VSel doesn't support condition code update.
2970  if (IsInstrVSel)
2971  return false;
2972  // Ensure we can swap the condition.
2973  ARMCC::CondCodes NewCC = (IsSub ? getSwappedCondition(CC) : getCmpToAddCondition(CC));
2974  if (NewCC == ARMCC::AL)
2975  return false;
2976  OperandsToUpdate.push_back(
2977  std::make_pair(&((*I).getOperand(IO - 1)), NewCC));
2978  }
2979  } else {
2980  // No SubAdd, so this is x = <op> y, z; cmp x, 0.
2981  switch (CC) {
2982  case ARMCC::EQ: // Z
2983  case ARMCC::NE: // Z
2984  case ARMCC::MI: // N
2985  case ARMCC::PL: // N
2986  case ARMCC::AL: // none
2987  // CPSR can be used multiple times, we should continue.
2988  break;
2989  case ARMCC::HS: // C
2990  case ARMCC::LO: // C
2991  case ARMCC::VS: // V
2992  case ARMCC::VC: // V
2993  case ARMCC::HI: // C Z
2994  case ARMCC::LS: // C Z
2995  case ARMCC::GE: // N V
2996  case ARMCC::LT: // N V
2997  case ARMCC::GT: // Z N V
2998  case ARMCC::LE: // Z N V
2999  // The instruction uses the V bit or C bit which is not safe.
3000  return false;
3001  }
3002  }
3003  }
3004  }
3005 
3006  // If CPSR is neither killed nor re-defined, we should check whether it is
3007  // live-out. If it is live-out, do not optimize.
3008  if (!isSafe) {
3009  MachineBasicBlock *MBB = CmpInstr.getParent();
3010  for (MachineBasicBlock::succ_iterator SI = MBB->succ_begin(),
3011  SE = MBB->succ_end(); SI != SE; ++SI)
3012  if ((*SI)->isLiveIn(ARM::CPSR))
3013  return false;
3014  }
3015 
3016  // Toggle the optional operand to CPSR (if it exists - in Thumb1 we always
3017  // set CPSR so this is represented as an explicit output)
3018  if (!IsThumb1) {
3019  MI->getOperand(5).setReg(ARM::CPSR);
3020  MI->getOperand(5).setIsDef(true);
3021  }
3022  assert(!isPredicated(*MI) && "Can't use flags from predicated instruction");
3023  CmpInstr.eraseFromParent();
3024 
3025  // Modify the condition code of operands in OperandsToUpdate.
3026  // Since we have SUB(r1, r2) and CMP(r2, r1), the condition code needs to
3027  // be changed from r2 > r1 to r1 < r2, from r2 < r1 to r1 > r2, etc.
3028  for (unsigned i = 0, e = OperandsToUpdate.size(); i < e; i++)
3029  OperandsToUpdate[i].first->setImm(OperandsToUpdate[i].second);
3030 
3031  MI->clearRegisterDeads(ARM::CPSR);
3032 
3033  return true;
3034 }
3035 
3036 bool ARMBaseInstrInfo::shouldSink(const MachineInstr &MI) const {
3037  // Do not sink MI if it might be used to optimize a redundant compare.
3038  // We heuristically only look at the instruction immediately following MI to
3039  // avoid potentially searching the entire basic block.
3040  if (isPredicated(MI))
3041  return true;
3042  MachineBasicBlock::const_iterator Next = &MI;
3043  ++Next;
3044  unsigned SrcReg, SrcReg2;
3045  int CmpMask, CmpValue;
3046  bool IsThumb1;
3047  if (Next != MI.getParent()->end() &&
3048  analyzeCompare(*Next, SrcReg, SrcReg2, CmpMask, CmpValue) &&
3049  isRedundantFlagInstr(&*Next, SrcReg, SrcReg2, CmpValue, &MI, IsThumb1))
3050  return false;
3051  return true;
3052 }
3053 
3054 bool ARMBaseInstrInfo::FoldImmediate(MachineInstr &UseMI, MachineInstr &DefMI,
3055  unsigned Reg,
3056  MachineRegisterInfo *MRI) const {
3057  // Fold large immediates into add, sub, or, xor.
3058  unsigned DefOpc = DefMI.getOpcode();
3059  if (DefOpc != ARM::t2MOVi32imm && DefOpc != ARM::MOVi32imm)
3060  return false;
3061  if (!DefMI.getOperand(1).isImm())
3062  // Could be t2MOVi32imm @xx
3063  return false;
3064 
3065  if (!MRI->hasOneNonDBGUse(Reg))
3066  return false;
3067 
3068  const MCInstrDesc &DefMCID = DefMI.getDesc();
3069  if (DefMCID.hasOptionalDef()) {
3070  unsigned NumOps = DefMCID.getNumOperands();
3071  const MachineOperand &MO = DefMI.getOperand(NumOps - 1);
3072  if (MO.getReg() == ARM::CPSR && !MO.isDead())
3073  // If DefMI defines CPSR and it is not dead, it's obviously not safe
3074  // to delete DefMI.
3075  return false;
3076  }
3077 
3078  const MCInstrDesc &UseMCID = UseMI.getDesc();
3079  if (UseMCID.hasOptionalDef()) {
3080  unsigned NumOps = UseMCID.getNumOperands();
3081  if (UseMI.getOperand(NumOps - 1).getReg() == ARM::CPSR)
3082  // If the instruction sets the flag, do not attempt this optimization
3083  // since it may change the semantics of the code.
3084  return false;
3085  }
3086 
3087  unsigned UseOpc = UseMI.getOpcode();
3088  unsigned NewUseOpc = 0;
3089  uint32_t ImmVal = (uint32_t)DefMI.getOperand(1).getImm();
3090  uint32_t SOImmValV1 = 0, SOImmValV2 = 0;
3091  bool Commute = false;
3092  switch (UseOpc) {
3093  default: return false;
3094  case ARM::SUBrr:
3095  case ARM::ADDrr:
3096  case ARM::ORRrr:
3097  case ARM::EORrr:
3098  case ARM::t2SUBrr:
3099  case ARM::t2ADDrr:
3100  case ARM::t2ORRrr:
3101  case ARM::t2EORrr: {
3102  Commute = UseMI.getOperand(2).getReg() != Reg;
3103  switch (UseOpc) {
3104  default: break;
3105  case ARM::ADDrr:
3106  case ARM::SUBrr:
3107  if (UseOpc == ARM::SUBrr && Commute)
3108  return false;
3109 
3110  // ADD/SUB are special because they're essentially the same operation, so
3111  // we can handle a larger range of immediates.
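  // For example (illustrative), an ADDrr whose second operand is a
  // MOVi32imm of 0x101 can be rewritten as two ADDri instructions using the
  // two-part SO-immediates 0x1 and 0x100.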
3112  if (ARM_AM::isSOImmTwoPartVal(ImmVal))
3113  NewUseOpc = UseOpc == ARM::ADDrr ? ARM::ADDri : ARM::SUBri;
3114  else if (ARM_AM::isSOImmTwoPartVal(-ImmVal)) {
3115  ImmVal = -ImmVal;
3116  NewUseOpc = UseOpc == ARM::ADDrr ? ARM::SUBri : ARM::ADDri;
3117  } else
3118  return false;
3119  SOImmValV1 = (uint32_t)ARM_AM::getSOImmTwoPartFirst(ImmVal);
3120  SOImmValV2 = (uint32_t)ARM_AM::getSOImmTwoPartSecond(ImmVal);
3121  break;
3122  case ARM::ORRrr:
3123  case ARM::EORrr:
3124  if (!ARM_AM::isSOImmTwoPartVal(ImmVal))
3125  return false;
3126  SOImmValV1 = (uint32_t)ARM_AM::getSOImmTwoPartFirst(ImmVal);
3127  SOImmValV2 = (uint32_t)ARM_AM::getSOImmTwoPartSecond(ImmVal);
3128  switch (UseOpc) {
3129  default: break;
3130  case ARM::ORRrr: NewUseOpc = ARM::ORRri; break;
3131  case ARM::EORrr: NewUseOpc = ARM::EORri; break;
3132  }
3133  break;
3134  case ARM::t2ADDrr:
3135  case ARM::t2SUBrr:
3136  if (UseOpc == ARM::t2SUBrr && Commute)
3137  return false;
3138 
3139  // ADD/SUB are special because they're essentially the same operation, so
3140  // we can handle a larger range of immediates.
3141  if (ARM_AM::isT2SOImmTwoPartVal(ImmVal))
3142  NewUseOpc = UseOpc == ARM::t2ADDrr ? ARM::t2ADDri : ARM::t2SUBri;
3143  else if (ARM_AM::isT2SOImmTwoPartVal(-ImmVal)) {
3144  ImmVal = -ImmVal;
3145  NewUseOpc = UseOpc == ARM::t2ADDrr ? ARM::t2SUBri : ARM::t2ADDri;
3146  } else
3147  return false;
3148  SOImmValV1 = (uint32_t)ARM_AM::getT2SOImmTwoPartFirst(ImmVal);
3149  SOImmValV2 = (uint32_t)ARM_AM::getT2SOImmTwoPartSecond(ImmVal);
3150  break;
3151  case ARM::t2ORRrr:
3152  case ARM::t2EORrr:
3153  if (!ARM_AM::isT2SOImmTwoPartVal(ImmVal))
3154  return false;
3155  SOImmValV1 = (uint32_t)ARM_AM::getT2SOImmTwoPartFirst(ImmVal);
3156  SOImmValV2 = (uint32_t)ARM_AM::getT2SOImmTwoPartSecond(ImmVal);
3157  switch (UseOpc) {
3158  default: break;
3159  case ARM::t2ORRrr: NewUseOpc = ARM::t2ORRri; break;
3160  case ARM::t2EORrr: NewUseOpc = ARM::t2EORri; break;
3161  }
3162  break;
3163  }
3164  }
3165  }
3166 
3167  unsigned OpIdx = Commute ? 2 : 1;
3168  unsigned Reg1 = UseMI.getOperand(OpIdx).getReg();
3169  bool isKill = UseMI.getOperand(OpIdx).isKill();
3170  unsigned NewReg = MRI->createVirtualRegister(MRI->getRegClass(Reg));
3171  BuildMI(*UseMI.getParent(), UseMI, UseMI.getDebugLoc(), get(NewUseOpc),
3172  NewReg)
3173  .addReg(Reg1, getKillRegState(isKill))
3174  .addImm(SOImmValV1)
3175  .add(predOps(ARMCC::AL))
3176  .add(condCodeOp());
3177  UseMI.setDesc(get(NewUseOpc));
3178  UseMI.getOperand(1).setReg(NewReg);
3179  UseMI.getOperand(1).setIsKill();
3180  UseMI.getOperand(2).ChangeToImmediate(SOImmValV2);
3181  DefMI.eraseFromParent();
3182  return true;
3183 }
3184 
3185 static unsigned getNumMicroOpsSwiftLdSt(const InstrItineraryData *ItinData,
3186  const MachineInstr &MI) {
3187  switch (MI.getOpcode()) {
3188  default: {
3189  const MCInstrDesc &Desc = MI.getDesc();
3190  int UOps = ItinData->getNumMicroOps(Desc.getSchedClass());
3191  assert(UOps >= 0 && "bad # UOps");
3192  return UOps;
3193  }
3194 
3195  case ARM::LDRrs:
3196  case ARM::LDRBrs:
3197  case ARM::STRrs:
3198  case ARM::STRBrs: {
3199  unsigned ShOpVal = MI.getOperand(3).getImm();
3200  bool isSub = ARM_AM::getAM2Op(ShOpVal) == ARM_AM::sub;
3201  unsigned ShImm = ARM_AM::getAM2Offset(ShOpVal);
3202  if (!isSub &&
3203  (ShImm == 0 ||
3204  ((ShImm == 1 || ShImm == 2 || ShImm == 3) &&
3205  ARM_AM::getAM2ShiftOpc(ShOpVal) == ARM_AM::lsl)))
3206  return 1;
3207  return 2;
3208  }
3209 
3210  case ARM::LDRH:
3211  case ARM::STRH: {
3212  if (!MI.getOperand(2).getReg())
3213  return 1;
3214 
3215  unsigned ShOpVal = MI.getOperand(3).getImm();
3216  bool isSub = ARM_AM::getAM2Op(ShOpVal) == ARM_AM::sub;
3217  unsigned ShImm = ARM_AM::getAM2Offset(ShOpVal);
3218  if (!isSub &&
3219  (ShImm == 0 ||
3220  ((ShImm == 1 || ShImm == 2 || ShImm == 3) &&
3221  ARM_AM::getAM2ShiftOpc(ShOpVal) == ARM_AM::lsl)))
3222  return 1;
3223  return 2;
3224  }
3225 
3226  case ARM::LDRSB:
3227  case ARM::LDRSH:
3228  return (ARM_AM::getAM3Op(MI.getOperand(3).getImm()) == ARM_AM::sub) ? 3 : 2;
3229 
3230  case ARM::LDRSB_POST:
3231  case ARM::LDRSH_POST: {
3232  unsigned Rt = MI.getOperand(0).getReg();
3233  unsigned Rm = MI.getOperand(3).getReg();
3234  return (Rt == Rm) ? 4 : 3;
3235  }
3236 
3237  case ARM::LDR_PRE_REG:
3238  case ARM::LDRB_PRE_REG: {
3239  unsigned Rt = MI.getOperand(0).getReg();
3240  unsigned Rm = MI.getOperand(3).getReg();
3241  if (Rt == Rm)
3242  return 3;
3243  unsigned ShOpVal = MI.getOperand(4).getImm();
3244  bool isSub = ARM_AM::getAM2Op(ShOpVal) == ARM_AM::sub;
3245  unsigned ShImm = ARM_AM::getAM2Offset(ShOpVal);
3246  if (!isSub &&
3247  (ShImm == 0 ||
3248  ((ShImm == 1 || ShImm == 2 || ShImm == 3) &&
3249  ARM_AM::getAM2ShiftOpc(ShOpVal) == ARM_AM::lsl)))
3250  return 2;
3251  return 3;
3252  }
3253 
3254  case ARM::STR_PRE_REG:
3255  case ARM::STRB_PRE_REG: {
3256  unsigned ShOpVal = MI.getOperand(4).getImm();
3257  bool isSub = ARM_AM::getAM2Op(ShOpVal) == ARM_AM::sub;
3258  unsigned ShImm = ARM_AM::getAM2Offset(ShOpVal);
3259  if (!isSub &&
3260  (ShImm == 0 ||
3261  ((ShImm == 1 || ShImm == 2 || ShImm == 3) &&
3262  ARM_AM::getAM2ShiftOpc(ShOpVal) == ARM_AM::lsl)))
3263  return 2;
3264  return 3;
3265  }
3266 
3267  case ARM::LDRH_PRE:
3268  case ARM::STRH_PRE: {
3269  unsigned Rt = MI.getOperand(0).getReg();
3270  unsigned Rm = MI.getOperand(3).getReg();
3271  if (!Rm)
3272  return 2;
3273  if (Rt == Rm)
3274  return 3;
3275  return (ARM_AM::getAM3Op(MI.getOperand(4).getImm()) == ARM_AM::sub) ? 3 : 2;
3276  }
3277 
3278  case ARM::LDR_POST_REG:
3279  case ARM::LDRB_POST_REG:
3280  case ARM::LDRH_POST: {
3281  unsigned Rt = MI.getOperand(0).getReg();
3282  unsigned Rm = MI.getOperand(3).getReg();
3283  return (Rt == Rm) ? 3 : 2;
3284  }
3285 
3286  case ARM::LDR_PRE_IMM:
3287  case ARM::LDRB_PRE_IMM:
3288  case ARM::LDR_POST_IMM:
3289  case ARM::LDRB_POST_IMM:
3290  case ARM::STRB_POST_IMM:
3291  case ARM::STRB_POST_REG:
3292  case ARM::STRB_PRE_IMM:
3293  case ARM::STRH_POST:
3294  case ARM::STR_POST_IMM:
3295  case ARM::STR_POST_REG:
3296  case ARM::STR_PRE_IMM:
3297  return 2;
3298 
3299  case ARM::LDRSB_PRE:
3300  case ARM::LDRSH_PRE: {
3301  unsigned Rm = MI.getOperand(3).getReg();
3302  if (Rm == 0)
3303  return 3;
3304  unsigned Rt = MI.getOperand(0).getReg();
3305  if (Rt == Rm)
3306  return 4;
3307  unsigned ShOpVal = MI.getOperand(4).getImm();
3308  bool isSub = ARM_AM::getAM2Op(ShOpVal) == ARM_AM::sub;
3309  unsigned ShImm = ARM_AM::getAM2Offset(ShOpVal);
3310  if (!isSub &&
3311  (ShImm == 0 ||
3312  ((ShImm == 1 || ShImm == 2 || ShImm == 3) &&
3313  ARM_AM::getAM2ShiftOpc(ShOpVal) == ARM_AM::lsl)))
3314  return 3;
3315  return 4;
3316  }
3317 
3318  case ARM::LDRD: {
3319  unsigned Rt = MI.getOperand(0).getReg();
3320  unsigned Rn = MI.getOperand(2).getReg();
3321  unsigned Rm = MI.getOperand(3).getReg();
3322  if (Rm)
3323  return (ARM_AM::getAM3Op(MI.getOperand(4).getImm()) == ARM_AM::sub) ? 4
3324  : 3;
3325  return (Rt == Rn) ? 3 : 2;
3326  }
3327 
3328  case ARM::STRD: {
3329  unsigned Rm = MI.getOperand(3).getReg();
3330  if (Rm)
3331  return (ARM_AM::getAM3Op(MI.getOperand(4).getImm()) == ARM_AM::sub) ? 4
3332  : 3;
3333  return 2;
3334  }
3335 
3336  case ARM::LDRD_POST:
3337  case ARM::t2LDRD_POST:
3338  return 3;
3339 
3340  case ARM::STRD_POST:
3341  case ARM::t2STRD_POST:
3342  return 4;
3343 
3344  case ARM::LDRD_PRE: {
3345  unsigned Rt = MI.getOperand(0).getReg();
3346  unsigned Rn = MI.getOperand(3).getReg();
3347  unsigned Rm = MI.getOperand(4).getReg();
3348  if (Rm)
3349  return (ARM_AM::getAM3Op(MI.getOperand(5).getImm()) == ARM_AM::sub) ? 5
3350  : 4;
3351  return (Rt == Rn) ? 4 : 3;
3352  }
3353 
3354  case ARM::t2LDRD_PRE: {
3355  unsigned Rt = MI.getOperand(0).getReg();
3356  unsigned Rn = MI.getOperand(3).getReg();
3357  return (Rt == Rn) ? 4 : 3;
3358  }
3359 
3360  case ARM::STRD_PRE: {
3361  unsigned Rm = MI.getOperand(4).getReg();
3362  if (Rm)
3363  return (ARM_AM::getAM3Op(MI.getOperand(5).getImm()) == ARM_AM::sub) ? 5
3364  : 4;
3365  return 3;
3366  }
3367 
3368  case ARM::t2STRD_PRE:
3369  return 3;
3370 
3371  case ARM::t2LDR_POST:
3372  case ARM::t2LDRB_POST:
3373  case ARM::t2LDRB_PRE:
3374  case ARM::t2LDRSBi12:
3375  case ARM::t2LDRSBi8:
3376  case ARM::t2LDRSBpci:
3377  case ARM::t2LDRSBs:
3378  case ARM::t2LDRH_POST:
3379  case ARM::t2LDRH_PRE:
3380  case ARM::t2LDRSBT:
3381  case ARM::t2LDRSB_POST:
3382  case ARM::t2LDRSB_PRE:
3383  case ARM::t2LDRSH_POST:
3384  case ARM::t2LDRSH_PRE:
3385  case ARM::t2LDRSHi12:
3386  case ARM::t2LDRSHi8:
3387  case ARM::t2LDRSHpci:
3388  case ARM::t2LDRSHs:
3389  return 2;
3390 
3391  case ARM::t2LDRDi8: {
3392  unsigned Rt = MI.getOperand(0).getReg();
3393  unsigned Rn = MI.getOperand(2).getReg();
3394  return (Rt == Rn) ? 3 : 2;
3395  }
3396 
3397  case ARM::t2STRB_POST:
3398  case ARM::t2STRB_PRE:
3399  case ARM::t2STRBs:
3400  case ARM::t2STRDi8:
3401  case ARM::t2STRH_POST:
3402  case ARM::t2STRH_PRE:
3403  case ARM::t2STRHs:
3404  case ARM::t2STR_POST:
3405  case ARM::t2STR_PRE:
3406  case ARM::t2STRs:
3407  return 2;
3408  }
3409 }
3410 
3411 // Return the number of 32-bit words loaded by LDM or stored by STM. If this
3412 // can't be easily determined return 0 (missing MachineMemOperand).
3413 //
3414 // FIXME: The current MachineInstr design does not support relying on machine
3415 // mem operands to determine the width of a memory access. Instead, we expect
3416 // the target to provide this information based on the instruction opcode and
3417 // operands. However, using MachineMemOperand is the best solution now for
3418 // two reasons:
3419 //
3420 // 1) getNumMicroOps tries to infer LDM memory width from the total number of MI
3421 // operands. This is much more dangerous than using the MachineMemOperand
3422 // sizes because CodeGen passes can insert/remove optional machine operands. In
3423 // fact, it's totally incorrect for preRA passes and appears to be wrong for
3424 // postRA passes as well.
3425 //
3426 // 2) getNumLDMAddresses is only used by the scheduling machine model and any
3427 // machine model that calls this should handle the unknown (zero size) case.
3428 //
3429 // Long term, we should require a target hook that verifies MachineMemOperand
3430 // sizes during MC lowering. That target hook should be local to MC lowering
3431 // because we can't ensure that it is aware of other MI forms. Doing this will
3432 // ensure that MachineMemOperands are correctly propagated through all passes.
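//
// For example (illustrative numbers, not taken from any itinerary): an LDMIA
// carrying four 4-byte MachineMemOperands sums to Size = 16 and reports
// 16 / 4 = 4 addresses, while an LDM with no memoperands attached reports 0
// and the machine model must treat the access width as unknown.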
3433 unsigned ARMBaseInstrInfo::getNumLDMAddresses(const MachineInstr &MI) const {
3434  unsigned Size = 0;
3435  for (MachineInstr::mmo_iterator I = MI.memoperands_begin(),
3436  E = MI.memoperands_end();
3437  I != E; ++I) {
3438  Size += (*I)->getSize();
3439  }
3440  // FIXME: The scheduler currently can't handle values larger than 16. But
3441  // the values can actually go up to 32 for floating-point load/store
3442  // multiple (VLDMIA etc.). Also, the way this code is reasoning about memory
3443  // operations isn't right; we could end up with "extra" memory operands for
3444  // various reasons, such as tail merging combining two memory operations.
3445  return std::min(Size / 4, 16U);
3446 }
3447 
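// Count micro-ops for a load/store multiple on a core that issues a single
// register per cycle and needs extra micro-ops for address generation,
// base-register writeback, and (for return forms) the write to the PC.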
3448 static unsigned getNumMicroOpsSingleIssuePlusExtras(unsigned Opc,
3449  unsigned NumRegs) {
3450  unsigned UOps = 1 + NumRegs; // 1 for address computation.
3451  switch (Opc) {
3452  default:
3453  break;
3454  case ARM::VLDMDIA_UPD:
3455  case ARM::VLDMDDB_UPD:
3456  case ARM::VLDMSIA_UPD:
3457  case ARM::VLDMSDB_UPD:
3458  case ARM::VSTMDIA_UPD:
3459  case ARM::VSTMDDB_UPD:
3460  case ARM::VSTMSIA_UPD:
3461  case ARM::VSTMSDB_UPD:
3462  case ARM::LDMIA_UPD:
3463  case ARM::LDMDA_UPD:
3464  case ARM::LDMDB_UPD:
3465  case ARM::LDMIB_UPD:
3466  case ARM::STMIA_UPD:
3467  case ARM::STMDA_UPD:
3468  case ARM::STMDB_UPD:
3469  case ARM::STMIB_UPD:
3470  case ARM::tLDMIA_UPD:
3471  case ARM::tSTMIA_UPD:
3472  case ARM::t2LDMIA_UPD:
3473  case ARM::t2LDMDB_UPD:
3474  case ARM::t2STMIA_UPD:
3475  case ARM::t2STMDB_UPD:
3476  ++UOps; // One for base register writeback.
3477  break;
3478  case ARM::LDMIA_RET:
3479  case ARM::tPOP_RET:
3480  case ARM::t2LDMIA_RET:
3481  UOps += 2; // One for base reg wb, one for write to pc.
3482  break;
3483  }
3484  return UOps;
3485 }
3486 
3487 unsigned ARMBaseInstrInfo::getNumMicroOps(const InstrItineraryData *ItinData,
3488  const MachineInstr &MI) const {
3489  if (!ItinData || ItinData->isEmpty())
3490  return 1;
3491 
3492  const MCInstrDesc &Desc = MI.getDesc();
3493  unsigned Class = Desc.getSchedClass();
3494  int ItinUOps = ItinData->getNumMicroOps(Class);
3495  if (ItinUOps >= 0) {
3496  if (Subtarget.isSwift() && (Desc.mayLoad() || Desc.mayStore()))
3497  return getNumMicroOpsSwiftLdSt(ItinData, MI);
3498 
3499  return ItinUOps;
3500  }
3501 
3502  unsigned Opc = MI.getOpcode();
3503  switch (Opc) {
3504  default:
3505  llvm_unreachable("Unexpected multi-uops instruction!");
3506  case ARM::VLDMQIA:
3507  case ARM::VSTMQIA:
3508  return 2;
3509 
3510  // The number of uOps for a load / store multiple is determined by the number
3511  // of registers.
3512  //
3513  // On Cortex-A8, each pair of register loads / stores can be scheduled on the
3514  // same cycle. The scheduling for the first load / store must be done
3515  // separately by assuming the address is not 64-bit aligned.
3516  //
3517  // On Cortex-A9, the formula is simply (#reg / 2) + (#reg % 2). If the address
3518  // is not 64-bit aligned, the AGU takes an extra cycle. For VFP / NEON
3519  // load / store multiple, the formula is (#reg / 2) + (#reg % 2) + 1.
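  //
  // For example (illustrative): a 5-register LDMIA on a Cortex-A9-like core
  // with a single 64-bit aligned memory operand needs 5 / 2 = 2 micro-ops plus
  // one for the odd register, i.e. 3, while a 5-register VLDMDIA needs
  // (5 / 2) + (5 % 2) + 1 = 4.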
3520  case ARM::VLDMDIA:
3521  case ARM::VLDMDIA_UPD:
3522  case ARM::VLDMDDB_UPD:
3523  case ARM::VLDMSIA:
3524  case ARM::VLDMSIA_UPD:
3525  case ARM::VLDMSDB_UPD:
3526  case ARM::VSTMDIA:
3527  case ARM::VSTMDIA_UPD:
3528  case ARM::VSTMDDB_UPD:
3529  case ARM::VSTMSIA:
3530  case ARM::VSTMSIA_UPD:
3531  case ARM::VSTMSDB_UPD: {
3532  unsigned NumRegs = MI.getNumOperands() - Desc.getNumOperands();
3533  return (NumRegs / 2) + (NumRegs % 2) + 1;
3534  }
3535 
3536  case ARM::LDMIA_RET:
3537  case ARM::LDMIA:
3538  case ARM::LDMDA:
3539  case ARM::LDMDB:
3540  case ARM::LDMIB:
3541  case ARM::LDMIA_UPD:
3542  case ARM::LDMDA_UPD:
3543  case ARM::LDMDB_UPD:
3544  case ARM::LDMIB_UPD:
3545  case ARM::STMIA:
3546  case ARM::STMDA:
3547  case ARM::STMDB:
3548  case ARM::STMIB:
3549  case ARM::STMIA_UPD:
3550  case ARM::STMDA_UPD:
3551  case ARM::STMDB_UPD:
3552  case ARM::STMIB_UPD:
3553  case ARM::tLDMIA:
3554  case ARM::tLDMIA_UPD:
3555  case ARM::tSTMIA_UPD:
3556  case ARM::tPOP_RET:
3557  case ARM::tPOP:
3558  case ARM::tPUSH:
3559  case ARM::t2LDMIA_RET:
3560  case ARM::t2LDMIA:
3561  case ARM::t2LDMDB:
3562  case ARM::t2LDMIA_UPD:
3563  case ARM::t2LDMDB_UPD:
3564  case ARM::t2STMIA:
3565  case ARM::t2STMDB:
3566  case ARM::t2STMIA_UPD:
3567  case ARM::t2STMDB_UPD: {
3568  unsigned NumRegs = MI.getNumOperands() - Desc.getNumOperands() + 1;
3569  switch (Subtarget.getLdStMultipleTiming()) {
3570  case ARMSubtarget::SingleIssuePlusExtras:
3571  return getNumMicroOpsSingleIssuePlusExtras(Opc, NumRegs);
3572  case ARMSubtarget::SingleIssue:
3573  // Assume the worst.
3574  return NumRegs;
3575  case ARMSubtarget::DoubleIssue: {
3576  if (NumRegs < 4)
3577  return 2;
3578  // 4 registers would be issued: 2, 2.
3579  // 5 registers would be issued: 2, 2, 1.
3580  unsigned UOps = (NumRegs / 2);
3581  if (NumRegs % 2)
3582  ++UOps;
3583  return UOps;
3584  }
3585  case ARMSubtarget::DoubleIssueCheckUnalignedAccess: {
3586  unsigned UOps = (NumRegs / 2);
3587  // If there is an odd number of registers or the access is not 64-bit aligned,
3588  // then it takes an extra AGU (Address Generation Unit) cycle.
3589  if ((NumRegs % 2) || !MI.hasOneMemOperand() ||
3590  (*MI.memoperands_begin())->getAlignment() < 8)
3591  ++UOps;
3592  return UOps;
3593  }
3594  }
3595  }
3596  }
3597  llvm_unreachable("Didn't find the number of microops");
3598 }
3599 
3600 int
3601 ARMBaseInstrInfo::getVLDMDefCycle(const InstrItineraryData *ItinData,
3602  const MCInstrDesc &DefMCID,
3603  unsigned DefClass,
3604  unsigned DefIdx, unsigned DefAlign) const {
3605  int RegNo = (int)(DefIdx+1) - DefMCID.getNumOperands() + 1;
3606  if (RegNo <= 0)
3607  // Def is the address writeback.
3608  return ItinData->getOperandCycle(DefClass, DefIdx);
3609 
3610  int DefCycle;
3611  if (Subtarget.isCortexA8() || Subtarget.isCortexA7()) {
3612  // (regno / 2) + (regno % 2) + 1
3613  DefCycle = RegNo / 2 + 1;
3614  if (RegNo % 2)
3615  ++DefCycle;
3616  } else if (Subtarget.isLikeA9() || Subtarget.isSwift()) {
3617  DefCycle = RegNo;
3618  bool isSLoad = false;
3619 
3620  switch (DefMCID.getOpcode()) {
3621  default: break;
3622  case ARM::VLDMSIA:
3623  case ARM::VLDMSIA_UPD:
3624  case ARM::VLDMSDB_UPD:
3625  isSLoad = true;
3626  break;
3627  }
3628 
3629  // If there is an odd number of 'S' registers or the access is not 64-bit
3630  // aligned, then it takes an extra cycle.
3631  if ((isSLoad && (RegNo % 2)) || DefAlign < 8)
3632  ++DefCycle;
3633  } else {
3634  // Assume the worst.
3635  DefCycle = RegNo + 2;
3636  }
3637 
3638  return DefCycle;
3639 }
3640 
3642  unsigned BaseReg = MI.getOperand(0).getReg();
3643  for (unsigned i = 1, sz = MI.getNumOperands(); i < sz; ++i) {
3644  const auto &Op = MI.getOperand(i);
3645  if (Op.isReg() && Op.getReg() == BaseReg)
3646  return true;
3647  }
3648  return false;
3649 }
3650 unsigned
3652  // ins GPR:$Rn, $p (2xOp), reglist:$regs, variable_ops
3653  // (outs GPR:$wb), (ins GPR:$Rn, $p (2xOp), reglist:$regs, variable_ops)
3654  return MI.getNumOperands() + 1 - MI.getDesc().getNumOperands();
3655 }
3656 
3657 int
3658 ARMBaseInstrInfo::getLDMDefCycle(const InstrItineraryData *ItinData,
3659  const MCInstrDesc &DefMCID,
3660  unsigned DefClass,
3661  unsigned DefIdx, unsigned DefAlign) const {
3662  int RegNo = (int)(DefIdx+1) - DefMCID.getNumOperands() + 1;
3663  if (RegNo <= 0)
3664  // Def is the address writeback.
3665  return ItinData->getOperandCycle(DefClass, DefIdx);
3666 
3667  int DefCycle;
3668  if (Subtarget.isCortexA8() || Subtarget.isCortexA7()) {
3669  // 4 registers would be issued: 1, 2, 1.
3670  // 5 registers would be issued: 1, 2, 2.
3671  DefCycle = RegNo / 2;
3672  if (DefCycle < 1)
3673  DefCycle = 1;
3674  // Result latency is issue cycle + 2: E2.
3675  DefCycle += 2;
3676  } else if (Subtarget.isLikeA9() || Subtarget.isSwift()) {
3677  DefCycle = (RegNo / 2);
3678  // If there is an odd number of registers or the access is not 64-bit aligned,
3679  // then it takes an extra AGU (Address Generation Unit) cycle.
3680  if ((RegNo % 2) || DefAlign < 8)
3681  ++DefCycle;
3682  // Result latency is AGU cycles + 2.
3683  DefCycle += 2;
3684  } else {
3685  // Assume the worst.
3686  DefCycle = RegNo + 2;
3687  }
3688 
3689  return DefCycle;
3690 }
3691 
3692 int
3693 ARMBaseInstrInfo::getVSTMUseCycle(const InstrItineraryData *ItinData,
3694  const MCInstrDesc &UseMCID,
3695  unsigned UseClass,
3696  unsigned UseIdx, unsigned UseAlign) const {
3697  int RegNo = (int)(UseIdx+1) - UseMCID.getNumOperands() + 1;
3698  if (RegNo <= 0)
3699  return ItinData->getOperandCycle(UseClass, UseIdx);
3700 
3701  int UseCycle;
3702  if (Subtarget.isCortexA8() || Subtarget.isCortexA7()) {
3703  // (regno / 2) + (regno % 2) + 1
3704  UseCycle = RegNo / 2 + 1;
3705  if (RegNo % 2)
3706  ++UseCycle;
3707  } else if (Subtarget.isLikeA9() || Subtarget.isSwift()) {
3708  UseCycle = RegNo;
3709  bool isSStore = false;
3710 
3711  switch (UseMCID.getOpcode()) {
3712  default: break;
3713  case ARM::VSTMSIA:
3714  case ARM::VSTMSIA_UPD:
3715  case ARM::VSTMSDB_UPD:
3716  isSStore = true;
3717  break;
3718  }
3719 
3720  // If there is an odd number of 'S' registers or the access is not 64-bit
3721  // aligned, then it takes an extra cycle.
3722  if ((isSStore && (RegNo % 2)) || UseAlign < 8)
3723  ++UseCycle;
3724  } else {
3725  // Assume the worst.
3726  UseCycle = RegNo + 2;
3727  }
3728 
3729  return UseCycle;
3730 }
3731 
3732 int
3733 ARMBaseInstrInfo::getSTMUseCycle(const InstrItineraryData *ItinData,
3734  const MCInstrDesc &UseMCID,
3735  unsigned UseClass,
3736  unsigned UseIdx, unsigned UseAlign) const {
3737  int RegNo = (int)(UseIdx+1) - UseMCID.getNumOperands() + 1;
3738  if (RegNo <= 0)
3739  return ItinData->getOperandCycle(UseClass, UseIdx);
3740 
3741  int UseCycle;
3742  if (Subtarget.isCortexA8() || Subtarget.isCortexA7()) {
3743  UseCycle = RegNo / 2;
3744  if (UseCycle < 2)
3745  UseCycle = 2;
3746  // Read in E3.
3747  UseCycle += 2;
3748  } else if (Subtarget.isLikeA9() || Subtarget.isSwift()) {
3749  UseCycle = (RegNo / 2);
3750  // If there is an odd number of registers or the access is not 64-bit aligned,
3751  // then it takes an extra AGU (Address Generation Unit) cycle.
3752  if ((RegNo % 2) || UseAlign < 8)
3753  ++UseCycle;
3754  } else {
3755  // Assume the worst.
3756  UseCycle = 1;
3757  }
3758  return UseCycle;
3759 }
3760 
3761 int
3762 ARMBaseInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
3763  const MCInstrDesc &DefMCID,
3764  unsigned DefIdx, unsigned DefAlign,
3765  const MCInstrDesc &UseMCID,
3766  unsigned UseIdx, unsigned UseAlign) const {
3767  unsigned DefClass = DefMCID.getSchedClass();
3768  unsigned UseClass = UseMCID.getSchedClass();
3769 
3770  if (DefIdx < DefMCID.getNumDefs() && UseIdx < UseMCID.getNumOperands())
3771  return ItinData->getOperandLatency(DefClass, DefIdx, UseClass, UseIdx);
3772 
3773  // This may be a def / use of a variable_ops instruction, in which case the
3774  // operand latency might only be determinable dynamically. Let the target try
3775  // to figure it out.
3776  int DefCycle = -1;
3777  bool LdmBypass = false;
3778  switch (DefMCID.getOpcode()) {
3779  default:
3780  DefCycle = ItinData->getOperandCycle(DefClass, DefIdx);
3781  break;
3782 
3783  case ARM::VLDMDIA:
3784  case ARM::VLDMDIA_UPD:
3785  case ARM::VLDMDDB_UPD:
3786  case ARM::VLDMSIA:
3787  case ARM::VLDMSIA_UPD:
3788  case ARM::VLDMSDB_UPD:
3789  DefCycle = getVLDMDefCycle(ItinData, DefMCID, DefClass, DefIdx, DefAlign);
3790  break;
3791 
3792  case ARM::LDMIA_RET:
3793  case ARM::LDMIA:
3794  case ARM::LDMDA:
3795  case ARM::LDMDB:
3796  case ARM::LDMIB:
3797  case ARM::LDMIA_UPD:
3798  case ARM::LDMDA_UPD:
3799  case ARM::LDMDB_UPD:
3800  case ARM::LDMIB_UPD:
3801  case ARM::tLDMIA:
3802  case ARM::tLDMIA_UPD:
3803  case ARM::tPUSH:
3804  case ARM::t2LDMIA_RET:
3805  case ARM::t2LDMIA:
3806  case ARM::t2LDMDB:
3807  case ARM::t2LDMIA_UPD:
3808  case ARM::t2LDMDB_UPD:
3809  LdmBypass = true;
3810  DefCycle = getLDMDefCycle(ItinData, DefMCID, DefClass, DefIdx, DefAlign);
3811  break;
3812  }
3813 
3814  if (DefCycle == -1)
3815  // We can't determine the result latency of the def; assume it's 2.
3816  DefCycle = 2;
3817 
3818  int UseCycle = -1;
3819  switch (UseMCID.getOpcode()) {
3820  default:
3821  UseCycle = ItinData->getOperandCycle(UseClass, UseIdx);
3822  break;
3823 
3824  case ARM::VSTMDIA:
3825  case ARM::VSTMDIA_UPD:
3826  case ARM::VSTMDDB_UPD:
3827  case ARM::VSTMSIA:
3828  case ARM::VSTMSIA_UPD:
3829  case ARM::VSTMSDB_UPD:
3830  UseCycle = getVSTMUseCycle(ItinData, UseMCID, UseClass, UseIdx, UseAlign);
3831  break;
3832 
3833  case ARM::STMIA:
3834  case ARM::STMDA:
3835  case ARM::STMDB:
3836  case ARM::STMIB:
3837  case ARM::STMIA_UPD:
3838  case ARM::STMDA_UPD:
3839  case ARM::STMDB_UPD:
3840  case ARM::STMIB_UPD:
3841  case ARM::tSTMIA_UPD:
3842  case ARM::tPOP_RET:
3843  case ARM::tPOP:
3844  case ARM::t2STMIA:
3845  case ARM::t2STMDB:
3846  case ARM::t2STMIA_UPD:
3847  case ARM::t2STMDB_UPD:
3848  UseCycle = getSTMUseCycle(ItinData, UseMCID, UseClass, UseIdx, UseAlign);
3849  break;
3850  }
3851 
3852  if (UseCycle == -1)
3853  // Assume it's read in the first stage.
3854  UseCycle = 1;
3855 
3856  UseCycle = DefCycle - UseCycle + 1;
3857  if (UseCycle > 0) {
3858  if (LdmBypass) {
3859  // It's a variable_ops instruction so we can't use DefIdx here. Just use the
3860  // first def operand.
3861  if (ItinData->hasPipelineForwarding(DefClass, DefMCID.getNumOperands()-1,
3862  UseClass, UseIdx))
3863  --UseCycle;
3864  } else if (ItinData->hasPipelineForwarding(DefClass, DefIdx,
3865  UseClass, UseIdx)) {
3866  --UseCycle;
3867  }
3868  }
3869 
3870  return UseCycle;
3871 }
3872 
3873 static const MachineInstr *getBundledDefMI(const TargetRegisterInfo *TRI,
3874  const MachineInstr *MI, unsigned Reg,
3875  unsigned &DefIdx, unsigned &Dist) {
3876  Dist = 0;
3877 
3878  MachineBasicBlock::const_iterator I = MI; ++I;
3879  MachineBasicBlock::const_instr_iterator II = std::prev(I.getInstrIterator());
3880  assert(II->isInsideBundle() && "Empty bundle?");
3881 
3882  int Idx = -1;
3883  while (II->isInsideBundle()) {
3884  Idx = II->findRegisterDefOperandIdx(Reg, false, true, TRI);
3885  if (Idx != -1)
3886  break;
3887  --II;
3888  ++Dist;
3889  }
3890 
3891  assert(Idx != -1 && "Cannot find bundled definition!");
3892  DefIdx = Idx;
3893  return &*II;
3894 }
3895 
3896 static const MachineInstr *getBundledUseMI(const TargetRegisterInfo *TRI,
3897  const MachineInstr &MI, unsigned Reg,
3898  unsigned &UseIdx, unsigned &Dist) {
3899  Dist = 0;
3900 
3901  MachineBasicBlock::const_instr_iterator II = ++MI.getIterator();
3902  assert(II->isInsideBundle() && "Empty bundle?");
3903  MachineBasicBlock::const_instr_iterator E = MI.getParent()->instr_end();
3904 
3905  // FIXME: This doesn't properly handle multiple uses.
3906  int Idx = -1;
3907  while (II != E && II->isInsideBundle()) {
3908  Idx = II->findRegisterUseOperandIdx(Reg, false, TRI);
3909  if (Idx != -1)
3910  break;
3911  if (II->getOpcode() != ARM::t2IT)
3912  ++Dist;
3913  ++II;
3914  }
3915 
3916  if (Idx == -1) {
3917  Dist = 0;
3918  return nullptr;
3919  }
3920 
3921  UseIdx = Idx;
3922  return &*II;
3923 }
3924 
3925 /// Return the number of cycles to add to (or subtract from) the static
3926 /// itinerary based on the def opcode and alignment. The caller will ensure that
3927 /// adjusted latency is at least one cycle.
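///
/// For example (illustrative): on Swift, an LDRrs whose shifter operand is
/// "lsl #2" with an add offset gets Adjust = -2, and a VLD1q8 whose address is
/// not 64-bit aligned gets Adjust = +1 on cores that check VLDn access
/// alignment.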
3928 static int adjustDefLatency(const ARMSubtarget &Subtarget,
3929  const MachineInstr &DefMI,
3930  const MCInstrDesc &DefMCID, unsigned DefAlign) {
3931  int Adjust = 0;
3932  if (Subtarget.isCortexA8() || Subtarget.isLikeA9() || Subtarget.isCortexA7()) {
3933  // FIXME: Shifter op hack: no shift (i.e. [r +/- r]) or [r + r << 2]
3934  // variants are one cycle cheaper.
3935  switch (DefMCID.getOpcode()) {
3936  default: break;
3937  case ARM::LDRrs:
3938  case ARM::LDRBrs: {
3939  unsigned ShOpVal = DefMI.getOperand(3).getImm();
3940  unsigned ShImm = ARM_AM::getAM2Offset(ShOpVal);
3941  if (ShImm == 0 ||
3942  (ShImm == 2 && ARM_AM::getAM2ShiftOpc(ShOpVal) == ARM_AM::lsl))
3943  --Adjust;
3944  break;
3945  }
3946  case ARM::t2LDRs:
3947  case ARM::t2LDRBs:
3948  case ARM::t2LDRHs:
3949  case ARM::t2LDRSHs: {
3950  // Thumb2 mode: lsl only.
3951  unsigned ShAmt = DefMI.getOperand(3).getImm();
3952  if (ShAmt == 0 || ShAmt == 2)
3953  --Adjust;
3954  break;
3955  }
3956  }
3957  } else if (Subtarget.isSwift()) {
3958  // FIXME: Properly handle all of the latency adjustments for address
3959  // writeback.
3960  switch (DefMCID.getOpcode()) {
3961  default: break;
3962  case ARM::LDRrs:
3963  case ARM::LDRBrs: {
3964  unsigned ShOpVal = DefMI.getOperand(3).getImm();
3965  bool isSub = ARM_AM::getAM2Op(ShOpVal) == ARM_AM::sub;
3966  unsigned ShImm = ARM_AM::getAM2Offset(ShOpVal);
3967  if (!isSub &&
3968  (ShImm == 0 ||
3969  ((ShImm == 1 || ShImm == 2 || ShImm == 3) &&
3970  ARM_AM::getAM2ShiftOpc(ShOpVal) == ARM_AM::lsl)))
3971  Adjust -= 2;
3972  else if (!isSub &&
3973  ShImm == 1 && ARM_AM::getAM2ShiftOpc(ShOpVal) == ARM_AM::lsr)
3974  --Adjust;
3975  break;
3976  }
3977  case ARM::t2LDRs:
3978  case ARM::t2LDRBs:
3979  case ARM::t2LDRHs:
3980  case ARM::t2LDRSHs: {
3981  // Thumb2 mode: lsl only.
3982  unsigned ShAmt = DefMI.getOperand(3).getImm();
3983  if (ShAmt == 0 || ShAmt == 1 || ShAmt == 2 || ShAmt == 3)
3984  Adjust -= 2;
3985  break;
3986  }
3987  }
3988  }
3989 
3990  if (DefAlign < 8 && Subtarget.checkVLDnAccessAlignment()) {
3991  switch (DefMCID.getOpcode()) {
3992  default: break;
3993  case ARM::VLD1q8:
3994  case ARM::VLD1q16:
3995  case ARM::VLD1q32:
3996  case ARM::VLD1q64:
3997  case ARM::VLD1q8wb_fixed:
3998  case ARM::VLD1q16wb_fixed:
3999  case ARM::VLD1q32wb_fixed:
4000  case ARM::VLD1q64wb_fixed:
4001  case ARM::VLD1q8wb_register:
4002  case ARM::VLD1q16wb_register:
4003  case ARM::VLD1q32wb_register:
4004  case ARM::VLD1q64wb_register:
4005  case ARM::VLD2d8:
4006  case ARM::VLD2d16:
4007  case ARM::VLD2d32:
4008  case ARM::VLD2q8:
4009  case ARM::VLD2q16:
4010  case ARM::VLD2q32:
4011  case ARM::VLD2d8wb_fixed:
4012  case ARM::VLD2d16wb_fixed:
4013  case ARM::VLD2d32wb_fixed:
4014  case ARM::VLD2q8wb_fixed:
4015  case ARM::VLD2q16wb_fixed:
4016  case ARM::VLD2q32wb_fixed:
4017  case ARM::VLD2d8wb_register:
4018  case ARM::VLD2d16wb_register:
4019  case ARM::VLD2d32wb_register:
4020  case ARM::VLD2q8wb_register:
4021  case ARM::VLD2q16wb_register:
4022  case ARM::VLD2q32wb_register:
4023  case ARM::VLD3d8:
4024  case ARM::VLD3d16:
4025  case ARM::VLD3d32:
4026  case ARM::VLD1d64T:
4027  case ARM::VLD3d8_UPD:
4028  case ARM::VLD3d16_UPD:
4029  case ARM::VLD3d32_UPD:
4030  case ARM::VLD1d64Twb_fixed:
4031  case ARM::VLD1d64Twb_register:
4032  case ARM::VLD3q8_UPD:
4033  case ARM::VLD3q16_UPD:
4034  case ARM::VLD3q32_UPD:
4035  case ARM::VLD4d8:
4036  case ARM::VLD4d16:
4037  case ARM::VLD4d32:
4038  case ARM::VLD1d64Q:
4039  case ARM::VLD4d8_UPD:
4040  case ARM::VLD4d16_UPD:
4041  case ARM::VLD4d32_UPD:
4042  case ARM::VLD1d64Qwb_fixed:
4043  case ARM::VLD1d64Qwb_register:
4044  case ARM::VLD4q8_UPD:
4045  case ARM::VLD4q16_UPD:
4046  case ARM::VLD4q32_UPD:
4047  case ARM::VLD1DUPq8:
4048  case ARM::VLD1DUPq16:
4049  case ARM::VLD1DUPq32:
4050  case ARM::VLD1DUPq8wb_fixed:
4051  case ARM::VLD1DUPq16wb_fixed:
4052  case ARM::VLD1DUPq32wb_fixed:
4053  case ARM::VLD1DUPq8wb_register:
4054  case ARM::VLD1DUPq16wb_register:
4055  case ARM::VLD1DUPq32wb_register:
4056  case ARM::VLD2DUPd8:
4057  case ARM::VLD2DUPd16:
4058  case ARM::VLD2DUPd32:
4059  case ARM::VLD2DUPd8wb_fixed:
4060  case ARM::VLD2DUPd16wb_fixed:
4061  case ARM::VLD2DUPd32wb_fixed:
4062  case ARM::VLD2DUPd8wb_register:
4063  case ARM::VLD2DUPd16wb_register:
4064  case ARM::VLD2DUPd32wb_register:
4065  case ARM::VLD4DUPd8:
4066  case ARM::VLD4DUPd16:
4067  case ARM::VLD4DUPd32:
4068  case ARM::VLD4DUPd8_UPD:
4069  case ARM::VLD4DUPd16_UPD:
4070  case ARM::VLD4DUPd32_UPD:
4071  case ARM::VLD1LNd8:
4072  case ARM::VLD1LNd16:
4073  case ARM::VLD1LNd32:
4074  case ARM::VLD1LNd8_UPD:
4075  case ARM::VLD1LNd16_UPD:
4076  case ARM::VLD1LNd32_UPD:
4077  case ARM::VLD2LNd8:
4078  case ARM::VLD2LNd16:
4079  case ARM::VLD2LNd32:
4080  case ARM::VLD2LNq16:
4081  case ARM::VLD2LNq32:
4082  case ARM::VLD2LNd8_UPD:
4083  case ARM::VLD2LNd16_UPD:
4084  case ARM::VLD2LNd32_UPD:
4085  case ARM::VLD2LNq16_UPD:
4086  case ARM::VLD2LNq32_UPD:
4087  case ARM::VLD4LNd8:
4088  case ARM::VLD4LNd16:
4089  case ARM::VLD4LNd32:
4090  case ARM::VLD4LNq16:
4091  case ARM::VLD4LNq32:
4092  case ARM::VLD4LNd8_UPD:
4093  case ARM::VLD4LNd16_UPD:
4094  case ARM::VLD4LNd32_UPD:
4095  case ARM::VLD4LNq16_UPD:
4096  case ARM::VLD4LNq32_UPD:
4097  // If the address is not 64-bit aligned, the latencies of these
4098  // instructions increase by one.
4099  ++Adjust;
4100  break;
4101  }
4102  }
4103  return Adjust;
4104 }
4105 
4106 int ARMBaseInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
4107  const MachineInstr &DefMI,
4108  unsigned DefIdx,
4109  const MachineInstr &UseMI,
4110  unsigned UseIdx) const {
4111  // No operand latency. The caller may fall back to getInstrLatency.
4112  if (!ItinData || ItinData->isEmpty())
4113  return -1;
4114 
4115  const MachineOperand &DefMO = DefMI.getOperand(DefIdx);
4116  unsigned Reg = DefMO.getReg();
4117 
4118  const MachineInstr *ResolvedDefMI = &DefMI;
4119  unsigned DefAdj = 0;
4120  if (DefMI.isBundle())
4121  ResolvedDefMI =
4122  getBundledDefMI(&getRegisterInfo(), &DefMI, Reg, DefIdx, DefAdj);
4123  if (ResolvedDefMI->isCopyLike() || ResolvedDefMI->isInsertSubreg() ||
4124  ResolvedDefMI->isRegSequence() || ResolvedDefMI->isImplicitDef()) {
4125  return 1;
4126  }
4127 
4128  const MachineInstr *ResolvedUseMI = &UseMI;
4129  unsigned UseAdj = 0;
4130  if (UseMI.isBundle()) {
4131  ResolvedUseMI =
4132  getBundledUseMI(&getRegisterInfo(), UseMI, Reg, UseIdx, UseAdj);
4133  if (!ResolvedUseMI)
4134  return -1;
4135  }
4136 
4137  return getOperandLatencyImpl(
4138  ItinData, *ResolvedDefMI, DefIdx, ResolvedDefMI->getDesc(), DefAdj, DefMO,
4139  Reg, *ResolvedUseMI, UseIdx, ResolvedUseMI->getDesc(), UseAdj);
4140 }
4141 
4142 int ARMBaseInstrInfo::getOperandLatencyImpl(
4143  const InstrItineraryData *ItinData, const MachineInstr &DefMI,
4144  unsigned DefIdx, const MCInstrDesc &DefMCID, unsigned DefAdj,
4145  const MachineOperand &DefMO, unsigned Reg, const MachineInstr &UseMI,
4146  unsigned UseIdx, const MCInstrDesc &UseMCID, unsigned UseAdj) const {
4147  if (Reg == ARM::CPSR) {
4148  if (DefMI.getOpcode() == ARM::FMSTAT) {
4149  // fpscr -> cpsr stalls over 20 cycles on A8 (and earlier?)
4150  return Subtarget.isLikeA9() ? 1 : 20;
4151  }
4152 
4153  // CPSR set and branch can be paired in the same cycle.
4154  if (UseMI.isBranch())
4155  return 0;
4156 
4157  // Otherwise it takes the instruction latency (generally one).
4158  unsigned Latency = getInstrLatency(ItinData, DefMI);
4159 
4160  // For Thumb2 and -Os, prefer scheduling the CPSR-setting instruction close to
4161  // its uses. Instructions which are otherwise scheduled between them may
4162  // incur a code size penalty (not being able to use the CPSR-setting 16-bit
4163  // instructions).
4164  if (Latency > 0 && Subtarget.isThumb2()) {
4165  const MachineFunction *MF = DefMI.getParent()->getParent();
4166  // FIXME: Use Function::hasOptSize().
4167  if (MF->getFunction().hasFnAttribute(Attribute::OptimizeForSize))
4168  --Latency;
4169  }
4170  return Latency;
4171  }
4172 
4173  if (DefMO.isImplicit() || UseMI.getOperand(UseIdx).isImplicit())
4174  return -1;
4175 
4176  unsigned DefAlign = DefMI.hasOneMemOperand()
4177  ? (*DefMI.memoperands_begin())->getAlignment()
4178  : 0;
4179  unsigned UseAlign = UseMI.hasOneMemOperand()
4180  ? (*UseMI.memoperands_begin())->getAlignment()
4181  : 0;
4182 
4183  // Get the itinerary's latency if possible, and handle variable_ops.
4184  int Latency = getOperandLatency(ItinData, DefMCID, DefIdx, DefAlign, UseMCID,
4185  UseIdx, UseAlign);
4186  // Unable to find operand latency. The caller may resort to getInstrLatency.
4187  if (Latency < 0)
4188  return Latency;
4189 
4190  // Adjust for IT block position.
4191  int Adj = DefAdj + UseAdj;
4192 
4193  // Adjust for dynamic def-side opcode variants not captured by the itinerary.
4194  Adj += adjustDefLatency(Subtarget, DefMI, DefMCID, DefAlign);
4195  if (Adj >= 0 || (int)Latency > -Adj) {
4196  return Latency + Adj;
4197  }
4198  // Return the itinerary latency, which may be zero but not less than zero.
4199  return Latency;
4200 }
4201 
4202 int
4203 ARMBaseInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
4204  SDNode *DefNode, unsigned DefIdx,
4205  SDNode *UseNode, unsigned UseIdx) const {
4206  if (!DefNode->isMachineOpcode())
4207  return 1;
4208 
4209  const MCInstrDesc &DefMCID = get(DefNode->getMachineOpcode());
4210 
4211  if (isZeroCost(DefMCID.Opcode))
4212  return 0;
4213 
4214  if (!ItinData || ItinData->isEmpty())
4215  return DefMCID.mayLoad() ? 3 : 1;
4216 
4217  if (!UseNode->isMachineOpcode()) {
4218  int Latency = ItinData->getOperandCycle(DefMCID.getSchedClass(), DefIdx);
4219  int Adj = Subtarget.getPreISelOperandLatencyAdjustment();
4220  int Threshold = 1 + Adj;
4221  return Latency <= Threshold ? 1 : Latency - Adj;
4222  }
4223 
4224  const MCInstrDesc &UseMCID = get(UseNode->getMachineOpcode());
4225  const MachineSDNode *DefMN = dyn_cast<MachineSDNode>(DefNode);
4226  unsigned DefAlign = !DefMN->memoperands_empty()
4227  ? (*DefMN->memoperands_begin())->getAlignment() : 0;
4228  const MachineSDNode *UseMN = dyn_cast<MachineSDNode>(UseNode);
4229  unsigned UseAlign = !UseMN->memoperands_empty()
4230  ? (*UseMN->memoperands_begin())->getAlignment() : 0;
4231  int Latency = getOperandLatency(ItinData, DefMCID, DefIdx, DefAlign,
4232  UseMCID, UseIdx, UseAlign);
4233 
4234  if (Latency > 1 &&
4235  (Subtarget.isCortexA8() || Subtarget.isLikeA9() ||
4236  Subtarget.isCortexA7())) {
4237  // FIXME: Shifter op hack: no shift (i.e. [r +/- r]) or [r + r << 2]
4238  // variants are one cycle cheaper.
4239  switch (DefMCID.getOpcode()) {
4240  default: break;
4241  case ARM::LDRrs:
4242  case ARM::LDRBrs: {
4243  unsigned ShOpVal =
4244  cast<ConstantSDNode>(DefNode->getOperand(2))->getZExtValue();
4245  unsigned ShImm = ARM_AM::getAM2Offset(ShOpVal);
4246  if (ShImm == 0 ||
4247  (ShImm == 2 && ARM_AM::getAM2ShiftOpc(ShOpVal) == ARM_AM::lsl))
4248  --Latency;
4249  break;
4250  }
4251  case ARM::t2LDRs:
4252  case ARM::t2LDRBs:
4253  case ARM::t2LDRHs:
4254  case ARM::t2LDRSHs: {
4255  // Thumb2 mode: lsl only.
4256  unsigned ShAmt =
4257  cast<ConstantSDNode>(DefNode->getOperand(2))->getZExtValue();
4258  if (ShAmt == 0 || ShAmt == 2)
4259  --Latency;
4260  break;
4261  }
4262  }
4263  } else if (DefIdx == 0 && Latency > 2 && Subtarget.isSwift()) {
4264  // FIXME: Properly handle all of the latency adjustments for address
4265  // writeback.
4266  switch (DefMCID.getOpcode()) {
4267  default: break;
4268  case ARM::LDRrs:
4269  case ARM::LDRBrs: {
4270  unsigned ShOpVal =
4271  cast<ConstantSDNode>(DefNode->getOperand(2))->getZExtValue();
4272  unsigned ShImm = ARM_AM::getAM2Offset(ShOpVal);
4273  if (ShImm == 0 ||
4274  ((ShImm == 1 || ShImm == 2 || ShImm == 3) &&
4275  ARM_AM::getAM2ShiftOpc(ShOpVal) == ARM_AM::lsl))
4276  Latency -= 2;
4277  else if (ShImm == 1 && ARM_AM::getAM2ShiftOpc(ShOpVal) == ARM_AM::lsr)
4278  --Latency;
4279  break;
4280  }
4281  case ARM::t2LDRs:
4282  case ARM::t2LDRBs:
4283  case ARM::t2LDRHs:
4284  case ARM::t2LDRSHs:
4285  // Thumb2 mode: lsl 0-3 only.
4286  Latency -= 2;
4287  break;
4288  }
4289  }
4290 
4291  if (DefAlign < 8 && Subtarget.checkVLDnAccessAlignment())
4292  switch (DefMCID.getOpcode()) {
4293  default: break;
4294  case ARM::VLD1q8:
4295  case ARM::VLD1q16:
4296  case ARM::VLD1q32:
4297  case ARM::VLD1q64:
4298  case ARM::VLD1q8wb_register:
4299  case ARM::VLD1q16wb_register:
4300  case ARM::VLD1q32wb_register:
4301  case ARM::VLD1q64wb_register:
4302  case ARM::VLD1q8wb_fixed:
4303  case ARM::VLD1q16wb_fixed:
4304  case ARM::VLD1q32wb_fixed:
4305  case ARM::VLD1q64wb_fixed:
4306  case ARM::VLD2d8:
4307  case ARM::VLD2d16:
4308  case ARM::VLD2d32:
4309  case ARM::VLD2q8Pseudo:
4310  case ARM::VLD2q16Pseudo:
4311  case ARM::VLD2q32Pseudo:
4312  case ARM::VLD2d8wb_fixed:
4313  case ARM::VLD2d16wb_fixed:
4314  case ARM::VLD2d32wb_fixed:
4315  case ARM::VLD2q8PseudoWB_fixed:
4316  case ARM::VLD2q16PseudoWB_fixed:
4317  case ARM::VLD2q32PseudoWB_fixed:
4318  case ARM::VLD2d8wb_register:
4319  case ARM::VLD2d16wb_register:
4320  case ARM::VLD2d32wb_register:
4321  case ARM::VLD2q8PseudoWB_register:
4322  case ARM::VLD2q16PseudoWB_register:
4323  case ARM::VLD2q32PseudoWB_register:
4324  case ARM::VLD3d8Pseudo:
4325  case ARM::VLD3d16Pseudo:
4326  case ARM::VLD3d32Pseudo:
4327  case ARM::VLD1d8TPseudo:
4328  case ARM::VLD1d16TPseudo:
4329  case ARM::VLD1d32TPseudo:
4330  case ARM::VLD1d64TPseudo:
4331  case ARM::VLD1d64TPseudoWB_fixed:
4332  case ARM::VLD1d64TPseudoWB_register:
4333  case ARM::VLD3d8Pseudo_UPD:
4334  case ARM::VLD3d16Pseudo_UPD:
4335  case ARM::VLD3d32Pseudo_UPD:
4336  case ARM::VLD3q8Pseudo_UPD:
4337  case ARM::VLD3q16Pseudo_UPD:
4338  case ARM::VLD3q32Pseudo_UPD:
4339  case ARM::VLD3q8oddPseudo:
4340  case ARM::VLD3q16oddPseudo:
4341  case ARM::VLD3q32oddPseudo:
4342  case ARM::VLD3q8oddPseudo_UPD:
4343  case ARM::VLD3q16oddPseudo_UPD:
4344  case ARM::VLD3q32oddPseudo_UPD:
4345  case ARM::VLD4d8Pseudo:
4346  case ARM::VLD4d16Pseudo:
4347  case ARM::VLD4d32Pseudo:
4348  case ARM::VLD1d8QPseudo:
4349  case ARM::VLD1d16QPseudo:
4350  case ARM::VLD1d32QPseudo:
4351  case ARM::VLD1d64QPseudo:
4352  case ARM::VLD1d64QPseudoWB_fixed:
4353  case ARM::VLD1d64QPseudoWB_register:
4354  case ARM::VLD1q8HighQPseudo:
4355  case ARM::VLD1q8LowQPseudo_UPD:
4356  case ARM::VLD1q8HighTPseudo:
4357  case ARM::VLD1q8LowTPseudo_UPD:
4358  case ARM::VLD1q16HighQPseudo:
4359  case ARM::VLD1q16LowQPseudo_UPD:
4360  case ARM::VLD1q16HighTPseudo:
4361  case ARM::VLD1q16LowTPseudo_UPD:
4362  case ARM::VLD1q32HighQPseudo:
4363  case ARM::VLD1q32LowQPseudo_UPD:
4364  case ARM::VLD1q32HighTPseudo:
4365  case ARM::VLD1q32LowTPseudo_UPD:
4366  case ARM::VLD1q64HighQPseudo:
4367  case ARM::VLD1q64LowQPseudo_UPD:
4368  case ARM::VLD1q64HighTPseudo:
4369  case ARM::VLD1q64LowTPseudo_UPD:
4370  case ARM::VLD4d8Pseudo_UPD:
4371  case ARM::VLD4d16Pseudo_UPD:
4372  case ARM::VLD4d32Pseudo_UPD:
4373  case ARM::VLD4q8Pseudo_UPD:
4374  case ARM::VLD4q16Pseudo_UPD:
4375  case ARM::VLD4q32Pseudo_UPD:
4376  case ARM::VLD4q8oddPseudo:
4377  case ARM::VLD4q16oddPseudo:
4378  case ARM::VLD4q32oddPseudo:
4379  case ARM::VLD4q8oddPseudo_UPD:
4380  case ARM::VLD4q16oddPseudo_UPD:
4381  case ARM::VLD4q32oddPseudo_UPD:
4382  case ARM::VLD1DUPq8:
4383  case ARM::VLD1DUPq16:
4384  case ARM::VLD1DUPq32:
4385  case ARM::VLD1DUPq8wb_fixed:
4386  case ARM::VLD1DUPq16wb_fixed:
4387  case ARM::VLD1DUPq32wb_fixed:
4388  case ARM::VLD1DUPq8wb_register:
4389  case ARM::VLD1DUPq16wb_register:
4390  case ARM::VLD1DUPq32wb_register:
4391  case ARM::VLD2DUPd8:
4392  case ARM::VLD2DUPd16:
4393  case ARM::VLD2DUPd32:
4394  case ARM::VLD2DUPd8wb_fixed:
4395  case ARM::VLD2DUPd16wb_fixed:
4396  case ARM::VLD2DUPd32wb_fixed:
4397  case ARM::VLD2DUPd8wb_register:
4398  case ARM::VLD2DUPd16wb_register:
4399  case ARM::VLD2DUPd32wb_register:
4400  case ARM::VLD2DUPq8EvenPseudo:
4401  case ARM::VLD2DUPq8OddPseudo:
4402  case ARM::VLD2DUPq16EvenPseudo:
4403  case ARM::VLD2DUPq16OddPseudo:
4404  case ARM::VLD2DUPq32EvenPseudo:
4405  case ARM::VLD2DUPq32OddPseudo:
4406  case ARM::VLD3DUPq8EvenPseudo:
4407  case ARM::VLD3DUPq8OddPseudo:
4408  case ARM::VLD3DUPq16EvenPseudo:
4409  case ARM::VLD3DUPq16OddPseudo:
4410  case ARM::VLD3DUPq32EvenPseudo:
4411  case ARM::VLD3DUPq32OddPseudo:
4412  case ARM::VLD4DUPd8Pseudo:
4413  case ARM::VLD4DUPd16Pseudo:
4414  case ARM::VLD4DUPd32Pseudo:
4415  case ARM::VLD4DUPd8Pseudo_UPD:
4416  case ARM::VLD4DUPd16Pseudo_UPD:
4417  case ARM::VLD4DUPd32Pseudo_UPD:
4418  case ARM::VLD4DUPq8EvenPseudo:
4419  case ARM::VLD4DUPq8OddPseudo:
4420  case ARM::VLD4DUPq16EvenPseudo:
4421  case ARM::VLD4DUPq16OddPseudo:
4422  case ARM::VLD4DUPq32EvenPseudo:
4423  case ARM::VLD4DUPq32OddPseudo:
4424  case ARM::VLD1LNq8Pseudo:
4425  case ARM::VLD1LNq16Pseudo:
4426  case ARM::VLD1LNq32Pseudo:
4427  case ARM::VLD1LNq8Pseudo_UPD:
4428  case ARM::VLD1LNq16Pseudo_UPD:
4429  case ARM::VLD1LNq32Pseudo_UPD:
4430  case ARM::VLD2LNd8Pseudo:
4431  case ARM::VLD2LNd16Pseudo:
4432  case ARM::VLD2LNd32Pseudo:
4433  case ARM::VLD2LNq16Pseudo:
4434  case ARM::VLD2LNq32Pseudo:
4435  case ARM::VLD2LNd8Pseudo_UPD:
4436  case ARM::VLD2LNd16Pseudo_UPD:
4437  case ARM::VLD2LNd32Pseudo_UPD:
4438  case ARM::VLD2LNq16Pseudo_UPD:
4439  case ARM::VLD2LNq32Pseudo_UPD:
4440  case ARM::VLD4LNd8Pseudo:
4441  case ARM::VLD4LNd16Pseudo:
4442  case ARM::VLD4LNd32Pseudo:
4443  case ARM::VLD4LNq16Pseudo:
4444  case ARM::VLD4LNq32Pseudo:
4445  case ARM::VLD4LNd8Pseudo_UPD:
4446  case ARM::VLD4LNd16Pseudo_UPD:
4447  case ARM::VLD4LNd32Pseudo_UPD:
4448  case ARM::VLD4LNq16Pseudo_UPD:
4449  case ARM::VLD4LNq32Pseudo_UPD:
4450  // If the address is not 64-bit aligned, the latencies of these
4451  // instructions increase by one.
4452  ++Latency;
4453  break;
4454  }
4455 
4456  return Latency;
4457 }
4458 
4459 unsigned ARMBaseInstrInfo::getPredicationCost(const MachineInstr &MI) const {
4460  if (MI.isCopyLike() || MI.isInsertSubreg() || MI.isRegSequence() ||
4461  MI.isImplicitDef())
4462  return 0;
4463 
4464  if (MI.isBundle())
4465  return 0;
4466 
4467  const MCInstrDesc &MCID = MI.getDesc();
4468 
4469  if (MCID.isCall() || (MCID.hasImplicitDefOfPhysReg(ARM::CPSR) &&
4470  !Subtarget.cheapPredicableCPSRDef())) {
4471  // When predicated, CPSR is an additional source operand for CPSR updating
4472  // instructions, which apparently increases their latencies.
4473  return 1;
4474  }
4475  return 0;
4476 }
4477 
4478 unsigned ARMBaseInstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
4479  const MachineInstr &MI,
4480  unsigned *PredCost) const {
4481  if (MI.isCopyLike() || MI.isInsertSubreg() || MI.isRegSequence() ||
4482  MI.isImplicitDef())
4483  return 1;
4484 
4485  // An instruction scheduler typically runs on unbundled instructions; however,
4486  // other passes may query the latency of a bundled instruction.
4487  if (MI.isBundle()) {
4488  unsigned Latency = 0;
4489  MachineBasicBlock::const_instr_iterator I = MI.getIterator();
4490  MachineBasicBlock::const_instr_iterator E = MI.getParent()->instr_end();
4491  while (++I != E && I->isInsideBundle()) {
4492  if (I->getOpcode() != ARM::t2IT)
4493  Latency += getInstrLatency(ItinData, *I, PredCost);
4494  }
4495  return Latency;
4496  }
4497 
4498  const MCInstrDesc &MCID = MI.getDesc();
4499  if (PredCost && (MCID.isCall() || (MCID.hasImplicitDefOfPhysReg(ARM::CPSR) &&
4500  !Subtarget.cheapPredicableCPSRDef()))) {
4501  // When predicated, CPSR is an additional source operand for CPSR updating
4502  // instructions, which apparently increases their latencies.
4503  *PredCost = 1;
4504  }
4505  // Be sure to call getStageLatency for an empty itinerary in case it has a
4506  // valid MinLatency property.
4507  if (!ItinData)
4508  return MI.mayLoad() ? 3 : 1;
4509 
4510  unsigned Class = MCID.getSchedClass();
4511 
4512  // For instructions with variable uops, use uops as latency.
4513  if (!ItinData->isEmpty() && ItinData->getNumMicroOps(Class) < 0)
4514  return getNumMicroOps(ItinData, MI);
4515 
4516  // For the common case, fall back on the itinerary's latency.
4517  unsigned Latency = ItinData->getStageLatency(Class);
4518 
4519  // Adjust for dynamic def-side opcode variants not captured by the itinerary.
4520  unsigned DefAlign =
4521  MI.hasOneMemOperand() ? (*MI.memoperands_begin())->getAlignment() : 0;
4522  int Adj = adjustDefLatency(Subtarget, MI, MCID, DefAlign);
4523  if (Adj >= 0 || (int)Latency > -Adj) {
4524  return Latency + Adj;
4525  }
4526  return Latency;
4527 }
4528 
4529 int ARMBaseInstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
4530  SDNode *Node) const {
4531  if (!Node->isMachineOpcode())
4532  return 1;
4533 
4534  if (!ItinData || ItinData->isEmpty())
4535  return 1;
4536 
4537  unsigned Opcode = Node->getMachineOpcode();
4538  switch (Opcode) {
4539  default:
4540  return ItinData->getStageLatency(get(Opcode).getSchedClass());
4541  case ARM::VLDMQIA:
4542  case ARM::VSTMQIA:
4543  return 2;
4544  }
4545 }
4546 
4547 bool ARMBaseInstrInfo::hasHighOperandLatency(const TargetSchedModel &SchedModel,
4548  const MachineRegisterInfo *MRI,
4549  const MachineInstr &DefMI,
4550  unsigned DefIdx,
4551  const MachineInstr &UseMI,
4552  unsigned UseIdx) const {
4553  unsigned DDomain = DefMI.getDesc().TSFlags & ARMII::DomainMask;
4554  unsigned UDomain = UseMI.getDesc().TSFlags & ARMII::DomainMask;
4555  if (Subtarget.nonpipelinedVFP() &&
4556  (DDomain == ARMII::DomainVFP || UDomain == ARMII::DomainVFP))
4557  return true;
4558 
4559  // Hoist VFP / NEON instructions with 4 or higher latency.
4560  unsigned Latency =
4561  SchedModel.computeOperandLatency(&DefMI, DefIdx, &UseMI, UseIdx);
4562  if (Latency <= 3)
4563  return false;
4564  return DDomain == ARMII::DomainVFP || DDomain == ARMII::DomainNEON ||
4565  UDomain == ARMII::DomainVFP || UDomain == ARMII::DomainNEON;
4566 }
4567 
4568 bool ARMBaseInstrInfo::hasLowDefLatency(const TargetSchedModel &SchedModel,
4569  const MachineInstr &DefMI,
4570  unsigned DefIdx) const {
4571  const InstrItineraryData *ItinData = SchedModel.getInstrItineraries();
4572  if (!ItinData || ItinData->isEmpty())
4573  return false;
4574 
4575  unsigned DDomain = DefMI.getDesc().TSFlags & ARMII::DomainMask;
4576  if (DDomain == ARMII::DomainGeneral) {
4577  unsigned DefClass = DefMI.getDesc().getSchedClass();
4578  int DefCycle = ItinData->getOperandCycle(DefClass, DefIdx);
4579  return (DefCycle != -1 && DefCycle <= 2);
4580  }
4581  return false;
4582 }
4583 
4584 bool ARMBaseInstrInfo::verifyInstruction(const MachineInstr &MI,
4585  StringRef &ErrInfo) const {
4586  if (convertAddSubFlagsOpcode(MI.getOpcode())) {
4587  ErrInfo = "Pseudo flag setting opcodes only exist in Selection DAG";
4588  return false;
4589  }
4590  if (MI.getOpcode() == ARM::tMOVr && !Subtarget.hasV6Ops()) {
4591  // Make sure we don't generate a lo-lo mov that isn't supported.
4592  if (!ARM::hGPRRegClass.contains(MI.getOperand(0).getReg()) &&
4593  !ARM::hGPRRegClass.contains(MI.getOperand(1).getReg())) {
4594  ErrInfo = "Non-flag-setting Thumb1 mov is v6-only";
4595  return false;
4596  }
4597  }
4598  if (MI.getOpcode() == ARM::tPUSH ||
4599  MI.getOpcode() == ARM::tPOP ||
4600  MI.getOpcode() == ARM::tPOP_RET) {
4601  for (int i = 2, e = MI.getNumOperands(); i < e; ++i) {
4602  if (MI.getOperand(i).isImplicit() ||
4603  !MI.getOperand(i).isReg())
4604  continue;
4605  unsigned Reg = MI.getOperand(i).getReg();
4606  if (Reg < ARM::R0 || Reg > ARM::R7) {
4607  if (!(MI.getOpcode() == ARM::tPUSH && Reg == ARM::LR) &&
4608  !(MI.getOpcode() == ARM::tPOP_RET && Reg == ARM::PC)) {
4609  ErrInfo = "Unsupported register in Thumb1 push/pop";
4610  return false;
4611  }
4612  }
4613  }
4614  }
4615  return true;
4616 }
4617 
4618 // LoadStackGuard has so far only been implemented for MachO. A different code
4619 // sequence is needed for other targets.
4620 void ARMBaseInstrInfo::expandLoadStackGuardBase(MachineBasicBlock::iterator MI,
4621  unsigned LoadImmOpc,
4622  unsigned LoadOpc) const {
4623  assert(!Subtarget.isROPI() && !Subtarget.isRWPI() &&
4624  "ROPI/RWPI not currently supported with stack guard");
4625 
4626  MachineBasicBlock &MBB = *MI->getParent();
4627  DebugLoc DL = MI->getDebugLoc();
4628  unsigned Reg = MI->getOperand(0).getReg();
4629  const GlobalValue *GV =
4630  cast<GlobalValue>((*MI->memoperands_begin())->getValue());
4631  MachineInstrBuilder MIB;
4632 
4633  BuildMI(MBB, MI, DL, get(LoadImmOpc), Reg)
4634  .addGlobalAddress(GV, 0, ARMII::MO_NONLAZY);
4635 
4636  if (Subtarget.isGVIndirectSymbol(GV)) {
4637  MIB = BuildMI(MBB, MI, DL, get(LoadOpc), Reg);
4638  MIB.addReg(Reg, RegState::Kill).addImm(0);
4639  auto Flags = MachineMemOperand::MOLoad |
4640  MachineMemOperand::MODereferenceable |
4641  MachineMemOperand::MOInvariant;
4642  MachineMemOperand *MMO = MBB.getParent()->getMachineMemOperand(
4643  MachinePointerInfo::getGOT(*MBB.getParent()), Flags, 4, 4);
4644  MIB.addMemOperand(MMO).add(predOps(ARMCC::AL));
4645  }
4646 
4647  MIB = BuildMI(MBB, MI, DL, get(LoadOpc), Reg);
4648  MIB.addReg(Reg, RegState::Kill)
4649  .addImm(0)
4650  .cloneMemRefs(*MI)
4651  .add(predOps(ARMCC::AL));
4652 }
4653 
4654 bool
4655 ARMBaseInstrInfo::isFpMLxInstruction(unsigned Opcode, unsigned &MulOpc,
4656  unsigned &AddSubOpc,
4657  bool &NegAcc, bool &HasLane) const {
4658  DenseMap<unsigned, unsigned>::const_iterator I = MLxEntryMap.find(Opcode);
4659  if (I == MLxEntryMap.end())
4660  return false;
4661 
4662  const ARM_MLxEntry &Entry = ARM_MLxTable[I->second];
4663  MulOpc = Entry.MulOpc;
4664  AddSubOpc = Entry.AddSubOpc;
4665  NegAcc = Entry.NegAcc;
4666  HasLane = Entry.HasLane;
4667  return true;
4668 }
4669 
4670 //===----------------------------------------------------------------------===//
4671 // Execution domains.
4672 //===----------------------------------------------------------------------===//
4673 //
4674 // Some instructions go down the NEON pipeline, some go down the VFP pipeline,
4675 // and some can go down both. The vmov instructions go down the VFP pipeline,
4676 // but they can be changed to vorr equivalents that are executed by the NEON
4677 // pipeline.
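//
// For example, "vmov.f64 d0, d1" (VMOVD) can be rewritten as the NEON
// "vorr d0, d1, d1" (VORRd) so that it executes in the NEON domain instead of
// the VFP domain; setExecutionDomain below performs that rewrite.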
4678 //
4679 // We use the following execution domain numbering:
4680 //
4681 enum ARMExeDomain {
4682  ExeGeneric = 0,
4683  ExeVFP = 1,
4684  ExeNEON = 2
4685 };
4686 
4687 //
4688 // Also see ARMInstrFormats.td and Domain* enums in ARMBaseInfo.h
4689 //
4690 std::pair<uint16_t, uint16_t>
4691 ARMBaseInstrInfo::getExecutionDomain(const MachineInstr &MI) const {
4692  // If we don't have access to NEON instructions then we won't be able
4693  // to swizzle anything to the NEON domain. Check to make sure.
4694  if (Subtarget.hasNEON()) {
4695  // VMOVD, VMOVRS and VMOVSR are VFP instructions, but can be changed to NEON
4696  // if they are not predicated.
4697  if (MI.getOpcode() == ARM::VMOVD && !isPredicated(MI))
4698  return std::make_pair(ExeVFP, (1 << ExeVFP) | (1 << ExeNEON));
4699 
4700  // Cortex-A9 is particularly picky about mixing the two and wants these
4701  // converted.
4702  if (Subtarget.useNEONForFPMovs() && !isPredicated(MI) &&
4703  (MI.getOpcode() == ARM::VMOVRS || MI.getOpcode() == ARM::VMOVSR ||
4704  MI.getOpcode() == ARM::VMOVS))
4705  return std::make_pair(ExeVFP, (1 << ExeVFP) | (1 << ExeNEON));
4706  }
4707  // No other instructions can be swizzled, so just determine their domain.
4708  unsigned Domain = MI.getDesc().TSFlags & ARMII::DomainMask;
4709 
4710  if (Domain & ARMII::DomainNEON)
4711  return std::make_pair(ExeNEON, 0);
4712 
4713  // Certain instructions can go either way on Cortex-A8.
4714  // Treat them as NEON instructions.
4715  if ((Domain & ARMII::DomainNEONA8) && Subtarget.isCortexA8())
4716  return std::make_pair(ExeNEON, 0);
4717 
4718  if (Domain & ARMII::DomainVFP)
4719  return std::make_pair(ExeVFP, 0);
4720 
4721  return std::make_pair(ExeGeneric, 0);
4722 }
4723 
4724 static unsigned getCorrespondingDRegAndLane(const TargetRegisterInfo *TRI,
4725  unsigned SReg, unsigned &Lane) {
4726  unsigned DReg = TRI->getMatchingSuperReg(SReg, ARM::ssub_0, &ARM::DPRRegClass);
4727  Lane = 0;
4728 
4729  if (DReg != ARM::NoRegister)
4730  return DReg;
4731 
4732  Lane = 1;
4733  DReg = TRI->getMatchingSuperReg(SReg, ARM::ssub_1, &ARM::DPRRegClass);
4734 
4735  assert(DReg && "S-register with no D super-register?");
4736  return DReg;
4737 }
4738 
4739 /// getImplicitSPRUseForDPRUse - Given a use of a DPR register and lane,
4740 /// set ImplicitSReg to a register number that must be marked as implicit-use or
4741 /// zero if no register needs to be defined as implicit-use.
4742 ///
4743 /// If the function cannot determine if an SPR should be marked implicit use or
4744 /// not, it returns false.
4745 ///
4746 /// This function handles cases where an instruction is being modified from taking
4747 /// an SPR to a DPR[Lane]. A use of the DPR is being added, which may conflict
4748 /// with an earlier def of an SPR corresponding to DPR[Lane^1] (i.e. the other
4749 /// lane of the DPR).
4750 ///
4751 /// If the other SPR is defined, an implicit-use of it should be added.
4752 /// Otherwise (including the case where the DPR itself is defined), it should not.
4753 ///
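///
/// For example (illustrative): when %s1 = VMOVSR %r0 is rewritten below as a
/// VSETLNi32 writing d0[1], an earlier def of %s0 (the other lane of d0) is
/// kept alive by adding %s0 as an implicit-use, so that def is not treated as
/// dead.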
4754 static bool getImplicitSPRUseForDPRUse(const TargetRegisterInfo *TRI,
4755  MachineInstr &MI, unsigned DReg,
4756  unsigned Lane, unsigned &ImplicitSReg) {
4757  // If the DPR is defined or used already, the other SPR lane will be chained
4758  // correctly, so there is nothing to be done.
4759  if (MI.definesRegister(DReg, TRI) || MI.readsRegister(DReg, TRI)) {
4760  ImplicitSReg = 0;
4761  return true;
4762  }
4763 
4764  // Otherwise we need to go searching to see if the SPR is set explicitly.
4765  ImplicitSReg = TRI->getSubReg(DReg,
4766  (Lane & 1) ? ARM::ssub_0 : ARM::ssub_1);
4767  MachineBasicBlock::LivenessQueryResult LQR =
4768  MI.getParent()->computeRegisterLiveness(TRI, ImplicitSReg, MI);
4769 
4770  if (LQR == MachineBasicBlock::LQR_Live)
4771  return true;
4772  else if (LQR == MachineBasicBlock::LQR_Unknown)
4773  return false;
4774 
4775  // If the register is known not to be live, there is no need to add an
4776  // implicit-use.
4777  ImplicitSReg = 0;
4778  return true;
4779 }
4780 
4781 void ARMBaseInstrInfo::setExecutionDomain(MachineInstr &MI,
4782  unsigned Domain) const {
4783  unsigned DstReg, SrcReg, DReg;
4784  unsigned Lane;
4785  MachineInstrBuilder MIB(*MI.getParent()->getParent(), MI);
4786  const TargetRegisterInfo *TRI = &getRegisterInfo();
4787  switch (MI.getOpcode()) {
4788  default:
4789  llvm_unreachable("cannot handle opcode!");
4790  break;
4791  case ARM::VMOVD:
4792  if (Domain != ExeNEON)
4793  break;
4794 
4795  // Zap the predicate operands.
4796  assert(!isPredicated(MI) && "Cannot predicate a VORRd");
4797 
4798  // Make sure we've got NEON instructions.
4799  assert(Subtarget.hasNEON() && "VORRd requires NEON");
4800 
4801  // Source instruction is %DDst = VMOVD %DSrc, 14, %noreg (; implicits)
4802  DstReg = MI.getOperand(0).getReg();
4803  SrcReg = MI.getOperand(1).getReg();
4804 
4805  for (unsigned i = MI.getDesc().getNumOperands(); i; --i)
4806  MI.RemoveOperand(i - 1);
4807 
4808  // Change to a %DDst = VORRd %DSrc, %DSrc, 14, %noreg (; implicits)
4809  MI.setDesc(get(ARM::VORRd));
4810  MIB.addReg(DstReg, RegState::Define)
4811  .addReg(SrcReg)
4812  .addReg(SrcReg)
4813  .add(predOps(ARMCC::AL));
4814  break;
4815  case ARM::VMOVRS:
4816  if (Domain != ExeNEON)
4817  break;
4818  assert(!isPredicated(MI) && "Cannot predicate a VGETLN");
4819 
4820  // Source instruction is %RDst = VMOVRS %SSrc, 14, %noreg (; implicits)
4821  DstReg = MI.getOperand(0).getReg();
4822  SrcReg = MI.getOperand(1).getReg();
4823 
4824  for (unsigned i = MI.getDesc().getNumOperands(); i; --i)
4825  MI.RemoveOperand(i - 1);
4826 
4827  DReg = getCorrespondingDRegAndLane(TRI, SrcReg, Lane);
4828 
4829  // Convert to %RDst = VGETLNi32 %DSrc, Lane, 14, %noreg (; imps)
4830  // Note that DSrc has been widened and the other lane may be undef, which
4831  // contaminates the entire register.
4832  MI.setDesc(get(ARM::VGETLNi32));
4833  MIB.addReg(DstReg, RegState::Define)
4834  .addReg(DReg, RegState::Undef)
4835  .addImm(Lane)
4836  .add(predOps(ARMCC::AL));
4837 
4838  // The old source should be an implicit use, otherwise we might think it
4839  // was dead before here.
4840  MIB.addReg(SrcReg, RegState::Implicit);
4841  break;
4842  case ARM::VMOVSR: {
4843  if (Domain != ExeNEON)
4844  break;
4845  assert(!isPredicated(MI) && "Cannot predicate a VSETLN");
4846 
4847  // Source instruction is %SDst = VMOVSR %RSrc, 14, %noreg (; implicits)
4848  DstReg = MI.getOperand(0).getReg();
4849  SrcReg = MI.getOperand(1).getReg();
4850 
4851  DReg = getCorrespondingDRegAndLane(TRI, DstReg, Lane);
4852 
4853  unsigned ImplicitSReg;
4854  if (!getImplicitSPRUseForDPRUse(TRI, MI, DReg, Lane, ImplicitSReg))
4855  break;
4856 
4857  for (unsigned i = MI.getDesc().getNumOperands(); i; --i)
4858  MI.RemoveOperand(i - 1);
4859 
4860  // Convert to %DDst = VSETLNi32 %DDst, %RSrc, Lane, 14, %noreg (; imps)
4861  // Again DDst may be undefined at the beginning of this instruction.
4862  MI.setDesc(get(ARM::VSETLNi32));
4863  MIB.addReg(DReg, RegState::Define)
4864  .addReg(DReg, getUndefRegState(!MI.readsRegister(DReg, TRI)))
4865  .addReg(SrcReg)
4866  .addImm(Lane)
4867  .add(predOps(ARMCC::AL));
4868 
4869  // The narrower destination must be marked as set to keep previous chains
4870  // in place.
4871  MIB.addReg(DstReg, RegState::Define | RegState::Implicit);
4872  if (ImplicitSReg != 0)
4873  MIB.addReg(ImplicitSReg, RegState::Implicit);
4874  break;
4875  }
4876  case ARM::VMOVS: {
4877  if (Domain != ExeNEON)
4878  break;
4879 
4880  // Source instruction is %SDst = VMOVS %SSrc, 14, %noreg (; implicits)
4881  DstReg = MI.getOperand(0).getReg();
4882  SrcReg = MI.getOperand(1).getReg();
4883 
4884  unsigned DstLane = 0, SrcLane = 0, DDst, DSrc;
4885  DDst = getCorrespondingDRegAndLane(TRI, DstReg, DstLane);
4886  DSrc = getCorrespondingDRegAndLane(TRI, SrcReg, SrcLane);
4887 
4888  unsigned ImplicitSReg;
4889  if (!getImplicitSPRUseForDPRUse(TRI, MI, DSrc, SrcLane, ImplicitSReg))
4890  break;
4891 
4892  for (unsigned i = MI.getDesc().getNumOperands(); i; --i)
4893  MI.RemoveOperand(i - 1);
4894 
4895  if (DSrc == DDst) {
4896  // Destination can be:
4897  // %DDst = VDUPLN32d %DDst, Lane, 14, %noreg (; implicits)
4898  MI.setDesc(get(ARM::VDUPLN32d));
4899  MIB.addReg(DDst, RegState::Define)
4900  .addReg(DDst, getUndefRegState(!MI.readsRegister(DDst, TRI)))
4901  .addImm(SrcLane)
4902  .add(predOps(ARMCC::AL));
4903 
4904  // Neither the source nor the destination is naturally represented any
4905  // more, so add them in manually.
4906  MIB.addReg(DstReg, RegState::Implicit | RegState::Define);
4907  MIB.addReg(SrcReg, RegState::Implicit);
4908  if (ImplicitSReg != 0)
4909  MIB.addReg(ImplicitSReg, RegState::Implicit);
4910  break;
4911  }
4912 
4913  // In general there's no single instruction that can perform an S <-> S
4914  // move in NEON space, but a pair of VEXT instructions *can* do the
4915  // job. It turns out that the VEXTs needed will only use DSrc once, with
4916  // the position based purely on the combination of lane-0 and lane-1
4917  // involved. For example
4918  // vmov s0, s2 -> vext.32 d0, d0, d1, #1 vext.32 d0, d0, d0, #1
4919  // vmov s1, s3 -> vext.32 d0, d1, d0, #1 vext.32 d0, d0, d0, #1
4920  // vmov s0, s3 -> vext.32 d0, d0, d0, #1 vext.32 d0, d1, d0, #1
4921  // vmov s1, s2 -> vext.32 d0, d0, d0, #1 vext.32 d0, d0, d1, #1
4922  //
4923  // Pattern of the MachineInstrs is:
4924  // %DDst = VEXTd32 %DSrc1, %DSrc2, Lane, 14, %noreg (;implicits)
4925  MachineInstrBuilder NewMIB;
4926  NewMIB = BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), get(ARM::VEXTd32),
4927  DDst);
4928 
4929  // On the first instruction, both DSrc and DDst may be undef if present,
4930  // specifically when the original instruction didn't have them as an
4931  // <imp-use>.
4932  unsigned CurReg = SrcLane == 1 && DstLane == 1 ? DSrc : DDst;
4933  bool CurUndef = !MI.readsRegister(CurReg, TRI);
4934  NewMIB.addReg(CurReg, getUndefRegState(CurUndef));
4935 
4936  CurReg = SrcLane == 0 && DstLane == 0 ? DSrc : DDst;
4937  CurUndef = !MI.readsRegister(CurReg, TRI);
4938  NewMIB.addReg(CurReg, getUndefRegState(CurUndef))
4939  .addImm(1)
4940  .add(predOps(ARMCC::AL));
4941 
4942  if (SrcLane == DstLane)
4943  NewMIB.addReg(SrcReg, RegState::Implicit);
4944 
4945  MI.setDesc(get(ARM::VEXTd32));
4946  MIB.addReg(DDst, RegState::Define);
4947 
4948  // On the second instruction, DDst has definitely been defined above, so
4949  // it is not undef. DSrc, if present, can be undef as above.
4950  CurReg = SrcLane == 1 && DstLane == 0 ? DSrc : DDst;
4951  CurUndef = CurReg == DSrc && !MI.readsRegister(CurReg, TRI);
4952  MIB.addReg(CurReg, getUndefRegState(CurUndef));
4953 
4954  CurReg = SrcLane == 0 && DstLane == 1 ? DSrc : DDst;
4955  CurUndef = CurReg == DSrc && !MI.readsRegister(CurReg, TRI);
4956  MIB.addReg(CurReg, getUndefRegState(CurUndef))
4957  .addImm(1)
4958  .add(predOps(ARMCC::AL));
4959 
4960  if (SrcLane != DstLane)
4961  MIB.addReg(SrcReg, RegState::Implicit);
4962 
4963  // As before, the original destination is no longer represented, so add it
4964  // back implicitly.
4965  MIB.addReg(DstReg, RegState::Define | RegState::Implicit);
4966  if (ImplicitSReg != 0)
4967  MIB.addReg(ImplicitSReg, RegState::Implicit);
4968  break;
4969  }
4970  }
4971 }
4972 
4973 //===----------------------------------------------------------------------===//
4974 // Partial register updates
4975 //===----------------------------------------------------------------------===//
4976 //
4977 // Swift renames NEON registers with 64-bit granularity. That means any
4978 // instruction writing an S-reg implicitly reads the containing D-reg. The
4979 // problem is mostly avoided by translating f32 operations to v2f32 operations
4980 // on D-registers, but f32 loads are still a problem.
4981 //
4982 // These instructions can load an f32 into a NEON register:
4983 //
4984 // VLDRS - Only writes S, partial D update.
4985 // VLD1LNd32 - Writes all D-regs, explicit partial D update, 2 uops.
4986 // VLD1DUPd32 - Writes all D-regs, no partial reg update, 2 uops.
4987 //
4988 // FCONSTD can be used as a dependency-breaking instruction.
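//
// For example (illustrative): "vldr s2, [r0]" (VLDRS) only writes s2, so on
// Swift it carries a false dependency on the last writer of d1. If that write
// is too recent, breakPartialRegDependency below inserts "vmov.f64 d1, #0.5"
// (FCONSTD d1, #96) to sever the dependency before the load.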
4989 unsigned ARMBaseInstrInfo::getPartialRegUpdateClearance(
4990  const MachineInstr &MI, unsigned OpNum,
4991  const TargetRegisterInfo *TRI) const {
4992  auto PartialUpdateClearance = Subtarget.getPartialUpdateClearance();
4993  if (!PartialUpdateClearance)
4994  return 0;
4995 
4996  assert(TRI && "Need TRI instance");
4997 
4998  const MachineOperand &MO = MI.getOperand(OpNum);
4999  if (MO.readsReg())
5000  return 0;
5001  unsigned Reg = MO.getReg();
5002  int UseOp = -1;
5003 
5004  switch (MI.getOpcode()) {
5005  // Normal instructions writing only an S-register.
5006  case ARM::VLDRS:
5007  case ARM::FCONSTS:
5008  case ARM::VMOVSR:
5009  case ARM::VMOVv8i8:
5010  case ARM::VMOVv4i16:
5011  case ARM::VMOVv2i32:
5012  case ARM::VMOVv2f32:
5013  case ARM::VMOVv1i64:
5014  UseOp = MI.findRegisterUseOperandIdx(Reg, false, TRI);
5015  break;
5016 
5017  // Explicitly reads the dependency.
5018  case ARM::VLD1LNd32:
5019  UseOp = 3;
5020  break;
5021  default:
5022  return 0;
5023  }
5024 
5025  // If this instruction actually reads a value from Reg, there is no unwanted
5026  // dependency.
5027  if (UseOp != -1 && MI.getOperand(UseOp).readsReg())
5028  return 0;
5029 
5030  // We must be able to clobber the whole D-reg.
5031  if (TargetRegisterInfo::isVirtualRegister(Reg)) {
5032  // Virtual register must be a def undef foo:ssub_0 operand.
5033  if (!MO.getSubReg() || MI.readsVirtualRegister(Reg))
5034  return 0;
5035  } else if (ARM::SPRRegClass.contains(Reg)) {
5036  // Physical register: MI must define the full D-reg.
5037  unsigned DReg = TRI->getMatchingSuperReg(Reg, ARM::ssub_0,
5038  &ARM::DPRRegClass);
5039  if (!DReg || !MI.definesRegister(DReg, TRI))
5040  return 0;
5041  }
5042 
5043  // MI has an unwanted D-register dependency.
5044  // Avoid defs in the previous N instructions.
5045  return PartialUpdateClearance;
5046 }
5047 
5048 // Break a partial register dependency after getPartialRegUpdateClearance
5049 // returned non-zero.
5050 void ARMBaseInstrInfo::breakPartialRegDependency(
5051  MachineInstr &MI, unsigned OpNum, const TargetRegisterInfo *TRI) const {
5052  assert(OpNum < MI.getDesc().getNumDefs() && "OpNum is not a def");
5053  assert(TRI && "Need TRI instance");
5054 
5055  const MachineOperand &MO = MI.getOperand(OpNum);
5056  unsigned Reg = MO.getReg();
5057  assert(TargetRegisterInfo::isPhysicalRegister(Reg) &&
5058  "Can't break virtual register dependencies.");
5059  unsigned DReg = Reg;
5060 
5061  // If MI defines an S-reg, find the corresponding D super-register.
5062  if (ARM::SPRRegClass.contains(Reg)) {
5063  DReg = ARM::D0 + (Reg - ARM::S0) / 2;
5064  assert(TRI->isSuperRegister(Reg, DReg) && "Register enums broken");
5065  }
5066 
5067  assert(ARM::DPRRegClass.contains(DReg) && "Can only break D-reg deps");
5068  assert(MI.definesRegister(DReg, TRI) && "MI doesn't clobber full D-reg");
5069 
5070  // FIXME: In some cases, VLDRS can be changed to a VLD1DUPd32 which defines
5071  // the full D-register by loading the same value to both lanes. The
5072  // instruction is micro-coded with 2 uops, so don't do this until we can
5073  // properly schedule micro-coded instructions. The dispatcher stalls cause
5074  // regressions that are too large.
5075 
5076  // Insert the dependency-breaking FCONSTD before MI.
5077  // 96 is the encoding of 0.5, but the actual value doesn't matter here.
5078  BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), get(ARM::FCONSTD), DReg)
5079  .addImm(96)
5080  .add(predOps(ARMCC::AL));
5081  MI.addRegisterKilled(DReg, TRI, true);
5082 }
5083 
5084 bool ARMBaseInstrInfo::hasNOP() const {
5085  return Subtarget.getFeatureBits()[ARM::HasV6KOps];
5086 }
5087 
5088 bool ARMBaseInstrInfo::isSwiftFastImmShift(const MachineInstr *MI) const {
5089  if (MI->getNumOperands() < 4)
5090  return true;
5091  unsigned ShOpVal = MI->getOperand(3).getImm();
5092  unsigned ShImm = ARM_AM::getSORegOffset(ShOpVal);
5093  // Swift supports faster shifts for: lsl 2, lsl 1, and lsr 1.
5094  if ((ShImm == 1 && ARM_AM::getSORegShOp(ShOpVal) == ARM_AM::lsr) ||
5095  ((ShImm == 1 || ShImm == 2) &&
5096  ARM_AM::getSORegShOp(ShOpVal) == ARM_AM::lsl))
5097  return true;
5098 
5099  return false;
5100 }
5101 
5102 bool ARMBaseInstrInfo::getRegSequenceLikeInputs(
5103  const MachineInstr &MI, unsigned DefIdx,
5104  SmallVectorImpl<RegSubRegPairAndIdx> &InputRegs) const {
5105  assert(DefIdx < MI.getDesc().getNumDefs() && "Invalid definition index");
5106  assert(MI.isRegSequenceLike() && "Invalid kind of instruction");
5107 
5108  switch (MI.getOpcode()) {
5109  case ARM::VMOVDRR:
5110  // dX = VMOVDRR rY, rZ
5111  // is the same as:
5112  // dX = REG_SEQUENCE rY, ssub_0, rZ, ssub_1
5113  // Populate the InputRegs accordingly.
5114  // rY
5115  const MachineOperand *MOReg = &MI.getOperand(1);
5116  if (!MOReg->isUndef())
5117  InputRegs.push_back(RegSubRegPairAndIdx(MOReg->getReg(),
5118  MOReg->getSubReg(), ARM::ssub_0));
5119  // rZ
5120  MOReg = &MI.getOperand(2);
5121  if (!MOReg->isUndef())
5122  InputRegs.push_back(RegSubRegPairAndIdx(MOReg->getReg(),
5123  MOReg->getSubReg(), ARM::ssub_1));
5124  return true;
5125  }
5126  llvm_unreachable("Target dependent opcode missing");
5127 }
5128 
5129 bool ARMBaseInstrInfo::getExtractSubregLikeInputs(
5130  const MachineInstr &MI, unsigned DefIdx,
5131  RegSubRegPairAndIdx &InputReg) const {
5132  assert(DefIdx < MI.getDesc().getNumDefs() && "Invalid definition index");
5133  assert(MI.isExtractSubregLike() && "Invalid kind of instruction");
5134 
5135  switch (MI.getOpcode()) {
5136  case ARM::VMOVRRD:
5137  // rX, rY = VMOVRRD dZ
5138  // is the same as:
5139  // rX = EXTRACT_SUBREG dZ, ssub_0
5140  // rY = EXTRACT_SUBREG dZ, ssub_1
5141  const MachineOperand &MOReg = MI.getOperand(2);
5142  if (MOReg.isUndef())
5143  return false;
5144  InputReg.Reg = MOReg.getReg();
5145  InputReg.SubReg = MOReg.getSubReg();
5146  InputReg.SubIdx = DefIdx == 0 ? ARM::ssub_0 : ARM::ssub_1;
5147  return true;
5148  }
5149  llvm_unreachable("Target dependent opcode missing");
5150 }
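Both of the hooks above describe the same register geometry: a 64-bit D register is the concatenation of two 32-bit lanes, ssub_0 and ssub_1. A minimal value-level model of VMOVDRR/VMOVRRD (ssub_0 modeled as the low 32 bits and ssub_1 as the high 32 bits; plain integers stand in for registers, this is not LLVM's representation):

    #include <cstdint>
    #include <cstdio>

    // dX = VMOVDRR rY, rZ : build a D value from two 32-bit halves.
    static uint64_t vmovdrr(uint32_t lo, uint32_t hi) {
      return (uint64_t)hi << 32 | lo;
    }

    // rX = EXTRACT_SUBREG dZ, ssub_0/ssub_1 : read one lane back (VMOVRRD).
    static uint32_t extractLane(uint64_t d, int lane) {
      return lane == 0 ? (uint32_t)d : (uint32_t)(d >> 32);
    }

    int main() {
      uint64_t d = vmovdrr(0x11111111u, 0x22222222u);
      std::printf("ssub_0=0x%08x ssub_1=0x%08x\n",
                  (unsigned)extractLane(d, 0), (unsigned)extractLane(d, 1));
      return 0;
    }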
5151 
5152 bool ARMBaseInstrInfo::getInsertSubregLikeInputs(
5153  const MachineInstr &MI, unsigned DefIdx, RegSubRegPair &BaseReg,
5154  RegSubRegPairAndIdx &InsertedReg) const {
5155  assert(DefIdx < MI.getDesc().getNumDefs() && "Invalid definition index");
5156  assert(MI.isInsertSubregLike() && "Invalid kind of instruction");
5157 
5158  switch (MI.getOpcode()) {
5159  case ARM::VSETLNi32:
5160  // dX = VSETLNi32 dY, rZ, imm
5161  const MachineOperand &MOBaseReg = MI.getOperand(1);
5162  const MachineOperand &MOInsertedReg = MI.getOperand(2);
5163  if (MOInsertedReg.isUndef())
5164  return false;
5165  const MachineOperand &MOIndex = MI.getOperand(3);
5166  BaseReg.Reg = MOBaseReg.getReg();
5167  BaseReg.SubReg = MOBaseReg.getSubReg();
5168 
5169  InsertedReg.Reg = MOInsertedReg.getReg();
5170  InsertedReg.SubReg = MOInsertedReg.getSubReg();
5171  InsertedReg.SubIdx = MOIndex.getImm() == 0 ? ARM::ssub_0 : ARM::ssub_1;
5172  return true;
5173  }
5174  llvm_unreachable("Target dependent opcode missing");
5175 }
5176 
5177 std::pair<unsigned, unsigned>
5178 ARMBaseInstrInfo::decomposeMachineOperandsTargetFlags(unsigned TF) const {
5179  const unsigned Mask = ARMII::MO_OPTION_MASK;
5180  return std::make_pair(TF & Mask, TF & ~Mask);
5181 }
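The split above separates the single "option" value (MO_LO16/MO_HI16, serialized by the next function) from the independently combinable bitmask flags (serialized by the function after it). A self-contained sketch of the same idea; the numeric values below are made up for illustration and are not the real ARMII constants:

    #include <cassert>
    #include <utility>

    enum : unsigned {
      OptionMask = 0x3,        // stand-in for ARMII::MO_OPTION_MASK
      LO16 = 0x1, HI16 = 0x2,  // direct flags (live inside the mask)
      GOT = 0x4, SECREL = 0x8  // bitmask flags (live outside the mask)
    };

    static std::pair<unsigned, unsigned> decompose(unsigned TF) {
      return {TF & OptionMask, TF & ~OptionMask};
    }

    int main() {
      auto [direct, bitmask] = decompose(LO16 | GOT);
      assert(direct == LO16 && bitmask == GOT);
      return 0;
    }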
5182 
5183 ArrayRef<std::pair<unsigned, const char *>>
5184 ARMBaseInstrInfo::getSerializableDirectMachineOperandTargetFlags() const {
5185  using namespace ARMII;
5186 
5187  static const std::pair<unsigned, const char *> TargetFlags[] = {
5188  {MO_LO16, "arm-lo16"}, {MO_HI16, "arm-hi16"}};
5189  return makeArrayRef(TargetFlags);
5190 }
5191 
5192 ArrayRef<std::pair<unsigned, const char *>>
5193 ARMBaseInstrInfo::getSerializableBitmaskMachineOperandTargetFlags() const {
5194  using namespace ARMII;
5195 
5196  static const std::pair<unsigned, const char *> TargetFlags[] = {
5197  {MO_COFFSTUB, "arm-coffstub"},
5198  {MO_GOT, "arm-got"},
5199  {MO_SBREL, "arm-sbrel"},
5200  {MO_DLLIMPORT, "arm-dllimport"},
5201  {MO_SECREL, "arm-secrel"},
5202  {MO_NONLAZY, "arm-nonlazy"}};
5203  return makeArrayRef(TargetFlags);
5204 }
5205 
5206 bool llvm::registerDefinedBetween(unsigned Reg,
5207  MachineBasicBlock::iterator From,
5208  MachineBasicBlock::iterator To,
5209  const TargetRegisterInfo *TRI) {
5210  for (auto I = From; I != To; ++I)
5211  if (I->modifiesRegister(Reg, TRI))
5212  return true;
5213  return false;
5214 }
5215 
5216 MachineInstr *llvm::findCMPToFoldIntoCBZ(MachineInstr *Br,
5217  const TargetRegisterInfo *TRI) {
5218  // Search backwards for the instruction that defines CPSR. This may or may
5219  // not be a CMP; we check that after the loop. If we find another
5220  // instruction that reads CPSR first, we return nullptr.
5221  MachineBasicBlock::iterator CmpMI = Br;
5222  while (CmpMI != Br->getParent()->begin()) {
5223  --CmpMI;
5224  if (CmpMI->modifiesRegister(ARM::CPSR, TRI))
5225  break;
5226  if (CmpMI->readsRegister(ARM::CPSR, TRI))
5227  break;
5228  }
5229 
5230  // Check that this instruction is a CMP r[0-7], #0 and that the register
5231  // is not redefined between the CMP and the branch.
5232  if (CmpMI->getOpcode() != ARM::tCMPi8 && CmpMI->getOpcode() != ARM::t2CMPri)
5233  return nullptr;
5234  unsigned Reg = CmpMI->getOperand(0).getReg();
5235  unsigned PredReg = 0;
5236  ARMCC::CondCodes Pred = getInstrPredicate(*CmpMI, PredReg);
5237  if (Pred != ARMCC::AL || CmpMI->getOperand(1).getImm() != 0)
5238  return nullptr;
5239  if (!isARMLowRegister(Reg))
5240  return nullptr;
5241  if (registerDefinedBetween(Reg, CmpMI->getNextNode(), Br, TRI))
5242  return nullptr;
5243 
5244  return &*CmpMI;
5245 }
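To make the shape of this search concrete, here is a minimal standalone model of the same scan (names and fields are illustrative, not the MachineInstr API, and the predicate check is omitted): walk backwards from the branch to the first instruction that touches the flags, then accept it only if it is a compare-with-zero of a low register that is not written again before the branch.

    #include <cstdio>
    #include <string>
    #include <vector>

    struct Inst {
      std::string Op;     // simplified opcode name
      int Reg;            // register written (or compared, for the CMP); -1 if none
      bool TouchesFlags;  // defines or reads the flags (CPSR)
    };

    static int findFoldableCmp(const std::vector<Inst> &MBB, size_t BrIdx) {
      size_t I = BrIdx;
      while (I > 0 && !MBB[I - 1].TouchesFlags)
        --I;
      if (I == 0)
        return -1;                        // no flag-setting instruction found
      const Inst &Cand = MBB[I - 1];
      if (Cand.Op != "CMPri0" || Cand.Reg > 7)
        return -1;                        // not a compare-with-zero of a low reg
      for (size_t J = I; J < BrIdx; ++J)  // the register must survive to the branch
        if (MBB[J].Reg == Cand.Reg)
          return -1;
      return (int)(I - 1);
    }

    int main() {
      std::vector<Inst> MBB = {{"MOVi", 1, false},
                               {"CMPri0", 1, true},
                               {"ADDri", 2, false},
                               {"Bcc", -1, false}};
      std::printf("foldable CMP at index %d\n", findFoldableCmp(MBB, 3));
      return 0;
    }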