// NOTE: The three lines that followed the original file here ("LLVM 14.0.0git",
// the filename, and "Go to the documentation of this file.") were doxygen page
// navigation text captured during extraction, not part of the source.
1 //===- AArch64ExpandPseudoInsts.cpp - Expand pseudo instructions ----------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file contains a pass that expands pseudo instructions into target
10 // instructions to allow proper scheduling and other late optimizations. This
11 // pass should be run after register allocation but before the post-regalloc
12 // scheduling pass.
13 //
14 //===----------------------------------------------------------------------===//
15 
16 #include "AArch64ExpandImm.h"
17 #include "AArch64InstrInfo.h"
19 #include "AArch64Subtarget.h"
21 #include "Utils/AArch64BaseInfo.h"
22 #include "llvm/ADT/DenseMap.h"
23 #include "llvm/ADT/Triple.h"
33 #include "llvm/IR/DebugLoc.h"
34 #include "llvm/MC/MCInstrDesc.h"
35 #include "llvm/Pass.h"
36 #include "llvm/Support/CodeGen.h"
39 #include <cassert>
40 #include <cstdint>
41 #include <iterator>
42 #include <limits>
43 #include <utility>
44 
45 using namespace llvm;
46 
47 #define AARCH64_EXPAND_PSEUDO_NAME "AArch64 pseudo instruction expansion pass"
48 
49 namespace {
50 
51 class AArch64ExpandPseudo : public MachineFunctionPass {
52 public:
53  const AArch64InstrInfo *TII;
54 
55  static char ID;
56 
57  AArch64ExpandPseudo() : MachineFunctionPass(ID) {
59  }
60 
61  bool runOnMachineFunction(MachineFunction &Fn) override;
62 
63  StringRef getPassName() const override { return AARCH64_EXPAND_PSEUDO_NAME; }
64 
65 private:
66  bool expandMBB(MachineBasicBlock &MBB);
68  MachineBasicBlock::iterator &NextMBBI);
70  unsigned BitSize);
71 
72  bool expand_DestructiveOp(MachineInstr &MI, MachineBasicBlock &MBB,
75  unsigned LdarOp, unsigned StlrOp, unsigned CmpOp,
76  unsigned ExtendImm, unsigned ZeroReg,
77  MachineBasicBlock::iterator &NextMBBI);
78  bool expandCMP_SWAP_128(MachineBasicBlock &MBB,
80  MachineBasicBlock::iterator &NextMBBI);
81  bool expandSetTagLoop(MachineBasicBlock &MBB,
83  MachineBasicBlock::iterator &NextMBBI);
84  bool expandSVESpillFill(MachineBasicBlock &MBB,
85  MachineBasicBlock::iterator MBBI, unsigned Opc,
86  unsigned N);
87  bool expandCALL_RVMARKER(MachineBasicBlock &MBB,
89  bool expandStoreSwiftAsyncContext(MachineBasicBlock &MBB,
91 };
92 
93 } // end anonymous namespace
94 
96 
97 INITIALIZE_PASS(AArch64ExpandPseudo, "aarch64-expand-pseudo",
98  AARCH64_EXPAND_PSEUDO_NAME, false, false)
99 
100 /// Transfer implicit operands on the pseudo instruction to the
101 /// instructions created from the expansion.
102 static void transferImpOps(MachineInstr &OldMI, MachineInstrBuilder &UseMI,
104  const MCInstrDesc &Desc = OldMI.getDesc();
105  for (unsigned i = Desc.getNumOperands(), e = OldMI.getNumOperands(); i != e;
106  ++i) {
107  const MachineOperand &MO = OldMI.getOperand(i);
108  assert(MO.isReg() && MO.getReg());
109  if (MO.isUse())
110  UseMI.add(MO);
111  else
112  DefMI.add(MO);
113  }
114 }
115 
116 /// Expand a MOVi32imm or MOVi64imm pseudo instruction to one or more
117 /// real move-immediate instructions to synthesize the immediate.
120  unsigned BitSize) {
121  MachineInstr &MI = *MBBI;
122  Register DstReg = MI.getOperand(0).getReg();
123  uint64_t RenamableState =
124  MI.getOperand(0).isRenamable() ? RegState::Renamable : 0;
125  uint64_t Imm = MI.getOperand(1).getImm();
126 
127  if (DstReg == AArch64::XZR || DstReg == AArch64::WZR) {
128  // Useless def, and we don't want to risk creating an invalid ORR (which
129  // would really write to sp).
130  MI.eraseFromParent();
131  return true;
132  }
133 
135  AArch64_IMM::expandMOVImm(Imm, BitSize, Insn);
136  assert(Insn.size() != 0);
137 
139  for (auto I = Insn.begin(), E = Insn.end(); I != E; ++I) {
140  bool LastItem = std::next(I) == E;
141  switch (I->Opcode)
142  {
143  default: llvm_unreachable("unhandled!"); break;
144 
145  case AArch64::ORRWri:
146  case AArch64::ORRXri:
147  MIBS.push_back(BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(I->Opcode))
148  .add(MI.getOperand(0))
149  .addReg(BitSize == 32 ? AArch64::WZR : AArch64::XZR)
150  .addImm(I->Op2));
151  break;
152  case AArch64::MOVNWi:
153  case AArch64::MOVNXi:
154  case AArch64::MOVZWi:
155  case AArch64::MOVZXi: {
156  bool DstIsDead = MI.getOperand(0).isDead();
157  MIBS.push_back(BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(I->Opcode))
158  .addReg(DstReg, RegState::Define |
159  getDeadRegState(DstIsDead && LastItem) |
160  RenamableState)
161  .addImm(I->Op1)
162  .addImm(I->Op2));
163  } break;
164  case AArch64::MOVKWi:
165  case AArch64::MOVKXi: {
166  Register DstReg = MI.getOperand(0).getReg();
167  bool DstIsDead = MI.getOperand(0).isDead();
168  MIBS.push_back(BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(I->Opcode))
169  .addReg(DstReg,
171  getDeadRegState(DstIsDead && LastItem) |
172  RenamableState)
173  .addReg(DstReg)
174  .addImm(I->Op1)
175  .addImm(I->Op2));
176  } break;
177  }
178  }
179  transferImpOps(MI, MIBS.front(), MIBS.back());
180  MI.eraseFromParent();
181  return true;
182 }
183 
184 bool AArch64ExpandPseudo::expandCMP_SWAP(
186  unsigned StlrOp, unsigned CmpOp, unsigned ExtendImm, unsigned ZeroReg,
187  MachineBasicBlock::iterator &NextMBBI) {
188  MachineInstr &MI = *MBBI;
189  DebugLoc DL = MI.getDebugLoc();
190  const MachineOperand &Dest = MI.getOperand(0);
191  Register StatusReg = MI.getOperand(1).getReg();
192  bool StatusDead = MI.getOperand(1).isDead();
193  // Duplicating undef operands into 2 instructions does not guarantee the same
194  // value on both; However undef should be replaced by xzr anyway.
195  assert(!MI.getOperand(2).isUndef() && "cannot handle undef");
196  Register AddrReg = MI.getOperand(2).getReg();
197  Register DesiredReg = MI.getOperand(3).getReg();
198  Register NewReg = MI.getOperand(4).getReg();
199 
200  MachineFunction *MF = MBB.getParent();
201  auto LoadCmpBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());
202  auto StoreBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());
203  auto DoneBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());
204 
205  MF->insert(++MBB.getIterator(), LoadCmpBB);
206  MF->insert(++LoadCmpBB->getIterator(), StoreBB);
207  MF->insert(++StoreBB->getIterator(), DoneBB);
208 
209  // .Lloadcmp:
210  // mov wStatus, 0
211  // ldaxr xDest, [xAddr]
212  // cmp xDest, xDesired
213  // b.ne .Ldone
214  if (!StatusDead)
215  BuildMI(LoadCmpBB, DL, TII->get(AArch64::MOVZWi), StatusReg)
216  .addImm(0).addImm(0);
217  BuildMI(LoadCmpBB, DL, TII->get(LdarOp), Dest.getReg())
218  .addReg(AddrReg);
219  BuildMI(LoadCmpBB, DL, TII->get(CmpOp), ZeroReg)
220  .addReg(Dest.getReg(), getKillRegState(Dest.isDead()))
221  .addReg(DesiredReg)
222  .addImm(ExtendImm);
223  BuildMI(LoadCmpBB, DL, TII->get(AArch64::Bcc))
225  .addMBB(DoneBB)
226  .addReg(AArch64::NZCV, RegState::Implicit | RegState::Kill);
227  LoadCmpBB->addSuccessor(DoneBB);
228  LoadCmpBB->addSuccessor(StoreBB);
229 
230  // .Lstore:
231  // stlxr wStatus, xNew, [xAddr]
232  // cbnz wStatus, .Lloadcmp
233  BuildMI(StoreBB, DL, TII->get(StlrOp), StatusReg)
234  .addReg(NewReg)
235  .addReg(AddrReg);
236  BuildMI(StoreBB, DL, TII->get(AArch64::CBNZW))
237  .addReg(StatusReg, getKillRegState(StatusDead))
238  .addMBB(LoadCmpBB);
239  StoreBB->addSuccessor(LoadCmpBB);
240  StoreBB->addSuccessor(DoneBB);
241 
242  DoneBB->splice(DoneBB->end(), &MBB, MI, MBB.end());
243  DoneBB->transferSuccessors(&MBB);
244 
245  MBB.addSuccessor(LoadCmpBB);
246 
247  NextMBBI = MBB.end();
248  MI.eraseFromParent();
249 
250  // Recompute livein lists.
251  LivePhysRegs LiveRegs;
252  computeAndAddLiveIns(LiveRegs, *DoneBB);
253  computeAndAddLiveIns(LiveRegs, *StoreBB);
254  computeAndAddLiveIns(LiveRegs, *LoadCmpBB);
255  // Do an extra pass around the loop to get loop carried registers right.
256  StoreBB->clearLiveIns();
257  computeAndAddLiveIns(LiveRegs, *StoreBB);
258  LoadCmpBB->clearLiveIns();
259  computeAndAddLiveIns(LiveRegs, *LoadCmpBB);
260 
261  return true;
262 }
263 
264 bool AArch64ExpandPseudo::expandCMP_SWAP_128(
266  MachineBasicBlock::iterator &NextMBBI) {
267  MachineInstr &MI = *MBBI;
268  DebugLoc DL = MI.getDebugLoc();
269  MachineOperand &DestLo = MI.getOperand(0);
270  MachineOperand &DestHi = MI.getOperand(1);
271  Register StatusReg = MI.getOperand(2).getReg();
272  bool StatusDead = MI.getOperand(2).isDead();
273  // Duplicating undef operands into 2 instructions does not guarantee the same
274  // value on both; However undef should be replaced by xzr anyway.
275  assert(!MI.getOperand(3).isUndef() && "cannot handle undef");
276  Register AddrReg = MI.getOperand(3).getReg();
277  Register DesiredLoReg = MI.getOperand(4).getReg();
278  Register DesiredHiReg = MI.getOperand(5).getReg();
279  Register NewLoReg = MI.getOperand(6).getReg();
280  Register NewHiReg = MI.getOperand(7).getReg();
281 
282  unsigned LdxpOp, StxpOp;
283 
284  switch (MI.getOpcode()) {
285  case AArch64::CMP_SWAP_128_MONOTONIC:
286  LdxpOp = AArch64::LDXPX;
287  StxpOp = AArch64::STXPX;
288  break;
289  case AArch64::CMP_SWAP_128_RELEASE:
290  LdxpOp = AArch64::LDXPX;
291  StxpOp = AArch64::STLXPX;
292  break;
293  case AArch64::CMP_SWAP_128_ACQUIRE:
294  LdxpOp = AArch64::LDAXPX;
295  StxpOp = AArch64::STXPX;
296  break;
297  case AArch64::CMP_SWAP_128:
298  LdxpOp = AArch64::LDAXPX;
299  StxpOp = AArch64::STLXPX;
300  break;
301  default:
302  llvm_unreachable("Unexpected opcode");
303  }
304 
305  MachineFunction *MF = MBB.getParent();
306  auto LoadCmpBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());
307  auto StoreBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());
308  auto FailBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());
309  auto DoneBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());
310 
311  MF->insert(++MBB.getIterator(), LoadCmpBB);
312  MF->insert(++LoadCmpBB->getIterator(), StoreBB);
313  MF->insert(++StoreBB->getIterator(), FailBB);
314  MF->insert(++FailBB->getIterator(), DoneBB);
315 
316  // .Lloadcmp:
317  // ldaxp xDestLo, xDestHi, [xAddr]
318  // cmp xDestLo, xDesiredLo
319  // sbcs xDestHi, xDesiredHi
320  // b.ne .Ldone
321  BuildMI(LoadCmpBB, DL, TII->get(LdxpOp))
322  .addReg(DestLo.getReg(), RegState::Define)
323  .addReg(DestHi.getReg(), RegState::Define)
324  .addReg(AddrReg);
325  BuildMI(LoadCmpBB, DL, TII->get(AArch64::SUBSXrs), AArch64::XZR)
326  .addReg(DestLo.getReg(), getKillRegState(DestLo.isDead()))
327  .addReg(DesiredLoReg)
328  .addImm(0);
329  BuildMI(LoadCmpBB, DL, TII->get(AArch64::CSINCWr), StatusReg)
330  .addUse(AArch64::WZR)
331  .addUse(AArch64::WZR)
333  BuildMI(LoadCmpBB, DL, TII->get(AArch64::SUBSXrs), AArch64::XZR)
334  .addReg(DestHi.getReg(), getKillRegState(DestHi.isDead()))
335  .addReg(DesiredHiReg)
336  .addImm(0);
337  BuildMI(LoadCmpBB, DL, TII->get(AArch64::CSINCWr), StatusReg)
338  .addUse(StatusReg, RegState::Kill)
339  .addUse(StatusReg, RegState::Kill)
341  BuildMI(LoadCmpBB, DL, TII->get(AArch64::CBNZW))
342  .addUse(StatusReg, getKillRegState(StatusDead))
343  .addMBB(FailBB);
344  LoadCmpBB->addSuccessor(FailBB);
345  LoadCmpBB->addSuccessor(StoreBB);
346 
347  // .Lstore:
348  // stlxp wStatus, xNewLo, xNewHi, [xAddr]
349  // cbnz wStatus, .Lloadcmp
350  BuildMI(StoreBB, DL, TII->get(StxpOp), StatusReg)
351  .addReg(NewLoReg)
352  .addReg(NewHiReg)
353  .addReg(AddrReg);
354  BuildMI(StoreBB, DL, TII->get(AArch64::CBNZW))
355  .addReg(StatusReg, getKillRegState(StatusDead))
356  .addMBB(LoadCmpBB);
357  BuildMI(StoreBB, DL, TII->get(AArch64::B)).addMBB(DoneBB);
358  StoreBB->addSuccessor(LoadCmpBB);
359  StoreBB->addSuccessor(DoneBB);
360 
361  // .Lfail:
362  // stlxp wStatus, xDestLo, xDestHi, [xAddr]
363  // cbnz wStatus, .Lloadcmp
364  BuildMI(FailBB, DL, TII->get(StxpOp), StatusReg)
365  .addReg(DestLo.getReg())
366  .addReg(DestHi.getReg())
367  .addReg(AddrReg);
368  BuildMI(FailBB, DL, TII->get(AArch64::CBNZW))
369  .addReg(StatusReg, getKillRegState(StatusDead))
370  .addMBB(LoadCmpBB);
371  FailBB->addSuccessor(LoadCmpBB);
372  FailBB->addSuccessor(DoneBB);
373 
374  DoneBB->splice(DoneBB->end(), &MBB, MI, MBB.end());
375  DoneBB->transferSuccessors(&MBB);
376 
377  MBB.addSuccessor(LoadCmpBB);
378 
379  NextMBBI = MBB.end();
380  MI.eraseFromParent();
381 
382  // Recompute liveness bottom up.
383  LivePhysRegs LiveRegs;
384  computeAndAddLiveIns(LiveRegs, *DoneBB);
385  computeAndAddLiveIns(LiveRegs, *FailBB);
386  computeAndAddLiveIns(LiveRegs, *StoreBB);
387  computeAndAddLiveIns(LiveRegs, *LoadCmpBB);
388 
389  // Do an extra pass in the loop to get the loop carried dependencies right.
390  FailBB->clearLiveIns();
391  computeAndAddLiveIns(LiveRegs, *FailBB);
392  StoreBB->clearLiveIns();
393  computeAndAddLiveIns(LiveRegs, *StoreBB);
394  LoadCmpBB->clearLiveIns();
395  computeAndAddLiveIns(LiveRegs, *LoadCmpBB);
396 
397  return true;
398 }
399 
400 /// \brief Expand Pseudos to Instructions with destructive operands.
401 ///
402 /// This mechanism uses MOVPRFX instructions for zeroing the false lanes
403 /// or for fixing relaxed register allocation conditions to comply with
404 /// the instructions register constraints. The latter case may be cheaper
405 /// than setting the register constraints in the register allocator,
406 /// since that will insert regular MOV instructions rather than MOVPRFX.
407 ///
408 /// Example (after register allocation):
409 ///
410 /// FSUB_ZPZZ_ZERO_B Z0, Pg, Z1, Z0
411 ///
412 /// * The Pseudo FSUB_ZPZZ_ZERO_B maps to FSUB_ZPmZ_B.
413 /// * We cannot map directly to FSUB_ZPmZ_B because the register
414 /// constraints of the instruction are not met.
415 /// * Also the _ZERO specifies the false lanes need to be zeroed.
416 ///
417 /// We first try to see if the destructive operand == result operand,
418 /// if not, we try to swap the operands, e.g.
419 ///
420 /// FSUB_ZPmZ_B Z0, Pg/m, Z0, Z1
421 ///
422 /// But because FSUB_ZPmZ is not commutative, this is semantically
423 /// different, so we need a reverse instruction:
424 ///
425 /// FSUBR_ZPmZ_B Z0, Pg/m, Z0, Z1
426 ///
427 /// Then we implement the zeroing of the false lanes of Z0 by adding
428 /// a zeroing MOVPRFX instruction:
429 ///
430 /// MOVPRFX_ZPzZ_B Z0, Pg/z, Z0
431 /// FSUBR_ZPmZ_B Z0, Pg/m, Z0, Z1
432 ///
433 /// Note that this can only be done for _ZERO or _UNDEF variants where
434 /// we can guarantee the false lanes to be zeroed (by implementing this)
435 /// or that they are undef (don't care / not used), otherwise the
436 /// swapping of operands is illegal because the operation is not
437 /// (or cannot be emulated to be) fully commutative.
438 bool AArch64ExpandPseudo::expand_DestructiveOp(
439  MachineInstr &MI,
442  unsigned Opcode = AArch64::getSVEPseudoMap(MI.getOpcode());
443  uint64_t DType = TII->get(Opcode).TSFlags & AArch64::DestructiveInstTypeMask;
444  uint64_t FalseLanes = MI.getDesc().TSFlags & AArch64::FalseLanesMask;
445  bool FalseZero = FalseLanes == AArch64::FalseLanesZero;
446 
447  unsigned DstReg = MI.getOperand(0).getReg();
448  bool DstIsDead = MI.getOperand(0).isDead();
449 
450  if (DType == AArch64::DestructiveBinary)
451  assert(DstReg != MI.getOperand(3).getReg());
452 
453  bool UseRev = false;
454  unsigned PredIdx, DOPIdx, SrcIdx, Src2Idx;
455  switch (DType) {
458  if (DstReg == MI.getOperand(3).getReg()) {
459  // FSUB Zd, Pg, Zs1, Zd ==> FSUBR Zd, Pg/m, Zd, Zs1
460  std::tie(PredIdx, DOPIdx, SrcIdx) = std::make_tuple(1, 3, 2);
461  UseRev = true;
462  break;
463  }
467  std::tie(PredIdx, DOPIdx, SrcIdx) = std::make_tuple(1, 2, 3);
468  break;
470  std::tie(PredIdx, DOPIdx, SrcIdx) = std::make_tuple(2, 3, 3);
471  break;
473  std::tie(PredIdx, DOPIdx, SrcIdx, Src2Idx) = std::make_tuple(1, 2, 3, 4);
474  if (DstReg == MI.getOperand(3).getReg()) {
475  // FMLA Zd, Pg, Za, Zd, Zm ==> FMAD Zdn, Pg, Zm, Za
476  std::tie(PredIdx, DOPIdx, SrcIdx, Src2Idx) = std::make_tuple(1, 3, 4, 2);
477  UseRev = true;
478  } else if (DstReg == MI.getOperand(4).getReg()) {
479  // FMLA Zd, Pg, Za, Zm, Zd ==> FMAD Zdn, Pg, Zm, Za
480  std::tie(PredIdx, DOPIdx, SrcIdx, Src2Idx) = std::make_tuple(1, 4, 3, 2);
481  UseRev = true;
482  }
483  break;
484  default:
485  llvm_unreachable("Unsupported Destructive Operand type");
486  }
487 
488 #ifndef NDEBUG
489  // MOVPRFX can only be used if the destination operand
490  // is the destructive operand, not as any other operand,
491  // so the Destructive Operand must be unique.
492  bool DOPRegIsUnique = false;
493  switch (DType) {
496  DOPRegIsUnique =
497  DstReg != MI.getOperand(DOPIdx).getReg() ||
498  MI.getOperand(DOPIdx).getReg() != MI.getOperand(SrcIdx).getReg();
499  break;
502  DOPRegIsUnique = true;
503  break;
505  DOPRegIsUnique =
506  DstReg != MI.getOperand(DOPIdx).getReg() ||
507  (MI.getOperand(DOPIdx).getReg() != MI.getOperand(SrcIdx).getReg() &&
508  MI.getOperand(DOPIdx).getReg() != MI.getOperand(Src2Idx).getReg());
509  break;
510  }
511 #endif
512 
513  // Resolve the reverse opcode
514  if (UseRev) {
515  int NewOpcode;
516  // e.g. DIV -> DIVR
517  if ((NewOpcode = AArch64::getSVERevInstr(Opcode)) != -1)
518  Opcode = NewOpcode;
519  // e.g. DIVR -> DIV
520  else if ((NewOpcode = AArch64::getSVENonRevInstr(Opcode)) != -1)
521  Opcode = NewOpcode;
522  }
523 
524  // Get the right MOVPRFX
525  uint64_t ElementSize = TII->getElementSizeForOpcode(Opcode);
526  unsigned MovPrfx, MovPrfxZero;
527  switch (ElementSize) {
530  MovPrfx = AArch64::MOVPRFX_ZZ;
531  MovPrfxZero = AArch64::MOVPRFX_ZPzZ_B;
532  break;
534  MovPrfx = AArch64::MOVPRFX_ZZ;
535  MovPrfxZero = AArch64::MOVPRFX_ZPzZ_H;
536  break;
538  MovPrfx = AArch64::MOVPRFX_ZZ;
539  MovPrfxZero = AArch64::MOVPRFX_ZPzZ_S;
540  break;
542  MovPrfx = AArch64::MOVPRFX_ZZ;
543  MovPrfxZero = AArch64::MOVPRFX_ZPzZ_D;
544  break;
545  default:
546  llvm_unreachable("Unsupported ElementSize");
547  }
548 
549  //
550  // Create the destructive operation (if required)
551  //
552  MachineInstrBuilder PRFX, DOP;
553  if (FalseZero) {
554 #ifndef NDEBUG
555  assert(DOPRegIsUnique && "The destructive operand should be unique");
556 #endif
557  assert(ElementSize != AArch64::ElementSizeNone &&
558  "This instruction is unpredicated");
559 
560  // Merge source operand into destination register
561  PRFX = BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(MovPrfxZero))
562  .addReg(DstReg, RegState::Define)
563  .addReg(MI.getOperand(PredIdx).getReg())
564  .addReg(MI.getOperand(DOPIdx).getReg());
565 
566  // After the movprfx, the destructive operand is same as Dst
567  DOPIdx = 0;
568  } else if (DstReg != MI.getOperand(DOPIdx).getReg()) {
569 #ifndef NDEBUG
570  assert(DOPRegIsUnique && "The destructive operand should be unique");
571 #endif
572  PRFX = BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(MovPrfx))
573  .addReg(DstReg, RegState::Define)
574  .addReg(MI.getOperand(DOPIdx).getReg());
575  DOPIdx = 0;
576  }
577 
578  //
579  // Create the destructive operation
580  //
581  DOP = BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(Opcode))
582  .addReg(DstReg, RegState::Define | getDeadRegState(DstIsDead));
583 
584  switch (DType) {
586  DOP.addReg(MI.getOperand(DOPIdx).getReg(), RegState::Kill)
587  .add(MI.getOperand(PredIdx))
588  .add(MI.getOperand(SrcIdx));
589  break;
593  DOP.add(MI.getOperand(PredIdx))
594  .addReg(MI.getOperand(DOPIdx).getReg(), RegState::Kill)
595  .add(MI.getOperand(SrcIdx));
596  break;
598  DOP.add(MI.getOperand(PredIdx))
599  .addReg(MI.getOperand(DOPIdx).getReg(), RegState::Kill)
600  .add(MI.getOperand(SrcIdx))
601  .add(MI.getOperand(Src2Idx));
602  break;
603  }
604 
605  if (PRFX) {
607  transferImpOps(MI, PRFX, DOP);
608  } else
609  transferImpOps(MI, DOP, DOP);
610 
611  MI.eraseFromParent();
612  return true;
613 }
614 
615 bool AArch64ExpandPseudo::expandSetTagLoop(
617  MachineBasicBlock::iterator &NextMBBI) {
618  MachineInstr &MI = *MBBI;
619  DebugLoc DL = MI.getDebugLoc();
620  Register SizeReg = MI.getOperand(0).getReg();
621  Register AddressReg = MI.getOperand(1).getReg();
622 
623  MachineFunction *MF = MBB.getParent();
624 
625  bool ZeroData = MI.getOpcode() == AArch64::STZGloop_wback;
626  const unsigned OpCode1 =
627  ZeroData ? AArch64::STZGPostIndex : AArch64::STGPostIndex;
628  const unsigned OpCode2 =
629  ZeroData ? AArch64::STZ2GPostIndex : AArch64::ST2GPostIndex;
630 
631  unsigned Size = MI.getOperand(2).getImm();
632  assert(Size > 0 && Size % 16 == 0);
633  if (Size % (16 * 2) != 0) {
634  BuildMI(MBB, MBBI, DL, TII->get(OpCode1), AddressReg)
635  .addReg(AddressReg)
636  .addReg(AddressReg)
637  .addImm(1);
638  Size -= 16;
639  }
641  BuildMI(MBB, MBBI, DL, TII->get(AArch64::MOVi64imm), SizeReg)
642  .addImm(Size);
643  expandMOVImm(MBB, I, 64);
644 
645  auto LoopBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());
646  auto DoneBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());
647 
648  MF->insert(++MBB.getIterator(), LoopBB);
649  MF->insert(++LoopBB->getIterator(), DoneBB);
650 
651  BuildMI(LoopBB, DL, TII->get(OpCode2))
652  .addDef(AddressReg)
653  .addReg(AddressReg)
654  .addReg(AddressReg)
655  .addImm(2)
656  .cloneMemRefs(MI)
657  .setMIFlags(MI.getFlags());
658  BuildMI(LoopBB, DL, TII->get(AArch64::SUBXri))
659  .addDef(SizeReg)
660  .addReg(SizeReg)
661  .addImm(16 * 2)
662  .addImm(0);
663  BuildMI(LoopBB, DL, TII->get(AArch64::CBNZX)).addUse(SizeReg).addMBB(LoopBB);
664 
665  LoopBB->addSuccessor(LoopBB);
666  LoopBB->addSuccessor(DoneBB);
667 
668  DoneBB->splice(DoneBB->end(), &MBB, MI, MBB.end());
669  DoneBB->transferSuccessors(&MBB);
670 
671  MBB.addSuccessor(LoopBB);
672 
673  NextMBBI = MBB.end();
674  MI.eraseFromParent();
675  // Recompute liveness bottom up.
676  LivePhysRegs LiveRegs;
677  computeAndAddLiveIns(LiveRegs, *DoneBB);
678  computeAndAddLiveIns(LiveRegs, *LoopBB);
679  // Do an extra pass in the loop to get the loop carried dependencies right.
680  // FIXME: is this necessary?
681  LoopBB->clearLiveIns();
682  computeAndAddLiveIns(LiveRegs, *LoopBB);
683  DoneBB->clearLiveIns();
684  computeAndAddLiveIns(LiveRegs, *DoneBB);
685 
686  return true;
687 }
688 
689 bool AArch64ExpandPseudo::expandSVESpillFill(MachineBasicBlock &MBB,
691  unsigned Opc, unsigned N) {
692  const TargetRegisterInfo *TRI =
694  MachineInstr &MI = *MBBI;
695  for (unsigned Offset = 0; Offset < N; ++Offset) {
696  int ImmOffset = MI.getOperand(2).getImm() + Offset;
697  bool Kill = (Offset + 1 == N) ? MI.getOperand(1).isKill() : false;
698  assert(ImmOffset >= -256 && ImmOffset < 256 &&
699  "Immediate spill offset out of range");
700  BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(Opc))
701  .addReg(
702  TRI->getSubReg(MI.getOperand(0).getReg(), AArch64::zsub0 + Offset),
703  Opc == AArch64::LDR_ZXI ? RegState::Define : 0)
704  .addReg(MI.getOperand(1).getReg(), getKillRegState(Kill))
705  .addImm(ImmOffset);
706  }
707  MI.eraseFromParent();
708  return true;
709 }
710 
711 bool AArch64ExpandPseudo::expandCALL_RVMARKER(
713  // Expand CALL_RVMARKER pseudo to a branch, followed by the special `mov x29,
714  // x29` marker. Mark the sequence as bundle, to avoid passes moving other code
715  // in between.
716  MachineInstr &MI = *MBBI;
717 
718  MachineInstr *OriginalCall;
719  MachineOperand &CallTarget = MI.getOperand(0);
720  assert((CallTarget.isGlobal() || CallTarget.isReg()) &&
721  "invalid operand for regular call");
722  unsigned Opc = CallTarget.isGlobal() ? AArch64::BL : AArch64::BLR;
723  OriginalCall = BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(Opc)).getInstr();
724  OriginalCall->addOperand(CallTarget);
725 
726  unsigned RegMaskStartIdx = 1;
727  // Skip register arguments. Those are added during ISel, but are not
728  // needed for the concrete branch.
729  while (!MI.getOperand(RegMaskStartIdx).isRegMask()) {
730  auto MOP = MI.getOperand(RegMaskStartIdx);
731  assert(MOP.isReg() && "can only add register operands");
733  MOP.getReg(), /*Def=*/false, /*Implicit=*/true));
734  RegMaskStartIdx++;
735  }
736  for (; RegMaskStartIdx < MI.getNumOperands(); ++RegMaskStartIdx)
737  OriginalCall->addOperand(MI.getOperand(RegMaskStartIdx));
738 
739  auto *Marker = BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(AArch64::ORRXrs))
740  .addReg(AArch64::FP, RegState::Define)
741  .addReg(AArch64::XZR)
742  .addReg(AArch64::FP)
743  .addImm(0)
744  .getInstr();
745  if (MI.shouldUpdateCallSiteInfo())
746  MBB.getParent()->moveCallSiteInfo(&MI, Marker);
747  MI.eraseFromParent();
748  finalizeBundle(MBB, OriginalCall->getIterator(),
749  std::next(Marker->getIterator()));
750  return true;
751 }
752 
753 bool AArch64ExpandPseudo::expandStoreSwiftAsyncContext(
755  Register CtxReg = MBBI->getOperand(0).getReg();
756  Register BaseReg = MBBI->getOperand(1).getReg();
757  int Offset = MBBI->getOperand(2).getImm();
758  DebugLoc DL(MBBI->getDebugLoc());
759  auto &STI = MBB.getParent()->getSubtarget<AArch64Subtarget>();
760 
761  if (STI.getTargetTriple().getArchName() != "arm64e") {
762  BuildMI(MBB, MBBI, DL, TII->get(AArch64::STRXui))
763  .addUse(CtxReg)
764  .addUse(BaseReg)
765  .addImm(Offset / 8)
768  return true;
769  }
770 
771  // We need to sign the context in an address-discriminated way. 0xc31a is a
772  // fixed random value, chosen as part of the ABI.
773  // add x16, xBase, #Offset
774  // movk x16, #0xc31a, lsl #48
775  // mov x17, x22/xzr
776  // pacdb x17, x16
777  // str x17, [xBase, #Offset]
778  unsigned Opc = Offset >= 0 ? AArch64::ADDXri : AArch64::SUBXri;
779  BuildMI(MBB, MBBI, DL, TII->get(Opc), AArch64::X16)
780  .addUse(BaseReg)
781  .addImm(abs(Offset))
782  .addImm(0)
784  BuildMI(MBB, MBBI, DL, TII->get(AArch64::MOVKXi), AArch64::X16)
785  .addUse(AArch64::X16)
786  .addImm(0xc31a)
787  .addImm(48)
789  // We're not allowed to clobber X22 (and couldn't clobber XZR if we tried), so
790  // move it somewhere before signing.
791  BuildMI(MBB, MBBI, DL, TII->get(AArch64::ORRXrs), AArch64::X17)
792  .addUse(AArch64::XZR)
793  .addUse(CtxReg)
794  .addImm(0)
796  BuildMI(MBB, MBBI, DL, TII->get(AArch64::PACDB), AArch64::X17)
797  .addUse(AArch64::X17)
798  .addUse(AArch64::X16)
800  BuildMI(MBB, MBBI, DL, TII->get(AArch64::STRXui))
801  .addUse(AArch64::X17)
802  .addUse(BaseReg)
803  .addImm(Offset / 8)
805 
807  return true;
808 }
809 
810 /// If MBBI references a pseudo instruction that should be expanded here,
811 /// do the expansion and return true. Otherwise return false.
812 bool AArch64ExpandPseudo::expandMI(MachineBasicBlock &MBB,
814  MachineBasicBlock::iterator &NextMBBI) {
815  MachineInstr &MI = *MBBI;
816  unsigned Opcode = MI.getOpcode();
817 
818  // Check if we can expand the destructive op
819  int OrigInstr = AArch64::getSVEPseudoMap(MI.getOpcode());
820  if (OrigInstr != -1) {
821  auto &Orig = TII->get(OrigInstr);
822  if ((Orig.TSFlags & AArch64::DestructiveInstTypeMask)
824  return expand_DestructiveOp(MI, MBB, MBBI);
825  }
826  }
827 
828  switch (Opcode) {
829  default:
830  break;
831 
832  case AArch64::BSPv8i8:
833  case AArch64::BSPv16i8: {
834  Register DstReg = MI.getOperand(0).getReg();
835  if (DstReg == MI.getOperand(3).getReg()) {
836  // Expand to BIT
837  BuildMI(MBB, MBBI, MI.getDebugLoc(),
838  TII->get(Opcode == AArch64::BSPv8i8 ? AArch64::BITv8i8
839  : AArch64::BITv16i8))
840  .add(MI.getOperand(0))
841  .add(MI.getOperand(3))
842  .add(MI.getOperand(2))
843  .add(MI.getOperand(1));
844  } else if (DstReg == MI.getOperand(2).getReg()) {
845  // Expand to BIF
846  BuildMI(MBB, MBBI, MI.getDebugLoc(),
847  TII->get(Opcode == AArch64::BSPv8i8 ? AArch64::BIFv8i8
848  : AArch64::BIFv16i8))
849  .add(MI.getOperand(0))
850  .add(MI.getOperand(2))
851  .add(MI.getOperand(3))
852  .add(MI.getOperand(1));
853  } else {
854  // Expand to BSL, use additional move if required
855  if (DstReg == MI.getOperand(1).getReg()) {
856  BuildMI(MBB, MBBI, MI.getDebugLoc(),
857  TII->get(Opcode == AArch64::BSPv8i8 ? AArch64::BSLv8i8
858  : AArch64::BSLv16i8))
859  .add(MI.getOperand(0))
860  .add(MI.getOperand(1))
861  .add(MI.getOperand(2))
862  .add(MI.getOperand(3));
863  } else {
864  BuildMI(MBB, MBBI, MI.getDebugLoc(),
865  TII->get(Opcode == AArch64::BSPv8i8 ? AArch64::ORRv8i8
866  : AArch64::ORRv16i8))
867  .addReg(DstReg,
869  getRenamableRegState(MI.getOperand(0).isRenamable()))
870  .add(MI.getOperand(1))
871  .add(MI.getOperand(1));
872  BuildMI(MBB, MBBI, MI.getDebugLoc(),
873  TII->get(Opcode == AArch64::BSPv8i8 ? AArch64::BSLv8i8
874  : AArch64::BSLv16i8))
875  .add(MI.getOperand(0))
876  .addReg(DstReg,
878  getRenamableRegState(MI.getOperand(0).isRenamable()))
879  .add(MI.getOperand(2))
880  .add(MI.getOperand(3));
881  }
882  }
883  MI.eraseFromParent();
884  return true;
885  }
886 
887  case AArch64::ADDWrr:
888  case AArch64::SUBWrr:
889  case AArch64::ADDXrr:
890  case AArch64::SUBXrr:
891  case AArch64::ADDSWrr:
892  case AArch64::SUBSWrr:
893  case AArch64::ADDSXrr:
894  case AArch64::SUBSXrr:
895  case AArch64::ANDWrr:
896  case AArch64::ANDXrr:
897  case AArch64::BICWrr:
898  case AArch64::BICXrr:
899  case AArch64::ANDSWrr:
900  case AArch64::ANDSXrr:
901  case AArch64::BICSWrr:
902  case AArch64::BICSXrr:
903  case AArch64::EONWrr:
904  case AArch64::EONXrr:
905  case AArch64::EORWrr:
906  case AArch64::EORXrr:
907  case AArch64::ORNWrr:
908  case AArch64::ORNXrr:
909  case AArch64::ORRWrr:
910  case AArch64::ORRXrr: {
911  unsigned Opcode;
912  switch (MI.getOpcode()) {
913  default:
914  return false;
915  case AArch64::ADDWrr: Opcode = AArch64::ADDWrs; break;
916  case AArch64::SUBWrr: Opcode = AArch64::SUBWrs; break;
917  case AArch64::ADDXrr: Opcode = AArch64::ADDXrs; break;
918  case AArch64::SUBXrr: Opcode = AArch64::SUBXrs; break;
919  case AArch64::ADDSWrr: Opcode = AArch64::ADDSWrs; break;
920  case AArch64::SUBSWrr: Opcode = AArch64::SUBSWrs; break;
921  case AArch64::ADDSXrr: Opcode = AArch64::ADDSXrs; break;
922  case AArch64::SUBSXrr: Opcode = AArch64::SUBSXrs; break;
923  case AArch64::ANDWrr: Opcode = AArch64::ANDWrs; break;
924  case AArch64::ANDXrr: Opcode = AArch64::ANDXrs; break;
925  case AArch64::BICWrr: Opcode = AArch64::BICWrs; break;
926  case AArch64::BICXrr: Opcode = AArch64::BICXrs; break;
927  case AArch64::ANDSWrr: Opcode = AArch64::ANDSWrs; break;
928  case AArch64::ANDSXrr: Opcode = AArch64::ANDSXrs; break;
929  case AArch64::BICSWrr: Opcode = AArch64::BICSWrs; break;
930  case AArch64::BICSXrr: Opcode = AArch64::BICSXrs; break;
931  case AArch64::EONWrr: Opcode = AArch64::EONWrs; break;
932  case AArch64::EONXrr: Opcode = AArch64::EONXrs; break;
933  case AArch64::EORWrr: Opcode = AArch64::EORWrs; break;
934  case AArch64::EORXrr: Opcode = AArch64::EORXrs; break;
935  case AArch64::ORNWrr: Opcode = AArch64::ORNWrs; break;
936  case AArch64::ORNXrr: Opcode = AArch64::ORNXrs; break;
937  case AArch64::ORRWrr: Opcode = AArch64::ORRWrs; break;
938  case AArch64::ORRXrr: Opcode = AArch64::ORRXrs; break;
939  }
940  MachineFunction &MF = *MBB.getParent();
941  // Try to create new inst without implicit operands added.
942  MachineInstr *NewMI = MF.CreateMachineInstr(
943  TII->get(Opcode), MI.getDebugLoc(), /*NoImplicit=*/true);
944  MBB.insert(MBBI, NewMI);
945  MachineInstrBuilder MIB1(MF, NewMI);
946  MIB1.addReg(MI.getOperand(0).getReg(), RegState::Define)
947  .add(MI.getOperand(1))
948  .add(MI.getOperand(2))
950  transferImpOps(MI, MIB1, MIB1);
951  MI.eraseFromParent();
952  return true;
953  }
954 
955  case AArch64::LOADgot: {
956  MachineFunction *MF = MBB.getParent();
957  Register DstReg = MI.getOperand(0).getReg();
958  const MachineOperand &MO1 = MI.getOperand(1);
959  unsigned Flags = MO1.getTargetFlags();
960 
961  if (MF->getTarget().getCodeModel() == CodeModel::Tiny) {
962  // Tiny codemodel expand to LDR
963  MachineInstrBuilder MIB = BuildMI(MBB, MBBI, MI.getDebugLoc(),
964  TII->get(AArch64::LDRXl), DstReg);
965 
966  if (MO1.isGlobal()) {
967  MIB.addGlobalAddress(MO1.getGlobal(), 0, Flags);
968  } else if (MO1.isSymbol()) {
969  MIB.addExternalSymbol(MO1.getSymbolName(), Flags);
970  } else {
971  assert(MO1.isCPI() &&
972  "Only expect globals, externalsymbols, or constant pools");
973  MIB.addConstantPoolIndex(MO1.getIndex(), MO1.getOffset(), Flags);
974  }
975  } else {
976  // Small codemodel expand into ADRP + LDR.
977  MachineFunction &MF = *MI.getParent()->getParent();
978  DebugLoc DL = MI.getDebugLoc();
979  MachineInstrBuilder MIB1 =
980  BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(AArch64::ADRP), DstReg);
981 
982  MachineInstrBuilder MIB2;
985  unsigned Reg32 = TRI->getSubReg(DstReg, AArch64::sub_32);
986  unsigned DstFlags = MI.getOperand(0).getTargetFlags();
987  MIB2 = BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(AArch64::LDRWui))
988  .addDef(Reg32)
989  .addReg(DstReg, RegState::Kill)
990  .addReg(DstReg, DstFlags | RegState::Implicit);
991  } else {
992  unsigned DstReg = MI.getOperand(0).getReg();
993  MIB2 = BuildMI(MBB, MBBI, DL, TII->get(AArch64::LDRXui))
994  .add(MI.getOperand(0))
995  .addUse(DstReg, RegState::Kill);
996  }
997 
998  if (MO1.isGlobal()) {
999  MIB1.addGlobalAddress(MO1.getGlobal(), 0, Flags | AArch64II::MO_PAGE);
1000  MIB2.addGlobalAddress(MO1.getGlobal(), 0,
1002  } else if (MO1.isSymbol()) {
1003  MIB1.addExternalSymbol(MO1.getSymbolName(), Flags | AArch64II::MO_PAGE);
1004  MIB2.addExternalSymbol(MO1.getSymbolName(), Flags |
1007  } else {
1008  assert(MO1.isCPI() &&
1009  "Only expect globals, externalsymbols, or constant pools");
1010  MIB1.addConstantPoolIndex(MO1.getIndex(), MO1.getOffset(),
1011  Flags | AArch64II::MO_PAGE);
1012  MIB2.addConstantPoolIndex(MO1.getIndex(), MO1.getOffset(),
1013  Flags | AArch64II::MO_PAGEOFF |
1015  }
1016 
1017  transferImpOps(MI, MIB1, MIB2);
1018  }
1019  MI.eraseFromParent();
1020  return true;
1021  }
1022  case AArch64::MOVaddrBA: {
1023  MachineFunction &MF = *MI.getParent()->getParent();
1025  // blockaddress expressions have to come from a constant pool because the
1026  // largest addend (and hence offset within a function) allowed for ADRP is
1027  // only 8MB.
1028  const BlockAddress *BA = MI.getOperand(1).getBlockAddress();
1029  assert(MI.getOperand(1).getOffset() == 0 && "unexpected offset");
1030 
1032  unsigned CPIdx = MCP->getConstantPoolIndex(BA, Align(8));
1033 
1034  Register DstReg = MI.getOperand(0).getReg();
1035  auto MIB1 =
1036  BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(AArch64::ADRP), DstReg)
1038  auto MIB2 = BuildMI(MBB, MBBI, MI.getDebugLoc(),
1039  TII->get(AArch64::LDRXui), DstReg)
1040  .addUse(DstReg)
1043  transferImpOps(MI, MIB1, MIB2);
1044  MI.eraseFromParent();
1045  return true;
1046  }
1047  }
1049  case AArch64::MOVaddr:
1050  case AArch64::MOVaddrJT:
1051  case AArch64::MOVaddrCP:
1052  case AArch64::MOVaddrTLS:
1053  case AArch64::MOVaddrEXT: {
1054  // Expand into ADRP + ADD.
1055  Register DstReg = MI.getOperand(0).getReg();
1056  assert(DstReg != AArch64::XZR);
1057  MachineInstrBuilder MIB1 =
1058  BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(AArch64::ADRP), DstReg)
1059  .add(MI.getOperand(1));
1060 
1061  if (MI.getOperand(1).getTargetFlags() & AArch64II::MO_TAGGED) {
1062  // MO_TAGGED on the page indicates a tagged address. Set the tag now.
1063  // We do so by creating a MOVK that sets bits 48-63 of the register to
1064  // (global address + 0x100000000 - PC) >> 48. This assumes that we're in
1065  // the small code model so we can assume a binary size of <= 4GB, which
1066  // makes the untagged PC relative offset positive. The binary must also be
1067  // loaded into address range [0, 2^48). Both of these properties need to
1068  // be ensured at runtime when using tagged addresses.
1069  auto Tag = MI.getOperand(1);
1070  Tag.setTargetFlags(AArch64II::MO_PREL | AArch64II::MO_G3);
1071  Tag.setOffset(0x100000000);
1072  BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(AArch64::MOVKXi), DstReg)
1073  .addReg(DstReg)
1074  .add(Tag)
1075  .addImm(48);
1076  }
1077 
1078  MachineInstrBuilder MIB2 =
1079  BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(AArch64::ADDXri))
1080  .add(MI.getOperand(0))
1081  .addReg(DstReg)
1082  .add(MI.getOperand(2))
1083  .addImm(0);
1084 
1085  transferImpOps(MI, MIB1, MIB2);
1086  MI.eraseFromParent();
1087  return true;
1088  }
1089  case AArch64::ADDlowTLS:
1090  // Produce a plain ADD
1091  BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(AArch64::ADDXri))
1092  .add(MI.getOperand(0))
1093  .add(MI.getOperand(1))
1094  .add(MI.getOperand(2))
1095  .addImm(0);
1096  MI.eraseFromParent();
1097  return true;
1098 
1099  case AArch64::MOVbaseTLS: {
1100  Register DstReg = MI.getOperand(0).getReg();
1101  auto SysReg = AArch64SysReg::TPIDR_EL0;
1102  MachineFunction *MF = MBB.getParent();
1104  SysReg = AArch64SysReg::TPIDR_EL3;
1105  else if (MF->getSubtarget<AArch64Subtarget>().useEL2ForTP())
1106  SysReg = AArch64SysReg::TPIDR_EL2;
1107  else if (MF->getSubtarget<AArch64Subtarget>().useEL1ForTP())
1108  SysReg = AArch64SysReg::TPIDR_EL1;
1109  BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(AArch64::MRS), DstReg)
1110  .addImm(SysReg);
1111  MI.eraseFromParent();
1112  return true;
1113  }
1114 
1115  case AArch64::MOVi32imm:
1116  return expandMOVImm(MBB, MBBI, 32);
1117  case AArch64::MOVi64imm:
1118  return expandMOVImm(MBB, MBBI, 64);
1119  case AArch64::RET_ReallyLR: {
1120  // Hiding the LR use with RET_ReallyLR may lead to extra kills in the
1121  // function and missing live-ins. We are fine in practice because callee
1122  // saved register handling ensures the register value is restored before
1123  // RET, but we need the undef flag here to appease the MachineVerifier
1124  // liveness checks.
1125  MachineInstrBuilder MIB =
1126  BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(AArch64::RET))
1127  .addReg(AArch64::LR, RegState::Undef);
1128  transferImpOps(MI, MIB, MIB);
1129  MI.eraseFromParent();
1130  return true;
1131  }
1132  case AArch64::CMP_SWAP_8:
1133  return expandCMP_SWAP(MBB, MBBI, AArch64::LDAXRB, AArch64::STLXRB,
1134  AArch64::SUBSWrx,
1136  AArch64::WZR, NextMBBI);
1137  case AArch64::CMP_SWAP_16:
1138  return expandCMP_SWAP(MBB, MBBI, AArch64::LDAXRH, AArch64::STLXRH,
1139  AArch64::SUBSWrx,
1141  AArch64::WZR, NextMBBI);
1142  case AArch64::CMP_SWAP_32:
1143  return expandCMP_SWAP(MBB, MBBI, AArch64::LDAXRW, AArch64::STLXRW,
1144  AArch64::SUBSWrs,
1146  AArch64::WZR, NextMBBI);
1147  case AArch64::CMP_SWAP_64:
1148  return expandCMP_SWAP(MBB, MBBI,
1149  AArch64::LDAXRX, AArch64::STLXRX, AArch64::SUBSXrs,
1151  AArch64::XZR, NextMBBI);
1152  case AArch64::CMP_SWAP_128:
1153  case AArch64::CMP_SWAP_128_RELEASE:
1154  case AArch64::CMP_SWAP_128_ACQUIRE:
1155  case AArch64::CMP_SWAP_128_MONOTONIC:
1156  return expandCMP_SWAP_128(MBB, MBBI, NextMBBI);
1157 
1158  case AArch64::AESMCrrTied:
1159  case AArch64::AESIMCrrTied: {
1160  MachineInstrBuilder MIB =
1161  BuildMI(MBB, MBBI, MI.getDebugLoc(),
1162  TII->get(Opcode == AArch64::AESMCrrTied ? AArch64::AESMCrr :
1163  AArch64::AESIMCrr))
1164  .add(MI.getOperand(0))
1165  .add(MI.getOperand(1));
1166  transferImpOps(MI, MIB, MIB);
1167  MI.eraseFromParent();
1168  return true;
1169  }
1170  case AArch64::IRGstack: {
1171  MachineFunction &MF = *MBB.getParent();
1172  const AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();
1173  const AArch64FrameLowering *TFI =
1174  MF.getSubtarget<AArch64Subtarget>().getFrameLowering();
1175 
1176  // IRG does not allow immediate offset. getTaggedBasePointerOffset should
1177  // almost always point to SP-after-prologue; if not, emit a longer
1178  // instruction sequence.
1179  int BaseOffset = -AFI->getTaggedBasePointerOffset();
1180  Register FrameReg;
1181  StackOffset FrameRegOffset = TFI->resolveFrameOffsetReference(
1182  MF, BaseOffset, false /*isFixed*/, false /*isSVE*/, FrameReg,
1183  /*PreferFP=*/false,
1184  /*ForSimm=*/true);
1185  Register SrcReg = FrameReg;
1186  if (FrameRegOffset) {
1187  // Use output register as temporary.
1188  SrcReg = MI.getOperand(0).getReg();
1189  emitFrameOffset(MBB, &MI, MI.getDebugLoc(), SrcReg, FrameReg,
1190  FrameRegOffset, TII);
1191  }
1192  BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(AArch64::IRG))
1193  .add(MI.getOperand(0))
1194  .addUse(SrcReg)
1195  .add(MI.getOperand(2));
1196  MI.eraseFromParent();
1197  return true;
1198  }
1199  case AArch64::TAGPstack: {
1200  int64_t Offset = MI.getOperand(2).getImm();
1201  BuildMI(MBB, MBBI, MI.getDebugLoc(),
1202  TII->get(Offset >= 0 ? AArch64::ADDG : AArch64::SUBG))
1203  .add(MI.getOperand(0))
1204  .add(MI.getOperand(1))
1206  .add(MI.getOperand(4));
1207  MI.eraseFromParent();
1208  return true;
1209  }
1210  case AArch64::STGloop_wback:
1211  case AArch64::STZGloop_wback:
1212  return expandSetTagLoop(MBB, MBBI, NextMBBI);
1213  case AArch64::STGloop:
1214  case AArch64::STZGloop:
1216  "Non-writeback variants of STGloop / STZGloop should not "
1217  "survive past PrologEpilogInserter.");
1218  case AArch64::STR_ZZZZXI:
1219  return expandSVESpillFill(MBB, MBBI, AArch64::STR_ZXI, 4);
1220  case AArch64::STR_ZZZXI:
1221  return expandSVESpillFill(MBB, MBBI, AArch64::STR_ZXI, 3);
1222  case AArch64::STR_ZZXI:
1223  return expandSVESpillFill(MBB, MBBI, AArch64::STR_ZXI, 2);
1224  case AArch64::LDR_ZZZZXI:
1225  return expandSVESpillFill(MBB, MBBI, AArch64::LDR_ZXI, 4);
1226  case AArch64::LDR_ZZZXI:
1227  return expandSVESpillFill(MBB, MBBI, AArch64::LDR_ZXI, 3);
1228  case AArch64::LDR_ZZXI:
1229  return expandSVESpillFill(MBB, MBBI, AArch64::LDR_ZXI, 2);
1230  case AArch64::BLR_RVMARKER:
1231  return expandCALL_RVMARKER(MBB, MBBI);
1232  case AArch64::StoreSwiftAsyncContext:
1233  return expandStoreSwiftAsyncContext(MBB, MBBI);
1234  }
1235  return false;
1236 }
1237 
1238 /// Iterate over the instructions in basic block MBB and expand any
1239 /// pseudo instructions. Return true if anything was modified.
1240 bool AArch64ExpandPseudo::expandMBB(MachineBasicBlock &MBB) {
1241  bool Modified = false;
1242 
1244  while (MBBI != E) {
1245  MachineBasicBlock::iterator NMBBI = std::next(MBBI);
1246  Modified |= expandMI(MBB, MBBI, NMBBI);
1247  MBBI = NMBBI;
1248  }
1249 
1250  return Modified;
1251 }
1252 
1253 bool AArch64ExpandPseudo::runOnMachineFunction(MachineFunction &MF) {
1254  TII = static_cast<const AArch64InstrInfo *>(MF.getSubtarget().getInstrInfo());
1255 
1256  bool Modified = false;
1257  for (auto &MBB : MF)
1258  Modified |= expandMBB(MBB);
1259  return Modified;
1260 }
1261 
1262 /// Returns an instance of the pseudo instruction expansion pass.
1264  return new AArch64ExpandPseudo();
1265 }
llvm::Check::Size
@ Size
Definition: FileCheck.h:73
i
i
Definition: README.txt:29
llvm::AArch64II::MO_G3
@ MO_G3
MO_G3 - A symbol operand with this flag (granule 3) represents the high 16-bits of a 64-bit address,...
Definition: AArch64BaseInfo.h:686
llvm::AArch64ISD::LOADgot
@ LOADgot
Definition: AArch64ISelLowering.h:64
llvm::AArch64::ElementSizeNone
@ ElementSizeNone
Definition: AArch64InstrInfo.h:467
MI
IRTranslator LLVM IR MI
Definition: IRTranslator.cpp:105
MachineInstr.h
MathExtras.h
llvm::MachineInstrBuilder::addImm
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
Definition: MachineInstrBuilder.h:131
llvm::MachineConstantPool::getConstantPoolIndex
unsigned getConstantPoolIndex(const Constant *C, Align Alignment)
getConstantPoolIndex - Create a new entry in the constant pool or return an existing one.
Definition: MachineFunction.cpp:1457
llvm
This file implements support for optimizing divisions by a constant.
Definition: AllocatorList.h:23
AArch64MachineFunctionInfo.h
UseMI
MachineInstrBuilder & UseMI
Definition: AArch64ExpandPseudoInsts.cpp:102
llvm::AArch64CC::NE
@ NE
Definition: AArch64BaseInfo.h:256
llvm::MachineOperand::CreateReg
static MachineOperand CreateReg(Register Reg, bool isDef, bool isImp=false, bool isKill=false, bool isDead=false, bool isUndef=false, bool isEarlyClobber=false, unsigned SubReg=0, bool isDebug=false, bool isInternalRead=false, bool isRenamable=false)
Definition: MachineOperand.h:791
llvm::MachineBasicBlock::getBasicBlock
const BasicBlock * getBasicBlock() const
Return the LLVM basic block that this instance corresponded to originally.
Definition: MachineBasicBlock.h:202
llvm::AArch64_AM::LSL
@ LSL
Definition: AArch64AddressingModes.h:35
AARCH64_EXPAND_PSEUDO_NAME
#define AARCH64_EXPAND_PSEUDO_NAME
Definition: AArch64ExpandPseudoInsts.cpp:47
MCInstrDesc.h
llvm::MachineOperand::getGlobal
const GlobalValue * getGlobal() const
Definition: MachineOperand.h:563
llvm::MachineInstrBuilder::add
const MachineInstrBuilder & add(const MachineOperand &MO) const
Definition: MachineInstrBuilder.h:224
llvm::AArch64::FalseLanesZero
@ FalseLanesZero
Definition: AArch64InstrInfo.h:490
Pass.h
llvm::TargetSubtargetInfo::getInstrInfo
virtual const TargetInstrInfo * getInstrInfo() const
Definition: TargetSubtargetInfo.h:92
llvm::AArch64::ElementSizeS
@ ElementSizeS
Definition: AArch64InstrInfo.h:470
llvm::RegState::Implicit
@ Implicit
Not emitted register (e.g. carry, or temporary result).
Definition: MachineInstrBuilder.h:46
llvm::SmallVector
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1168
llvm::AArch64Subtarget::useEL3ForTP
bool useEL3ForTP() const
Definition: AArch64Subtarget.h:435
AArch64BaseInfo.h
llvm::MachineFunctionPass
MachineFunctionPass - This class adapts the FunctionPass interface to allow convenient creation of pa...
Definition: MachineFunctionPass.h:30
MachineBasicBlock.h
llvm::LivePhysRegs
A set of physical registers with utility functions to track liveness when walking backward/forward th...
Definition: LivePhysRegs.h:48
llvm::TargetSubtargetInfo::getRegisterInfo
virtual const TargetRegisterInfo * getRegisterInfo() const
getRegisterInfo - If register information is available, return it.
Definition: TargetSubtargetInfo.h:124
llvm::AArch64II::MO_PREL
@ MO_PREL
MO_PREL - Indicates that the bits of the symbol operand represented by MO_G0 etc are PC relative.
Definition: AArch64BaseInfo.h:737
llvm::TargetRegisterInfo
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
Definition: TargetRegisterInfo.h:233
llvm::MachineFunction::moveCallSiteInfo
void moveCallSiteInfo(const MachineInstr *Old, const MachineInstr *New)
Move the call site info from Old to \New call site info.
Definition: MachineFunction.cpp:949
llvm::MachineOperand::isSymbol
bool isSymbol() const
isSymbol - Tests if this is a MO_ExternalSymbol operand.
Definition: MachineOperand.h:341
DenseMap.h
llvm::MachineFunction::insert
void insert(iterator MBBI, MachineBasicBlock *MBB)
Definition: MachineFunction.h:831
llvm::getDeadRegState
unsigned getDeadRegState(bool B)
Definition: MachineInstrBuilder.h:511
llvm::AArch64::ElementSizeB
@ ElementSizeB
Definition: AArch64InstrInfo.h:468
llvm::AArch64::DestructiveTernaryCommWithRev
@ DestructiveTernaryCommWithRev
Definition: AArch64InstrInfo.h:484
Offset
uint64_t Offset
Definition: ELFObjHandler.cpp:81
llvm::dwarf::Tag
Tag
Definition: Dwarf.h:104
llvm::MachineOperand::getOffset
int64_t getOffset() const
Return the offset from the symbol in this operand.
Definition: MachineOperand.h:600
TRI
unsigned const TargetRegisterInfo * TRI
Definition: MachineSink.cpp:1567
llvm::ARCISD::BL
@ BL
Definition: ARCISelLowering.h:34
llvm::RegState::Kill
@ Kill
The last use of a register.
Definition: MachineInstrBuilder.h:48
llvm::AArch64FrameLowering
Definition: AArch64FrameLowering.h:23
llvm::RegState::Define
@ Define
Register definition.
Definition: MachineInstrBuilder.h:44
llvm::MachineBasicBlock::addSuccessor
void addSuccessor(MachineBasicBlock *Succ, BranchProbability Prob=BranchProbability::getUnknown())
Add Succ as a successor of this MachineBasicBlock.
Definition: MachineBasicBlock.cpp:751
llvm::AArch64::getSVERevInstr
int getSVERevInstr(uint16_t Opcode)
llvm::MachineInstrBuilder::addDef
const MachineInstrBuilder & addDef(Register RegNo, unsigned Flags=0, unsigned SubReg=0) const
Add a virtual register definition operand.
Definition: MachineInstrBuilder.h:116
llvm::finalizeBundle
void finalizeBundle(MachineBasicBlock &MBB, MachineBasicBlock::instr_iterator FirstMI, MachineBasicBlock::instr_iterator LastMI)
finalizeBundle - Finalize a machine instruction bundle which includes a sequence of instructions star...
Definition: MachineInstrBundle.cpp:123
AArch64InstrInfo.h
llvm::getRenamableRegState
unsigned getRenamableRegState(bool B)
Definition: MachineInstrBuilder.h:523
TargetMachine.h
llvm::PassRegistry::getPassRegistry
static PassRegistry * getPassRegistry()
getPassRegistry - Access the global registry object, which is automatically initialized at applicatio...
Definition: PassRegistry.cpp:31
llvm::MachineInstrBuilder::addMBB
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned TargetFlags=0) const
Definition: MachineInstrBuilder.h:146
llvm::RegState::Undef
@ Undef
Value of the register doesn't matter.
Definition: MachineInstrBuilder.h:52
llvm::AArch64InstrInfo
Definition: AArch64InstrInfo.h:38
llvm::AArch64ISD::MRS
@ MRS
Definition: AArch64ISelLowering.h:289
E
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
llvm::initializeAArch64ExpandPseudoPass
void initializeAArch64ExpandPseudoPass(PassRegistry &)
llvm::MachineFunction::getInfo
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do...
Definition: MachineFunction.h:728
llvm::AArch64Subtarget::isTargetILP32
bool isTargetILP32() const
Definition: AArch64Subtarget.h:529
llvm::MachineOperand::isUse
bool isUse() const
Definition: MachineOperand.h:370
llvm::MachineBasicBlock::eraseFromParent
void eraseFromParent()
This method unlinks 'this' from the containing function and deletes it.
Definition: MachineBasicBlock.cpp:1337
TII
const HexagonInstrInfo * TII
Definition: HexagonCopyToCombine.cpp:129
llvm::MCInstrDesc
Describe properties that are true of each instruction in the target description file.
Definition: MCInstrDesc.h:195
B
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
llvm::MachineOperand
MachineOperand class - Representation of each machine instruction operand.
Definition: MachineOperand.h:49
llvm::MachineInstr::FrameSetup
@ FrameSetup
Definition: MachineInstr.h:82
llvm::AArch64::DestructiveUnaryPassthru
@ DestructiveUnaryPassthru
Definition: AArch64InstrInfo.h:485
llvm::AArch64_AM::getShifterImm
static unsigned getShifterImm(AArch64_AM::ShiftExtendType ST, unsigned Imm)
getShifterImm - Encode the shift type and amount: imm: 6-bit shift amount shifter: 000 ==> lsl 001 ==...
Definition: AArch64AddressingModes.h:99
llvm::report_fatal_error
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
Definition: Error.cpp:140
LoopDeletionResult::Modified
@ Modified
llvm::computeAndAddLiveIns
void computeAndAddLiveIns(LivePhysRegs &LiveRegs, MachineBasicBlock &MBB)
Convenience function combining computeLiveIns() and addLiveIns().
Definition: LivePhysRegs.cpp:339
DebugLoc.h
Align
uint64_t Align
Definition: ELFObjHandler.cpp:83
llvm::emitFrameOffset
void emitFrameOffset(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, const DebugLoc &DL, unsigned DestReg, unsigned SrcReg, StackOffset Offset, const TargetInstrInfo *TII, MachineInstr::MIFlag=MachineInstr::NoFlags, bool SetNZCV=false, bool NeedsWinCFI=false, bool *HasWinCFI=nullptr)
emitFrameOffset - Emit instructions as needed to set DestReg to SrcReg plus Offset.
Definition: AArch64InstrInfo.cpp:4041
llvm::MachineInstrBuilder::addExternalSymbol
const MachineInstrBuilder & addExternalSymbol(const char *FnName, unsigned TargetFlags=0) const
Definition: MachineInstrBuilder.h:184
llvm::MachineBasicBlock
Definition: MachineBasicBlock.h:95
llvm::MachineInstrBuilder::cloneMemRefs
const MachineInstrBuilder & cloneMemRefs(const MachineInstr &OtherMI) const
Definition: MachineInstrBuilder.h:213
AArch64AddressingModes.h
llvm::MachineFunction::getSubtarget
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
Definition: MachineFunction.h:630
llvm::AArch64::NotDestructive
@ NotDestructive
Definition: AArch64InstrInfo.h:476
llvm::MachineInstrBuilder::setMIFlag
const MachineInstrBuilder & setMIFlag(MachineInstr::MIFlag Flag) const
Definition: MachineInstrBuilder.h:278
llvm::MachineOperand::getTargetFlags
unsigned getTargetFlags() const
Definition: MachineOperand.h:221
llvm::MachineOperand::isReg
bool isReg() const
isReg - Tests if this is a MO_Register operand.
Definition: MachineOperand.h:321
llvm::MachineInstr
Representation of each machine instruction.
Definition: MachineInstr.h:64
llvm::MachineInstrBuilder
Definition: MachineInstrBuilder.h:69
uint64_t
llvm::AArch64FunctionInfo::getTaggedBasePointerOffset
unsigned getTaggedBasePointerOffset() const
Definition: AArch64MachineFunctionInfo.h:380
AArch64ExpandImm.h
llvm::createAArch64ExpandPseudoPass
FunctionPass * createAArch64ExpandPseudoPass()
Returns an instance of the pseudo instruction expansion pass.
Definition: AArch64ExpandPseudoInsts.cpp:1263
llvm::AArch64II::MO_NC
@ MO_NC
MO_NC - Indicates whether the linker is expected to check the symbol reference for overflow.
Definition: AArch64BaseInfo.h:718
llvm::numbers::e
constexpr double e
Definition: MathExtras.h:57
llvm::AArch64II::MO_PAGEOFF
@ MO_PAGEOFF
MO_PAGEOFF - A symbol operand with this flag represents the offset of that symbol within a 4K page.
Definition: AArch64BaseInfo.h:682
llvm::MachineConstantPool
The MachineConstantPool class keeps track of constants referenced by a function which must be spilled...
Definition: MachineConstantPool.h:117
llvm::MachineOperand::isDead
bool isDead() const
Definition: MachineOperand.h:385
llvm::AArch64FunctionInfo
AArch64FunctionInfo - This class is derived from MachineFunctionInfo and contains private AArch64-spe...
Definition: AArch64MachineFunctionInfo.h:37
I
#define I(x, y, z)
Definition: MD5.cpp:59
llvm::AArch64Subtarget::useEL2ForTP
bool useEL2ForTP() const
Definition: AArch64Subtarget.h:434
MachineConstantPool.h
llvm::MachineOperand::isCPI
bool isCPI() const
isCPI - Tests if this is a MO_ConstantPoolIndex operand.
Definition: MachineOperand.h:333
llvm::MachineFunction::CreateMachineBasicBlock
MachineBasicBlock * CreateMachineBasicBlock(const BasicBlock *bb=nullptr)
CreateMachineBasicBlock - Allocate a new MachineBasicBlock.
Definition: MachineFunction.cpp:415
MachineFunctionPass.h
llvm::MachineFunction::getConstantPool
MachineConstantPool * getConstantPool()
getConstantPool - Return the constant pool object for the current function.
Definition: MachineFunction.h:662
assert
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
llvm::MachineBasicBlock::getParent
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
Definition: MachineBasicBlock.h:225
llvm::MachineInstrBuilder::addReg
const MachineInstrBuilder & addReg(Register RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
Definition: MachineInstrBuilder.h:97
llvm::MachineInstrBuilder::addUse
const MachineInstrBuilder & addUse(Register RegNo, unsigned Flags=0, unsigned SubReg=0) const
Add a virtual register use operand.
Definition: MachineInstrBuilder.h:123
llvm::MachineOperand::getReg
Register getReg() const
getReg - Returns the register number.
Definition: MachineOperand.h:360
llvm::MachineFunction
Definition: MachineFunction.h:234
Triple.h
llvm::BlockAddress
The address of a basic block.
Definition: Constants.h:848
llvm::StringRef
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:58
MBBI
MachineBasicBlock MachineBasicBlock::iterator MBBI
Definition: AArch64SLSHardening.cpp:75
llvm::AArch64::FalseLanesMask
@ FalseLanesMask
Definition: AArch64InstrInfo.h:489
llvm_unreachable
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
Definition: ErrorHandling.h:134
llvm::StackOffset
StackOffset is a class to represent an offset with 2 dimensions, named fixed and scalable,...
Definition: TypeSize.h:134
llvm::ilist_node_impl::getIterator
self_iterator getIterator()
Definition: ilist_node.h:81
TargetSubtargetInfo.h
DL
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Definition: AArch64SLSHardening.cpp:76
llvm::AArch64Subtarget::useEL1ForTP
bool useEL1ForTP() const
Definition: AArch64Subtarget.h:433
LLVM_FALLTHROUGH
#define LLVM_FALLTHROUGH
LLVM_FALLTHROUGH - Mark fallthrough cases in switch statements.
Definition: Compiler.h:286
llvm::CodeModel::Tiny
@ Tiny
Definition: CodeGen.h:28
llvm::AArch64CC::EQ
@ EQ
Definition: AArch64BaseInfo.h:255
llvm::MachineInstrBuilder::getInstr
MachineInstr * getInstr() const
If conversion operators fail, use this method to get the MachineInstr explicitly.
Definition: MachineInstrBuilder.h:89
llvm::Register
Wrapper class representing virtual and physical registers.
Definition: Register.h:19
llvm::AArch64_AM::getArithExtendImm
static unsigned getArithExtendImm(AArch64_AM::ShiftExtendType ET, unsigned Imm)
getArithExtendImm - Encode the extend type and shift amount for an arithmetic instruction: imm: 3-bit...
Definition: AArch64AddressingModes.h:171
MBB
MachineBasicBlock & MBB
Definition: AArch64SLSHardening.cpp:74
llvm::AArch64::ElementSizeD
@ ElementSizeD
Definition: AArch64InstrInfo.h:471
Insn
SmallVector< AArch64_IMM::ImmInsnModel, 4 > Insn
Definition: AArch64MIPeepholeOpt.cpp:74
llvm::MachineFunction::getTarget
const LLVMTargetMachine & getTarget() const
getTarget - Return the target machine this machine code is compiled with
Definition: MachineFunction.h:626
llvm::AArch64_IMM::expandMOVImm
void expandMOVImm(uint64_t Imm, unsigned BitSize, SmallVectorImpl< ImmInsnModel > &Insn)
Expand a MOVi32imm or MOVi64imm pseudo instruction to one or more real move-immediate instructions to...
Definition: AArch64ExpandImm.cpp:304
llvm::MachineBasicBlock::insert
instr_iterator insert(instr_iterator I, MachineInstr *M)
Insert MI into the instruction list before I, possibly inside a bundle.
Definition: MachineBasicBlock.cpp:1317
llvm::ARCISD::RET
@ RET
Definition: ARCISelLowering.h:52
llvm::MachineOperand::getIndex
int getIndex() const
Definition: MachineOperand.h:557
llvm::MachineInstrBuilder::addConstantPoolIndex
const MachineInstrBuilder & addConstantPoolIndex(unsigned Idx, int Offset=0, unsigned TargetFlags=0) const
Definition: MachineInstrBuilder.h:158
llvm::AArch64::DestructiveInstTypeMask
@ DestructiveInstTypeMask
Definition: AArch64InstrInfo.h:475
llvm::RegState::Renamable
@ Renamable
Register that may be renamed.
Definition: MachineInstrBuilder.h:61
llvm::TargetMachine::getCodeModel
CodeModel::Model getCodeModel() const
Returns the code model.
Definition: TargetMachine.cpp:74
llvm::AArch64ISD::ADRP
@ ADRP
Definition: AArch64ISelLowering.h:61
llvm::AArch64::DestructiveBinaryComm
@ DestructiveBinaryComm
Definition: AArch64InstrInfo.h:482
llvm::MachineInstrBuilder::addGlobalAddress
const MachineInstrBuilder & addGlobalAddress(const GlobalValue *GV, int64_t Offset=0, unsigned TargetFlags=0) const
Definition: MachineInstrBuilder.h:177
CodeGen.h
llvm::AArch64Subtarget::isTargetMachO
bool isTargetMachO() const
Definition: AArch64Subtarget.h:527
INITIALIZE_PASS
INITIALIZE_PASS(AArch64ExpandPseudo, "aarch64-expand-pseudo", AARCH64_EXPAND_PSEUDO_NAME, false, false) static void transferImpOps(MachineInstr &OldMI
Transfer implicit operands on the pseudo instruction to the instructions created from the expansion.
llvm::getKillRegState
unsigned getKillRegState(bool B)
Definition: MachineInstrBuilder.h:508
AArch64Subtarget.h
llvm::AArch64::DestructiveBinary
@ DestructiveBinary
Definition: AArch64InstrInfo.h:481
llvm::MachineBasicBlock::begin
iterator begin()
Definition: MachineBasicBlock.h:268
MachineInstrBuilder.h
llvm::MachineOperand::getSymbolName
const char * getSymbolName() const
Definition: MachineOperand.h:608
llvm::MachineInstrBuilder::setMIFlags
const MachineInstrBuilder & setMIFlags(unsigned Flags) const
Definition: MachineInstrBuilder.h:273
llvm::BuildMI
MachineInstrBuilder BuildMI(MachineFunction &MF, const DebugLoc &DL, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
Definition: MachineInstrBuilder.h:328
N
#define N
DefMI
MachineInstrBuilder MachineInstrBuilder & DefMI
Definition: AArch64ExpandPseudoInsts.cpp:103
llvm::MachineInstr::addOperand
void addOperand(MachineFunction &MF, const MachineOperand &Op)
Add the specified operand to the instruction.
Definition: MachineInstr.cpp:207
llvm::AArch64::getSVEPseudoMap
int getSVEPseudoMap(uint16_t Opcode)
llvm::AArch64_AM::UXTB
@ UXTB
Definition: AArch64AddressingModes.h:41
MachineOperand.h
llvm::TargetRegisterInfo::getSubReg
MCRegister getSubReg(MCRegister Reg, unsigned Idx) const
Returns the physical register number of sub-register "Index" for physical register RegNo.
Definition: TargetRegisterInfo.h:1094
llvm::AArch64::DestructiveBinaryImm
@ DestructiveBinaryImm
Definition: AArch64InstrInfo.h:479
llvm::AArch64::DestructiveBinaryCommWithRev
@ DestructiveBinaryCommWithRev
Definition: AArch64InstrInfo.h:483
llvm::FunctionPass
FunctionPass class - This class is used to implement most global optimizations.
Definition: Pass.h:298
llvm::MachineFunction::CreateMachineInstr
MachineInstr * CreateMachineInstr(const MCInstrDesc &MCID, const DebugLoc &DL, bool NoImplicit=false)
CreateMachineInstr - Allocate a new MachineInstr.
Definition: MachineFunction.cpp:349
llvm::AArch64_AM::UXTH
@ UXTH
Definition: AArch64AddressingModes.h:42
llvm::DebugLoc
A debug info location.
Definition: DebugLoc.h:33
llvm::AArch64II::MO_TAGGED
@ MO_TAGGED
MO_TAGGED - With MO_PAGE, indicates that the page includes a memory tag in bits 56-63.
Definition: AArch64BaseInfo.h:745
llvm::AArch64Subtarget
Definition: AArch64Subtarget.h:38
MachineFunction.h
llvm::MachineInstrBundleIterator< MachineInstr >
llvm::abs
APFloat abs(APFloat X)
Returns the absolute value of the argument.
Definition: APFloat.h:1282
llvm::AArch64::getSVENonRevInstr
int getSVENonRevInstr(uint16_t Opcode)
llvm::MCInstrDesc::getNumOperands
unsigned getNumOperands() const
Return the number of declared MachineOperands for this MachineInstruction.
Definition: MCInstrDesc.h:228
llvm::MachineBasicBlock::end
iterator end()
Definition: MachineBasicBlock.h:270
llvm::AArch64II::MO_PAGE
@ MO_PAGE
MO_PAGE - A symbol operand with this flag represents the pc-relative offset of the 4K page containing...
Definition: AArch64BaseInfo.h:677
llvm::MachineOperand::isGlobal
bool isGlobal() const
isGlobal - Tests if this is a MO_GlobalAddress operand.
Definition: MachineOperand.h:339
llvm::Intrinsic::ID
unsigned ID
Definition: TargetTransformInfo.h:37
LivePhysRegs.h
llvm::AArch64::ElementSizeH
@ ElementSizeH
Definition: AArch64InstrInfo.h:469