//===- AArch64ExpandPseudoInsts.cpp - Expand pseudo instructions ----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains a pass that expands pseudo instructions into target
// instructions to allow proper scheduling and other late optimizations. This
// pass should be run after register allocation but before the post-regalloc
// scheduling pass.
//
//===----------------------------------------------------------------------===//

#include "AArch64ExpandImm.h"
#include "AArch64InstrInfo.h"
#include "AArch64MachineFunctionInfo.h"
#include "AArch64Subtarget.h"
#include "MCTargetDesc/AArch64AddressingModes.h"
#include "Utils/AArch64BaseInfo.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/Triple.h"
#include "llvm/CodeGen/LivePhysRegs.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineInstrBundle.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/Pass.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetMachine.h"
#include <cassert>
#include <cstdint>
#include <iterator>
#include <limits>
#include <utility>

45 using namespace llvm;
46 
47 #define AARCH64_EXPAND_PSEUDO_NAME "AArch64 pseudo instruction expansion pass"
48 
49 namespace {
50 
51 class AArch64ExpandPseudo : public MachineFunctionPass {
52 public:
53  const AArch64InstrInfo *TII;
54 
55  static char ID;
56 
57  AArch64ExpandPseudo() : MachineFunctionPass(ID) {
59  }
60 
61  bool runOnMachineFunction(MachineFunction &Fn) override;
62 
63  StringRef getPassName() const override { return AARCH64_EXPAND_PSEUDO_NAME; }
64 
65 private:
66  bool expandMBB(MachineBasicBlock &MBB);
68  MachineBasicBlock::iterator &NextMBBI);
70  unsigned BitSize);
71 
72  bool expand_DestructiveOp(MachineInstr &MI, MachineBasicBlock &MBB,
75  unsigned LdarOp, unsigned StlrOp, unsigned CmpOp,
76  unsigned ExtendImm, unsigned ZeroReg,
77  MachineBasicBlock::iterator &NextMBBI);
78  bool expandCMP_SWAP_128(MachineBasicBlock &MBB,
80  MachineBasicBlock::iterator &NextMBBI);
81  bool expandSetTagLoop(MachineBasicBlock &MBB,
83  MachineBasicBlock::iterator &NextMBBI);
84  bool expandSVESpillFill(MachineBasicBlock &MBB,
85  MachineBasicBlock::iterator MBBI, unsigned Opc,
86  unsigned N);
87  bool expandCALL_RVMARKER(MachineBasicBlock &MBB,
89 };
90 
91 } // end anonymous namespace
92 
94 
95 INITIALIZE_PASS(AArch64ExpandPseudo, "aarch64-expand-pseudo",
96  AARCH64_EXPAND_PSEUDO_NAME, false, false)
97 
98 /// Transfer implicit operands on the pseudo instruction to the
99 /// instructions created from the expansion.
100 static void transferImpOps(MachineInstr &OldMI, MachineInstrBuilder &UseMI,
102  const MCInstrDesc &Desc = OldMI.getDesc();
103  for (unsigned i = Desc.getNumOperands(), e = OldMI.getNumOperands(); i != e;
104  ++i) {
105  const MachineOperand &MO = OldMI.getOperand(i);
106  assert(MO.isReg() && MO.getReg());
107  if (MO.isUse())
108  UseMI.add(MO);
109  else
110  DefMI.add(MO);
111  }
112 }
113 
114 /// Expand a MOVi32imm or MOVi64imm pseudo instruction to one or more
115 /// real move-immediate instructions to synthesize the immediate.
118  unsigned BitSize) {
119  MachineInstr &MI = *MBBI;
120  Register DstReg = MI.getOperand(0).getReg();
121  uint64_t RenamableState =
122  MI.getOperand(0).isRenamable() ? RegState::Renamable : 0;
123  uint64_t Imm = MI.getOperand(1).getImm();
124 
125  if (DstReg == AArch64::XZR || DstReg == AArch64::WZR) {
126  // Useless def, and we don't want to risk creating an invalid ORR (which
127  // would really write to sp).
128  MI.eraseFromParent();
129  return true;
130  }
131 
133  AArch64_IMM::expandMOVImm(Imm, BitSize, Insn);
134  assert(Insn.size() != 0);
135 
137  for (auto I = Insn.begin(), E = Insn.end(); I != E; ++I) {
138  bool LastItem = std::next(I) == E;
139  switch (I->Opcode)
140  {
141  default: llvm_unreachable("unhandled!"); break;
142 
143  case AArch64::ORRWri:
144  case AArch64::ORRXri:
145  MIBS.push_back(BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(I->Opcode))
146  .add(MI.getOperand(0))
147  .addReg(BitSize == 32 ? AArch64::WZR : AArch64::XZR)
148  .addImm(I->Op2));
149  break;
150  case AArch64::MOVNWi:
151  case AArch64::MOVNXi:
152  case AArch64::MOVZWi:
153  case AArch64::MOVZXi: {
154  bool DstIsDead = MI.getOperand(0).isDead();
155  MIBS.push_back(BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(I->Opcode))
156  .addReg(DstReg, RegState::Define |
157  getDeadRegState(DstIsDead && LastItem) |
158  RenamableState)
159  .addImm(I->Op1)
160  .addImm(I->Op2));
161  } break;
162  case AArch64::MOVKWi:
163  case AArch64::MOVKXi: {
164  Register DstReg = MI.getOperand(0).getReg();
165  bool DstIsDead = MI.getOperand(0).isDead();
166  MIBS.push_back(BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(I->Opcode))
167  .addReg(DstReg,
169  getDeadRegState(DstIsDead && LastItem) |
170  RenamableState)
171  .addReg(DstReg)
172  .addImm(I->Op1)
173  .addImm(I->Op2));
174  } break;
175  }
176  }
177  transferImpOps(MI, MIBS.front(), MIBS.back());
178  MI.eraseFromParent();
179  return true;
180 }
181 
182 bool AArch64ExpandPseudo::expandCMP_SWAP(
184  unsigned StlrOp, unsigned CmpOp, unsigned ExtendImm, unsigned ZeroReg,
185  MachineBasicBlock::iterator &NextMBBI) {
186  MachineInstr &MI = *MBBI;
187  DebugLoc DL = MI.getDebugLoc();
188  const MachineOperand &Dest = MI.getOperand(0);
189  Register StatusReg = MI.getOperand(1).getReg();
190  bool StatusDead = MI.getOperand(1).isDead();
191  // Duplicating undef operands into 2 instructions does not guarantee the same
192  // value on both; However undef should be replaced by xzr anyway.
193  assert(!MI.getOperand(2).isUndef() && "cannot handle undef");
194  Register AddrReg = MI.getOperand(2).getReg();
195  Register DesiredReg = MI.getOperand(3).getReg();
196  Register NewReg = MI.getOperand(4).getReg();
197 
198  MachineFunction *MF = MBB.getParent();
199  auto LoadCmpBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());
200  auto StoreBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());
201  auto DoneBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());
202 
203  MF->insert(++MBB.getIterator(), LoadCmpBB);
204  MF->insert(++LoadCmpBB->getIterator(), StoreBB);
205  MF->insert(++StoreBB->getIterator(), DoneBB);
206 
207  // .Lloadcmp:
208  // mov wStatus, 0
209  // ldaxr xDest, [xAddr]
210  // cmp xDest, xDesired
211  // b.ne .Ldone
212  if (!StatusDead)
213  BuildMI(LoadCmpBB, DL, TII->get(AArch64::MOVZWi), StatusReg)
214  .addImm(0).addImm(0);
215  BuildMI(LoadCmpBB, DL, TII->get(LdarOp), Dest.getReg())
216  .addReg(AddrReg);
217  BuildMI(LoadCmpBB, DL, TII->get(CmpOp), ZeroReg)
218  .addReg(Dest.getReg(), getKillRegState(Dest.isDead()))
219  .addReg(DesiredReg)
220  .addImm(ExtendImm);
221  BuildMI(LoadCmpBB, DL, TII->get(AArch64::Bcc))
223  .addMBB(DoneBB)
224  .addReg(AArch64::NZCV, RegState::Implicit | RegState::Kill);
225  LoadCmpBB->addSuccessor(DoneBB);
226  LoadCmpBB->addSuccessor(StoreBB);
227 
228  // .Lstore:
229  // stlxr wStatus, xNew, [xAddr]
230  // cbnz wStatus, .Lloadcmp
231  BuildMI(StoreBB, DL, TII->get(StlrOp), StatusReg)
232  .addReg(NewReg)
233  .addReg(AddrReg);
234  BuildMI(StoreBB, DL, TII->get(AArch64::CBNZW))
235  .addReg(StatusReg, getKillRegState(StatusDead))
236  .addMBB(LoadCmpBB);
237  StoreBB->addSuccessor(LoadCmpBB);
238  StoreBB->addSuccessor(DoneBB);
239 
240  DoneBB->splice(DoneBB->end(), &MBB, MI, MBB.end());
241  DoneBB->transferSuccessors(&MBB);
242 
243  MBB.addSuccessor(LoadCmpBB);
244 
245  NextMBBI = MBB.end();
246  MI.eraseFromParent();
247 
248  // Recompute livein lists.
249  LivePhysRegs LiveRegs;
250  computeAndAddLiveIns(LiveRegs, *DoneBB);
251  computeAndAddLiveIns(LiveRegs, *StoreBB);
252  computeAndAddLiveIns(LiveRegs, *LoadCmpBB);
253  // Do an extra pass around the loop to get loop carried registers right.
254  StoreBB->clearLiveIns();
255  computeAndAddLiveIns(LiveRegs, *StoreBB);
256  LoadCmpBB->clearLiveIns();
257  computeAndAddLiveIns(LiveRegs, *LoadCmpBB);
258 
259  return true;
260 }
261 
262 bool AArch64ExpandPseudo::expandCMP_SWAP_128(
264  MachineBasicBlock::iterator &NextMBBI) {
265  MachineInstr &MI = *MBBI;
266  DebugLoc DL = MI.getDebugLoc();
267  MachineOperand &DestLo = MI.getOperand(0);
268  MachineOperand &DestHi = MI.getOperand(1);
269  Register StatusReg = MI.getOperand(2).getReg();
270  bool StatusDead = MI.getOperand(2).isDead();
271  // Duplicating undef operands into 2 instructions does not guarantee the same
272  // value on both; However undef should be replaced by xzr anyway.
273  assert(!MI.getOperand(3).isUndef() && "cannot handle undef");
274  Register AddrReg = MI.getOperand(3).getReg();
275  Register DesiredLoReg = MI.getOperand(4).getReg();
276  Register DesiredHiReg = MI.getOperand(5).getReg();
277  Register NewLoReg = MI.getOperand(6).getReg();
278  Register NewHiReg = MI.getOperand(7).getReg();
279 
280  MachineFunction *MF = MBB.getParent();
281  auto LoadCmpBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());
282  auto StoreBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());
283  auto DoneBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());
284 
285  MF->insert(++MBB.getIterator(), LoadCmpBB);
286  MF->insert(++LoadCmpBB->getIterator(), StoreBB);
287  MF->insert(++StoreBB->getIterator(), DoneBB);
288 
289  // .Lloadcmp:
290  // ldaxp xDestLo, xDestHi, [xAddr]
291  // cmp xDestLo, xDesiredLo
292  // sbcs xDestHi, xDesiredHi
293  // b.ne .Ldone
294  BuildMI(LoadCmpBB, DL, TII->get(AArch64::LDAXPX))
295  .addReg(DestLo.getReg(), RegState::Define)
296  .addReg(DestHi.getReg(), RegState::Define)
297  .addReg(AddrReg);
298  BuildMI(LoadCmpBB, DL, TII->get(AArch64::SUBSXrs), AArch64::XZR)
299  .addReg(DestLo.getReg(), getKillRegState(DestLo.isDead()))
300  .addReg(DesiredLoReg)
301  .addImm(0);
302  BuildMI(LoadCmpBB, DL, TII->get(AArch64::CSINCWr), StatusReg)
303  .addUse(AArch64::WZR)
304  .addUse(AArch64::WZR)
306  BuildMI(LoadCmpBB, DL, TII->get(AArch64::SUBSXrs), AArch64::XZR)
307  .addReg(DestHi.getReg(), getKillRegState(DestHi.isDead()))
308  .addReg(DesiredHiReg)
309  .addImm(0);
310  BuildMI(LoadCmpBB, DL, TII->get(AArch64::CSINCWr), StatusReg)
311  .addUse(StatusReg, RegState::Kill)
312  .addUse(StatusReg, RegState::Kill)
314  BuildMI(LoadCmpBB, DL, TII->get(AArch64::CBNZW))
315  .addUse(StatusReg, getKillRegState(StatusDead))
316  .addMBB(DoneBB);
317  LoadCmpBB->addSuccessor(DoneBB);
318  LoadCmpBB->addSuccessor(StoreBB);
319 
320  // .Lstore:
321  // stlxp wStatus, xNewLo, xNewHi, [xAddr]
322  // cbnz wStatus, .Lloadcmp
323  BuildMI(StoreBB, DL, TII->get(AArch64::STLXPX), StatusReg)
324  .addReg(NewLoReg)
325  .addReg(NewHiReg)
326  .addReg(AddrReg);
327  BuildMI(StoreBB, DL, TII->get(AArch64::CBNZW))
328  .addReg(StatusReg, getKillRegState(StatusDead))
329  .addMBB(LoadCmpBB);
330  StoreBB->addSuccessor(LoadCmpBB);
331  StoreBB->addSuccessor(DoneBB);
332 
333  DoneBB->splice(DoneBB->end(), &MBB, MI, MBB.end());
334  DoneBB->transferSuccessors(&MBB);
335 
336  MBB.addSuccessor(LoadCmpBB);
337 
338  NextMBBI = MBB.end();
339  MI.eraseFromParent();
340 
341  // Recompute liveness bottom up.
342  LivePhysRegs LiveRegs;
343  computeAndAddLiveIns(LiveRegs, *DoneBB);
344  computeAndAddLiveIns(LiveRegs, *StoreBB);
345  computeAndAddLiveIns(LiveRegs, *LoadCmpBB);
346  // Do an extra pass in the loop to get the loop carried dependencies right.
347  StoreBB->clearLiveIns();
348  computeAndAddLiveIns(LiveRegs, *StoreBB);
349  LoadCmpBB->clearLiveIns();
350  computeAndAddLiveIns(LiveRegs, *LoadCmpBB);
351 
352  return true;
353 }
354 
355 /// \brief Expand Pseudos to Instructions with destructive operands.
356 ///
357 /// This mechanism uses MOVPRFX instructions for zeroing the false lanes
358 /// or for fixing relaxed register allocation conditions to comply with
359 /// the instructions register constraints. The latter case may be cheaper
360 /// than setting the register constraints in the register allocator,
361 /// since that will insert regular MOV instructions rather than MOVPRFX.
362 ///
363 /// Example (after register allocation):
364 ///
365 /// FSUB_ZPZZ_ZERO_B Z0, Pg, Z1, Z0
366 ///
367 /// * The Pseudo FSUB_ZPZZ_ZERO_B maps to FSUB_ZPmZ_B.
368 /// * We cannot map directly to FSUB_ZPmZ_B because the register
369 /// constraints of the instruction are not met.
370 /// * Also the _ZERO specifies the false lanes need to be zeroed.
371 ///
372 /// We first try to see if the destructive operand == result operand,
373 /// if not, we try to swap the operands, e.g.
374 ///
375 /// FSUB_ZPmZ_B Z0, Pg/m, Z0, Z1
376 ///
377 /// But because FSUB_ZPmZ is not commutative, this is semantically
378 /// different, so we need a reverse instruction:
379 ///
380 /// FSUBR_ZPmZ_B Z0, Pg/m, Z0, Z1
381 ///
382 /// Then we implement the zeroing of the false lanes of Z0 by adding
383 /// a zeroing MOVPRFX instruction:
384 ///
385 /// MOVPRFX_ZPzZ_B Z0, Pg/z, Z0
386 /// FSUBR_ZPmZ_B Z0, Pg/m, Z0, Z1
387 ///
388 /// Note that this can only be done for _ZERO or _UNDEF variants where
389 /// we can guarantee the false lanes to be zeroed (by implementing this)
390 /// or that they are undef (don't care / not used), otherwise the
391 /// swapping of operands is illegal because the operation is not
392 /// (or cannot be emulated to be) fully commutative.
393 bool AArch64ExpandPseudo::expand_DestructiveOp(
394  MachineInstr &MI,
397  unsigned Opcode = AArch64::getSVEPseudoMap(MI.getOpcode());
398  uint64_t DType = TII->get(Opcode).TSFlags & AArch64::DestructiveInstTypeMask;
399  uint64_t FalseLanes = MI.getDesc().TSFlags & AArch64::FalseLanesMask;
400  bool FalseZero = FalseLanes == AArch64::FalseLanesZero;
401 
402  unsigned DstReg = MI.getOperand(0).getReg();
403  bool DstIsDead = MI.getOperand(0).isDead();
404 
405  if (DType == AArch64::DestructiveBinary)
406  assert(DstReg != MI.getOperand(3).getReg());
407 
408  bool UseRev = false;
409  unsigned PredIdx, DOPIdx, SrcIdx, Src2Idx;
410  switch (DType) {
413  if (DstReg == MI.getOperand(3).getReg()) {
414  // FSUB Zd, Pg, Zs1, Zd ==> FSUBR Zd, Pg/m, Zd, Zs1
415  std::tie(PredIdx, DOPIdx, SrcIdx) = std::make_tuple(1, 3, 2);
416  UseRev = true;
417  break;
418  }
422  std::tie(PredIdx, DOPIdx, SrcIdx) = std::make_tuple(1, 2, 3);
423  break;
425  std::tie(PredIdx, DOPIdx, SrcIdx, Src2Idx) = std::make_tuple(1, 2, 3, 4);
426  if (DstReg == MI.getOperand(3).getReg()) {
427  // FMLA Zd, Pg, Za, Zd, Zm ==> FMAD Zdn, Pg, Zm, Za
428  std::tie(PredIdx, DOPIdx, SrcIdx, Src2Idx) = std::make_tuple(1, 3, 4, 2);
429  UseRev = true;
430  } else if (DstReg == MI.getOperand(4).getReg()) {
431  // FMLA Zd, Pg, Za, Zm, Zd ==> FMAD Zdn, Pg, Zm, Za
432  std::tie(PredIdx, DOPIdx, SrcIdx, Src2Idx) = std::make_tuple(1, 4, 3, 2);
433  UseRev = true;
434  }
435  break;
436  default:
437  llvm_unreachable("Unsupported Destructive Operand type");
438  }
439 
440 #ifndef NDEBUG
441  // MOVPRFX can only be used if the destination operand
442  // is the destructive operand, not as any other operand,
443  // so the Destructive Operand must be unique.
444  bool DOPRegIsUnique = false;
445  switch (DType) {
448  DOPRegIsUnique =
449  DstReg != MI.getOperand(DOPIdx).getReg() ||
450  MI.getOperand(DOPIdx).getReg() != MI.getOperand(SrcIdx).getReg();
451  break;
453  DOPRegIsUnique = true;
454  break;
456  DOPRegIsUnique =
457  DstReg != MI.getOperand(DOPIdx).getReg() ||
458  (MI.getOperand(DOPIdx).getReg() != MI.getOperand(SrcIdx).getReg() &&
459  MI.getOperand(DOPIdx).getReg() != MI.getOperand(Src2Idx).getReg());
460  break;
461  }
462 #endif
463 
464  // Resolve the reverse opcode
465  if (UseRev) {
466  int NewOpcode;
467  // e.g. DIV -> DIVR
468  if ((NewOpcode = AArch64::getSVERevInstr(Opcode)) != -1)
469  Opcode = NewOpcode;
470  // e.g. DIVR -> DIV
471  else if ((NewOpcode = AArch64::getSVENonRevInstr(Opcode)) != -1)
472  Opcode = NewOpcode;
473  }
474 
475  // Get the right MOVPRFX
476  uint64_t ElementSize = TII->getElementSizeForOpcode(Opcode);
477  unsigned MovPrfx, MovPrfxZero;
478  switch (ElementSize) {
481  MovPrfx = AArch64::MOVPRFX_ZZ;
482  MovPrfxZero = AArch64::MOVPRFX_ZPzZ_B;
483  break;
485  MovPrfx = AArch64::MOVPRFX_ZZ;
486  MovPrfxZero = AArch64::MOVPRFX_ZPzZ_H;
487  break;
489  MovPrfx = AArch64::MOVPRFX_ZZ;
490  MovPrfxZero = AArch64::MOVPRFX_ZPzZ_S;
491  break;
493  MovPrfx = AArch64::MOVPRFX_ZZ;
494  MovPrfxZero = AArch64::MOVPRFX_ZPzZ_D;
495  break;
496  default:
497  llvm_unreachable("Unsupported ElementSize");
498  }
499 
500  //
501  // Create the destructive operation (if required)
502  //
503  MachineInstrBuilder PRFX, DOP;
504  if (FalseZero) {
505 #ifndef NDEBUG
506  assert(DOPRegIsUnique && "The destructive operand should be unique");
507 #endif
508  assert(ElementSize != AArch64::ElementSizeNone &&
509  "This instruction is unpredicated");
510 
511  // Merge source operand into destination register
512  PRFX = BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(MovPrfxZero))
513  .addReg(DstReg, RegState::Define)
514  .addReg(MI.getOperand(PredIdx).getReg())
515  .addReg(MI.getOperand(DOPIdx).getReg());
516 
517  // After the movprfx, the destructive operand is same as Dst
518  DOPIdx = 0;
519  } else if (DstReg != MI.getOperand(DOPIdx).getReg()) {
520 #ifndef NDEBUG
521  assert(DOPRegIsUnique && "The destructive operand should be unique");
522 #endif
523  PRFX = BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(MovPrfx))
524  .addReg(DstReg, RegState::Define)
525  .addReg(MI.getOperand(DOPIdx).getReg());
526  DOPIdx = 0;
527  }
528 
529  //
530  // Create the destructive operation
531  //
532  DOP = BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(Opcode))
533  .addReg(DstReg, RegState::Define | getDeadRegState(DstIsDead));
534 
535  switch (DType) {
539  DOP.add(MI.getOperand(PredIdx))
540  .addReg(MI.getOperand(DOPIdx).getReg(), RegState::Kill)
541  .add(MI.getOperand(SrcIdx));
542  break;
544  DOP.add(MI.getOperand(PredIdx))
545  .addReg(MI.getOperand(DOPIdx).getReg(), RegState::Kill)
546  .add(MI.getOperand(SrcIdx))
547  .add(MI.getOperand(Src2Idx));
548  break;
549  }
550 
551  if (PRFX) {
553  transferImpOps(MI, PRFX, DOP);
554  } else
555  transferImpOps(MI, DOP, DOP);
556 
557  MI.eraseFromParent();
558  return true;
559 }
560 
561 bool AArch64ExpandPseudo::expandSetTagLoop(
563  MachineBasicBlock::iterator &NextMBBI) {
564  MachineInstr &MI = *MBBI;
565  DebugLoc DL = MI.getDebugLoc();
566  Register SizeReg = MI.getOperand(0).getReg();
567  Register AddressReg = MI.getOperand(1).getReg();
568 
569  MachineFunction *MF = MBB.getParent();
570 
571  bool ZeroData = MI.getOpcode() == AArch64::STZGloop_wback;
572  const unsigned OpCode1 =
573  ZeroData ? AArch64::STZGPostIndex : AArch64::STGPostIndex;
574  const unsigned OpCode2 =
575  ZeroData ? AArch64::STZ2GPostIndex : AArch64::ST2GPostIndex;
576 
577  unsigned Size = MI.getOperand(2).getImm();
578  assert(Size > 0 && Size % 16 == 0);
579  if (Size % (16 * 2) != 0) {
580  BuildMI(MBB, MBBI, DL, TII->get(OpCode1), AddressReg)
581  .addReg(AddressReg)
582  .addReg(AddressReg)
583  .addImm(1);
584  Size -= 16;
585  }
587  BuildMI(MBB, MBBI, DL, TII->get(AArch64::MOVi64imm), SizeReg)
588  .addImm(Size);
589  expandMOVImm(MBB, I, 64);
590 
591  auto LoopBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());
592  auto DoneBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());
593 
594  MF->insert(++MBB.getIterator(), LoopBB);
595  MF->insert(++LoopBB->getIterator(), DoneBB);
596 
597  BuildMI(LoopBB, DL, TII->get(OpCode2))
598  .addDef(AddressReg)
599  .addReg(AddressReg)
600  .addReg(AddressReg)
601  .addImm(2)
602  .cloneMemRefs(MI)
603  .setMIFlags(MI.getFlags());
604  BuildMI(LoopBB, DL, TII->get(AArch64::SUBXri))
605  .addDef(SizeReg)
606  .addReg(SizeReg)
607  .addImm(16 * 2)
608  .addImm(0);
609  BuildMI(LoopBB, DL, TII->get(AArch64::CBNZX)).addUse(SizeReg).addMBB(LoopBB);
610 
611  LoopBB->addSuccessor(LoopBB);
612  LoopBB->addSuccessor(DoneBB);
613 
614  DoneBB->splice(DoneBB->end(), &MBB, MI, MBB.end());
615  DoneBB->transferSuccessors(&MBB);
616 
617  MBB.addSuccessor(LoopBB);
618 
619  NextMBBI = MBB.end();
620  MI.eraseFromParent();
621  // Recompute liveness bottom up.
622  LivePhysRegs LiveRegs;
623  computeAndAddLiveIns(LiveRegs, *DoneBB);
624  computeAndAddLiveIns(LiveRegs, *LoopBB);
625  // Do an extra pass in the loop to get the loop carried dependencies right.
626  // FIXME: is this necessary?
627  LoopBB->clearLiveIns();
628  computeAndAddLiveIns(LiveRegs, *LoopBB);
629  DoneBB->clearLiveIns();
630  computeAndAddLiveIns(LiveRegs, *DoneBB);
631 
632  return true;
633 }
634 
635 bool AArch64ExpandPseudo::expandSVESpillFill(MachineBasicBlock &MBB,
637  unsigned Opc, unsigned N) {
638  const TargetRegisterInfo *TRI =
640  MachineInstr &MI = *MBBI;
641  for (unsigned Offset = 0; Offset < N; ++Offset) {
642  int ImmOffset = MI.getOperand(2).getImm() + Offset;
643  bool Kill = (Offset + 1 == N) ? MI.getOperand(1).isKill() : false;
644  assert(ImmOffset >= -256 && ImmOffset < 256 &&
645  "Immediate spill offset out of range");
646  BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(Opc))
647  .addReg(
648  TRI->getSubReg(MI.getOperand(0).getReg(), AArch64::zsub0 + Offset),
649  Opc == AArch64::LDR_ZXI ? RegState::Define : 0)
650  .addReg(MI.getOperand(1).getReg(), getKillRegState(Kill))
651  .addImm(ImmOffset);
652  }
653  MI.eraseFromParent();
654  return true;
655 }
656 
657 bool AArch64ExpandPseudo::expandCALL_RVMARKER(
659  // Expand CALL_RVMARKER pseudo to a branch, followed by the special `mov x29,
660  // x29` marker. Mark the sequence as bundle, to avoid passes moving other code
661  // in between.
662  MachineInstr &MI = *MBBI;
663 
664  MachineInstr *OriginalCall;
665  MachineOperand &CallTarget = MI.getOperand(0);
666  assert((CallTarget.isGlobal() || CallTarget.isReg()) &&
667  "invalid operand for regular call");
668  unsigned Opc = CallTarget.isGlobal() ? AArch64::BL : AArch64::BLR;
669  OriginalCall = BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(Opc)).getInstr();
670  OriginalCall->addOperand(CallTarget);
671 
672  unsigned RegMaskStartIdx = 1;
673  // Skip register arguments. Those are added during ISel, but are not
674  // needed for the concrete branch.
675  while (!MI.getOperand(RegMaskStartIdx).isRegMask()) {
676  auto MOP = MI.getOperand(RegMaskStartIdx);
677  assert(MOP.isReg() && "can only add register operands");
679  MOP.getReg(), /*Def=*/false, /*Implicit=*/true));
680  RegMaskStartIdx++;
681  }
682  for (; RegMaskStartIdx < MI.getNumOperands(); ++RegMaskStartIdx)
683  OriginalCall->addOperand(MI.getOperand(RegMaskStartIdx));
684 
685  auto *Marker = BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(AArch64::ORRXrs))
686  .addReg(AArch64::FP, RegState::Define)
687  .addReg(AArch64::XZR)
688  .addReg(AArch64::FP)
689  .addImm(0)
690  .getInstr();
691  if (MI.shouldUpdateCallSiteInfo())
692  MBB.getParent()->moveCallSiteInfo(&MI, Marker);
693  MI.eraseFromParent();
694  finalizeBundle(MBB, OriginalCall->getIterator(),
695  std::next(Marker->getIterator()));
696  return true;
697 }
698 
/// If MBBI references a pseudo instruction that should be expanded here,
/// do the expansion and return true. Otherwise return false.
701 bool AArch64ExpandPseudo::expandMI(MachineBasicBlock &MBB,
703  MachineBasicBlock::iterator &NextMBBI) {
704  MachineInstr &MI = *MBBI;
705  unsigned Opcode = MI.getOpcode();
706 
707  // Check if we can expand the destructive op
708  int OrigInstr = AArch64::getSVEPseudoMap(MI.getOpcode());
709  if (OrigInstr != -1) {
710  auto &Orig = TII->get(OrigInstr);
711  if ((Orig.TSFlags & AArch64::DestructiveInstTypeMask)
713  return expand_DestructiveOp(MI, MBB, MBBI);
714  }
715  }
716 
717  switch (Opcode) {
718  default:
719  break;
720 
721  case AArch64::BSPv8i8:
722  case AArch64::BSPv16i8: {
723  Register DstReg = MI.getOperand(0).getReg();
724  if (DstReg == MI.getOperand(3).getReg()) {
725  // Expand to BIT
726  BuildMI(MBB, MBBI, MI.getDebugLoc(),
727  TII->get(Opcode == AArch64::BSPv8i8 ? AArch64::BITv8i8
728  : AArch64::BITv16i8))
729  .add(MI.getOperand(0))
730  .add(MI.getOperand(3))
731  .add(MI.getOperand(2))
732  .add(MI.getOperand(1));
733  } else if (DstReg == MI.getOperand(2).getReg()) {
734  // Expand to BIF
735  BuildMI(MBB, MBBI, MI.getDebugLoc(),
736  TII->get(Opcode == AArch64::BSPv8i8 ? AArch64::BIFv8i8
737  : AArch64::BIFv16i8))
738  .add(MI.getOperand(0))
739  .add(MI.getOperand(2))
740  .add(MI.getOperand(3))
741  .add(MI.getOperand(1));
742  } else {
743  // Expand to BSL, use additional move if required
744  if (DstReg == MI.getOperand(1).getReg()) {
745  BuildMI(MBB, MBBI, MI.getDebugLoc(),
746  TII->get(Opcode == AArch64::BSPv8i8 ? AArch64::BSLv8i8
747  : AArch64::BSLv16i8))
748  .add(MI.getOperand(0))
749  .add(MI.getOperand(1))
750  .add(MI.getOperand(2))
751  .add(MI.getOperand(3));
752  } else {
753  BuildMI(MBB, MBBI, MI.getDebugLoc(),
754  TII->get(Opcode == AArch64::BSPv8i8 ? AArch64::ORRv8i8
755  : AArch64::ORRv16i8))
756  .addReg(DstReg,
758  getRenamableRegState(MI.getOperand(0).isRenamable()))
759  .add(MI.getOperand(1))
760  .add(MI.getOperand(1));
761  BuildMI(MBB, MBBI, MI.getDebugLoc(),
762  TII->get(Opcode == AArch64::BSPv8i8 ? AArch64::BSLv8i8
763  : AArch64::BSLv16i8))
764  .add(MI.getOperand(0))
765  .addReg(DstReg,
767  getRenamableRegState(MI.getOperand(0).isRenamable()))
768  .add(MI.getOperand(2))
769  .add(MI.getOperand(3));
770  }
771  }
772  MI.eraseFromParent();
773  return true;
774  }
775 
776  case AArch64::ADDWrr:
777  case AArch64::SUBWrr:
778  case AArch64::ADDXrr:
779  case AArch64::SUBXrr:
780  case AArch64::ADDSWrr:
781  case AArch64::SUBSWrr:
782  case AArch64::ADDSXrr:
783  case AArch64::SUBSXrr:
784  case AArch64::ANDWrr:
785  case AArch64::ANDXrr:
786  case AArch64::BICWrr:
787  case AArch64::BICXrr:
788  case AArch64::ANDSWrr:
789  case AArch64::ANDSXrr:
790  case AArch64::BICSWrr:
791  case AArch64::BICSXrr:
792  case AArch64::EONWrr:
793  case AArch64::EONXrr:
794  case AArch64::EORWrr:
795  case AArch64::EORXrr:
796  case AArch64::ORNWrr:
797  case AArch64::ORNXrr:
798  case AArch64::ORRWrr:
799  case AArch64::ORRXrr: {
800  unsigned Opcode;
801  switch (MI.getOpcode()) {
802  default:
803  return false;
804  case AArch64::ADDWrr: Opcode = AArch64::ADDWrs; break;
805  case AArch64::SUBWrr: Opcode = AArch64::SUBWrs; break;
806  case AArch64::ADDXrr: Opcode = AArch64::ADDXrs; break;
807  case AArch64::SUBXrr: Opcode = AArch64::SUBXrs; break;
808  case AArch64::ADDSWrr: Opcode = AArch64::ADDSWrs; break;
809  case AArch64::SUBSWrr: Opcode = AArch64::SUBSWrs; break;
810  case AArch64::ADDSXrr: Opcode = AArch64::ADDSXrs; break;
811  case AArch64::SUBSXrr: Opcode = AArch64::SUBSXrs; break;
812  case AArch64::ANDWrr: Opcode = AArch64::ANDWrs; break;
813  case AArch64::ANDXrr: Opcode = AArch64::ANDXrs; break;
814  case AArch64::BICWrr: Opcode = AArch64::BICWrs; break;
815  case AArch64::BICXrr: Opcode = AArch64::BICXrs; break;
816  case AArch64::ANDSWrr: Opcode = AArch64::ANDSWrs; break;
817  case AArch64::ANDSXrr: Opcode = AArch64::ANDSXrs; break;
818  case AArch64::BICSWrr: Opcode = AArch64::BICSWrs; break;
819  case AArch64::BICSXrr: Opcode = AArch64::BICSXrs; break;
820  case AArch64::EONWrr: Opcode = AArch64::EONWrs; break;
821  case AArch64::EONXrr: Opcode = AArch64::EONXrs; break;
822  case AArch64::EORWrr: Opcode = AArch64::EORWrs; break;
823  case AArch64::EORXrr: Opcode = AArch64::EORXrs; break;
824  case AArch64::ORNWrr: Opcode = AArch64::ORNWrs; break;
825  case AArch64::ORNXrr: Opcode = AArch64::ORNXrs; break;
826  case AArch64::ORRWrr: Opcode = AArch64::ORRWrs; break;
827  case AArch64::ORRXrr: Opcode = AArch64::ORRXrs; break;
828  }
829  MachineInstrBuilder MIB1 =
830  BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(Opcode),
831  MI.getOperand(0).getReg())
832  .add(MI.getOperand(1))
833  .add(MI.getOperand(2))
835  transferImpOps(MI, MIB1, MIB1);
836  MI.eraseFromParent();
837  return true;
838  }
839 
840  case AArch64::LOADgot: {
841  MachineFunction *MF = MBB.getParent();
842  Register DstReg = MI.getOperand(0).getReg();
843  const MachineOperand &MO1 = MI.getOperand(1);
844  unsigned Flags = MO1.getTargetFlags();
845 
846  if (MF->getTarget().getCodeModel() == CodeModel::Tiny) {
847  // Tiny codemodel expand to LDR
848  MachineInstrBuilder MIB = BuildMI(MBB, MBBI, MI.getDebugLoc(),
849  TII->get(AArch64::LDRXl), DstReg);
850 
851  if (MO1.isGlobal()) {
852  MIB.addGlobalAddress(MO1.getGlobal(), 0, Flags);
853  } else if (MO1.isSymbol()) {
854  MIB.addExternalSymbol(MO1.getSymbolName(), Flags);
855  } else {
856  assert(MO1.isCPI() &&
857  "Only expect globals, externalsymbols, or constant pools");
858  MIB.addConstantPoolIndex(MO1.getIndex(), MO1.getOffset(), Flags);
859  }
860  } else {
861  // Small codemodel expand into ADRP + LDR.
862  MachineFunction &MF = *MI.getParent()->getParent();
863  DebugLoc DL = MI.getDebugLoc();
864  MachineInstrBuilder MIB1 =
865  BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(AArch64::ADRP), DstReg);
866 
867  MachineInstrBuilder MIB2;
870  unsigned Reg32 = TRI->getSubReg(DstReg, AArch64::sub_32);
871  unsigned DstFlags = MI.getOperand(0).getTargetFlags();
872  MIB2 = BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(AArch64::LDRWui))
873  .addDef(Reg32)
874  .addReg(DstReg, RegState::Kill)
875  .addReg(DstReg, DstFlags | RegState::Implicit);
876  } else {
877  unsigned DstReg = MI.getOperand(0).getReg();
878  MIB2 = BuildMI(MBB, MBBI, DL, TII->get(AArch64::LDRXui))
879  .add(MI.getOperand(0))
880  .addUse(DstReg, RegState::Kill);
881  }
882 
883  if (MO1.isGlobal()) {
884  MIB1.addGlobalAddress(MO1.getGlobal(), 0, Flags | AArch64II::MO_PAGE);
885  MIB2.addGlobalAddress(MO1.getGlobal(), 0,
887  } else if (MO1.isSymbol()) {
889  MIB2.addExternalSymbol(MO1.getSymbolName(), Flags |
892  } else {
893  assert(MO1.isCPI() &&
894  "Only expect globals, externalsymbols, or constant pools");
895  MIB1.addConstantPoolIndex(MO1.getIndex(), MO1.getOffset(),
896  Flags | AArch64II::MO_PAGE);
897  MIB2.addConstantPoolIndex(MO1.getIndex(), MO1.getOffset(),
898  Flags | AArch64II::MO_PAGEOFF |
900  }
901 
902  transferImpOps(MI, MIB1, MIB2);
903  }
904  MI.eraseFromParent();
905  return true;
906  }
907  case AArch64::MOVaddrBA: {
908  MachineFunction &MF = *MI.getParent()->getParent();
910  // blockaddress expressions have to come from a constant pool because the
911  // largest addend (and hence offset within a function) allowed for ADRP is
912  // only 8MB.
913  const BlockAddress *BA = MI.getOperand(1).getBlockAddress();
914  assert(MI.getOperand(1).getOffset() == 0 && "unexpected offset");
915 
917  unsigned CPIdx = MCP->getConstantPoolIndex(BA, Align(8));
918 
919  Register DstReg = MI.getOperand(0).getReg();
920  auto MIB1 =
921  BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(AArch64::ADRP), DstReg)
923  auto MIB2 = BuildMI(MBB, MBBI, MI.getDebugLoc(),
924  TII->get(AArch64::LDRXui), DstReg)
925  .addUse(DstReg)
928  transferImpOps(MI, MIB1, MIB2);
929  MI.eraseFromParent();
930  return true;
931  }
932  }
934  case AArch64::MOVaddr:
935  case AArch64::MOVaddrJT:
936  case AArch64::MOVaddrCP:
937  case AArch64::MOVaddrTLS:
938  case AArch64::MOVaddrEXT: {
939  // Expand into ADRP + ADD.
940  Register DstReg = MI.getOperand(0).getReg();
941  MachineInstrBuilder MIB1 =
942  BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(AArch64::ADRP), DstReg)
943  .add(MI.getOperand(1));
944 
945  if (MI.getOperand(1).getTargetFlags() & AArch64II::MO_TAGGED) {
946  // MO_TAGGED on the page indicates a tagged address. Set the tag now.
947  // We do so by creating a MOVK that sets bits 48-63 of the register to
948  // (global address + 0x100000000 - PC) >> 48. This assumes that we're in
949  // the small code model so we can assume a binary size of <= 4GB, which
950  // makes the untagged PC relative offset positive. The binary must also be
951  // loaded into address range [0, 2^48). Both of these properties need to
952  // be ensured at runtime when using tagged addresses.
953  auto Tag = MI.getOperand(1);
954  Tag.setTargetFlags(AArch64II::MO_PREL | AArch64II::MO_G3);
955  Tag.setOffset(0x100000000);
956  BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(AArch64::MOVKXi), DstReg)
957  .addReg(DstReg)
958  .add(Tag)
959  .addImm(48);
960  }
961 
962  MachineInstrBuilder MIB2 =
963  BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(AArch64::ADDXri))
964  .add(MI.getOperand(0))
965  .addReg(DstReg)
966  .add(MI.getOperand(2))
967  .addImm(0);
968 
969  transferImpOps(MI, MIB1, MIB2);
970  MI.eraseFromParent();
971  return true;
972  }
973  case AArch64::ADDlowTLS:
974  // Produce a plain ADD
975  BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(AArch64::ADDXri))
976  .add(MI.getOperand(0))
977  .add(MI.getOperand(1))
978  .add(MI.getOperand(2))
979  .addImm(0);
980  MI.eraseFromParent();
981  return true;
982 
983  case AArch64::MOVbaseTLS: {
984  Register DstReg = MI.getOperand(0).getReg();
985  auto SysReg = AArch64SysReg::TPIDR_EL0;
986  MachineFunction *MF = MBB.getParent();
988  SysReg = AArch64SysReg::TPIDR_EL3;
989  else if (MF->getSubtarget<AArch64Subtarget>().useEL2ForTP())
990  SysReg = AArch64SysReg::TPIDR_EL2;
991  else if (MF->getSubtarget<AArch64Subtarget>().useEL1ForTP())
992  SysReg = AArch64SysReg::TPIDR_EL1;
993  BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(AArch64::MRS), DstReg)
994  .addImm(SysReg);
995  MI.eraseFromParent();
996  return true;
997  }
998 
999  case AArch64::MOVi32imm:
1000  return expandMOVImm(MBB, MBBI, 32);
1001  case AArch64::MOVi64imm:
1002  return expandMOVImm(MBB, MBBI, 64);
1003  case AArch64::RET_ReallyLR: {
1004  // Hiding the LR use with RET_ReallyLR may lead to extra kills in the
1005  // function and missing live-ins. We are fine in practice because callee
1006  // saved register handling ensures the register value is restored before
1007  // RET, but we need the undef flag here to appease the MachineVerifier
1008  // liveness checks.
1009  MachineInstrBuilder MIB =
1010  BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(AArch64::RET))
1011  .addReg(AArch64::LR, RegState::Undef);
1012  transferImpOps(MI, MIB, MIB);
1013  MI.eraseFromParent();
1014  return true;
1015  }
1016  case AArch64::CMP_SWAP_8:
1017  return expandCMP_SWAP(MBB, MBBI, AArch64::LDAXRB, AArch64::STLXRB,
1018  AArch64::SUBSWrx,
1020  AArch64::WZR, NextMBBI);
1021  case AArch64::CMP_SWAP_16:
1022  return expandCMP_SWAP(MBB, MBBI, AArch64::LDAXRH, AArch64::STLXRH,
1023  AArch64::SUBSWrx,
1025  AArch64::WZR, NextMBBI);
1026  case AArch64::CMP_SWAP_32:
1027  return expandCMP_SWAP(MBB, MBBI, AArch64::LDAXRW, AArch64::STLXRW,
1028  AArch64::SUBSWrs,
1030  AArch64::WZR, NextMBBI);
1031  case AArch64::CMP_SWAP_64:
1032  return expandCMP_SWAP(MBB, MBBI,
1033  AArch64::LDAXRX, AArch64::STLXRX, AArch64::SUBSXrs,
1035  AArch64::XZR, NextMBBI);
1036  case AArch64::CMP_SWAP_128:
1037  return expandCMP_SWAP_128(MBB, MBBI, NextMBBI);
1038 
1039  case AArch64::AESMCrrTied:
1040  case AArch64::AESIMCrrTied: {
1041  MachineInstrBuilder MIB =
1042  BuildMI(MBB, MBBI, MI.getDebugLoc(),
1043  TII->get(Opcode == AArch64::AESMCrrTied ? AArch64::AESMCrr :
1044  AArch64::AESIMCrr))
1045  .add(MI.getOperand(0))
1046  .add(MI.getOperand(1));
1047  transferImpOps(MI, MIB, MIB);
1048  MI.eraseFromParent();
1049  return true;
1050  }
1051  case AArch64::IRGstack: {
1052  MachineFunction &MF = *MBB.getParent();
1053  const AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();
1054  const AArch64FrameLowering *TFI =
1055  MF.getSubtarget<AArch64Subtarget>().getFrameLowering();
1056 
1057  // IRG does not allow immediate offset. getTaggedBasePointerOffset should
1058  // almost always point to SP-after-prologue; if not, emit a longer
1059  // instruction sequence.
1060  int BaseOffset = -AFI->getTaggedBasePointerOffset();
1061  Register FrameReg;
1062  StackOffset FrameRegOffset = TFI->resolveFrameOffsetReference(
1063  MF, BaseOffset, false /*isFixed*/, false /*isSVE*/, FrameReg,
1064  /*PreferFP=*/false,
1065  /*ForSimm=*/true);
1066  Register SrcReg = FrameReg;
1067  if (FrameRegOffset) {
1068  // Use output register as temporary.
1069  SrcReg = MI.getOperand(0).getReg();
1070  emitFrameOffset(MBB, &MI, MI.getDebugLoc(), SrcReg, FrameReg,
1071  FrameRegOffset, TII);
1072  }
1073  BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(AArch64::IRG))
1074  .add(MI.getOperand(0))
1075  .addUse(SrcReg)
1076  .add(MI.getOperand(2));
1077  MI.eraseFromParent();
1078  return true;
1079  }
1080  case AArch64::TAGPstack: {
1081  int64_t Offset = MI.getOperand(2).getImm();
1082  BuildMI(MBB, MBBI, MI.getDebugLoc(),
1083  TII->get(Offset >= 0 ? AArch64::ADDG : AArch64::SUBG))
1084  .add(MI.getOperand(0))
1085  .add(MI.getOperand(1))
1087  .add(MI.getOperand(4));
1088  MI.eraseFromParent();
1089  return true;
1090  }
1091  case AArch64::STGloop_wback:
1092  case AArch64::STZGloop_wback:
1093  return expandSetTagLoop(MBB, MBBI, NextMBBI);
1094  case AArch64::STGloop:
1095  case AArch64::STZGloop:
1097  "Non-writeback variants of STGloop / STZGloop should not "
1098  "survive past PrologEpilogInserter.");
1099  case AArch64::STR_ZZZZXI:
1100  return expandSVESpillFill(MBB, MBBI, AArch64::STR_ZXI, 4);
1101  case AArch64::STR_ZZZXI:
1102  return expandSVESpillFill(MBB, MBBI, AArch64::STR_ZXI, 3);
1103  case AArch64::STR_ZZXI:
1104  return expandSVESpillFill(MBB, MBBI, AArch64::STR_ZXI, 2);
1105  case AArch64::LDR_ZZZZXI:
1106  return expandSVESpillFill(MBB, MBBI, AArch64::LDR_ZXI, 4);
1107  case AArch64::LDR_ZZZXI:
1108  return expandSVESpillFill(MBB, MBBI, AArch64::LDR_ZXI, 3);
1109  case AArch64::LDR_ZZXI:
1110  return expandSVESpillFill(MBB, MBBI, AArch64::LDR_ZXI, 2);
1111  case AArch64::BLR_RVMARKER:
1112  return expandCALL_RVMARKER(MBB, MBBI);
1113  }
1114  return false;
1115 }
1116 
1117 /// Iterate over the instructions in basic block MBB and expand any
1118 /// pseudo instructions. Return true if anything was modified.
1119 bool AArch64ExpandPseudo::expandMBB(MachineBasicBlock &MBB) {
1120  bool Modified = false;
1121 
1123  while (MBBI != E) {
1124  MachineBasicBlock::iterator NMBBI = std::next(MBBI);
1125  Modified |= expandMI(MBB, MBBI, NMBBI);
1126  MBBI = NMBBI;
1127  }
1128 
1129  return Modified;
1130 }
1131 
1132 bool AArch64ExpandPseudo::runOnMachineFunction(MachineFunction &MF) {
1133  TII = static_cast<const AArch64InstrInfo *>(MF.getSubtarget().getInstrInfo());
1134 
1135  bool Modified = false;
1136  for (auto &MBB : MF)
1137  Modified |= expandMBB(MBB);
1138  return Modified;
1139 }
1140 
1141 /// Returns an instance of the pseudo instruction expansion pass.
1143  return new AArch64ExpandPseudo();
1144 }
llvm::Check::Size
@ Size
Definition: FileCheck.h:73
i
i
Definition: README.txt:29
llvm::AArch64II::MO_G3
@ MO_G3
MO_G3 - A symbol operand with this flag (granule 3) represents the high 16-bits of a 64-bit address,...
Definition: AArch64BaseInfo.h:605
llvm::AArch64ISD::LOADgot
@ LOADgot
Definition: AArch64ISelLowering.h:64
llvm::AArch64::ElementSizeNone
@ ElementSizeNone
Definition: AArch64InstrInfo.h:457
MI
IRTranslator LLVM IR MI
Definition: IRTranslator.cpp:100
MachineInstr.h
MathExtras.h
llvm::MachineInstrBuilder::addImm
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
Definition: MachineInstrBuilder.h:132
llvm::MachineConstantPool::getConstantPoolIndex
unsigned getConstantPoolIndex(const Constant *C, Align Alignment)
getConstantPoolIndex - Create a new entry in the constant pool or return an existing one.
Definition: MachineFunction.cpp:1203
llvm
Definition: AllocatorList.h:23
AArch64MachineFunctionInfo.h
UseMI
MachineInstrBuilder & UseMI
Definition: AArch64ExpandPseudoInsts.cpp:100
llvm::AArch64CC::NE
@ NE
Definition: AArch64BaseInfo.h:237
llvm::MachineOperand::CreateReg
static MachineOperand CreateReg(Register Reg, bool isDef, bool isImp=false, bool isKill=false, bool isDead=false, bool isUndef=false, bool isEarlyClobber=false, unsigned SubReg=0, bool isDebug=false, bool isInternalRead=false, bool isRenamable=false)
Definition: MachineOperand.h:788
llvm::MachineBasicBlock::getBasicBlock
const BasicBlock * getBasicBlock() const
Return the LLVM basic block that this instance corresponded to originally.
Definition: MachineBasicBlock.h:202
llvm::AArch64_AM::LSL
@ LSL
Definition: AArch64AddressingModes.h:34
AARCH64_EXPAND_PSEUDO_NAME
#define AARCH64_EXPAND_PSEUDO_NAME
Definition: AArch64ExpandPseudoInsts.cpp:47
MCInstrDesc.h
llvm::MachineOperand::getGlobal
const GlobalValue * getGlobal() const
Definition: MachineOperand.h:560
llvm::MachineInstrBuilder::add
const MachineInstrBuilder & add(const MachineOperand &MO) const
Definition: MachineInstrBuilder.h:225
llvm::AArch64::FalseLanesZero
@ FalseLanesZero
Definition: AArch64InstrInfo.h:479
Pass.h
llvm::TargetSubtargetInfo::getInstrInfo
virtual const TargetInstrInfo * getInstrInfo() const
Definition: TargetSubtargetInfo.h:92
llvm::AArch64::ElementSizeS
@ ElementSizeS
Definition: AArch64InstrInfo.h:460
llvm::SmallVector
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1168
llvm::AArch64Subtarget::useEL3ForTP
bool useEL3ForTP() const
Definition: AArch64Subtarget.h:406
AArch64BaseInfo.h
llvm::MachineFunctionPass
MachineFunctionPass - This class adapts the FunctionPass interface to allow convenient creation of pa...
Definition: MachineFunctionPass.h:30
MachineBasicBlock.h
llvm::LivePhysRegs
A set of physical registers with utility functions to track liveness when walking backward/forward th...
Definition: LivePhysRegs.h:48
llvm::TargetSubtargetInfo::getRegisterInfo
virtual const TargetRegisterInfo * getRegisterInfo() const
getRegisterInfo - If register information is available, return it.
Definition: TargetSubtargetInfo.h:124
llvm::AArch64II::MO_PREL
@ MO_PREL
MO_PREL - Indicates that the bits of the symbol operand represented by MO_G0 etc are PC relative.
Definition: AArch64BaseInfo.h:656
llvm::TargetRegisterInfo
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
Definition: TargetRegisterInfo.h:231
llvm::MachineFunction::moveCallSiteInfo
void moveCallSiteInfo(const MachineInstr *Old, const MachineInstr *New)
Move the call site info from Old to \New call site info.
Definition: MachineFunction.cpp:929
llvm::MachineOperand::isSymbol
bool isSymbol() const
isSymbol - Tests if this is a MO_ExternalSymbol operand.
Definition: MachineOperand.h:338
DenseMap.h
llvm::MachineFunction::insert
void insert(iterator MBBI, MachineBasicBlock *MBB)
Definition: MachineFunction.h:756
llvm::getDeadRegState
unsigned getDeadRegState(bool B)
Definition: MachineInstrBuilder.h:512
llvm::AArch64::ElementSizeB
@ ElementSizeB
Definition: AArch64InstrInfo.h:458
llvm::AArch64::DestructiveTernaryCommWithRev
@ DestructiveTernaryCommWithRev
Definition: AArch64InstrInfo.h:474
Offset
uint64_t Offset
Definition: ELFObjHandler.cpp:81
llvm::RegState::Renamable
@ Renamable
Register that may be renamed.
Definition: MachineInstrBuilder.h:62
llvm::dwarf::Tag
Tag
Definition: Dwarf.h:104
llvm::MachineOperand::getOffset
int64_t getOffset() const
Return the offset from the symbol in this operand.
Definition: MachineOperand.h:597
TRI
unsigned const TargetRegisterInfo * TRI
Definition: MachineSink.cpp:1567
llvm::ARCISD::BL
@ BL
Definition: ARCISelLowering.h:34
llvm::AArch64FrameLowering
Definition: AArch64FrameLowering.h:23
llvm::MachineBasicBlock::addSuccessor
void addSuccessor(MachineBasicBlock *Succ, BranchProbability Prob=BranchProbability::getUnknown())
Add Succ as a successor of this MachineBasicBlock.
Definition: MachineBasicBlock.cpp:743
llvm::AArch64::getSVERevInstr
int getSVERevInstr(uint16_t Opcode)
llvm::MachineInstrBuilder::addDef
const MachineInstrBuilder & addDef(Register RegNo, unsigned Flags=0, unsigned SubReg=0) const
Add a virtual register definition operand.
Definition: MachineInstrBuilder.h:117
llvm::finalizeBundle
void finalizeBundle(MachineBasicBlock &MBB, MachineBasicBlock::instr_iterator FirstMI, MachineBasicBlock::instr_iterator LastMI)
finalizeBundle - Finalize a machine instruction bundle which includes a sequence of instructions star...
Definition: MachineInstrBundle.cpp:123
AArch64InstrInfo.h
llvm::getRenamableRegState
unsigned getRenamableRegState(bool B)
Definition: MachineInstrBuilder.h:524
TargetMachine.h
llvm::PassRegistry::getPassRegistry
static PassRegistry * getPassRegistry()
getPassRegistry - Access the global registry object, which is automatically initialized at applicatio...
Definition: PassRegistry.cpp:31
llvm::MachineInstrBuilder::addMBB
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned TargetFlags=0) const
Definition: MachineInstrBuilder.h:147
llvm::AArch64InstrInfo
Definition: AArch64InstrInfo.h:38
llvm::AArch64ISD::MRS
@ MRS
Definition: AArch64ISelLowering.h:288
E
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
llvm::initializeAArch64ExpandPseudoPass
void initializeAArch64ExpandPseudoPass(PassRegistry &)
llvm::MachineFunction::getInfo
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do...
Definition: MachineFunction.h:653
llvm::AArch64Subtarget::isTargetILP32
bool isTargetILP32() const
Definition: AArch64Subtarget.h:492
llvm::MachineOperand::isUse
bool isUse() const
Definition: MachineOperand.h:367
TII
const HexagonInstrInfo * TII
Definition: HexagonCopyToCombine.cpp:129
llvm::MCInstrDesc
Describe properties that are true of each instruction in the target description file.
Definition: MCInstrDesc.h:196
llvm::MachineOperand
MachineOperand class - Representation of each machine instruction operand.
Definition: MachineOperand.h:49
llvm::AArch64_AM::getShifterImm
static unsigned getShifterImm(AArch64_AM::ShiftExtendType ST, unsigned Imm)
getShifterImm - Encode the shift type and amount: imm: 6-bit shift amount shifter: 000 ==> lsl 001 ==...
Definition: AArch64AddressingModes.h:98
llvm::report_fatal_error
LLVM_ATTRIBUTE_NORETURN void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
Definition: Error.cpp:140
LoopDeletionResult::Modified
@ Modified
llvm::RegState::Define
@ Define
Register definition.
Definition: MachineInstrBuilder.h:45
llvm::computeAndAddLiveIns
void computeAndAddLiveIns(LivePhysRegs &LiveRegs, MachineBasicBlock &MBB)
Convenience function combining computeLiveIns() and addLiveIns().
Definition: LivePhysRegs.cpp:335
DebugLoc.h
llvm::RegState::Implicit
@ Implicit
Not emitted register (e.g. carry, or temporary result).
Definition: MachineInstrBuilder.h:47
Align
uint64_t Align
Definition: ELFObjHandler.cpp:83
llvm::emitFrameOffset
void emitFrameOffset(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, const DebugLoc &DL, unsigned DestReg, unsigned SrcReg, StackOffset Offset, const TargetInstrInfo *TII, MachineInstr::MIFlag=MachineInstr::NoFlags, bool SetNZCV=false, bool NeedsWinCFI=false, bool *HasWinCFI=nullptr)
emitFrameOffset - Emit instructions as needed to set DestReg to SrcReg plus Offset.
Definition: AArch64InstrInfo.cpp:3706
llvm::MachineInstrBuilder::addExternalSymbol
const MachineInstrBuilder & addExternalSymbol(const char *FnName, unsigned TargetFlags=0) const
Definition: MachineInstrBuilder.h:185
llvm::RegState::Kill
@ Kill
The last use of a register.
Definition: MachineInstrBuilder.h:49
llvm::MachineBasicBlock
Definition: MachineBasicBlock.h:95
llvm::MachineInstrBuilder::cloneMemRefs
const MachineInstrBuilder & cloneMemRefs(const MachineInstr &OtherMI) const
Definition: MachineInstrBuilder.h:214
AArch64AddressingModes.h
llvm::MachineFunction::getSubtarget
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
Definition: MachineFunction.h:555
llvm::AArch64::NotDestructive
@ NotDestructive
Definition: AArch64InstrInfo.h:466
llvm::MachineOperand::getTargetFlags
unsigned getTargetFlags() const
Definition: MachineOperand.h:218
llvm::MachineOperand::isReg
bool isReg() const
isReg - Tests if this is a MO_Register operand.
Definition: MachineOperand.h:318
llvm::MachineInstr
Representation of each machine instruction.
Definition: MachineInstr.h:64
llvm::MachineInstrBuilder
Definition: MachineInstrBuilder.h:70
llvm::AArch64FunctionInfo::getTaggedBasePointerOffset
unsigned getTaggedBasePointerOffset() const
Definition: AArch64MachineFunctionInfo.h:354
AArch64ExpandImm.h
llvm::createAArch64ExpandPseudoPass
FunctionPass * createAArch64ExpandPseudoPass()
Returns an instance of the pseudo instruction expansion pass.
Definition: AArch64ExpandPseudoInsts.cpp:1142
llvm::AArch64II::MO_NC
@ MO_NC
MO_NC - Indicates whether the linker is expected to check the symbol reference for overflow.
Definition: AArch64BaseInfo.h:637
llvm::numbers::e
constexpr double e
Definition: MathExtras.h:58
llvm::AArch64II::MO_PAGEOFF
@ MO_PAGEOFF
MO_PAGEOFF - A symbol operand with this flag represents the offset of that symbol within a 4K page.
Definition: AArch64BaseInfo.h:601
llvm::MachineConstantPool
The MachineConstantPool class keeps track of constants referenced by a function which must be spilled...
Definition: MachineConstantPool.h:117
llvm::MachineOperand::isDead
bool isDead() const
Definition: MachineOperand.h:382
llvm::AArch64FunctionInfo
AArch64FunctionInfo - This class is derived from MachineFunctionInfo and contains private AArch64-spe...
Definition: AArch64MachineFunctionInfo.h:37
I
#define I(x, y, z)
Definition: MD5.cpp:59
llvm::AArch64Subtarget::useEL2ForTP
bool useEL2ForTP() const
Definition: AArch64Subtarget.h:405
MachineConstantPool.h
llvm::MachineOperand::isCPI
bool isCPI() const
isCPI - Tests if this is a MO_ConstantPoolIndex operand.
Definition: MachineOperand.h:330
llvm::MachineFunction::CreateMachineBasicBlock
MachineBasicBlock * CreateMachineBasicBlock(const BasicBlock *bb=nullptr)
CreateMachineBasicBlock - Allocate a new MachineBasicBlock.
Definition: MachineFunction.cpp:414
MachineFunctionPass.h
llvm::MachineFunction::getConstantPool
MachineConstantPool * getConstantPool()
getConstantPool - Return the constant pool object for the current function.
Definition: MachineFunction.h:587
assert
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
llvm::MachineBasicBlock::getParent
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
Definition: MachineBasicBlock.h:225
llvm::MachineInstrBuilder::addReg
const MachineInstrBuilder & addReg(Register RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
Definition: MachineInstrBuilder.h:98
llvm::MachineInstrBuilder::addUse
const MachineInstrBuilder & addUse(Register RegNo, unsigned Flags=0, unsigned SubReg=0) const
Add a virtual register use operand.
Definition: MachineInstrBuilder.h:124
llvm::MachineOperand::getReg
Register getReg() const
getReg - Returns the register number.
Definition: MachineOperand.h:357
llvm::MachineFunction
Definition: MachineFunction.h:227
Triple.h
llvm::BlockAddress
The address of a basic block.
Definition: Constants.h:847
llvm::StringRef
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:57
MBBI
MachineBasicBlock MachineBasicBlock::iterator MBBI
Definition: AArch64SLSHardening.cpp:75
llvm::AArch64::FalseLanesMask
@ FalseLanesMask
Definition: AArch64InstrInfo.h:478
llvm_unreachable
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
Definition: ErrorHandling.h:136
llvm::StackOffset
StackOffset is a class to represent an offset with 2 dimensions, named fixed and scalable,...
Definition: TypeSize.h:134
llvm::ilist_node_impl::getIterator
self_iterator getIterator()
Definition: ilist_node.h:81
TargetSubtargetInfo.h
DL
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Definition: AArch64SLSHardening.cpp:76
llvm::AArch64Subtarget::useEL1ForTP
bool useEL1ForTP() const
Definition: AArch64Subtarget.h:404
LLVM_FALLTHROUGH
#define LLVM_FALLTHROUGH
LLVM_FALLTHROUGH - Mark fallthrough cases in switch statements.
Definition: Compiler.h:281
llvm::CodeModel::Tiny
@ Tiny
Definition: CodeGen.h:28
llvm::AArch64CC::EQ
@ EQ
Definition: AArch64BaseInfo.h:236
llvm::MachineInstrBuilder::getInstr
MachineInstr * getInstr() const
If conversion operators fail, use this method to get the MachineInstr explicitly.
Definition: MachineInstrBuilder.h:90
llvm::Register
Wrapper class representing virtual and physical registers.
Definition: Register.h:19
llvm::AArch64_AM::getArithExtendImm
static unsigned getArithExtendImm(AArch64_AM::ShiftExtendType ET, unsigned Imm)
getArithExtendImm - Encode the extend type and shift amount for an arithmetic instruction: imm: 3-bit...
Definition: AArch64AddressingModes.h:170
MBB
MachineBasicBlock & MBB
Definition: AArch64SLSHardening.cpp:74
llvm::AArch64::ElementSizeD
@ ElementSizeD
Definition: AArch64InstrInfo.h:461
llvm::MachineFunction::getTarget
const LLVMTargetMachine & getTarget() const
getTarget - Return the target machine this machine code is compiled with
Definition: MachineFunction.h:551
llvm::AArch64_IMM::expandMOVImm
void expandMOVImm(uint64_t Imm, unsigned BitSize, SmallVectorImpl< ImmInsnModel > &Insn)
Expand a MOVi32imm or MOVi64imm pseudo instruction to one or more real move-immediate instructions to...
Definition: AArch64ExpandImm.cpp:305
llvm::ARCISD::RET
@ RET
Definition: ARCISelLowering.h:52
llvm::RegState::Undef
@ Undef
Value of the register doesn't matter.
Definition: MachineInstrBuilder.h:53
llvm::MachineOperand::getIndex
int getIndex() const
Definition: MachineOperand.h:554
llvm::MachineInstrBuilder::addConstantPoolIndex
const MachineInstrBuilder & addConstantPoolIndex(unsigned Idx, int Offset=0, unsigned TargetFlags=0) const
Definition: MachineInstrBuilder.h:159
llvm::AArch64::DestructiveInstTypeMask
@ DestructiveInstTypeMask
Definition: AArch64InstrInfo.h:465
llvm::TargetMachine::getCodeModel
CodeModel::Model getCodeModel() const
Returns the code model.
Definition: TargetMachine.cpp:74
llvm::AArch64ISD::ADRP
@ ADRP
Definition: AArch64ISelLowering.h:61
llvm::AArch64::DestructiveBinaryComm
@ DestructiveBinaryComm
Definition: AArch64InstrInfo.h:472
llvm::MachineInstrBuilder::addGlobalAddress
const MachineInstrBuilder & addGlobalAddress(const GlobalValue *GV, int64_t Offset=0, unsigned TargetFlags=0) const
Definition: MachineInstrBuilder.h:178
CodeGen.h
llvm::AArch64Subtarget::isTargetMachO
bool isTargetMachO() const
Definition: AArch64Subtarget.h:490
INITIALIZE_PASS
INITIALIZE_PASS(AArch64ExpandPseudo, "aarch64-expand-pseudo", AARCH64_EXPAND_PSEUDO_NAME, false, false) static void transferImpOps(MachineInstr &OldMI
Transfer implicit operands on the pseudo instruction to the instructions created from the expansion.
llvm::getKillRegState
unsigned getKillRegState(bool B)
Definition: MachineInstrBuilder.h:509
AArch64Subtarget.h
llvm::AArch64::DestructiveBinary
@ DestructiveBinary
Definition: AArch64InstrInfo.h:471
llvm::MachineBasicBlock::begin
iterator begin()
Definition: MachineBasicBlock.h:268
MachineInstrBuilder.h
llvm::MachineOperand::getSymbolName
const char * getSymbolName() const
Definition: MachineOperand.h:605
llvm::MachineInstrBuilder::setMIFlags
const MachineInstrBuilder & setMIFlags(unsigned Flags) const
Definition: MachineInstrBuilder.h:274
llvm::BuildMI
MachineInstrBuilder BuildMI(MachineFunction &MF, const DebugLoc &DL, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
Definition: MachineInstrBuilder.h:329
N
#define N
DefMI
MachineInstrBuilder MachineInstrBuilder & DefMI
Definition: AArch64ExpandPseudoInsts.cpp:101
llvm::MachineInstr::addOperand
void addOperand(MachineFunction &MF, const MachineOperand &Op)
Add the specified operand to the instruction.
Definition: MachineInstr.cpp:207
llvm::AArch64::getSVEPseudoMap
int getSVEPseudoMap(uint16_t Opcode)
llvm::AArch64_AM::UXTB
@ UXTB
Definition: AArch64AddressingModes.h:40
MachineOperand.h
llvm::TargetRegisterInfo::getSubReg
MCRegister getSubReg(MCRegister Reg, unsigned Idx) const
Returns the physical register number of sub-register "Index" for physical register RegNo.
Definition: TargetRegisterInfo.h:1078
llvm::AArch64::DestructiveBinaryImm
@ DestructiveBinaryImm
Definition: AArch64InstrInfo.h:469
llvm::AArch64::DestructiveBinaryCommWithRev
@ DestructiveBinaryCommWithRev
Definition: AArch64InstrInfo.h:473
llvm::FunctionPass
FunctionPass class - This class is used to implement most global optimizations.
Definition: Pass.h:298
llvm::AArch64_AM::UXTH
@ UXTH
Definition: AArch64AddressingModes.h:41
llvm::DebugLoc
A debug info location.
Definition: DebugLoc.h:33
llvm::AArch64II::MO_TAGGED
@ MO_TAGGED
MO_TAGGED - With MO_PAGE, indicates that the page includes a memory tag in bits 56-63.
Definition: AArch64BaseInfo.h:664
llvm::AArch64Subtarget
Definition: AArch64Subtarget.h:38
MachineFunction.h
llvm::MachineInstrBundleIterator< MachineInstr >
llvm::abs
APFloat abs(APFloat X)
Returns the absolute value of the argument.
Definition: APFloat.h:1272
llvm::AArch64::getSVENonRevInstr
int getSVENonRevInstr(uint16_t Opcode)
llvm::MCInstrDesc::getNumOperands
unsigned getNumOperands() const
Return the number of declared MachineOperands for this MachineInstruction.
Definition: MCInstrDesc.h:229
llvm::MachineBasicBlock::end
iterator end()
Definition: MachineBasicBlock.h:270
llvm::AArch64II::MO_PAGE
@ MO_PAGE
MO_PAGE - A symbol operand with this flag represents the pc-relative offset of the 4K page containing...
Definition: AArch64BaseInfo.h:596
llvm::MachineOperand::isGlobal
bool isGlobal() const
isGlobal - Tests if this is a MO_GlobalAddress operand.
Definition: MachineOperand.h:336
llvm::Intrinsic::ID
unsigned ID
Definition: TargetTransformInfo.h:38
LivePhysRegs.h
llvm::AArch64::ElementSizeH
@ ElementSizeH
Definition: AArch64InstrInfo.h:459