LLVM  14.0.0git
RISCVExpandAtomicPseudoInsts.cpp
Go to the documentation of this file.
1 //===-- RISCVExpandAtomicPseudoInsts.cpp - Expand atomic pseudo instrs. ---===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file contains a pass that expands atomic pseudo instructions into
10 // target instructions. This pass should be run at the last possible moment,
11 // avoiding the possibility for other passes to break the requirements for
12 // forward progress in the LR/SC block.
13 //
14 //===----------------------------------------------------------------------===//
15 
#include "RISCV.h"
#include "RISCVInstrInfo.h"
#include "RISCVTargetMachine.h"
#include "llvm/CodeGen/LivePhysRegs.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
23 
24 using namespace llvm;
25 
26 #define RISCV_EXPAND_ATOMIC_PSEUDO_NAME \
27  "RISCV atomic pseudo instruction expansion pass"
28 
29 namespace {
30 
31 class RISCVExpandAtomicPseudo : public MachineFunctionPass {
32 public:
33  const RISCVInstrInfo *TII;
34  static char ID;
35 
36  RISCVExpandAtomicPseudo() : MachineFunctionPass(ID) {
38  }
39 
40  bool runOnMachineFunction(MachineFunction &MF) override;
41 
42  StringRef getPassName() const override {
44  }
45 
46 private:
47  bool expandMBB(MachineBasicBlock &MBB);
49  MachineBasicBlock::iterator &NextMBBI);
50  bool expandAtomicBinOp(MachineBasicBlock &MBB,
52  bool IsMasked, int Width,
53  MachineBasicBlock::iterator &NextMBBI);
54  bool expandAtomicMinMaxOp(MachineBasicBlock &MBB,
56  AtomicRMWInst::BinOp, bool IsMasked, int Width,
57  MachineBasicBlock::iterator &NextMBBI);
58  bool expandAtomicCmpXchg(MachineBasicBlock &MBB,
59  MachineBasicBlock::iterator MBBI, bool IsMasked,
60  int Width, MachineBasicBlock::iterator &NextMBBI);
61 };
62 
64 
65 bool RISCVExpandAtomicPseudo::runOnMachineFunction(MachineFunction &MF) {
66  TII = static_cast<const RISCVInstrInfo *>(MF.getSubtarget().getInstrInfo());
67  bool Modified = false;
68  for (auto &MBB : MF)
69  Modified |= expandMBB(MBB);
70  return Modified;
71 }
72 
73 bool RISCVExpandAtomicPseudo::expandMBB(MachineBasicBlock &MBB) {
74  bool Modified = false;
75 
77  while (MBBI != E) {
78  MachineBasicBlock::iterator NMBBI = std::next(MBBI);
79  Modified |= expandMI(MBB, MBBI, NMBBI);
80  MBBI = NMBBI;
81  }
82 
83  return Modified;
84 }
85 
86 bool RISCVExpandAtomicPseudo::expandMI(MachineBasicBlock &MBB,
88  MachineBasicBlock::iterator &NextMBBI) {
89  // RISCVInstrInfo::getInstSizeInBytes hard-codes the number of expanded
90  // instructions for each pseudo, and must be updated when adding new pseudos
91  // or changing existing ones.
92  switch (MBBI->getOpcode()) {
93  case RISCV::PseudoAtomicLoadNand32:
94  return expandAtomicBinOp(MBB, MBBI, AtomicRMWInst::Nand, false, 32,
95  NextMBBI);
96  case RISCV::PseudoAtomicLoadNand64:
97  return expandAtomicBinOp(MBB, MBBI, AtomicRMWInst::Nand, false, 64,
98  NextMBBI);
99  case RISCV::PseudoMaskedAtomicSwap32:
100  return expandAtomicBinOp(MBB, MBBI, AtomicRMWInst::Xchg, true, 32,
101  NextMBBI);
102  case RISCV::PseudoMaskedAtomicLoadAdd32:
103  return expandAtomicBinOp(MBB, MBBI, AtomicRMWInst::Add, true, 32, NextMBBI);
104  case RISCV::PseudoMaskedAtomicLoadSub32:
105  return expandAtomicBinOp(MBB, MBBI, AtomicRMWInst::Sub, true, 32, NextMBBI);
106  case RISCV::PseudoMaskedAtomicLoadNand32:
107  return expandAtomicBinOp(MBB, MBBI, AtomicRMWInst::Nand, true, 32,
108  NextMBBI);
109  case RISCV::PseudoMaskedAtomicLoadMax32:
110  return expandAtomicMinMaxOp(MBB, MBBI, AtomicRMWInst::Max, true, 32,
111  NextMBBI);
112  case RISCV::PseudoMaskedAtomicLoadMin32:
113  return expandAtomicMinMaxOp(MBB, MBBI, AtomicRMWInst::Min, true, 32,
114  NextMBBI);
115  case RISCV::PseudoMaskedAtomicLoadUMax32:
116  return expandAtomicMinMaxOp(MBB, MBBI, AtomicRMWInst::UMax, true, 32,
117  NextMBBI);
118  case RISCV::PseudoMaskedAtomicLoadUMin32:
119  return expandAtomicMinMaxOp(MBB, MBBI, AtomicRMWInst::UMin, true, 32,
120  NextMBBI);
121  case RISCV::PseudoCmpXchg32:
122  return expandAtomicCmpXchg(MBB, MBBI, false, 32, NextMBBI);
123  case RISCV::PseudoCmpXchg64:
124  return expandAtomicCmpXchg(MBB, MBBI, false, 64, NextMBBI);
125  case RISCV::PseudoMaskedCmpXchg32:
126  return expandAtomicCmpXchg(MBB, MBBI, true, 32, NextMBBI);
127  }
128 
129  return false;
130 }
131 
132 static unsigned getLRForRMW32(AtomicOrdering Ordering) {
133  switch (Ordering) {
134  default:
135  llvm_unreachable("Unexpected AtomicOrdering");
137  return RISCV::LR_W;
139  return RISCV::LR_W_AQ;
141  return RISCV::LR_W;
143  return RISCV::LR_W_AQ;
145  return RISCV::LR_W_AQ_RL;
146  }
147 }
148 
149 static unsigned getSCForRMW32(AtomicOrdering Ordering) {
150  switch (Ordering) {
151  default:
152  llvm_unreachable("Unexpected AtomicOrdering");
154  return RISCV::SC_W;
156  return RISCV::SC_W;
158  return RISCV::SC_W_RL;
160  return RISCV::SC_W_RL;
162  return RISCV::SC_W_AQ_RL;
163  }
164 }
165 
166 static unsigned getLRForRMW64(AtomicOrdering Ordering) {
167  switch (Ordering) {
168  default:
169  llvm_unreachable("Unexpected AtomicOrdering");
171  return RISCV::LR_D;
173  return RISCV::LR_D_AQ;
175  return RISCV::LR_D;
177  return RISCV::LR_D_AQ;
179  return RISCV::LR_D_AQ_RL;
180  }
181 }
182 
183 static unsigned getSCForRMW64(AtomicOrdering Ordering) {
184  switch (Ordering) {
185  default:
186  llvm_unreachable("Unexpected AtomicOrdering");
188  return RISCV::SC_D;
190  return RISCV::SC_D;
192  return RISCV::SC_D_RL;
194  return RISCV::SC_D_RL;
196  return RISCV::SC_D_AQ_RL;
197  }
198 }
199 
200 static unsigned getLRForRMW(AtomicOrdering Ordering, int Width) {
201  if (Width == 32)
202  return getLRForRMW32(Ordering);
203  if (Width == 64)
204  return getLRForRMW64(Ordering);
205  llvm_unreachable("Unexpected LR width\n");
206 }
207 
208 static unsigned getSCForRMW(AtomicOrdering Ordering, int Width) {
209  if (Width == 32)
210  return getSCForRMW32(Ordering);
211  if (Width == 64)
212  return getSCForRMW64(Ordering);
213  llvm_unreachable("Unexpected SC width\n");
214 }
215 
216 static void doAtomicBinOpExpansion(const RISCVInstrInfo *TII, MachineInstr &MI,
217  DebugLoc DL, MachineBasicBlock *ThisMBB,
218  MachineBasicBlock *LoopMBB,
219  MachineBasicBlock *DoneMBB,
220  AtomicRMWInst::BinOp BinOp, int Width) {
221  Register DestReg = MI.getOperand(0).getReg();
222  Register ScratchReg = MI.getOperand(1).getReg();
223  Register AddrReg = MI.getOperand(2).getReg();
224  Register IncrReg = MI.getOperand(3).getReg();
225  AtomicOrdering Ordering =
226  static_cast<AtomicOrdering>(MI.getOperand(4).getImm());
227 
228  // .loop:
229  // lr.[w|d] dest, (addr)
230  // binop scratch, dest, val
231  // sc.[w|d] scratch, scratch, (addr)
232  // bnez scratch, loop
233  BuildMI(LoopMBB, DL, TII->get(getLRForRMW(Ordering, Width)), DestReg)
234  .addReg(AddrReg);
235  switch (BinOp) {
236  default:
237  llvm_unreachable("Unexpected AtomicRMW BinOp");
238  case AtomicRMWInst::Nand:
239  BuildMI(LoopMBB, DL, TII->get(RISCV::AND), ScratchReg)
240  .addReg(DestReg)
241  .addReg(IncrReg);
242  BuildMI(LoopMBB, DL, TII->get(RISCV::XORI), ScratchReg)
243  .addReg(ScratchReg)
244  .addImm(-1);
245  break;
246  }
247  BuildMI(LoopMBB, DL, TII->get(getSCForRMW(Ordering, Width)), ScratchReg)
248  .addReg(AddrReg)
249  .addReg(ScratchReg);
250  BuildMI(LoopMBB, DL, TII->get(RISCV::BNE))
251  .addReg(ScratchReg)
252  .addReg(RISCV::X0)
253  .addMBB(LoopMBB);
254 }
255 
256 static void insertMaskedMerge(const RISCVInstrInfo *TII, DebugLoc DL,
257  MachineBasicBlock *MBB, Register DestReg,
258  Register OldValReg, Register NewValReg,
259  Register MaskReg, Register ScratchReg) {
260  assert(OldValReg != ScratchReg && "OldValReg and ScratchReg must be unique");
261  assert(OldValReg != MaskReg && "OldValReg and MaskReg must be unique");
262  assert(ScratchReg != MaskReg && "ScratchReg and MaskReg must be unique");
263 
264  // We select bits from newval and oldval using:
265  // https://graphics.stanford.edu/~seander/bithacks.html#MaskedMerge
266  // r = oldval ^ ((oldval ^ newval) & masktargetdata);
267  BuildMI(MBB, DL, TII->get(RISCV::XOR), ScratchReg)
268  .addReg(OldValReg)
269  .addReg(NewValReg);
270  BuildMI(MBB, DL, TII->get(RISCV::AND), ScratchReg)
271  .addReg(ScratchReg)
272  .addReg(MaskReg);
273  BuildMI(MBB, DL, TII->get(RISCV::XOR), DestReg)
274  .addReg(OldValReg)
275  .addReg(ScratchReg);
276 }
277 
278 static void doMaskedAtomicBinOpExpansion(
280  MachineBasicBlock *ThisMBB, MachineBasicBlock *LoopMBB,
281  MachineBasicBlock *DoneMBB, AtomicRMWInst::BinOp BinOp, int Width) {
282  assert(Width == 32 && "Should never need to expand masked 64-bit operations");
283  Register DestReg = MI.getOperand(0).getReg();
284  Register ScratchReg = MI.getOperand(1).getReg();
285  Register AddrReg = MI.getOperand(2).getReg();
286  Register IncrReg = MI.getOperand(3).getReg();
287  Register MaskReg = MI.getOperand(4).getReg();
288  AtomicOrdering Ordering =
289  static_cast<AtomicOrdering>(MI.getOperand(5).getImm());
290 
291  // .loop:
292  // lr.w destreg, (alignedaddr)
293  // binop scratch, destreg, incr
294  // xor scratch, destreg, scratch
295  // and scratch, scratch, masktargetdata
296  // xor scratch, destreg, scratch
297  // sc.w scratch, scratch, (alignedaddr)
298  // bnez scratch, loop
299  BuildMI(LoopMBB, DL, TII->get(getLRForRMW32(Ordering)), DestReg)
300  .addReg(AddrReg);
301  switch (BinOp) {
302  default:
303  llvm_unreachable("Unexpected AtomicRMW BinOp");
304  case AtomicRMWInst::Xchg:
305  BuildMI(LoopMBB, DL, TII->get(RISCV::ADDI), ScratchReg)
306  .addReg(IncrReg)
307  .addImm(0);
308  break;
309  case AtomicRMWInst::Add:
310  BuildMI(LoopMBB, DL, TII->get(RISCV::ADD), ScratchReg)
311  .addReg(DestReg)
312  .addReg(IncrReg);
313  break;
314  case AtomicRMWInst::Sub:
315  BuildMI(LoopMBB, DL, TII->get(RISCV::SUB), ScratchReg)
316  .addReg(DestReg)
317  .addReg(IncrReg);
318  break;
319  case AtomicRMWInst::Nand:
320  BuildMI(LoopMBB, DL, TII->get(RISCV::AND), ScratchReg)
321  .addReg(DestReg)
322  .addReg(IncrReg);
323  BuildMI(LoopMBB, DL, TII->get(RISCV::XORI), ScratchReg)
324  .addReg(ScratchReg)
325  .addImm(-1);
326  break;
327  }
328 
329  insertMaskedMerge(TII, DL, LoopMBB, ScratchReg, DestReg, ScratchReg, MaskReg,
330  ScratchReg);
331 
332  BuildMI(LoopMBB, DL, TII->get(getSCForRMW32(Ordering)), ScratchReg)
333  .addReg(AddrReg)
334  .addReg(ScratchReg);
335  BuildMI(LoopMBB, DL, TII->get(RISCV::BNE))
336  .addReg(ScratchReg)
337  .addReg(RISCV::X0)
338  .addMBB(LoopMBB);
339 }
340 
341 bool RISCVExpandAtomicPseudo::expandAtomicBinOp(
343  AtomicRMWInst::BinOp BinOp, bool IsMasked, int Width,
344  MachineBasicBlock::iterator &NextMBBI) {
345  MachineInstr &MI = *MBBI;
346  DebugLoc DL = MI.getDebugLoc();
347 
348  MachineFunction *MF = MBB.getParent();
349  auto LoopMBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());
350  auto DoneMBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());
351 
352  // Insert new MBBs.
353  MF->insert(++MBB.getIterator(), LoopMBB);
354  MF->insert(++LoopMBB->getIterator(), DoneMBB);
355 
356  // Set up successors and transfer remaining instructions to DoneMBB.
357  LoopMBB->addSuccessor(LoopMBB);
358  LoopMBB->addSuccessor(DoneMBB);
359  DoneMBB->splice(DoneMBB->end(), &MBB, MI, MBB.end());
360  DoneMBB->transferSuccessors(&MBB);
361  MBB.addSuccessor(LoopMBB);
362 
363  if (!IsMasked)
364  doAtomicBinOpExpansion(TII, MI, DL, &MBB, LoopMBB, DoneMBB, BinOp, Width);
365  else
366  doMaskedAtomicBinOpExpansion(TII, MI, DL, &MBB, LoopMBB, DoneMBB, BinOp,
367  Width);
368 
369  NextMBBI = MBB.end();
370  MI.eraseFromParent();
371 
372  LivePhysRegs LiveRegs;
373  computeAndAddLiveIns(LiveRegs, *LoopMBB);
374  computeAndAddLiveIns(LiveRegs, *DoneMBB);
375 
376  return true;
377 }
378 
379 static void insertSext(const RISCVInstrInfo *TII, DebugLoc DL,
380  MachineBasicBlock *MBB, Register ValReg,
381  Register ShamtReg) {
382  BuildMI(MBB, DL, TII->get(RISCV::SLL), ValReg)
383  .addReg(ValReg)
384  .addReg(ShamtReg);
385  BuildMI(MBB, DL, TII->get(RISCV::SRA), ValReg)
386  .addReg(ValReg)
387  .addReg(ShamtReg);
388 }
389 
390 bool RISCVExpandAtomicPseudo::expandAtomicMinMaxOp(
392  AtomicRMWInst::BinOp BinOp, bool IsMasked, int Width,
393  MachineBasicBlock::iterator &NextMBBI) {
394  assert(IsMasked == true &&
395  "Should only need to expand masked atomic max/min");
396  assert(Width == 32 && "Should never need to expand masked 64-bit operations");
397 
398  MachineInstr &MI = *MBBI;
399  DebugLoc DL = MI.getDebugLoc();
400  MachineFunction *MF = MBB.getParent();
401  auto LoopHeadMBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());
402  auto LoopIfBodyMBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());
403  auto LoopTailMBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());
404  auto DoneMBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());
405 
406  // Insert new MBBs.
407  MF->insert(++MBB.getIterator(), LoopHeadMBB);
408  MF->insert(++LoopHeadMBB->getIterator(), LoopIfBodyMBB);
409  MF->insert(++LoopIfBodyMBB->getIterator(), LoopTailMBB);
410  MF->insert(++LoopTailMBB->getIterator(), DoneMBB);
411 
412  // Set up successors and transfer remaining instructions to DoneMBB.
413  LoopHeadMBB->addSuccessor(LoopIfBodyMBB);
414  LoopHeadMBB->addSuccessor(LoopTailMBB);
415  LoopIfBodyMBB->addSuccessor(LoopTailMBB);
416  LoopTailMBB->addSuccessor(LoopHeadMBB);
417  LoopTailMBB->addSuccessor(DoneMBB);
418  DoneMBB->splice(DoneMBB->end(), &MBB, MI, MBB.end());
419  DoneMBB->transferSuccessors(&MBB);
420  MBB.addSuccessor(LoopHeadMBB);
421 
422  Register DestReg = MI.getOperand(0).getReg();
423  Register Scratch1Reg = MI.getOperand(1).getReg();
424  Register Scratch2Reg = MI.getOperand(2).getReg();
425  Register AddrReg = MI.getOperand(3).getReg();
426  Register IncrReg = MI.getOperand(4).getReg();
427  Register MaskReg = MI.getOperand(5).getReg();
428  bool IsSigned = BinOp == AtomicRMWInst::Min || BinOp == AtomicRMWInst::Max;
429  AtomicOrdering Ordering =
430  static_cast<AtomicOrdering>(MI.getOperand(IsSigned ? 7 : 6).getImm());
431 
432  //
433  // .loophead:
434  // lr.w destreg, (alignedaddr)
435  // and scratch2, destreg, mask
436  // mv scratch1, destreg
437  // [sext scratch2 if signed min/max]
438  // ifnochangeneeded scratch2, incr, .looptail
439  BuildMI(LoopHeadMBB, DL, TII->get(getLRForRMW32(Ordering)), DestReg)
440  .addReg(AddrReg);
441  BuildMI(LoopHeadMBB, DL, TII->get(RISCV::AND), Scratch2Reg)
442  .addReg(DestReg)
443  .addReg(MaskReg);
444  BuildMI(LoopHeadMBB, DL, TII->get(RISCV::ADDI), Scratch1Reg)
445  .addReg(DestReg)
446  .addImm(0);
447 
448  switch (BinOp) {
449  default:
450  llvm_unreachable("Unexpected AtomicRMW BinOp");
451  case AtomicRMWInst::Max: {
452  insertSext(TII, DL, LoopHeadMBB, Scratch2Reg, MI.getOperand(6).getReg());
453  BuildMI(LoopHeadMBB, DL, TII->get(RISCV::BGE))
454  .addReg(Scratch2Reg)
455  .addReg(IncrReg)
456  .addMBB(LoopTailMBB);
457  break;
458  }
459  case AtomicRMWInst::Min: {
460  insertSext(TII, DL, LoopHeadMBB, Scratch2Reg, MI.getOperand(6).getReg());
461  BuildMI(LoopHeadMBB, DL, TII->get(RISCV::BGE))
462  .addReg(IncrReg)
463  .addReg(Scratch2Reg)
464  .addMBB(LoopTailMBB);
465  break;
466  }
467  case AtomicRMWInst::UMax:
468  BuildMI(LoopHeadMBB, DL, TII->get(RISCV::BGEU))
469  .addReg(Scratch2Reg)
470  .addReg(IncrReg)
471  .addMBB(LoopTailMBB);
472  break;
473  case AtomicRMWInst::UMin:
474  BuildMI(LoopHeadMBB, DL, TII->get(RISCV::BGEU))
475  .addReg(IncrReg)
476  .addReg(Scratch2Reg)
477  .addMBB(LoopTailMBB);
478  break;
479  }
480 
481  // .loopifbody:
482  // xor scratch1, destreg, incr
483  // and scratch1, scratch1, mask
484  // xor scratch1, destreg, scratch1
485  insertMaskedMerge(TII, DL, LoopIfBodyMBB, Scratch1Reg, DestReg, IncrReg,
486  MaskReg, Scratch1Reg);
487 
488  // .looptail:
489  // sc.w scratch1, scratch1, (addr)
490  // bnez scratch1, loop
491  BuildMI(LoopTailMBB, DL, TII->get(getSCForRMW32(Ordering)), Scratch1Reg)
492  .addReg(AddrReg)
493  .addReg(Scratch1Reg);
494  BuildMI(LoopTailMBB, DL, TII->get(RISCV::BNE))
495  .addReg(Scratch1Reg)
496  .addReg(RISCV::X0)
497  .addMBB(LoopHeadMBB);
498 
499  NextMBBI = MBB.end();
500  MI.eraseFromParent();
501 
502  LivePhysRegs LiveRegs;
503  computeAndAddLiveIns(LiveRegs, *LoopHeadMBB);
504  computeAndAddLiveIns(LiveRegs, *LoopIfBodyMBB);
505  computeAndAddLiveIns(LiveRegs, *LoopTailMBB);
506  computeAndAddLiveIns(LiveRegs, *DoneMBB);
507 
508  return true;
509 }
510 
511 bool RISCVExpandAtomicPseudo::expandAtomicCmpXchg(
513  int Width, MachineBasicBlock::iterator &NextMBBI) {
514  MachineInstr &MI = *MBBI;
515  DebugLoc DL = MI.getDebugLoc();
516  MachineFunction *MF = MBB.getParent();
517  auto LoopHeadMBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());
518  auto LoopTailMBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());
519  auto DoneMBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());
520 
521  // Insert new MBBs.
522  MF->insert(++MBB.getIterator(), LoopHeadMBB);
523  MF->insert(++LoopHeadMBB->getIterator(), LoopTailMBB);
524  MF->insert(++LoopTailMBB->getIterator(), DoneMBB);
525 
526  // Set up successors and transfer remaining instructions to DoneMBB.
527  LoopHeadMBB->addSuccessor(LoopTailMBB);
528  LoopHeadMBB->addSuccessor(DoneMBB);
529  LoopTailMBB->addSuccessor(DoneMBB);
530  LoopTailMBB->addSuccessor(LoopHeadMBB);
531  DoneMBB->splice(DoneMBB->end(), &MBB, MI, MBB.end());
532  DoneMBB->transferSuccessors(&MBB);
533  MBB.addSuccessor(LoopHeadMBB);
534 
535  Register DestReg = MI.getOperand(0).getReg();
536  Register ScratchReg = MI.getOperand(1).getReg();
537  Register AddrReg = MI.getOperand(2).getReg();
538  Register CmpValReg = MI.getOperand(3).getReg();
539  Register NewValReg = MI.getOperand(4).getReg();
540  AtomicOrdering Ordering =
541  static_cast<AtomicOrdering>(MI.getOperand(IsMasked ? 6 : 5).getImm());
542 
543  if (!IsMasked) {
544  // .loophead:
545  // lr.[w|d] dest, (addr)
546  // bne dest, cmpval, done
547  BuildMI(LoopHeadMBB, DL, TII->get(getLRForRMW(Ordering, Width)), DestReg)
548  .addReg(AddrReg);
549  BuildMI(LoopHeadMBB, DL, TII->get(RISCV::BNE))
550  .addReg(DestReg)
551  .addReg(CmpValReg)
552  .addMBB(DoneMBB);
553  // .looptail:
554  // sc.[w|d] scratch, newval, (addr)
555  // bnez scratch, loophead
556  BuildMI(LoopTailMBB, DL, TII->get(getSCForRMW(Ordering, Width)), ScratchReg)
557  .addReg(AddrReg)
558  .addReg(NewValReg);
559  BuildMI(LoopTailMBB, DL, TII->get(RISCV::BNE))
560  .addReg(ScratchReg)
561  .addReg(RISCV::X0)
562  .addMBB(LoopHeadMBB);
563  } else {
564  // .loophead:
565  // lr.w dest, (addr)
566  // and scratch, dest, mask
567  // bne scratch, cmpval, done
568  Register MaskReg = MI.getOperand(5).getReg();
569  BuildMI(LoopHeadMBB, DL, TII->get(getLRForRMW(Ordering, Width)), DestReg)
570  .addReg(AddrReg);
571  BuildMI(LoopHeadMBB, DL, TII->get(RISCV::AND), ScratchReg)
572  .addReg(DestReg)
573  .addReg(MaskReg);
574  BuildMI(LoopHeadMBB, DL, TII->get(RISCV::BNE))
575  .addReg(ScratchReg)
576  .addReg(CmpValReg)
577  .addMBB(DoneMBB);
578 
579  // .looptail:
580  // xor scratch, dest, newval
581  // and scratch, scratch, mask
582  // xor scratch, dest, scratch
583  // sc.w scratch, scratch, (adrr)
584  // bnez scratch, loophead
585  insertMaskedMerge(TII, DL, LoopTailMBB, ScratchReg, DestReg, NewValReg,
586  MaskReg, ScratchReg);
587  BuildMI(LoopTailMBB, DL, TII->get(getSCForRMW(Ordering, Width)), ScratchReg)
588  .addReg(AddrReg)
589  .addReg(ScratchReg);
590  BuildMI(LoopTailMBB, DL, TII->get(RISCV::BNE))
591  .addReg(ScratchReg)
592  .addReg(RISCV::X0)
593  .addMBB(LoopHeadMBB);
594  }
595 
596  NextMBBI = MBB.end();
597  MI.eraseFromParent();
598 
599  LivePhysRegs LiveRegs;
600  computeAndAddLiveIns(LiveRegs, *LoopHeadMBB);
601  computeAndAddLiveIns(LiveRegs, *LoopTailMBB);
602  computeAndAddLiveIns(LiveRegs, *DoneMBB);
603 
604  return true;
605 }
606 
607 } // end of anonymous namespace
608 
609 INITIALIZE_PASS(RISCVExpandAtomicPseudo, "riscv-expand-atomic-pseudo",
610  RISCV_EXPAND_ATOMIC_PSEUDO_NAME, false, false)
611 
612 namespace llvm {
613 
615  return new RISCVExpandAtomicPseudo();
616 }
617 
618 } // end of namespace llvm
llvm::ISD::SUB
@ SUB
Definition: ISDOpcodes.h:240
llvm::AtomicOrdering::AcquireRelease
@ AcquireRelease
MI
IRTranslator LLVM IR MI
Definition: IRTranslator.cpp:102
llvm::MachineInstrBuilder::addImm
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
Definition: MachineInstrBuilder.h:131
llvm
---------------------— PointerInfo ------------------------------------—
Definition: AllocatorList.h:23
llvm::MachineBasicBlock::getBasicBlock
const BasicBlock * getBasicBlock() const
Return the LLVM basic block that this instance corresponded to originally.
Definition: MachineBasicBlock.h:202
llvm::TargetSubtargetInfo::getInstrInfo
virtual const TargetInstrInfo * getInstrInfo() const
Definition: TargetSubtargetInfo.h:92
llvm::AtomicRMWInst::BinOp
BinOp
This enumeration lists the possible modifications atomicrmw can make.
Definition: Instructions.h:738
llvm::MachineFunctionPass
MachineFunctionPass - This class adapts the FunctionPass interface to allow convenient creation of pa...
Definition: MachineFunctionPass.h:30
llvm::AtomicOrdering::SequentiallyConsistent
@ SequentiallyConsistent
llvm::LivePhysRegs
A set of physical registers with utility functions to track liveness when walking backward/forward th...
Definition: LivePhysRegs.h:48
llvm::createRISCVExpandAtomicPseudoPass
FunctionPass * createRISCVExpandAtomicPseudoPass()
llvm::MachineFunction::insert
void insert(iterator MBBI, MachineBasicBlock *MBB)
Definition: MachineFunction.h:823
INITIALIZE_PASS
INITIALIZE_PASS(RISCVExpandAtomicPseudo, "riscv-expand-atomic-pseudo", RISCV_EXPAND_ATOMIC_PSEUDO_NAME, false, false) namespace llvm
Definition: RISCVExpandAtomicPseudoInsts.cpp:609
llvm::AtomicOrdering::Monotonic
@ Monotonic
llvm::MachineBasicBlock::addSuccessor
void addSuccessor(MachineBasicBlock *Succ, BranchProbability Prob=BranchProbability::getUnknown())
Add Succ as a successor of this MachineBasicBlock.
Definition: MachineBasicBlock.cpp:746
llvm::PassRegistry::getPassRegistry
static PassRegistry * getPassRegistry()
getPassRegistry - Access the global registry object, which is automatically initialized at applicatio...
Definition: PassRegistry.cpp:31
llvm::MachineInstrBuilder::addMBB
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned TargetFlags=0) const
Definition: MachineInstrBuilder.h:146
E
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
llvm::ISD::SRA
@ SRA
Definition: ISDOpcodes.h:658
TII
const HexagonInstrInfo * TII
Definition: HexagonCopyToCombine.cpp:129
llvm::AtomicOrdering::Acquire
@ Acquire
llvm::AtomicRMWInst::Nand
@ Nand
*p = ~(old & v)
Definition: Instructions.h:748
LoopDeletionResult::Modified
@ Modified
llvm::computeAndAddLiveIns
void computeAndAddLiveIns(LivePhysRegs &LiveRegs, MachineBasicBlock &MBB)
Convenience function combining computeLiveIns() and addLiveIns().
Definition: LivePhysRegs.cpp:339
llvm::initializeRISCVExpandAtomicPseudoPass
void initializeRISCVExpandAtomicPseudoPass(PassRegistry &)
llvm::ISD::AND
@ AND
Bitwise operators - logical and, logical or, logical xor.
Definition: ISDOpcodes.h:632
llvm::AtomicRMWInst::Xchg
@ Xchg
*p = v
Definition: Instructions.h:740
llvm::AtomicRMWInst::Add
@ Add
*p = old + v
Definition: Instructions.h:742
llvm::MachineBasicBlock
Definition: MachineBasicBlock.h:95
llvm::MachineFunction::getSubtarget
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
Definition: MachineFunction.h:622
llvm::AtomicOrdering
AtomicOrdering
Atomic ordering for LLVM's memory model.
Definition: AtomicOrdering.h:56
llvm::AtomicRMWInst::UMin
@ UMin
*p = old <unsigned v ? old : v
Definition: Instructions.h:760
llvm::AtomicRMWInst::Sub
@ Sub
*p = old - v
Definition: Instructions.h:744
llvm::MachineInstr
Representation of each machine instruction.
Definition: MachineInstr.h:64
llvm::AtomicRMWInst::Min
@ Min
*p = old <signed v ? old : v
Definition: Instructions.h:756
llvm::MachineFunction::CreateMachineBasicBlock
MachineBasicBlock * CreateMachineBasicBlock(const BasicBlock *bb=nullptr)
CreateMachineBasicBlock - Allocate a new MachineBasicBlock.
Definition: MachineFunction.cpp:414
MachineFunctionPass.h
assert
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
llvm::MachineBasicBlock::getParent
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
Definition: MachineBasicBlock.h:225
llvm::MachineInstrBuilder::addReg
const MachineInstrBuilder & addReg(Register RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
Definition: MachineInstrBuilder.h:97
RISCV.h
llvm::MachineFunction
Definition: MachineFunction.h:230
llvm::RISCVInstrInfo
Definition: RISCVInstrInfo.h:27
llvm::StringRef
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:58
llvm::MachineBasicBlock::splice
void splice(iterator Where, MachineBasicBlock *Other, iterator From)
Take an instruction from MBB 'Other' at the position From, and insert it into this MBB right before '...
Definition: MachineBasicBlock.h:950
MBBI
MachineBasicBlock MachineBasicBlock::iterator MBBI
Definition: AArch64SLSHardening.cpp:75
llvm_unreachable
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
Definition: ErrorHandling.h:136
llvm::ilist_node_impl::getIterator
self_iterator getIterator()
Definition: ilist_node.h:81
DL
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Definition: AArch64SLSHardening.cpp:76
llvm::MachineBasicBlock::transferSuccessors
void transferSuccessors(MachineBasicBlock *FromMBB)
Transfers all the successors from MBB to this machine basic block (i.e., copies all the successors Fr...
Definition: MachineBasicBlock.cpp:865
llvm::Register
Wrapper class representing virtual and physical registers.
Definition: Register.h:19
llvm::ISD::XOR
@ XOR
Definition: ISDOpcodes.h:634
llvm::AtomicOrdering::Release
@ Release
MBB
MachineBasicBlock & MBB
Definition: AArch64SLSHardening.cpp:74
RISCVInstrInfo.h
llvm::AMDGPU::Hwreg::Width
Width
Definition: SIDefines.h:403
llvm::ISD::ADD
@ ADD
Simple integer binary arithmetic operators.
Definition: ISDOpcodes.h:239
RISCV_EXPAND_ATOMIC_PSEUDO_NAME
#define RISCV_EXPAND_ATOMIC_PSEUDO_NAME
Definition: RISCVExpandAtomicPseudoInsts.cpp:26
llvm::MachineBasicBlock::begin
iterator begin()
Definition: MachineBasicBlock.h:268
MachineInstrBuilder.h
llvm::BuildMI
MachineInstrBuilder BuildMI(MachineFunction &MF, const DebugLoc &DL, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
Definition: MachineInstrBuilder.h:328
llvm::FunctionPass
FunctionPass class - This class is used to implement most global optimizations.
Definition: Pass.h:298
llvm::DebugLoc
A debug info location.
Definition: DebugLoc.h:33
llvm::AtomicRMWInst::UMax
@ UMax
*p = old >unsigned v ? old : v
Definition: Instructions.h:758
llvm::MachineInstrBundleIterator< MachineInstr >
llvm::MachineBasicBlock::end
iterator end()
Definition: MachineBasicBlock.h:270
llvm::Intrinsic::ID
unsigned ID
Definition: TargetTransformInfo.h:38
llvm::AtomicRMWInst::Max
@ Max
*p = old >signed v ? old : v
Definition: Instructions.h:754
LivePhysRegs.h
RISCVTargetMachine.h