MipsExpandPseudo.cpp
1//===-- MipsExpandPseudo.cpp - Expand pseudo instructions -----------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file contains a pass that expands pseudo instructions into target
10// instructions to allow proper scheduling, if-conversion, and other late
11// optimizations. This pass should be run after register allocation but before
12// the post-regalloc scheduling pass.
13//
14// This is currently only used for expanding atomic pseudos after register
15// allocation. We do this to avoid the fast register allocator introducing
16// spills between ll and sc. These stores cause some MIPS implementations to
17// abort the atomic RMW sequence.
18//
19//===----------------------------------------------------------------------===//
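//
// For example, on a MIPS32 target an ATOMIC_LOAD_ADD_I32_POSTRA pseudo is
// rewritten into a small retry loop of roughly this shape (the exact LL/SC
// and branch opcodes depend on the ISA revision, microMIPS mode, and pointer
// width selected in the expansion routines below):
//
//   $loop: ll    $oldval, 0($ptr)
//          addu  $scratch, $oldval, $incr
//          sc    $scratch, 0($ptr)
//          beq   $scratch, $zero, $loop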
20
21#include "Mips.h"
22#include "MipsInstrInfo.h"
23#include "MipsSubtarget.h"
24#include "llvm/CodeGen/LivePhysRegs.h"
25#include "llvm/CodeGen/MachineFunctionPass.h"
26#include "llvm/CodeGen/MachineInstrBuilder.h"
27
28using namespace llvm;
29
30#define DEBUG_TYPE "mips-pseudo"
31
32namespace {
33 class MipsExpandPseudo : public MachineFunctionPass {
34 public:
35 static char ID;
36 MipsExpandPseudo() : MachineFunctionPass(ID) {}
37
38 const MipsInstrInfo *TII;
39 const MipsSubtarget *STI;
40
41 bool runOnMachineFunction(MachineFunction &Fn) override;
42
43 MachineFunctionProperties getRequiredProperties() const override {
44 return MachineFunctionProperties().setNoVRegs();
45 }
46
47 StringRef getPassName() const override {
48 return "Mips pseudo instruction expansion pass";
49 }
50
51 private:
52    bool expandAtomicCmpSwap(MachineBasicBlock &MBB,
53                             MachineBasicBlock::iterator MBBI,
54                             MachineBasicBlock::iterator &NMBBI);
55    bool expandAtomicCmpSwapSubword(MachineBasicBlock &MBB,
56                                    MachineBasicBlock::iterator MBBI,
57                                    MachineBasicBlock::iterator &NMBBI);
58
59    bool expandAtomicBinOp(MachineBasicBlock &BB,
60                           MachineBasicBlock::iterator I,
61                           MachineBasicBlock::iterator &NMBBI, unsigned Size);
62    bool expandAtomicBinOpSubword(MachineBasicBlock &BB,
63                                  MachineBasicBlock::iterator I,
64                                  MachineBasicBlock::iterator &NMBBI);
65
66    bool expandMI(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
67                  MachineBasicBlock::iterator &NMBB);
68 bool expandMBB(MachineBasicBlock &MBB);
69 };
70 char MipsExpandPseudo::ID = 0;
71}
72
73 bool MipsExpandPseudo::expandAtomicCmpSwapSubword(
74     MachineBasicBlock &BB, MachineBasicBlock::iterator I,
75     MachineBasicBlock::iterator &NMBBI) {
76
77 MachineFunction *MF = BB.getParent();
78
79 const bool ArePtrs64bit = STI->getABI().ArePtrs64bit();
80 DebugLoc DL = I->getDebugLoc();
81 unsigned LL, SC;
82
83 unsigned ZERO = Mips::ZERO;
84 unsigned BNE = Mips::BNE;
85 unsigned BEQ = Mips::BEQ;
86 unsigned SEOp =
87 I->getOpcode() == Mips::ATOMIC_CMP_SWAP_I8_POSTRA ? Mips::SEB : Mips::SEH;
88
89 if (STI->inMicroMipsMode()) {
90 LL = STI->hasMips32r6() ? Mips::LL_MMR6 : Mips::LL_MM;
91 SC = STI->hasMips32r6() ? Mips::SC_MMR6 : Mips::SC_MM;
92 BNE = STI->hasMips32r6() ? Mips::BNEC_MMR6 : Mips::BNE_MM;
93 BEQ = STI->hasMips32r6() ? Mips::BEQC_MMR6 : Mips::BEQ_MM;
94 } else {
95 LL = STI->hasMips32r6() ? (ArePtrs64bit ? Mips::LL64_R6 : Mips::LL_R6)
96 : (ArePtrs64bit ? Mips::LL64 : Mips::LL);
97 SC = STI->hasMips32r6() ? (ArePtrs64bit ? Mips::SC64_R6 : Mips::SC_R6)
98 : (ArePtrs64bit ? Mips::SC64 : Mips::SC);
99 }
100
101 Register Dest = I->getOperand(0).getReg();
102 Register Ptr = I->getOperand(1).getReg();
103 Register Mask = I->getOperand(2).getReg();
104 Register ShiftCmpVal = I->getOperand(3).getReg();
105 Register Mask2 = I->getOperand(4).getReg();
106 Register ShiftNewVal = I->getOperand(5).getReg();
107 Register ShiftAmnt = I->getOperand(6).getReg();
108 Register Scratch = I->getOperand(7).getReg();
109 Register Scratch2 = I->getOperand(8).getReg();
110
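  // The pseudo's operands are expected to already carry the word-aligned
  // address and the precomputed mask/shift values (set up when the pseudo was
  // created); this expansion only has to emit the LL/SC retry loop on the
  // containing word.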
111 // insert new blocks after the current block
112 const BasicBlock *LLVM_BB = BB.getBasicBlock();
113 MachineBasicBlock *loop1MBB = MF->CreateMachineBasicBlock(LLVM_BB);
114 MachineBasicBlock *loop2MBB = MF->CreateMachineBasicBlock(LLVM_BB);
115 MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(LLVM_BB);
116 MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(LLVM_BB);
117  MachineFunction::iterator It = ++BB.getIterator();
118  MF->insert(It, loop1MBB);
119 MF->insert(It, loop2MBB);
120 MF->insert(It, sinkMBB);
121 MF->insert(It, exitMBB);
122
123 // Transfer the remainder of BB and its successor edges to exitMBB.
124 exitMBB->splice(exitMBB->begin(), &BB,
125 std::next(MachineBasicBlock::iterator(I)), BB.end());
126  exitMBB->transferSuccessorsAndUpdatePHIs(&BB);
127
128 // thisMBB:
129 // ...
130 // fallthrough --> loop1MBB
131  BB.addSuccessor(loop1MBB, BranchProbability::getOne());
132  loop1MBB->addSuccessor(sinkMBB);
133 loop1MBB->addSuccessor(loop2MBB);
134 loop1MBB->normalizeSuccProbs();
135 loop2MBB->addSuccessor(loop1MBB);
136 loop2MBB->addSuccessor(sinkMBB);
137 loop2MBB->normalizeSuccProbs();
138 sinkMBB->addSuccessor(exitMBB, BranchProbability::getOne());
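  // Control flow: loop1MBB branches to sinkMBB when the loaded value does not
  // match the expected value and otherwise falls through to loop2MBB;
  // loop2MBB branches back to loop1MBB when the SC fails; sinkMBB extracts
  // and sign-extends the result before falling through to exitMBB.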
139
140 // loop1MBB:
141 // ll dest, 0(ptr)
142 // and Mask', dest, Mask
143 // bne Mask', ShiftCmpVal, exitMBB
144 BuildMI(loop1MBB, DL, TII->get(LL), Scratch).addReg(Ptr).addImm(0);
145 BuildMI(loop1MBB, DL, TII->get(Mips::AND), Scratch2)
146 .addReg(Scratch)
147 .addReg(Mask);
148 BuildMI(loop1MBB, DL, TII->get(BNE))
149 .addReg(Scratch2).addReg(ShiftCmpVal).addMBB(sinkMBB);
150
151 // loop2MBB:
152 // and dest, dest, mask2
153 // or dest, dest, ShiftNewVal
154 // sc dest, dest, 0(ptr)
155 // beq dest, $0, loop1MBB
156 BuildMI(loop2MBB, DL, TII->get(Mips::AND), Scratch)
157 .addReg(Scratch, RegState::Kill)
158 .addReg(Mask2);
159 BuildMI(loop2MBB, DL, TII->get(Mips::OR), Scratch)
160 .addReg(Scratch, RegState::Kill)
161 .addReg(ShiftNewVal);
162 BuildMI(loop2MBB, DL, TII->get(SC), Scratch)
163 .addReg(Scratch, RegState::Kill)
164 .addReg(Ptr)
165 .addImm(0);
166 BuildMI(loop2MBB, DL, TII->get(BEQ))
167 .addReg(Scratch, RegState::Kill)
168 .addReg(ZERO)
169 .addMBB(loop1MBB);
170
171 // sinkMBB:
172 // srl srlres, Mask', shiftamt
173 // sign_extend dest,srlres
174 BuildMI(sinkMBB, DL, TII->get(Mips::SRLV), Dest)
175 .addReg(Scratch2)
176 .addReg(ShiftAmnt);
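  // Sign-extend the extracted value: SEB/SEH is available from MIPS32r2
  // onwards; older cores emulate it with a shift-left/arithmetic-shift-right
  // pair.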
177 if (STI->hasMips32r2()) {
178 BuildMI(sinkMBB, DL, TII->get(SEOp), Dest).addReg(Dest);
179 } else {
180 const unsigned ShiftImm =
181 I->getOpcode() == Mips::ATOMIC_CMP_SWAP_I16_POSTRA ? 16 : 24;
182 BuildMI(sinkMBB, DL, TII->get(Mips::SLL), Dest)
183 .addReg(Dest, RegState::Kill)
184 .addImm(ShiftImm);
185 BuildMI(sinkMBB, DL, TII->get(Mips::SRA), Dest)
186 .addReg(Dest, RegState::Kill)
187 .addImm(ShiftImm);
188 }
189
190 LivePhysRegs LiveRegs;
191 computeAndAddLiveIns(LiveRegs, *loop1MBB);
192 computeAndAddLiveIns(LiveRegs, *loop2MBB);
193 computeAndAddLiveIns(LiveRegs, *sinkMBB);
194 computeAndAddLiveIns(LiveRegs, *exitMBB);
195
196 NMBBI = BB.end();
197 I->eraseFromParent();
198 return true;
199}
200
201bool MipsExpandPseudo::expandAtomicCmpSwap(MachineBasicBlock &BB,
202                                            MachineBasicBlock::iterator I,
203                                            MachineBasicBlock::iterator &NMBBI) {
204
205 const unsigned Size =
206 I->getOpcode() == Mips::ATOMIC_CMP_SWAP_I32_POSTRA ? 4 : 8;
207 MachineFunction *MF = BB.getParent();
208
209 const bool ArePtrs64bit = STI->getABI().ArePtrs64bit();
210 DebugLoc DL = I->getDebugLoc();
211
212 unsigned LL, SC, ZERO, BNE, BEQ, MOVE;
213
214 if (Size == 4) {
215 if (STI->inMicroMipsMode()) {
216 LL = STI->hasMips32r6() ? Mips::LL_MMR6 : Mips::LL_MM;
217 SC = STI->hasMips32r6() ? Mips::SC_MMR6 : Mips::SC_MM;
218 BNE = STI->hasMips32r6() ? Mips::BNEC_MMR6 : Mips::BNE_MM;
219 BEQ = STI->hasMips32r6() ? Mips::BEQC_MMR6 : Mips::BEQ_MM;
220 } else {
221 LL = STI->hasMips32r6()
222 ? (ArePtrs64bit ? Mips::LL64_R6 : Mips::LL_R6)
223 : (ArePtrs64bit ? Mips::LL64 : Mips::LL);
224 SC = STI->hasMips32r6()
225 ? (ArePtrs64bit ? Mips::SC64_R6 : Mips::SC_R6)
226 : (ArePtrs64bit ? Mips::SC64 : Mips::SC);
227 BNE = Mips::BNE;
228 BEQ = Mips::BEQ;
229 }
230
231 ZERO = Mips::ZERO;
232 MOVE = Mips::OR;
233 } else {
234 LL = STI->hasMips64r6() ? Mips::LLD_R6 : Mips::LLD;
235 SC = STI->hasMips64r6() ? Mips::SCD_R6 : Mips::SCD;
236 ZERO = Mips::ZERO_64;
237 BNE = Mips::BNE64;
238 BEQ = Mips::BEQ64;
239 MOVE = Mips::OR64;
240 }
241
242 Register Dest = I->getOperand(0).getReg();
243 Register Ptr = I->getOperand(1).getReg();
244 Register OldVal = I->getOperand(2).getReg();
245 Register NewVal = I->getOperand(3).getReg();
246 Register Scratch = I->getOperand(4).getReg();
247
248 // insert new blocks after the current block
249 const BasicBlock *LLVM_BB = BB.getBasicBlock();
250 MachineBasicBlock *loop1MBB = MF->CreateMachineBasicBlock(LLVM_BB);
251 MachineBasicBlock *loop2MBB = MF->CreateMachineBasicBlock(LLVM_BB);
252 MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(LLVM_BB);
253  MachineFunction::iterator It = ++BB.getIterator();
254  MF->insert(It, loop1MBB);
255 MF->insert(It, loop2MBB);
256 MF->insert(It, exitMBB);
257
258 // Transfer the remainder of BB and its successor edges to exitMBB.
259 exitMBB->splice(exitMBB->begin(), &BB,
260 std::next(MachineBasicBlock::iterator(I)), BB.end());
261  exitMBB->transferSuccessorsAndUpdatePHIs(&BB);
262
263 // thisMBB:
264 // ...
265 // fallthrough --> loop1MBB
266  BB.addSuccessor(loop1MBB, BranchProbability::getOne());
267  loop1MBB->addSuccessor(exitMBB);
268 loop1MBB->addSuccessor(loop2MBB);
269 loop1MBB->normalizeSuccProbs();
270 loop2MBB->addSuccessor(loop1MBB);
271 loop2MBB->addSuccessor(exitMBB);
272 loop2MBB->normalizeSuccProbs();
273
274 // loop1MBB:
275 // ll dest, 0(ptr)
276 // bne dest, oldval, exitMBB
277 BuildMI(loop1MBB, DL, TII->get(LL), Dest).addReg(Ptr).addImm(0);
278 BuildMI(loop1MBB, DL, TII->get(BNE))
279 .addReg(Dest, RegState::Kill).addReg(OldVal).addMBB(exitMBB);
280
281 // loop2MBB:
282 // move scratch, NewVal
283 // sc Scratch, Scratch, 0(ptr)
284 // beq Scratch, $0, loop1MBB
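  // "move" is materialized as an OR with $zero (OR64 in the 64-bit case).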
285 BuildMI(loop2MBB, DL, TII->get(MOVE), Scratch).addReg(NewVal).addReg(ZERO);
286 BuildMI(loop2MBB, DL, TII->get(SC), Scratch)
287 .addReg(Scratch).addReg(Ptr).addImm(0);
288 BuildMI(loop2MBB, DL, TII->get(BEQ))
289 .addReg(Scratch, RegState::Kill).addReg(ZERO).addMBB(loop1MBB);
290
291 LivePhysRegs LiveRegs;
292 computeAndAddLiveIns(LiveRegs, *loop1MBB);
293 computeAndAddLiveIns(LiveRegs, *loop2MBB);
294 computeAndAddLiveIns(LiveRegs, *exitMBB);
295
296 NMBBI = BB.end();
297 I->eraseFromParent();
298 return true;
299}
300
301bool MipsExpandPseudo::expandAtomicBinOpSubword(
302 MachineBasicBlock &BB, MachineBasicBlock::iterator I,
303    MachineBasicBlock::iterator &NMBBI) {
304
305 MachineFunction *MF = BB.getParent();
306
307 const bool ArePtrs64bit = STI->getABI().ArePtrs64bit();
308 DebugLoc DL = I->getDebugLoc();
309
310 unsigned LL, SC, SLT, SLTu, OR, MOVN, MOVZ, SELNEZ, SELEQZ;
311 unsigned BEQ = Mips::BEQ;
312 unsigned SEOp = Mips::SEH;
313
314 if (STI->inMicroMipsMode()) {
315 LL = STI->hasMips32r6() ? Mips::LL_MMR6 : Mips::LL_MM;
316 SC = STI->hasMips32r6() ? Mips::SC_MMR6 : Mips::SC_MM;
317 BEQ = STI->hasMips32r6() ? Mips::BEQC_MMR6 : Mips::BEQ_MM;
318 SLT = Mips::SLT_MM;
319 SLTu = Mips::SLTu_MM;
320 OR = STI->hasMips32r6() ? Mips::OR_MMR6 : Mips::OR_MM;
321 MOVN = Mips::MOVN_I_MM;
322 MOVZ = Mips::MOVZ_I_MM;
323 SELNEZ = STI->hasMips32r6() ? Mips::SELNEZ_MMR6 : Mips::SELNEZ;
324 SELEQZ = STI->hasMips32r6() ? Mips::SELEQZ_MMR6 : Mips::SELEQZ;
325 } else {
326 LL = STI->hasMips32r6() ? (ArePtrs64bit ? Mips::LL64_R6 : Mips::LL_R6)
327 : (ArePtrs64bit ? Mips::LL64 : Mips::LL);
328 SC = STI->hasMips32r6() ? (ArePtrs64bit ? Mips::SC64_R6 : Mips::SC_R6)
329 : (ArePtrs64bit ? Mips::SC64 : Mips::SC);
330 SLT = Mips::SLT;
331 SLTu = Mips::SLTu;
332 OR = Mips::OR;
333 MOVN = Mips::MOVN_I_I;
334 MOVZ = Mips::MOVZ_I_I;
335 SELNEZ = Mips::SELNEZ;
336 SELEQZ = Mips::SELEQZ;
337 }
338
339 bool IsSwap = false;
340 bool IsNand = false;
341 bool IsMin = false;
342 bool IsMax = false;
343 bool IsUnsigned = false;
344 bool DestOK = false;
345
346 unsigned Opcode = 0;
347 switch (I->getOpcode()) {
348 case Mips::ATOMIC_LOAD_NAND_I8_POSTRA:
349 SEOp = Mips::SEB;
350 [[fallthrough]];
351 case Mips::ATOMIC_LOAD_NAND_I16_POSTRA:
352 IsNand = true;
353 break;
354 case Mips::ATOMIC_SWAP_I8_POSTRA:
355 SEOp = Mips::SEB;
356 [[fallthrough]];
357 case Mips::ATOMIC_SWAP_I16_POSTRA:
358 IsSwap = true;
359 break;
360 case Mips::ATOMIC_LOAD_ADD_I8_POSTRA:
361 SEOp = Mips::SEB;
362 [[fallthrough]];
363 case Mips::ATOMIC_LOAD_ADD_I16_POSTRA:
364 Opcode = Mips::ADDu;
365 break;
366 case Mips::ATOMIC_LOAD_SUB_I8_POSTRA:
367 SEOp = Mips::SEB;
368 [[fallthrough]];
369 case Mips::ATOMIC_LOAD_SUB_I16_POSTRA:
370 Opcode = Mips::SUBu;
371 break;
372 case Mips::ATOMIC_LOAD_AND_I8_POSTRA:
373 SEOp = Mips::SEB;
374 [[fallthrough]];
375 case Mips::ATOMIC_LOAD_AND_I16_POSTRA:
376 Opcode = Mips::AND;
377 break;
378 case Mips::ATOMIC_LOAD_OR_I8_POSTRA:
379 SEOp = Mips::SEB;
380 [[fallthrough]];
381 case Mips::ATOMIC_LOAD_OR_I16_POSTRA:
382 Opcode = Mips::OR;
383 break;
384 case Mips::ATOMIC_LOAD_XOR_I8_POSTRA:
385 SEOp = Mips::SEB;
386 [[fallthrough]];
387 case Mips::ATOMIC_LOAD_XOR_I16_POSTRA:
388 Opcode = Mips::XOR;
389 break;
390 case Mips::ATOMIC_LOAD_UMIN_I8_POSTRA:
391 SEOp = Mips::SEB;
392 IsUnsigned = true;
393 IsMin = true;
394 break;
395 case Mips::ATOMIC_LOAD_UMIN_I16_POSTRA:
396 IsUnsigned = true;
397 IsMin = true;
398 break;
399 case Mips::ATOMIC_LOAD_MIN_I8_POSTRA:
400 SEOp = Mips::SEB;
401 IsMin = true;
402 break;
403 case Mips::ATOMIC_LOAD_MIN_I16_POSTRA:
404 IsMin = true;
405 break;
406 case Mips::ATOMIC_LOAD_UMAX_I8_POSTRA:
407 SEOp = Mips::SEB;
408 IsUnsigned = true;
409 IsMax = true;
410 break;
411 case Mips::ATOMIC_LOAD_UMAX_I16_POSTRA:
412 IsUnsigned = true;
413 IsMax = true;
414 break;
415 case Mips::ATOMIC_LOAD_MAX_I8_POSTRA:
416 SEOp = Mips::SEB;
417 IsMax = true;
418 break;
419 case Mips::ATOMIC_LOAD_MAX_I16_POSTRA:
420 IsMax = true;
421 break;
422 default:
423 llvm_unreachable("Unknown subword atomic pseudo for expansion!");
424 }
425
426 Register Dest = I->getOperand(0).getReg();
427 Register Ptr = I->getOperand(1).getReg();
428 Register Incr = I->getOperand(2).getReg();
429 Register Mask = I->getOperand(3).getReg();
430 Register Mask2 = I->getOperand(4).getReg();
431 Register ShiftAmnt = I->getOperand(5).getReg();
432 Register OldVal = I->getOperand(6).getReg();
433 Register BinOpRes = I->getOperand(7).getReg();
434 Register StoreVal = I->getOperand(8).getReg();
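  // Targets without MOVN/MOVZ (pre-MIPS4/MIPS32) and without the R6 selects
  // cannot implement min/max with a conditional move, so the expansion falls
  // back to explicit branch blocks (loop1MBB/loop2MBB below).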
435 bool NoMovnInstr = (IsMin || IsMax) && !STI->hasMips4() && !STI->hasMips32();
436
437 const BasicBlock *LLVM_BB = BB.getBasicBlock();
438 MachineBasicBlock *loopMBB = MF->CreateMachineBasicBlock(LLVM_BB);
439 MachineBasicBlock *loop1MBB = nullptr;
440 MachineBasicBlock *loop2MBB = nullptr;
441 if (NoMovnInstr) {
442 loop1MBB = MF->CreateMachineBasicBlock(LLVM_BB);
443 loop2MBB = MF->CreateMachineBasicBlock(LLVM_BB);
444 }
445 MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(LLVM_BB);
446 MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(LLVM_BB);
447  MachineFunction::iterator It = ++BB.getIterator();
448  MF->insert(It, loopMBB);
449 if (NoMovnInstr) {
450 MF->insert(It, loop1MBB);
451 MF->insert(It, loop2MBB);
452 }
453 MF->insert(It, sinkMBB);
454 MF->insert(It, exitMBB);
455
456 exitMBB->splice(exitMBB->begin(), &BB, std::next(I), BB.end());
457  exitMBB->transferSuccessorsAndUpdatePHIs(&BB);
458
459  BB.addSuccessor(loopMBB, BranchProbability::getOne());
460  if (NoMovnInstr) {
461 loopMBB->addSuccessor(loop1MBB);
462 loopMBB->addSuccessor(loop2MBB);
463 } else {
464 loopMBB->addSuccessor(sinkMBB);
465 loopMBB->addSuccessor(loopMBB);
466 loopMBB->normalizeSuccProbs();
467 }
468 if (NoMovnInstr) {
469 loop1MBB->addSuccessor(loop2MBB);
470 loop2MBB->addSuccessor(loopMBB);
471 loop2MBB->addSuccessor(sinkMBB);
472 }
473
474 BuildMI(loopMBB, DL, TII->get(LL), OldVal).addReg(Ptr).addImm(0);
475 if (IsNand) {
476 // and andres, oldval, incr2
477 // nor binopres, $0, andres
478 // and newval, binopres, mask
479 BuildMI(loopMBB, DL, TII->get(Mips::AND), BinOpRes)
480 .addReg(OldVal)
481 .addReg(Incr);
482 BuildMI(loopMBB, DL, TII->get(Mips::NOR), BinOpRes)
483 .addReg(Mips::ZERO)
484 .addReg(BinOpRes);
485 BuildMI(loopMBB, DL, TII->get(Mips::AND), BinOpRes)
486 .addReg(BinOpRes)
487 .addReg(Mask);
488 } else if (IsMin || IsMax) {
489
490 assert(I->getNumOperands() == 10 &&
491 "Atomics min|max|umin|umax use an additional register");
492 Register Scratch4 = I->getOperand(9).getReg();
493
494 unsigned SLTScratch4 = IsUnsigned ? SLTu : SLT;
495 unsigned SELIncr = IsMax ? SELNEZ : SELEQZ;
496 unsigned SELOldVal = IsMax ? SELEQZ : SELNEZ;
497 unsigned MOVIncr = IsMax ? MOVN : MOVZ;
498
499 BuildMI(loopMBB, DL, TII->get(Mips::SRAV), StoreVal)
500 .addReg(OldVal)
501 .addReg(ShiftAmnt);
502 if (IsUnsigned) {
503 const unsigned OpMask = SEOp == Mips::SEH ? 0xffff : 0xff;
504 BuildMI(loopMBB, DL, TII->get(Mips::ANDi), StoreVal)
505 .addReg(StoreVal)
506 .addImm(OpMask);
507 } else if (STI->hasMips32r2()) {
508 BuildMI(loopMBB, DL, TII->get(SEOp), StoreVal).addReg(StoreVal);
509 } else {
510 const unsigned ShiftImm = SEOp == Mips::SEH ? 16 : 24;
511 const unsigned SROp = IsUnsigned ? Mips::SRL : Mips::SRA;
512 BuildMI(loopMBB, DL, TII->get(Mips::SLL), StoreVal)
513 .addReg(StoreVal, RegState::Kill)
514 .addImm(ShiftImm);
515 BuildMI(loopMBB, DL, TII->get(SROp), StoreVal)
516 .addReg(StoreVal, RegState::Kill)
517 .addImm(ShiftImm);
518 }
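    // Copy the extracted old value (zero- or sign-extended above) into Dest
    // here, inside the loop, so the usual extraction in sinkMBB can be
    // skipped (DestOK).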
519 BuildMI(loopMBB, DL, TII->get(Mips::OR), Dest)
520 .addReg(Mips::ZERO)
521 .addReg(StoreVal);
522 DestOK = true;
523 BuildMI(loopMBB, DL, TII->get(Mips::SLLV), StoreVal)
524 .addReg(StoreVal)
525 .addReg(ShiftAmnt);
526
527 // unsigned: sltu Scratch4, StoreVal, Incr
528 // signed: slt Scratch4, StoreVal, Incr
529 BuildMI(loopMBB, DL, TII->get(SLTScratch4), Scratch4)
530 .addReg(StoreVal)
531 .addReg(Incr);
532
533 if (STI->hasMips64r6() || STI->hasMips32r6()) {
534 // max: seleqz BinOpRes, OldVal, Scratch4
535 // selnez Scratch4, Incr, Scratch4
536 // or BinOpRes, BinOpRes, Scratch4
537  //   min:  selnez BinOpRes, OldVal, Scratch4
538 // seleqz Scratch4, Incr, Scratch4
539 // or BinOpRes, BinOpRes, Scratch4
540 BuildMI(loopMBB, DL, TII->get(SELOldVal), BinOpRes)
541 .addReg(StoreVal)
542 .addReg(Scratch4);
543 BuildMI(loopMBB, DL, TII->get(SELIncr), Scratch4)
544 .addReg(Incr)
545 .addReg(Scratch4);
546 BuildMI(loopMBB, DL, TII->get(OR), BinOpRes)
547 .addReg(BinOpRes)
548 .addReg(Scratch4);
549 } else if (STI->hasMips4() || STI->hasMips32()) {
550 // max: move BinOpRes, StoreVal
551 // movn BinOpRes, Incr, Scratch4, BinOpRes
552 // min: move BinOpRes, StoreVal
553 // movz BinOpRes, Incr, Scratch4, BinOpRes
554 BuildMI(loopMBB, DL, TII->get(OR), BinOpRes)
555 .addReg(StoreVal)
556 .addReg(Mips::ZERO);
557 BuildMI(loopMBB, DL, TII->get(MOVIncr), BinOpRes)
558 .addReg(Incr)
559 .addReg(Scratch4)
560 .addReg(BinOpRes);
561 } else {
562 // if min:
563 // loopMBB: move BinOpRes, StoreVal
564 // beq Scratch4, 0, loop1MBB
565 // j loop2MBB
566 // loop1MBB: move BinOpRes, Incr
567 // loop2MBB: and BinOpRes, BinOpRes, Mask
568  //             and StoreVal, OldVal, Mask2
569 // or StoreVal, StoreVal, BinOpRes
570 // StoreVal<tied1> = sc StoreVal, 0(Ptr)
571 // beq StoreVal, zero, loopMBB
572 //
573 // if max:
574 // loopMBB: move BinOpRes, Incr
575 // beq Scratch4, 0, loop1MBB
576 // j loop2MBB
577 // loop1MBB: move BinOpRes, StoreVal
578 // loop2MBB: and BinOpRes, BinOpRes, Mask
579  //             and StoreVal, OldVal, Mask2
580 // or StoreVal, StoreVal, BinOpRes
581 // StoreVal<tied1> = sc StoreVal, 0(Ptr)
582 // beq StoreVal, zero, loopMBB
583 if (IsMin) {
584 BuildMI(loopMBB, DL, TII->get(OR), BinOpRes)
585 .addReg(StoreVal)
586 .addReg(Mips::ZERO);
587 BuildMI(loop1MBB, DL, TII->get(OR), BinOpRes)
588 .addReg(Incr)
589 .addReg(Mips::ZERO);
590 } else {
591 BuildMI(loopMBB, DL, TII->get(OR), BinOpRes)
592 .addReg(Incr)
593 .addReg(Mips::ZERO);
594 BuildMI(loop1MBB, DL, TII->get(OR), BinOpRes)
595 .addReg(StoreVal)
596 .addReg(Mips::ZERO);
597 }
598 BuildMI(loopMBB, DL, TII->get(BEQ))
599 .addReg(Scratch4)
600 .addReg(Mips::ZERO)
601 .addMBB(loop1MBB);
602 BuildMI(loopMBB, DL, TII->get(Mips::J)).addMBB(loop2MBB);
603 }
604
605 // and BinOpRes, BinOpRes, Mask
606 if (NoMovnInstr)
607 BuildMI(loop2MBB, DL, TII->get(Mips::AND), BinOpRes)
608 .addReg(BinOpRes)
609 .addReg(Mask);
610 else
611 BuildMI(loopMBB, DL, TII->get(Mips::AND), BinOpRes)
612 .addReg(BinOpRes)
613 .addReg(Mask);
614
615 } else if (!IsSwap) {
616 // <binop> binopres, oldval, incr2
617 // and newval, binopres, mask
618 BuildMI(loopMBB, DL, TII->get(Opcode), BinOpRes)
619 .addReg(OldVal)
620 .addReg(Incr);
621 BuildMI(loopMBB, DL, TII->get(Mips::AND), BinOpRes)
622 .addReg(BinOpRes)
623 .addReg(Mask);
624 } else { // atomic.swap
625 // and newval, incr2, mask
626 BuildMI(loopMBB, DL, TII->get(Mips::AND), BinOpRes)
627 .addReg(Incr)
628 .addReg(Mask);
629 }
630
631  // and StoreVal, OldVal, Mask2
632 // or StoreVal, StoreVal, BinOpRes
633 // StoreVal<tied1> = sc StoreVal, 0(Ptr)
634 // beq StoreVal, zero, loopMBB
635 if (NoMovnInstr) {
636 BuildMI(loop2MBB, DL, TII->get(Mips::AND), StoreVal)
637 .addReg(OldVal)
638 .addReg(Mask2);
639 BuildMI(loop2MBB, DL, TII->get(Mips::OR), StoreVal)
640 .addReg(StoreVal)
641 .addReg(BinOpRes);
642 BuildMI(loop2MBB, DL, TII->get(SC), StoreVal)
643 .addReg(StoreVal)
644 .addReg(Ptr)
645 .addImm(0);
646 BuildMI(loop2MBB, DL, TII->get(BEQ))
647 .addReg(StoreVal)
648 .addReg(Mips::ZERO)
649 .addMBB(loopMBB);
650 } else {
651 BuildMI(loopMBB, DL, TII->get(Mips::AND), StoreVal)
652 .addReg(OldVal)
653 .addReg(Mask2);
654 BuildMI(loopMBB, DL, TII->get(Mips::OR), StoreVal)
655 .addReg(StoreVal)
656 .addReg(BinOpRes);
657 BuildMI(loopMBB, DL, TII->get(SC), StoreVal)
658 .addReg(StoreVal)
659 .addReg(Ptr)
660 .addImm(0);
661 BuildMI(loopMBB, DL, TII->get(BEQ))
662 .addReg(StoreVal)
663 .addReg(Mips::ZERO)
664 .addMBB(loopMBB);
665 }
666
667 // sinkMBB:
668 // and maskedoldval1,oldval,mask
669 // srl srlres,maskedoldval1,shiftamt
670 // sign_extend dest,srlres
671
672 if (!DestOK) {
673 sinkMBB->addSuccessor(exitMBB, BranchProbability::getOne());
674 BuildMI(sinkMBB, DL, TII->get(Mips::AND), Dest).addReg(OldVal).addReg(Mask);
675 BuildMI(sinkMBB, DL, TII->get(Mips::SRLV), Dest)
676 .addReg(Dest)
677 .addReg(ShiftAmnt);
678
679 if (STI->hasMips32r2()) {
680 BuildMI(sinkMBB, DL, TII->get(SEOp), Dest).addReg(Dest);
681 } else {
682 const unsigned ShiftImm = SEOp == Mips::SEH ? 16 : 24;
683 BuildMI(sinkMBB, DL, TII->get(Mips::SLL), Dest)
684 .addReg(Dest, RegState::Kill)
685 .addImm(ShiftImm);
686 BuildMI(sinkMBB, DL, TII->get(Mips::SRA), Dest)
687 .addReg(Dest, RegState::Kill)
688 .addImm(ShiftImm);
689 }
690 }
691
692 LivePhysRegs LiveRegs;
693 computeAndAddLiveIns(LiveRegs, *loopMBB);
694 if (loop1MBB) {
695 assert(loop2MBB && "should have 2 loop blocks");
696 computeAndAddLiveIns(LiveRegs, *loop1MBB);
697 computeAndAddLiveIns(LiveRegs, *loop2MBB);
698 }
699 computeAndAddLiveIns(LiveRegs, *sinkMBB);
700 computeAndAddLiveIns(LiveRegs, *exitMBB);
701
702 NMBBI = BB.end();
703 I->eraseFromParent();
704
705 return true;
706}
707
708bool MipsExpandPseudo::expandAtomicBinOp(MachineBasicBlock &BB,
709                                          MachineBasicBlock::iterator I,
710                                          MachineBasicBlock::iterator &NMBBI,
711                                          unsigned Size) {
712 MachineFunction *MF = BB.getParent();
713
714 const bool ArePtrs64bit = STI->getABI().ArePtrs64bit();
715 DebugLoc DL = I->getDebugLoc();
716
717 unsigned LL, SC, ZERO, BEQ, SLT, SLTu, OR, MOVN, MOVZ, SELNEZ, SELEQZ;
718
719 if (Size == 4) {
720 if (STI->inMicroMipsMode()) {
721 LL = STI->hasMips32r6() ? Mips::LL_MMR6 : Mips::LL_MM;
722 SC = STI->hasMips32r6() ? Mips::SC_MMR6 : Mips::SC_MM;
723 BEQ = STI->hasMips32r6() ? Mips::BEQC_MMR6 : Mips::BEQ_MM;
724 SLT = Mips::SLT_MM;
725 SLTu = Mips::SLTu_MM;
726 OR = STI->hasMips32r6() ? Mips::OR_MMR6 : Mips::OR_MM;
727 MOVN = Mips::MOVN_I_MM;
728 MOVZ = Mips::MOVZ_I_MM;
729 SELNEZ = STI->hasMips32r6() ? Mips::SELNEZ_MMR6 : Mips::SELNEZ;
730 SELEQZ = STI->hasMips32r6() ? Mips::SELEQZ_MMR6 : Mips::SELEQZ;
731 } else {
732 LL = STI->hasMips32r6()
733 ? (ArePtrs64bit ? Mips::LL64_R6 : Mips::LL_R6)
734 : (ArePtrs64bit ? Mips::LL64 : Mips::LL);
735 SC = STI->hasMips32r6()
736 ? (ArePtrs64bit ? Mips::SC64_R6 : Mips::SC_R6)
737 : (ArePtrs64bit ? Mips::SC64 : Mips::SC);
738 BEQ = Mips::BEQ;
739 SLT = Mips::SLT;
740 SLTu = Mips::SLTu;
741 OR = Mips::OR;
742 MOVN = Mips::MOVN_I_I;
743 MOVZ = Mips::MOVZ_I_I;
744 SELNEZ = Mips::SELNEZ;
745 SELEQZ = Mips::SELEQZ;
746 }
747
748 ZERO = Mips::ZERO;
749 } else {
750 LL = STI->hasMips64r6() ? Mips::LLD_R6 : Mips::LLD;
751 SC = STI->hasMips64r6() ? Mips::SCD_R6 : Mips::SCD;
752 ZERO = Mips::ZERO_64;
753 BEQ = Mips::BEQ64;
754 SLT = Mips::SLT64;
755 SLTu = Mips::SLTu64;
756 OR = Mips::OR64;
757 MOVN = Mips::MOVN_I64_I64;
758 MOVZ = Mips::MOVZ_I64_I64;
759 SELNEZ = Mips::SELNEZ64;
760 SELEQZ = Mips::SELEQZ64;
761 }
762
763 Register OldVal = I->getOperand(0).getReg();
764 Register Ptr = I->getOperand(1).getReg();
765 Register Incr = I->getOperand(2).getReg();
766 Register Scratch = I->getOperand(3).getReg();
767
768 unsigned Opcode = 0;
769 unsigned AND = 0;
770 unsigned NOR = 0;
771
772 bool IsOr = false;
773 bool IsNand = false;
774 bool IsMin = false;
775 bool IsMax = false;
776 bool IsUnsigned = false;
777
778 switch (I->getOpcode()) {
779 case Mips::ATOMIC_LOAD_ADD_I32_POSTRA:
780 Opcode = Mips::ADDu;
781 break;
782 case Mips::ATOMIC_LOAD_SUB_I32_POSTRA:
783 Opcode = Mips::SUBu;
784 break;
785 case Mips::ATOMIC_LOAD_AND_I32_POSTRA:
786 Opcode = Mips::AND;
787 break;
788 case Mips::ATOMIC_LOAD_OR_I32_POSTRA:
789 Opcode = Mips::OR;
790 break;
791 case Mips::ATOMIC_LOAD_XOR_I32_POSTRA:
792 Opcode = Mips::XOR;
793 break;
794 case Mips::ATOMIC_LOAD_NAND_I32_POSTRA:
795 IsNand = true;
796 AND = Mips::AND;
797 NOR = Mips::NOR;
798 break;
799 case Mips::ATOMIC_SWAP_I32_POSTRA:
800 IsOr = true;
801 break;
802 case Mips::ATOMIC_LOAD_ADD_I64_POSTRA:
803 Opcode = Mips::DADDu;
804 break;
805 case Mips::ATOMIC_LOAD_SUB_I64_POSTRA:
806 Opcode = Mips::DSUBu;
807 break;
808 case Mips::ATOMIC_LOAD_AND_I64_POSTRA:
809 Opcode = Mips::AND64;
810 break;
811 case Mips::ATOMIC_LOAD_OR_I64_POSTRA:
812 Opcode = Mips::OR64;
813 break;
814 case Mips::ATOMIC_LOAD_XOR_I64_POSTRA:
815 Opcode = Mips::XOR64;
816 break;
817 case Mips::ATOMIC_LOAD_NAND_I64_POSTRA:
818 IsNand = true;
819 AND = Mips::AND64;
820 NOR = Mips::NOR64;
821 break;
822 case Mips::ATOMIC_SWAP_I64_POSTRA:
823 IsOr = true;
824 break;
825 case Mips::ATOMIC_LOAD_UMIN_I32_POSTRA:
826 case Mips::ATOMIC_LOAD_UMIN_I64_POSTRA:
827 IsUnsigned = true;
828 [[fallthrough]];
829 case Mips::ATOMIC_LOAD_MIN_I32_POSTRA:
830 case Mips::ATOMIC_LOAD_MIN_I64_POSTRA:
831 IsMin = true;
832 break;
833 case Mips::ATOMIC_LOAD_UMAX_I32_POSTRA:
834 case Mips::ATOMIC_LOAD_UMAX_I64_POSTRA:
835 IsUnsigned = true;
836 [[fallthrough]];
837 case Mips::ATOMIC_LOAD_MAX_I32_POSTRA:
838 case Mips::ATOMIC_LOAD_MAX_I64_POSTRA:
839 IsMax = true;
840 break;
841 default:
842 llvm_unreachable("Unknown pseudo atomic!");
843 }
844
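  // As in the subword expansion, min/max on targets without conditional moves
  // or selects needs explicit branch blocks.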
845 bool NoMovnInstr = (IsMin || IsMax) && !STI->hasMips4() && !STI->hasMips32();
846 const BasicBlock *LLVM_BB = BB.getBasicBlock();
847 MachineBasicBlock *loopMBB = MF->CreateMachineBasicBlock(LLVM_BB);
848 MachineBasicBlock *loop1MBB = nullptr;
849 MachineBasicBlock *loop2MBB = nullptr;
850 if (NoMovnInstr) {
851 loop1MBB = MF->CreateMachineBasicBlock(LLVM_BB);
852 loop2MBB = MF->CreateMachineBasicBlock(LLVM_BB);
853 }
854 MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(LLVM_BB);
855  MachineFunction::iterator It = ++BB.getIterator();
856  MF->insert(It, loopMBB);
857 if (NoMovnInstr) {
858 MF->insert(It, loop1MBB);
859 MF->insert(It, loop2MBB);
860 }
861 MF->insert(It, exitMBB);
862
863 exitMBB->splice(exitMBB->begin(), &BB, std::next(I), BB.end());
864  exitMBB->transferSuccessorsAndUpdatePHIs(&BB);
865
866  BB.addSuccessor(loopMBB, BranchProbability::getOne());
867  if (NoMovnInstr) {
868 loopMBB->addSuccessor(loop1MBB);
869 loopMBB->addSuccessor(loop2MBB);
870 } else {
871 loopMBB->addSuccessor(exitMBB);
872 loopMBB->addSuccessor(loopMBB);
873 }
874 loopMBB->normalizeSuccProbs();
875 if (NoMovnInstr) {
876 loop1MBB->addSuccessor(loop2MBB);
877 loop2MBB->addSuccessor(loopMBB);
878 loop2MBB->addSuccessor(exitMBB);
879 }
880
881 BuildMI(loopMBB, DL, TII->get(LL), OldVal).addReg(Ptr).addImm(0);
882 assert((OldVal != Ptr) && "Clobbered the wrong ptr reg!");
883 assert((OldVal != Incr) && "Clobbered the wrong reg!");
884 if (IsMin || IsMax) {
885
886 assert(I->getNumOperands() == 5 &&
887 "Atomics min|max|umin|umax use an additional register");
888 MCRegister Scratch2 = I->getOperand(4).getReg().asMCReg();
889
890 // On Mips64 result of slt is GPR32.
891 MCRegister Scratch2_32 =
892 (Size == 8) ? STI->getRegisterInfo()->getSubReg(Scratch2, Mips::sub_32)
893 : Scratch2;
894
895 unsigned SLTScratch2 = IsUnsigned ? SLTu : SLT;
896 unsigned SELIncr = IsMax ? SELNEZ : SELEQZ;
897 unsigned SELOldVal = IsMax ? SELEQZ : SELNEZ;
898 unsigned MOVIncr = IsMax ? MOVN : MOVZ;
899
900 // unsigned: sltu Scratch2, oldVal, Incr
901 // signed: slt Scratch2, oldVal, Incr
902 BuildMI(loopMBB, DL, TII->get(SLTScratch2), Scratch2_32)
903 .addReg(OldVal)
904 .addReg(Incr);
905
906 if (STI->hasMips64r6() || STI->hasMips32r6()) {
907 // max: seleqz Scratch, OldVal, Scratch2
908 // selnez Scratch2, Incr, Scratch2
909 // or Scratch, Scratch, Scratch2
910 // min: selnez Scratch, OldVal, Scratch2
911 // seleqz Scratch2, Incr, Scratch2
912 // or Scratch, Scratch, Scratch2
913 BuildMI(loopMBB, DL, TII->get(SELOldVal), Scratch)
914 .addReg(OldVal)
915 .addReg(Scratch2);
916 BuildMI(loopMBB, DL, TII->get(SELIncr), Scratch2)
917 .addReg(Incr)
918 .addReg(Scratch2);
919 BuildMI(loopMBB, DL, TII->get(OR), Scratch)
920 .addReg(Scratch)
921 .addReg(Scratch2);
922 } else if (STI->hasMips4() || STI->hasMips32()) {
923 // max: move Scratch, OldVal
924 // movn Scratch, Incr, Scratch2, Scratch
925 // min: move Scratch, OldVal
926 // movz Scratch, Incr, Scratch2, Scratch
927 BuildMI(loopMBB, DL, TII->get(OR), Scratch)
928 .addReg(OldVal)
929 .addReg(ZERO);
930 BuildMI(loopMBB, DL, TII->get(MOVIncr), Scratch)
931 .addReg(Incr)
932 .addReg(Scratch2)
933 .addReg(Scratch);
934 } else {
935 // if min:
936 // loopMBB: move Scratch, OldVal
937 // beq Scratch2_32, 0, loop1MBB
938 // j loop2MBB
939 // loop1MBB: move Scratch, Incr
940 // loop2MBB: sc $2, 0($4)
941 // beqz $2, $BB0_1
942 // nop
943 //
944 // if max:
945 // loopMBB: move Scratch, Incr
946 // beq Scratch2_32, 0, loop1MBB
947 // j loop2MBB
948 // loop1MBB: move Scratch, OldVal
949 // loop2MBB: sc $2, 0($4)
950 // beqz $2, $BB0_1
951 // nop
952 if (IsMin) {
953 BuildMI(loopMBB, DL, TII->get(OR), Scratch).addReg(OldVal).addReg(ZERO);
954 BuildMI(loop1MBB, DL, TII->get(OR), Scratch).addReg(Incr).addReg(ZERO);
955 } else {
956 BuildMI(loopMBB, DL, TII->get(OR), Scratch).addReg(Incr).addReg(ZERO);
957 BuildMI(loop1MBB, DL, TII->get(OR), Scratch)
958 .addReg(OldVal)
959 .addReg(ZERO);
960 }
961 BuildMI(loopMBB, DL, TII->get(BEQ))
962 .addReg(Scratch2_32)
963 .addReg(ZERO)
964 .addMBB(loop1MBB);
965 BuildMI(loopMBB, DL, TII->get(Mips::J)).addMBB(loop2MBB);
966 }
967
968 } else if (Opcode) {
969 BuildMI(loopMBB, DL, TII->get(Opcode), Scratch).addReg(OldVal).addReg(Incr);
970 } else if (IsNand) {
971 assert(AND && NOR &&
972 "Unknown nand instruction for atomic pseudo expansion");
973 BuildMI(loopMBB, DL, TII->get(AND), Scratch).addReg(OldVal).addReg(Incr);
974 BuildMI(loopMBB, DL, TII->get(NOR), Scratch).addReg(ZERO).addReg(Scratch);
975 } else {
976 assert(IsOr && OR && "Unknown instruction for atomic pseudo expansion!");
977 (void)IsOr;
978 BuildMI(loopMBB, DL, TII->get(OR), Scratch).addReg(Incr).addReg(ZERO);
979 }
980
981 if (NoMovnInstr) {
982 BuildMI(loop2MBB, DL, TII->get(SC), Scratch)
983 .addReg(Scratch)
984 .addReg(Ptr)
985 .addImm(0);
986 BuildMI(loop2MBB, DL, TII->get(BEQ))
987 .addReg(Scratch)
988 .addReg(ZERO)
989 .addMBB(loopMBB);
990 } else {
991 BuildMI(loopMBB, DL, TII->get(SC), Scratch)
992 .addReg(Scratch)
993 .addReg(Ptr)
994 .addImm(0);
995 BuildMI(loopMBB, DL, TII->get(BEQ))
996 .addReg(Scratch)
997 .addReg(ZERO)
998 .addMBB(loopMBB);
999 }
1000
1001 NMBBI = BB.end();
1002 I->eraseFromParent();
1003
1004 LivePhysRegs LiveRegs;
1005 computeAndAddLiveIns(LiveRegs, *loopMBB);
1006 if (loop1MBB) {
1007 assert(loop2MBB && "should have 2 loop blocks");
1008 computeAndAddLiveIns(LiveRegs, *loop1MBB);
1009 computeAndAddLiveIns(LiveRegs, *loop2MBB);
1010 }
1011 computeAndAddLiveIns(LiveRegs, *exitMBB);
1012
1013 return true;
1014}
1015
1016bool MipsExpandPseudo::expandMI(MachineBasicBlock &MBB,
1017                                MachineBasicBlock::iterator MBBI,
1018                                MachineBasicBlock::iterator &NMBB) {
1019
1020 bool Modified = false;
1021
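  // Each expansion helper builds the replacement blocks, erases the pseudo,
  // and returns true; opcodes that are not recognized are left untouched.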
1022 switch (MBBI->getOpcode()) {
1023 case Mips::ATOMIC_CMP_SWAP_I32_POSTRA:
1024 case Mips::ATOMIC_CMP_SWAP_I64_POSTRA:
1025 return expandAtomicCmpSwap(MBB, MBBI, NMBB);
1026 case Mips::ATOMIC_CMP_SWAP_I8_POSTRA:
1027 case Mips::ATOMIC_CMP_SWAP_I16_POSTRA:
1028 return expandAtomicCmpSwapSubword(MBB, MBBI, NMBB);
1029 case Mips::ATOMIC_SWAP_I8_POSTRA:
1030 case Mips::ATOMIC_SWAP_I16_POSTRA:
1031 case Mips::ATOMIC_LOAD_NAND_I8_POSTRA:
1032 case Mips::ATOMIC_LOAD_NAND_I16_POSTRA:
1033 case Mips::ATOMIC_LOAD_ADD_I8_POSTRA:
1034 case Mips::ATOMIC_LOAD_ADD_I16_POSTRA:
1035 case Mips::ATOMIC_LOAD_SUB_I8_POSTRA:
1036 case Mips::ATOMIC_LOAD_SUB_I16_POSTRA:
1037 case Mips::ATOMIC_LOAD_AND_I8_POSTRA:
1038 case Mips::ATOMIC_LOAD_AND_I16_POSTRA:
1039 case Mips::ATOMIC_LOAD_OR_I8_POSTRA:
1040 case Mips::ATOMIC_LOAD_OR_I16_POSTRA:
1041 case Mips::ATOMIC_LOAD_XOR_I8_POSTRA:
1042 case Mips::ATOMIC_LOAD_XOR_I16_POSTRA:
1043 case Mips::ATOMIC_LOAD_MIN_I8_POSTRA:
1044 case Mips::ATOMIC_LOAD_MIN_I16_POSTRA:
1045 case Mips::ATOMIC_LOAD_MAX_I8_POSTRA:
1046 case Mips::ATOMIC_LOAD_MAX_I16_POSTRA:
1047 case Mips::ATOMIC_LOAD_UMIN_I8_POSTRA:
1048 case Mips::ATOMIC_LOAD_UMIN_I16_POSTRA:
1049 case Mips::ATOMIC_LOAD_UMAX_I8_POSTRA:
1050 case Mips::ATOMIC_LOAD_UMAX_I16_POSTRA:
1051 return expandAtomicBinOpSubword(MBB, MBBI, NMBB);
1052 case Mips::ATOMIC_LOAD_ADD_I32_POSTRA:
1053 case Mips::ATOMIC_LOAD_SUB_I32_POSTRA:
1054 case Mips::ATOMIC_LOAD_AND_I32_POSTRA:
1055 case Mips::ATOMIC_LOAD_OR_I32_POSTRA:
1056 case Mips::ATOMIC_LOAD_XOR_I32_POSTRA:
1057 case Mips::ATOMIC_LOAD_NAND_I32_POSTRA:
1058 case Mips::ATOMIC_SWAP_I32_POSTRA:
1059 case Mips::ATOMIC_LOAD_MIN_I32_POSTRA:
1060 case Mips::ATOMIC_LOAD_MAX_I32_POSTRA:
1061 case Mips::ATOMIC_LOAD_UMIN_I32_POSTRA:
1062 case Mips::ATOMIC_LOAD_UMAX_I32_POSTRA:
1063 return expandAtomicBinOp(MBB, MBBI, NMBB, 4);
1064 case Mips::ATOMIC_LOAD_ADD_I64_POSTRA:
1065 case Mips::ATOMIC_LOAD_SUB_I64_POSTRA:
1066 case Mips::ATOMIC_LOAD_AND_I64_POSTRA:
1067 case Mips::ATOMIC_LOAD_OR_I64_POSTRA:
1068 case Mips::ATOMIC_LOAD_XOR_I64_POSTRA:
1069 case Mips::ATOMIC_LOAD_NAND_I64_POSTRA:
1070 case Mips::ATOMIC_SWAP_I64_POSTRA:
1071 case Mips::ATOMIC_LOAD_MIN_I64_POSTRA:
1072 case Mips::ATOMIC_LOAD_MAX_I64_POSTRA:
1073 case Mips::ATOMIC_LOAD_UMIN_I64_POSTRA:
1074 case Mips::ATOMIC_LOAD_UMAX_I64_POSTRA:
1075 return expandAtomicBinOp(MBB, MBBI, NMBB, 8);
1076 default:
1077 return Modified;
1078 }
1079}
1080
1081bool MipsExpandPseudo::expandMBB(MachineBasicBlock &MBB) {
1082 bool Modified = false;
1083
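  // Advance through a saved 'next' iterator, since expandMI erases the pseudo
  // it expands and updates NMBBI.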
1084  MachineBasicBlock::iterator MBBI = MBB.begin(), E = MBB.end();
1085  while (MBBI != E) {
1086 MachineBasicBlock::iterator NMBBI = std::next(MBBI);
1087 Modified |= expandMI(MBB, MBBI, NMBBI);
1088 MBBI = NMBBI;
1089 }
1090
1091 return Modified;
1092}
1093
1094bool MipsExpandPseudo::runOnMachineFunction(MachineFunction &MF) {
1095 STI = &MF.getSubtarget<MipsSubtarget>();
1096 TII = STI->getInstrInfo();
1097
1098 bool Modified = false;
1099 for (MachineBasicBlock &MBB : MF)
1100 Modified |= expandMBB(MBB);
1101
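  // Expansion inserts new basic blocks, so renumber them to keep the block
  // numbering dense.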
1102 if (Modified)
1103 MF.RenumberBlocks();
1104
1105 return Modified;
1106}
1107
1108/// createMipsExpandPseudoPass - returns an instance of the pseudo instruction
1109/// expansion pass.
1110 FunctionPass *llvm::createMipsExpandPseudoPass() {
1111   return new MipsExpandPseudo();
1112}