SystemZInstrInfo.cpp
1//===-- SystemZInstrInfo.cpp - SystemZ instruction information ------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file contains the SystemZ implementation of the TargetInstrInfo class.
10//
11//===----------------------------------------------------------------------===//
12
13#include "SystemZInstrInfo.h"
15#include "SystemZ.h"
16#include "SystemZInstrBuilder.h"
17#include "SystemZSubtarget.h"
18#include "llvm/ADT/Statistic.h"
35#include "llvm/MC/MCInstrDesc.h"
41#include <cassert>
42#include <cstdint>
43#include <iterator>
44
45using namespace llvm;
46
47#define GET_INSTRINFO_CTOR_DTOR
48#define GET_INSTRMAP_INFO
49#include "SystemZGenInstrInfo.inc"
50
51#define DEBUG_TYPE "systemz-II"
52
53// Return a mask with Count low bits set.
54static uint64_t allOnes(unsigned int Count) {
55 return Count == 0 ? 0 : (uint64_t(1) << (Count - 1) << 1) - 1;
56}
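// For example, allOnes(16) == 0xffff and allOnes(64) == ~0ULL; shifting in
// two steps avoids the undefined behaviour a single "<< Count" would have
// when Count is 64.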
57
58// Pin the vtable to this file.
59void SystemZInstrInfo::anchor() {}
60
61SystemZInstrInfo::SystemZInstrInfo(const SystemZSubtarget &sti)
62 : SystemZGenInstrInfo(-1, -1),
63 RI(sti.getSpecialRegisters()->getReturnFunctionAddressRegister()),
64 STI(sti) {}
65
66// MI is a 128-bit load or store. Split it into two 64-bit loads or stores,
67// each having the opcode given by NewOpcode.
68void SystemZInstrInfo::splitMove(MachineBasicBlock::iterator MI,
69 unsigned NewOpcode) const {
70 MachineBasicBlock *MBB = MI->getParent();
71 MachineFunction &MF = *MBB->getParent();
72
73 // Get two load or store instructions. Use the original instruction for one
74 // of them (arbitrarily the second here) and create a clone for the other.
75 MachineInstr *EarlierMI = MF.CloneMachineInstr(&*MI);
76 MBB->insert(MI, EarlierMI);
77
78 // Set up the two 64-bit registers and remember super reg and its flags.
79 MachineOperand &HighRegOp = EarlierMI->getOperand(0);
80 MachineOperand &LowRegOp = MI->getOperand(0);
81 Register Reg128 = LowRegOp.getReg();
82 unsigned Reg128Killed = getKillRegState(LowRegOp.isKill());
83 unsigned Reg128Undef = getUndefRegState(LowRegOp.isUndef());
84 HighRegOp.setReg(RI.getSubReg(HighRegOp.getReg(), SystemZ::subreg_h64));
85 LowRegOp.setReg(RI.getSubReg(LowRegOp.getReg(), SystemZ::subreg_l64));
86
87 if (MI->mayStore()) {
88 // Add implicit uses of the super register in case one of the subregs is
89 // undefined. We could track liveness and skip storing an undefined
90 // subreg, but this is hopefully rare (discovered with llvm-stress).
91 // If Reg128 was killed, set kill flag on MI.
92 unsigned Reg128UndefImpl = (Reg128Undef | RegState::Implicit);
93 MachineInstrBuilder(MF, EarlierMI).addReg(Reg128, Reg128UndefImpl);
94 MachineInstrBuilder(MF, MI).addReg(Reg128, (Reg128UndefImpl | Reg128Killed));
95 }
96
97 // The address in the first (high) instruction is already correct.
98 // Adjust the offset in the second (low) instruction.
99 MachineOperand &HighOffsetOp = EarlierMI->getOperand(2);
100 MachineOperand &LowOffsetOp = MI->getOperand(2);
101 LowOffsetOp.setImm(LowOffsetOp.getImm() + 8);
102
103 // Clear the kill flags on the registers in the first instruction.
104 if (EarlierMI->getOperand(0).isReg() && EarlierMI->getOperand(0).isUse())
105 EarlierMI->getOperand(0).setIsKill(false);
106 EarlierMI->getOperand(1).setIsKill(false);
107 EarlierMI->getOperand(3).setIsKill(false);
108
109 // Set the opcodes.
110 unsigned HighOpcode = getOpcodeForOffset(NewOpcode, HighOffsetOp.getImm());
111 unsigned LowOpcode = getOpcodeForOffset(NewOpcode, LowOffsetOp.getImm());
112 assert(HighOpcode && LowOpcode && "Both offsets should be in range");
113
114 EarlierMI->setDesc(get(HighOpcode));
115 MI->setDesc(get(LowOpcode));
116}
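// For example, assuming %r0q is the 128-bit pair formed by %r0 (high half)
// and %r1 (low half), the pseudo
//   L128 %r0q, 16(%r15)
// is split into
//   LG %r0, 16(%r15)
//   LG %r1, 24(%r15)
// with the low half using the original offset plus 8.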
117
118// Split ADJDYNALLOC instruction MI.
119void SystemZInstrInfo::splitAdjDynAlloc(MachineBasicBlock::iterator MI) const {
120 MachineBasicBlock *MBB = MI->getParent();
121 MachineFunction &MF = *MBB->getParent();
122 MachineFrameInfo &MFFrame = MF.getFrameInfo();
123 MachineOperand &OffsetMO = MI->getOperand(2);
124 SystemZCallingConventionRegisters *Regs = STI.getSpecialRegisters();
125
126 uint64_t Offset = (MFFrame.getMaxCallFrameSize() +
127 Regs->getCallFrameSize() +
128 Regs->getStackPointerBias() +
129 OffsetMO.getImm());
130 unsigned NewOpcode = getOpcodeForOffset(SystemZ::LA, Offset);
131 assert(NewOpcode && "No support for huge argument lists yet");
132 MI->setDesc(get(NewOpcode));
133 OffsetMO.setImm(Offset);
134}
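// For example, on the ELF ABI (160-byte call frame, zero stack pointer bias),
// an ADJDYNALLOC with immediate 0 in a function whose maximum call frame is
// 32 bytes is rewritten to an LA with displacement 192; LAY is chosen instead
// once the combined offset no longer fits in an unsigned 12-bit field.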
135
136// MI is an RI-style pseudo instruction. Replace it with LowOpcode
137// if the first operand is a low GR32 and HighOpcode if the first operand
138// is a high GR32. ConvertHigh is true if LowOpcode takes a signed operand
139// and HighOpcode takes an unsigned 32-bit operand. In those cases,
140// MI has the same kind of operand as LowOpcode, so needs to be converted
141// if HighOpcode is used.
142void SystemZInstrInfo::expandRIPseudo(MachineInstr &MI, unsigned LowOpcode,
143 unsigned HighOpcode,
144 bool ConvertHigh) const {
145 Register Reg = MI.getOperand(0).getReg();
146 bool IsHigh = SystemZ::isHighReg(Reg);
147 MI.setDesc(get(IsHigh ? HighOpcode : LowOpcode));
148 if (IsHigh && ConvertHigh)
149 MI.getOperand(1).setImm(uint32_t(MI.getOperand(1).getImm()));
150}
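// For example, IIFMux on a low register becomes IILF and on a high register
// becomes IIHF. With ConvertHigh set (LHIMux), the signed operand is first
// reinterpreted as unsigned 32 bits, so an immediate of -1 becomes 0xffffffff
// before IIHF is used.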
151
152// MI is a three-operand RIE-style pseudo instruction. Replace it with
153// LowOpcodeK if the registers are both low GR32s, otherwise use a move
154// followed by HighOpcode or LowOpcode, depending on whether the target
155// is a high or low GR32.
156void SystemZInstrInfo::expandRIEPseudo(MachineInstr &MI, unsigned LowOpcode,
157 unsigned LowOpcodeK,
158 unsigned HighOpcode) const {
159 Register DestReg = MI.getOperand(0).getReg();
160 Register SrcReg = MI.getOperand(1).getReg();
161 bool DestIsHigh = SystemZ::isHighReg(DestReg);
162 bool SrcIsHigh = SystemZ::isHighReg(SrcReg);
163 if (!DestIsHigh && !SrcIsHigh)
164 MI.setDesc(get(LowOpcodeK));
165 else {
166 if (DestReg != SrcReg) {
167 emitGRX32Move(*MI.getParent(), MI, MI.getDebugLoc(), DestReg, SrcReg,
168 SystemZ::LR, 32, MI.getOperand(1).isKill(),
169 MI.getOperand(1).isUndef());
170 MI.getOperand(1).setReg(DestReg);
171 }
172 MI.setDesc(get(DestIsHigh ? HighOpcode : LowOpcode));
173 MI.tieOperands(0, 1);
174 }
175}
176
177// MI is an RXY-style pseudo instruction. Replace it with LowOpcode
178// if the first operand is a low GR32 and HighOpcode if the first operand
179// is a high GR32.
180void SystemZInstrInfo::expandRXYPseudo(MachineInstr &MI, unsigned LowOpcode,
181 unsigned HighOpcode) const {
182 Register Reg = MI.getOperand(0).getReg();
183 unsigned Opcode = getOpcodeForOffset(
184 SystemZ::isHighReg(Reg) ? HighOpcode : LowOpcode,
185 MI.getOperand(2).getImm());
186 MI.setDesc(get(Opcode));
187}
188
189// MI is a load-on-condition pseudo instruction with a single register
190// (source or destination) operand. Replace it with LowOpcode if the
191// register is a low GR32 and HighOpcode if the register is a high GR32.
192void SystemZInstrInfo::expandLOCPseudo(MachineInstr &MI, unsigned LowOpcode,
193 unsigned HighOpcode) const {
194 Register Reg = MI.getOperand(0).getReg();
195 unsigned Opcode = SystemZ::isHighReg(Reg) ? HighOpcode : LowOpcode;
196 MI.setDesc(get(Opcode));
197}
198
199// MI is an RR-style pseudo instruction that zero-extends the low Size bits
200// of one GRX32 into another. Replace it with LowOpcode if both operands
201// are low registers, otherwise use RISB[LH]G.
202void SystemZInstrInfo::expandZExtPseudo(MachineInstr &MI, unsigned LowOpcode,
203 unsigned Size) const {
204 MachineInstrBuilder MIB =
205 emitGRX32Move(*MI.getParent(), MI, MI.getDebugLoc(),
206 MI.getOperand(0).getReg(), MI.getOperand(1).getReg(), LowOpcode,
207 Size, MI.getOperand(1).isKill(), MI.getOperand(1).isUndef());
208
209 // Keep the remaining operands as-is.
210 for (const MachineOperand &MO : llvm::drop_begin(MI.operands(), 2))
211 MIB.add(MO);
212
213 MI.eraseFromParent();
214}
215
216void SystemZInstrInfo::expandLoadStackGuard(MachineInstr *MI) const {
217 MachineBasicBlock *MBB = MI->getParent();
218 MachineFunction &MF = *MBB->getParent();
219 const Register Reg64 = MI->getOperand(0).getReg();
220 const Register Reg32 = RI.getSubReg(Reg64, SystemZ::subreg_l32);
221
222 // EAR can only load the low subregister, so use a shift for %a0 to produce
223 // the GR containing %a0 and %a1.
224
225 // ear <reg>, %a0
226 BuildMI(*MBB, MI, MI->getDebugLoc(), get(SystemZ::EAR), Reg32)
227 .addReg(SystemZ::A0)
228 .addReg(Reg64, RegState::ImplicitDefine);
229
230 // sllg <reg>, <reg>, 32
231 BuildMI(*MBB, MI, MI->getDebugLoc(), get(SystemZ::SLLG), Reg64)
232 .addReg(Reg64)
233 .addReg(0)
234 .addImm(32);
235
236 // ear <reg>, %a1
237 BuildMI(*MBB, MI, MI->getDebugLoc(), get(SystemZ::EAR), Reg32)
238 .addReg(SystemZ::A1);
239
240 // lg <reg>, 40(<reg>)
241 MI->setDesc(get(SystemZ::LG));
242 MachineInstrBuilder(MF, MI).addReg(Reg64).addImm(40).addReg(0);
243}
244
245// Emit a zero-extending move from 32-bit GPR SrcReg to 32-bit GPR
246// DestReg before MBBI in MBB. Use LowLowOpcode when both DestReg and SrcReg
247// are low registers, otherwise use RISB[LH]G. Size is the number of bits
248// taken from the low end of SrcReg (8 for LLCR, 16 for LLHR and 32 for LR).
249// KillSrc is true if this move is the last use of SrcReg.
250MachineInstrBuilder
251SystemZInstrInfo::emitGRX32Move(MachineBasicBlock &MBB,
252 MachineBasicBlock::iterator MBBI,
253 const DebugLoc &DL, unsigned DestReg,
254 unsigned SrcReg, unsigned LowLowOpcode,
255 unsigned Size, bool KillSrc,
256 bool UndefSrc) const {
257 unsigned Opcode;
258 bool DestIsHigh = SystemZ::isHighReg(DestReg);
259 bool SrcIsHigh = SystemZ::isHighReg(SrcReg);
260 if (DestIsHigh && SrcIsHigh)
261 Opcode = SystemZ::RISBHH;
262 else if (DestIsHigh && !SrcIsHigh)
263 Opcode = SystemZ::RISBHL;
264 else if (!DestIsHigh && SrcIsHigh)
265 Opcode = SystemZ::RISBLH;
266 else {
267 return BuildMI(MBB, MBBI, DL, get(LowLowOpcode), DestReg)
268 .addReg(SrcReg, getKillRegState(KillSrc) | getUndefRegState(UndefSrc));
269 }
270 unsigned Rotate = (DestIsHigh != SrcIsHigh ? 32 : 0);
271 return BuildMI(MBB, MBBI, DL, get(Opcode), DestReg)
272 .addReg(DestReg, RegState::Undef)
273 .addReg(SrcReg, getKillRegState(KillSrc) | getUndefRegState(UndefSrc))
274 .addImm(32 - Size).addImm(128 + 31).addImm(Rotate);
275}
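// For example, a 32-bit move from a low source to a high destination becomes
//   RISBHL DestReg, SrcReg, 0, 159, 32
// i.e. I3 = 32 - Size = 0, I4 = 31 + 128 = 159 (the added 128 requests that
// bits outside the selected range be zeroed), and a rotate of 32 to cross
// between the low and high register halves.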
276
277MachineInstr *SystemZInstrInfo::commuteInstructionImpl(MachineInstr &MI,
278 bool NewMI,
279 unsigned OpIdx1,
280 unsigned OpIdx2) const {
281 auto cloneIfNew = [NewMI](MachineInstr &MI) -> MachineInstr & {
282 if (NewMI)
283 return *MI.getParent()->getParent()->CloneMachineInstr(&MI);
284 return MI;
285 };
286
287 switch (MI.getOpcode()) {
288 case SystemZ::SELRMux:
289 case SystemZ::SELFHR:
290 case SystemZ::SELR:
291 case SystemZ::SELGR:
292 case SystemZ::LOCRMux:
293 case SystemZ::LOCFHR:
294 case SystemZ::LOCR:
295 case SystemZ::LOCGR: {
296 auto &WorkingMI = cloneIfNew(MI);
297 // Invert condition.
298 unsigned CCValid = WorkingMI.getOperand(3).getImm();
299 unsigned CCMask = WorkingMI.getOperand(4).getImm();
300 WorkingMI.getOperand(4).setImm(CCMask ^ CCValid);
301 return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
302 OpIdx1, OpIdx2);
303 }
304 default:
305 return TargetInstrInfo::commuteInstructionImpl(MI, NewMI, OpIdx1, OpIdx2);
306 }
307}
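// Swapping the two selected values requires the opposite condition, and
// XOR-ing CCMask with CCValid inverts the mask within the valid CC values.
// For example, CCValid = CCMASK_ICMP (0xe) with CCMask = CCMASK_CMP_EQ (0x8)
// becomes CCMask = 0x6, i.e. CCMASK_CMP_NE.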
308
309// If MI is a simple load or store for a frame object, return the register
310// it loads or stores and set FrameIndex to the index of the frame object.
311// Return 0 otherwise.
312//
313// Flag is SimpleBDXLoad for loads and SimpleBDXStore for stores.
314static int isSimpleMove(const MachineInstr &MI, int &FrameIndex,
315 unsigned Flag) {
316 const MCInstrDesc &MCID = MI.getDesc();
317 if ((MCID.TSFlags & Flag) && MI.getOperand(1).isFI() &&
318 MI.getOperand(2).getImm() == 0 && MI.getOperand(3).getReg() == 0) {
319 FrameIndex = MI.getOperand(1).getIndex();
320 return MI.getOperand(0).getReg();
321 }
322 return 0;
323}
324
325unsigned SystemZInstrInfo::isLoadFromStackSlot(const MachineInstr &MI,
326 int &FrameIndex) const {
327 return isSimpleMove(MI, FrameIndex, SystemZII::SimpleBDXLoad);
328}
329
330unsigned SystemZInstrInfo::isStoreToStackSlot(const MachineInstr &MI,
331 int &FrameIndex) const {
332 return isSimpleMove(MI, FrameIndex, SystemZII::SimpleBDXStore);
333}
334
335bool SystemZInstrInfo::isStackSlotCopy(const MachineInstr &MI,
336 int &DestFrameIndex,
337 int &SrcFrameIndex) const {
338 // Check for MVC 0(Length,FI1),0(FI2)
339 const MachineFrameInfo &MFI = MI.getParent()->getParent()->getFrameInfo();
340 if (MI.getOpcode() != SystemZ::MVC || !MI.getOperand(0).isFI() ||
341 MI.getOperand(1).getImm() != 0 || !MI.getOperand(3).isFI() ||
342 MI.getOperand(4).getImm() != 0)
343 return false;
344
345 // Check that Length covers the full slots.
346 int64_t Length = MI.getOperand(2).getImm();
347 unsigned FI1 = MI.getOperand(0).getIndex();
348 unsigned FI2 = MI.getOperand(3).getIndex();
349 if (MFI.getObjectSize(FI1) != Length ||
350 MFI.getObjectSize(FI2) != Length)
351 return false;
352
353 DestFrameIndex = FI1;
354 SrcFrameIndex = FI2;
355 return true;
356}
357
358bool SystemZInstrInfo::analyzeBranch(MachineBasicBlock &MBB,
359 MachineBasicBlock *&TBB,
360 MachineBasicBlock *&FBB,
361 SmallVectorImpl<MachineOperand> &Cond,
362 bool AllowModify) const {
363 // Most of the code and comments here are boilerplate.
364
365 // Start from the bottom of the block and work up, examining the
366 // terminator instructions.
367 MachineBasicBlock::iterator I = MBB.end();
368 while (I != MBB.begin()) {
369 --I;
370 if (I->isDebugInstr())
371 continue;
372
373 // Working from the bottom, when we see a non-terminator instruction, we're
374 // done.
375 if (!isUnpredicatedTerminator(*I))
376 break;
377
378 // A terminator that isn't a branch can't easily be handled by this
379 // analysis.
380 if (!I->isBranch())
381 return true;
382
383 // Can't handle indirect branches.
384 SystemZII::Branch Branch(getBranchInfo(*I));
385 if (!Branch.hasMBBTarget())
386 return true;
387
388 // Punt on compound branches.
389 if (Branch.Type != SystemZII::BranchNormal)
390 return true;
391
392 if (Branch.CCMask == SystemZ::CCMASK_ANY) {
393 // Handle unconditional branches.
394 if (!AllowModify) {
395 TBB = Branch.getMBBTarget();
396 continue;
397 }
398
399 // If the block has any instructions after a JMP, delete them.
400 MBB.erase(std::next(I), MBB.end());
401
402 Cond.clear();
403 FBB = nullptr;
404
405 // Delete the JMP if it's equivalent to a fall-through.
406 if (MBB.isLayoutSuccessor(Branch.getMBBTarget())) {
407 TBB = nullptr;
408 I->eraseFromParent();
409 I = MBB.end();
410 continue;
411 }
412
413 // TBB is used to indicate the unconditional destination.
414 TBB = Branch.getMBBTarget();
415 continue;
416 }
417
418 // Working from the bottom, handle the first conditional branch.
419 if (Cond.empty()) {
420 // FIXME: add X86-style branch swap
421 FBB = TBB;
422 TBB = Branch.getMBBTarget();
423 Cond.push_back(MachineOperand::CreateImm(Branch.CCValid));
424 Cond.push_back(MachineOperand::CreateImm(Branch.CCMask));
425 continue;
426 }
427
428 // Handle subsequent conditional branches.
429 assert(Cond.size() == 2 && TBB && "Should have seen a conditional branch");
430
431 // Only handle the case where all conditional branches branch to the same
432 // destination.
433 if (TBB != Branch.getMBBTarget())
434 return true;
435
436 // If the conditions are the same, we can leave them alone.
437 unsigned OldCCValid = Cond[0].getImm();
438 unsigned OldCCMask = Cond[1].getImm();
439 if (OldCCValid == Branch.CCValid && OldCCMask == Branch.CCMask)
440 continue;
441
442 // FIXME: Try combining conditions like X86 does. Should be easy on Z!
443 return false;
444 }
445
446 return false;
447}
448
449unsigned SystemZInstrInfo::removeBranch(MachineBasicBlock &MBB,
450 int *BytesRemoved) const {
451 assert(!BytesRemoved && "code size not handled");
452
453 // Most of the code and comments here are boilerplate.
454 MachineBasicBlock::iterator I = MBB.end();
455 unsigned Count = 0;
456
457 while (I != MBB.begin()) {
458 --I;
459 if (I->isDebugInstr())
460 continue;
461 if (!I->isBranch())
462 break;
463 if (!getBranchInfo(*I).hasMBBTarget())
464 break;
465 // Remove the branch.
466 I->eraseFromParent();
467 I = MBB.end();
468 ++Count;
469 }
470
471 return Count;
472}
473
474bool SystemZInstrInfo::
475reverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const {
476 assert(Cond.size() == 2 && "Invalid condition");
477 Cond[1].setImm(Cond[1].getImm() ^ Cond[0].getImm());
478 return false;
479}
480
481unsigned SystemZInstrInfo::insertBranch(MachineBasicBlock &MBB,
482 MachineBasicBlock *TBB,
483 MachineBasicBlock *FBB,
484 ArrayRef<MachineOperand> Cond,
485 const DebugLoc &DL,
486 int *BytesAdded) const {
487 // In this function we output 32-bit branches, which should always
488 // have enough range. They can be shortened and relaxed by later code
489 // in the pipeline, if desired.
490
491 // Shouldn't be a fall through.
492 assert(TBB && "insertBranch must not be told to insert a fallthrough");
493 assert((Cond.size() == 2 || Cond.size() == 0) &&
494 "SystemZ branch conditions have one component!");
495 assert(!BytesAdded && "code size not handled");
496
497 if (Cond.empty()) {
498 // Unconditional branch?
499 assert(!FBB && "Unconditional branch with multiple successors!");
500 BuildMI(&MBB, DL, get(SystemZ::J)).addMBB(TBB);
501 return 1;
502 }
503
504 // Conditional branch.
505 unsigned Count = 0;
506 unsigned CCValid = Cond[0].getImm();
507 unsigned CCMask = Cond[1].getImm();
508 BuildMI(&MBB, DL, get(SystemZ::BRC))
509 .addImm(CCValid).addImm(CCMask).addMBB(TBB);
510 ++Count;
511
512 if (FBB) {
513 // Two-way Conditional branch. Insert the second branch.
514 BuildMI(&MBB, DL, get(SystemZ::J)).addMBB(FBB);
515 ++Count;
516 }
517 return Count;
518}
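// For example, Cond = {CCMASK_ICMP, CCMASK_CMP_EQ} with both a true and a
// false destination emits
//   BRC 14, 8, TBB
//   J FBB
// which later passes may shorten or relax as described above.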
519
520bool SystemZInstrInfo::analyzeCompare(const MachineInstr &MI, Register &SrcReg,
521 Register &SrcReg2, int64_t &Mask,
522 int64_t &Value) const {
523 assert(MI.isCompare() && "Caller should have checked for a comparison");
524
525 if (MI.getNumExplicitOperands() == 2 && MI.getOperand(0).isReg() &&
526 MI.getOperand(1).isImm()) {
527 SrcReg = MI.getOperand(0).getReg();
528 SrcReg2 = 0;
529 Value = MI.getOperand(1).getImm();
530 Mask = ~0;
531 return true;
532 }
533
534 return false;
535}
536
537bool SystemZInstrInfo::canInsertSelect(const MachineBasicBlock &MBB,
538 ArrayRef<MachineOperand> Pred,
539 Register DstReg, Register TrueReg,
540 Register FalseReg, int &CondCycles,
541 int &TrueCycles,
542 int &FalseCycles) const {
543 // Not all subtargets have LOCR instructions.
544 if (!STI.hasLoadStoreOnCond())
545 return false;
546 if (Pred.size() != 2)
547 return false;
548
549 // Check register classes.
550 const MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
551 const TargetRegisterClass *RC =
552 RI.getCommonSubClass(MRI.getRegClass(TrueReg), MRI.getRegClass(FalseReg));
553 if (!RC)
554 return false;
555
556 // We have LOCR instructions for 32 and 64 bit general purpose registers.
557 if ((STI.hasLoadStoreOnCond2() &&
558 SystemZ::GRX32BitRegClass.hasSubClassEq(RC)) ||
559 SystemZ::GR32BitRegClass.hasSubClassEq(RC) ||
560 SystemZ::GR64BitRegClass.hasSubClassEq(RC)) {
561 CondCycles = 2;
562 TrueCycles = 2;
563 FalseCycles = 2;
564 return true;
565 }
566
567 // Can't do anything else.
568 return false;
569}
570
571void SystemZInstrInfo::insertSelect(MachineBasicBlock &MBB,
572 MachineBasicBlock::iterator I,
573 const DebugLoc &DL, Register DstReg,
574 ArrayRef<MachineOperand> Pred,
575 Register TrueReg,
576 Register FalseReg) const {
577 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
578 const TargetRegisterClass *RC = MRI.getRegClass(DstReg);
579
580 assert(Pred.size() == 2 && "Invalid condition");
581 unsigned CCValid = Pred[0].getImm();
582 unsigned CCMask = Pred[1].getImm();
583
584 unsigned Opc;
585 if (SystemZ::GRX32BitRegClass.hasSubClassEq(RC)) {
586 if (STI.hasMiscellaneousExtensions3())
587 Opc = SystemZ::SELRMux;
588 else if (STI.hasLoadStoreOnCond2())
589 Opc = SystemZ::LOCRMux;
590 else {
591 Opc = SystemZ::LOCR;
592 MRI.constrainRegClass(DstReg, &SystemZ::GR32BitRegClass);
593 Register TReg = MRI.createVirtualRegister(&SystemZ::GR32BitRegClass);
594 Register FReg = MRI.createVirtualRegister(&SystemZ::GR32BitRegClass);
595 BuildMI(MBB, I, DL, get(TargetOpcode::COPY), TReg).addReg(TrueReg);
596 BuildMI(MBB, I, DL, get(TargetOpcode::COPY), FReg).addReg(FalseReg);
597 TrueReg = TReg;
598 FalseReg = FReg;
599 }
600 } else if (SystemZ::GR64BitRegClass.hasSubClassEq(RC)) {
601 if (STI.hasMiscellaneousExtensions3())
602 Opc = SystemZ::SELGR;
603 else
604 Opc = SystemZ::LOCGR;
605 } else
606 llvm_unreachable("Invalid register class");
607
608 BuildMI(MBB, I, DL, get(Opc), DstReg)
609 .addReg(FalseReg).addReg(TrueReg)
610 .addImm(CCValid).addImm(CCMask);
611}
612
613bool SystemZInstrInfo::foldImmediate(MachineInstr &UseMI, MachineInstr &DefMI,
614 Register Reg,
615 MachineRegisterInfo *MRI) const {
616 unsigned DefOpc = DefMI.getOpcode();
617 if (DefOpc != SystemZ::LHIMux && DefOpc != SystemZ::LHI &&
618 DefOpc != SystemZ::LGHI)
619 return false;
620 if (DefMI.getOperand(0).getReg() != Reg)
621 return false;
622 int32_t ImmVal = (int32_t)DefMI.getOperand(1).getImm();
623
624 unsigned UseOpc = UseMI.getOpcode();
625 unsigned NewUseOpc;
626 unsigned UseIdx;
627 int CommuteIdx = -1;
628 bool TieOps = false;
629 switch (UseOpc) {
630 case SystemZ::SELRMux:
631 TieOps = true;
632 [[fallthrough]];
633 case SystemZ::LOCRMux:
634 if (!STI.hasLoadStoreOnCond2())
635 return false;
636 NewUseOpc = SystemZ::LOCHIMux;
637 if (UseMI.getOperand(2).getReg() == Reg)
638 UseIdx = 2;
639 else if (UseMI.getOperand(1).getReg() == Reg)
640 UseIdx = 2, CommuteIdx = 1;
641 else
642 return false;
643 break;
644 case SystemZ::SELGR:
645 TieOps = true;
646 [[fallthrough]];
647 case SystemZ::LOCGR:
648 if (!STI.hasLoadStoreOnCond2())
649 return false;
650 NewUseOpc = SystemZ::LOCGHI;
651 if (UseMI.getOperand(2).getReg() == Reg)
652 UseIdx = 2;
653 else if (UseMI.getOperand(1).getReg() == Reg)
654 UseIdx = 2, CommuteIdx = 1;
655 else
656 return false;
657 break;
658 default:
659 return false;
660 }
661
662 if (CommuteIdx != -1)
663 if (!commuteInstruction(UseMI, false, CommuteIdx, UseIdx))
664 return false;
665
666 bool DeleteDef = MRI->hasOneNonDBGUse(Reg);
667 UseMI.setDesc(get(NewUseOpc));
668 if (TieOps)
669 UseMI.tieOperands(0, 1);
670 UseMI.getOperand(UseIdx).ChangeToImmediate(ImmVal);
671 if (DeleteDef)
672 DefMI.eraseFromParent();
673
674 return true;
675}
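// For example, if %r3 is defined by LHI %r3, 42 and its only use is the
// second data operand of a LOCRMux, that operand is replaced by the
// immediate 42 (turning the select into LOCHIMux) and the LHI is deleted.
// If the constant feeds the first data operand instead, the select is
// commuted first, which also inverts its condition mask.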
676
677bool SystemZInstrInfo::isPredicable(const MachineInstr &MI) const {
678 unsigned Opcode = MI.getOpcode();
679 if (Opcode == SystemZ::Return ||
680 Opcode == SystemZ::Return_XPLINK ||
681 Opcode == SystemZ::Trap ||
682 Opcode == SystemZ::CallJG ||
683 Opcode == SystemZ::CallBR)
684 return true;
685 return false;
686}
687
688bool SystemZInstrInfo::
689isProfitableToIfCvt(MachineBasicBlock &MBB,
690 unsigned NumCycles, unsigned ExtraPredCycles,
691 BranchProbability Probability) const {
692 // Avoid using conditional returns at the end of a loop (since then
693 // we'd need to emit an unconditional branch to the beginning anyway,
694 // making the loop body longer). This doesn't apply for low-probability
695 // loops (eg. compare-and-swap retry), so just decide based on branch
696 // probability instead of looping structure.
697 // However, since Compare and Trap instructions cost the same as a regular
698 // Compare instruction, we should allow the if conversion to convert this
699 // into a Conditional Compare regardless of the branch probability.
700 if (MBB.getLastNonDebugInstr()->getOpcode() != SystemZ::Trap &&
701 MBB.succ_empty() && Probability < BranchProbability(1, 8))
702 return false;
703 // For now only convert single instructions.
704 return NumCycles == 1;
705}
706
707bool SystemZInstrInfo::
708isProfitableToIfCvt(MachineBasicBlock &TMBB,
709 unsigned NumCyclesT, unsigned ExtraPredCyclesT,
710 MachineBasicBlock &FMBB,
711 unsigned NumCyclesF, unsigned ExtraPredCyclesF,
712 BranchProbability Probability) const {
713 // For now avoid converting mutually-exclusive cases.
714 return false;
715}
716
717bool SystemZInstrInfo::
718isProfitableToDupForIfCvt(MachineBasicBlock &MBB, unsigned NumCycles,
719 BranchProbability Probability) const {
720 // For now only duplicate single instructions.
721 return NumCycles == 1;
722}
723
724bool SystemZInstrInfo::PredicateInstruction(
725 MachineInstr &MI, ArrayRef<MachineOperand> Pred) const {
726 assert(Pred.size() == 2 && "Invalid condition");
727 unsigned CCValid = Pred[0].getImm();
728 unsigned CCMask = Pred[1].getImm();
729 assert(CCMask > 0 && CCMask < 15 && "Invalid predicate");
730 unsigned Opcode = MI.getOpcode();
731 if (Opcode == SystemZ::Trap) {
732 MI.setDesc(get(SystemZ::CondTrap));
733 MachineInstrBuilder(*MI.getParent()->getParent(), MI)
734 .addImm(CCValid).addImm(CCMask)
735 .addReg(SystemZ::CC, RegState::Implicit);
736 return true;
737 }
738 if (Opcode == SystemZ::Return || Opcode == SystemZ::Return_XPLINK) {
739 MI.setDesc(get(Opcode == SystemZ::Return ? SystemZ::CondReturn
740 : SystemZ::CondReturn_XPLINK));
741 MachineInstrBuilder(*MI.getParent()->getParent(), MI)
742 .addImm(CCValid)
743 .addImm(CCMask)
744 .addReg(SystemZ::CC, RegState::Implicit);
745 return true;
746 }
747 if (Opcode == SystemZ::CallJG) {
748 MachineOperand FirstOp = MI.getOperand(0);
749 const uint32_t *RegMask = MI.getOperand(1).getRegMask();
750 MI.removeOperand(1);
751 MI.removeOperand(0);
752 MI.setDesc(get(SystemZ::CallBRCL));
753 MachineInstrBuilder(*MI.getParent()->getParent(), MI)
754 .addImm(CCValid)
755 .addImm(CCMask)
756 .add(FirstOp)
757 .addRegMask(RegMask)
758 .addReg(SystemZ::CC, RegState::Implicit);
759 return true;
760 }
761 if (Opcode == SystemZ::CallBR) {
762 MachineOperand Target = MI.getOperand(0);
763 const uint32_t *RegMask = MI.getOperand(1).getRegMask();
764 MI.removeOperand(1);
765 MI.removeOperand(0);
766 MI.setDesc(get(SystemZ::CallBCR));
767 MachineInstrBuilder(*MI.getParent()->getParent(), MI)
768 .addImm(CCValid).addImm(CCMask)
769 .add(Target)
770 .addRegMask(RegMask)
771 .addReg(SystemZ::CC, RegState::Implicit);
772 return true;
773 }
774 return false;
775}
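// For example, predicating Return on Pred = {CCMASK_ICMP, CCMASK_CMP_EQ}
// produces CondReturn 14, 8, and predicating CallJG produces CallBRCL 14, 8
// with the original target and register mask re-added after the condition.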
776
777void SystemZInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
778 MachineBasicBlock::iterator MBBI,
779 const DebugLoc &DL, MCRegister DestReg,
780 MCRegister SrcReg, bool KillSrc) const {
781 // Split 128-bit GPR moves into two 64-bit moves. Add implicit uses of the
782 // super register in case one of the subregs is undefined.
783 // This handles ADDR128 too.
784 if (SystemZ::GR128BitRegClass.contains(DestReg, SrcReg)) {
785 copyPhysReg(MBB, MBBI, DL, RI.getSubReg(DestReg, SystemZ::subreg_h64),
786 RI.getSubReg(SrcReg, SystemZ::subreg_h64), KillSrc);
787 MachineInstrBuilder(*MBB.getParent(), std::prev(MBBI))
788 .addReg(SrcReg, RegState::Implicit);
789 copyPhysReg(MBB, MBBI, DL, RI.getSubReg(DestReg, SystemZ::subreg_l64),
790 RI.getSubReg(SrcReg, SystemZ::subreg_l64), KillSrc);
791 MachineInstrBuilder(*MBB.getParent(), std::prev(MBBI))
792 .addReg(SrcReg, (getKillRegState(KillSrc) | RegState::Implicit));
793 return;
794 }
795
796 if (SystemZ::GRX32BitRegClass.contains(DestReg, SrcReg)) {
797 emitGRX32Move(MBB, MBBI, DL, DestReg, SrcReg, SystemZ::LR, 32, KillSrc,
798 false);
799 return;
800 }
801
802 // Move 128-bit floating-point values between VR128 and FP128.
803 if (SystemZ::VR128BitRegClass.contains(DestReg) &&
804 SystemZ::FP128BitRegClass.contains(SrcReg)) {
805 MCRegister SrcRegHi =
806 RI.getMatchingSuperReg(RI.getSubReg(SrcReg, SystemZ::subreg_h64),
807 SystemZ::subreg_h64, &SystemZ::VR128BitRegClass);
808 MCRegister SrcRegLo =
809 RI.getMatchingSuperReg(RI.getSubReg(SrcReg, SystemZ::subreg_l64),
810 SystemZ::subreg_h64, &SystemZ::VR128BitRegClass);
811
812 BuildMI(MBB, MBBI, DL, get(SystemZ::VMRHG), DestReg)
813 .addReg(SrcRegHi, getKillRegState(KillSrc))
814 .addReg(SrcRegLo, getKillRegState(KillSrc));
815 return;
816 }
817 if (SystemZ::FP128BitRegClass.contains(DestReg) &&
818 SystemZ::VR128BitRegClass.contains(SrcReg)) {
819 MCRegister DestRegHi =
820 RI.getMatchingSuperReg(RI.getSubReg(DestReg, SystemZ::subreg_h64),
821 SystemZ::subreg_h64, &SystemZ::VR128BitRegClass);
822 MCRegister DestRegLo =
823 RI.getMatchingSuperReg(RI.getSubReg(DestReg, SystemZ::subreg_l64),
824 SystemZ::subreg_h64, &SystemZ::VR128BitRegClass);
825
826 if (DestRegHi != SrcReg)
827 copyPhysReg(MBB, MBBI, DL, DestRegHi, SrcReg, false);
828 BuildMI(MBB, MBBI, DL, get(SystemZ::VREPG), DestRegLo)
829 .addReg(SrcReg, getKillRegState(KillSrc)).addImm(1);
830 return;
831 }
832
833 // Move CC value from a GR32.
834 if (DestReg == SystemZ::CC) {
835 unsigned Opcode =
836 SystemZ::GR32BitRegClass.contains(SrcReg) ? SystemZ::TMLH : SystemZ::TMHH;
837 BuildMI(MBB, MBBI, DL, get(Opcode))
838 .addReg(SrcReg, getKillRegState(KillSrc))
839 .addImm(3 << (SystemZ::IPM_CC - 16));
840 return;
841 }
842
843 // Everything else needs only one instruction.
844 unsigned Opcode;
845 if (SystemZ::GR64BitRegClass.contains(DestReg, SrcReg))
846 Opcode = SystemZ::LGR;
847 else if (SystemZ::FP32BitRegClass.contains(DestReg, SrcReg))
848 // For z13 we prefer LDR over LER to avoid partial register dependencies.
849 Opcode = STI.hasVector() ? SystemZ::LDR32 : SystemZ::LER;
850 else if (SystemZ::FP64BitRegClass.contains(DestReg, SrcReg))
851 Opcode = SystemZ::LDR;
852 else if (SystemZ::FP128BitRegClass.contains(DestReg, SrcReg))
853 Opcode = SystemZ::LXR;
854 else if (SystemZ::VR32BitRegClass.contains(DestReg, SrcReg))
855 Opcode = SystemZ::VLR32;
856 else if (SystemZ::VR64BitRegClass.contains(DestReg, SrcReg))
857 Opcode = SystemZ::VLR64;
858 else if (SystemZ::VR128BitRegClass.contains(DestReg, SrcReg))
859 Opcode = SystemZ::VLR;
860 else if (SystemZ::AR32BitRegClass.contains(DestReg, SrcReg))
861 Opcode = SystemZ::CPYA;
862 else
863 llvm_unreachable("Impossible reg-to-reg copy");
864
865 BuildMI(MBB, MBBI, DL, get(Opcode), DestReg)
866 .addReg(SrcReg, getKillRegState(KillSrc));
867}
868
869void SystemZInstrInfo::storeRegToStackSlot(
870 MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, Register SrcReg,
871 bool isKill, int FrameIdx, const TargetRegisterClass *RC,
872 const TargetRegisterInfo *TRI, Register VReg) const {
873 DebugLoc DL = MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc();
874
875 // Callers may expect a single instruction, so keep 128-bit moves
876 // together for now and lower them after register allocation.
877 unsigned LoadOpcode, StoreOpcode;
878 getLoadStoreOpcodes(RC, LoadOpcode, StoreOpcode);
879 addFrameReference(BuildMI(MBB, MBBI, DL, get(StoreOpcode))
880 .addReg(SrcReg, getKillRegState(isKill)),
881 FrameIdx);
882}
883
884void SystemZInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
885 MachineBasicBlock::iterator MBBI,
886 Register DestReg, int FrameIdx,
887 const TargetRegisterClass *RC,
888 const TargetRegisterInfo *TRI,
889 Register VReg) const {
890 DebugLoc DL = MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc();
891
892 // Callers may expect a single instruction, so keep 128-bit moves
893 // together for now and lower them after register allocation.
894 unsigned LoadOpcode, StoreOpcode;
895 getLoadStoreOpcodes(RC, LoadOpcode, StoreOpcode);
896 addFrameReference(BuildMI(MBB, MBBI, DL, get(LoadOpcode), DestReg),
897 FrameIdx);
898}
899
900// Return true if MI is a simple load or store with a 12-bit displacement
901// and no index. Flag is SimpleBDXLoad for loads and SimpleBDXStore for stores.
902static bool isSimpleBD12Move(const MachineInstr *MI, unsigned Flag) {
903 const MCInstrDesc &MCID = MI->getDesc();
904 return ((MCID.TSFlags & Flag) &&
905 isUInt<12>(MI->getOperand(2).getImm()) &&
906 MI->getOperand(3).getReg() == 0);
907}
908
909namespace {
910
911struct LogicOp {
912 LogicOp() = default;
913 LogicOp(unsigned regSize, unsigned immLSB, unsigned immSize)
914 : RegSize(regSize), ImmLSB(immLSB), ImmSize(immSize) {}
915
916 explicit operator bool() const { return RegSize; }
917
918 unsigned RegSize = 0;
919 unsigned ImmLSB = 0;
920 unsigned ImmSize = 0;
921};
922
923} // end anonymous namespace
924
925static LogicOp interpretAndImmediate(unsigned Opcode) {
926 switch (Opcode) {
927 case SystemZ::NILMux: return LogicOp(32, 0, 16);
928 case SystemZ::NIHMux: return LogicOp(32, 16, 16);
929 case SystemZ::NILL64: return LogicOp(64, 0, 16);
930 case SystemZ::NILH64: return LogicOp(64, 16, 16);
931 case SystemZ::NIHL64: return LogicOp(64, 32, 16);
932 case SystemZ::NIHH64: return LogicOp(64, 48, 16);
933 case SystemZ::NIFMux: return LogicOp(32, 0, 32);
934 case SystemZ::NILF64: return LogicOp(64, 0, 32);
935 case SystemZ::NIHF64: return LogicOp(64, 32, 32);
936 default: return LogicOp();
937 }
938}
939
940static void transferDeadCC(MachineInstr *OldMI, MachineInstr *NewMI) {
941 if (OldMI->registerDefIsDead(SystemZ::CC, /*TRI=*/nullptr)) {
942 MachineOperand *CCDef =
943 NewMI->findRegisterDefOperand(SystemZ::CC, /*TRI=*/nullptr);
944 if (CCDef != nullptr)
945 CCDef->setIsDead(true);
946 }
947}
948
949static void transferMIFlag(MachineInstr *OldMI, MachineInstr *NewMI,
950 MachineInstr::MIFlag Flag) {
951 if (OldMI->getFlag(Flag))
952 NewMI->setFlag(Flag);
953}
954
955MachineInstr *SystemZInstrInfo::convertToThreeAddress(MachineInstr &MI,
956 LiveVariables *LV,
957 LiveIntervals *LIS) const {
958 MachineBasicBlock *MBB = MI.getParent();
959
960 // Try to convert an AND into an RISBG-type instruction.
961 // TODO: It might be beneficial to select RISBG and shorten to AND instead.
962 if (LogicOp And = interpretAndImmediate(MI.getOpcode())) {
963 uint64_t Imm = MI.getOperand(2).getImm() << And.ImmLSB;
964 // AND IMMEDIATE leaves the other bits of the register unchanged.
965 Imm |= allOnes(And.RegSize) & ~(allOnes(And.ImmSize) << And.ImmLSB);
966 unsigned Start, End;
967 if (isRxSBGMask(Imm, And.RegSize, Start, End)) {
968 unsigned NewOpcode;
969 if (And.RegSize == 64) {
970 NewOpcode = SystemZ::RISBG;
971 // Prefer RISBGN if available, since it does not clobber CC.
972 if (STI.hasMiscellaneousExtensions())
973 NewOpcode = SystemZ::RISBGN;
974 } else {
975 NewOpcode = SystemZ::RISBMux;
976 Start &= 31;
977 End &= 31;
978 }
979 MachineOperand &Dest = MI.getOperand(0);
980 MachineOperand &Src = MI.getOperand(1);
981 MachineInstrBuilder MIB =
982 BuildMI(*MBB, MI, MI.getDebugLoc(), get(NewOpcode))
983 .add(Dest)
984 .addReg(0)
985 .addReg(Src.getReg(), getKillRegState(Src.isKill()),
986 Src.getSubReg())
987 .addImm(Start)
988 .addImm(End + 128)
989 .addImm(0);
990 if (LV) {
991 unsigned NumOps = MI.getNumOperands();
992 for (unsigned I = 1; I < NumOps; ++I) {
993 MachineOperand &Op = MI.getOperand(I);
994 if (Op.isReg() && Op.isKill())
995 LV->replaceKillInstruction(Op.getReg(), MI, *MIB);
996 }
997 }
998 if (LIS)
999 LIS->ReplaceMachineInstrInMaps(MI, *MIB);
1000 transferDeadCC(&MI, MIB);
1001 return MIB;
1002 }
1003 }
1004 return nullptr;
1005}
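// Worked example: an AND via NILL64 with immediate 0xfff0 clears only bits
// 0-3, so the effective 64-bit mask is 0xfffffffffffffff0. isRxSBGMask
// returns Start = 0 and End = 59, and the AND becomes RISBG (or RISBGN when
// miscellaneous-extensions is available) with I3 = 0, I4 = 59 + 128 = 187
// and a zero rotate.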
1006
1007MachineInstr *SystemZInstrInfo::foldMemoryOperandImpl(
1008 MachineFunction &MF, MachineInstr &MI, ArrayRef<unsigned> Ops,
1009 MachineBasicBlock::iterator InsertPt, int FrameIndex,
1010 LiveIntervals *LIS, VirtRegMap *VRM) const {
1011 const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
1012 MachineRegisterInfo &MRI = MF.getRegInfo();
1013 const MachineFrameInfo &MFI = MF.getFrameInfo();
1014 unsigned Size = MFI.getObjectSize(FrameIndex);
1015 unsigned Opcode = MI.getOpcode();
1016
1017 // Check CC liveness if new instruction introduces a dead def of CC.
1018 SlotIndex MISlot = SlotIndex();
1019 LiveRange *CCLiveRange = nullptr;
1020 bool CCLiveAtMI = true;
1021 if (LIS) {
1022 MISlot = LIS->getSlotIndexes()->getInstructionIndex(MI).getRegSlot();
1023 auto CCUnits = TRI->regunits(MCRegister::from(SystemZ::CC));
1024 assert(range_size(CCUnits) == 1 && "CC only has one reg unit.");
1025 CCLiveRange = &LIS->getRegUnit(*CCUnits.begin());
1026 CCLiveAtMI = CCLiveRange->liveAt(MISlot);
1027 }
1028
1029 if (Ops.size() == 2 && Ops[0] == 0 && Ops[1] == 1) {
1030 if (!CCLiveAtMI && (Opcode == SystemZ::LA || Opcode == SystemZ::LAY) &&
1031 isInt<8>(MI.getOperand(2).getImm()) && !MI.getOperand(3).getReg()) {
1032 // LA(Y) %reg, CONST(%reg) -> AGSI %mem, CONST
1033 MachineInstr *BuiltMI = BuildMI(*InsertPt->getParent(), InsertPt,
1034 MI.getDebugLoc(), get(SystemZ::AGSI))
1035 .addFrameIndex(FrameIndex)
1036 .addImm(0)
1037 .addImm(MI.getOperand(2).getImm());
1038 BuiltMI->findRegisterDefOperand(SystemZ::CC, /*TRI=*/nullptr)
1039 ->setIsDead(true);
1040 CCLiveRange->createDeadDef(MISlot, LIS->getVNInfoAllocator());
1041 return BuiltMI;
1042 }
1043 return nullptr;
1044 }
1045
1046 // All other cases require a single operand.
1047 if (Ops.size() != 1)
1048 return nullptr;
1049
1050 unsigned OpNum = Ops[0];
1051 assert(Size * 8 ==
1052 TRI->getRegSizeInBits(*MF.getRegInfo()
1053 .getRegClass(MI.getOperand(OpNum).getReg())) &&
1054 "Invalid size combination");
1055
1056 if ((Opcode == SystemZ::AHI || Opcode == SystemZ::AGHI) && OpNum == 0 &&
1057 isInt<8>(MI.getOperand(2).getImm())) {
1058 // A(G)HI %reg, CONST -> A(G)SI %mem, CONST
1059 Opcode = (Opcode == SystemZ::AHI ? SystemZ::ASI : SystemZ::AGSI);
1060 MachineInstr *BuiltMI =
1061 BuildMI(*InsertPt->getParent(), InsertPt, MI.getDebugLoc(), get(Opcode))
1062 .addFrameIndex(FrameIndex)
1063 .addImm(0)
1064 .addImm(MI.getOperand(2).getImm());
1065 transferDeadCC(&MI, BuiltMI);
1066 transferMIFlag(&MI, BuiltMI, MachineInstr::NoSWrap);
1067 return BuiltMI;
1068 }
1069
1070 if ((Opcode == SystemZ::ALFI && OpNum == 0 &&
1071 isInt<8>((int32_t)MI.getOperand(2).getImm())) ||
1072 (Opcode == SystemZ::ALGFI && OpNum == 0 &&
1073 isInt<8>((int64_t)MI.getOperand(2).getImm()))) {
1074 // AL(G)FI %reg, CONST -> AL(G)SI %mem, CONST
1075 Opcode = (Opcode == SystemZ::ALFI ? SystemZ::ALSI : SystemZ::ALGSI);
1076 MachineInstr *BuiltMI =
1077 BuildMI(*InsertPt->getParent(), InsertPt, MI.getDebugLoc(), get(Opcode))
1078 .addFrameIndex(FrameIndex)
1079 .addImm(0)
1080 .addImm((int8_t)MI.getOperand(2).getImm());
1081 transferDeadCC(&MI, BuiltMI);
1082 return BuiltMI;
1083 }
1084
1085 if ((Opcode == SystemZ::SLFI && OpNum == 0 &&
1086 isInt<8>((int32_t)-MI.getOperand(2).getImm())) ||
1087 (Opcode == SystemZ::SLGFI && OpNum == 0 &&
1088 isInt<8>((int64_t)-MI.getOperand(2).getImm()))) {
1089 // SL(G)FI %reg, CONST -> AL(G)SI %mem, -CONST
1090 Opcode = (Opcode == SystemZ::SLFI ? SystemZ::ALSI : SystemZ::ALGSI);
1091 MachineInstr *BuiltMI =
1092 BuildMI(*InsertPt->getParent(), InsertPt, MI.getDebugLoc(), get(Opcode))
1093 .addFrameIndex(FrameIndex)
1094 .addImm(0)
1095 .addImm((int8_t)-MI.getOperand(2).getImm());
1096 transferDeadCC(&MI, BuiltMI);
1097 return BuiltMI;
1098 }
1099
1100 unsigned MemImmOpc = 0;
1101 switch (Opcode) {
1102 case SystemZ::LHIMux:
1103 case SystemZ::LHI: MemImmOpc = SystemZ::MVHI; break;
1104 case SystemZ::LGHI: MemImmOpc = SystemZ::MVGHI; break;
1105 case SystemZ::CHIMux:
1106 case SystemZ::CHI: MemImmOpc = SystemZ::CHSI; break;
1107 case SystemZ::CGHI: MemImmOpc = SystemZ::CGHSI; break;
1108 case SystemZ::CLFIMux:
1109 case SystemZ::CLFI:
1110 if (isUInt<16>(MI.getOperand(1).getImm()))
1111 MemImmOpc = SystemZ::CLFHSI;
1112 break;
1113 case SystemZ::CLGFI:
1114 if (isUInt<16>(MI.getOperand(1).getImm()))
1115 MemImmOpc = SystemZ::CLGHSI;
1116 break;
1117 default: break;
1118 }
1119 if (MemImmOpc)
1120 return BuildMI(*InsertPt->getParent(), InsertPt, MI.getDebugLoc(),
1121 get(MemImmOpc))
1122 .addFrameIndex(FrameIndex)
1123 .addImm(0)
1124 .addImm(MI.getOperand(1).getImm());
1125
1126 if (Opcode == SystemZ::LGDR || Opcode == SystemZ::LDGR) {
1127 bool Op0IsGPR = (Opcode == SystemZ::LGDR);
1128 bool Op1IsGPR = (Opcode == SystemZ::LDGR);
1129 // If we're spilling the destination of an LDGR or LGDR, store the
1130 // source register instead.
1131 if (OpNum == 0) {
1132 unsigned StoreOpcode = Op1IsGPR ? SystemZ::STG : SystemZ::STD;
1133 return BuildMI(*InsertPt->getParent(), InsertPt, MI.getDebugLoc(),
1134 get(StoreOpcode))
1135 .add(MI.getOperand(1))
1136 .addFrameIndex(FrameIndex)
1137 .addImm(0)
1138 .addReg(0);
1139 }
1140 // If we're spilling the source of an LDGR or LGDR, load the
1141 // destination register instead.
1142 if (OpNum == 1) {
1143 unsigned LoadOpcode = Op0IsGPR ? SystemZ::LG : SystemZ::LD;
1144 return BuildMI(*InsertPt->getParent(), InsertPt, MI.getDebugLoc(),
1145 get(LoadOpcode))
1146 .add(MI.getOperand(0))
1147 .addFrameIndex(FrameIndex)
1148 .addImm(0)
1149 .addReg(0);
1150 }
1151 }
1152
1153 // Look for cases where the source of a simple store or the destination
1154 // of a simple load is being spilled. Try to use MVC instead.
1155 //
1156 // Although MVC is in practice a fast choice in these cases, it is still
1157 // logically a bytewise copy. This means that we cannot use it if the
1158 // load or store is volatile. We also wouldn't be able to use MVC if
1159 // the two memories partially overlap, but that case cannot occur here,
1160 // because we know that one of the memories is a full frame index.
1161 //
1162 // For performance reasons, we also want to avoid using MVC if the addresses
1163 // might be equal. We don't worry about that case here, because spill slot
1164 // coloring happens later, and because we have special code to remove
1165 // MVCs that turn out to be redundant.
1166 if (OpNum == 0 && MI.hasOneMemOperand()) {
1167 MachineMemOperand *MMO = *MI.memoperands_begin();
1168 if (MMO->getSize() == Size && !MMO->isVolatile() && !MMO->isAtomic()) {
1169 // Handle conversion of loads.
1170 if (isSimpleBD12Move(&MI, SystemZII::SimpleBDXLoad)) {
1171 return BuildMI(*InsertPt->getParent(), InsertPt, MI.getDebugLoc(),
1172 get(SystemZ::MVC))
1173 .addFrameIndex(FrameIndex)
1174 .addImm(0)
1175 .addImm(Size)
1176 .add(MI.getOperand(1))
1177 .addImm(MI.getOperand(2).getImm())
1178 .addMemOperand(MMO);
1179 }
1180 // Handle conversion of stores.
1181 if (isSimpleBD12Move(&MI, SystemZII::SimpleBDXStore)) {
1182 return BuildMI(*InsertPt->getParent(), InsertPt, MI.getDebugLoc(),
1183 get(SystemZ::MVC))
1184 .add(MI.getOperand(1))
1185 .addImm(MI.getOperand(2).getImm())
1186 .addImm(Size)
1187 .addFrameIndex(FrameIndex)
1188 .addImm(0)
1189 .addMemOperand(MMO);
1190 }
1191 }
1192 }
1193
1194 // If the spilled operand is the final one or the instruction is
1195 // commutable, try to change <INSN>R into <INSN>. Don't introduce a def of
1196 // CC if it is live and MI does not define it.
1197 unsigned NumOps = MI.getNumExplicitOperands();
1198 int MemOpcode = SystemZ::getMemOpcode(Opcode);
1199 if (MemOpcode == -1 ||
1200 (CCLiveAtMI && !MI.definesRegister(SystemZ::CC, /*TRI=*/nullptr) &&
1201 get(MemOpcode).hasImplicitDefOfPhysReg(SystemZ::CC)))
1202 return nullptr;
1203
1204 // Check if all other vregs have a usable allocation in the case of vector
1205 // to FP conversion.
1206 const MCInstrDesc &MCID = MI.getDesc();
1207 for (unsigned I = 0, E = MCID.getNumOperands(); I != E; ++I) {
1208 const MCOperandInfo &MCOI = MCID.operands()[I];
1209 if (MCOI.OperandType != MCOI::OPERAND_REGISTER || I == OpNum)
1210 continue;
1211 const TargetRegisterClass *RC = TRI->getRegClass(MCOI.RegClass);
1212 if (RC == &SystemZ::VR32BitRegClass || RC == &SystemZ::VR64BitRegClass) {
1213 Register Reg = MI.getOperand(I).getReg();
1214 Register PhysReg = Reg.isVirtual()
1215 ? (VRM ? Register(VRM->getPhys(Reg)) : Register())
1216 : Reg;
1217 if (!PhysReg ||
1218 !(SystemZ::FP32BitRegClass.contains(PhysReg) ||
1219 SystemZ::FP64BitRegClass.contains(PhysReg) ||
1220 SystemZ::VF128BitRegClass.contains(PhysReg)))
1221 return nullptr;
1222 }
1223 }
1224 // Fused multiply and add/sub need to have the same dst and accumulator reg.
1225 bool FusedFPOp = (Opcode == SystemZ::WFMADB || Opcode == SystemZ::WFMASB ||
1226 Opcode == SystemZ::WFMSDB || Opcode == SystemZ::WFMSSB);
1227 if (FusedFPOp) {
1228 Register DstReg = VRM->getPhys(MI.getOperand(0).getReg());
1229 Register AccReg = VRM->getPhys(MI.getOperand(3).getReg());
1230 if (OpNum == 0 || OpNum == 3 || DstReg != AccReg)
1231 return nullptr;
1232 }
1233
1234 // Try to swap compare operands if possible.
1235 bool NeedsCommute = false;
1236 if ((MI.getOpcode() == SystemZ::CR || MI.getOpcode() == SystemZ::CGR ||
1237 MI.getOpcode() == SystemZ::CLR || MI.getOpcode() == SystemZ::CLGR ||
1238 MI.getOpcode() == SystemZ::WFCDB || MI.getOpcode() == SystemZ::WFCSB ||
1239 MI.getOpcode() == SystemZ::WFKDB || MI.getOpcode() == SystemZ::WFKSB) &&
1240 OpNum == 0 && prepareCompareSwapOperands(MI))
1241 NeedsCommute = true;
1242
1243 bool CCOperands = false;
1244 if (MI.getOpcode() == SystemZ::LOCRMux || MI.getOpcode() == SystemZ::LOCGR ||
1245 MI.getOpcode() == SystemZ::SELRMux || MI.getOpcode() == SystemZ::SELGR) {
1246 assert(MI.getNumOperands() == 6 && NumOps == 5 &&
1247 "LOCR/SELR instruction operands corrupt?");
1248 NumOps -= 2;
1249 CCOperands = true;
1250 }
1251
1252 // See if this is a 3-address instruction that is convertible to 2-address
1253 // and suitable for folding below. Only try this with virtual registers
1254 // and a provided VRM (during regalloc).
1255 if (NumOps == 3 && SystemZ::getTargetMemOpcode(MemOpcode) != -1) {
1256 if (VRM == nullptr)
1257 return nullptr;
1258 else {
1259 Register DstReg = MI.getOperand(0).getReg();
1260 Register DstPhys =
1261 (DstReg.isVirtual() ? Register(VRM->getPhys(DstReg)) : DstReg);
1262 Register SrcReg = (OpNum == 2 ? MI.getOperand(1).getReg()
1263 : ((OpNum == 1 && MI.isCommutable())
1264 ? MI.getOperand(2).getReg()
1265 : Register()));
1266 if (DstPhys && !SystemZ::GRH32BitRegClass.contains(DstPhys) && SrcReg &&
1267 SrcReg.isVirtual() && DstPhys == VRM->getPhys(SrcReg))
1268 NeedsCommute = (OpNum == 1);
1269 else
1270 return nullptr;
1271 }
1272 }
1273
1274 if ((OpNum == NumOps - 1) || NeedsCommute || FusedFPOp) {
1275 const MCInstrDesc &MemDesc = get(MemOpcode);
1276 uint64_t AccessBytes = SystemZII::getAccessSize(MemDesc.TSFlags);
1277 assert(AccessBytes != 0 && "Size of access should be known");
1278 assert(AccessBytes <= Size && "Access outside the frame index");
1279 uint64_t Offset = Size - AccessBytes;
1280 MachineInstrBuilder MIB = BuildMI(*InsertPt->getParent(), InsertPt,
1281 MI.getDebugLoc(), get(MemOpcode));
1282 if (MI.isCompare()) {
1283 assert(NumOps == 2 && "Expected 2 register operands for a compare.");
1284 MIB.add(MI.getOperand(NeedsCommute ? 1 : 0));
1285 }
1286 else if (FusedFPOp) {
1287 MIB.add(MI.getOperand(0));
1288 MIB.add(MI.getOperand(3));
1289 MIB.add(MI.getOperand(OpNum == 1 ? 2 : 1));
1290 }
1291 else {
1292 MIB.add(MI.getOperand(0));
1293 if (NeedsCommute)
1294 MIB.add(MI.getOperand(2));
1295 else
1296 for (unsigned I = 1; I < OpNum; ++I)
1297 MIB.add(MI.getOperand(I));
1298 }
1299 MIB.addFrameIndex(FrameIndex).addImm(Offset);
1300 if (MemDesc.TSFlags & SystemZII::HasIndex)
1301 MIB.addReg(0);
1302 if (CCOperands) {
1303 unsigned CCValid = MI.getOperand(NumOps).getImm();
1304 unsigned CCMask = MI.getOperand(NumOps + 1).getImm();
1305 MIB.addImm(CCValid);
1306 MIB.addImm(NeedsCommute ? CCMask ^ CCValid : CCMask);
1307 }
1308 if (MIB->definesRegister(SystemZ::CC, /*TRI=*/nullptr) &&
1309 (!MI.definesRegister(SystemZ::CC, /*TRI=*/nullptr) ||
1310 MI.registerDefIsDead(SystemZ::CC, /*TRI=*/nullptr))) {
1311 MIB->addRegisterDead(SystemZ::CC, TRI);
1312 if (CCLiveRange)
1313 CCLiveRange->createDeadDef(MISlot, LIS->getVNInfoAllocator());
1314 }
1315 // Constrain the register classes if converted from a vector opcode. The
1316 // allocated regs are in an FP reg-class per previous check above.
1317 for (const MachineOperand &MO : MIB->operands())
1318 if (MO.isReg() && MO.getReg().isVirtual()) {
1319 Register Reg = MO.getReg();
1320 if (MRI.getRegClass(Reg) == &SystemZ::VR32BitRegClass)
1321 MRI.setRegClass(Reg, &SystemZ::FP32BitRegClass);
1322 else if (MRI.getRegClass(Reg) == &SystemZ::VR64BitRegClass)
1323 MRI.setRegClass(Reg, &SystemZ::FP64BitRegClass);
1324 else if (MRI.getRegClass(Reg) == &SystemZ::VR128BitRegClass)
1325 MRI.setRegClass(Reg, &SystemZ::VF128BitRegClass);
1326 }
1327
1328 transferDeadCC(&MI, MIB);
1329 transferMIFlag(&MI, MIB, MachineInstr::NoSWrap);
1330 transferMIFlag(&MI, MIB, MachineInstr::NoFPExcept);
1331 return MIB;
1332 }
1333
1334 return nullptr;
1335}
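// Illustrative folds performed above: AGHI %r1, 1 with %r1 spilled becomes
// AGSI <slot>, 1; the register operand of a simple load or store that is
// spilled becomes an MVC between the frame slot and the original address;
// and a spilled final operand of a reg-reg instruction is folded through
// SystemZ::getMemOpcode, e.g. AGR %r1, %r2 with %r2 spilled becomes AG.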
1336
1337MachineInstr *SystemZInstrInfo::foldMemoryOperandImpl(
1338 MachineFunction &MF, MachineInstr &MI, ArrayRef<unsigned> Ops,
1339 MachineBasicBlock::iterator InsertPt, MachineInstr &LoadMI,
1340 LiveIntervals *LIS) const {
1341 return nullptr;
1342}
1343
1344bool SystemZInstrInfo::expandPostRAPseudo(MachineInstr &MI) const {
1345 switch (MI.getOpcode()) {
1346 case SystemZ::L128:
1347 splitMove(MI, SystemZ::LG);
1348 return true;
1349
1350 case SystemZ::ST128:
1351 splitMove(MI, SystemZ::STG);
1352 return true;
1353
1354 case SystemZ::LX:
1355 splitMove(MI, SystemZ::LD);
1356 return true;
1357
1358 case SystemZ::STX:
1359 splitMove(MI, SystemZ::STD);
1360 return true;
1361
1362 case SystemZ::LBMux:
1363 expandRXYPseudo(MI, SystemZ::LB, SystemZ::LBH);
1364 return true;
1365
1366 case SystemZ::LHMux:
1367 expandRXYPseudo(MI, SystemZ::LH, SystemZ::LHH);
1368 return true;
1369
1370 case SystemZ::LLCRMux:
1371 expandZExtPseudo(MI, SystemZ::LLCR, 8);
1372 return true;
1373
1374 case SystemZ::LLHRMux:
1375 expandZExtPseudo(MI, SystemZ::LLHR, 16);
1376 return true;
1377
1378 case SystemZ::LLCMux:
1379 expandRXYPseudo(MI, SystemZ::LLC, SystemZ::LLCH);
1380 return true;
1381
1382 case SystemZ::LLHMux:
1383 expandRXYPseudo(MI, SystemZ::LLH, SystemZ::LLHH);
1384 return true;
1385
1386 case SystemZ::LMux:
1387 expandRXYPseudo(MI, SystemZ::L, SystemZ::LFH);
1388 return true;
1389
1390 case SystemZ::LOCMux:
1391 expandLOCPseudo(MI, SystemZ::LOC, SystemZ::LOCFH);
1392 return true;
1393
1394 case SystemZ::LOCHIMux:
1395 expandLOCPseudo(MI, SystemZ::LOCHI, SystemZ::LOCHHI);
1396 return true;
1397
1398 case SystemZ::STCMux:
1399 expandRXYPseudo(MI, SystemZ::STC, SystemZ::STCH);
1400 return true;
1401
1402 case SystemZ::STHMux:
1403 expandRXYPseudo(MI, SystemZ::STH, SystemZ::STHH);
1404 return true;
1405
1406 case SystemZ::STMux:
1407 expandRXYPseudo(MI, SystemZ::ST, SystemZ::STFH);
1408 return true;
1409
1410 case SystemZ::STOCMux:
1411 expandLOCPseudo(MI, SystemZ::STOC, SystemZ::STOCFH);
1412 return true;
1413
1414 case SystemZ::LHIMux:
1415 expandRIPseudo(MI, SystemZ::LHI, SystemZ::IIHF, true);
1416 return true;
1417
1418 case SystemZ::IIFMux:
1419 expandRIPseudo(MI, SystemZ::IILF, SystemZ::IIHF, false);
1420 return true;
1421
1422 case SystemZ::IILMux:
1423 expandRIPseudo(MI, SystemZ::IILL, SystemZ::IIHL, false);
1424 return true;
1425
1426 case SystemZ::IIHMux:
1427 expandRIPseudo(MI, SystemZ::IILH, SystemZ::IIHH, false);
1428 return true;
1429
1430 case SystemZ::NIFMux:
1431 expandRIPseudo(MI, SystemZ::NILF, SystemZ::NIHF, false);
1432 return true;
1433
1434 case SystemZ::NILMux:
1435 expandRIPseudo(MI, SystemZ::NILL, SystemZ::NIHL, false);
1436 return true;
1437
1438 case SystemZ::NIHMux:
1439 expandRIPseudo(MI, SystemZ::NILH, SystemZ::NIHH, false);
1440 return true;
1441
1442 case SystemZ::OIFMux:
1443 expandRIPseudo(MI, SystemZ::OILF, SystemZ::OIHF, false);
1444 return true;
1445
1446 case SystemZ::OILMux:
1447 expandRIPseudo(MI, SystemZ::OILL, SystemZ::OIHL, false);
1448 return true;
1449
1450 case SystemZ::OIHMux:
1451 expandRIPseudo(MI, SystemZ::OILH, SystemZ::OIHH, false);
1452 return true;
1453
1454 case SystemZ::XIFMux:
1455 expandRIPseudo(MI, SystemZ::XILF, SystemZ::XIHF, false);
1456 return true;
1457
1458 case SystemZ::TMLMux:
1459 expandRIPseudo(MI, SystemZ::TMLL, SystemZ::TMHL, false);
1460 return true;
1461
1462 case SystemZ::TMHMux:
1463 expandRIPseudo(MI, SystemZ::TMLH, SystemZ::TMHH, false);
1464 return true;
1465
1466 case SystemZ::AHIMux:
1467 expandRIPseudo(MI, SystemZ::AHI, SystemZ::AIH, false);
1468 return true;
1469
1470 case SystemZ::AHIMuxK:
1471 expandRIEPseudo(MI, SystemZ::AHI, SystemZ::AHIK, SystemZ::AIH);
1472 return true;
1473
1474 case SystemZ::AFIMux:
1475 expandRIPseudo(MI, SystemZ::AFI, SystemZ::AIH, false);
1476 return true;
1477
1478 case SystemZ::CHIMux:
1479 expandRIPseudo(MI, SystemZ::CHI, SystemZ::CIH, false);
1480 return true;
1481
1482 case SystemZ::CFIMux:
1483 expandRIPseudo(MI, SystemZ::CFI, SystemZ::CIH, false);
1484 return true;
1485
1486 case SystemZ::CLFIMux:
1487 expandRIPseudo(MI, SystemZ::CLFI, SystemZ::CLIH, false);
1488 return true;
1489
1490 case SystemZ::CMux:
1491 expandRXYPseudo(MI, SystemZ::C, SystemZ::CHF);
1492 return true;
1493
1494 case SystemZ::CLMux:
1495 expandRXYPseudo(MI, SystemZ::CL, SystemZ::CLHF);
1496 return true;
1497
1498 case SystemZ::RISBMux: {
1499 bool DestIsHigh = SystemZ::isHighReg(MI.getOperand(0).getReg());
1500 bool SrcIsHigh = SystemZ::isHighReg(MI.getOperand(2).getReg());
1501 if (SrcIsHigh == DestIsHigh)
1502 MI.setDesc(get(DestIsHigh ? SystemZ::RISBHH : SystemZ::RISBLL));
1503 else {
1504 MI.setDesc(get(DestIsHigh ? SystemZ::RISBHL : SystemZ::RISBLH));
1505 MI.getOperand(5).setImm(MI.getOperand(5).getImm() ^ 32);
1506 }
1507 return true;
1508 }
1509
1510 case SystemZ::ADJDYNALLOC:
1511 splitAdjDynAlloc(MI);
1512 return true;
1513
1514 case TargetOpcode::LOAD_STACK_GUARD:
1515 expandLoadStackGuard(&MI);
1516 return true;
1517
1518 default:
1519 return false;
1520 }
1521}
1522
1522
1523unsigned SystemZInstrInfo::getInstSizeInBytes(const MachineInstr &MI) const {
1524 if (MI.isInlineAsm()) {
1525 const MachineFunction *MF = MI.getParent()->getParent();
1526 const char *AsmStr = MI.getOperand(0).getSymbolName();
1527 return getInlineAsmLength(AsmStr, *MF->getTarget().getMCAsmInfo());
1528 }
1529 else if (MI.getOpcode() == SystemZ::PATCHPOINT)
1530 return PatchPointOpers(&MI).getNumPatchBytes();
1531 else if (MI.getOpcode() == SystemZ::STACKMAP)
1532 return MI.getOperand(1).getImm();
1533 else if (MI.getOpcode() == SystemZ::FENTRY_CALL)
1534 return 6;
1535
1536 return MI.getDesc().getSize();
1537}
1538
1539SystemZII::Branch
1540SystemZInstrInfo::getBranchInfo(const MachineInstr &MI) const {
1541 switch (MI.getOpcode()) {
1542 case SystemZ::BR:
1543 case SystemZ::BI:
1544 case SystemZ::J:
1545 case SystemZ::JG:
1546 return SystemZII::Branch(SystemZII::BranchNormal, SystemZ::CCMASK_ANY,
1547 SystemZ::CCMASK_ANY, &MI.getOperand(0));
1548
1549 case SystemZ::BRC:
1550 case SystemZ::BRCL:
1551 return SystemZII::Branch(SystemZII::BranchNormal, MI.getOperand(0).getImm(),
1552 MI.getOperand(1).getImm(), &MI.getOperand(2));
1553
1554 case SystemZ::BRCT:
1555 case SystemZ::BRCTH:
1556 return SystemZII::Branch(SystemZII::BranchCT, SystemZ::CCMASK_ICMP,
1557 SystemZ::CCMASK_CMP_NE, &MI.getOperand(2));
1558
1559 case SystemZ::BRCTG:
1560 return SystemZII::Branch(SystemZII::BranchCTG, SystemZ::CCMASK_ICMP,
1561 SystemZ::CCMASK_CMP_NE, &MI.getOperand(2));
1562
1563 case SystemZ::CIJ:
1564 case SystemZ::CRJ:
1565 return SystemZII::Branch(SystemZII::BranchC, SystemZ::CCMASK_ICMP,
1566 MI.getOperand(2).getImm(), &MI.getOperand(3));
1567
1568 case SystemZ::CLIJ:
1569 case SystemZ::CLRJ:
1570 return SystemZII::Branch(SystemZII::BranchCL, SystemZ::CCMASK_ICMP,
1571 MI.getOperand(2).getImm(), &MI.getOperand(3));
1572
1573 case SystemZ::CGIJ:
1574 case SystemZ::CGRJ:
1575 return SystemZII::Branch(SystemZII::BranchCG, SystemZ::CCMASK_ICMP,
1576 MI.getOperand(2).getImm(), &MI.getOperand(3));
1577
1578 case SystemZ::CLGIJ:
1579 case SystemZ::CLGRJ:
1580 return SystemZII::Branch(SystemZII::BranchCLG, SystemZ::CCMASK_ICMP,
1581 MI.getOperand(2).getImm(), &MI.getOperand(3));
1582
1583 case SystemZ::INLINEASM_BR:
1584 // Don't try to analyze asm goto, so pass nullptr as branch target argument.
1585 return SystemZII::Branch(SystemZII::AsmGoto, 0, 0, nullptr);
1586
1587 default:
1588 llvm_unreachable("Unrecognized branch opcode");
1589 }
1590}
1591
1592void SystemZInstrInfo::getLoadStoreOpcodes(const TargetRegisterClass *RC,
1593 unsigned &LoadOpcode,
1594 unsigned &StoreOpcode) const {
1595 if (RC == &SystemZ::GR32BitRegClass || RC == &SystemZ::ADDR32BitRegClass) {
1596 LoadOpcode = SystemZ::L;
1597 StoreOpcode = SystemZ::ST;
1598 } else if (RC == &SystemZ::GRH32BitRegClass) {
1599 LoadOpcode = SystemZ::LFH;
1600 StoreOpcode = SystemZ::STFH;
1601 } else if (RC == &SystemZ::GRX32BitRegClass) {
1602 LoadOpcode = SystemZ::LMux;
1603 StoreOpcode = SystemZ::STMux;
1604 } else if (RC == &SystemZ::GR64BitRegClass ||
1605 RC == &SystemZ::ADDR64BitRegClass) {
1606 LoadOpcode = SystemZ::LG;
1607 StoreOpcode = SystemZ::STG;
1608 } else if (RC == &SystemZ::GR128BitRegClass ||
1609 RC == &SystemZ::ADDR128BitRegClass) {
1610 LoadOpcode = SystemZ::L128;
1611 StoreOpcode = SystemZ::ST128;
1612 } else if (RC == &SystemZ::FP32BitRegClass) {
1613 LoadOpcode = SystemZ::LE;
1614 StoreOpcode = SystemZ::STE;
1615 } else if (RC == &SystemZ::FP64BitRegClass) {
1616 LoadOpcode = SystemZ::LD;
1617 StoreOpcode = SystemZ::STD;
1618 } else if (RC == &SystemZ::FP128BitRegClass) {
1619 LoadOpcode = SystemZ::LX;
1620 StoreOpcode = SystemZ::STX;
1621 } else if (RC == &SystemZ::VR32BitRegClass) {
1622 LoadOpcode = SystemZ::VL32;
1623 StoreOpcode = SystemZ::VST32;
1624 } else if (RC == &SystemZ::VR64BitRegClass) {
1625 LoadOpcode = SystemZ::VL64;
1626 StoreOpcode = SystemZ::VST64;
1627 } else if (RC == &SystemZ::VF128BitRegClass ||
1628 RC == &SystemZ::VR128BitRegClass) {
1629 LoadOpcode = SystemZ::VL;
1630 StoreOpcode = SystemZ::VST;
1631 } else
1632 llvm_unreachable("Unsupported regclass to load or store");
1633}
1634
1635unsigned SystemZInstrInfo::getOpcodeForOffset(unsigned Opcode,
1636 int64_t Offset,
1637 const MachineInstr *MI) const {
1638 const MCInstrDesc &MCID = get(Opcode);
1639 int64_t Offset2 = (MCID.TSFlags & SystemZII::Is128Bit ? Offset + 8 : Offset);
1640 if (isUInt<12>(Offset) && isUInt<12>(Offset2)) {
1641 // Get the instruction to use for unsigned 12-bit displacements.
1642 int Disp12Opcode = SystemZ::getDisp12Opcode(Opcode);
1643 if (Disp12Opcode >= 0)
1644 return Disp12Opcode;
1645
1646 // All address-related instructions can use unsigned 12-bit
1647 // displacements.
1648 return Opcode;
1649 }
1650 if (isInt<20>(Offset) && isInt<20>(Offset2)) {
1651 // Get the instruction to use for signed 20-bit displacements.
1652 int Disp20Opcode = SystemZ::getDisp20Opcode(Opcode);
1653 if (Disp20Opcode >= 0)
1654 return Disp20Opcode;
1655
1656 // Check whether Opcode allows signed 20-bit displacements.
1657 if (MCID.TSFlags & SystemZII::Has20BitOffset)
1658 return Opcode;
1659
1660 // If a VR32/VR64 reg ended up in an FP register, use the FP opcode.
1661 if (MI && MI->getOperand(0).isReg()) {
1662 Register Reg = MI->getOperand(0).getReg();
1663 if (Reg.isPhysical() && SystemZMC::getFirstReg(Reg) < 16) {
1664 switch (Opcode) {
1665 case SystemZ::VL32:
1666 return SystemZ::LEY;
1667 case SystemZ::VST32:
1668 return SystemZ::STEY;
1669 case SystemZ::VL64:
1670 return SystemZ::LDY;
1671 case SystemZ::VST64:
1672 return SystemZ::STDY;
1673 default: break;
1674 }
1675 }
1676 }
1677 }
1678 return 0;
1679}
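// For example, getOpcodeForOffset(SystemZ::LA, 5000) returns SystemZ::LAY,
// since 5000 exceeds the unsigned 12-bit range but fits in a signed 20-bit
// displacement; a return value of 0 means no single instruction can encode
// the offset.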
1680
1681bool SystemZInstrInfo::hasDisplacementPairInsn(unsigned Opcode) const {
1682 const MCInstrDesc &MCID = get(Opcode);
1683 if (MCID.TSFlags & SystemZII::Has20BitOffset)
1684 return SystemZ::getDisp12Opcode(Opcode) >= 0;
1685 return SystemZ::getDisp20Opcode(Opcode) >= 0;
1686}
1687
1688unsigned SystemZInstrInfo::getLoadAndTest(unsigned Opcode) const {
1689 switch (Opcode) {
1690 case SystemZ::L: return SystemZ::LT;
1691 case SystemZ::LY: return SystemZ::LT;
1692 case SystemZ::LG: return SystemZ::LTG;
1693 case SystemZ::LGF: return SystemZ::LTGF;
1694 case SystemZ::LR: return SystemZ::LTR;
1695 case SystemZ::LGFR: return SystemZ::LTGFR;
1696 case SystemZ::LGR: return SystemZ::LTGR;
1697 case SystemZ::LCDFR: return SystemZ::LCDBR;
1698 case SystemZ::LPDFR: return SystemZ::LPDBR;
1699 case SystemZ::LNDFR: return SystemZ::LNDBR;
1700 case SystemZ::LCDFR_32: return SystemZ::LCEBR;
1701 case SystemZ::LPDFR_32: return SystemZ::LPEBR;
1702 case SystemZ::LNDFR_32: return SystemZ::LNEBR;
1703 // On zEC12 we prefer to use RISBGN. But if there is a chance to
1704 // actually use the condition code, we may turn it back into RISBG.
1705 // Note that RISBG is not really a "load-and-test" instruction,
1706 // but sets the same condition code values, so is OK to use here.
1707 case SystemZ::RISBGN: return SystemZ::RISBG;
1708 default: return 0;
1709 }
1710}
1711
1712bool SystemZInstrInfo::isRxSBGMask(uint64_t Mask, unsigned BitSize,
1713 unsigned &Start, unsigned &End) const {
1714 // Reject trivial all-zero masks.
1715 Mask &= allOnes(BitSize);
1716 if (Mask == 0)
1717 return false;
1718
1719 // Handle the 1+0+ or 0+1+0* cases. Start then specifies the index of
1720 // the msb and End specifies the index of the lsb.
1721 unsigned LSB, Length;
1722 if (isShiftedMask_64(Mask, LSB, Length)) {
1723 Start = 63 - (LSB + Length - 1);
1724 End = 63 - LSB;
1725 return true;
1726 }
1727
1728 // Handle the wrap-around 1+0+1+ cases. Start then specifies the msb
1729 // of the low 1s and End specifies the lsb of the high 1s.
1730 if (isShiftedMask_64(Mask ^ allOnes(BitSize), LSB, Length)) {
1731 assert(LSB > 0 && "Bottom bit must be set");
1732 assert(LSB + Length < BitSize && "Top bit must be set");
1733 Start = 63 - (LSB - 1);
1734 End = 63 - (LSB + Length);
1735 return true;
1736 }
1737
1738 return false;
1739}
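// Worked example: Mask = 0x0000000000ff0000 with BitSize = 64 is a shifted
// mask (LSB = 16, Length = 8), giving Start = 40 and End = 47 in the
// big-endian bit numbering used by RISBG.  The wrap-around mask
// 0xff000000000000ff takes the second branch and yields Start = 56, End = 7.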
1740
1741unsigned SystemZInstrInfo::getFusedCompare(unsigned Opcode,
1742 SystemZII::FusedCompareType Type,
1743 const MachineInstr *MI) const {
1744 switch (Opcode) {
1745 case SystemZ::CHI:
1746 case SystemZ::CGHI:
1747 if (!(MI && isInt<8>(MI->getOperand(1).getImm())))
1748 return 0;
1749 break;
1750 case SystemZ::CLFI:
1751 case SystemZ::CLGFI:
1752 if (!(MI && isUInt<8>(MI->getOperand(1).getImm())))
1753 return 0;
1754 break;
1755 case SystemZ::CL:
1756 case SystemZ::CLG:
1757 if (!STI.hasMiscellaneousExtensions())
1758 return 0;
1759 if (!(MI && MI->getOperand(3).getReg() == 0))
1760 return 0;
1761 break;
1762 }
1763 switch (Type) {
1764 case SystemZII::CompareAndBranch:
1765 switch (Opcode) {
1766 case SystemZ::CR:
1767 return SystemZ::CRJ;
1768 case SystemZ::CGR:
1769 return SystemZ::CGRJ;
1770 case SystemZ::CHI:
1771 return SystemZ::CIJ;
1772 case SystemZ::CGHI:
1773 return SystemZ::CGIJ;
1774 case SystemZ::CLR:
1775 return SystemZ::CLRJ;
1776 case SystemZ::CLGR:
1777 return SystemZ::CLGRJ;
1778 case SystemZ::CLFI:
1779 return SystemZ::CLIJ;
1780 case SystemZ::CLGFI:
1781 return SystemZ::CLGIJ;
1782 default:
1783 return 0;
1784 }
1785 case SystemZII::CompareAndReturn:
1786 switch (Opcode) {
1787 case SystemZ::CR:
1788 return SystemZ::CRBReturn;
1789 case SystemZ::CGR:
1790 return SystemZ::CGRBReturn;
1791 case SystemZ::CHI:
1792 return SystemZ::CIBReturn;
1793 case SystemZ::CGHI:
1794 return SystemZ::CGIBReturn;
1795 case SystemZ::CLR:
1796 return SystemZ::CLRBReturn;
1797 case SystemZ::CLGR:
1798 return SystemZ::CLGRBReturn;
1799 case SystemZ::CLFI:
1800 return SystemZ::CLIBReturn;
1801 case SystemZ::CLGFI:
1802 return SystemZ::CLGIBReturn;
1803 default:
1804 return 0;
1805 }
1806 case SystemZII::CompareAndSibcall:
1807 switch (Opcode) {
1808 case SystemZ::CR:
1809 return SystemZ::CRBCall;
1810 case SystemZ::CGR:
1811 return SystemZ::CGRBCall;
1812 case SystemZ::CHI:
1813 return SystemZ::CIBCall;
1814 case SystemZ::CGHI:
1815 return SystemZ::CGIBCall;
1816 case SystemZ::CLR:
1817 return SystemZ::CLRBCall;
1818 case SystemZ::CLGR:
1819 return SystemZ::CLGRBCall;
1820 case SystemZ::CLFI:
1821 return SystemZ::CLIBCall;
1822 case SystemZ::CLGFI:
1823 return SystemZ::CLGIBCall;
1824 default:
1825 return 0;
1826 }
1827 case SystemZII::CompareAndTrap:
1828 switch (Opcode) {
1829 case SystemZ::CR:
1830 return SystemZ::CRT;
1831 case SystemZ::CGR:
1832 return SystemZ::CGRT;
1833 case SystemZ::CHI:
1834 return SystemZ::CIT;
1835 case SystemZ::CGHI:
1836 return SystemZ::CGIT;
1837 case SystemZ::CLR:
1838 return SystemZ::CLRT;
1839 case SystemZ::CLGR:
1840 return SystemZ::CLGRT;
1841 case SystemZ::CLFI:
1842 return SystemZ::CLFIT;
1843 case SystemZ::CLGFI:
1844 return SystemZ::CLGIT;
1845 case SystemZ::CL:
1846 return SystemZ::CLT;
1847 case SystemZ::CLG:
1848 return SystemZ::CLGT;
1849 default:
1850 return 0;
1851 }
1852 }
1853 return 0;
1854}
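// For example, getFusedCompare(SystemZ::CGHI, SystemZII::CompareAndBranch, MI)
// returns SystemZ::CGIJ only when MI's immediate fits in a signed 8-bit field;
// otherwise it returns 0 and the compare and the branch stay separate.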
1855
1856bool SystemZInstrInfo::
1857prepareCompareSwapOperands(MachineBasicBlock::iterator MBBI) const {
1858 assert(MBBI->isCompare() && MBBI->getOperand(0).isReg() &&
1859 MBBI->getOperand(1).isReg() && !MBBI->mayLoad() &&
1860 "Not a compare reg/reg.");
1861
1862 MachineBasicBlock *MBB = MBBI->getParent();
1863 bool CCLive = true;
1864 SmallVector<MachineInstr *, 4> CCUsers;
1865 for (MachineInstr &MI : llvm::make_range(std::next(MBBI), MBB->end())) {
1866 if (MI.readsRegister(SystemZ::CC, /*TRI=*/nullptr)) {
1867 unsigned Flags = MI.getDesc().TSFlags;
1868 if ((Flags & SystemZII::CCMaskFirst) || (Flags & SystemZII::CCMaskLast))
1869 CCUsers.push_back(&MI);
1870 else
1871 return false;
1872 }
1873 if (MI.definesRegister(SystemZ::CC, /*TRI=*/nullptr)) {
1874 CCLive = false;
1875 break;
1876 }
1877 }
1878 if (CCLive) {
1879 LiveRegUnits LiveRegs(*MBB->getParent()->getSubtarget().getRegisterInfo());
1880 LiveRegs.addLiveOuts(*MBB);
1881 if (!LiveRegs.available(SystemZ::CC))
1882 return false;
1883 }
1884
1885 // Update all CC users.
1886 for (unsigned Idx = 0; Idx < CCUsers.size(); ++Idx) {
1887 unsigned Flags = CCUsers[Idx]->getDesc().TSFlags;
1888 unsigned FirstOpNum = ((Flags & SystemZII::CCMaskFirst) ?
1889 0 : CCUsers[Idx]->getNumExplicitOperands() - 2);
1890 MachineOperand &CCMaskMO = CCUsers[Idx]->getOperand(FirstOpNum + 1);
1891 unsigned NewCCMask = SystemZ::reverseCCMask(CCMaskMO.getImm());
1892 CCMaskMO.setImm(NewCCMask);
1893 }
1894
1895 return true;
1896}
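// Illustration: the forward scan accepts CC users whose mask sits in a fixed
// position (CCMaskFirst or CCMaskLast, e.g. a conditional branch or a
// load-on-condition), gives up on any other CC reader, and stops at the next
// CC definition; only then is it safe to reverse every collected mask.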
1897
1898unsigned SystemZ::reverseCCMask(unsigned CCMask) {
1899 return ((CCMask & SystemZ::CCMASK_CMP_EQ) |
1900 ((CCMask & SystemZ::CCMASK_CMP_GT) ? SystemZ::CCMASK_CMP_LT : 0) |
1901 ((CCMask & SystemZ::CCMASK_CMP_LT) ? SystemZ::CCMASK_CMP_GT : 0) |
1902 (CCMask & SystemZ::CCMASK_CMP_UO));
1903}
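// E.g. a mask of CCMASK_CMP_GT | CCMASK_CMP_EQ ("greater or equal") maps to
// CCMASK_CMP_LT | CCMASK_CMP_EQ ("less or equal"), which is the test needed
// after the two compare operands have been exchanged.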
1904
1905MachineBasicBlock *SystemZ::emitBlockAfter(MachineBasicBlock *MBB) {
1906 MachineFunction &MF = *MBB->getParent();
1907 MachineBasicBlock *NewMBB = MF.CreateMachineBasicBlock(MBB->getBasicBlock());
1908 MF.insert(std::next(MachineFunction::iterator(MBB)), NewMBB);
1909 return NewMBB;
1910}
1911
1912MachineBasicBlock *SystemZ::splitBlockAfter(MachineBasicBlock::iterator MI,
1913 MachineBasicBlock *MBB) {
1914 MachineBasicBlock *NewMBB = emitBlockAfter(MBB);
1915 NewMBB->splice(NewMBB->begin(), MBB,
1916 std::next(MachineBasicBlock::iterator(MI)), MBB->end());
1917 NewMBB->transferSuccessorsAndUpdatePHIs(MBB);
1918 return NewMBB;
1919}
1920
1921MachineBasicBlock *SystemZ::splitBlockBefore(MachineBasicBlock::iterator MI,
1922 MachineBasicBlock *MBB) {
1923 MachineBasicBlock *NewMBB = emitBlockAfter(MBB);
1924 NewMBB->splice(NewMBB->begin(), MBB, MI, MBB->end());
1925 NewMBB->transferSuccessorsAndUpdatePHIs(MBB);
1926 return NewMBB;
1927}
1928
1929unsigned SystemZInstrInfo::getLoadAndTrap(unsigned Opcode) const {
1930 if (!STI.hasLoadAndTrap())
1931 return 0;
1932 switch (Opcode) {
1933 case SystemZ::L:
1934 case SystemZ::LY:
1935 return SystemZ::LAT;
1936 case SystemZ::LG:
1937 return SystemZ::LGAT;
1938 case SystemZ::LFH:
1939 return SystemZ::LFHAT;
1940 case SystemZ::LLGF:
1941 return SystemZ::LLGFAT;
1942 case SystemZ::LLGT:
1943 return SystemZ::LLGTAT;
1944 }
1945 return 0;
1946}
1947
1948void SystemZInstrInfo::loadImmediate(MachineBasicBlock &MBB,
1949 MachineBasicBlock::iterator MBBI,
1950 unsigned Reg, uint64_t Value) const {
1951 DebugLoc DL = MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc();
1952 unsigned Opcode = 0;
1953 if (isInt<16>(Value))
1954 Opcode = SystemZ::LGHI;
1955 else if (SystemZ::isImmLL(Value))
1956 Opcode = SystemZ::LLILL;
1957 else if (SystemZ::isImmLH(Value)) {
1958 Opcode = SystemZ::LLILH;
1959 Value >>= 16;
1960 }
1961 else if (isInt<32>(Value))
1962 Opcode = SystemZ::LGFI;
1963 if (Opcode) {
1964 BuildMI(MBB, MBBI, DL, get(Opcode), Reg).addImm(Value);
1965 return;
1966 }
1967
1968 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
1969 assert(MRI.isSSA() && "Huge values only handled before reg-alloc.");
1970 Register Reg0 = MRI.createVirtualRegister(&SystemZ::GR64BitRegClass);
1971 Register Reg1 = MRI.createVirtualRegister(&SystemZ::GR64BitRegClass);
1972 BuildMI(MBB, MBBI, DL, get(SystemZ::IMPLICIT_DEF), Reg0);
1973 BuildMI(MBB, MBBI, DL, get(SystemZ::IIHF64), Reg1)
1974 .addReg(Reg0).addImm(Value >> 32);
1975 BuildMI(MBB, MBBI, DL, get(SystemZ::IILF64), Reg)
1976 .addReg(Reg1).addImm(Value & ((uint64_t(1) << 32) - 1));
1977}
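// For example, Value = 0x123456789abcdef0 matches none of the short forms
// above, so the fallback emits IIHF64 with the high half 0x12345678 into a
// new virtual register and then IILF64 with the low half 0x9abcdef0 into Reg
// (hence the SSA-only assertion).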
1978
1979bool SystemZInstrInfo::verifyInstruction(const MachineInstr &MI,
1980 StringRef &ErrInfo) const {
1981 const MCInstrDesc &MCID = MI.getDesc();
1982 for (unsigned I = 0, E = MI.getNumOperands(); I != E; ++I) {
1983 if (I >= MCID.getNumOperands())
1984 break;
1985 const MachineOperand &Op = MI.getOperand(I);
1986 const MCOperandInfo &MCOI = MCID.operands()[I];
1987 // Addressing modes have register and immediate operands. Op should be a
1988 // register (or frame index) operand if MCOI.RegClass contains a valid
1989 // register class, or an immediate otherwise.
1990 if (MCOI.OperandType == MCOI::OPERAND_MEMORY &&
1991 ((MCOI.RegClass != -1 && !Op.isReg() && !Op.isFI()) ||
1992 (MCOI.RegClass == -1 && !Op.isImm()))) {
1993 ErrInfo = "Addressing mode operands corrupt!";
1994 return false;
1995 }
1996 }
1997
1998 return true;
1999}
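// E.g. for a load such as "L %r1, 8(%r3,%r2)" the addressing mode contributes
// base and index register operands plus an immediate displacement; the check
// above only verifies that each MCOI::OPERAND_MEMORY slot holds the matching
// kind of MachineOperand (register/frame index vs. immediate), not that the
// displacement value is in range.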
2000
2001bool SystemZInstrInfo::
2002areMemAccessesTriviallyDisjoint(const MachineInstr &MIa,
2003 const MachineInstr &MIb) const {
2004
2005 if (!MIa.hasOneMemOperand() || !MIb.hasOneMemOperand())
2006 return false;
2007
2008 // If mem-operands show that the same address Value is used by both
2009 // instructions, check for non-overlapping offsets and widths. Not
2010 // sure if a register based analysis would be an improvement...
2011
2012 MachineMemOperand *MMOa = *MIa.memoperands_begin();
2013 MachineMemOperand *MMOb = *MIb.memoperands_begin();
2014 const Value *VALa = MMOa->getValue();
2015 const Value *VALb = MMOb->getValue();
2016 bool SameVal = (VALa && VALb && (VALa == VALb));
2017 if (!SameVal) {
2018 const PseudoSourceValue *PSVa = MMOa->getPseudoValue();
2019 const PseudoSourceValue *PSVb = MMOb->getPseudoValue();
2020 if (PSVa && PSVb && (PSVa == PSVb))
2021 SameVal = true;
2022 }
2023 if (SameVal) {
2024 int OffsetA = MMOa->getOffset(), OffsetB = MMOb->getOffset();
2025 LocationSize WidthA = MMOa->getSize(), WidthB = MMOb->getSize();
2026 int LowOffset = OffsetA < OffsetB ? OffsetA : OffsetB;
2027 int HighOffset = OffsetA < OffsetB ? OffsetB : OffsetA;
2028 LocationSize LowWidth = (LowOffset == OffsetA) ? WidthA : WidthB;
2029 if (LowWidth.hasValue() &&
2030 LowOffset + (int)LowWidth.getValue() <= HighOffset)
2031 return true;
2032 }
2033
2034 return false;
2035}
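// Worked example: two 8-byte accesses to the same IR value at offsets 0 and 8
// satisfy LowOffset + LowWidth <= HighOffset (0 + 8 <= 8) and are reported as
// trivially disjoint; with offsets 0 and 4 the low access overlaps the high
// one and the function conservatively returns false.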