LLVM 19.0.0git
MipsInstructionSelector.cpp
Go to the documentation of this file.
1//===- MipsInstructionSelector.cpp ------------------------------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8/// \file
9/// This file implements the targeting of the InstructionSelector class for
10/// Mips.
11/// \todo This should be generated by TableGen.
12//===----------------------------------------------------------------------===//
13
15#include "MipsMachineFunction.h"
17#include "MipsTargetMachine.h"
21#include "llvm/IR/IntrinsicsMips.h"
22
23#define DEBUG_TYPE "mips-isel"
24
25using namespace llvm;
26
27namespace {
28
29#define GET_GLOBALISEL_PREDICATE_BITSET
30#include "MipsGenGlobalISel.inc"
31#undef GET_GLOBALISEL_PREDICATE_BITSET
32
/// Instruction selector for the Mips target: converts generic (GlobalISel)
/// machine instructions into concrete Mips machine instructions.
/// \todo This should be generated by TableGen (see file header).
class MipsInstructionSelector : public InstructionSelector {
public:
  MipsInstructionSelector(const MipsTargetMachine &TM, const MipsSubtarget &STI,
                          const MipsRegisterBankInfo &RBI);

  bool select(MachineInstr &I) override;
  static const char *getName() { return DEBUG_TYPE; }

private:
  /// TableGen-erated selector; tried before the hand-written cases in select().
  bool selectImpl(MachineInstr &I, CodeGenCoverage &CoverageInfo) const;
  /// True iff \p Reg was assigned to the general-purpose register bank.
  bool isRegInGprb(Register Reg, MachineRegisterInfo &MRI) const;
  /// True iff \p Reg was assigned to the floating-point register bank.
  bool isRegInFprb(Register Reg, MachineRegisterInfo &MRI) const;
  bool materialize32BitImm(Register DestReg, APInt Imm,
                           MachineIRBuilder &B) const;
  // NOTE(review): extraction gap — the return type line of the following
  // declaration (and, presumably, a `selectCopy` declaration that the
  // definitions below imply) are missing from this dump; verify upstream.
  getRegClassForTypeOnBank(Register Reg, MachineRegisterInfo &MRI) const;
  unsigned selectLoadStoreOpCode(MachineInstr &I,
  // NOTE(review): extraction gap — the trailing parameter line of
  // selectLoadStoreOpCode (a MachineRegisterInfo parameter per the definition
  // below) is missing from this dump.
  bool buildUnalignedStore(MachineInstr &I, unsigned Opc,
                           MachineOperand &BaseAddr, unsigned Offset,
                           MachineMemOperand *MMO) const;
  bool buildUnalignedLoad(MachineInstr &I, unsigned Opc, Register Dest,
                          MachineOperand &BaseAddr, unsigned Offset,
                          Register TiedDest, MachineMemOperand *MMO) const;

  const MipsTargetMachine &TM;
  const MipsSubtarget &STI;
  const MipsInstrInfo &TII;
  const MipsRegisterInfo &TRI;
  const MipsRegisterBankInfo &RBI;

#define GET_GLOBALISEL_PREDICATES_DECL
#include "MipsGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_DECL

#define GET_GLOBALISEL_TEMPORARIES_DECL
#include "MipsGenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_DECL
};
73
74} // end anonymous namespace
75
76#define GET_GLOBALISEL_IMPL
77#include "MipsGenGlobalISel.inc"
78#undef GET_GLOBALISEL_IMPL
79
// Constructor: caches the subtarget's instruction/register info and wires in
// the TableGen-erated predicate/temporary initializers.
MipsInstructionSelector::MipsInstructionSelector(
    const MipsTargetMachine &TM, const MipsSubtarget &STI,
    const MipsRegisterBankInfo &RBI)
    : TM(TM), STI(STI), TII(*STI.getInstrInfo()), TRI(*STI.getRegisterInfo()),
      RBI(RBI),

// NOTE(review): extraction gap — the GET_GLOBALISEL_PREDICATES_INIT /
// GET_GLOBALISEL_TEMPORARIES_INIT #define/#undef lines that normally bracket
// these two includes are missing from this dump; restore from upstream.
#include "MipsGenGlobalISel.inc"
#include "MipsGenGlobalISel.inc"
{
}
94
95bool MipsInstructionSelector::isRegInGprb(Register Reg,
96 MachineRegisterInfo &MRI) const {
97 return RBI.getRegBank(Reg, MRI, TRI)->getID() == Mips::GPRBRegBankID;
98}
99
100bool MipsInstructionSelector::isRegInFprb(Register Reg,
101 MachineRegisterInfo &MRI) const {
102 return RBI.getRegBank(Reg, MRI, TRI)->getID() == Mips::FPRBRegBankID;
103}
104
105bool MipsInstructionSelector::selectCopy(MachineInstr &I,
106 MachineRegisterInfo &MRI) const {
107 Register DstReg = I.getOperand(0).getReg();
108 if (DstReg.isPhysical())
109 return true;
110
111 const TargetRegisterClass *RC = getRegClassForTypeOnBank(DstReg, MRI);
112 if (!RBI.constrainGenericRegister(DstReg, *RC, MRI)) {
113 LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
114 << " operand\n");
115 return false;
116 }
117 return true;
118}
119
120const TargetRegisterClass *MipsInstructionSelector::getRegClassForTypeOnBank(
121 Register Reg, MachineRegisterInfo &MRI) const {
122 const LLT Ty = MRI.getType(Reg);
123 const unsigned TySize = Ty.getSizeInBits();
124
125 if (isRegInGprb(Reg, MRI)) {
126 assert((Ty.isScalar() || Ty.isPointer()) && TySize == 32 &&
127 "Register class not available for LLT, register bank combination");
128 return &Mips::GPR32RegClass;
129 }
130
131 if (isRegInFprb(Reg, MRI)) {
132 if (Ty.isScalar()) {
133 assert((TySize == 32 || TySize == 64) &&
134 "Register class not available for LLT, register bank combination");
135 if (TySize == 32)
136 return &Mips::FGR32RegClass;
137 return STI.isFP64bit() ? &Mips::FGR64RegClass : &Mips::AFGR64RegClass;
138 }
139 }
140
141 llvm_unreachable("Unsupported register bank.");
142}
143
144bool MipsInstructionSelector::materialize32BitImm(Register DestReg, APInt Imm,
145 MachineIRBuilder &B) const {
146 assert(Imm.getBitWidth() == 32 && "Unsupported immediate size.");
147 // Ori zero extends immediate. Used for values with zeros in high 16 bits.
148 if (Imm.getHiBits(16).isZero()) {
149 MachineInstr *Inst =
150 B.buildInstr(Mips::ORi, {DestReg}, {Register(Mips::ZERO)})
151 .addImm(Imm.getLoBits(16).getLimitedValue());
152 return constrainSelectedInstRegOperands(*Inst, TII, TRI, RBI);
153 }
154 // Lui places immediate in high 16 bits and sets low 16 bits to zero.
155 if (Imm.getLoBits(16).isZero()) {
156 MachineInstr *Inst = B.buildInstr(Mips::LUi, {DestReg}, {})
157 .addImm(Imm.getHiBits(16).getLimitedValue());
158 return constrainSelectedInstRegOperands(*Inst, TII, TRI, RBI);
159 }
160 // ADDiu sign extends immediate. Used for values with 1s in high 17 bits.
161 if (Imm.isSignedIntN(16)) {
162 MachineInstr *Inst =
163 B.buildInstr(Mips::ADDiu, {DestReg}, {Register(Mips::ZERO)})
164 .addImm(Imm.getLoBits(16).getLimitedValue());
165 return constrainSelectedInstRegOperands(*Inst, TII, TRI, RBI);
166 }
167 // Values that cannot be materialized with single immediate instruction.
168 Register LUiReg = B.getMRI()->createVirtualRegister(&Mips::GPR32RegClass);
169 MachineInstr *LUi = B.buildInstr(Mips::LUi, {LUiReg}, {})
170 .addImm(Imm.getHiBits(16).getLimitedValue());
171 MachineInstr *ORi = B.buildInstr(Mips::ORi, {DestReg}, {LUiReg})
172 .addImm(Imm.getLoBits(16).getLimitedValue());
173 if (!constrainSelectedInstRegOperands(*LUi, TII, TRI, RBI))
174 return false;
175 if (!constrainSelectedInstRegOperands(*ORi, TII, TRI, RBI))
176 return false;
177 return true;
178}
179
180/// When I.getOpcode() is returned, we failed to select MIPS instruction opcode.
181unsigned
182MipsInstructionSelector::selectLoadStoreOpCode(MachineInstr &I,
183 MachineRegisterInfo &MRI) const {
184 const Register ValueReg = I.getOperand(0).getReg();
185 const LLT Ty = MRI.getType(ValueReg);
186 const unsigned TySize = Ty.getSizeInBits();
187 const unsigned MemSizeInBytes = (*I.memoperands_begin())->getSize();
188 unsigned Opc = I.getOpcode();
189 const bool isStore = Opc == TargetOpcode::G_STORE;
190
191 if (isRegInGprb(ValueReg, MRI)) {
192 assert(((Ty.isScalar() && TySize == 32) ||
193 (Ty.isPointer() && TySize == 32 && MemSizeInBytes == 4)) &&
194 "Unsupported register bank, LLT, MemSizeInBytes combination");
195 (void)TySize;
196 if (isStore)
197 switch (MemSizeInBytes) {
198 case 4:
199 return Mips::SW;
200 case 2:
201 return Mips::SH;
202 case 1:
203 return Mips::SB;
204 default:
205 return Opc;
206 }
207 else
208 // Unspecified extending load is selected into zeroExtending load.
209 switch (MemSizeInBytes) {
210 case 4:
211 return Mips::LW;
212 case 2:
213 return Opc == TargetOpcode::G_SEXTLOAD ? Mips::LH : Mips::LHu;
214 case 1:
215 return Opc == TargetOpcode::G_SEXTLOAD ? Mips::LB : Mips::LBu;
216 default:
217 return Opc;
218 }
219 }
220
221 if (isRegInFprb(ValueReg, MRI)) {
222 if (Ty.isScalar()) {
223 assert(((TySize == 32 && MemSizeInBytes == 4) ||
224 (TySize == 64 && MemSizeInBytes == 8)) &&
225 "Unsupported register bank, LLT, MemSizeInBytes combination");
226
227 if (MemSizeInBytes == 4)
228 return isStore ? Mips::SWC1 : Mips::LWC1;
229
230 if (STI.isFP64bit())
231 return isStore ? Mips::SDC164 : Mips::LDC164;
232 return isStore ? Mips::SDC1 : Mips::LDC1;
233 }
234
235 if (Ty.isVector()) {
236 assert(STI.hasMSA() && "Vector instructions require target with MSA.");
237 assert((TySize == 128 && MemSizeInBytes == 16) &&
238 "Unsupported register bank, LLT, MemSizeInBytes combination");
239 switch (Ty.getElementType().getSizeInBits()) {
240 case 8:
241 return isStore ? Mips::ST_B : Mips::LD_B;
242 case 16:
243 return isStore ? Mips::ST_H : Mips::LD_H;
244 case 32:
245 return isStore ? Mips::ST_W : Mips::LD_W;
246 case 64:
247 return isStore ? Mips::ST_D : Mips::LD_D;
248 default:
249 return Opc;
250 }
251 }
252 }
253
254 return Opc;
255}
256
257bool MipsInstructionSelector::buildUnalignedStore(
258 MachineInstr &I, unsigned Opc, MachineOperand &BaseAddr, unsigned Offset,
259 MachineMemOperand *MMO) const {
260 MachineInstr *NewInst =
261 BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opc))
262 .add(I.getOperand(0))
263 .add(BaseAddr)
264 .addImm(Offset)
265 .addMemOperand(MMO);
266 if (!constrainSelectedInstRegOperands(*NewInst, TII, TRI, RBI))
267 return false;
268 return true;
269}
270
271bool MipsInstructionSelector::buildUnalignedLoad(
272 MachineInstr &I, unsigned Opc, Register Dest, MachineOperand &BaseAddr,
273 unsigned Offset, Register TiedDest, MachineMemOperand *MMO) const {
274 MachineInstr *NewInst =
275 BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opc))
276 .addDef(Dest)
277 .add(BaseAddr)
278 .addImm(Offset)
279 .addUse(TiedDest)
280 .addMemOperand(*I.memoperands_begin());
281 if (!constrainSelectedInstRegOperands(*NewInst, TII, TRI, RBI))
282 return false;
283 return true;
284}
285
/// Hand-written selection for generic opcodes not handled by the TableGen
/// selector. Returns true on successful selection.
/// NOTE(review): this dump lost multiple source lines (doxygen extraction
/// gaps are flagged inline below); code tokens are otherwise preserved.
bool MipsInstructionSelector::select(MachineInstr &I) {

  MachineBasicBlock &MBB = *I.getParent();
  // NOTE(review): extraction gap — the declarations of `MF` and `MRI`
  // (used throughout this function) are missing from this dump; verify
  // against upstream.

  // Already-selected (non-generic) instructions: only COPY still needs its
  // destination constrained to a bank-appropriate register class.
  if (!isPreISelGenericOpcode(I.getOpcode())) {
    if (I.isCopy())
      return selectCopy(I, MRI);

    return true;
  }

  // G_MUL on the GPR bank is handled manually: MUL's extra operands (3 and 4)
  // are marked dead after constraining.
  if (I.getOpcode() == Mips::G_MUL &&
      isRegInGprb(I.getOperand(0).getReg(), MRI)) {
    MachineInstr *Mul = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::MUL))
                            .add(I.getOperand(0))
                            .add(I.getOperand(1))
                            .add(I.getOperand(2));
    // NOTE(review): extraction gap — the `if (!constrainSelectedInstRegOperands(...))`
    // condition guarding this early return is missing from this dump.
      return false;
    Mul->getOperand(3).setIsDead(true);
    Mul->getOperand(4).setIsDead(true);

    I.eraseFromParent();
    return true;
  }

  // Let the TableGen-erated selector try first.
  if (selectImpl(I, *CoverageInfo))
    return true;

  MachineInstr *MI = nullptr;
  using namespace TargetOpcode;

  switch (I.getOpcode()) {
  case G_UMULH: {
    // Multiply into the ACC64 (HI/LO) pseudo register, then read HI.
    Register PseudoMULTuReg = MRI.createVirtualRegister(&Mips::ACC64RegClass);
    MachineInstr *PseudoMULTu, *PseudoMove;

    PseudoMULTu = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::PseudoMULTu))
                      .addDef(PseudoMULTuReg)
                      .add(I.getOperand(1))
                      .add(I.getOperand(2));
    if (!constrainSelectedInstRegOperands(*PseudoMULTu, TII, TRI, RBI))
      return false;

    PseudoMove = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::PseudoMFHI))
                     .addDef(I.getOperand(0).getReg())
                     .addUse(PseudoMULTuReg);
    if (!constrainSelectedInstRegOperands(*PseudoMove, TII, TRI, RBI))
      return false;

    I.eraseFromParent();
    return true;
  }
  case G_PTR_ADD: {
    // Pointers live in 32-bit GPRs, so pointer arithmetic is plain ADDu.
    MI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::ADDu))
             .add(I.getOperand(0))
             .add(I.getOperand(1))
             .add(I.getOperand(2));
    break;
  }
  case G_INTTOPTR:
  case G_PTRTOINT: {
    // s32 and p0 share a bank/class: a COPY suffices.
    I.setDesc(TII.get(COPY));
    return selectCopy(I, MRI);
  }
  case G_FRAME_INDEX: {
    MI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::ADDiu))
             .add(I.getOperand(0))
             .add(I.getOperand(1))
             .addImm(0);
    break;
  }
  case G_BRJT: {
    // Jump via jump table: scale the index, add the table address, load the
    // destination, then branch indirectly.
    // NOTE(review): extraction gap — the initializer of EntrySize (a
    // jump-table entry size query) is missing from this dump.
    unsigned EntrySize =
    assert(isPowerOf2_32(EntrySize) &&
           "Non-power-of-two jump-table entry size not supported.");

    Register JTIndex = MRI.createVirtualRegister(&Mips::GPR32RegClass);
    MachineInstr *SLL = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::SLL))
                            .addDef(JTIndex)
                            .addUse(I.getOperand(2).getReg())
                            .addImm(Log2_32(EntrySize));
    if (!constrainSelectedInstRegOperands(*SLL, TII, TRI, RBI))
      return false;

    Register DestAddress = MRI.createVirtualRegister(&Mips::GPR32RegClass);
    MachineInstr *ADDu = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::ADDu))
                             .addDef(DestAddress)
                             .addUse(I.getOperand(0).getReg())
                             .addUse(JTIndex);
    if (!constrainSelectedInstRegOperands(*ADDu, TII, TRI, RBI))
      return false;

    Register Dest = MRI.createVirtualRegister(&Mips::GPR32RegClass);
    MachineInstr *LW =
        BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::LW))
            .addDef(Dest)
            .addUse(DestAddress)
            .addJumpTableIndex(I.getOperand(1).getIndex(), MipsII::MO_ABS_LO)
    // NOTE(review): extraction gap — the memory-operand attachment and the
    // `if (!constrainSelectedInstRegOperands(*LW, ...))` guard are missing
    // from this dump.
      return false;

    if (MF.getTarget().isPositionIndependent()) {
      // PIC: the loaded value is GOT-relative; add the global base register.
      Register DestTmp = MRI.createVirtualRegister(&Mips::GPR32RegClass);
      LW->getOperand(0).setReg(DestTmp);
      MachineInstr *ADDu = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::ADDu))
                               .addDef(Dest)
                               .addUse(DestTmp)
      // NOTE(review): extraction gap — the final use operand (presumably the
      // global base register) is missing from this dump.
      if (!constrainSelectedInstRegOperands(*ADDu, TII, TRI, RBI))
        return false;
    }

    // NOTE(review): extraction gap — the `MachineInstr *Branch =` binding for
    // the instruction built below is missing from this dump.
    BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::PseudoIndirectBranch))
        .addUse(Dest);
    if (!constrainSelectedInstRegOperands(*Branch, TII, TRI, RBI))
      return false;

    I.eraseFromParent();
    return true;
  }
  case G_BRINDIRECT: {
    MI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::PseudoIndirectBranch))
             .add(I.getOperand(0));
    break;
  }
  case G_PHI: {
    // Keep the PHI but constrain its destination to a concrete class.
    const Register DestReg = I.getOperand(0).getReg();

    const TargetRegisterClass *DefRC = nullptr;
    if (DestReg.isPhysical())
      DefRC = TRI.getRegClass(DestReg);
    else
      DefRC = getRegClassForTypeOnBank(DestReg, MRI);

    I.setDesc(TII.get(TargetOpcode::PHI));
    return RBI.constrainGenericRegister(DestReg, *DefRC, MRI);
  }
  case G_STORE:
  case G_LOAD:
  case G_ZEXTLOAD:
  case G_SEXTLOAD: {
    auto MMO = *I.memoperands_begin();
    MachineOperand BaseAddr = I.getOperand(1);
    int64_t SignedOffset = 0;
    // Try to fold load/store + G_PTR_ADD + G_CONSTANT
    // %SignedOffset:(s32) = G_CONSTANT i32 16_bit_signed_immediate
    // %Addr:(p0) = G_PTR_ADD %BaseAddr, %SignedOffset
    // %LoadResult/%StoreSrc = load/store %Addr(p0)
    // into:
    // %LoadResult/%StoreSrc = NewOpc %BaseAddr(p0), 16_bit_signed_immediate

    MachineInstr *Addr = MRI.getVRegDef(I.getOperand(1).getReg());
    if (Addr->getOpcode() == G_PTR_ADD) {
      MachineInstr *Offset = MRI.getVRegDef(Addr->getOperand(2).getReg());
      if (Offset->getOpcode() == G_CONSTANT) {
        APInt OffsetValue = Offset->getOperand(1).getCImm()->getValue();
        // Only a 16-bit signed offset fits the addressing mode.
        if (OffsetValue.isSignedIntN(16)) {
          BaseAddr = Addr->getOperand(1);
          SignedOffset = OffsetValue.getSExtValue();
        }
      }
    }

    // Unaligned memory access
    if (MMO->getAlign() < MMO->getSize() &&
        !STI.systemSupportsUnalignedAccess()) {
      // Only 32-bit GPR accesses get the LWL/LWR (SWL/SWR) expansion.
      if (MMO->getSize() != 4 || !isRegInGprb(I.getOperand(0).getReg(), MRI))
        return false;

      if (I.getOpcode() == G_STORE) {
        if (!buildUnalignedStore(I, Mips::SWL, BaseAddr, SignedOffset + 3, MMO))
          return false;
        if (!buildUnalignedStore(I, Mips::SWR, BaseAddr, SignedOffset, MMO))
          return false;
        I.eraseFromParent();
        return true;
      }

      if (I.getOpcode() == G_LOAD) {
        // LWL/LWR each fill part of the result; seed with IMPLICIT_DEF and
        // tie the two halves together through Tmp.
        Register ImplDef = MRI.createVirtualRegister(&Mips::GPR32RegClass);
        BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::IMPLICIT_DEF))
            .addDef(ImplDef);
        Register Tmp = MRI.createVirtualRegister(&Mips::GPR32RegClass);
        if (!buildUnalignedLoad(I, Mips::LWL, Tmp, BaseAddr, SignedOffset + 3,
                                ImplDef, MMO))
          return false;
        if (!buildUnalignedLoad(I, Mips::LWR, I.getOperand(0).getReg(),
                                BaseAddr, SignedOffset, Tmp, MMO))
          return false;
        I.eraseFromParent();
        return true;
      }

      // Unaligned extending loads are not handled.
      return false;
    }

    const unsigned NewOpc = selectLoadStoreOpCode(I, MRI);
    if (NewOpc == I.getOpcode())
      return false;

    MI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(NewOpc))
             .add(I.getOperand(0))
             .add(BaseAddr)
             .addImm(SignedOffset)
             .addMemOperand(MMO);
    break;
  }
  case G_UDIV:
  case G_UREM:
  case G_SDIV:
  case G_SREM: {
    // Divide into HI/LO, then read LO (quotient) or HI (remainder).
    Register HILOReg = MRI.createVirtualRegister(&Mips::ACC64RegClass);
    bool IsSigned = I.getOpcode() == G_SREM || I.getOpcode() == G_SDIV;
    bool IsDiv = I.getOpcode() == G_UDIV || I.getOpcode() == G_SDIV;

    MachineInstr *PseudoDIV, *PseudoMove;
    PseudoDIV = BuildMI(MBB, I, I.getDebugLoc(),
                        TII.get(IsSigned ? Mips::PseudoSDIV : Mips::PseudoUDIV))
                    .addDef(HILOReg)
                    .add(I.getOperand(1))
                    .add(I.getOperand(2));
    if (!constrainSelectedInstRegOperands(*PseudoDIV, TII, TRI, RBI))
      return false;

    PseudoMove = BuildMI(MBB, I, I.getDebugLoc(),
                         TII.get(IsDiv ? Mips::PseudoMFLO : Mips::PseudoMFHI))
                     .addDef(I.getOperand(0).getReg())
                     .addUse(HILOReg);
    if (!constrainSelectedInstRegOperands(*PseudoMove, TII, TRI, RBI))
      return false;

    I.eraseFromParent();
    return true;
  }
  case G_SELECT: {
    // Handle operands with pointer type.
    MI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::MOVN_I_I))
             .add(I.getOperand(0))
             .add(I.getOperand(2))
             .add(I.getOperand(1))
             .add(I.getOperand(3));
    break;
  }
  case G_UNMERGE_VALUES: {
    // Only the f64 -> (s32, s32) split is supported.
    if (I.getNumOperands() != 3)
      return false;
    Register Src = I.getOperand(2).getReg();
    Register Lo = I.getOperand(0).getReg();
    Register Hi = I.getOperand(1).getReg();
    if (!isRegInFprb(Src, MRI) ||
        !(isRegInGprb(Lo, MRI) && isRegInGprb(Hi, MRI)))
      return false;

    unsigned Opcode =
        STI.isFP64bit() ? Mips::ExtractElementF64_64 : Mips::ExtractElementF64;

    MachineInstr *ExtractLo = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Opcode))
                                  .addDef(Lo)
                                  .addUse(Src)
                                  .addImm(0);
    if (!constrainSelectedInstRegOperands(*ExtractLo, TII, TRI, RBI))
      return false;

    MachineInstr *ExtractHi = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Opcode))
                                  .addDef(Hi)
                                  .addUse(Src)
                                  .addImm(1);
    if (!constrainSelectedInstRegOperands(*ExtractHi, TII, TRI, RBI))
      return false;

    I.eraseFromParent();
    return true;
  }
  case G_IMPLICIT_DEF: {
    Register Dst = I.getOperand(0).getReg();
    MI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::IMPLICIT_DEF))
             .addDef(Dst);

    // Set class based on register bank, there can be fpr and gpr implicit def.
    MRI.setRegClass(Dst, getRegClassForTypeOnBank(Dst, MRI));
    break;
  }
  case G_CONSTANT: {
    // NOTE(review): extraction gap — the declaration of the MachineIRBuilder
    // `B` used below is missing from this dump.
    if (!materialize32BitImm(I.getOperand(0).getReg(),
                             I.getOperand(1).getCImm()->getValue(), B))
      return false;

    I.eraseFromParent();
    return true;
  }
  case G_FCONSTANT: {
    // Materialize the bit pattern in GPR(s), then move into the FPR.
    const APFloat &FPimm = I.getOperand(1).getFPImm()->getValueAPF();
    APInt APImm = FPimm.bitcastToAPInt();
    unsigned Size = MRI.getType(I.getOperand(0).getReg()).getSizeInBits();

    if (Size == 32) {
      Register GPRReg = MRI.createVirtualRegister(&Mips::GPR32RegClass);
      // NOTE(review): extraction gap — the declaration of builder `B` is
      // missing from this dump.
      if (!materialize32BitImm(GPRReg, APImm, B))
        return false;

      // NOTE(review): extraction gap — the `MTC1` binding for the result of
      // this buildInstr is missing from this dump.
      B.buildInstr(Mips::MTC1, {I.getOperand(0).getReg()}, {GPRReg});
      if (!MTC1.constrainAllUses(TII, TRI, RBI))
        return false;
    }
    if (Size == 64) {
      Register GPRRegHigh = MRI.createVirtualRegister(&Mips::GPR32RegClass);
      Register GPRRegLow = MRI.createVirtualRegister(&Mips::GPR32RegClass);
      // NOTE(review): extraction gap — the declaration of builder `B` is
      // missing from this dump.
      if (!materialize32BitImm(GPRRegHigh, APImm.getHiBits(32).trunc(32), B))
        return false;
      if (!materialize32BitImm(GPRRegLow, APImm.getLoBits(32).trunc(32), B))
        return false;

      MachineInstrBuilder PairF64 = B.buildInstr(
          STI.isFP64bit() ? Mips::BuildPairF64_64 : Mips::BuildPairF64,
          {I.getOperand(0).getReg()}, {GPRRegLow, GPRRegHigh});
      if (!PairF64.constrainAllUses(TII, TRI, RBI))
        return false;
    }

    I.eraseFromParent();
    return true;
  }
  case G_FABS: {
    unsigned Size = MRI.getType(I.getOperand(0).getReg()).getSizeInBits();
    unsigned FABSOpcode =
        Size == 32 ? Mips::FABS_S
        : STI.isFP64bit() ? Mips::FABS_D64 : Mips::FABS_D32;
    MI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(FABSOpcode))
             .add(I.getOperand(0))
             .add(I.getOperand(1));
    break;
  }
  case G_FPTOSI: {
    unsigned FromSize = MRI.getType(I.getOperand(1).getReg()).getSizeInBits();
    unsigned ToSize = MRI.getType(I.getOperand(0).getReg()).getSizeInBits();
    (void)ToSize;
    assert((ToSize == 32) && "Unsupported integer size for G_FPTOSI");
    assert((FromSize == 32 || FromSize == 64) &&
           "Unsupported floating point size for G_FPTOSI");

    unsigned Opcode;
    if (FromSize == 32)
      Opcode = Mips::TRUNC_W_S;
    else
      Opcode = STI.isFP64bit() ? Mips::TRUNC_W_D64 : Mips::TRUNC_W_D32;
    // Truncate inside the FPR bank, then move the integer result to a GPR.
    Register ResultInFPR = MRI.createVirtualRegister(&Mips::FGR32RegClass);
    MachineInstr *Trunc = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Opcode))
                              .addDef(ResultInFPR)
                              .addUse(I.getOperand(1).getReg());
    if (!constrainSelectedInstRegOperands(*Trunc, TII, TRI, RBI))
      return false;

    MachineInstr *Move = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::MFC1))
                             .addDef(I.getOperand(0).getReg())
                             .addUse(ResultInFPR);
    if (!constrainSelectedInstRegOperands(*Move, TII, TRI, RBI))
      return false;

    I.eraseFromParent();
    return true;
  }
  case G_GLOBAL_VALUE: {
    const llvm::GlobalValue *GVal = I.getOperand(1).getGlobal();
    if (MF.getTarget().isPositionIndependent()) {
      // PIC: load the address from the GOT, then (for local linkage) add the
      // low part of the symbol offset.
      MachineInstr *LWGOT = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::LW))
                                .addDef(I.getOperand(0).getReg())
                                // NOTE(review): extraction gap — intermediate
                                // operand line(s), presumably the GOT base
                                // register use, are missing from this dump.
                                .addGlobalAddress(GVal);
      // Global Values that don't have local linkage are handled differently
      // when they are part of call sequence. MipsCallLowering::lowerCall
      // creates G_GLOBAL_VALUE instruction as part of call sequence and adds
      // MO_GOT_CALL flag when Callee doesn't have local linkage.
      if (I.getOperand(1).getTargetFlags() == MipsII::MO_GOT_CALL)
      // NOTE(review): extraction gap — both branches of this if/else
      // (target-flag assignments on LWGOT) are missing from this dump.
      else
      LWGOT->addMemOperand(
      // NOTE(review): extraction gap — the memory-operand arguments of
      // addMemOperand are missing from this dump.
      if (!constrainSelectedInstRegOperands(*LWGOT, TII, TRI, RBI))
        return false;

      if (GVal->hasLocalLinkage()) {
        Register LWGOTDef = MRI.createVirtualRegister(&Mips::GPR32RegClass);
        LWGOT->getOperand(0).setReg(LWGOTDef);

        MachineInstr *ADDiu =
            BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::ADDiu))
                .addDef(I.getOperand(0).getReg())
                .addReg(LWGOTDef)
                .addGlobalAddress(GVal);
        // NOTE(review): extraction gap — a target-flag assignment on ADDiu is
        // missing from this dump.
        if (!constrainSelectedInstRegOperands(*ADDiu, TII, TRI, RBI))
          return false;
      }
    } else {
      // Non-PIC: classic LUi (high half) + ADDiu (low half) address formation.
      Register LUiReg = MRI.createVirtualRegister(&Mips::GPR32RegClass);

      MachineInstr *LUi = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::LUi))
                              .addDef(LUiReg)
                              .addGlobalAddress(GVal);
      // NOTE(review): extraction gap — a target-flag assignment on LUi is
      // missing from this dump.
      if (!constrainSelectedInstRegOperands(*LUi, TII, TRI, RBI))
        return false;

      MachineInstr *ADDiu =
          BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::ADDiu))
              .addDef(I.getOperand(0).getReg())
              .addUse(LUiReg)
              .addGlobalAddress(GVal);
      // NOTE(review): extraction gap — a target-flag assignment on ADDiu is
      // missing from this dump.
      if (!constrainSelectedInstRegOperands(*ADDiu, TII, TRI, RBI))
        return false;
    }
    I.eraseFromParent();
    return true;
  }
  case G_JUMP_TABLE: {
    if (MF.getTarget().isPositionIndependent()) {
      MI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::LW))
               .addDef(I.getOperand(0).getReg())
               // NOTE(review): extraction gap — intermediate operand line(s)
               // are missing from this dump.
               .addJumpTableIndex(I.getOperand(1).getIndex(), MipsII::MO_GOT)
               // NOTE(review): extraction gap — the memory-operand arguments
               // preceding Align(4) are missing from this dump.
               Align(4)));
    } else {
      MI =
          BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::LUi))
              .addDef(I.getOperand(0).getReg())
              .addJumpTableIndex(I.getOperand(1).getIndex(), MipsII::MO_ABS_HI);
    }
    break;
  }
  case G_ICMP: {
    // Lower the compare to a short sequence of SLT-family instructions,
    // collected first and emitted afterwards.
    struct Instr {
      unsigned Opcode;
      // NOTE(review): extraction gap — the Def/LHS/RHS member declarations
      // (implied by the constructor initializer list) are missing from this
      // dump.
      Instr(unsigned Opcode, Register Def, Register LHS, Register RHS)
          : Opcode(Opcode), Def(Def), LHS(LHS), RHS(RHS){};

      // SLTiu and XORi take their right operand as an immediate.
      bool hasImm() const {
        if (Opcode == Mips::SLTiu || Opcode == Mips::XORi)
          return true;
        return false;
      }
    };

    // NOTE(review): extraction gap — the declaration of the `Instructions`
    // container used below is missing from this dump.
    Register ICMPReg = I.getOperand(0).getReg();
    Register Temp = MRI.createVirtualRegister(&Mips::GPR32RegClass);
    Register LHS = I.getOperand(2).getReg();
    Register RHS = I.getOperand(3).getReg();
    // NOTE(review): extraction gap — the `CmpInst::Predicate Cond =` left-hand
    // side of this initialization is missing from this dump.
        static_cast<CmpInst::Predicate>(I.getOperand(1).getPredicate());

    switch (Cond) {
    case CmpInst::ICMP_EQ: // LHS == RHS -> (LHS ^ RHS) < 1
      Instructions.emplace_back(Mips::XOR, Temp, LHS, RHS);
      Instructions.emplace_back(Mips::SLTiu, ICMPReg, Temp, 1);
      break;
    case CmpInst::ICMP_NE: // LHS != RHS -> 0 < (LHS ^ RHS)
      Instructions.emplace_back(Mips::XOR, Temp, LHS, RHS);
      Instructions.emplace_back(Mips::SLTu, ICMPReg, Mips::ZERO, Temp);
      break;
    case CmpInst::ICMP_UGT: // LHS > RHS -> RHS < LHS
      Instructions.emplace_back(Mips::SLTu, ICMPReg, RHS, LHS);
      break;
    case CmpInst::ICMP_UGE: // LHS >= RHS -> !(LHS < RHS)
      Instructions.emplace_back(Mips::SLTu, Temp, LHS, RHS);
      Instructions.emplace_back(Mips::XORi, ICMPReg, Temp, 1);
      break;
    case CmpInst::ICMP_ULT: // LHS < RHS -> LHS < RHS
      Instructions.emplace_back(Mips::SLTu, ICMPReg, LHS, RHS);
      break;
    case CmpInst::ICMP_ULE: // LHS <= RHS -> !(RHS < LHS)
      Instructions.emplace_back(Mips::SLTu, Temp, RHS, LHS);
      Instructions.emplace_back(Mips::XORi, ICMPReg, Temp, 1);
      break;
    case CmpInst::ICMP_SGT: // LHS > RHS -> RHS < LHS
      Instructions.emplace_back(Mips::SLT, ICMPReg, RHS, LHS);
      break;
    case CmpInst::ICMP_SGE: // LHS >= RHS -> !(LHS < RHS)
      Instructions.emplace_back(Mips::SLT, Temp, LHS, RHS);
      Instructions.emplace_back(Mips::XORi, ICMPReg, Temp, 1);
      break;
    case CmpInst::ICMP_SLT: // LHS < RHS -> LHS < RHS
      Instructions.emplace_back(Mips::SLT, ICMPReg, LHS, RHS);
      break;
    case CmpInst::ICMP_SLE: // LHS <= RHS -> !(RHS < LHS)
      Instructions.emplace_back(Mips::SLT, Temp, RHS, LHS);
      Instructions.emplace_back(Mips::XORi, ICMPReg, Temp, 1);
      break;
    default:
      return false;
    }

    // NOTE(review): extraction gap — the declaration of builder `B` used
    // below is missing from this dump.
    for (const struct Instr &Instruction : Instructions) {
      MachineInstrBuilder MIB = B.buildInstr(
          Instruction.Opcode, {Instruction.Def}, {Instruction.LHS});

      if (Instruction.hasImm())
        MIB.addImm(Instruction.RHS);
      else
        MIB.addUse(Instruction.RHS);

      if (!MIB.constrainAllUses(TII, TRI, RBI))
        return false;
    }

    I.eraseFromParent();
    return true;
  }
  case G_FCMP: {
    // Map the IR predicate to a Mips FP condition code; predicates that are
    // the logical negation of the chosen code invert the final move.
    unsigned MipsFCMPCondCode;
    bool isLogicallyNegated;
    switch (CmpInst::Predicate Cond = static_cast<CmpInst::Predicate>(
                I.getOperand(1).getPredicate())) {
    case CmpInst::FCMP_UNO: // Unordered
    case CmpInst::FCMP_ORD: // Ordered (OR)
      MipsFCMPCondCode = Mips::FCOND_UN;
      isLogicallyNegated = Cond != CmpInst::FCMP_UNO;
      break;
    case CmpInst::FCMP_OEQ: // Equal
    case CmpInst::FCMP_UNE: // Not Equal (NEQ)
      MipsFCMPCondCode = Mips::FCOND_OEQ;
      isLogicallyNegated = Cond != CmpInst::FCMP_OEQ;
      break;
    case CmpInst::FCMP_UEQ: // Unordered or Equal
    case CmpInst::FCMP_ONE: // Ordered or Greater Than or Less Than (OGL)
      MipsFCMPCondCode = Mips::FCOND_UEQ;
      isLogicallyNegated = Cond != CmpInst::FCMP_UEQ;
      break;
    case CmpInst::FCMP_OLT: // Ordered or Less Than
    case CmpInst::FCMP_UGE: // Unordered or Greater Than or Equal (UGE)
      MipsFCMPCondCode = Mips::FCOND_OLT;
      isLogicallyNegated = Cond != CmpInst::FCMP_OLT;
      break;
    case CmpInst::FCMP_ULT: // Unordered or Less Than
    case CmpInst::FCMP_OGE: // Ordered or Greater Than or Equal (OGE)
      MipsFCMPCondCode = Mips::FCOND_ULT;
      isLogicallyNegated = Cond != CmpInst::FCMP_ULT;
      break;
    case CmpInst::FCMP_OLE: // Ordered or Less Than or Equal
    case CmpInst::FCMP_UGT: // Unordered or Greater Than (UGT)
      MipsFCMPCondCode = Mips::FCOND_OLE;
      isLogicallyNegated = Cond != CmpInst::FCMP_OLE;
      break;
    case CmpInst::FCMP_ULE: // Unordered or Less Than or Equal
    case CmpInst::FCMP_OGT: // Ordered or Greater Than (OGT)
      MipsFCMPCondCode = Mips::FCOND_ULE;
      isLogicallyNegated = Cond != CmpInst::FCMP_ULE;
      break;
    default:
      return false;
    }

    // Default compare result in gpr register will be `true`.
    // We will move `false` (MIPS::Zero) to gpr result when fcmp gives false
    // using MOVF_I. When original predicate (Cond) is logically negated
    // MipsFCMPCondCode, result is inverted i.e. MOVT_I is used.
    unsigned MoveOpcode = isLogicallyNegated ? Mips::MOVT_I : Mips::MOVF_I;

    Register TrueInReg = MRI.createVirtualRegister(&Mips::GPR32RegClass);
    BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::ADDiu))
        .addDef(TrueInReg)
        .addUse(Mips::ZERO)
        .addImm(1);

    unsigned Size = MRI.getType(I.getOperand(2).getReg()).getSizeInBits();
    unsigned FCMPOpcode =
        Size == 32 ? Mips::FCMP_S32
        : STI.isFP64bit() ? Mips::FCMP_D64 : Mips::FCMP_D32;
    MachineInstr *FCMP = BuildMI(MBB, I, I.getDebugLoc(), TII.get(FCMPOpcode))
                             .addUse(I.getOperand(2).getReg())
                             .addUse(I.getOperand(3).getReg())
                             .addImm(MipsFCMPCondCode);
    if (!constrainSelectedInstRegOperands(*FCMP, TII, TRI, RBI))
      return false;

    MachineInstr *Move = BuildMI(MBB, I, I.getDebugLoc(), TII.get(MoveOpcode))
                             .addDef(I.getOperand(0).getReg())
                             .addUse(Mips::ZERO)
                             .addUse(Mips::FCC0)
                             .addUse(TrueInReg);
    if (!constrainSelectedInstRegOperands(*Move, TII, TRI, RBI))
      return false;

    I.eraseFromParent();
    return true;
  }
  case G_FENCE: {
    MI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::SYNC)).addImm(0);
    break;
  }
  case G_VASTART: {
    // Store the address of the first vararg slot into the va_list object.
    MipsFunctionInfo *FuncInfo = MF.getInfo<MipsFunctionInfo>();
    int FI = FuncInfo->getVarArgsFrameIndex();

    Register LeaReg = MRI.createVirtualRegister(&Mips::GPR32RegClass);
    MachineInstr *LEA_ADDiu =
        BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::LEA_ADDiu))
            .addDef(LeaReg)
            .addFrameIndex(FI)
            .addImm(0);
    if (!constrainSelectedInstRegOperands(*LEA_ADDiu, TII, TRI, RBI))
      return false;

    MachineInstr *Store = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::SW))
                              .addUse(LeaReg)
                              .addUse(I.getOperand(0).getReg())
                              .addImm(0);
    if (!constrainSelectedInstRegOperands(*Store, TII, TRI, RBI))
      return false;

    I.eraseFromParent();
    return true;
  }
  default:
    return false;
  }

  I.eraseFromParent();
  // NOTE(review): extraction gap — the final return statement (presumably
  // constraining `MI` and returning the result) is missing from this dump.
}
926
namespace llvm {
// Factory used by the Mips target to construct the GlobalISel instruction
// selector.
// NOTE(review): extraction gap — the function's signature lines (name,
// return type, leading parameter) are missing from this dump; only a
// trailing parameter and the body survived.
                                   MipsSubtarget &Subtarget,
  return new MipsInstructionSelector(TM, Subtarget, RBI);
}
} // end namespace llvm
unsigned const MachineRegisterInfo * MRI
static bool selectCopy(MachineInstr &I, const TargetInstrInfo &TII, MachineRegisterInfo &MRI, const TargetRegisterInfo &TRI, const RegisterBankInfo &RBI)
MachineBasicBlock & MBB
static bool isStore(int Opcode)
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
#define LLVM_DEBUG(X)
Definition: Debug.h:101
uint64_t Addr
uint64_t Size
#define DEBUG_TYPE
const HexagonInstrInfo * TII
IRTranslator LLVM IR MI
#define I(x, y, z)
Definition: MD5.cpp:58
This file declares the MachineIRBuilder class.
unsigned const TargetRegisterInfo * TRI
static unsigned getReg(const MCDisassembler *D, unsigned RC, unsigned RegNo)
#define GET_GLOBALISEL_PREDICATES_INIT
#define GET_GLOBALISEL_TEMPORARIES_INIT
This file declares the targeting of the RegisterBankInfo class for Mips.
const char LLVMTargetMachineRef TM
static StringRef getName(Value *V)
const SmallVectorImpl< MachineOperand > & Cond
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
Value * RHS
Value * LHS
BinaryOperator * Mul
static unsigned getSize(unsigned Kind)
support::ulittle16_t & Lo
Definition: aarch32.cpp:206
support::ulittle16_t & Hi
Definition: aarch32.cpp:205
APInt bitcastToAPInt() const
Definition: APFloat.h:1210
Class for arbitrary precision integers.
Definition: APInt.h:76
APInt getLoBits(unsigned numBits) const
Compute an APInt containing numBits lowbits from this APInt.
Definition: APInt.cpp:613
APInt getHiBits(unsigned numBits) const
Compute an APInt containing numBits highbits from this APInt.
Definition: APInt.cpp:608
APInt trunc(unsigned width) const
Truncate to new width.
Definition: APInt.cpp:906
bool isSignedIntN(unsigned N) const
Check if this APInt has an N-bits signed integer value.
Definition: APInt.h:413
int64_t getSExtValue() const
Get sign extended value.
Definition: APInt.h:1507
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
Definition: InstrTypes.h:780
@ FCMP_OEQ
0 0 0 1 True if ordered and equal
Definition: InstrTypes.h:783
@ ICMP_SLT
signed less than
Definition: InstrTypes.h:809
@ ICMP_SLE
signed less or equal
Definition: InstrTypes.h:810
@ FCMP_OLT
0 1 0 0 True if ordered and less than
Definition: InstrTypes.h:786
@ FCMP_ULE
1 1 0 1 True if unordered, less than, or equal
Definition: InstrTypes.h:795
@ FCMP_OGT
0 0 1 0 True if ordered and greater than
Definition: InstrTypes.h:784
@ FCMP_OGE
0 0 1 1 True if ordered and greater than or equal
Definition: InstrTypes.h:785
@ ICMP_UGE
unsigned greater or equal
Definition: InstrTypes.h:804
@ ICMP_UGT
unsigned greater than
Definition: InstrTypes.h:803
@ ICMP_SGT
signed greater than
Definition: InstrTypes.h:807
@ FCMP_ULT
1 1 0 0 True if unordered or less than
Definition: InstrTypes.h:794
@ FCMP_ONE
0 1 1 0 True if ordered and operands are unequal
Definition: InstrTypes.h:788
@ FCMP_UEQ
1 0 0 1 True if unordered or equal
Definition: InstrTypes.h:791
@ ICMP_ULT
unsigned less than
Definition: InstrTypes.h:805
@ FCMP_UGT
1 0 1 0 True if unordered or greater than
Definition: InstrTypes.h:792
@ FCMP_OLE
0 1 0 1 True if ordered and less than or equal
Definition: InstrTypes.h:787
@ FCMP_ORD
0 1 1 1 True if ordered (no nans)
Definition: InstrTypes.h:789
@ ICMP_EQ
equal
Definition: InstrTypes.h:801
@ ICMP_NE
not equal
Definition: InstrTypes.h:802
@ ICMP_SGE
signed greater or equal
Definition: InstrTypes.h:808
@ FCMP_UNE
1 1 1 0 True if unordered or not equal
Definition: InstrTypes.h:796
@ ICMP_ULE
unsigned less or equal
Definition: InstrTypes.h:806
@ FCMP_UGE
1 0 1 1 True if unordered, greater than, or equal
Definition: InstrTypes.h:793
@ FCMP_UNO
1 0 0 0 True if unordered: isnan(X) | isnan(Y)
Definition: InstrTypes.h:790
bool hasLocalLinkage() const
Definition: GlobalValue.h:527
virtual bool select(MachineInstr &I)=0
Select the (possibly generic) instruction I to only use target-specific opcodes.
constexpr bool isScalar() const
Definition: LowLevelType.h:139
constexpr bool isVector() const
Definition: LowLevelType.h:147
constexpr TypeSize getSizeInBits() const
Returns the total size of the type. Must only be called on sized types.
Definition: LowLevelType.h:185
constexpr bool isPointer() const
Definition: LowLevelType.h:141
constexpr LLT getElementType() const
Returns the vector's element type. Only valid for vector types.
Definition: LowLevelType.h:282
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, uint64_t s, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
const DataLayout & getDataLayout() const
Return the DataLayout attached to the Module associated to this MF.
const LLVMTargetMachine & getTarget() const
getTarget - Return the target machine this machine code is compiled with
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do...
const MachineJumpTableInfo * getJumpTableInfo() const
getJumpTableInfo - Return the jump table info object for the current function.
Helper class to build MachineInstr.
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & add(const MachineOperand &MO) const
const MachineInstrBuilder & addFrameIndex(int Idx) const
const MachineInstrBuilder & addGlobalAddress(const GlobalValue *GV, int64_t Offset=0, unsigned TargetFlags=0) const
const MachineInstrBuilder & addReg(Register RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
bool constrainAllUses(const TargetInstrInfo &TII, const TargetRegisterInfo &TRI, const RegisterBankInfo &RBI) const
const MachineInstrBuilder & addJumpTableIndex(unsigned Idx, unsigned TargetFlags=0) const
const MachineInstrBuilder & addUse(Register RegNo, unsigned Flags=0, unsigned SubReg=0) const
Add a virtual register use operand.
const MachineInstrBuilder & addMemOperand(MachineMemOperand *MMO) const
const MachineInstrBuilder & addDef(Register RegNo, unsigned Flags=0, unsigned SubReg=0) const
Add a virtual register definition operand.
Representation of each machine instruction.
Definition: MachineInstr.h:68
const MachineOperand & getOperand(unsigned i) const
Definition: MachineInstr.h:553
void addMemOperand(MachineFunction &MF, MachineMemOperand *MO)
Add a MachineMemOperand to the machine instruction.
unsigned getEntrySize(const DataLayout &TD) const
getEntrySize - Return the size of each entry in the jump table.
A description of a memory reference used in the backend.
uint64_t getSize() const
Return the size in bytes of the memory reference.
@ MOLoad
The memory access reads data.
Align getAlign() const
Return the minimum known alignment in bytes of the actual memory reference.
MachineOperand class - Representation of each machine instruction operand.
void setReg(Register Reg)
Change the register this operand corresponds to.
void setTargetFlags(unsigned F)
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
MipsFunctionInfo - This class is derived from MachineFunction private Mips target-specific informatio...
Register getGlobalBaseRegForGlobalISel(MachineFunction &MF)
This class provides the information for the target register banks.
Wrapper class representing virtual and physical registers.
Definition: Register.h:19
constexpr bool isPhysical() const
Return true if the specified register number is in the physical register namespace.
Definition: Register.h:95
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1209
bool isPositionIndependent() const
Value * getOperand(unsigned i) const
Definition: User.h:169
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
@ MO_GOT_CALL
MO_GOT_CALL - Represents the offset into the global offset table at which the address of a call site ...
Definition: MipsBaseInfo.h:44
@ MO_GOT
MO_GOT - Represents the offset into the global offset table at which the address the relocation entry...
Definition: MipsBaseInfo.h:38
@ MO_ABS_HI
MO_ABS_HI/LO - Represents the hi or low part of an absolute symbol address.
Definition: MipsBaseInfo.h:52
bool hasImm(uint64_t TSFlags)
Definition: X86BaseInfo.h:897
NodeAddr< InstrNode * > Instr
Definition: RDFGraph.h:389
NodeAddr< DefNode * > Def
Definition: RDFGraph.h:384
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
@ Offset
Definition: DWP.cpp:456
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
bool constrainSelectedInstRegOperands(MachineInstr &I, const TargetInstrInfo &TII, const TargetRegisterInfo &TRI, const RegisterBankInfo &RBI)
Mutate the newly-selected instruction I to constrain its (possibly generic) virtual register operands...
Definition: Utils.cpp:153
bool isPreISelGenericOpcode(unsigned Opcode)
Check whether the given Opcode is a generic opcode that is not supposed to appear after ISel.
Definition: TargetOpcodes.h:30
unsigned Log2_32(uint32_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
Definition: MathExtras.h:313
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
Definition: MathExtras.h:264
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:163
InstructionSelector * createMipsInstructionSelector(const MipsTargetMachine &, MipsSubtarget &, MipsRegisterBankInfo &)
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition: Alignment.h:39
This class contains a discriminated union of information about pointers in memory operands,...
static MachinePointerInfo getGOT(MachineFunction &MF)
Return a MachinePointerInfo record that refers to a GOT entry.