LLVM 20.0.0git
MipsFastISel.cpp
1//===- MipsFastISel.cpp - Mips FastISel implementation --------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8///
9/// \file
10/// This file defines the MIPS-specific support for the FastISel class.
11/// Some of the target-specific code is generated by tablegen in the file
12/// MipsGenFastISel.inc, which is #included here.
13///
14//===----------------------------------------------------------------------===//
15
18#include "MipsCCState.h"
19#include "MipsISelLowering.h"
20#include "MipsInstrInfo.h"
21#include "MipsMachineFunction.h"
22#include "MipsSubtarget.h"
23#include "MipsTargetMachine.h"
24#include "llvm/ADT/APInt.h"
25#include "llvm/ADT/ArrayRef.h"
26#include "llvm/ADT/DenseMap.h"
42#include "llvm/IR/Attributes.h"
43#include "llvm/IR/CallingConv.h"
44#include "llvm/IR/Constant.h"
45#include "llvm/IR/Constants.h"
46#include "llvm/IR/DataLayout.h"
47#include "llvm/IR/Function.h"
49#include "llvm/IR/GlobalValue.h"
51#include "llvm/IR/InstrTypes.h"
52#include "llvm/IR/Instruction.h"
55#include "llvm/IR/Operator.h"
56#include "llvm/IR/Type.h"
57#include "llvm/IR/User.h"
58#include "llvm/IR/Value.h"
59#include "llvm/MC/MCContext.h"
60#include "llvm/MC/MCInstrDesc.h"
62#include "llvm/MC/MCSymbol.h"
65#include "llvm/Support/Debug.h"
69#include <algorithm>
70#include <array>
71#include <cassert>
72#include <cstdint>
73
74#define DEBUG_TYPE "mips-fastisel"
75
76using namespace llvm;
77
78extern cl::opt<bool> EmitJalrReloc;
79
80namespace {
81
82class MipsFastISel final : public FastISel {
83
84 // All possible address modes.
85 class Address {
86 public:
87 using BaseKind = enum { RegBase, FrameIndexBase };
88
89 private:
90 BaseKind Kind = RegBase;
91 union {
92 unsigned Reg;
93 int FI;
94 } Base;
95
96 int64_t Offset = 0;
97
98 const GlobalValue *GV = nullptr;
99
100 public:
101 // Innocuous defaults for our address.
102 Address() { Base.Reg = 0; }
103
104 void setKind(BaseKind K) { Kind = K; }
105 BaseKind getKind() const { return Kind; }
106 bool isRegBase() const { return Kind == RegBase; }
107 bool isFIBase() const { return Kind == FrameIndexBase; }
108
109 void setReg(unsigned Reg) {
110 assert(isRegBase() && "Invalid base register access!");
111 Base.Reg = Reg;
112 }
113
114 unsigned getReg() const {
115 assert(isRegBase() && "Invalid base register access!");
116 return Base.Reg;
117 }
118
119 void setFI(unsigned FI) {
120 assert(isFIBase() && "Invalid base frame index access!");
121 Base.FI = FI;
122 }
123
124 unsigned getFI() const {
125 assert(isFIBase() && "Invalid base frame index access!");
126 return Base.FI;
127 }
128
129 void setOffset(int64_t Offset_) { Offset = Offset_; }
130 int64_t getOffset() const { return Offset; }
131 void setGlobalValue(const GlobalValue *G) { GV = G; }
132 const GlobalValue *getGlobalValue() { return GV; }
133 };
134
135 /// Subtarget - Keep a pointer to the MipsSubtarget around so that we can
136 /// make the right decision when generating code for different targets.
137 const TargetMachine &TM;
138 const MipsSubtarget *Subtarget;
139 const TargetInstrInfo &TII;
140 const TargetLowering &TLI;
141 MipsFunctionInfo *MFI;
142
143 // Convenience variables to avoid some queries.
144 LLVMContext *Context;
145
146 bool fastLowerArguments() override;
147 bool fastLowerCall(CallLoweringInfo &CLI) override;
148 bool fastLowerIntrinsicCall(const IntrinsicInst *II) override;
149
150 bool UnsupportedFPMode; // Allow fast-isel to proceed and simply not handle
151 // floating point, rather than rejecting fast-isel
152 // in other situations.
153
154private:
155 // Selection routines.
156 bool selectLogicalOp(const Instruction *I);
157 bool selectLoad(const Instruction *I);
158 bool selectStore(const Instruction *I);
159 bool selectBranch(const Instruction *I);
160 bool selectSelect(const Instruction *I);
161 bool selectCmp(const Instruction *I);
162 bool selectFPExt(const Instruction *I);
163 bool selectFPTrunc(const Instruction *I);
164 bool selectFPToInt(const Instruction *I, bool IsSigned);
165 bool selectRet(const Instruction *I);
166 bool selectTrunc(const Instruction *I);
167 bool selectIntExt(const Instruction *I);
168 bool selectShift(const Instruction *I);
169 bool selectDivRem(const Instruction *I, unsigned ISDOpcode);
170
171 // Utility helper routines.
172 bool isTypeLegal(Type *Ty, MVT &VT);
173 bool isTypeSupported(Type *Ty, MVT &VT);
174 bool isLoadTypeLegal(Type *Ty, MVT &VT);
175 bool computeAddress(const Value *Obj, Address &Addr);
176 bool computeCallAddress(const Value *V, Address &Addr);
177 void simplifyAddress(Address &Addr);
178
179 // Emit helper routines.
180 bool emitCmp(unsigned DestReg, const CmpInst *CI);
181 bool emitLoad(MVT VT, unsigned &ResultReg, Address &Addr);
182 bool emitStore(MVT VT, unsigned SrcReg, Address &Addr);
183 unsigned emitIntExt(MVT SrcVT, unsigned SrcReg, MVT DestVT, bool isZExt);
184 bool emitIntExt(MVT SrcVT, unsigned SrcReg, MVT DestVT, unsigned DestReg,
185 bool IsZExt);
186
187 bool emitIntZExt(MVT SrcVT, unsigned SrcReg, MVT DestVT, unsigned DestReg);
188
189 bool emitIntSExt(MVT SrcVT, unsigned SrcReg, MVT DestVT, unsigned DestReg);
190 bool emitIntSExt32r1(MVT SrcVT, unsigned SrcReg, MVT DestVT,
191 unsigned DestReg);
192 bool emitIntSExt32r2(MVT SrcVT, unsigned SrcReg, MVT DestVT,
193 unsigned DestReg);
194
195 unsigned getRegEnsuringSimpleIntegerWidening(const Value *, bool IsUnsigned);
196
197 unsigned emitLogicalOp(unsigned ISDOpc, MVT RetVT, const Value *LHS,
198 const Value *RHS);
199
200 unsigned materializeFP(const ConstantFP *CFP, MVT VT);
201 unsigned materializeGV(const GlobalValue *GV, MVT VT);
202 unsigned materializeInt(const Constant *C, MVT VT);
203 unsigned materialize32BitInt(int64_t Imm, const TargetRegisterClass *RC);
204 unsigned materializeExternalCallSym(MCSymbol *Sym);
205
206 MachineInstrBuilder emitInst(unsigned Opc) {
207 return BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc));
208 }
209
210 MachineInstrBuilder emitInst(unsigned Opc, unsigned DstReg) {
211 return BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc),
212 DstReg);
213 }
214
215 MachineInstrBuilder emitInstStore(unsigned Opc, unsigned SrcReg,
216 unsigned MemReg, int64_t MemOffset) {
217 return emitInst(Opc).addReg(SrcReg).addReg(MemReg).addImm(MemOffset);
218 }
219
220 MachineInstrBuilder emitInstLoad(unsigned Opc, unsigned DstReg,
221 unsigned MemReg, int64_t MemOffset) {
222 return emitInst(Opc, DstReg).addReg(MemReg).addImm(MemOffset);
223 }
224
225 unsigned fastEmitInst_rr(unsigned MachineInstOpcode,
226 const TargetRegisterClass *RC,
227 unsigned Op0, unsigned Op1);
228
229 // For some reason, this default is not generated by tablegen,
230 // so we explicitly generate it here.
231 unsigned fastEmitInst_riir(uint64_t inst, const TargetRegisterClass *RC,
232 unsigned Op0, uint64_t imm1, uint64_t imm2,
233 unsigned Op3) {
234 return 0;
235 }
236
237 // Call handling routines.
238private:
239 CCAssignFn *CCAssignFnForCall(CallingConv::ID CC) const;
240 bool processCallArgs(CallLoweringInfo &CLI, SmallVectorImpl<MVT> &ArgVTs,
241 unsigned &NumBytes);
242 bool finishCall(CallLoweringInfo &CLI, MVT RetVT, unsigned NumBytes);
243
244 const MipsABIInfo &getABI() const {
245 return static_cast<const MipsTargetMachine &>(TM).getABI();
246 }
247
248public:
249 // Backend specific FastISel code.
250 explicit MipsFastISel(FunctionLoweringInfo &funcInfo,
251 const TargetLibraryInfo *libInfo)
252 : FastISel(funcInfo, libInfo), TM(funcInfo.MF->getTarget()),
253 Subtarget(&funcInfo.MF->getSubtarget<MipsSubtarget>()),
254 TII(*Subtarget->getInstrInfo()), TLI(*Subtarget->getTargetLowering()) {
255 MFI = funcInfo.MF->getInfo<MipsFunctionInfo>();
256 Context = &funcInfo.Fn->getContext();
257 UnsupportedFPMode = Subtarget->isFP64bit() || Subtarget->useSoftFloat();
258 }
259
260 unsigned fastMaterializeAlloca(const AllocaInst *AI) override;
261 unsigned fastMaterializeConstant(const Constant *C) override;
262 bool fastSelectInstruction(const Instruction *I) override;
263
264#include "MipsGenFastISel.inc"
265};
266
267} // end anonymous namespace
268
269static bool CC_Mips(unsigned ValNo, MVT ValVT, MVT LocVT,
270 CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags,
271 CCState &State) LLVM_ATTRIBUTE_UNUSED;
272
273static bool CC_MipsO32_FP32(unsigned ValNo, MVT ValVT, MVT LocVT,
274 CCValAssign::LocInfo LocInfo,
275 ISD::ArgFlagsTy ArgFlags, CCState &State) {
276 llvm_unreachable("should not be called");
277}
278
279static bool CC_MipsO32_FP64(unsigned ValNo, MVT ValVT, MVT LocVT,
280 CCValAssign::LocInfo LocInfo,
281 ISD::ArgFlagsTy ArgFlags, CCState &State) {
282 llvm_unreachable("should not be called");
283}
284
285#include "MipsGenCallingConv.inc"
286
287CCAssignFn *MipsFastISel::CCAssignFnForCall(CallingConv::ID CC) const {
288 return CC_MipsO32;
289}
290
291unsigned MipsFastISel::emitLogicalOp(unsigned ISDOpc, MVT RetVT,
292 const Value *LHS, const Value *RHS) {
293 // Canonicalize immediates to the RHS first.
294 if (isa<ConstantInt>(LHS) && !isa<ConstantInt>(RHS))
295 std::swap(LHS, RHS);
296
297 unsigned Opc;
298 switch (ISDOpc) {
299 case ISD::AND:
300 Opc = Mips::AND;
301 break;
302 case ISD::OR:
303 Opc = Mips::OR;
304 break;
305 case ISD::XOR:
306 Opc = Mips::XOR;
307 break;
308 default:
309 llvm_unreachable("unexpected opcode");
310 }
311
312 Register LHSReg = getRegForValue(LHS);
313 if (!LHSReg)
314 return 0;
315
316 unsigned RHSReg;
317 if (const auto *C = dyn_cast<ConstantInt>(RHS))
318 RHSReg = materializeInt(C, MVT::i32);
319 else
320 RHSReg = getRegForValue(RHS);
321 if (!RHSReg)
322 return 0;
323
324 Register ResultReg = createResultReg(&Mips::GPR32RegClass);
325 if (!ResultReg)
326 return 0;
327
328 emitInst(Opc, ResultReg).addReg(LHSReg).addReg(RHSReg);
329 return ResultReg;
330}
331
332unsigned MipsFastISel::fastMaterializeAlloca(const AllocaInst *AI) {
333 assert(TLI.getValueType(DL, AI->getType(), true) == MVT::i32 &&
334 "Alloca should always return a pointer.");
335
336 DenseMap<const AllocaInst *, int>::iterator SI =
337 FuncInfo.StaticAllocaMap.find(AI);
338
339 if (SI != FuncInfo.StaticAllocaMap.end()) {
340 Register ResultReg = createResultReg(&Mips::GPR32RegClass);
341 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Mips::LEA_ADDiu),
342 ResultReg)
343 .addFrameIndex(SI->second)
344 .addImm(0);
345 return ResultReg;
346 }
347
348 return 0;
349}
350
351unsigned MipsFastISel::materializeInt(const Constant *C, MVT VT) {
352 if (VT != MVT::i32 && VT != MVT::i16 && VT != MVT::i8 && VT != MVT::i1)
353 return 0;
354 const TargetRegisterClass *RC = &Mips::GPR32RegClass;
355 const ConstantInt *CI = cast<ConstantInt>(C);
356 return materialize32BitInt(CI->getZExtValue(), RC);
357}
358
359unsigned MipsFastISel::materialize32BitInt(int64_t Imm,
360 const TargetRegisterClass *RC) {
361 Register ResultReg = createResultReg(RC);
362
363 if (isInt<16>(Imm)) {
364 unsigned Opc = Mips::ADDiu;
365 emitInst(Opc, ResultReg).addReg(Mips::ZERO).addImm(Imm);
366 return ResultReg;
367 } else if (isUInt<16>(Imm)) {
368 emitInst(Mips::ORi, ResultReg).addReg(Mips::ZERO).addImm(Imm);
369 return ResultReg;
370 }
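  // Neither form of 16-bit immediate fits, so synthesize the constant from two
  // halves: LUi places Hi in the upper 16 bits and, when the low half is
  // nonzero, ORi merges Lo into the lower 16 bits.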
371 unsigned Lo = Imm & 0xFFFF;
372 unsigned Hi = (Imm >> 16) & 0xFFFF;
373 if (Lo) {
374 // Both Lo and Hi have nonzero bits.
375 Register TmpReg = createResultReg(RC);
376 emitInst(Mips::LUi, TmpReg).addImm(Hi);
377 emitInst(Mips::ORi, ResultReg).addReg(TmpReg).addImm(Lo);
378 } else {
379 emitInst(Mips::LUi, ResultReg).addImm(Hi);
380 }
381 return ResultReg;
382}
383
384unsigned MipsFastISel::materializeFP(const ConstantFP *CFP, MVT VT) {
385 if (UnsupportedFPMode)
386 return 0;
387 int64_t Imm = CFP->getValueAPF().bitcastToAPInt().getZExtValue();
388 if (VT == MVT::f32) {
389 const TargetRegisterClass *RC = &Mips::FGR32RegClass;
390 Register DestReg = createResultReg(RC);
391 unsigned TempReg = materialize32BitInt(Imm, &Mips::GPR32RegClass);
392 emitInst(Mips::MTC1, DestReg).addReg(TempReg);
393 return DestReg;
394 } else if (VT == MVT::f64) {
395 const TargetRegisterClass *RC = &Mips::AFGR64RegClass;
396 Register DestReg = createResultReg(RC);
397 unsigned TempReg1 = materialize32BitInt(Imm >> 32, &Mips::GPR32RegClass);
398 unsigned TempReg2 =
399 materialize32BitInt(Imm & 0xFFFFFFFF, &Mips::GPR32RegClass);
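    // BuildPairF64 assembles the 64-bit FP value from its two 32-bit halves,
    // low word (TempReg2) first and high word (TempReg1) second.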
400 emitInst(Mips::BuildPairF64, DestReg).addReg(TempReg2).addReg(TempReg1);
401 return DestReg;
402 }
403 return 0;
404}
405
406unsigned MipsFastISel::materializeGV(const GlobalValue *GV, MVT VT) {
407 // For now 32-bit only.
408 if (VT != MVT::i32)
409 return 0;
410 const TargetRegisterClass *RC = &Mips::GPR32RegClass;
411 Register DestReg = createResultReg(RC);
412 const GlobalVariable *GVar = dyn_cast<GlobalVariable>(GV);
413 bool IsThreadLocal = GVar && GVar->isThreadLocal();
414 // TLS not supported at this time.
415 if (IsThreadLocal)
416 return 0;
417 emitInst(Mips::LW, DestReg)
418 .addReg(MFI->getGlobalBaseReg(*MF))
419 .addGlobalAddress(GV, 0, MipsII::MO_GOT);
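  // For internal/local symbols the GOT entry provides only the high part of
  // the address, so the %lo part is added with ADDiu below.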
420 if ((GV->hasInternalLinkage() ||
421 (GV->hasLocalLinkage() && !isa<Function>(GV)))) {
422 Register TempReg = createResultReg(RC);
423 emitInst(Mips::ADDiu, TempReg)
424 .addReg(DestReg)
425 .addGlobalAddress(GV, 0, MipsII::MO_ABS_LO);
426 DestReg = TempReg;
427 }
428 return DestReg;
429}
430
431unsigned MipsFastISel::materializeExternalCallSym(MCSymbol *Sym) {
432 const TargetRegisterClass *RC = &Mips::GPR32RegClass;
433 Register DestReg = createResultReg(RC);
434 emitInst(Mips::LW, DestReg)
435 .addReg(MFI->getGlobalBaseReg(*MF))
436 .addSym(Sym, MipsII::MO_GOT);
437 return DestReg;
438}
439
440// Materialize a constant into a register, and return the register
441// number (or zero if we failed to handle it).
442unsigned MipsFastISel::fastMaterializeConstant(const Constant *C) {
443 EVT CEVT = TLI.getValueType(DL, C->getType(), true);
444
445 // Only handle simple types.
446 if (!CEVT.isSimple())
447 return 0;
448 MVT VT = CEVT.getSimpleVT();
449
450 if (const ConstantFP *CFP = dyn_cast<ConstantFP>(C))
451 return (UnsupportedFPMode) ? 0 : materializeFP(CFP, VT);
452 else if (const GlobalValue *GV = dyn_cast<GlobalValue>(C))
453 return materializeGV(GV, VT);
454 else if (isa<ConstantInt>(C))
455 return materializeInt(C, VT);
456
457 return 0;
458}
459
460bool MipsFastISel::computeAddress(const Value *Obj, Address &Addr) {
461 const User *U = nullptr;
462 unsigned Opcode = Instruction::UserOp1;
463 if (const Instruction *I = dyn_cast<Instruction>(Obj)) {
464 // Don't walk into other basic blocks unless the object is an alloca from
465 // another block, otherwise it may not have a virtual register assigned.
466 if (FuncInfo.StaticAllocaMap.count(static_cast<const AllocaInst *>(Obj)) ||
467 FuncInfo.getMBB(I->getParent()) == FuncInfo.MBB) {
468 Opcode = I->getOpcode();
469 U = I;
470 }
471 } else if (const ConstantExpr *C = dyn_cast<ConstantExpr>(Obj)) {
472 Opcode = C->getOpcode();
473 U = C;
474 }
475 switch (Opcode) {
476 default:
477 break;
478 case Instruction::BitCast:
479 // Look through bitcasts.
480 return computeAddress(U->getOperand(0), Addr);
481 case Instruction::GetElementPtr: {
482 Address SavedAddr = Addr;
483 int64_t TmpOffset = Addr.getOffset();
484 // Iterate through the GEP folding the constants into offsets where
485 // we can.
486 gep_type_iterator GTI = gep_type_begin(U);
487 for (User::const_op_iterator i = U->op_begin() + 1, e = U->op_end(); i != e;
488 ++i, ++GTI) {
489 const Value *Op = *i;
490 if (StructType *STy = GTI.getStructTypeOrNull()) {
491 const StructLayout *SL = DL.getStructLayout(STy);
492 unsigned Idx = cast<ConstantInt>(Op)->getZExtValue();
493 TmpOffset += SL->getElementOffset(Idx);
494 } else {
495 uint64_t S = GTI.getSequentialElementStride(DL);
496 while (true) {
497 if (const ConstantInt *CI = dyn_cast<ConstantInt>(Op)) {
498 // Constant-offset addressing.
499 TmpOffset += CI->getSExtValue() * S;
500 break;
501 }
502 if (canFoldAddIntoGEP(U, Op)) {
503 // A compatible add with a constant operand. Fold the constant.
504 ConstantInt *CI =
505 cast<ConstantInt>(cast<AddOperator>(Op)->getOperand(1));
506 TmpOffset += CI->getSExtValue() * S;
507 // Iterate on the other operand.
508 Op = cast<AddOperator>(Op)->getOperand(0);
509 continue;
510 }
511 // Unsupported
512 goto unsupported_gep;
513 }
514 }
515 }
516 // Try to grab the base operand now.
517 Addr.setOffset(TmpOffset);
518 if (computeAddress(U->getOperand(0), Addr))
519 return true;
520 // We failed, restore everything and try the other options.
521 Addr = SavedAddr;
522 unsupported_gep:
523 break;
524 }
525 case Instruction::Alloca: {
526 const AllocaInst *AI = cast<AllocaInst>(Obj);
527 DenseMap<const AllocaInst *, int>::iterator SI =
528 FuncInfo.StaticAllocaMap.find(AI);
529 if (SI != FuncInfo.StaticAllocaMap.end()) {
530 Addr.setKind(Address::FrameIndexBase);
531 Addr.setFI(SI->second);
532 return true;
533 }
534 break;
535 }
536 }
537 Addr.setReg(getRegForValue(Obj));
538 return Addr.getReg() != 0;
539}
540
541bool MipsFastISel::computeCallAddress(const Value *V, Address &Addr) {
542 const User *U = nullptr;
543 unsigned Opcode = Instruction::UserOp1;
544
545 if (const auto *I = dyn_cast<Instruction>(V)) {
546 // Check if the value is defined in the same basic block. This information
547 // is crucial to know whether or not folding an operand is valid.
548 if (I->getParent() == FuncInfo.MBB->getBasicBlock()) {
549 Opcode = I->getOpcode();
550 U = I;
551 }
552 } else if (const auto *C = dyn_cast<ConstantExpr>(V)) {
553 Opcode = C->getOpcode();
554 U = C;
555 }
556
557 switch (Opcode) {
558 default:
559 break;
560 case Instruction::BitCast:
561 // Look past bitcasts if its operand is in the same BB.
562 return computeCallAddress(U->getOperand(0), Addr);
563 break;
564 case Instruction::IntToPtr:
565 // Look past no-op inttoptrs if its operand is in the same BB.
566 if (TLI.getValueType(DL, U->getOperand(0)->getType()) ==
567 TLI.getPointerTy(DL))
568 return computeCallAddress(U->getOperand(0), Addr);
569 break;
570 case Instruction::PtrToInt:
571 // Look past no-op ptrtoints if its operand is in the same BB.
572 if (TLI.getValueType(DL, U->getType()) == TLI.getPointerTy(DL))
573 return computeCallAddress(U->getOperand(0), Addr);
574 break;
575 }
576
577 if (const GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
578 Addr.setGlobalValue(GV);
579 return true;
580 }
581
582 // If all else fails, try to materialize the value in a register.
583 if (!Addr.getGlobalValue()) {
584 Addr.setReg(getRegForValue(V));
585 return Addr.getReg() != 0;
586 }
587
588 return false;
589}
590
591bool MipsFastISel::isTypeLegal(Type *Ty, MVT &VT) {
592 EVT evt = TLI.getValueType(DL, Ty, true);
593 // Only handle simple types.
594 if (evt == MVT::Other || !evt.isSimple())
595 return false;
596 VT = evt.getSimpleVT();
597
598 // Handle all legal types, i.e. a register that will directly hold this
599 // value.
600 return TLI.isTypeLegal(VT);
601}
602
603bool MipsFastISel::isTypeSupported(Type *Ty, MVT &VT) {
604 if (Ty->isVectorTy())
605 return false;
606
607 if (isTypeLegal(Ty, VT))
608 return true;
609
610 // If this is a type that can be sign- or zero-extended to a basic operation,
611 // go ahead and accept it now.
612 if (VT == MVT::i1 || VT == MVT::i8 || VT == MVT::i16)
613 return true;
614
615 return false;
616}
617
618bool MipsFastISel::isLoadTypeLegal(Type *Ty, MVT &VT) {
619 if (isTypeLegal(Ty, VT))
620 return true;
621 // We will extend this in a later patch:
622 // If this is a type that can be sign- or zero-extended to a basic operation,
623 // go ahead and accept it now.
624 if (VT == MVT::i8 || VT == MVT::i16)
625 return true;
626 return false;
627}
628
629 // Because of how emitCmp is called with fast-isel, you can end up with
630 // redundant "andi" instructions after the sequences emitted below.
631 // We should try to solve this issue in the future.
632//
633bool MipsFastISel::emitCmp(unsigned ResultReg, const CmpInst *CI) {
634 const Value *Left = CI->getOperand(0), *Right = CI->getOperand(1);
635 bool IsUnsigned = CI->isUnsigned();
636 unsigned LeftReg = getRegEnsuringSimpleIntegerWidening(Left, IsUnsigned);
637 if (LeftReg == 0)
638 return false;
639 unsigned RightReg = getRegEnsuringSimpleIntegerWidening(Right, IsUnsigned);
640 if (RightReg == 0)
641 return false;
642 CmpInst::Predicate P = CI->getPredicate();
643
644 switch (P) {
645 default:
646 return false;
647 case CmpInst::ICMP_EQ: {
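    // XOR yields zero exactly when the operands are equal; SLTiu with
    // immediate 1 then produces 1 iff that XOR result is zero.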
648 Register TempReg = createResultReg(&Mips::GPR32RegClass);
649 emitInst(Mips::XOR, TempReg).addReg(LeftReg).addReg(RightReg);
650 emitInst(Mips::SLTiu, ResultReg).addReg(TempReg).addImm(1);
651 break;
652 }
653 case CmpInst::ICMP_NE: {
654 Register TempReg = createResultReg(&Mips::GPR32RegClass);
655 emitInst(Mips::XOR, TempReg).addReg(LeftReg).addReg(RightReg);
656 emitInst(Mips::SLTu, ResultReg).addReg(Mips::ZERO).addReg(TempReg);
657 break;
658 }
659 case CmpInst::ICMP_UGT:
660 emitInst(Mips::SLTu, ResultReg).addReg(RightReg).addReg(LeftReg);
661 break;
662 case CmpInst::ICMP_ULT:
663 emitInst(Mips::SLTu, ResultReg).addReg(LeftReg).addReg(RightReg);
664 break;
665 case CmpInst::ICMP_UGE: {
666 Register TempReg = createResultReg(&Mips::GPR32RegClass);
667 emitInst(Mips::SLTu, TempReg).addReg(LeftReg).addReg(RightReg);
668 emitInst(Mips::XORi, ResultReg).addReg(TempReg).addImm(1);
669 break;
670 }
671 case CmpInst::ICMP_ULE: {
672 Register TempReg = createResultReg(&Mips::GPR32RegClass);
673 emitInst(Mips::SLTu, TempReg).addReg(RightReg).addReg(LeftReg);
674 emitInst(Mips::XORi, ResultReg).addReg(TempReg).addImm(1);
675 break;
676 }
677 case CmpInst::ICMP_SGT:
678 emitInst(Mips::SLT, ResultReg).addReg(RightReg).addReg(LeftReg);
679 break;
680 case CmpInst::ICMP_SLT:
681 emitInst(Mips::SLT, ResultReg).addReg(LeftReg).addReg(RightReg);
682 break;
683 case CmpInst::ICMP_SGE: {
684 Register TempReg = createResultReg(&Mips::GPR32RegClass);
685 emitInst(Mips::SLT, TempReg).addReg(LeftReg).addReg(RightReg);
686 emitInst(Mips::XORi, ResultReg).addReg(TempReg).addImm(1);
687 break;
688 }
689 case CmpInst::ICMP_SLE: {
690 Register TempReg = createResultReg(&Mips::GPR32RegClass);
691 emitInst(Mips::SLT, TempReg).addReg(RightReg).addReg(LeftReg);
692 emitInst(Mips::XORi, ResultReg).addReg(TempReg).addImm(1);
693 break;
694 }
695 case CmpInst::FCMP_OEQ:
696 case CmpInst::FCMP_UNE:
697 case CmpInst::FCMP_OLT:
698 case CmpInst::FCMP_OLE:
699 case CmpInst::FCMP_OGT:
700 case CmpInst::FCMP_OGE: {
701 if (UnsupportedFPMode)
702 return false;
703 bool IsFloat = Left->getType()->isFloatTy();
704 bool IsDouble = Left->getType()->isDoubleTy();
705 if (!IsFloat && !IsDouble)
706 return false;
707 unsigned Opc, CondMovOpc;
708 switch (P) {
709 case CmpInst::FCMP_OEQ:
710 Opc = IsFloat ? Mips::C_EQ_S : Mips::C_EQ_D32;
711 CondMovOpc = Mips::MOVT_I;
712 break;
713 case CmpInst::FCMP_UNE:
714 Opc = IsFloat ? Mips::C_EQ_S : Mips::C_EQ_D32;
715 CondMovOpc = Mips::MOVF_I;
716 break;
717 case CmpInst::FCMP_OLT:
718 Opc = IsFloat ? Mips::C_OLT_S : Mips::C_OLT_D32;
719 CondMovOpc = Mips::MOVT_I;
720 break;
721 case CmpInst::FCMP_OLE:
722 Opc = IsFloat ? Mips::C_OLE_S : Mips::C_OLE_D32;
723 CondMovOpc = Mips::MOVT_I;
724 break;
725 case CmpInst::FCMP_OGT:
726 Opc = IsFloat ? Mips::C_ULE_S : Mips::C_ULE_D32;
727 CondMovOpc = Mips::MOVF_I;
728 break;
729 case CmpInst::FCMP_OGE:
730 Opc = IsFloat ? Mips::C_ULT_S : Mips::C_ULT_D32;
731 CondMovOpc = Mips::MOVF_I;
732 break;
733 default:
734 llvm_unreachable("Only switching of a subset of CCs.");
735 }
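    // Materialize 0 and 1 in GPRs, compare into FCC0, then use a conditional
    // move on FCC0 to select between them, yielding a boolean result.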
736 Register RegWithZero = createResultReg(&Mips::GPR32RegClass);
737 Register RegWithOne = createResultReg(&Mips::GPR32RegClass);
738 emitInst(Mips::ADDiu, RegWithZero).addReg(Mips::ZERO).addImm(0);
739 emitInst(Mips::ADDiu, RegWithOne).addReg(Mips::ZERO).addImm(1);
740 emitInst(Opc).addReg(Mips::FCC0, RegState::Define).addReg(LeftReg)
741 .addReg(RightReg);
742 emitInst(CondMovOpc, ResultReg)
743 .addReg(RegWithOne)
744 .addReg(Mips::FCC0)
745 .addReg(RegWithZero);
746 break;
747 }
748 }
749 return true;
750}
751
752bool MipsFastISel::emitLoad(MVT VT, unsigned &ResultReg, Address &Addr) {
753 //
754 // more cases will be handled here in following patches.
755 //
756 unsigned Opc;
757 switch (VT.SimpleTy) {
758 case MVT::i32:
759 ResultReg = createResultReg(&Mips::GPR32RegClass);
760 Opc = Mips::LW;
761 break;
762 case MVT::i16:
763 ResultReg = createResultReg(&Mips::GPR32RegClass);
764 Opc = Mips::LHu;
765 break;
766 case MVT::i8:
767 ResultReg = createResultReg(&Mips::GPR32RegClass);
768 Opc = Mips::LBu;
769 break;
770 case MVT::f32:
771 if (UnsupportedFPMode)
772 return false;
773 ResultReg = createResultReg(&Mips::FGR32RegClass);
774 Opc = Mips::LWC1;
775 break;
776 case MVT::f64:
777 if (UnsupportedFPMode)
778 return false;
779 ResultReg = createResultReg(&Mips::AFGR64RegClass);
780 Opc = Mips::LDC1;
781 break;
782 default:
783 return false;
784 }
785 if (Addr.isRegBase()) {
786 simplifyAddress(Addr);
787 emitInstLoad(Opc, ResultReg, Addr.getReg(), Addr.getOffset());
788 return true;
789 }
790 if (Addr.isFIBase()) {
791 unsigned FI = Addr.getFI();
792 int64_t Offset = Addr.getOffset();
793 MachineFrameInfo &MFI = MF->getFrameInfo();
794 MachineMemOperand *MMO = MF->getMachineMemOperand(
795 MachinePointerInfo::getFixedStack(*MF, FI), MachineMemOperand::MOLoad,
796 MFI.getObjectSize(FI), Align(4));
797 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc), ResultReg)
798 .addFrameIndex(FI)
799 .addImm(Offset)
800 .addMemOperand(MMO);
801 return true;
802 }
803 return false;
804}
805
806bool MipsFastISel::emitStore(MVT VT, unsigned SrcReg, Address &Addr) {
807 //
808 // more cases will be handled here in following patches.
809 //
810 unsigned Opc;
811 switch (VT.SimpleTy) {
812 case MVT::i8:
813 Opc = Mips::SB;
814 break;
815 case MVT::i16:
816 Opc = Mips::SH;
817 break;
818 case MVT::i32:
819 Opc = Mips::SW;
820 break;
821 case MVT::f32:
822 if (UnsupportedFPMode)
823 return false;
824 Opc = Mips::SWC1;
825 break;
826 case MVT::f64:
827 if (UnsupportedFPMode)
828 return false;
829 Opc = Mips::SDC1;
830 break;
831 default:
832 return false;
833 }
834 if (Addr.isRegBase()) {
835 simplifyAddress(Addr);
836 emitInstStore(Opc, SrcReg, Addr.getReg(), Addr.getOffset());
837 return true;
838 }
839 if (Addr.isFIBase()) {
840 unsigned FI = Addr.getFI();
841 int64_t Offset = Addr.getOffset();
842 MachineFrameInfo &MFI = MF->getFrameInfo();
843 MachineMemOperand *MMO = MF->getMachineMemOperand(
844 MachinePointerInfo::getFixedStack(*MF, FI), MachineMemOperand::MOStore,
845 MFI.getObjectSize(FI), Align(4));
846 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc))
847 .addReg(SrcReg)
848 .addFrameIndex(FI)
849 .addImm(Offset)
850 .addMemOperand(MMO);
851 return true;
852 }
853 return false;
854}
855
856bool MipsFastISel::selectLogicalOp(const Instruction *I) {
857 MVT VT;
858 if (!isTypeSupported(I->getType(), VT))
859 return false;
860
861 unsigned ResultReg;
862 switch (I->getOpcode()) {
863 default:
864 llvm_unreachable("Unexpected instruction.");
865 case Instruction::And:
866 ResultReg = emitLogicalOp(ISD::AND, VT, I->getOperand(0), I->getOperand(1));
867 break;
868 case Instruction::Or:
869 ResultReg = emitLogicalOp(ISD::OR, VT, I->getOperand(0), I->getOperand(1));
870 break;
871 case Instruction::Xor:
872 ResultReg = emitLogicalOp(ISD::XOR, VT, I->getOperand(0), I->getOperand(1));
873 break;
874 }
875
876 if (!ResultReg)
877 return false;
878
879 updateValueMap(I, ResultReg);
880 return true;
881}
882
883bool MipsFastISel::selectLoad(const Instruction *I) {
884 // Atomic loads need special handling.
885 if (cast<LoadInst>(I)->isAtomic())
886 return false;
887
888 // Verify we have a legal type before going any further.
889 MVT VT;
890 if (!isLoadTypeLegal(I->getType(), VT))
891 return false;
892
893 // See if we can handle this address.
894 Address Addr;
895 if (!computeAddress(I->getOperand(0), Addr))
896 return false;
897
898 unsigned ResultReg;
899 if (!emitLoad(VT, ResultReg, Addr))
900 return false;
901 updateValueMap(I, ResultReg);
902 return true;
903}
904
905bool MipsFastISel::selectStore(const Instruction *I) {
906 Value *Op0 = I->getOperand(0);
907 unsigned SrcReg = 0;
908
909 // Atomic stores need special handling.
910 if (cast<StoreInst>(I)->isAtomic())
911 return false;
912
913 // Verify we have a legal type before going any further.
914 MVT VT;
915 if (!isLoadTypeLegal(I->getOperand(0)->getType(), VT))
916 return false;
917
918 // Get the value to be stored into a register.
919 SrcReg = getRegForValue(Op0);
920 if (SrcReg == 0)
921 return false;
922
923 // See if we can handle this address.
924 Address Addr;
925 if (!computeAddress(I->getOperand(1), Addr))
926 return false;
927
928 if (!emitStore(VT, SrcReg, Addr))
929 return false;
930 return true;
931}
932
933// This can cause a redundant sltiu to be generated.
934 // FIXME: try to eliminate this in a future patch.
935bool MipsFastISel::selectBranch(const Instruction *I) {
936 const BranchInst *BI = cast<BranchInst>(I);
937 MachineBasicBlock *BrBB = FuncInfo.MBB;
938 //
939 // TBB is the basic block for the case where the comparison is true.
940 // FBB is the basic block for the case where the comparison is false.
941 // if (cond) goto TBB
942 // goto FBB
943 // TBB:
944 //
945 MachineBasicBlock *TBB = FuncInfo.getMBB(BI->getSuccessor(0));
946 MachineBasicBlock *FBB = FuncInfo.getMBB(BI->getSuccessor(1));
947
948 // Fold the common case of a conditional branch with a comparison
949 // in the same block.
950 unsigned ZExtCondReg = 0;
951 if (const CmpInst *CI = dyn_cast<CmpInst>(BI->getCondition())) {
952 if (CI->hasOneUse() && CI->getParent() == I->getParent()) {
953 ZExtCondReg = createResultReg(&Mips::GPR32RegClass);
954 if (!emitCmp(ZExtCondReg, CI))
955 return false;
956 }
957 }
958
959 // For the general case, we need to mask with 1.
960 if (ZExtCondReg == 0) {
961 Register CondReg = getRegForValue(BI->getCondition());
962 if (CondReg == 0)
963 return false;
964
965 ZExtCondReg = emitIntExt(MVT::i1, CondReg, MVT::i32, true);
966 if (ZExtCondReg == 0)
967 return false;
968 }
969
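  // ZExtCondReg now holds 0 or 1, so a single BGTZ (branch if greater than
  // zero) transfers control to TBB exactly when the condition is true.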
970 BuildMI(*BrBB, FuncInfo.InsertPt, MIMD, TII.get(Mips::BGTZ))
971 .addReg(ZExtCondReg)
972 .addMBB(TBB);
973 finishCondBranch(BI->getParent(), TBB, FBB);
974 return true;
975}
976
977bool MipsFastISel::selectCmp(const Instruction *I) {
978 const CmpInst *CI = cast<CmpInst>(I);
979 Register ResultReg = createResultReg(&Mips::GPR32RegClass);
980 if (!emitCmp(ResultReg, CI))
981 return false;
982 updateValueMap(I, ResultReg);
983 return true;
984}
985
986// Attempt to fast-select a floating-point extend instruction.
987bool MipsFastISel::selectFPExt(const Instruction *I) {
988 if (UnsupportedFPMode)
989 return false;
990 Value *Src = I->getOperand(0);
991 EVT SrcVT = TLI.getValueType(DL, Src->getType(), true);
992 EVT DestVT = TLI.getValueType(DL, I->getType(), true);
993
994 if (SrcVT != MVT::f32 || DestVT != MVT::f64)
995 return false;
996
997 Register SrcReg =
998 getRegForValue(Src); // This must be a 32-bit floating point register
999 // class; maybe we should handle this differently.
1000 if (!SrcReg)
1001 return false;
1002
1003 Register DestReg = createResultReg(&Mips::AFGR64RegClass);
1004 emitInst(Mips::CVT_D32_S, DestReg).addReg(SrcReg);
1005 updateValueMap(I, DestReg);
1006 return true;
1007}
1008
1009bool MipsFastISel::selectSelect(const Instruction *I) {
1010 assert(isa<SelectInst>(I) && "Expected a select instruction.");
1011
1012 LLVM_DEBUG(dbgs() << "selectSelect\n");
1013
1014 MVT VT;
1015 if (!isTypeSupported(I->getType(), VT) || UnsupportedFPMode) {
1016 LLVM_DEBUG(
1017 dbgs() << ".. .. gave up (!isTypeSupported || UnsupportedFPMode)\n");
1018 return false;
1019 }
1020
1021 unsigned CondMovOpc;
1022 const TargetRegisterClass *RC;
1023
1024 if (VT.isInteger() && !VT.isVector() && VT.getSizeInBits() <= 32) {
1025 CondMovOpc = Mips::MOVN_I_I;
1026 RC = &Mips::GPR32RegClass;
1027 } else if (VT == MVT::f32) {
1028 CondMovOpc = Mips::MOVN_I_S;
1029 RC = &Mips::FGR32RegClass;
1030 } else if (VT == MVT::f64) {
1031 CondMovOpc = Mips::MOVN_I_D32;
1032 RC = &Mips::AFGR64RegClass;
1033 } else
1034 return false;
1035
1036 const SelectInst *SI = cast<SelectInst>(I);
1037 const Value *Cond = SI->getCondition();
1038 Register Src1Reg = getRegForValue(SI->getTrueValue());
1039 Register Src2Reg = getRegForValue(SI->getFalseValue());
1040 Register CondReg = getRegForValue(Cond);
1041
1042 if (!Src1Reg || !Src2Reg || !CondReg)
1043 return false;
1044
1045 Register ZExtCondReg = createResultReg(&Mips::GPR32RegClass);
1046 if (!ZExtCondReg)
1047 return false;
1048
1049 if (!emitIntExt(MVT::i1, CondReg, MVT::i32, ZExtCondReg, true))
1050 return false;
1051
1052 Register ResultReg = createResultReg(RC);
1053 Register TempReg = createResultReg(RC);
1054
1055 if (!ResultReg || !TempReg)
1056 return false;
1057
1058 emitInst(TargetOpcode::COPY, TempReg).addReg(Src2Reg);
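  // MOVN selects Src1Reg when the zero-extended condition is nonzero and
  // otherwise keeps TempReg (the copy of the false value tied to the result).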
1059 emitInst(CondMovOpc, ResultReg)
1060 .addReg(Src1Reg).addReg(ZExtCondReg).addReg(TempReg);
1061 updateValueMap(I, ResultReg);
1062 return true;
1063}
1064
1065// Attempt to fast-select a floating-point truncate instruction.
1066bool MipsFastISel::selectFPTrunc(const Instruction *I) {
1067 if (UnsupportedFPMode)
1068 return false;
1069 Value *Src = I->getOperand(0);
1070 EVT SrcVT = TLI.getValueType(DL, Src->getType(), true);
1071 EVT DestVT = TLI.getValueType(DL, I->getType(), true);
1072
1073 if (SrcVT != MVT::f64 || DestVT != MVT::f32)
1074 return false;
1075
1076 Register SrcReg = getRegForValue(Src);
1077 if (!SrcReg)
1078 return false;
1079
1080 Register DestReg = createResultReg(&Mips::FGR32RegClass);
1081 if (!DestReg)
1082 return false;
1083
1084 emitInst(Mips::CVT_S_D32, DestReg).addReg(SrcReg);
1085 updateValueMap(I, DestReg);
1086 return true;
1087}
1088
1089// Attempt to fast-select a floating-point-to-integer conversion.
1090bool MipsFastISel::selectFPToInt(const Instruction *I, bool IsSigned) {
1091 if (UnsupportedFPMode)
1092 return false;
1093 MVT DstVT, SrcVT;
1094 if (!IsSigned)
1095 return false; // We don't handle this case yet. There is no native
1096 // instruction for this but it can be synthesized.
1097 Type *DstTy = I->getType();
1098 if (!isTypeLegal(DstTy, DstVT))
1099 return false;
1100
1101 if (DstVT != MVT::i32)
1102 return false;
1103
1104 Value *Src = I->getOperand(0);
1105 Type *SrcTy = Src->getType();
1106 if (!isTypeLegal(SrcTy, SrcVT))
1107 return false;
1108
1109 if (SrcVT != MVT::f32 && SrcVT != MVT::f64)
1110 return false;
1111
1112 Register SrcReg = getRegForValue(Src);
1113 if (SrcReg == 0)
1114 return false;
1115
1116 // Determine the opcode for the conversion, which takes place
1117 // entirely within FPRs.
1118 Register DestReg = createResultReg(&Mips::GPR32RegClass);
1119 Register TempReg = createResultReg(&Mips::FGR32RegClass);
1120 unsigned Opc = (SrcVT == MVT::f32) ? Mips::TRUNC_W_S : Mips::TRUNC_W_D32;
1121
1122 // Generate the convert.
1123 emitInst(Opc, TempReg).addReg(SrcReg);
1124 emitInst(Mips::MFC1, DestReg).addReg(TempReg);
1125
1126 updateValueMap(I, DestReg);
1127 return true;
1128}
1129
1130bool MipsFastISel::processCallArgs(CallLoweringInfo &CLI,
1131 SmallVectorImpl<MVT> &OutVTs,
1132 unsigned &NumBytes) {
1133 CallingConv::ID CC = CLI.CallConv;
1134 SmallVector<CCValAssign, 16> ArgLocs;
1135 CCState CCInfo(CC, false, *FuncInfo.MF, ArgLocs, *Context);
1136 CCInfo.AnalyzeCallOperands(OutVTs, CLI.OutFlags, CCAssignFnForCall(CC));
1137 // Get a count of how many bytes are to be pushed on the stack.
1138 NumBytes = CCInfo.getStackSize();
1139 // This is the minimum argument area used for A0-A3.
1140 if (NumBytes < 16)
1141 NumBytes = 16;
1142
1143 emitInst(Mips::ADJCALLSTACKDOWN).addImm(16).addImm(0);
1144 // Process the args.
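  // Under O32, F12/F14 (or D6/D7 for f64) are used for the first two arguments
  // only when the first argument is floating point; otherwise arguments are
  // mapped to A0-A3 below according to their fixed 4-byte stack offsets.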
1145 MVT firstMVT;
1146 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
1147 CCValAssign &VA = ArgLocs[i];
1148 const Value *ArgVal = CLI.OutVals[VA.getValNo()];
1149 MVT ArgVT = OutVTs[VA.getValNo()];
1150
1151 if (i == 0) {
1152 firstMVT = ArgVT;
1153 if (ArgVT == MVT::f32) {
1154 VA.convertToReg(Mips::F12);
1155 } else if (ArgVT == MVT::f64) {
1156 if (Subtarget->isFP64bit())
1157 VA.convertToReg(Mips::D6_64);
1158 else
1159 VA.convertToReg(Mips::D6);
1160 }
1161 } else if (i == 1) {
1162 if ((firstMVT == MVT::f32) || (firstMVT == MVT::f64)) {
1163 if (ArgVT == MVT::f32) {
1164 VA.convertToReg(Mips::F14);
1165 } else if (ArgVT == MVT::f64) {
1166 if (Subtarget->isFP64bit())
1167 VA.convertToReg(Mips::D7_64);
1168 else
1169 VA.convertToReg(Mips::D7);
1170 }
1171 }
1172 }
1173 if (((ArgVT == MVT::i32) || (ArgVT == MVT::f32) || (ArgVT == MVT::i16) ||
1174 (ArgVT == MVT::i8)) &&
1175 VA.isMemLoc()) {
1176 switch (VA.getLocMemOffset()) {
1177 case 0:
1178 VA.convertToReg(Mips::A0);
1179 break;
1180 case 4:
1181 VA.convertToReg(Mips::A1);
1182 break;
1183 case 8:
1184 VA.convertToReg(Mips::A2);
1185 break;
1186 case 12:
1187 VA.convertToReg(Mips::A3);
1188 break;
1189 default:
1190 break;
1191 }
1192 }
1193 Register ArgReg = getRegForValue(ArgVal);
1194 if (!ArgReg)
1195 return false;
1196
1197 // Handle arg promotion: SExt, ZExt, AExt.
1198 switch (VA.getLocInfo()) {
1199 case CCValAssign::Full:
1200 break;
1201 case CCValAssign::AExt:
1202 case CCValAssign::SExt: {
1203 MVT DestVT = VA.getLocVT();
1204 MVT SrcVT = ArgVT;
1205 ArgReg = emitIntExt(SrcVT, ArgReg, DestVT, /*isZExt=*/false);
1206 if (!ArgReg)
1207 return false;
1208 break;
1209 }
1210 case CCValAssign::ZExt: {
1211 MVT DestVT = VA.getLocVT();
1212 MVT SrcVT = ArgVT;
1213 ArgReg = emitIntExt(SrcVT, ArgReg, DestVT, /*isZExt=*/true);
1214 if (!ArgReg)
1215 return false;
1216 break;
1217 }
1218 default:
1219 llvm_unreachable("Unknown arg promotion!");
1220 }
1221
1222 // Now copy/store arg to correct locations.
1223 if (VA.isRegLoc() && !VA.needsCustom()) {
1224 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
1225 TII.get(TargetOpcode::COPY), VA.getLocReg()).addReg(ArgReg);
1226 CLI.OutRegs.push_back(VA.getLocReg());
1227 } else if (VA.needsCustom()) {
1228 llvm_unreachable("Mips does not use custom args.");
1229 return false;
1230 } else {
1231 //
1232 // FIXME: This path will currently return false. It was copied
1233 // from the AArch64 port and should be essentially fine for Mips too.
1234 // The work to finish up this path will be done in a follow-on patch.
1235 //
1236 assert(VA.isMemLoc() && "Assuming store on stack.");
1237 // Don't emit stores for undef values.
1238 if (isa<UndefValue>(ArgVal))
1239 continue;
1240
1241 // Need to store on the stack.
1242 // FIXME: This alignment is incorrect but this path is disabled
1243 // for now (will return false). We need to determine the right alignment
1244 // based on the normal alignment for the underlying machine type.
1245 //
1246 unsigned ArgSize = alignTo(ArgVT.getSizeInBits(), 4);
1247
1248 unsigned BEAlign = 0;
1249 if (ArgSize < 8 && !Subtarget->isLittle())
1250 BEAlign = 8 - ArgSize;
1251
1252 Address Addr;
1253 Addr.setKind(Address::RegBase);
1254 Addr.setReg(Mips::SP);
1255 Addr.setOffset(VA.getLocMemOffset() + BEAlign);
1256
1257 Align Alignment = DL.getABITypeAlign(ArgVal->getType());
1258 MachineMemOperand *MMO = FuncInfo.MF->getMachineMemOperand(
1259 MachinePointerInfo::getStack(*FuncInfo.MF, Addr.getOffset()),
1260 MachineMemOperand::MOStore, ArgVT.getStoreSize(), Alignment);
1261 (void)(MMO);
1262 // if (!emitStore(ArgVT, ArgReg, Addr, MMO))
1263 return false; // can't store on the stack yet.
1264 }
1265 }
1266
1267 return true;
1268}
1269
1270bool MipsFastISel::finishCall(CallLoweringInfo &CLI, MVT RetVT,
1271 unsigned NumBytes) {
1272 CallingConv::ID CC = CLI.CallConv;
1273 emitInst(Mips::ADJCALLSTACKUP).addImm(16).addImm(0);
1274 if (RetVT != MVT::isVoid) {
1275 SmallVector<CCValAssign, 16> RVLocs;
1276 MipsCCState CCInfo(CC, false, *FuncInfo.MF, RVLocs, *Context);
1277
1278 CCInfo.AnalyzeCallResult(CLI.Ins, RetCC_Mips, CLI.RetTy,
1279 CLI.Symbol ? CLI.Symbol->getName().data()
1280 : nullptr);
1281
1282 // Only handle a single return value.
1283 if (RVLocs.size() != 1)
1284 return false;
1285 // Copy all of the result registers out of their specified physreg.
1286 MVT CopyVT = RVLocs[0].getValVT();
1287 // Special handling for extended integers.
1288 if (RetVT == MVT::i1 || RetVT == MVT::i8 || RetVT == MVT::i16)
1289 CopyVT = MVT::i32;
1290
1291 Register ResultReg = createResultReg(TLI.getRegClassFor(CopyVT));
1292 if (!ResultReg)
1293 return false;
1294 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
1295 TII.get(TargetOpcode::COPY),
1296 ResultReg).addReg(RVLocs[0].getLocReg());
1297 CLI.InRegs.push_back(RVLocs[0].getLocReg());
1298
1299 CLI.ResultReg = ResultReg;
1300 CLI.NumResultRegs = 1;
1301 }
1302 return true;
1303}
1304
1305bool MipsFastISel::fastLowerArguments() {
1306 LLVM_DEBUG(dbgs() << "fastLowerArguments\n");
1307
1308 if (!FuncInfo.CanLowerReturn) {
1309 LLVM_DEBUG(dbgs() << ".. gave up (!CanLowerReturn)\n");
1310 return false;
1311 }
1312
1313 const Function *F = FuncInfo.Fn;
1314 if (F->isVarArg()) {
1315 LLVM_DEBUG(dbgs() << ".. gave up (varargs)\n");
1316 return false;
1317 }
1318
1319 CallingConv::ID CC = F->getCallingConv();
1320 if (CC != CallingConv::C) {
1321 LLVM_DEBUG(dbgs() << ".. gave up (calling convention is not C)\n");
1322 return false;
1323 }
1324
1325 std::array<MCPhysReg, 4> GPR32ArgRegs = {{Mips::A0, Mips::A1, Mips::A2,
1326 Mips::A3}};
1327 std::array<MCPhysReg, 2> FGR32ArgRegs = {{Mips::F12, Mips::F14}};
1328 std::array<MCPhysReg, 2> AFGR64ArgRegs = {{Mips::D6, Mips::D7}};
1329 auto NextGPR32 = GPR32ArgRegs.begin();
1330 auto NextFGR32 = FGR32ArgRegs.begin();
1331 auto NextAFGR64 = AFGR64ArgRegs.begin();
1332
1333 struct AllocatedReg {
1334 const TargetRegisterClass *RC;
1335 unsigned Reg;
1336 AllocatedReg(const TargetRegisterClass *RC, unsigned Reg)
1337 : RC(RC), Reg(Reg) {}
1338 };
1339
1340 // Only handle simple cases, i.e. all arguments are directly mapped to
1341 // registers of the appropriate type.
1342 SmallVector<AllocatedReg, 4> Allocation;
1343 for (const auto &FormalArg : F->args()) {
1344 if (FormalArg.hasAttribute(Attribute::InReg) ||
1345 FormalArg.hasAttribute(Attribute::StructRet) ||
1346 FormalArg.hasAttribute(Attribute::ByVal)) {
1347 LLVM_DEBUG(dbgs() << ".. gave up (inreg, structret, byval)\n");
1348 return false;
1349 }
1350
1351 Type *ArgTy = FormalArg.getType();
1352 if (ArgTy->isStructTy() || ArgTy->isArrayTy() || ArgTy->isVectorTy()) {
1353 LLVM_DEBUG(dbgs() << ".. gave up (struct, array, or vector)\n");
1354 return false;
1355 }
1356
1357 EVT ArgVT = TLI.getValueType(DL, ArgTy);
1358 LLVM_DEBUG(dbgs() << ".. " << FormalArg.getArgNo() << ": "
1359 << ArgVT << "\n");
1360 if (!ArgVT.isSimple()) {
1361 LLVM_DEBUG(dbgs() << ".. .. gave up (not a simple type)\n");
1362 return false;
1363 }
1364
1365 switch (ArgVT.getSimpleVT().SimpleTy) {
1366 case MVT::i1:
1367 case MVT::i8:
1368 case MVT::i16:
1369 if (!FormalArg.hasAttribute(Attribute::SExt) &&
1370 !FormalArg.hasAttribute(Attribute::ZExt)) {
1371 // It must be any-extend; this shouldn't happen for clang-generated IR,
1372 // so just fall back on SelectionDAG.
1373 LLVM_DEBUG(dbgs() << ".. .. gave up (i8/i16 arg is not extended)\n");
1374 return false;
1375 }
1376
1377 if (NextGPR32 == GPR32ArgRegs.end()) {
1378 LLVM_DEBUG(dbgs() << ".. .. gave up (ran out of GPR32 arguments)\n");
1379 return false;
1380 }
1381
1382 LLVM_DEBUG(dbgs() << ".. .. GPR32(" << *NextGPR32 << ")\n");
1383 Allocation.emplace_back(&Mips::GPR32RegClass, *NextGPR32++);
1384
1385 // Allocating any GPR32 prohibits further use of floating point arguments.
1386 NextFGR32 = FGR32ArgRegs.end();
1387 NextAFGR64 = AFGR64ArgRegs.end();
1388 break;
1389
1390 case MVT::i32:
1391 if (FormalArg.hasAttribute(Attribute::ZExt)) {
1392 // The O32 ABI does not permit a zero-extended i32.
1393 LLVM_DEBUG(dbgs() << ".. .. gave up (i32 arg is zero extended)\n");
1394 return false;
1395 }
1396
1397 if (NextGPR32 == GPR32ArgRegs.end()) {
1398 LLVM_DEBUG(dbgs() << ".. .. gave up (ran out of GPR32 arguments)\n");
1399 return false;
1400 }
1401
1402 LLVM_DEBUG(dbgs() << ".. .. GPR32(" << *NextGPR32 << ")\n");
1403 Allocation.emplace_back(&Mips::GPR32RegClass, *NextGPR32++);
1404
1405 // Allocating any GPR32 prohibits further use of floating point arguments.
1406 NextFGR32 = FGR32ArgRegs.end();
1407 NextAFGR64 = AFGR64ArgRegs.end();
1408 break;
1409
1410 case MVT::f32:
1411 if (UnsupportedFPMode) {
1412 LLVM_DEBUG(dbgs() << ".. .. gave up (UnsupportedFPMode)\n");
1413 return false;
1414 }
1415 if (NextFGR32 == FGR32ArgRegs.end()) {
1416 LLVM_DEBUG(dbgs() << ".. .. gave up (ran out of FGR32 arguments)\n");
1417 return false;
1418 }
1419 LLVM_DEBUG(dbgs() << ".. .. FGR32(" << *NextFGR32 << ")\n");
1420 Allocation.emplace_back(&Mips::FGR32RegClass, *NextFGR32++);
1421 // Allocating an FGR32 also allocates the super-register AFGR64, and
1422 // ABI rules require us to skip the corresponding GPR32.
1423 if (NextGPR32 != GPR32ArgRegs.end())
1424 NextGPR32++;
1425 if (NextAFGR64 != AFGR64ArgRegs.end())
1426 NextAFGR64++;
1427 break;
1428
1429 case MVT::f64:
1430 if (UnsupportedFPMode) {
1431 LLVM_DEBUG(dbgs() << ".. .. gave up (UnsupportedFPMode)\n");
1432 return false;
1433 }
1434 if (NextAFGR64 == AFGR64ArgRegs.end()) {
1435 LLVM_DEBUG(dbgs() << ".. .. gave up (ran out of AFGR64 arguments)\n");
1436 return false;
1437 }
1438 LLVM_DEBUG(dbgs() << ".. .. AFGR64(" << *NextAFGR64 << ")\n");
1439 Allocation.emplace_back(&Mips::AFGR64RegClass, *NextAFGR64++);
1440 // Allocating an FGR32 also allocates the super-register AFGR64, and
1441 // ABI rules require us to skip the corresponding GPR32 pair.
1442 if (NextGPR32 != GPR32ArgRegs.end())
1443 NextGPR32++;
1444 if (NextGPR32 != GPR32ArgRegs.end())
1445 NextGPR32++;
1446 if (NextFGR32 != FGR32ArgRegs.end())
1447 NextFGR32++;
1448 break;
1449
1450 default:
1451 LLVM_DEBUG(dbgs() << ".. .. gave up (unknown type)\n");
1452 return false;
1453 }
1454 }
1455
1456 for (const auto &FormalArg : F->args()) {
1457 unsigned ArgNo = FormalArg.getArgNo();
1458 unsigned SrcReg = Allocation[ArgNo].Reg;
1459 Register DstReg = FuncInfo.MF->addLiveIn(SrcReg, Allocation[ArgNo].RC);
1460 // FIXME: Unfortunately it's necessary to emit a copy from the livein copy.
1461 // Without this, EmitLiveInCopies may eliminate the livein if its only
1462 // use is a bitcast (which isn't turned into an instruction).
1463 Register ResultReg = createResultReg(Allocation[ArgNo].RC);
1464 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
1465 TII.get(TargetOpcode::COPY), ResultReg)
1466 .addReg(DstReg, getKillRegState(true));
1467 updateValueMap(&FormalArg, ResultReg);
1468 }
1469
1470 // Calculate the size of the incoming arguments area.
1471 // We currently reject all the cases where this would be non-zero.
1472 unsigned IncomingArgSizeInBytes = 0;
1473
1474 // Account for the reserved argument area on ABIs that have one (O32).
1475 // It seems strange to do this on the caller side but it's necessary in
1476 // SelectionDAG's implementation.
1477 IncomingArgSizeInBytes = std::min(getABI().GetCalleeAllocdArgSizeInBytes(CC),
1478 IncomingArgSizeInBytes);
1479
1480 MF->getInfo<MipsFunctionInfo>()->setFormalArgInfo(IncomingArgSizeInBytes,
1481 false);
1482
1483 return true;
1484}
1485
1486bool MipsFastISel::fastLowerCall(CallLoweringInfo &CLI) {
1487 CallingConv::ID CC = CLI.CallConv;
1488 bool IsTailCall = CLI.IsTailCall;
1489 bool IsVarArg = CLI.IsVarArg;
1490 const Value *Callee = CLI.Callee;
1491 MCSymbol *Symbol = CLI.Symbol;
1492
1493 // Do not handle FastCC.
1494 if (CC == CallingConv::Fast)
1495 return false;
1496
1497 // Allow SelectionDAG isel to handle tail calls.
1498 if (IsTailCall)
1499 return false;
1500
1501 // Let SDISel handle vararg functions.
1502 if (IsVarArg)
1503 return false;
1504
1505 // FIXME: Only handle *simple* calls for now.
1506 MVT RetVT;
1507 if (CLI.RetTy->isVoidTy())
1508 RetVT = MVT::isVoid;
1509 else if (!isTypeSupported(CLI.RetTy, RetVT))
1510 return false;
1511
1512 for (auto Flag : CLI.OutFlags)
1513 if (Flag.isInReg() || Flag.isSRet() || Flag.isNest() || Flag.isByVal())
1514 return false;
1515
1516 // Set up the argument vectors.
1517 SmallVector<MVT, 16> OutVTs;
1518 OutVTs.reserve(CLI.OutVals.size());
1519
1520 for (auto *Val : CLI.OutVals) {
1521 MVT VT;
1522 if (!isTypeLegal(Val->getType(), VT) &&
1523 !(VT == MVT::i1 || VT == MVT::i8 || VT == MVT::i16))
1524 return false;
1525
1526 // We don't handle vector parameters yet.
1527 if (VT.isVector() || VT.getSizeInBits() > 64)
1528 return false;
1529
1530 OutVTs.push_back(VT);
1531 }
1532
1533 Address Addr;
1534 if (!computeCallAddress(Callee, Addr))
1535 return false;
1536
1537 // Handle the arguments now that we've gotten them.
1538 unsigned NumBytes;
1539 if (!processCallArgs(CLI, OutVTs, NumBytes))
1540 return false;
1541
1542 if (!Addr.getGlobalValue())
1543 return false;
1544
1545 // Issue the call.
1546 unsigned DestAddress;
1547 if (Symbol)
1548 DestAddress = materializeExternalCallSym(Symbol);
1549 else
1550 DestAddress = materializeGV(Addr.getGlobalValue(), MVT::i32);
1551 emitInst(TargetOpcode::COPY, Mips::T9).addReg(DestAddress);
1552 MachineInstrBuilder MIB =
1553 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Mips::JALR),
1554 Mips::RA).addReg(Mips::T9);
1555
1556 // Add implicit physical register uses to the call.
1557 for (auto Reg : CLI.OutRegs)
1558 MIB.addReg(Reg, RegState::Implicit);
1559
1560 // Add a register mask with the call-preserved registers.
1561 // Proper defs for return values will be added by setPhysRegsDeadExcept().
1562 MIB.addRegMask(TRI.getCallPreservedMask(*FuncInfo.MF, CC));
1563
1564 CLI.Call = MIB;
1565
1566 if (EmitJalrReloc && !Subtarget->inMips16Mode()) {
1567 // Attach callee address to the instruction, let asm printer emit
1568 // .reloc R_MIPS_JALR.
1569 if (Symbol)
1570 MIB.addSym(Symbol, MipsII::MO_JALR);
1571 else
1572 MIB.addSym(FuncInfo.MF->getContext().getOrCreateSymbol(
1573 Addr.getGlobalValue()->getName()), MipsII::MO_JALR);
1574 }
1575
1576 // Finish off the call including any return values.
1577 return finishCall(CLI, RetVT, NumBytes);
1578}
1579
1580bool MipsFastISel::fastLowerIntrinsicCall(const IntrinsicInst *II) {
1581 switch (II->getIntrinsicID()) {
1582 default:
1583 return false;
1584 case Intrinsic::bswap: {
1585 Type *RetTy = II->getCalledFunction()->getReturnType();
1586
1587 MVT VT;
1588 if (!isTypeSupported(RetTy, VT))
1589 return false;
1590
1591 Register SrcReg = getRegForValue(II->getOperand(0));
1592 if (SrcReg == 0)
1593 return false;
1594 Register DestReg = createResultReg(&Mips::GPR32RegClass);
1595 if (DestReg == 0)
1596 return false;
1597 if (VT == MVT::i16) {
1598 if (Subtarget->hasMips32r2()) {
1599 emitInst(Mips::WSBH, DestReg).addReg(SrcReg);
1600 updateValueMap(II, DestReg);
1601 return true;
1602 } else {
1603 unsigned TempReg[3];
1604 for (unsigned &R : TempReg) {
1605 R = createResultReg(&Mips::GPR32RegClass);
1606 if (R == 0)
1607 return false;
1608 }
1609 emitInst(Mips::SLL, TempReg[0]).addReg(SrcReg).addImm(8);
1610 emitInst(Mips::SRL, TempReg[1]).addReg(SrcReg).addImm(8);
1611 emitInst(Mips::OR, TempReg[2]).addReg(TempReg[0]).addReg(TempReg[1]);
1612 emitInst(Mips::ANDi, DestReg).addReg(TempReg[2]).addImm(0xFFFF);
1613 updateValueMap(II, DestReg);
1614 return true;
1615 }
1616 } else if (VT == MVT::i32) {
1617 if (Subtarget->hasMips32r2()) {
1618 Register TempReg = createResultReg(&Mips::GPR32RegClass);
1619 emitInst(Mips::WSBH, TempReg).addReg(SrcReg);
1620 emitInst(Mips::ROTR, DestReg).addReg(TempReg).addImm(16);
1621 updateValueMap(II, DestReg);
1622 return true;
1623 } else {
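        // Without MIPS32r2's WSBH/ROTR, build the 32-bit byte swap manually
        // from shifts, masks and ORs that move each byte into its mirrored
        // position.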
1624 unsigned TempReg[8];
1625 for (unsigned &R : TempReg) {
1626 R = createResultReg(&Mips::GPR32RegClass);
1627 if (R == 0)
1628 return false;
1629 }
1630
1631 emitInst(Mips::SRL, TempReg[0]).addReg(SrcReg).addImm(8);
1632 emitInst(Mips::SRL, TempReg[1]).addReg(SrcReg).addImm(24);
1633 emitInst(Mips::ANDi, TempReg[2]).addReg(TempReg[0]).addImm(0xFF00);
1634 emitInst(Mips::OR, TempReg[3]).addReg(TempReg[1]).addReg(TempReg[2]);
1635
1636 emitInst(Mips::ANDi, TempReg[4]).addReg(SrcReg).addImm(0xFF00);
1637 emitInst(Mips::SLL, TempReg[5]).addReg(TempReg[4]).addImm(8);
1638
1639 emitInst(Mips::SLL, TempReg[6]).addReg(SrcReg).addImm(24);
1640 emitInst(Mips::OR, TempReg[7]).addReg(TempReg[3]).addReg(TempReg[5]);
1641 emitInst(Mips::OR, DestReg).addReg(TempReg[6]).addReg(TempReg[7]);
1642 updateValueMap(II, DestReg);
1643 return true;
1644 }
1645 }
1646 return false;
1647 }
1648 case Intrinsic::memcpy:
1649 case Intrinsic::memmove: {
1650 const auto *MTI = cast<MemTransferInst>(II);
1651 // Don't handle volatile.
1652 if (MTI->isVolatile())
1653 return false;
1654 if (!MTI->getLength()->getType()->isIntegerTy(32))
1655 return false;
1656 const char *IntrMemName = isa<MemCpyInst>(II) ? "memcpy" : "memmove";
1657 return lowerCallTo(II, IntrMemName, II->arg_size() - 1);
1658 }
1659 case Intrinsic::memset: {
1660 const MemSetInst *MSI = cast<MemSetInst>(II);
1661 // Don't handle volatile.
1662 if (MSI->isVolatile())
1663 return false;
1664 if (!MSI->getLength()->getType()->isIntegerTy(32))
1665 return false;
1666 return lowerCallTo(II, "memset", II->arg_size() - 1);
1667 }
1668 }
1669 return false;
1670}
1671
1672bool MipsFastISel::selectRet(const Instruction *I) {
1673 const Function &F = *I->getParent()->getParent();
1674 const ReturnInst *Ret = cast<ReturnInst>(I);
1675
1676 LLVM_DEBUG(dbgs() << "selectRet\n");
1677
1678 if (!FuncInfo.CanLowerReturn)
1679 return false;
1680
1681 // Build a list of return value registers.
1682 SmallVector<unsigned, 4> RetRegs;
1683
1684 if (Ret->getNumOperands() > 0) {
1685 CallingConv::ID CC = F.getCallingConv();
1686
1687 // Do not handle FastCC.
1688 if (CC == CallingConv::Fast)
1689 return false;
1690
1691 SmallVector<ISD::OutputArg, 4> Outs;
1692 GetReturnInfo(CC, F.getReturnType(), F.getAttributes(), Outs, TLI, DL);
1693
1694 // Analyze operands of the call, assigning locations to each operand.
1695 SmallVector<CCValAssign, 16> ValLocs;
1696 MipsCCState CCInfo(CC, F.isVarArg(), *FuncInfo.MF, ValLocs,
1697 I->getContext());
1698 CCAssignFn *RetCC = RetCC_Mips;
1699 CCInfo.AnalyzeReturn(Outs, RetCC);
1700
1701 // Only handle a single return value for now.
1702 if (ValLocs.size() != 1)
1703 return false;
1704
1705 CCValAssign &VA = ValLocs[0];
1706 const Value *RV = Ret->getOperand(0);
1707
1708 // Don't bother handling odd stuff for now.
1709 if ((VA.getLocInfo() != CCValAssign::Full) &&
1710 (VA.getLocInfo() != CCValAssign::BCvt))
1711 return false;
1712
1713 // Only handle register returns for now.
1714 if (!VA.isRegLoc())
1715 return false;
1716
1717 Register Reg = getRegForValue(RV);
1718 if (Reg == 0)
1719 return false;
1720
1721 unsigned SrcReg = Reg + VA.getValNo();
1722 Register DestReg = VA.getLocReg();
1723 // Avoid a cross-class copy. This is very unlikely.
1724 if (!MRI.getRegClass(SrcReg)->contains(DestReg))
1725 return false;
1726
1727 EVT RVEVT = TLI.getValueType(DL, RV->getType());
1728 if (!RVEVT.isSimple())
1729 return false;
1730
1731 if (RVEVT.isVector())
1732 return false;
1733
1734 MVT RVVT = RVEVT.getSimpleVT();
1735 if (RVVT == MVT::f128)
1736 return false;
1737
1738 // Do not handle FGR64 returns for now.
1739 if (RVVT == MVT::f64 && UnsupportedFPMode) {
1740 LLVM_DEBUG(dbgs() << ".. .. gave up (UnsupportedFPMode)\n");
1741 return false;
1742 }
1743
1744 MVT DestVT = VA.getValVT();
1745 // Special handling for extended integers.
1746 if (RVVT != DestVT) {
1747 if (RVVT != MVT::i1 && RVVT != MVT::i8 && RVVT != MVT::i16)
1748 return false;
1749
1750 if (Outs[0].Flags.isZExt() || Outs[0].Flags.isSExt()) {
1751 bool IsZExt = Outs[0].Flags.isZExt();
1752 SrcReg = emitIntExt(RVVT, SrcReg, DestVT, IsZExt);
1753 if (SrcReg == 0)
1754 return false;
1755 }
1756 }
1757
1758 // Make the copy.
1759 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
1760 TII.get(TargetOpcode::COPY), DestReg).addReg(SrcReg);
1761
1762 // Add register to return instruction.
1763 RetRegs.push_back(VA.getLocReg());
1764 }
1765 MachineInstrBuilder MIB = emitInst(Mips::RetRA);
1766 for (unsigned Reg : RetRegs)
1767 MIB.addReg(Reg, RegState::Implicit);
1768 return true;
1769}
1770
1771bool MipsFastISel::selectTrunc(const Instruction *I) {
1772 // The high bits for a type smaller than the register size are assumed to be
1773 // undefined.
1774 Value *Op = I->getOperand(0);
1775
1776 EVT SrcVT, DestVT;
1777 SrcVT = TLI.getValueType(DL, Op->getType(), true);
1778 DestVT = TLI.getValueType(DL, I->getType(), true);
1779
1780 if (SrcVT != MVT::i32 && SrcVT != MVT::i16 && SrcVT != MVT::i8)
1781 return false;
1782 if (DestVT != MVT::i16 && DestVT != MVT::i8 && DestVT != MVT::i1)
1783 return false;
1784
1785 Register SrcReg = getRegForValue(Op);
1786 if (!SrcReg)
1787 return false;
1788
1789 // Because the high bits are undefined, a truncate doesn't generate
1790 // any code.
1791 updateValueMap(I, SrcReg);
1792 return true;
1793}
1794
1795bool MipsFastISel::selectIntExt(const Instruction *I) {
1796 Type *DestTy = I->getType();
1797 Value *Src = I->getOperand(0);
1798 Type *SrcTy = Src->getType();
1799
1800 bool isZExt = isa<ZExtInst>(I);
1801 Register SrcReg = getRegForValue(Src);
1802 if (!SrcReg)
1803 return false;
1804
1805 EVT SrcEVT, DestEVT;
1806 SrcEVT = TLI.getValueType(DL, SrcTy, true);
1807 DestEVT = TLI.getValueType(DL, DestTy, true);
1808 if (!SrcEVT.isSimple())
1809 return false;
1810 if (!DestEVT.isSimple())
1811 return false;
1812
1813 MVT SrcVT = SrcEVT.getSimpleVT();
1814 MVT DestVT = DestEVT.getSimpleVT();
1815 Register ResultReg = createResultReg(&Mips::GPR32RegClass);
1816
1817 if (!emitIntExt(SrcVT, SrcReg, DestVT, ResultReg, isZExt))
1818 return false;
1819 updateValueMap(I, ResultReg);
1820 return true;
1821}
1822
1823bool MipsFastISel::emitIntSExt32r1(MVT SrcVT, unsigned SrcReg, MVT DestVT,
1824 unsigned DestReg) {
1825 unsigned ShiftAmt;
1826 switch (SrcVT.SimpleTy) {
1827 default:
1828 return false;
1829 case MVT::i8:
1830 ShiftAmt = 24;
1831 break;
1832 case MVT::i16:
1833 ShiftAmt = 16;
1834 break;
1835 }
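  // Without the MIPS32r2 SEB/SEH instructions, sign-extend by shifting the
  // value left until its sign bit reaches bit 31, then arithmetic-shifting
  // it back down.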
1836 Register TempReg = createResultReg(&Mips::GPR32RegClass);
1837 emitInst(Mips::SLL, TempReg).addReg(SrcReg).addImm(ShiftAmt);
1838 emitInst(Mips::SRA, DestReg).addReg(TempReg).addImm(ShiftAmt);
1839 return true;
1840}
1841
1842bool MipsFastISel::emitIntSExt32r2(MVT SrcVT, unsigned SrcReg, MVT DestVT,
1843 unsigned DestReg) {
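  // MIPS32r2 provides single-instruction sign extension: SEB for bytes and
  // SEH for halfwords.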
1844 switch (SrcVT.SimpleTy) {
1845 default:
1846 return false;
1847 case MVT::i8:
1848 emitInst(Mips::SEB, DestReg).addReg(SrcReg);
1849 break;
1850 case MVT::i16:
1851 emitInst(Mips::SEH, DestReg).addReg(SrcReg);
1852 break;
1853 }
1854 return true;
1855}
1856
1857bool MipsFastISel::emitIntSExt(MVT SrcVT, unsigned SrcReg, MVT DestVT,
1858 unsigned DestReg) {
1859 if ((DestVT != MVT::i32) && (DestVT != MVT::i16))
1860 return false;
1861 if (Subtarget->hasMips32r2())
1862 return emitIntSExt32r2(SrcVT, SrcReg, DestVT, DestReg);
1863 return emitIntSExt32r1(SrcVT, SrcReg, DestVT, DestReg);
1864}
1865
1866bool MipsFastISel::emitIntZExt(MVT SrcVT, unsigned SrcReg, MVT DestVT,
1867 unsigned DestReg) {
1868 int64_t Imm;
1869
1870 switch (SrcVT.SimpleTy) {
1871 default:
1872 return false;
1873 case MVT::i1:
1874 Imm = 1;
1875 break;
1876 case MVT::i8:
1877 Imm = 0xff;
1878 break;
1879 case MVT::i16:
1880 Imm = 0xffff;
1881 break;
1882 }
1883
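  // Zero extension only needs the low bits preserved, so a single ANDi with
  // the mask for the source width suffices.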
1884 emitInst(Mips::ANDi, DestReg).addReg(SrcReg).addImm(Imm);
1885 return true;
1886}
1887
1888bool MipsFastISel::emitIntExt(MVT SrcVT, unsigned SrcReg, MVT DestVT,
1889 unsigned DestReg, bool IsZExt) {
1890 // FastISel does not have plumbing to deal with extensions where the SrcVT or
1891 // DestVT are odd things, so test to make sure that they are both types we can
1892   // handle (i1/i8/i16/i32 for SrcVT and i8/i16/i32 for DestVT), otherwise
1893 // bail out to SelectionDAG.
1894 if (((DestVT != MVT::i8) && (DestVT != MVT::i16) && (DestVT != MVT::i32)) ||
1895 ((SrcVT != MVT::i1) && (SrcVT != MVT::i8) && (SrcVT != MVT::i16)))
1896 return false;
1897 if (IsZExt)
1898 return emitIntZExt(SrcVT, SrcReg, DestVT, DestReg);
1899 return emitIntSExt(SrcVT, SrcReg, DestVT, DestReg);
1900}
1901
1902unsigned MipsFastISel::emitIntExt(MVT SrcVT, unsigned SrcReg, MVT DestVT,
1903 bool isZExt) {
1904 unsigned DestReg = createResultReg(&Mips::GPR32RegClass);
1905 bool Success = emitIntExt(SrcVT, SrcReg, DestVT, DestReg, isZExt);
1906 return Success ? DestReg : 0;
1907}
1908
1909bool MipsFastISel::selectDivRem(const Instruction *I, unsigned ISDOpcode) {
1910 EVT DestEVT = TLI.getValueType(DL, I->getType(), true);
1911 if (!DestEVT.isSimple())
1912 return false;
1913
1914 MVT DestVT = DestEVT.getSimpleVT();
1915 if (DestVT != MVT::i32)
1916 return false;
1917
1918 unsigned DivOpc;
1919 switch (ISDOpcode) {
1920 default:
1921 return false;
1922 case ISD::SDIV:
1923 case ISD::SREM:
1924 DivOpc = Mips::SDIV;
1925 break;
1926 case ISD::UDIV:
1927 case ISD::UREM:
1928 DivOpc = Mips::UDIV;
1929 break;
1930 }
1931
1932 Register Src0Reg = getRegForValue(I->getOperand(0));
1933 Register Src1Reg = getRegForValue(I->getOperand(1));
1934 if (!Src0Reg || !Src1Reg)
1935 return false;
1936
1937 emitInst(DivOpc).addReg(Src0Reg).addReg(Src1Reg);
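  // Guard against division by zero: TEQ traps if the divisor equals $zero.
  // The immediate 7 is the conventional divide-by-zero trap code.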
1938 emitInst(Mips::TEQ).addReg(Src1Reg).addReg(Mips::ZERO).addImm(7);
1939
1940 Register ResultReg = createResultReg(&Mips::GPR32RegClass);
1941 if (!ResultReg)
1942 return false;
1943
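  // DIV/UDIV leave the quotient in LO and the remainder in HI, so remainder
  // operations read HI (MFHI) while divisions read LO (MFLO).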
1944 unsigned MFOpc = (ISDOpcode == ISD::SREM || ISDOpcode == ISD::UREM)
1945 ? Mips::MFHI
1946 : Mips::MFLO;
1947 emitInst(MFOpc, ResultReg);
1948
1949 updateValueMap(I, ResultReg);
1950 return true;
1951}
1952
1953bool MipsFastISel::selectShift(const Instruction *I) {
1954 MVT RetVT;
1955
1956 if (!isTypeSupported(I->getType(), RetVT))
1957 return false;
1958
1959 Register ResultReg = createResultReg(&Mips::GPR32RegClass);
1960 if (!ResultReg)
1961 return false;
1962
1963 unsigned Opcode = I->getOpcode();
1964 const Value *Op0 = I->getOperand(0);
1965 Register Op0Reg = getRegForValue(Op0);
1966 if (!Op0Reg)
1967 return false;
1968
1969   // For AShr/LShr, widen operand 0 to i32 first (sign-extend for AShr, zero-extend for LShr).
1970 if (Opcode == Instruction::AShr || Opcode == Instruction::LShr) {
1971 Register TempReg = createResultReg(&Mips::GPR32RegClass);
1972 if (!TempReg)
1973 return false;
1974
1975 MVT Op0MVT = TLI.getValueType(DL, Op0->getType(), true).getSimpleVT();
1976 bool IsZExt = Opcode == Instruction::LShr;
1977 if (!emitIntExt(Op0MVT, Op0Reg, MVT::i32, TempReg, IsZExt))
1978 return false;
1979
1980 Op0Reg = TempReg;
1981 }
1982
1983 if (const auto *C = dyn_cast<ConstantInt>(I->getOperand(1))) {
1984 uint64_t ShiftVal = C->getZExtValue();
1985
1986 switch (Opcode) {
1987 default:
1988 llvm_unreachable("Unexpected instruction.");
1989 case Instruction::Shl:
1990 Opcode = Mips::SLL;
1991 break;
1992 case Instruction::AShr:
1993 Opcode = Mips::SRA;
1994 break;
1995 case Instruction::LShr:
1996 Opcode = Mips::SRL;
1997 break;
1998 }
1999
2000 emitInst(Opcode, ResultReg).addReg(Op0Reg).addImm(ShiftVal);
2001 updateValueMap(I, ResultReg);
2002 return true;
2003 }
2004
2005 Register Op1Reg = getRegForValue(I->getOperand(1));
2006 if (!Op1Reg)
2007 return false;
2008
2009 switch (Opcode) {
2010 default:
2011 llvm_unreachable("Unexpected instruction.");
2012 case Instruction::Shl:
2013 Opcode = Mips::SLLV;
2014 break;
2015 case Instruction::AShr:
2016 Opcode = Mips::SRAV;
2017 break;
2018 case Instruction::LShr:
2019 Opcode = Mips::SRLV;
2020 break;
2021 }
2022
2023 emitInst(Opcode, ResultReg).addReg(Op0Reg).addReg(Op1Reg);
2024 updateValueMap(I, ResultReg);
2025 return true;
2026}
2027
2028bool MipsFastISel::fastSelectInstruction(const Instruction *I) {
2029 switch (I->getOpcode()) {
2030 default:
2031 break;
2032 case Instruction::Load:
2033 return selectLoad(I);
2034 case Instruction::Store:
2035 return selectStore(I);
2036 case Instruction::SDiv:
2037 if (!selectBinaryOp(I, ISD::SDIV))
2038 return selectDivRem(I, ISD::SDIV);
2039 return true;
2040 case Instruction::UDiv:
2041 if (!selectBinaryOp(I, ISD::UDIV))
2042 return selectDivRem(I, ISD::UDIV);
2043 return true;
2044 case Instruction::SRem:
2045 if (!selectBinaryOp(I, ISD::SREM))
2046 return selectDivRem(I, ISD::SREM);
2047 return true;
2048 case Instruction::URem:
2049 if (!selectBinaryOp(I, ISD::UREM))
2050 return selectDivRem(I, ISD::UREM);
2051 return true;
2052 case Instruction::Shl:
2053 case Instruction::LShr:
2054 case Instruction::AShr:
2055 return selectShift(I);
2056 case Instruction::And:
2057 case Instruction::Or:
2058 case Instruction::Xor:
2059 return selectLogicalOp(I);
2060 case Instruction::Br:
2061 return selectBranch(I);
2062 case Instruction::Ret:
2063 return selectRet(I);
2064 case Instruction::Trunc:
2065 return selectTrunc(I);
2066 case Instruction::ZExt:
2067 case Instruction::SExt:
2068 return selectIntExt(I);
2069 case Instruction::FPTrunc:
2070 return selectFPTrunc(I);
2071 case Instruction::FPExt:
2072 return selectFPExt(I);
2073 case Instruction::FPToSI:
2074 return selectFPToInt(I, /*isSigned*/ true);
2075 case Instruction::FPToUI:
2076 return selectFPToInt(I, /*isSigned*/ false);
2077 case Instruction::ICmp:
2078 case Instruction::FCmp:
2079 return selectCmp(I);
2080 case Instruction::Select:
2081 return selectSelect(I);
2082 }
2083 return false;
2084}
2085
2086unsigned MipsFastISel::getRegEnsuringSimpleIntegerWidening(const Value *V,
2087 bool IsUnsigned) {
2088 Register VReg = getRegForValue(V);
2089 if (VReg == 0)
2090 return 0;
2091 MVT VMVT = TLI.getValueType(DL, V->getType(), true).getSimpleVT();
2092
2093 if (VMVT == MVT::i1)
2094 return 0;
2095
2096 if ((VMVT == MVT::i8) || (VMVT == MVT::i16)) {
2097 Register TempReg = createResultReg(&Mips::GPR32RegClass);
2098 if (!emitIntExt(VMVT, VReg, MVT::i32, TempReg, IsUnsigned))
2099 return 0;
2100 VReg = TempReg;
2101 }
2102 return VReg;
2103}
2104
2105void MipsFastISel::simplifyAddress(Address &Addr) {
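  // MIPS load/store offsets are signed 16-bit immediates; larger offsets are
  // folded into a new base register here.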
2106 if (!isInt<16>(Addr.getOffset())) {
2107 unsigned TempReg =
2108 materialize32BitInt(Addr.getOffset(), &Mips::GPR32RegClass);
2109 Register DestReg = createResultReg(&Mips::GPR32RegClass);
2110 emitInst(Mips::ADDu, DestReg).addReg(TempReg).addReg(Addr.getReg());
2111 Addr.setReg(DestReg);
2112 Addr.setOffset(0);
2113 }
2114}
2115
2116unsigned MipsFastISel::fastEmitInst_rr(unsigned MachineInstOpcode,
2117 const TargetRegisterClass *RC,
2118 unsigned Op0, unsigned Op1) {
2119 // We treat the MUL instruction in a special way because it clobbers
2120 // the HI0 & LO0 registers. The TableGen definition of this instruction can
2121 // mark these registers only as implicitly defined. As a result, the
2122 // register allocator runs out of registers when this instruction is
2123 // followed by another instruction that defines the same registers too.
2124 // We can fix this by explicitly marking those registers as dead.
2125 if (MachineInstOpcode == Mips::MUL) {
2126 Register ResultReg = createResultReg(RC);
2127 const MCInstrDesc &II = TII.get(MachineInstOpcode);
2128 Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
2129 Op1 = constrainOperandRegClass(II, Op1, II.getNumDefs() + 1);
2130 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II, ResultReg)
2131 .addReg(Op0)
2132         .addReg(Op1)
2133         .addReg(Mips::HI0, RegState::ImplicitDefine | RegState::Dead)
2134         .addReg(Mips::LO0, RegState::ImplicitDefine | RegState::Dead);
2135     return ResultReg;
2136 }
2137
2138 return FastISel::fastEmitInst_rr(MachineInstOpcode, RC, Op0, Op1);
2139}
2140
2141namespace llvm {
2142
2143 FastISel *Mips::createFastISel(FunctionLoweringInfo &funcInfo,
2144                                const TargetLibraryInfo *libInfo) {
2145 return new MipsFastISel(funcInfo, libInfo);
2146}
2147
2148} // end namespace llvm