//===- MipsISelLowering.cpp - Mips DAG Lowering Implementation -----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that Mips uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#include "MipsISelLowering.h"
#include "MipsCCState.h"
#include "MipsInstrInfo.h"
#include "MipsMachineFunction.h"
#include "MipsRegisterInfo.h"
#include "MipsSubtarget.h"
#include "MipsTargetMachine.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/MC/MCContext.h"
#include <algorithm>
#include <cassert>
#include <cctype>
#include <cstdint>
#include <deque>
#include <iterator>
#include <utility>
#include <vector>

using namespace llvm;

#define DEBUG_TYPE "mips-lower"

STATISTIC(NumTailCalls, "Number of tail calls");

static cl::opt<bool>
NoZeroDivCheck("mno-check-zero-division", cl::Hidden,
               cl::desc("MIPS: Don't trap on integer division by zero."),
               cl::init(false));


static const MCPhysReg Mips64DPRegs[8] = {
  Mips::D12_64, Mips::D13_64, Mips::D14_64, Mips::D15_64,
  Mips::D16_64, Mips::D17_64, Mips::D18_64, Mips::D19_64
};

// The MIPS MSA ABI passes vector arguments in the integer register set.
// The number of integer registers used is dependent on the ABI used.
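// For example, a 128-bit v4i32 MSA argument is passed in four i32 pieces
// under O32, but in two i64 pieces under N32/N64.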
MVT MipsTargetLowering::getRegisterTypeForCallingConv(LLVMContext &Context,
                                                      CallingConv::ID CC,
                                                      EVT VT) const {
  if (!VT.isVector())
    return getRegisterType(Context, VT);

  return Subtarget.isABI_O32() || VT.getSizeInBits() == 32 ? MVT::i32
                                                           : MVT::i64;
}

unsigned MipsTargetLowering::getNumRegistersForCallingConv(
    LLVMContext &Context, CallingConv::ID CC, EVT VT) const {
  if (VT.isVector())
    return divideCeil(VT.getSizeInBits(), Subtarget.isABI_O32() ? 32 : 64);
  return MipsTargetLowering::getNumRegisters(Context, VT);
}

unsigned MipsTargetLowering::getVectorTypeBreakdownForCallingConv(
    LLVMContext &Context, CallingConv::ID CC, EVT VT, EVT &IntermediateVT,
    unsigned &NumIntermediates, MVT &RegisterVT) const {
  // Break down vector types to either 2 i64s or 4 i32s.
  RegisterVT = getRegisterTypeForCallingConv(Context, CC, VT);
  IntermediateVT = RegisterVT;
  NumIntermediates =
      VT.getFixedSizeInBits() < RegisterVT.getFixedSizeInBits()
          ? VT.getVectorNumElements()
          : divideCeil(VT.getSizeInBits(), RegisterVT.getSizeInBits());
  return NumIntermediates;
}

SDValue MipsTargetLowering::getGlobalReg(SelectionDAG &DAG, EVT Ty) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MipsFunctionInfo *FI = MF.getInfo<MipsFunctionInfo>();
  return DAG.getRegister(FI->getGlobalBaseReg(MF), Ty);
}

SDValue MipsTargetLowering::getTargetNode(GlobalAddressSDNode *N, EVT Ty,
                                          SelectionDAG &DAG,
                                          unsigned Flag) const {
  return DAG.getTargetGlobalAddress(N->getGlobal(), SDLoc(N), Ty, 0, Flag);
}

SDValue MipsTargetLowering::getTargetNode(ExternalSymbolSDNode *N, EVT Ty,
                                          SelectionDAG &DAG,
                                          unsigned Flag) const {
  return DAG.getTargetExternalSymbol(N->getSymbol(), Ty, Flag);
}

SDValue MipsTargetLowering::getTargetNode(BlockAddressSDNode *N, EVT Ty,
                                          SelectionDAG &DAG,
                                          unsigned Flag) const {
  return DAG.getTargetBlockAddress(N->getBlockAddress(), Ty, 0, Flag);
}

SDValue MipsTargetLowering::getTargetNode(JumpTableSDNode *N, EVT Ty,
                                          SelectionDAG &DAG,
                                          unsigned Flag) const {
  return DAG.getTargetJumpTable(N->getIndex(), Ty, Flag);
}

SDValue MipsTargetLowering::getTargetNode(ConstantPoolSDNode *N, EVT Ty,
                                          SelectionDAG &DAG,
                                          unsigned Flag) const {
  return DAG.getTargetConstantPool(N->getConstVal(), Ty, N->getAlign(),
                                   N->getOffset(), Flag);
}

const char *MipsTargetLowering::getTargetNodeName(unsigned Opcode) const {
  switch ((MipsISD::NodeType)Opcode) {
  case MipsISD::FIRST_NUMBER:      break;
  case MipsISD::JmpLink:           return "MipsISD::JmpLink";
  case MipsISD::TailCall:          return "MipsISD::TailCall";
  case MipsISD::Highest:           return "MipsISD::Highest";
  case MipsISD::Higher:            return "MipsISD::Higher";
  case MipsISD::Hi:                return "MipsISD::Hi";
  case MipsISD::Lo:                return "MipsISD::Lo";
  case MipsISD::GotHi:             return "MipsISD::GotHi";
  case MipsISD::TlsHi:             return "MipsISD::TlsHi";
  case MipsISD::GPRel:             return "MipsISD::GPRel";
  case MipsISD::ThreadPointer:     return "MipsISD::ThreadPointer";
  case MipsISD::Ret:               return "MipsISD::Ret";
  case MipsISD::ERet:              return "MipsISD::ERet";
  case MipsISD::EH_RETURN:         return "MipsISD::EH_RETURN";
  case MipsISD::FAbs:              return "MipsISD::FAbs";
  case MipsISD::FMS:               return "MipsISD::FMS";
  case MipsISD::FPBrcond:          return "MipsISD::FPBrcond";
  case MipsISD::FPCmp:             return "MipsISD::FPCmp";
  case MipsISD::FSELECT:           return "MipsISD::FSELECT";
  case MipsISD::MTC1_D64:          return "MipsISD::MTC1_D64";
  case MipsISD::CMovFP_T:          return "MipsISD::CMovFP_T";
  case MipsISD::CMovFP_F:          return "MipsISD::CMovFP_F";
  case MipsISD::TruncIntFP:        return "MipsISD::TruncIntFP";
  case MipsISD::MFHI:              return "MipsISD::MFHI";
  case MipsISD::MFLO:              return "MipsISD::MFLO";
  case MipsISD::MTLOHI:            return "MipsISD::MTLOHI";
  case MipsISD::Mult:              return "MipsISD::Mult";
  case MipsISD::Multu:             return "MipsISD::Multu";
  case MipsISD::MAdd:              return "MipsISD::MAdd";
  case MipsISD::MAddu:             return "MipsISD::MAddu";
  case MipsISD::MSub:              return "MipsISD::MSub";
  case MipsISD::MSubu:             return "MipsISD::MSubu";
  case MipsISD::DivRem:            return "MipsISD::DivRem";
  case MipsISD::DivRemU:           return "MipsISD::DivRemU";
  case MipsISD::DivRem16:          return "MipsISD::DivRem16";
  case MipsISD::DivRemU16:         return "MipsISD::DivRemU16";
  case MipsISD::BuildPairF64:      return "MipsISD::BuildPairF64";
  case MipsISD::ExtractElementF64: return "MipsISD::ExtractElementF64";
  case MipsISD::Wrapper:           return "MipsISD::Wrapper";
  case MipsISD::DynAlloc:          return "MipsISD::DynAlloc";
  case MipsISD::Sync:              return "MipsISD::Sync";
  case MipsISD::Ext:               return "MipsISD::Ext";
  case MipsISD::Ins:               return "MipsISD::Ins";
  case MipsISD::CIns:              return "MipsISD::CIns";
  case MipsISD::LWL:               return "MipsISD::LWL";
  case MipsISD::LWR:               return "MipsISD::LWR";
  case MipsISD::SWL:               return "MipsISD::SWL";
  case MipsISD::SWR:               return "MipsISD::SWR";
  case MipsISD::LDL:               return "MipsISD::LDL";
  case MipsISD::LDR:               return "MipsISD::LDR";
  case MipsISD::SDL:               return "MipsISD::SDL";
  case MipsISD::SDR:               return "MipsISD::SDR";
  case MipsISD::EXTP:              return "MipsISD::EXTP";
  case MipsISD::EXTPDP:            return "MipsISD::EXTPDP";
  case MipsISD::EXTR_S_H:          return "MipsISD::EXTR_S_H";
  case MipsISD::EXTR_W:            return "MipsISD::EXTR_W";
  case MipsISD::EXTR_R_W:          return "MipsISD::EXTR_R_W";
  case MipsISD::EXTR_RS_W:         return "MipsISD::EXTR_RS_W";
  case MipsISD::SHILO:             return "MipsISD::SHILO";
  case MipsISD::MTHLIP:            return "MipsISD::MTHLIP";
  case MipsISD::MULSAQ_S_W_PH:     return "MipsISD::MULSAQ_S_W_PH";
  case MipsISD::MAQ_S_W_PHL:       return "MipsISD::MAQ_S_W_PHL";
  case MipsISD::MAQ_S_W_PHR:       return "MipsISD::MAQ_S_W_PHR";
  case MipsISD::MAQ_SA_W_PHL:      return "MipsISD::MAQ_SA_W_PHL";
  case MipsISD::MAQ_SA_W_PHR:      return "MipsISD::MAQ_SA_W_PHR";
  case MipsISD::DPAU_H_QBL:        return "MipsISD::DPAU_H_QBL";
  case MipsISD::DPAU_H_QBR:        return "MipsISD::DPAU_H_QBR";
  case MipsISD::DPSU_H_QBL:        return "MipsISD::DPSU_H_QBL";
  case MipsISD::DPSU_H_QBR:        return "MipsISD::DPSU_H_QBR";
  case MipsISD::DPAQ_S_W_PH:       return "MipsISD::DPAQ_S_W_PH";
  case MipsISD::DPSQ_S_W_PH:       return "MipsISD::DPSQ_S_W_PH";
  case MipsISD::DPAQ_SA_L_W:       return "MipsISD::DPAQ_SA_L_W";
  case MipsISD::DPSQ_SA_L_W:       return "MipsISD::DPSQ_SA_L_W";
  case MipsISD::DPA_W_PH:          return "MipsISD::DPA_W_PH";
  case MipsISD::DPS_W_PH:          return "MipsISD::DPS_W_PH";
  case MipsISD::DPAQX_S_W_PH:      return "MipsISD::DPAQX_S_W_PH";
  case MipsISD::DPAQX_SA_W_PH:     return "MipsISD::DPAQX_SA_W_PH";
  case MipsISD::DPAX_W_PH:         return "MipsISD::DPAX_W_PH";
  case MipsISD::DPSX_W_PH:         return "MipsISD::DPSX_W_PH";
  case MipsISD::DPSQX_S_W_PH:      return "MipsISD::DPSQX_S_W_PH";
  case MipsISD::DPSQX_SA_W_PH:     return "MipsISD::DPSQX_SA_W_PH";
  case MipsISD::MULSA_W_PH:        return "MipsISD::MULSA_W_PH";
  case MipsISD::MULT:              return "MipsISD::MULT";
  case MipsISD::MULTU:             return "MipsISD::MULTU";
  case MipsISD::MADD_DSP:          return "MipsISD::MADD_DSP";
  case MipsISD::MADDU_DSP:         return "MipsISD::MADDU_DSP";
  case MipsISD::MSUB_DSP:          return "MipsISD::MSUB_DSP";
  case MipsISD::MSUBU_DSP:         return "MipsISD::MSUBU_DSP";
  case MipsISD::SHLL_DSP:          return "MipsISD::SHLL_DSP";
  case MipsISD::SHRA_DSP:          return "MipsISD::SHRA_DSP";
  case MipsISD::SHRL_DSP:          return "MipsISD::SHRL_DSP";
  case MipsISD::SETCC_DSP:         return "MipsISD::SETCC_DSP";
  case MipsISD::SELECT_CC_DSP:     return "MipsISD::SELECT_CC_DSP";
  case MipsISD::VALL_ZERO:         return "MipsISD::VALL_ZERO";
  case MipsISD::VANY_ZERO:         return "MipsISD::VANY_ZERO";
  case MipsISD::VALL_NONZERO:      return "MipsISD::VALL_NONZERO";
  case MipsISD::VANY_NONZERO:      return "MipsISD::VANY_NONZERO";
  case MipsISD::VCEQ:              return "MipsISD::VCEQ";
  case MipsISD::VCLE_S:            return "MipsISD::VCLE_S";
  case MipsISD::VCLE_U:            return "MipsISD::VCLE_U";
  case MipsISD::VCLT_S:            return "MipsISD::VCLT_S";
  case MipsISD::VCLT_U:            return "MipsISD::VCLT_U";
  case MipsISD::VEXTRACT_SEXT_ELT: return "MipsISD::VEXTRACT_SEXT_ELT";
  case MipsISD::VEXTRACT_ZEXT_ELT: return "MipsISD::VEXTRACT_ZEXT_ELT";
  case MipsISD::VNOR:              return "MipsISD::VNOR";
  case MipsISD::VSHF:              return "MipsISD::VSHF";
  case MipsISD::SHF:               return "MipsISD::SHF";
  case MipsISD::ILVEV:             return "MipsISD::ILVEV";
  case MipsISD::ILVOD:             return "MipsISD::ILVOD";
  case MipsISD::ILVL:              return "MipsISD::ILVL";
  case MipsISD::ILVR:              return "MipsISD::ILVR";
  case MipsISD::PCKEV:             return "MipsISD::PCKEV";
  case MipsISD::PCKOD:             return "MipsISD::PCKOD";
  case MipsISD::INSVE:             return "MipsISD::INSVE";
  }
  return nullptr;
}

MipsTargetLowering::MipsTargetLowering(const MipsTargetMachine &TM,
                                       const MipsSubtarget &STI)
    : TargetLowering(TM), Subtarget(STI), ABI(TM.getABI()) {
  // Mips does not have i1 type, so use i32 for
  // setcc operations results (slt, sgt, ...).
  setBooleanContents(ZeroOrOneBooleanContent);
  setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);
  // The cmp.cond.fmt instruction in MIPS32r6/MIPS64r6 uses 0 and -1 like MSA
  // does. Integer booleans still use 0 and 1.
  if (Subtarget.hasMips32r6())
    setBooleanContents(ZeroOrOneBooleanContent,
                       ZeroOrNegativeOneBooleanContent);

  // Load extended operations for i1 types must be promoted
  for (MVT VT : MVT::integer_valuetypes()) {
    setLoadExtAction(ISD::EXTLOAD,  VT, MVT::i1, Promote);
    setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i1, Promote);
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
  }

  // MIPS doesn't have extending float->double load/store. Set LoadExtAction
  // for f32, f16
  for (MVT VT : MVT::fp_valuetypes()) {
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::f32, Expand);
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::f16, Expand);
  }

  // Set LoadExtAction for f16 vectors to Expand
  for (MVT VT : MVT::fp_fixedlen_vector_valuetypes()) {
    MVT F16VT = MVT::getVectorVT(MVT::f16, VT.getVectorNumElements());
    if (F16VT.isValid())
      setLoadExtAction(ISD::EXTLOAD, VT, F16VT, Expand);
  }

  setTruncStoreAction(MVT::f32, MVT::f16, Expand);
  setTruncStoreAction(MVT::f64, MVT::f16, Expand);

  setTruncStoreAction(MVT::f64, MVT::f32, Expand);

  // Used by legalize types to correctly generate the setcc result.
  // Without this, every float setcc comes with an AND/OR with the result;
  // we don't want this, since the fpcmp result goes to a flag register,
  // which is used implicitly by brcond and select operations.
  AddPromotedToType(ISD::SETCC, MVT::i1, MVT::i32);

  // Mips Custom Operations

  if (Subtarget.isGP64bit()) {
  }

  if (!Subtarget.isGP64bit()) {
  }

  if (Subtarget.isGP64bit())

  // Operations not directly supported by Mips.
  if (Subtarget.hasCnMips()) {
  } else {
  }

  if (!Subtarget.hasMips32r2())

  if (!Subtarget.hasMips64r2())

  // Lower f16 conversion operations into library calls

  // Use the default for now

  if (!Subtarget.isGP64bit()) {
  }

  if (!Subtarget.hasMips32r2()) {
  }

  // MIPS16 lacks MIPS32's clz and clo instructions.
  if (!Subtarget.hasMips64())

  if (!Subtarget.hasMips32r2())
  if (!Subtarget.hasMips64r2())

  if (Subtarget.isGP64bit()) {
    setLoadExtAction(ISD::SEXTLOAD, MVT::i64, MVT::i32, Custom);
    setLoadExtAction(ISD::ZEXTLOAD, MVT::i64, MVT::i32, Custom);
    setLoadExtAction(ISD::EXTLOAD, MVT::i64, MVT::i32, Custom);
    setTruncStoreAction(MVT::i64, MVT::i32, Custom);
  }

  setOperationAction(ISD::TRAP, MVT::Other, Legal);

  setTargetDAGCombine({ISD::SDIVREM, ISD::UDIVREM, ISD::SELECT, ISD::AND,
                       ISD::OR, ISD::ADD, ISD::SUB, ISD::SHL});

  if (ABI.IsO32()) {
    // These libcalls are not available in 32-bit.
    setLibcallName(RTLIB::SHL_I128, nullptr);
    setLibcallName(RTLIB::SRL_I128, nullptr);
    setLibcallName(RTLIB::SRA_I128, nullptr);
    setLibcallName(RTLIB::MUL_I128, nullptr);
    setLibcallName(RTLIB::MULO_I64, nullptr);
    setLibcallName(RTLIB::MULO_I128, nullptr);
  }

  setMinFunctionAlignment(Subtarget.isGP64bit() ? Align(8) : Align(4));

  // The arguments on the stack are defined in terms of 4-byte slots on O32
  // and 8-byte slots on N32/N64.
  setMinStackArgumentAlignment((ABI.IsN32() || ABI.IsN64()) ? Align(8)
                                                            : Align(4));

  setStackPointerRegisterToSaveRestore(ABI.IsN64() ? Mips::SP_64 : Mips::SP);

  MaxStoresPerMemcpy = 16;

  isMicroMips = Subtarget.inMicroMipsMode();
}

const MipsTargetLowering *
MipsTargetLowering::create(const MipsTargetMachine &TM,
                           const MipsSubtarget &STI) {
  if (STI.inMips16Mode())
    return createMips16TargetLowering(TM, STI);

  return createMipsSETargetLowering(TM, STI);
}

// Create a fast isel object.
FastISel *
MipsTargetLowering::createFastISel(FunctionLoweringInfo &funcInfo,
                                   const TargetLibraryInfo *libInfo) const {
  const MipsTargetMachine &TM =
      static_cast<const MipsTargetMachine &>(funcInfo.MF->getTarget());

  // We support only the standard encoding [MIPS32,MIPS32R5] ISAs.
  bool UseFastISel = TM.Options.EnableFastISel && Subtarget.hasMips32() &&
                     !Subtarget.hasMips32r6() && !Subtarget.inMips16Mode() &&
                     !Subtarget.inMicroMipsMode();

  // Disable if either of the following is true:
  // We do not generate PIC, the ABI is not O32, XGOT is being used.
  if (!TM.isPositionIndependent() || !TM.getABI().IsO32() ||
      Subtarget.useXGOT())
    UseFastISel = false;

  return UseFastISel ? Mips::createFastISel(funcInfo, libInfo) : nullptr;
}

EVT MipsTargetLowering::getSetCCResultType(const DataLayout &DL,
                                           LLVMContext &, EVT VT) const {
  if (!VT.isVector())
    return MVT::i32;
  return VT.changeVectorElementTypeToInteger();
}

static SDValue performDivRemCombine(SDNode *N, SelectionDAG &DAG,
                                    TargetLowering::DAGCombinerInfo &DCI,
                                    const MipsSubtarget &Subtarget) {
  if (DCI.isBeforeLegalizeOps())
    return SDValue();

  EVT Ty = N->getValueType(0);
  unsigned LO = (Ty == MVT::i32) ? Mips::LO0 : Mips::LO0_64;
  unsigned HI = (Ty == MVT::i32) ? Mips::HI0 : Mips::HI0_64;
  unsigned Opc = N->getOpcode() == ISD::SDIVREM ? MipsISD::DivRem16 :
                                                  MipsISD::DivRemU16;
  SDLoc DL(N);

  SDValue DivRem = DAG.getNode(Opc, DL, MVT::Glue,
                               N->getOperand(0), N->getOperand(1));
  SDValue InChain = DAG.getEntryNode();
  SDValue InGlue = DivRem;

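  // The copies from LO/HI below are glued to the divide node so that no
  // other instruction can be scheduled in between and clobber LO/HI.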
  // insert MFLO
  if (N->hasAnyUseOfValue(0)) {
    SDValue CopyFromLo = DAG.getCopyFromReg(InChain, DL, LO, Ty,
                                            InGlue);
    DAG.ReplaceAllUsesOfValueWith(SDValue(N, 0), CopyFromLo);
    InChain = CopyFromLo.getValue(1);
    InGlue = CopyFromLo.getValue(2);
  }

  // insert MFHI
  if (N->hasAnyUseOfValue(1)) {
    SDValue CopyFromHi = DAG.getCopyFromReg(InChain, DL,
                                            HI, Ty, InGlue);
    DAG.ReplaceAllUsesOfValueWith(SDValue(N, 1), CopyFromHi);
  }

  return SDValue();
}

static Mips::CondCode condCodeToFCC(ISD::CondCode CC) {
  switch (CC) {
  default: llvm_unreachable("Unknown fp condition code!");
  case ISD::SETEQ:
  case ISD::SETOEQ: return Mips::FCOND_OEQ;
  case ISD::SETUNE: return Mips::FCOND_UNE;
  case ISD::SETLT:
  case ISD::SETOLT: return Mips::FCOND_OLT;
  case ISD::SETGT:
  case ISD::SETOGT: return Mips::FCOND_OGT;
  case ISD::SETLE:
  case ISD::SETOLE: return Mips::FCOND_OLE;
  case ISD::SETGE:
  case ISD::SETOGE: return Mips::FCOND_OGE;
  case ISD::SETULT: return Mips::FCOND_ULT;
  case ISD::SETULE: return Mips::FCOND_ULE;
  case ISD::SETUGT: return Mips::FCOND_UGT;
  case ISD::SETUGE: return Mips::FCOND_UGE;
  case ISD::SETUO:  return Mips::FCOND_UN;
  case ISD::SETO:   return Mips::FCOND_OR;
  case ISD::SETNE:
  case ISD::SETONE: return Mips::FCOND_ONE;
  case ISD::SETUEQ: return Mips::FCOND_UEQ;
  }
}

/// This function returns true if the floating point conditional branches and
/// conditional moves which use condition code CC should be inverted.
static bool invertFPCondCodeUser(Mips::CondCode CC) {
  if (CC >= Mips::FCOND_F && CC <= Mips::FCOND_NGT)
    return false;

  assert((CC >= Mips::FCOND_T && CC <= Mips::FCOND_GT) &&
         "Illegal Condition Code");

  return true;
}

// Creates and returns an FPCmp node from a setcc node.
// Returns Op if setcc is not a floating point comparison.
static SDValue createFPCmp(SelectionDAG &DAG, const SDValue &Op) {
  // must be a SETCC node
  if (Op.getOpcode() != ISD::SETCC)
    return Op;

  SDValue LHS = Op.getOperand(0);

  if (!LHS.getValueType().isFloatingPoint())
    return Op;

  SDValue RHS = Op.getOperand(1);
  SDLoc DL(Op);

  // Assume the 3rd operand is a CondCodeSDNode. Add code to check the type of
  // node if necessary.
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();

  return DAG.getNode(MipsISD::FPCmp, DL, MVT::Glue, LHS, RHS,
                     DAG.getConstant(condCodeToFCC(CC), DL, MVT::i32));
}

// Creates and returns a CMovFPT/F node.
static SDValue createCMovFP(SelectionDAG &DAG, SDValue Cond, SDValue True,
                            SDValue False, const SDLoc &DL) {
  ConstantSDNode *CC = cast<ConstantSDNode>(Cond.getOperand(2));
  bool invert = invertFPCondCodeUser((Mips::CondCode)CC->getSExtValue());
  SDValue FCC0 = DAG.getRegister(Mips::FCC0, MVT::i32);

  return DAG.getNode((invert ? MipsISD::CMovFP_F : MipsISD::CMovFP_T), DL,
                     True.getValueType(), True, FCC0, False, Cond);
}

static SDValue performSELECTCombine(SDNode *N, SelectionDAG &DAG,
                                    TargetLowering::DAGCombinerInfo &DCI,
                                    const MipsSubtarget &Subtarget) {
  if (DCI.isBeforeLegalizeOps())
    return SDValue();

  SDValue SetCC = N->getOperand(0);

  if ((SetCC.getOpcode() != ISD::SETCC) ||
      !SetCC.getOperand(0).getValueType().isInteger())
    return SDValue();

  SDValue False = N->getOperand(2);
  EVT FalseTy = False.getValueType();

  if (!FalseTy.isInteger())
    return SDValue();

  ConstantSDNode *FalseC = dyn_cast<ConstantSDNode>(False);

  // If the RHS (False) is 0, we swap the order of the operands
  // of ISD::SELECT (obviously also inverting the condition) so that we can
  // take advantage of conditional moves using the $0 register.
  // Example:
  //   return (a != 0) ? x : 0;
  //     load $reg, x
  //     movz $reg, $0, a
  if (!FalseC)
    return SDValue();

  const SDLoc DL(N);

  if (!FalseC->getZExtValue()) {
    ISD::CondCode CC = cast<CondCodeSDNode>(SetCC.getOperand(2))->get();
    SDValue True = N->getOperand(1);

    SetCC = DAG.getSetCC(DL, SetCC.getValueType(), SetCC.getOperand(0),
                         SetCC.getOperand(1),
                         ISD::getSetCCInverse(CC, SetCC.getValueType()));

    return DAG.getNode(ISD::SELECT, DL, FalseTy, SetCC, False, True);
  }

  // If both operands are integer constants there's a possibility that we
  // can do some interesting optimizations.
  SDValue True = N->getOperand(1);
  ConstantSDNode *TrueC = dyn_cast<ConstantSDNode>(True);

  if (!TrueC || !True.getValueType().isInteger())
    return SDValue();

  // We'll also ignore MVT::i64 operands as this optimization proves
  // to be ineffective because of the required sign extensions as the result
  // of a SETCC operator is always MVT::i32 for non-vector types.
  if (True.getValueType() == MVT::i64)
    return SDValue();

  int64_t Diff = TrueC->getSExtValue() - FalseC->getSExtValue();

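  // A SETCC node produces 0 or 1, so when the two constants differ by
  // exactly one, the select collapses to an add of the comparison result.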
  // 1) (a < x) ? y : y-1
  //  slti $reg1, a, x
  //  addiu $reg2, $reg1, y-1
  if (Diff == 1)
    return DAG.getNode(ISD::ADD, DL, SetCC.getValueType(), SetCC, False);

  // 2) (a < x) ? y-1 : y
  //  slti $reg1, a, x
  //  xor $reg1, $reg1, 1
  //  addiu $reg2, $reg1, y-1
  if (Diff == -1) {
    ISD::CondCode CC = cast<CondCodeSDNode>(SetCC.getOperand(2))->get();
    SetCC = DAG.getSetCC(DL, SetCC.getValueType(), SetCC.getOperand(0),
                         SetCC.getOperand(1),
                         ISD::getSetCCInverse(CC, SetCC.getValueType()));
    return DAG.getNode(ISD::ADD, DL, SetCC.getValueType(), SetCC, True);
  }

  // Could not optimize.
  return SDValue();
}

static SDValue performCMovFPCombine(SDNode *N, SelectionDAG &DAG,
                                    TargetLowering::DAGCombinerInfo &DCI,
                                    const MipsSubtarget &Subtarget) {
  if (DCI.isBeforeLegalizeOps())
    return SDValue();

  SDValue ValueIfTrue = N->getOperand(0), ValueIfFalse = N->getOperand(2);

  ConstantSDNode *FalseC = dyn_cast<ConstantSDNode>(ValueIfFalse);
  if (!FalseC || FalseC->getZExtValue())
    return SDValue();

  // Since RHS (False) is 0, we swap the order of the True/False operands
  // (obviously also inverting the condition) so that we can
  // take advantage of conditional moves using the $0 register.
  // Example:
  //   return (a != 0) ? x : 0;
  //     load $reg, x
  //     movz $reg, $0, a
  unsigned Opc = (N->getOpcode() == MipsISD::CMovFP_T) ? MipsISD::CMovFP_F :
                                                         MipsISD::CMovFP_T;

  SDValue FCC = N->getOperand(1), Glue = N->getOperand(3);
  return DAG.getNode(Opc, SDLoc(N), ValueIfFalse.getValueType(),
                     ValueIfFalse, FCC, ValueIfTrue, Glue);
}

static SDValue performANDCombine(SDNode *N, SelectionDAG &DAG,
                                 TargetLowering::DAGCombinerInfo &DCI,
                                 const MipsSubtarget &Subtarget) {
  if (DCI.isBeforeLegalizeOps() || !Subtarget.hasExtractInsert())
    return SDValue();

  SDValue FirstOperand = N->getOperand(0);
  unsigned FirstOperandOpc = FirstOperand.getOpcode();
  SDValue Mask = N->getOperand(1);
  EVT ValTy = N->getValueType(0);
  SDLoc DL(N);

  uint64_t Pos = 0;
  unsigned SMPos, SMSize;
  ConstantSDNode *CN;
  SDValue NewOperand;
  unsigned Opc;

  // Op's second operand must be a shifted mask.
  if (!(CN = dyn_cast<ConstantSDNode>(Mask)) ||
      !isShiftedMask_64(CN->getZExtValue(), SMPos, SMSize))
    return SDValue();
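  // (For example, a mask of 0x00ff0000 yields SMPos == 16 and SMSize == 8.)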

  if (FirstOperandOpc == ISD::SRA || FirstOperandOpc == ISD::SRL) {
    // Pattern match EXT.
    //   $dst = and ((sra or srl) $src , pos), (2**size - 1)
    //   => ext $dst, $src, pos, size

    // The second operand of the shift must be an immediate.
    if (!(CN = dyn_cast<ConstantSDNode>(FirstOperand.getOperand(1))))
      return SDValue();

    Pos = CN->getZExtValue();

    // Return if the shifted mask does not start at bit 0 or the sum of its
    // size and Pos exceeds the word's size.
    if (SMPos != 0 || Pos + SMSize > ValTy.getSizeInBits())
      return SDValue();

    Opc = MipsISD::Ext;
    NewOperand = FirstOperand.getOperand(0);
  } else if (FirstOperandOpc == ISD::SHL && Subtarget.hasCnMips()) {
    // Pattern match CINS.
    //   $dst = and (shl $src , pos), mask
    //   => cins $dst, $src, pos, size
    // mask is a shifted mask with consecutive 1's, pos = shift amount,
    // size = population count.

    // The second operand of the shift must be an immediate.
    if (!(CN = dyn_cast<ConstantSDNode>(FirstOperand.getOperand(1))))
      return SDValue();

    Pos = CN->getZExtValue();

    if (SMPos != Pos || Pos >= ValTy.getSizeInBits() || SMSize >= 32 ||
        Pos + SMSize > ValTy.getSizeInBits())
      return SDValue();

    NewOperand = FirstOperand.getOperand(0);
    // SMSize is 'location' (position) in this case, not size.
    SMSize--;
    Opc = MipsISD::CIns;
  } else {
    // Pattern match EXT.
    //   $dst = and $src, (2**size - 1) , if size > 16
    //   => ext $dst, $src, pos, size , pos = 0

    // If the mask is <= 0xffff, andi can be used instead.
    if (CN->getZExtValue() <= 0xffff)
      return SDValue();

    // Return if the mask doesn't start at position 0.
    if (SMPos)
      return SDValue();

    Opc = MipsISD::Ext;
    NewOperand = FirstOperand;
  }
  return DAG.getNode(Opc, DL, ValTy, NewOperand,
                     DAG.getConstant(Pos, DL, MVT::i32),
                     DAG.getConstant(SMSize, DL, MVT::i32));
}

static SDValue performORCombine(SDNode *N, SelectionDAG &DAG,
                                TargetLowering::DAGCombinerInfo &DCI,
                                const MipsSubtarget &Subtarget) {
  // Pattern match INS.
  //   $dst = or (and $src1 , mask0), (and (shl $src, pos), mask1),
  //   where mask1 = (2**size - 1) << pos, mask0 = ~mask1
  //   => ins $dst, $src, size, pos, $src1
  if (DCI.isBeforeLegalizeOps() || !Subtarget.hasExtractInsert())
    return SDValue();

  SDValue And0 = N->getOperand(0), And1 = N->getOperand(1);
  unsigned SMPos0, SMSize0, SMPos1, SMSize1;
  ConstantSDNode *CN, *CN1;

  // See if Op's first operand matches (and $src1 , mask0).
  if (And0.getOpcode() != ISD::AND)
    return SDValue();

  if (!(CN = dyn_cast<ConstantSDNode>(And0.getOperand(1))) ||
      !isShiftedMask_64(~CN->getSExtValue(), SMPos0, SMSize0))
    return SDValue();

  // See if Op's second operand matches (and (shl $src, pos), mask1).
  if (And1.getOpcode() == ISD::AND &&
      And1.getOperand(0).getOpcode() == ISD::SHL) {

    if (!(CN = dyn_cast<ConstantSDNode>(And1.getOperand(1))) ||
        !isShiftedMask_64(CN->getZExtValue(), SMPos1, SMSize1))
      return SDValue();

    // The shift masks must have the same position and size.
    if (SMPos0 != SMPos1 || SMSize0 != SMSize1)
      return SDValue();

    SDValue Shl = And1.getOperand(0);

    if (!(CN = dyn_cast<ConstantSDNode>(Shl.getOperand(1))))
      return SDValue();

    unsigned Shamt = CN->getZExtValue();

    // Return if the shift amount and the first bit position of the mask are
    // not the same.
    EVT ValTy = N->getValueType(0);
    if ((Shamt != SMPos0) || (SMPos0 + SMSize0 > ValTy.getSizeInBits()))
      return SDValue();

    SDLoc DL(N);
    return DAG.getNode(MipsISD::Ins, DL, ValTy, Shl.getOperand(0),
                       DAG.getConstant(SMPos0, DL, MVT::i32),
                       DAG.getConstant(SMSize0, DL, MVT::i32),
                       And0.getOperand(0));
  } else {
    // Pattern match DINS.
    //   $dst = or (and $src, mask0), mask1
    //   where mask0 = ((1 << SMSize0) -1) << SMPos0
    //   => dins $dst, $src, pos, size
    if (~CN->getSExtValue() == ((((int64_t)1 << SMSize0) - 1) << SMPos0) &&
        ((SMSize0 + SMPos0 <= 64 && Subtarget.hasMips64r2()) ||
         (SMSize0 + SMPos0 <= 32))) {
      // Check if AND instruction has constant as argument
      bool isConstCase = And1.getOpcode() != ISD::AND;
      if (And1.getOpcode() == ISD::AND) {
        if (!(CN1 = dyn_cast<ConstantSDNode>(And1->getOperand(1))))
          return SDValue();
      } else {
        if (!(CN1 = dyn_cast<ConstantSDNode>(N->getOperand(1))))
          return SDValue();
      }
      // Don't generate INS if constant OR operand doesn't fit into bits
      // cleared by constant AND operand.
      if (CN->getSExtValue() & CN1->getSExtValue())
        return SDValue();

      SDLoc DL(N);
      EVT ValTy = N->getOperand(0)->getValueType(0);
      SDValue Const1;
      SDValue SrlX;
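      // In the non-constant case the field being inserted must first be
      // shifted back down to bit 0, since INS expects its source operand
      // right-justified.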
      if (!isConstCase) {
        Const1 = DAG.getConstant(SMPos0, DL, MVT::i32);
        SrlX = DAG.getNode(ISD::SRL, DL, And1->getValueType(0), And1, Const1);
      }
      return DAG.getNode(
          MipsISD::Ins, DL, N->getValueType(0),
          isConstCase
              ? DAG.getConstant(CN1->getSExtValue() >> SMPos0, DL, ValTy)
              : SrlX,
          DAG.getConstant(SMPos0, DL, MVT::i32),
          DAG.getConstant(ValTy.getSizeInBits() / 8 < 8 ? SMSize0 & 31
                                                        : SMSize0,
                          DL, MVT::i32),
          And0->getOperand(0));

    }
    return SDValue();
  }
}

static SDValue performMADD_MSUBCombine(SDNode *ROOTNode, SelectionDAG &CurDAG,
                                       const MipsSubtarget &Subtarget) {
  // ROOTNode must have a multiplication as an operand for the match to be
  // successful.
  if (ROOTNode->getOperand(0).getOpcode() != ISD::MUL &&
      ROOTNode->getOperand(1).getOpcode() != ISD::MUL)
    return SDValue();

  // In the case where we have a multiplication as the left operand of a
  // subtraction, we can't combine into a MipsISD::MSub node as the
  // instruction definition of msub(u) places the multiplication on the
  // right.
  if (ROOTNode->getOpcode() == ISD::SUB &&
      ROOTNode->getOperand(0).getOpcode() == ISD::MUL)
    return SDValue();

  // We don't handle vector types here.
  if (ROOTNode->getValueType(0).isVector())
    return SDValue();

  // For MIPS64, madd / msub instructions are inefficient to use with 64 bit
  // arithmetic. E.g.
  // (add (mul a b) c) =>
  //   let res = (madd (mthi (drotr c 32))x(mtlo c) a b) in
  //   MIPS64:   (or (dsll (mfhi res) 32) (dsrl (dsll (mflo res) 32) 32)
  //   or
  //   MIPS64R2: (dins (mflo res) (mfhi res) 32 32)
  //
  // The overhead of setting up the Hi/Lo registers and reassembling the
  // result makes this a dubious optimization for MIPS64. The core of the
  // problem is that Hi/Lo contain the upper and lower 32 bits of the
  // operand and result.
  //
  // It requires a chain of 4 add/mul for MIPS64R2 to get better code
  // density than doing it naively, 5 for MIPS64. Additionally, using
  // madd/msub on MIPS64 requires the operands actually be 32 bit sign
  // extended operands, not true 64 bit values.
  //
  // FIXME: For the moment, disable this completely for MIPS64.
  if (Subtarget.hasMips64())
    return SDValue();

  SDValue Mult = ROOTNode->getOperand(0).getOpcode() == ISD::MUL
                     ? ROOTNode->getOperand(0)
                     : ROOTNode->getOperand(1);

  SDValue AddOperand = ROOTNode->getOperand(0).getOpcode() == ISD::MUL
                           ? ROOTNode->getOperand(1)
                           : ROOTNode->getOperand(0);

  // Transform this to a MADD only if the user of this node is the add.
  // If there are other users of the mul, this function returns here.
  if (!Mult.hasOneUse())
    return SDValue();

  // maddu and madd are unusual instructions in that on MIPS64 bits 63..31
  // must be in canonical form, i.e. sign extended. For MIPS32, the operands
  // of the multiply must have 32 or more sign bits, otherwise we cannot
  // perform this optimization. We have to check this here as we're performing
  // this optimization pre-legalization.
  SDValue MultLHS = Mult->getOperand(0);
  SDValue MultRHS = Mult->getOperand(1);

  bool IsSigned = MultLHS->getOpcode() == ISD::SIGN_EXTEND &&
                  MultRHS->getOpcode() == ISD::SIGN_EXTEND;
  bool IsUnsigned = MultLHS->getOpcode() == ISD::ZERO_EXTEND &&
                    MultRHS->getOpcode() == ISD::ZERO_EXTEND;

  if (!IsSigned && !IsUnsigned)
    return SDValue();

  // Initialize accumulator.
  SDLoc DL(ROOTNode);
  SDValue BottomHalf, TopHalf;
  std::tie(BottomHalf, TopHalf) =
      CurDAG.SplitScalar(AddOperand, DL, MVT::i32, MVT::i32);
  SDValue ACCIn =
      CurDAG.getNode(MipsISD::MTLOHI, DL, MVT::Untyped, BottomHalf, TopHalf);
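  // MTLOHI seeds the LO/HI accumulator pair with the two halves of the
  // addend; the madd(u)/msub(u) node then accumulates on top of it.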

  // Create MipsMAdd(u) / MipsMSub(u) node.
  bool IsAdd = ROOTNode->getOpcode() == ISD::ADD;
  unsigned Opcode = IsAdd ? (IsUnsigned ? MipsISD::MAddu : MipsISD::MAdd)
                          : (IsUnsigned ? MipsISD::MSubu : MipsISD::MSub);
  SDValue MAddOps[3] = {
      CurDAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Mult->getOperand(0)),
      CurDAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Mult->getOperand(1)), ACCIn};
  EVT VTs[2] = {MVT::i32, MVT::i32};
  SDValue MAdd = CurDAG.getNode(Opcode, DL, VTs, MAddOps);

  SDValue ResLo = CurDAG.getNode(MipsISD::MFLO, DL, MVT::i32, MAdd);
  SDValue ResHi = CurDAG.getNode(MipsISD::MFHI, DL, MVT::i32, MAdd);
  SDValue Combined =
      CurDAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, ResLo, ResHi);
  return Combined;
}

static SDValue performSUBCombine(SDNode *N, SelectionDAG &DAG,
                                 TargetLowering::DAGCombinerInfo &DCI,
                                 const MipsSubtarget &Subtarget) {
  // (sub v0 (mul v1, v2)) => (msub v1, v2, v0)
  if (DCI.isBeforeLegalizeOps()) {
    if (Subtarget.hasMips32() && !Subtarget.hasMips32r6() &&
        !Subtarget.inMips16Mode() && N->getValueType(0) == MVT::i64)
      return performMADD_MSUBCombine(N, DAG, Subtarget);

    return SDValue();
  }

  return SDValue();
}

static SDValue performADDCombine(SDNode *N, SelectionDAG &DAG,
                                 TargetLowering::DAGCombinerInfo &DCI,
                                 const MipsSubtarget &Subtarget) {
  // (add v0 (mul v1, v2)) => (madd v1, v2, v0)
  if (DCI.isBeforeLegalizeOps()) {
    if (Subtarget.hasMips32() && !Subtarget.hasMips32r6() &&
        !Subtarget.inMips16Mode() && N->getValueType(0) == MVT::i64)
      return performMADD_MSUBCombine(N, DAG, Subtarget);

    return SDValue();
  }

  // (add v0, (add v1, abs_lo(tjt))) => (add (add v0, v1), abs_lo(tjt))
  SDValue Add = N->getOperand(1);

  if (Add.getOpcode() != ISD::ADD)
    return SDValue();

  SDValue Lo = Add.getOperand(1);

  if ((Lo.getOpcode() != MipsISD::Lo) ||
      (Lo.getOperand(0).getOpcode() != ISD::TargetJumpTable))
    return SDValue();

  EVT ValTy = N->getValueType(0);
  SDLoc DL(N);

  SDValue Add1 = DAG.getNode(ISD::ADD, DL, ValTy, N->getOperand(0),
                             Add.getOperand(0));
  return DAG.getNode(ISD::ADD, DL, ValTy, Add1, Lo);
}

static SDValue performSHLCombine(SDNode *N, SelectionDAG &DAG,
                                 TargetLowering::DAGCombinerInfo &DCI,
                                 const MipsSubtarget &Subtarget) {
  // Pattern match CINS.
  //   $dst = shl (and $src , imm), pos
  //   => cins $dst, $src, pos, size

  if (DCI.isBeforeLegalizeOps() || !Subtarget.hasCnMips())
    return SDValue();

  SDValue FirstOperand = N->getOperand(0);
  unsigned FirstOperandOpc = FirstOperand.getOpcode();
  SDValue SecondOperand = N->getOperand(1);
  EVT ValTy = N->getValueType(0);
  SDLoc DL(N);

  uint64_t Pos = 0;
  unsigned SMPos, SMSize;
  ConstantSDNode *CN;
  SDValue NewOperand;

  // The second operand of the shift must be an immediate.
  if (!(CN = dyn_cast<ConstantSDNode>(SecondOperand)))
    return SDValue();

  Pos = CN->getZExtValue();

  if (Pos >= ValTy.getSizeInBits())
    return SDValue();

  if (FirstOperandOpc != ISD::AND)
    return SDValue();

  // AND's second operand must be a shifted mask.
  if (!(CN = dyn_cast<ConstantSDNode>(FirstOperand.getOperand(1))) ||
      !isShiftedMask_64(CN->getZExtValue(), SMPos, SMSize))
    return SDValue();

  // Return if the shifted mask does not start at bit 0 or the sum of its size
  // and Pos exceeds the word's size.
  if (SMPos != 0 || SMSize > 32 || Pos + SMSize > ValTy.getSizeInBits())
    return SDValue();

  NewOperand = FirstOperand.getOperand(0);
  // SMSize is 'location' (position) in this case, not size.
  SMSize--;

  return DAG.getNode(MipsISD::CIns, DL, ValTy, NewOperand,
                     DAG.getConstant(Pos, DL, MVT::i32),
                     DAG.getConstant(SMSize, DL, MVT::i32));
}

SDValue MipsTargetLowering::PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI)
  const {
  SelectionDAG &DAG = DCI.DAG;
  unsigned Opc = N->getOpcode();

  switch (Opc) {
  default: break;
  case ISD::SDIVREM:
  case ISD::UDIVREM:
    return performDivRemCombine(N, DAG, DCI, Subtarget);
  case ISD::SELECT:
    return performSELECTCombine(N, DAG, DCI, Subtarget);
  case MipsISD::CMovFP_F:
  case MipsISD::CMovFP_T:
    return performCMovFPCombine(N, DAG, DCI, Subtarget);
  case ISD::AND:
    return performANDCombine(N, DAG, DCI, Subtarget);
  case ISD::OR:
    return performORCombine(N, DAG, DCI, Subtarget);
  case ISD::ADD:
    return performADDCombine(N, DAG, DCI, Subtarget);
  case ISD::SHL:
    return performSHLCombine(N, DAG, DCI, Subtarget);
  case ISD::SUB:
    return performSUBCombine(N, DAG, DCI, Subtarget);
  }

  return SDValue();
}

bool MipsTargetLowering::isCheapToSpeculateCttz(Type *Ty) const {
  return Subtarget.hasMips32();
}

bool MipsTargetLowering::isCheapToSpeculateCtlz(Type *Ty) const {
  return Subtarget.hasMips32();
}

bool MipsTargetLowering::hasBitTest(SDValue X, SDValue Y) const {
  // We can use ANDI+SLTIU as a bit test. Y contains the bit position.
  // For MIPSR2 or later, we may be able to use the `ext` instruction or its
  // double-word variants.
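  // A constant bit position of at most 15 keeps the mask (1 << pos) within
  // ANDI's 16-bit zero-extended immediate.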
  if (auto *C = dyn_cast<ConstantSDNode>(Y))
    return C->getAPIntValue().ule(15);

  return false;
}

bool MipsTargetLowering::shouldFoldConstantShiftPairToMask(
    const SDNode *N, CombineLevel Level) const {
  assert(((N->getOpcode() == ISD::SHL &&
           N->getOperand(0).getOpcode() == ISD::SRL) ||
          (N->getOpcode() == ISD::SRL &&
           N->getOperand(0).getOpcode() == ISD::SHL)) &&
         "Expected shift-shift mask");

  if (N->getOperand(0).getValueType().isVector())
    return false;
  return true;
}

void
MipsTargetLowering::ReplaceNodeResults(SDNode *N,
                                       SmallVectorImpl<SDValue> &Results,
                                       SelectionDAG &DAG) const {
  return LowerOperationWrapper(N, Results, DAG);
}

SDValue MipsTargetLowering::
LowerOperation(SDValue Op, SelectionDAG &DAG) const
{
  switch (Op.getOpcode())
  {
  case ISD::BRCOND:             return lowerBRCOND(Op, DAG);
  case ISD::ConstantPool:       return lowerConstantPool(Op, DAG);
  case ISD::GlobalAddress:      return lowerGlobalAddress(Op, DAG);
  case ISD::BlockAddress:       return lowerBlockAddress(Op, DAG);
  case ISD::GlobalTLSAddress:   return lowerGlobalTLSAddress(Op, DAG);
  case ISD::JumpTable:          return lowerJumpTable(Op, DAG);
  case ISD::SELECT:             return lowerSELECT(Op, DAG);
  case ISD::SETCC:              return lowerSETCC(Op, DAG);
  case ISD::VASTART:            return lowerVASTART(Op, DAG);
  case ISD::VAARG:              return lowerVAARG(Op, DAG);
  case ISD::FCOPYSIGN:          return lowerFCOPYSIGN(Op, DAG);
  case ISD::FABS:               return lowerFABS(Op, DAG);
  case ISD::FRAMEADDR:          return lowerFRAMEADDR(Op, DAG);
  case ISD::RETURNADDR:         return lowerRETURNADDR(Op, DAG);
  case ISD::EH_RETURN:          return lowerEH_RETURN(Op, DAG);
  case ISD::ATOMIC_FENCE:       return lowerATOMIC_FENCE(Op, DAG);
  case ISD::SHL_PARTS:          return lowerShiftLeftParts(Op, DAG);
  case ISD::SRA_PARTS:          return lowerShiftRightParts(Op, DAG, true);
  case ISD::SRL_PARTS:          return lowerShiftRightParts(Op, DAG, false);
  case ISD::LOAD:               return lowerLOAD(Op, DAG);
  case ISD::STORE:              return lowerSTORE(Op, DAG);
  case ISD::EH_DWARF_CFA:       return lowerEH_DWARF_CFA(Op, DAG);
  case ISD::FP_TO_SINT:         return lowerFP_TO_SINT(Op, DAG);
  }
  return SDValue();
}

//===----------------------------------------------------------------------===//
//  Lower helper functions
//===----------------------------------------------------------------------===//

// addLiveIn - This helper function adds the specified physical register to the
// MachineFunction as a live in value. It also creates a corresponding
// virtual register for it.
static unsigned
addLiveIn(MachineFunction &MF, unsigned PReg, const TargetRegisterClass *RC)
{
  Register VReg = MF.getRegInfo().createVirtualRegister(RC);
  MF.getRegInfo().addLiveIn(PReg, VReg);
  return VReg;
}

static MachineBasicBlock *insertDivByZeroTrap(MachineInstr &MI,
                                              MachineBasicBlock &MBB,
                                              const TargetInstrInfo &TII,
                                              bool Is64Bit, bool IsMicroMips) {
  if (NoZeroDivCheck)
    return &MBB;

  // Insert instruction "teq $divisor_reg, $zero, 7".
  MachineBasicBlock::iterator I(MI);
  MachineInstrBuilder MIB;
  MachineOperand &Divisor = MI.getOperand(2);
  MIB = BuildMI(MBB, std::next(I), MI.getDebugLoc(),
                TII.get(IsMicroMips ? Mips::TEQ_MM : Mips::TEQ))
            .addReg(Divisor.getReg(), getKillRegState(Divisor.isKill()))
            .addReg(Mips::ZERO)
            .addImm(7);
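  // Trap code 7 is the break/trap code conventionally used on MIPS to
  // signal an integer divide by zero.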

  // Use the 32-bit sub-register if this is a 64-bit division.
  if (Is64Bit)
    MIB->getOperand(0).setSubReg(Mips::sub_32);

  // Clear Divisor's kill flag.
  Divisor.setIsKill(false);

  // We would normally delete the original instruction here but in this case
  // we only needed to inject an additional instruction rather than replace it.

  return &MBB;
}

MachineBasicBlock *
MipsTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
                                                MachineBasicBlock *BB) const {
  switch (MI.getOpcode()) {
  default:
    llvm_unreachable("Unexpected instr type to insert");
  case Mips::ATOMIC_LOAD_ADD_I8:
    return emitAtomicBinaryPartword(MI, BB, 1);
  case Mips::ATOMIC_LOAD_ADD_I16:
    return emitAtomicBinaryPartword(MI, BB, 2);
  case Mips::ATOMIC_LOAD_ADD_I32:
    return emitAtomicBinary(MI, BB);
  case Mips::ATOMIC_LOAD_ADD_I64:
    return emitAtomicBinary(MI, BB);

  case Mips::ATOMIC_LOAD_AND_I8:
    return emitAtomicBinaryPartword(MI, BB, 1);
  case Mips::ATOMIC_LOAD_AND_I16:
    return emitAtomicBinaryPartword(MI, BB, 2);
  case Mips::ATOMIC_LOAD_AND_I32:
    return emitAtomicBinary(MI, BB);
  case Mips::ATOMIC_LOAD_AND_I64:
    return emitAtomicBinary(MI, BB);

  case Mips::ATOMIC_LOAD_OR_I8:
    return emitAtomicBinaryPartword(MI, BB, 1);
  case Mips::ATOMIC_LOAD_OR_I16:
    return emitAtomicBinaryPartword(MI, BB, 2);
  case Mips::ATOMIC_LOAD_OR_I32:
    return emitAtomicBinary(MI, BB);
  case Mips::ATOMIC_LOAD_OR_I64:
    return emitAtomicBinary(MI, BB);

  case Mips::ATOMIC_LOAD_XOR_I8:
    return emitAtomicBinaryPartword(MI, BB, 1);
  case Mips::ATOMIC_LOAD_XOR_I16:
    return emitAtomicBinaryPartword(MI, BB, 2);
  case Mips::ATOMIC_LOAD_XOR_I32:
    return emitAtomicBinary(MI, BB);
  case Mips::ATOMIC_LOAD_XOR_I64:
    return emitAtomicBinary(MI, BB);

  case Mips::ATOMIC_LOAD_NAND_I8:
    return emitAtomicBinaryPartword(MI, BB, 1);
  case Mips::ATOMIC_LOAD_NAND_I16:
    return emitAtomicBinaryPartword(MI, BB, 2);
  case Mips::ATOMIC_LOAD_NAND_I32:
    return emitAtomicBinary(MI, BB);
  case Mips::ATOMIC_LOAD_NAND_I64:
    return emitAtomicBinary(MI, BB);

  case Mips::ATOMIC_LOAD_SUB_I8:
    return emitAtomicBinaryPartword(MI, BB, 1);
  case Mips::ATOMIC_LOAD_SUB_I16:
    return emitAtomicBinaryPartword(MI, BB, 2);
  case Mips::ATOMIC_LOAD_SUB_I32:
    return emitAtomicBinary(MI, BB);
  case Mips::ATOMIC_LOAD_SUB_I64:
    return emitAtomicBinary(MI, BB);

  case Mips::ATOMIC_SWAP_I8:
    return emitAtomicBinaryPartword(MI, BB, 1);
  case Mips::ATOMIC_SWAP_I16:
    return emitAtomicBinaryPartword(MI, BB, 2);
  case Mips::ATOMIC_SWAP_I32:
    return emitAtomicBinary(MI, BB);
  case Mips::ATOMIC_SWAP_I64:
    return emitAtomicBinary(MI, BB);

  case Mips::ATOMIC_CMP_SWAP_I8:
    return emitAtomicCmpSwapPartword(MI, BB, 1);
  case Mips::ATOMIC_CMP_SWAP_I16:
    return emitAtomicCmpSwapPartword(MI, BB, 2);
  case Mips::ATOMIC_CMP_SWAP_I32:
    return emitAtomicCmpSwap(MI, BB);
  case Mips::ATOMIC_CMP_SWAP_I64:
    return emitAtomicCmpSwap(MI, BB);

  case Mips::ATOMIC_LOAD_MIN_I8:
    return emitAtomicBinaryPartword(MI, BB, 1);
  case Mips::ATOMIC_LOAD_MIN_I16:
    return emitAtomicBinaryPartword(MI, BB, 2);
  case Mips::ATOMIC_LOAD_MIN_I32:
    return emitAtomicBinary(MI, BB);
  case Mips::ATOMIC_LOAD_MIN_I64:
    return emitAtomicBinary(MI, BB);

  case Mips::ATOMIC_LOAD_MAX_I8:
    return emitAtomicBinaryPartword(MI, BB, 1);
  case Mips::ATOMIC_LOAD_MAX_I16:
    return emitAtomicBinaryPartword(MI, BB, 2);
  case Mips::ATOMIC_LOAD_MAX_I32:
    return emitAtomicBinary(MI, BB);
  case Mips::ATOMIC_LOAD_MAX_I64:
    return emitAtomicBinary(MI, BB);

  case Mips::ATOMIC_LOAD_UMIN_I8:
    return emitAtomicBinaryPartword(MI, BB, 1);
  case Mips::ATOMIC_LOAD_UMIN_I16:
    return emitAtomicBinaryPartword(MI, BB, 2);
  case Mips::ATOMIC_LOAD_UMIN_I32:
    return emitAtomicBinary(MI, BB);
  case Mips::ATOMIC_LOAD_UMIN_I64:
    return emitAtomicBinary(MI, BB);

  case Mips::ATOMIC_LOAD_UMAX_I8:
    return emitAtomicBinaryPartword(MI, BB, 1);
  case Mips::ATOMIC_LOAD_UMAX_I16:
    return emitAtomicBinaryPartword(MI, BB, 2);
  case Mips::ATOMIC_LOAD_UMAX_I32:
    return emitAtomicBinary(MI, BB);
  case Mips::ATOMIC_LOAD_UMAX_I64:
    return emitAtomicBinary(MI, BB);

  case Mips::PseudoSDIV:
  case Mips::PseudoUDIV:
  case Mips::DIV:
  case Mips::DIVU:
  case Mips::MOD:
  case Mips::MODU:
    return insertDivByZeroTrap(MI, *BB, *Subtarget.getInstrInfo(), false,
                               false);
  case Mips::SDIV_MM_Pseudo:
  case Mips::UDIV_MM_Pseudo:
  case Mips::SDIV_MM:
  case Mips::UDIV_MM:
  case Mips::DIV_MMR6:
  case Mips::DIVU_MMR6:
  case Mips::MOD_MMR6:
  case Mips::MODU_MMR6:
    return insertDivByZeroTrap(MI, *BB, *Subtarget.getInstrInfo(), false, true);
  case Mips::PseudoDSDIV:
  case Mips::PseudoDUDIV:
  case Mips::DDIV:
  case Mips::DDIVU:
  case Mips::DMOD:
  case Mips::DMODU:
    return insertDivByZeroTrap(MI, *BB, *Subtarget.getInstrInfo(), true, false);

  case Mips::PseudoSELECT_I:
  case Mips::PseudoSELECT_I64:
  case Mips::PseudoSELECT_S:
  case Mips::PseudoSELECT_D32:
  case Mips::PseudoSELECT_D64:
    return emitPseudoSELECT(MI, BB, false, Mips::BNE);
  case Mips::PseudoSELECTFP_F_I:
  case Mips::PseudoSELECTFP_F_I64:
  case Mips::PseudoSELECTFP_F_S:
  case Mips::PseudoSELECTFP_F_D32:
  case Mips::PseudoSELECTFP_F_D64:
    return emitPseudoSELECT(MI, BB, true, Mips::BC1F);
  case Mips::PseudoSELECTFP_T_I:
  case Mips::PseudoSELECTFP_T_I64:
  case Mips::PseudoSELECTFP_T_S:
  case Mips::PseudoSELECTFP_T_D32:
  case Mips::PseudoSELECTFP_T_D64:
    return emitPseudoSELECT(MI, BB, true, Mips::BC1T);
  case Mips::PseudoD_SELECT_I:
  case Mips::PseudoD_SELECT_I64:
    return emitPseudoD_SELECT(MI, BB);
  case Mips::LDR_W:
    return emitLDR_W(MI, BB);
  case Mips::LDR_D:
    return emitLDR_D(MI, BB);
  case Mips::STR_W:
    return emitSTR_W(MI, BB);
  case Mips::STR_D:
    return emitSTR_D(MI, BB);
  }
}

// This function also handles Mips::ATOMIC_SWAP_I32 (when BinOpcode == 0), and
// Mips::ATOMIC_LOAD_NAND_I32 (when Nand == true)
MachineBasicBlock *
MipsTargetLowering::emitAtomicBinary(MachineInstr &MI,
                                     MachineBasicBlock *BB) const {

  MachineFunction *MF = BB->getParent();
  MachineRegisterInfo &RegInfo = MF->getRegInfo();
  const TargetInstrInfo *TII = Subtarget.getInstrInfo();
  DebugLoc DL = MI.getDebugLoc();

  unsigned AtomicOp;
  bool NeedsAdditionalReg = false;
  switch (MI.getOpcode()) {
  case Mips::ATOMIC_LOAD_ADD_I32:
    AtomicOp = Mips::ATOMIC_LOAD_ADD_I32_POSTRA;
    break;
  case Mips::ATOMIC_LOAD_SUB_I32:
    AtomicOp = Mips::ATOMIC_LOAD_SUB_I32_POSTRA;
    break;
  case Mips::ATOMIC_LOAD_AND_I32:
    AtomicOp = Mips::ATOMIC_LOAD_AND_I32_POSTRA;
    break;
  case Mips::ATOMIC_LOAD_OR_I32:
    AtomicOp = Mips::ATOMIC_LOAD_OR_I32_POSTRA;
    break;
  case Mips::ATOMIC_LOAD_XOR_I32:
    AtomicOp = Mips::ATOMIC_LOAD_XOR_I32_POSTRA;
    break;
  case Mips::ATOMIC_LOAD_NAND_I32:
    AtomicOp = Mips::ATOMIC_LOAD_NAND_I32_POSTRA;
    break;
  case Mips::ATOMIC_SWAP_I32:
    AtomicOp = Mips::ATOMIC_SWAP_I32_POSTRA;
    break;
  case Mips::ATOMIC_LOAD_ADD_I64:
    AtomicOp = Mips::ATOMIC_LOAD_ADD_I64_POSTRA;
    break;
  case Mips::ATOMIC_LOAD_SUB_I64:
    AtomicOp = Mips::ATOMIC_LOAD_SUB_I64_POSTRA;
    break;
  case Mips::ATOMIC_LOAD_AND_I64:
    AtomicOp = Mips::ATOMIC_LOAD_AND_I64_POSTRA;
    break;
  case Mips::ATOMIC_LOAD_OR_I64:
    AtomicOp = Mips::ATOMIC_LOAD_OR_I64_POSTRA;
    break;
  case Mips::ATOMIC_LOAD_XOR_I64:
    AtomicOp = Mips::ATOMIC_LOAD_XOR_I64_POSTRA;
    break;
  case Mips::ATOMIC_LOAD_NAND_I64:
    AtomicOp = Mips::ATOMIC_LOAD_NAND_I64_POSTRA;
    break;
  case Mips::ATOMIC_SWAP_I64:
    AtomicOp = Mips::ATOMIC_SWAP_I64_POSTRA;
    break;
  case Mips::ATOMIC_LOAD_MIN_I32:
    AtomicOp = Mips::ATOMIC_LOAD_MIN_I32_POSTRA;
    NeedsAdditionalReg = true;
    break;
  case Mips::ATOMIC_LOAD_MAX_I32:
    AtomicOp = Mips::ATOMIC_LOAD_MAX_I32_POSTRA;
    NeedsAdditionalReg = true;
    break;
  case Mips::ATOMIC_LOAD_UMIN_I32:
    AtomicOp = Mips::ATOMIC_LOAD_UMIN_I32_POSTRA;
    NeedsAdditionalReg = true;
    break;
  case Mips::ATOMIC_LOAD_UMAX_I32:
    AtomicOp = Mips::ATOMIC_LOAD_UMAX_I32_POSTRA;
    NeedsAdditionalReg = true;
    break;
  case Mips::ATOMIC_LOAD_MIN_I64:
    AtomicOp = Mips::ATOMIC_LOAD_MIN_I64_POSTRA;
    NeedsAdditionalReg = true;
    break;
  case Mips::ATOMIC_LOAD_MAX_I64:
    AtomicOp = Mips::ATOMIC_LOAD_MAX_I64_POSTRA;
    NeedsAdditionalReg = true;
    break;
  case Mips::ATOMIC_LOAD_UMIN_I64:
    AtomicOp = Mips::ATOMIC_LOAD_UMIN_I64_POSTRA;
    NeedsAdditionalReg = true;
    break;
  case Mips::ATOMIC_LOAD_UMAX_I64:
    AtomicOp = Mips::ATOMIC_LOAD_UMAX_I64_POSTRA;
    NeedsAdditionalReg = true;
    break;
  default:
    llvm_unreachable("Unknown pseudo atomic for replacement!");
  }

  Register OldVal = MI.getOperand(0).getReg();
  Register Ptr = MI.getOperand(1).getReg();
  Register Incr = MI.getOperand(2).getReg();
  Register Scratch = RegInfo.createVirtualRegister(RegInfo.getRegClass(OldVal));

  MachineBasicBlock::iterator II(MI);

  // The scratch registers here with the EarlyClobber | Define | Implicit
  // flags are used to persuade the register allocator and the machine
  // verifier to accept the usage of this register. This has to be a real
  // register which has an UNDEF value but is dead after the instruction which
  // is unique among the registers chosen for the instruction.

  // The EarlyClobber flag has the semantic properties that the operand it is
  // attached to is clobbered before the rest of the inputs are read. Hence it
  // must be unique among the operands to the instruction.
  // The Define flag is needed to convince the machine verifier that an Undef
  // value isn't a problem.
  // The Dead flag is needed as the value in scratch isn't used by any other
  // instruction. Kill isn't used as Dead is more precise.
  // The implicit flag is here due to the interaction between the other flags
  // and the machine verifier.

  // For correctness purposes, a new pseudo is introduced here. We need this
  // new pseudo, so that FastRegisterAllocator does not see an ll/sc sequence
  // that is spread over >1 basic blocks. A register allocator which
  // introduces (or any codegen in fact) a store, can violate the
  // expectations of the hardware.
  //
  // An atomic read-modify-write sequence starts with a linked load
  // instruction and ends with a store conditional instruction. The atomic
  // read-modify-write sequence fails if any of the following conditions
  // occur between the execution of ll and sc:
  //   * A coherent store is completed by another process or coherent I/O
  //     module into the block of synchronizable physical memory containing
  //     the word. The size and alignment of the block is
  //     implementation-dependent.
  //   * A coherent store is executed between an LL and SC sequence on the
  //     same processor to the block of synchronizable physical memory
  //     containing the word.
  //

  Register PtrCopy = RegInfo.createVirtualRegister(RegInfo.getRegClass(Ptr));
  Register IncrCopy = RegInfo.createVirtualRegister(RegInfo.getRegClass(Incr));

  BuildMI(*BB, II, DL, TII->get(Mips::COPY), IncrCopy).addReg(Incr);
  BuildMI(*BB, II, DL, TII->get(Mips::COPY), PtrCopy).addReg(Ptr);
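
  // The copies keep the pointer and increment values live only within this
  // block; the post-RA expansion of the pseudo consumes the copies rather
  // than the original virtual registers.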

  MachineInstrBuilder MIB =
      BuildMI(*BB, II, DL, TII->get(AtomicOp))
          .addReg(OldVal, RegState::Define | RegState::EarlyClobber)
          .addReg(PtrCopy)
          .addReg(IncrCopy)
          .addReg(Scratch, RegState::Define | RegState::EarlyClobber |
                               RegState::Implicit | RegState::Dead);
  if (NeedsAdditionalReg) {
    Register Scratch2 =
        RegInfo.createVirtualRegister(RegInfo.getRegClass(OldVal));
    MIB.addReg(Scratch2, RegState::Define | RegState::EarlyClobber |
                             RegState::Implicit | RegState::Dead);
  }

  MI.eraseFromParent();

  return BB;
}


MachineBasicBlock *MipsTargetLowering::emitSignExtendToI32InReg(
    MachineInstr &MI, MachineBasicBlock *BB, unsigned Size, unsigned DstReg,
    unsigned SrcReg) const {
  const TargetInstrInfo *TII = Subtarget.getInstrInfo();
  const DebugLoc &DL = MI.getDebugLoc();

  if (Subtarget.hasMips32r2() && Size == 1) {
    BuildMI(BB, DL, TII->get(Mips::SEB), DstReg).addReg(SrcReg);
    return BB;
  }

  if (Subtarget.hasMips32r2() && Size == 2) {
    BuildMI(BB, DL, TII->get(Mips::SEH), DstReg).addReg(SrcReg);
    return BB;
  }

  MachineFunction *MF = BB->getParent();
  MachineRegisterInfo &RegInfo = MF->getRegInfo();
  const TargetRegisterClass *RC = getRegClassFor(MVT::i32);
  Register ScrReg = RegInfo.createVirtualRegister(RC);

  assert(Size < 32);
  int64_t ShiftImm = 32 - (Size * 8);

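  // No seb/seh available: sign-extend by shifting the value to the top of
  // the register and arithmetic-shifting it back down.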
  BuildMI(BB, DL, TII->get(Mips::SLL), ScrReg).addReg(SrcReg).addImm(ShiftImm);
  BuildMI(BB, DL, TII->get(Mips::SRA), DstReg).addReg(ScrReg).addImm(ShiftImm);

  return BB;
}

MachineBasicBlock *MipsTargetLowering::emitAtomicBinaryPartword(
    MachineInstr &MI, MachineBasicBlock *BB, unsigned Size) const {
  assert((Size == 1 || Size == 2) &&
         "Unsupported size for EmitAtomicBinaryPartial.");

  MachineFunction *MF = BB->getParent();
  MachineRegisterInfo &RegInfo = MF->getRegInfo();
  const TargetRegisterClass *RC = getRegClassFor(MVT::i32);
  const bool ArePtrs64bit = ABI.ArePtrs64bit();
  const TargetRegisterClass *RCp =
      getRegClassFor(ArePtrs64bit ? MVT::i64 : MVT::i32);
  const TargetInstrInfo *TII = Subtarget.getInstrInfo();
  DebugLoc DL = MI.getDebugLoc();

  Register Dest = MI.getOperand(0).getReg();
  Register Ptr = MI.getOperand(1).getReg();
  Register Incr = MI.getOperand(2).getReg();

  Register AlignedAddr = RegInfo.createVirtualRegister(RCp);
  Register ShiftAmt = RegInfo.createVirtualRegister(RC);
  Register Mask = RegInfo.createVirtualRegister(RC);
  Register Mask2 = RegInfo.createVirtualRegister(RC);
  Register Incr2 = RegInfo.createVirtualRegister(RC);
  Register MaskLSB2 = RegInfo.createVirtualRegister(RCp);
  Register PtrLSB2 = RegInfo.createVirtualRegister(RC);
  Register MaskUpper = RegInfo.createVirtualRegister(RC);
  Register Scratch = RegInfo.createVirtualRegister(RC);
  Register Scratch2 = RegInfo.createVirtualRegister(RC);
  Register Scratch3 = RegInfo.createVirtualRegister(RC);

  unsigned AtomicOp = 0;
  bool NeedsAdditionalReg = false;
  switch (MI.getOpcode()) {
  case Mips::ATOMIC_LOAD_NAND_I8:
    AtomicOp = Mips::ATOMIC_LOAD_NAND_I8_POSTRA;
    break;
  case Mips::ATOMIC_LOAD_NAND_I16:
    AtomicOp = Mips::ATOMIC_LOAD_NAND_I16_POSTRA;
    break;
  case Mips::ATOMIC_SWAP_I8:
    AtomicOp = Mips::ATOMIC_SWAP_I8_POSTRA;
    break;
  case Mips::ATOMIC_SWAP_I16:
    AtomicOp = Mips::ATOMIC_SWAP_I16_POSTRA;
    break;
  case Mips::ATOMIC_LOAD_ADD_I8:
    AtomicOp = Mips::ATOMIC_LOAD_ADD_I8_POSTRA;
    break;
  case Mips::ATOMIC_LOAD_ADD_I16:
    AtomicOp = Mips::ATOMIC_LOAD_ADD_I16_POSTRA;
    break;
  case Mips::ATOMIC_LOAD_SUB_I8:
    AtomicOp = Mips::ATOMIC_LOAD_SUB_I8_POSTRA;
    break;
  case Mips::ATOMIC_LOAD_SUB_I16:
    AtomicOp = Mips::ATOMIC_LOAD_SUB_I16_POSTRA;
    break;
  case Mips::ATOMIC_LOAD_AND_I8:
    AtomicOp = Mips::ATOMIC_LOAD_AND_I8_POSTRA;
    break;
  case Mips::ATOMIC_LOAD_AND_I16:
    AtomicOp = Mips::ATOMIC_LOAD_AND_I16_POSTRA;
    break;
  case Mips::ATOMIC_LOAD_OR_I8:
    AtomicOp = Mips::ATOMIC_LOAD_OR_I8_POSTRA;
    break;
  case Mips::ATOMIC_LOAD_OR_I16:
    AtomicOp = Mips::ATOMIC_LOAD_OR_I16_POSTRA;
    break;
  case Mips::ATOMIC_LOAD_XOR_I8:
    AtomicOp = Mips::ATOMIC_LOAD_XOR_I8_POSTRA;
    break;
  case Mips::ATOMIC_LOAD_XOR_I16:
    AtomicOp = Mips::ATOMIC_LOAD_XOR_I16_POSTRA;
    break;
  case Mips::ATOMIC_LOAD_MIN_I8:
    AtomicOp = Mips::ATOMIC_LOAD_MIN_I8_POSTRA;
    NeedsAdditionalReg = true;
    break;
  case Mips::ATOMIC_LOAD_MIN_I16:
    AtomicOp = Mips::ATOMIC_LOAD_MIN_I16_POSTRA;
    NeedsAdditionalReg = true;
    break;
  case Mips::ATOMIC_LOAD_MAX_I8:
    AtomicOp = Mips::ATOMIC_LOAD_MAX_I8_POSTRA;
    NeedsAdditionalReg = true;
    break;
  case Mips::ATOMIC_LOAD_MAX_I16:
    AtomicOp = Mips::ATOMIC_LOAD_MAX_I16_POSTRA;
    NeedsAdditionalReg = true;
    break;
  case Mips::ATOMIC_LOAD_UMIN_I8:
    AtomicOp = Mips::ATOMIC_LOAD_UMIN_I8_POSTRA;
    NeedsAdditionalReg = true;
    break;
  case Mips::ATOMIC_LOAD_UMIN_I16:
    AtomicOp = Mips::ATOMIC_LOAD_UMIN_I16_POSTRA;
    NeedsAdditionalReg = true;
    break;
  case Mips::ATOMIC_LOAD_UMAX_I8:
    AtomicOp = Mips::ATOMIC_LOAD_UMAX_I8_POSTRA;
    NeedsAdditionalReg = true;
    break;
  case Mips::ATOMIC_LOAD_UMAX_I16:
    AtomicOp = Mips::ATOMIC_LOAD_UMAX_I16_POSTRA;
    NeedsAdditionalReg = true;
    break;
  default:
    llvm_unreachable("Unknown subword atomic pseudo for expansion!");
  }

  // insert new blocks after the current block
  const BasicBlock *LLVM_BB = BB->getBasicBlock();
  MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(LLVM_BB);
  MachineFunction::iterator It = ++BB->getIterator();
  MF->insert(It, exitMBB);

  // Transfer the remainder of BB and its successor edges to exitMBB.
  exitMBB->splice(exitMBB->begin(), BB,
                  std::next(MachineBasicBlock::iterator(MI)), BB->end());
  exitMBB->transferSuccessorsAndUpdatePHIs(BB);

  // thisMBB:
  //   addiu  masklsb2,$0,-4            # 0xfffffffc
  //   and    alignedaddr,ptr,masklsb2
  //   andi   ptrlsb2,ptr,3
  //   sll    shiftamt,ptrlsb2,3
  //   ori    maskupper,$0,255          # 0xff
  //   sll    mask,maskupper,shiftamt
  //   nor    mask2,$0,mask
  //   sll    incr2,incr,shiftamt

  int64_t MaskImm = (Size == 1) ? 255 : 65535;
  BuildMI(BB, DL, TII->get(ABI.GetPtrAddiuOp()), MaskLSB2)
      .addReg(ABI.GetNullPtr()).addImm(-4);
  BuildMI(BB, DL, TII->get(ABI.GetPtrAndOp()), AlignedAddr)
      .addReg(Ptr).addReg(MaskLSB2);
  BuildMI(BB, DL, TII->get(Mips::ANDi), PtrLSB2)
      .addReg(Ptr, 0, ArePtrs64bit ? Mips::sub_32 : 0).addImm(3);
  if (Subtarget.isLittle()) {
    BuildMI(BB, DL, TII->get(Mips::SLL), ShiftAmt).addReg(PtrLSB2).addImm(3);
  } else {
    Register Off = RegInfo.createVirtualRegister(RC);
    BuildMI(BB, DL, TII->get(Mips::XORi), Off)
        .addReg(PtrLSB2).addImm((Size == 1) ? 3 : 2);
    BuildMI(BB, DL, TII->get(Mips::SLL), ShiftAmt).addReg(Off).addImm(3);
  }
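  // On big-endian targets the byte (or halfword) offset within the word is
  // flipped with xori before computing the shift amount.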
  BuildMI(BB, DL, TII->get(Mips::ORi), MaskUpper)
      .addReg(Mips::ZERO).addImm(MaskImm);
  BuildMI(BB, DL, TII->get(Mips::SLLV), Mask)
      .addReg(MaskUpper).addReg(ShiftAmt);
  BuildMI(BB, DL, TII->get(Mips::NOR), Mask2).addReg(Mips::ZERO).addReg(Mask);
  BuildMI(BB, DL, TII->get(Mips::SLLV), Incr2).addReg(Incr).addReg(ShiftAmt);

1802 // The purposes of the flags on the scratch registers are explained in
1803 // emitAtomicBinary. In summary, we need a scratch register which is going to
1804 // be undef and unique among the registers chosen for the instruction.
1805
1806 MachineInstrBuilder MIB =
1807 BuildMI(BB, DL, TII->get(AtomicOp))
1808 .addReg(Dest, RegState::Define | RegState::EarlyClobber)
1809 .addReg(AlignedAddr)
1810 .addReg(Incr2)
1811 .addReg(Mask)
1812 .addReg(Mask2)
1813 .addReg(ShiftAmt)
1814 .addReg(Scratch, RegState::EarlyClobber | RegState::Define |
1815 RegState::Dead | RegState::Implicit)
1816 .addReg(Scratch2, RegState::EarlyClobber | RegState::Define |
1817 RegState::Dead | RegState::Implicit)
1818 .addReg(Scratch3, RegState::EarlyClobber | RegState::Define |
1819 RegState::Dead | RegState::Implicit);
1820 if (NeedsAdditionalReg) {
1821 Register Scratch4 = RegInfo.createVirtualRegister(RC);
1822 MIB.addReg(Scratch4, RegState::EarlyClobber | RegState::Define |
1823 RegState::Dead | RegState::Implicit);
1824 }
1825
1826 MI.eraseFromParent(); // The instruction is gone now.
1827
1828 return exitMBB;
1829}
1830
1831// Lower atomic compare and swap to a pseudo instruction, taking care to
1832// define a scratch register for the pseudo instruction's expansion. The
1833 // instruction is expanded after the register allocator so as to prevent
1834// the insertion of stores between the linked load and the store conditional.
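// (Illustrative sketch of the usual post-RA LL/SC expansion; see
// MipsExpandPseudo for the authoritative code.)
//   loop:
//     ll    dest, 0(ptr)
//     bne   dest, oldval, done
//     move  scratch, newval
//     sc    scratch, 0(ptr)
//     beqz  scratch, loop
//   done: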
1835
1836 MachineBasicBlock *
1837MipsTargetLowering::emitAtomicCmpSwap(MachineInstr &MI,
1838 MachineBasicBlock *BB) const {
1839
1840 assert((MI.getOpcode() == Mips::ATOMIC_CMP_SWAP_I32 ||
1841 MI.getOpcode() == Mips::ATOMIC_CMP_SWAP_I64) &&
1842 "Unsupported atomic pseudo for EmitAtomicCmpSwap.");
1843
1844 const unsigned Size = MI.getOpcode() == Mips::ATOMIC_CMP_SWAP_I32 ? 4 : 8;
1845
1846 MachineFunction *MF = BB->getParent();
1847 MachineRegisterInfo &MRI = MF->getRegInfo();
1848 const TargetRegisterClass *RC = getRegClassFor(MVT::getIntegerVT(Size * 8));
1849 const TargetInstrInfo *TII = Subtarget.getInstrInfo();
1850 DebugLoc DL = MI.getDebugLoc();
1851
1852 unsigned AtomicOp = MI.getOpcode() == Mips::ATOMIC_CMP_SWAP_I32
1853 ? Mips::ATOMIC_CMP_SWAP_I32_POSTRA
1854 : Mips::ATOMIC_CMP_SWAP_I64_POSTRA;
1855 Register Dest = MI.getOperand(0).getReg();
1856 Register Ptr = MI.getOperand(1).getReg();
1857 Register OldVal = MI.getOperand(2).getReg();
1858 Register NewVal = MI.getOperand(3).getReg();
1859
1860 Register Scratch = MRI.createVirtualRegister(RC);
1861 MachineBasicBlock::iterator II(MI);
1862
1863 // We need to create copies of the various registers and kill them at the
1864 // atomic pseudo. If the copies are not made, when the atomic is expanded
1865 // after fast register allocation, the spills will end up outside of the
1866 // blocks that their values are defined in, causing livein errors.
1867
1868 Register PtrCopy = MRI.createVirtualRegister(MRI.getRegClass(Ptr));
1869 Register OldValCopy = MRI.createVirtualRegister(MRI.getRegClass(OldVal));
1870 Register NewValCopy = MRI.createVirtualRegister(MRI.getRegClass(NewVal));
1871
1872 BuildMI(*BB, II, DL, TII->get(Mips::COPY), PtrCopy).addReg(Ptr);
1873 BuildMI(*BB, II, DL, TII->get(Mips::COPY), OldValCopy).addReg(OldVal);
1874 BuildMI(*BB, II, DL, TII->get(Mips::COPY), NewValCopy).addReg(NewVal);
1875
1876 // The purposes of the flags on the scratch registers are explained in
1877 // emitAtomicBinary. In summary, we need a scratch register which is going to
1878 // be undef and unique among the registers chosen for the instruction.
1879
1880 BuildMI(*BB, II, DL, TII->get(AtomicOp))
1881 .addReg(Dest, RegState::Define | RegState::EarlyClobber)
1882 .addReg(PtrCopy, RegState::Kill)
1883 .addReg(OldValCopy, RegState::Kill)
1884 .addReg(NewValCopy, RegState::Kill)
1885 .addReg(Scratch, RegState::EarlyClobber | RegState::Define |
1886 RegState::Dead | RegState::Implicit);
1887
1888 MI.eraseFromParent(); // The instruction is gone now.
1889
1890 return BB;
1891}
1892
1893MachineBasicBlock *MipsTargetLowering::emitAtomicCmpSwapPartword(
1894 MachineInstr &MI, MachineBasicBlock *BB, unsigned Size) const {
1895 assert((Size == 1 || Size == 2) &&
1896 "Unsupported size for EmitAtomicCmpSwapPartial.");
1897
1898 MachineFunction *MF = BB->getParent();
1899 MachineRegisterInfo &RegInfo = MF->getRegInfo();
1900 const TargetRegisterClass *RC = getRegClassFor(MVT::i32);
1901 const bool ArePtrs64bit = ABI.ArePtrs64bit();
1902 const TargetRegisterClass *RCp =
1903 getRegClassFor(ArePtrs64bit ? MVT::i64 : MVT::i32);
1904 const TargetInstrInfo *TII = Subtarget.getInstrInfo();
1905 DebugLoc DL = MI.getDebugLoc();
1906
1907 Register Dest = MI.getOperand(0).getReg();
1908 Register Ptr = MI.getOperand(1).getReg();
1909 Register CmpVal = MI.getOperand(2).getReg();
1910 Register NewVal = MI.getOperand(3).getReg();
1911
1912 Register AlignedAddr = RegInfo.createVirtualRegister(RCp);
1913 Register ShiftAmt = RegInfo.createVirtualRegister(RC);
1914 Register Mask = RegInfo.createVirtualRegister(RC);
1915 Register Mask2 = RegInfo.createVirtualRegister(RC);
1916 Register ShiftedCmpVal = RegInfo.createVirtualRegister(RC);
1917 Register ShiftedNewVal = RegInfo.createVirtualRegister(RC);
1918 Register MaskLSB2 = RegInfo.createVirtualRegister(RCp);
1919 Register PtrLSB2 = RegInfo.createVirtualRegister(RC);
1920 Register MaskUpper = RegInfo.createVirtualRegister(RC);
1921 Register MaskedCmpVal = RegInfo.createVirtualRegister(RC);
1922 Register MaskedNewVal = RegInfo.createVirtualRegister(RC);
1923 unsigned AtomicOp = MI.getOpcode() == Mips::ATOMIC_CMP_SWAP_I8
1924 ? Mips::ATOMIC_CMP_SWAP_I8_POSTRA
1925 : Mips::ATOMIC_CMP_SWAP_I16_POSTRA;
1926
1927 // The scratch registers here with the EarlyClobber | Define | Dead | Implicit
1928 // flags are used to coerce the register allocator and the machine verifier to
1929 // accept the usage of these registers.
1930 // The EarlyClobber flag has the semantic properties that the operand it is
1931 // attached to is clobbered before the rest of the inputs are read. Hence it
1932 // must be unique among the operands to the instruction.
1933 // The Define flag is needed to convince the machine verifier that an Undef
1934 // value isn't a problem.
1935 // The Dead flag is needed as the value in scratch isn't used by any other
1936 // instruction. Kill isn't used as Dead is more precise.
1937 Register Scratch = RegInfo.createVirtualRegister(RC);
1938 Register Scratch2 = RegInfo.createVirtualRegister(RC);
1939
1940 // insert new blocks after the current block
1941 const BasicBlock *LLVM_BB = BB->getBasicBlock();
1942 MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(LLVM_BB);
1943 MachineFunction::iterator It = ++BB->getIterator();
1944 MF->insert(It, exitMBB);
1945
1946 // Transfer the remainder of BB and its successor edges to exitMBB.
1947 exitMBB->splice(exitMBB->begin(), BB,
1948 std::next(MachineBasicBlock::iterator(MI)), BB->end());
1949 exitMBB->transferSuccessorsAndUpdatePHIs(BB);
1950
1951 BB->addSuccessor(exitMBB, BranchProbability::getOne());
1952
1953 // thisMBB:
1954 // addiu masklsb2,$0,-4 # 0xfffffffc
1955 // and alignedaddr,ptr,masklsb2
1956 // andi ptrlsb2,ptr,3
1957 // xori ptrlsb2,ptrlsb2,3 # Only for BE
1958 // sll shiftamt,ptrlsb2,3
1959 // ori maskupper,$0,255 # 0xff
1960 // sll mask,maskupper,shiftamt
1961 // nor mask2,$0,mask
1962 // andi maskedcmpval,cmpval,255
1963 // sll shiftedcmpval,maskedcmpval,shiftamt
1964 // andi maskednewval,newval,255
1965 // sll shiftednewval,maskednewval,shiftamt
1966 int64_t MaskImm = (Size == 1) ? 255 : 65535;
1967 BuildMI(BB, DL, TII->get(ArePtrs64bit ? Mips::DADDiu : Mips::ADDiu), MaskLSB2)
1968 .addReg(ABI.GetNullPtr()).addImm(-4);
1969 BuildMI(BB, DL, TII->get(ArePtrs64bit ? Mips::AND64 : Mips::AND), AlignedAddr)
1970 .addReg(Ptr).addReg(MaskLSB2);
1971 BuildMI(BB, DL, TII->get(Mips::ANDi), PtrLSB2)
1972 .addReg(Ptr, 0, ArePtrs64bit ? Mips::sub_32 : 0).addImm(3);
1973 if (Subtarget.isLittle()) {
1974 BuildMI(BB, DL, TII->get(Mips::SLL), ShiftAmt).addReg(PtrLSB2).addImm(3);
1975 } else {
1976 Register Off = RegInfo.createVirtualRegister(RC);
1977 BuildMI(BB, DL, TII->get(Mips::XORi), Off)
1978 .addReg(PtrLSB2).addImm((Size == 1) ? 3 : 2);
1979 BuildMI(BB, DL, TII->get(Mips::SLL), ShiftAmt).addReg(Off).addImm(3);
1980 }
1981 BuildMI(BB, DL, TII->get(Mips::ORi), MaskUpper)
1982 .addReg(Mips::ZERO).addImm(MaskImm);
1983 BuildMI(BB, DL, TII->get(Mips::SLLV), Mask)
1984 .addReg(MaskUpper).addReg(ShiftAmt);
1985 BuildMI(BB, DL, TII->get(Mips::NOR), Mask2).addReg(Mips::ZERO).addReg(Mask);
1986 BuildMI(BB, DL, TII->get(Mips::ANDi), MaskedCmpVal)
1987 .addReg(CmpVal).addImm(MaskImm);
1988 BuildMI(BB, DL, TII->get(Mips::SLLV), ShiftedCmpVal)
1989 .addReg(MaskedCmpVal).addReg(ShiftAmt);
1990 BuildMI(BB, DL, TII->get(Mips::ANDi), MaskedNewVal)
1991 .addReg(NewVal).addImm(MaskImm);
1992 BuildMI(BB, DL, TII->get(Mips::SLLV), ShiftedNewVal)
1993 .addReg(MaskedNewVal).addReg(ShiftAmt);
1994
1995 // The purposes of the flags on the scratch registers are explained in
1996 // emitAtomicBinary. In summary, we need a scratch register which is going to
1997 // be undef and unique among the registers chosen for the instruction.
1998
1999 BuildMI(BB, DL, TII->get(AtomicOp))
2000 .addReg(Dest, RegState::Define | RegState::EarlyClobber)
2001 .addReg(AlignedAddr)
2002 .addReg(Mask)
2003 .addReg(ShiftedCmpVal)
2004 .addReg(Mask2)
2005 .addReg(ShiftedNewVal)
2006 .addReg(ShiftAmt)
2007 .addReg(Scratch, RegState::EarlyClobber | RegState::Define |
2008 RegState::Dead | RegState::Implicit)
2009 .addReg(Scratch2, RegState::EarlyClobber | RegState::Define |
2010 RegState::Dead | RegState::Implicit);
2011
2012 MI.eraseFromParent(); // The instruction is gone now.
2013
2014 return exitMBB;
2015}
2016
2017SDValue MipsTargetLowering::lowerBRCOND(SDValue Op, SelectionDAG &DAG) const {
2018 // The first operand is the chain, the second is the condition, the third is
2019 // the block to branch to if the condition is true.
2020 SDValue Chain = Op.getOperand(0);
2021 SDValue Dest = Op.getOperand(2);
2022 SDLoc DL(Op);
2023
2024 assert(!Subtarget.hasMips32r6() && !Subtarget.hasMips64r6());
2025 SDValue CondRes = createFPCmp(DAG, Op.getOperand(1));
2026
2027 // Return if flag is not set by a floating point comparison.
2028 if (CondRes.getOpcode() != MipsISD::FPCmp)
2029 return Op;
2030
2031 SDValue CCNode = CondRes.getOperand(2);
2032 Mips::CondCode CC =
2033 (Mips::CondCode)cast<ConstantSDNode>(CCNode)->getZExtValue();
2034 unsigned Opc = invertFPCondCodeUser(CC) ? Mips::BRANCH_F : Mips::BRANCH_T;
2035 SDValue BrCode = DAG.getConstant(Opc, DL, MVT::i32);
2036 SDValue FCC0 = DAG.getRegister(Mips::FCC0, MVT::i32);
2037 return DAG.getNode(MipsISD::FPBrcond, DL, Op.getValueType(), Chain, BrCode,
2038 FCC0, Dest, CondRes);
2039}
2040
2041SDValue MipsTargetLowering::
2042lowerSELECT(SDValue Op, SelectionDAG &DAG) const
2043{
2044 assert(!Subtarget.hasMips32r6() && !Subtarget.hasMips64r6());
2045 SDValue Cond = createFPCmp(DAG, Op.getOperand(0));
2046
2047 // Return if flag is not set by a floating point comparison.
2048 if (Cond.getOpcode() != MipsISD::FPCmp)
2049 return Op;
2050
2051 return createCMovFP(DAG, Cond, Op.getOperand(1), Op.getOperand(2),
2052 SDLoc(Op));
2053}
2054
2055SDValue MipsTargetLowering::lowerSETCC(SDValue Op, SelectionDAG &DAG) const {
2056 assert(!Subtarget.hasMips32r6() && !Subtarget.hasMips64r6());
2057 SDValue Cond = createFPCmp(DAG, Op);
2058
2059 assert(Cond.getOpcode() == MipsISD::FPCmp &&
2060 "Floating point operand expected.");
2061
2062 SDLoc DL(Op);
2063 SDValue True = DAG.getConstant(1, DL, MVT::i32);
2064 SDValue False = DAG.getConstant(0, DL, MVT::i32);
2065
2066 return createCMovFP(DAG, Cond, True, False, DL);
2067}
2068
2069SDValue MipsTargetLowering::lowerGlobalAddress(SDValue Op,
2070 SelectionDAG &DAG) const {
2071 EVT Ty = Op.getValueType();
2072 GlobalAddressSDNode *N = cast<GlobalAddressSDNode>(Op);
2073 const GlobalValue *GV = N->getGlobal();
2074
2075 if (!isPositionIndependent()) {
2076 const MipsTargetObjectFile *TLOF =
2077 static_cast<const MipsTargetObjectFile *>(
2078 getTargetMachine().getObjFileLowering());
2079 const GlobalObject *GO = GV->getAliaseeObject();
2080 if (GO && TLOF->IsGlobalInSmallSection(GO, getTargetMachine()))
2081 // %gp_rel relocation
2082 return getAddrGPRel(N, SDLoc(N), Ty, DAG, ABI.IsN64());
2083
2084 // %hi/%lo relocation
2085 return Subtarget.hasSym32() ? getAddrNonPIC(N, SDLoc(N), Ty, DAG)
2086 // %highest/%higher/%hi/%lo relocation
2087 : getAddrNonPICSym64(N, SDLoc(N), Ty, DAG);
2088 }
2089
2090 // Every other architecture would use shouldAssumeDSOLocal in here, but
2091 // mips is special.
2092 // * In PIC code mips requires got loads even for local statics!
2093 // * To save on got entries, for local statics the got entry contains the
2094 // page and an additional add instruction takes care of the low bits.
2095 // * It is legal to access a hidden symbol with a non hidden undefined,
2096 // so one cannot guarantee that all access to a hidden symbol will know
2097 // it is hidden.
2098 // * Mips linkers don't support creating a page and a full got entry for
2099 // the same symbol.
2100 // * Given all that, we have to use a full got entry for hidden symbols :-(
2101 if (GV->hasLocalLinkage())
2102 return getAddrLocal(N, SDLoc(N), Ty, DAG, ABI.IsN32() || ABI.IsN64());
2103
2104 if (Subtarget.useXGOT())
2105 return getAddrGlobalLargeGOT(
2106 N, SDLoc(N), Ty, DAG, MipsII::MO_GOT_HI16, MipsII::MO_GOT_LO16,
2107 DAG.getEntryNode(),
2108 MachinePointerInfo::getGOT(DAG.getMachineFunction()));
2109
2110 return getAddrGlobal(
2111 N, SDLoc(N), Ty, DAG,
2112 (ABI.IsN32() || ABI.IsN64()) ? MipsII::MO_GOT_DISP : MipsII::MO_GOT,
2113 DAG.getEntryNode(), MachinePointerInfo::getGOT(DAG.getMachineFunction()));
2114}
2115
2116SDValue MipsTargetLowering::lowerBlockAddress(SDValue Op,
2117 SelectionDAG &DAG) const {
2118 BlockAddressSDNode *N = cast<BlockAddressSDNode>(Op);
2119 EVT Ty = Op.getValueType();
2120
2121 if (!isPositionIndependent())
2122 return Subtarget.hasSym32() ? getAddrNonPIC(N, SDLoc(N), Ty, DAG)
2123 : getAddrNonPICSym64(N, SDLoc(N), Ty, DAG);
2124
2125 return getAddrLocal(N, SDLoc(N), Ty, DAG, ABI.IsN32() || ABI.IsN64());
2126}
2127
2128SDValue MipsTargetLowering::
2129lowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const
2130{
2131 // If the relocation model is PIC, use the General Dynamic TLS Model or
2132 // Local Dynamic TLS model, otherwise use the Initial Exec or
2133 // Local Exec TLS Model.
2134
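// For example (illustrative O32 PIC sketch of the General Dynamic model; the
// relocations correspond to the MipsII::MO_TLSGD flag used below):
//   addiu $a0, $gp, %tlsgd(sym)             # argument for __tls_get_addr
//   lw    $t9, %call16(__tls_get_addr)($gp)
//   jalr  $t9                               # address of sym returned in $v0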
2135 GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
2136 if (DAG.getTarget().useEmulatedTLS())
2137 return LowerToTLSEmulatedModel(GA, DAG);
2138
2139 SDLoc DL(GA);
2140 const GlobalValue *GV = GA->getGlobal();
2141 EVT PtrVT = getPointerTy(DAG.getDataLayout());
2142
2143 TLSModel::Model model = getTargetMachine().getTLSModel(GV);
2144
2145 if (model == TLSModel::GeneralDynamic || model == TLSModel::LocalDynamic) {
2146 // General Dynamic and Local Dynamic TLS Model.
2147 unsigned Flag = (model == TLSModel::LocalDynamic) ? MipsII::MO_TLSLDM
2148 : MipsII::MO_TLSGD;
2149
2150 SDValue TGA = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, Flag);
2151 SDValue Argument = DAG.getNode(MipsISD::Wrapper, DL, PtrVT,
2152 getGlobalReg(DAG, PtrVT), TGA);
2153 unsigned PtrSize = PtrVT.getSizeInBits();
2154 IntegerType *PtrTy = Type::getIntNTy(*DAG.getContext(), PtrSize);
2155
2156 SDValue TlsGetAddr = DAG.getExternalSymbol("__tls_get_addr", PtrVT);
2157
2158 ArgListTy Args;
2159 ArgListEntry Entry;
2160 Entry.Node = Argument;
2161 Entry.Ty = PtrTy;
2162 Args.push_back(Entry);
2163
2164 TargetLowering::CallLoweringInfo CLI(DAG);
2165 CLI.setDebugLoc(DL)
2166 .setChain(DAG.getEntryNode())
2167 .setLibCallee(CallingConv::C, PtrTy, TlsGetAddr, std::move(Args));
2168 std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
2169
2170 SDValue Ret = CallResult.first;
2171
2172 if (model != TLSModel::LocalDynamic)
2173 return Ret;
2174
2175 SDValue TGAHi = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0,
2176 MipsII::MO_DTPREL_HI);
2177 SDValue Hi = DAG.getNode(MipsISD::TlsHi, DL, PtrVT, TGAHi);
2178 SDValue TGALo = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0,
2179 MipsII::MO_DTPREL_LO);
2180 SDValue Lo = DAG.getNode(MipsISD::Lo, DL, PtrVT, TGALo);
2181 SDValue Add = DAG.getNode(ISD::ADD, DL, PtrVT, Hi, Ret);
2182 return DAG.getNode(ISD::ADD, DL, PtrVT, Add, Lo);
2183 }
2184
2185 SDValue Offset;
2186 if (model == TLSModel::InitialExec) {
2187 // Initial Exec TLS Model
2188 SDValue TGA = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0,
2189 MipsII::MO_GOTTPREL);
2190 TGA = DAG.getNode(MipsISD::Wrapper, DL, PtrVT, getGlobalReg(DAG, PtrVT),
2191 TGA);
2192 Offset =
2193 DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), TGA, MachinePointerInfo());
2194 } else {
2195 // Local Exec TLS Model
2196 assert(model == TLSModel::LocalExec);
2197 SDValue TGAHi = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0,
2198 MipsII::MO_TPREL_HI);
2199 SDValue TGALo = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0,
2200 MipsII::MO_TPREL_LO);
2201 SDValue Hi = DAG.getNode(MipsISD::TlsHi, DL, PtrVT, TGAHi);
2202 SDValue Lo = DAG.getNode(MipsISD::Lo, DL, PtrVT, TGALo);
2203 Offset = DAG.getNode(ISD::ADD, DL, PtrVT, Hi, Lo);
2204 }
2205
2206 SDValue ThreadPointer = DAG.getNode(MipsISD::ThreadPointer, DL, PtrVT);
2207 return DAG.getNode(ISD::ADD, DL, PtrVT, ThreadPointer, Offset);
2208}
2209
2210SDValue MipsTargetLowering::
2211lowerJumpTable(SDValue Op, SelectionDAG &DAG) const
2212{
2213 JumpTableSDNode *N = cast<JumpTableSDNode>(Op);
2214 EVT Ty = Op.getValueType();
2215
2216 if (!isPositionIndependent())
2217 return Subtarget.hasSym32() ? getAddrNonPIC(N, SDLoc(N), Ty, DAG)
2218 : getAddrNonPICSym64(N, SDLoc(N), Ty, DAG);
2219
2220 return getAddrLocal(N, SDLoc(N), Ty, DAG, ABI.IsN32() || ABI.IsN64());
2221}
2222
2223SDValue MipsTargetLowering::
2224lowerConstantPool(SDValue Op, SelectionDAG &DAG) const
2225{
2226 ConstantPoolSDNode *N = cast<ConstantPoolSDNode>(Op);
2227 EVT Ty = Op.getValueType();
2228
2229 if (!isPositionIndependent()) {
2230 const MipsTargetObjectFile *TLOF =
2231 static_cast<const MipsTargetObjectFile *>(
2232 getTargetMachine().getObjFileLowering());
2233
2234 if (TLOF->IsConstantInSmallSection(DAG.getDataLayout(), N->getConstVal(),
2235 getTargetMachine()))
2236 // %gp_rel relocation
2237 return getAddrGPRel(N, SDLoc(N), Ty, DAG, ABI.IsN64());
2238
2239 return Subtarget.hasSym32() ? getAddrNonPIC(N, SDLoc(N), Ty, DAG)
2240 : getAddrNonPICSym64(N, SDLoc(N), Ty, DAG);
2241 }
2242
2243 return getAddrLocal(N, SDLoc(N), Ty, DAG, ABI.IsN32() || ABI.IsN64());
2244}
2245
2246SDValue MipsTargetLowering::lowerVASTART(SDValue Op, SelectionDAG &DAG) const {
2247 MachineFunction &MF = DAG.getMachineFunction();
2248 MipsFunctionInfo *FuncInfo = MF.getInfo<MipsFunctionInfo>();
2249
2250 SDLoc DL(Op);
2251 SDValue FI = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(),
2252 getPointerTy(MF.getDataLayout()));
2253
2254 // vastart just stores the address of the VarArgsFrameIndex slot into the
2255 // memory location argument.
2256 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
2257 return DAG.getStore(Op.getOperand(0), DL, FI, Op.getOperand(1),
2258 MachinePointerInfo(SV));
2259}
2260
2261SDValue MipsTargetLowering::lowerVAARG(SDValue Op, SelectionDAG &DAG) const {
2262 SDNode *Node = Op.getNode();
2263 EVT VT = Node->getValueType(0);
2264 SDValue Chain = Node->getOperand(0);
2265 SDValue VAListPtr = Node->getOperand(1);
2266 const Align Align =
2267 llvm::MaybeAlign(Node->getConstantOperandVal(3)).valueOrOne();
2268 const Value *SV = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
2269 SDLoc DL(Node);
2270 unsigned ArgSlotSizeInBytes = (ABI.IsN32() || ABI.IsN64()) ? 8 : 4;
2271
2272 SDValue VAListLoad = DAG.getLoad(getPointerTy(DAG.getDataLayout()), DL, Chain,
2273 VAListPtr, MachinePointerInfo(SV));
2274 SDValue VAList = VAListLoad;
2275
2276 // Re-align the pointer if necessary.
2277 // It should only ever be necessary for 64-bit types on O32 since the minimum
2278 // argument alignment is the same as the maximum type alignment for N32/N64.
2279 //
2280 // FIXME: We currently align too often. The code generator doesn't notice
2281 // when the pointer is still aligned from the last va_arg (or pair of
2282 // va_args for the i64 on O32 case).
2283 if (Align > getMinStackArgumentAlignment()) {
2284 VAList = DAG.getNode(
2285 ISD::ADD, DL, VAList.getValueType(), VAList,
2286 DAG.getConstant(Align.value() - 1, DL, VAList.getValueType()));
2287
2288 VAList = DAG.getNode(
2289 ISD::AND, DL, VAList.getValueType(), VAList,
2290 DAG.getConstant(-(int64_t)Align.value(), DL, VAList.getValueType()));
2291 }
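// For example (illustrative): reading an i64 vararg on O32 with VAList at
// offset 4 rounds the pointer up to 8 ((4 + 7) & -8) before the load, and the
// code below then advances it by 8.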
2292
2293 // Increment the pointer, VAList, to the next vaarg.
2294 auto &TD = DAG.getDataLayout();
2295 unsigned ArgSizeInBytes =
2296 TD.getTypeAllocSize(VT.getTypeForEVT(*DAG.getContext()));
2297 SDValue Tmp3 =
2298 DAG.getNode(ISD::ADD, DL, VAList.getValueType(), VAList,
2299 DAG.getConstant(alignTo(ArgSizeInBytes, ArgSlotSizeInBytes),
2300 DL, VAList.getValueType()));
2301 // Store the incremented VAList to the legalized pointer
2302 Chain = DAG.getStore(VAListLoad.getValue(1), DL, Tmp3, VAListPtr,
2303 MachinePointerInfo(SV));
2304
2305 // In big-endian mode we must adjust the pointer when the load size is smaller
2306 // than the argument slot size. We must also reduce the known alignment to
2307 // match. For example in the N64 ABI, we must add 4 bytes to the offset to get
2308 // the correct half of the slot, and reduce the alignment from 8 (slot
2309 // alignment) down to 4 (type alignment).
2310 if (!Subtarget.isLittle() && ArgSizeInBytes < ArgSlotSizeInBytes) {
2311 unsigned Adjustment = ArgSlotSizeInBytes - ArgSizeInBytes;
2312 VAList = DAG.getNode(ISD::ADD, DL, VAListPtr.getValueType(), VAList,
2313 DAG.getIntPtrConstant(Adjustment, DL));
2314 }
2315 // Load the actual argument out of the pointer VAList
2316 return DAG.getLoad(VT, DL, Chain, VAList, MachinePointerInfo());
2317}
2318
2319 static SDValue lowerFCOPYSIGN32(SDValue Op, SelectionDAG &DAG,
2320 bool HasExtractInsert) {
2321 EVT TyX = Op.getOperand(0).getValueType();
2322 EVT TyY = Op.getOperand(1).getValueType();
2323 SDLoc DL(Op);
2324 SDValue Const1 = DAG.getConstant(1, DL, MVT::i32);
2325 SDValue Const31 = DAG.getConstant(31, DL, MVT::i32);
2326 SDValue Res;
2327
2328 // If operand is of type f64, extract the upper 32-bit. Otherwise, bitcast it
2329 // to i32.
2330 SDValue X = (TyX == MVT::f32) ?
2331 DAG.getNode(ISD::BITCAST, DL, MVT::i32, Op.getOperand(0)) :
2332 DAG.getNode(MipsISD::ExtractElementF64, DL, MVT::i32, Op.getOperand(0),
2333 Const1);
2334 SDValue Y = (TyY == MVT::f32) ?
2335 DAG.getNode(ISD::BITCAST, DL, MVT::i32, Op.getOperand(1)) :
2336 DAG.getNode(MipsISD::ExtractElementF64, DL, MVT::i32, Op.getOperand(1),
2337 Const1);
2338
2339 if (HasExtractInsert) {
2340 // ext E, Y, 31, 1 ; extract bit31 of Y
2341 // ins X, E, 31, 1 ; insert extracted bit at bit31 of X
2342 SDValue E = DAG.getNode(MipsISD::Ext, DL, MVT::i32, Y, Const31, Const1);
2343 Res = DAG.getNode(MipsISD::Ins, DL, MVT::i32, E, Const31, Const1, X);
2344 } else {
2345 // sll SllX, X, 1
2346 // srl SrlX, SllX, 1
2347 // srl SrlY, Y, 31
2348 // sll SllY, SrlX, 31
2349 // or Or, SrlX, SllY
2350 SDValue SllX = DAG.getNode(ISD::SHL, DL, MVT::i32, X, Const1);
2351 SDValue SrlX = DAG.getNode(ISD::SRL, DL, MVT::i32, SllX, Const1);
2352 SDValue SrlY = DAG.getNode(ISD::SRL, DL, MVT::i32, Y, Const31);
2353 SDValue SllY = DAG.getNode(ISD::SHL, DL, MVT::i32, SrlY, Const31);
2354 Res = DAG.getNode(ISD::OR, DL, MVT::i32, SrlX, SllY);
2355 }
2356
2357 if (TyX == MVT::f32)
2358 return DAG.getNode(ISD::BITCAST, DL, Op.getOperand(0).getValueType(), Res);
2359
2360 SDValue LowX = DAG.getNode(MipsISD::ExtractElementF64, DL, MVT::i32,
2361 Op.getOperand(0),
2362 DAG.getConstant(0, DL, MVT::i32));
2363 return DAG.getNode(MipsISD::BuildPairF64, DL, MVT::f64, LowX, Res);
2364}
2365
2366 static SDValue lowerFCOPYSIGN64(SDValue Op, SelectionDAG &DAG,
2367 bool HasExtractInsert) {
2368 unsigned WidthX = Op.getOperand(0).getValueSizeInBits();
2369 unsigned WidthY = Op.getOperand(1).getValueSizeInBits();
2370 EVT TyX = MVT::getIntegerVT(WidthX), TyY = MVT::getIntegerVT(WidthY);
2371 SDLoc DL(Op);
2372 SDValue Const1 = DAG.getConstant(1, DL, MVT::i32);
2373
2374 // Bitcast to integer nodes.
2375 SDValue X = DAG.getNode(ISD::BITCAST, DL, TyX, Op.getOperand(0));
2376 SDValue Y = DAG.getNode(ISD::BITCAST, DL, TyY, Op.getOperand(1));
2377
2378 if (HasExtractInsert) {
2379 // ext E, Y, width(Y) - 1, 1 ; extract bit width(Y)-1 of Y
2380 // ins X, E, width(X) - 1, 1 ; insert extracted bit at bit width(X)-1 of X
2381 SDValue E = DAG.getNode(MipsISD::Ext, DL, TyY, Y,
2382 DAG.getConstant(WidthY - 1, DL, MVT::i32), Const1);
2383
2384 if (WidthX > WidthY)
2385 E = DAG.getNode(ISD::ZERO_EXTEND, DL, TyX, E);
2386 else if (WidthY > WidthX)
2387 E = DAG.getNode(ISD::TRUNCATE, DL, TyX, E);
2388
2389 SDValue I = DAG.getNode(MipsISD::Ins, DL, TyX, E,
2390 DAG.getConstant(WidthX - 1, DL, MVT::i32), Const1,
2391 X);
2392 return DAG.getNode(ISD::BITCAST, DL, Op.getOperand(0).getValueType(), I);
2393 }
2394
2395 // (d)sll SllX, X, 1
2396 // (d)srl SrlX, SllX, 1
2397 // (d)srl SrlY, Y, width(Y)-1
2398 // (d)sll SllY, SrlX, width(Y)-1
2399 // or Or, SrlX, SllY
2400 SDValue SllX = DAG.getNode(ISD::SHL, DL, TyX, X, Const1);
2401 SDValue SrlX = DAG.getNode(ISD::SRL, DL, TyX, SllX, Const1);
2402 SDValue SrlY = DAG.getNode(ISD::SRL, DL, TyY, Y,
2403 DAG.getConstant(WidthY - 1, DL, MVT::i32));
2404
2405 if (WidthX > WidthY)
2406 SrlY = DAG.getNode(ISD::ZERO_EXTEND, DL, TyX, SrlY);
2407 else if (WidthY > WidthX)
2408 SrlY = DAG.getNode(ISD::TRUNCATE, DL, TyX, SrlY);
2409
2410 SDValue SllY = DAG.getNode(ISD::SHL, DL, TyX, SrlY,
2411 DAG.getConstant(WidthX - 1, DL, MVT::i32));
2412 SDValue Or = DAG.getNode(ISD::OR, DL, TyX, SrlX, SllY);
2413 return DAG.getNode(ISD::BITCAST, DL, Op.getOperand(0).getValueType(), Or);
2414}
2415
2416SDValue
2417MipsTargetLowering::lowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const {
2418 if (Subtarget.isGP64bit())
2419 return lowerFCOPYSIGN64(Op, DAG, Subtarget.hasExtractInsert());
2420
2421 return lowerFCOPYSIGN32(Op, DAG, Subtarget.hasExtractInsert());
2422}
2423
2424SDValue MipsTargetLowering::lowerFABS32(SDValue Op, SelectionDAG &DAG,
2425 bool HasExtractInsert) const {
2426 SDLoc DL(Op);
2427 SDValue Res, Const1 = DAG.getConstant(1, DL, MVT::i32);
2428
2429 if (DAG.getTarget().Options.NoNaNsFPMath || Subtarget.inAbs2008Mode())
2430 return DAG.getNode(MipsISD::FAbs, DL, Op.getValueType(), Op.getOperand(0));
2431
2432 // If operand is of type f64, extract the upper 32-bit. Otherwise, bitcast it
2433 // to i32.
2434 SDValue X = (Op.getValueType() == MVT::f32)
2435 ? DAG.getNode(ISD::BITCAST, DL, MVT::i32, Op.getOperand(0))
2436 : DAG.getNode(MipsISD::ExtractElementF64, DL, MVT::i32,
2437 Op.getOperand(0), Const1);
2438
2439 // Clear MSB.
2440 if (HasExtractInsert)
2441 Res = DAG.getNode(MipsISD::Ins, DL, MVT::i32,
2442 DAG.getRegister(Mips::ZERO, MVT::i32),
2443 DAG.getConstant(31, DL, MVT::i32), Const1, X);
2444 else {
2445 // TODO: Provide DAG patterns which transform (and x, cst)
2446 // back to a (shl (srl x (clz cst)) (clz cst)) sequence.
2447 SDValue SllX = DAG.getNode(ISD::SHL, DL, MVT::i32, X, Const1);
2448 Res = DAG.getNode(ISD::SRL, DL, MVT::i32, SllX, Const1);
2449 }
2450
2451 if (Op.getValueType() == MVT::f32)
2452 return DAG.getNode(ISD::BITCAST, DL, MVT::f32, Res);
2453
2454 // FIXME: For mips32r2, the sequence of (BuildPairF64 (ins (ExtractElementF64
2455 // Op 1), $zero, 31 1) (ExtractElementF64 Op 0)) and the Op has one use, we
2456 // should be able to drop the usage of mfc1/mtc1 and rewrite the register in
2457 // place.
2458 SDValue LowX =
2459 DAG.getNode(MipsISD::ExtractElementF64, DL, MVT::i32, Op.getOperand(0),
2460 DAG.getConstant(0, DL, MVT::i32));
2461 return DAG.getNode(MipsISD::BuildPairF64, DL, MVT::f64, LowX, Res);
2462}
2463
2464SDValue MipsTargetLowering::lowerFABS64(SDValue Op, SelectionDAG &DAG,
2465 bool HasExtractInsert) const {
2466 SDLoc DL(Op);
2467 SDValue Res, Const1 = DAG.getConstant(1, DL, MVT::i32);
2468
2469 if (DAG.getTarget().Options.NoNaNsFPMath || Subtarget.inAbs2008Mode())
2470 return DAG.getNode(MipsISD::FAbs, DL, Op.getValueType(), Op.getOperand(0));
2471
2472 // Bitcast to integer node.
2473 SDValue X = DAG.getNode(ISD::BITCAST, DL, MVT::i64, Op.getOperand(0));
2474
2475 // Clear MSB.
2476 if (HasExtractInsert)
2477 Res = DAG.getNode(MipsISD::Ins, DL, MVT::i64,
2478 DAG.getRegister(Mips::ZERO_64, MVT::i64),
2479 DAG.getConstant(63, DL, MVT::i32), Const1, X);
2480 else {
2481 SDValue SllX = DAG.getNode(ISD::SHL, DL, MVT::i64, X, Const1);
2482 Res = DAG.getNode(ISD::SRL, DL, MVT::i64, SllX, Const1);
2483 }
2484
2485 return DAG.getNode(ISD::BITCAST, DL, MVT::f64, Res);
2486}
2487
2488SDValue MipsTargetLowering::lowerFABS(SDValue Op, SelectionDAG &DAG) const {
2489 if ((ABI.IsN32() || ABI.IsN64()) && (Op.getValueType() == MVT::f64))
2490 return lowerFABS64(Op, DAG, Subtarget.hasExtractInsert());
2491
2492 return lowerFABS32(Op, DAG, Subtarget.hasExtractInsert());
2493}
2494
2495SDValue MipsTargetLowering::
2496lowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const {
2497 // check the depth
2498 if (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue() != 0) {
2499 DAG.getContext()->emitError(
2500 "return address can be determined only for current frame");
2501 return SDValue();
2502 }
2503
2504 MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
2505 MFI.setFrameAddressIsTaken(true);
2506 EVT VT = Op.getValueType();
2507 SDLoc DL(Op);
2508 SDValue FrameAddr = DAG.getCopyFromReg(
2509 DAG.getEntryNode(), DL, ABI.IsN64() ? Mips::FP_64 : Mips::FP, VT);
2510 return FrameAddr;
2511}
2512
2513SDValue MipsTargetLowering::lowerRETURNADDR(SDValue Op,
2514 SelectionDAG &DAG) const {
2515 if (verifyReturnAddressArgumentIsConstant(Op, DAG))
2516 return SDValue();
2517
2518 // check the depth
2519 if (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue() != 0) {
2520 DAG.getContext()->emitError(
2521 "return address can be determined only for current frame");
2522 return SDValue();
2523 }
2524
2525 MachineFunction &MF = DAG.getMachineFunction();
2526 MachineFrameInfo &MFI = MF.getFrameInfo();
2527 MVT VT = Op.getSimpleValueType();
2528 unsigned RA = ABI.IsN64() ? Mips::RA_64 : Mips::RA;
2529 MFI.setReturnAddressIsTaken(true);
2530
2531 // Return RA, which contains the return address. Mark it an implicit live-in.
2532 Register Reg = MF.addLiveIn(RA, getRegClassFor(VT));
2533 return DAG.getCopyFromReg(DAG.getEntryNode(), SDLoc(Op), Reg, VT);
2534}
2535
2536// An EH_RETURN is the result of lowering llvm.eh.return which in turn is
2537// generated from __builtin_eh_return (offset, handler)
2538// The effect of this is to adjust the stack pointer by "offset"
2539// and then branch to "handler".
2540SDValue MipsTargetLowering::lowerEH_RETURN(SDValue Op, SelectionDAG &DAG)
2541 const {
2542 MachineFunction &MF = DAG.getMachineFunction();
2543 MipsFunctionInfo *MipsFI = MF.getInfo<MipsFunctionInfo>();
2544
2545 MipsFI->setCallsEhReturn();
2546 SDValue Chain = Op.getOperand(0);
2547 SDValue Offset = Op.getOperand(1);
2548 SDValue Handler = Op.getOperand(2);
2549 SDLoc DL(Op);
2550 EVT Ty = ABI.IsN64() ? MVT::i64 : MVT::i32;
2551
2552 // Store stack offset in V1, store jump target in V0. Glue CopyToReg and
2553 // EH_RETURN nodes, so that instructions are emitted back-to-back.
2554 unsigned OffsetReg = ABI.IsN64() ? Mips::V1_64 : Mips::V1;
2555 unsigned AddrReg = ABI.IsN64() ? Mips::V0_64 : Mips::V0;
2556 Chain = DAG.getCopyToReg(Chain, DL, OffsetReg, Offset, SDValue());
2557 Chain = DAG.getCopyToReg(Chain, DL, AddrReg, Handler, Chain.getValue(1));
2558 return DAG.getNode(MipsISD::EH_RETURN, DL, MVT::Other, Chain,
2559 DAG.getRegister(OffsetReg, Ty),
2560 DAG.getRegister(AddrReg, getPointerTy(MF.getDataLayout())),
2561 Chain.getValue(1));
2562}
2563
2564SDValue MipsTargetLowering::lowerATOMIC_FENCE(SDValue Op,
2565 SelectionDAG &DAG) const {
2566 // FIXME: Need pseudo-fence for 'singlethread' fences
2567 // FIXME: Set SType for weaker fences where supported/appropriate.
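// (Illustrative) With SType == 0 the node below becomes a plain "sync"
// instruction, the strongest memory barrier MIPS provides.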
2568 unsigned SType = 0;
2569 SDLoc DL(Op);
2570 return DAG.getNode(MipsISD::Sync, DL, MVT::Other, Op.getOperand(0),
2571 DAG.getConstant(SType, DL, MVT::i32));
2572}
2573
2574SDValue MipsTargetLowering::lowerShiftLeftParts(SDValue Op,
2575 SelectionDAG &DAG) const {
2576 SDLoc DL(Op);
2577 MVT VT = Subtarget.isGP64bit() ? MVT::i64 : MVT::i32;
2578
2579 SDValue Lo = Op.getOperand(0), Hi = Op.getOperand(1);
2580 SDValue Shamt = Op.getOperand(2);
2581 // if shamt < (VT.bits):
2582 // lo = (shl lo, shamt)
2583 // hi = (or (shl hi, shamt) (srl (srl lo, 1), ~shamt))
2584 // else:
2585 // lo = 0
2586 // hi = (shl lo, shamt[4:0])
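// For example (illustrative, 32-bit registers): for shamt = 40 the else
// branch applies, giving lo = 0 and hi = lo << (40 & 31) = lo << 8, which
// matches a 64-bit left shift by 40.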
2587 SDValue Not = DAG.getNode(ISD::XOR, DL, MVT::i32, Shamt,
2588 DAG.getConstant(-1, DL, MVT::i32));
2589 SDValue ShiftRight1Lo = DAG.getNode(ISD::SRL, DL, VT, Lo,
2590 DAG.getConstant(1, DL, VT));
2591 SDValue ShiftRightLo = DAG.getNode(ISD::SRL, DL, VT, ShiftRight1Lo, Not);
2592 SDValue ShiftLeftHi = DAG.getNode(ISD::SHL, DL, VT, Hi, Shamt);
2593 SDValue Or = DAG.getNode(ISD::OR, DL, VT, ShiftLeftHi, ShiftRightLo);
2594 SDValue ShiftLeftLo = DAG.getNode(ISD::SHL, DL, VT, Lo, Shamt);
2595 SDValue Cond = DAG.getNode(ISD::AND, DL, MVT::i32, Shamt,
2596 DAG.getConstant(VT.getSizeInBits(), DL, MVT::i32));
2597 Lo = DAG.getNode(ISD::SELECT, DL, VT, Cond,
2598 DAG.getConstant(0, DL, VT), ShiftLeftLo);
2599 Hi = DAG.getNode(ISD::SELECT, DL, VT, Cond, ShiftLeftLo, Or);
2600
2601 SDValue Ops[2] = {Lo, Hi};
2602 return DAG.getMergeValues(Ops, DL);
2603}
2604
2605SDValue MipsTargetLowering::lowerShiftRightParts(SDValue Op, SelectionDAG &DAG,
2606 bool IsSRA) const {
2607 SDLoc DL(Op);
2608 SDValue Lo = Op.getOperand(0), Hi = Op.getOperand(1);
2609 SDValue Shamt = Op.getOperand(2);
2610 MVT VT = Subtarget.isGP64bit() ? MVT::i64 : MVT::i32;
2611
2612 // if shamt < (VT.bits):
2613 // lo = (or (shl (shl hi, 1), ~shamt) (srl lo, shamt))
2614 // if isSRA:
2615 // hi = (sra hi, shamt)
2616 // else:
2617 // hi = (srl hi, shamt)
2618 // else:
2619 // if isSRA:
2620 // lo = (sra hi, shamt[4:0])
2621 // hi = (sra hi, 31)
2622 // else:
2623 // lo = (srl hi, shamt[4:0])
2624 // hi = 0
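// For example (illustrative, 32-bit registers): a 64-bit arithmetic shift
// right by 36 takes the else branch with shamt[4:0] = 4, giving
// lo = (sra hi, 4) and hi = (sra hi, 31), i.e. all sign bits.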
2625 SDValue Not = DAG.getNode(ISD::XOR, DL, MVT::i32, Shamt,
2626 DAG.getConstant(-1, DL, MVT::i32));
2627 SDValue ShiftLeft1Hi = DAG.getNode(ISD::SHL, DL, VT, Hi,
2628 DAG.getConstant(1, DL, VT));
2629 SDValue ShiftLeftHi = DAG.getNode(ISD::SHL, DL, VT, ShiftLeft1Hi, Not);
2630 SDValue ShiftRightLo = DAG.getNode(ISD::SRL, DL, VT, Lo, Shamt);
2631 SDValue Or = DAG.getNode(ISD::OR, DL, VT, ShiftLeftHi, ShiftRightLo);
2632 SDValue ShiftRightHi = DAG.getNode(IsSRA ? ISD::SRA : ISD::SRL,
2633 DL, VT, Hi, Shamt);
2634 SDValue Cond = DAG.getNode(ISD::AND, DL, MVT::i32, Shamt,
2635 DAG.getConstant(VT.getSizeInBits(), DL, MVT::i32));
2636 SDValue Ext = DAG.getNode(ISD::SRA, DL, VT, Hi,
2637 DAG.getConstant(VT.getSizeInBits() - 1, DL, VT));
2638
2639 if (!(Subtarget.hasMips4() || Subtarget.hasMips32())) {
2640 SDVTList VTList = DAG.getVTList(VT, VT);
2641 return DAG.getNode(Subtarget.isGP64bit() ? Mips::PseudoD_SELECT_I64
2642 : Mips::PseudoD_SELECT_I,
2643 DL, VTList, Cond, ShiftRightHi,
2644 IsSRA ? Ext : DAG.getConstant(0, DL, VT), Or,
2645 ShiftRightHi);
2646 }
2647
2648 Lo = DAG.getNode(ISD::SELECT, DL, VT, Cond, ShiftRightHi, Or);
2649 Hi = DAG.getNode(ISD::SELECT, DL, VT, Cond,
2650 IsSRA ? Ext : DAG.getConstant(0, DL, VT), ShiftRightHi);
2651
2652 SDValue Ops[2] = {Lo, Hi};
2653 return DAG.getMergeValues(Ops, DL);
2654}
2655
2656static SDValue createLoadLR(unsigned Opc, SelectionDAG &DAG, LoadSDNode *LD,
2657 SDValue Chain, SDValue Src, unsigned Offset) {
2658 SDValue Ptr = LD->getBasePtr();
2659 EVT VT = LD->getValueType(0), MemVT = LD->getMemoryVT();
2660 EVT BasePtrVT = Ptr.getValueType();
2661 SDLoc DL(LD);
2662 SDVTList VTList = DAG.getVTList(VT, MVT::Other);
2663
2664 if (Offset)
2665 Ptr = DAG.getNode(ISD::ADD, DL, BasePtrVT, Ptr,
2666 DAG.getConstant(Offset, DL, BasePtrVT));
2667
2668 SDValue Ops[] = { Chain, Ptr, Src };
2669 return DAG.getMemIntrinsicNode(Opc, DL, VTList, Ops, MemVT,
2670 LD->getMemOperand());
2671}
2672
2673// Expand an unaligned 32 or 64-bit integer load node.
2674 SDValue MipsTargetLowering::lowerLOAD(SDValue Op, SelectionDAG &DAG) const {
2675 LoadSDNode *LD = cast<LoadSDNode>(Op);
2676 EVT MemVT = LD->getMemoryVT();
2677
2678 if (Subtarget.systemSupportsUnalignedAccess())
2679 return Op;
2680
2681 // Return if load is aligned or if MemVT is neither i32 nor i64.
2682 if ((LD->getAlign().value() >= (MemVT.getSizeInBits() / 8)) ||
2683 ((MemVT != MVT::i32) && (MemVT != MVT::i64)))
2684 return SDValue();
2685
2686 bool IsLittle = Subtarget.isLittle();
2687 EVT VT = Op.getValueType();
2688 ISD::LoadExtType ExtType = LD->getExtensionType();
2689 SDValue Chain = LD->getChain(), Undef = DAG.getUNDEF(VT);
2690
2691 assert((VT == MVT::i32) || (VT == MVT::i64));
2692
2693 // Expand
2694 // (set dst, (i64 (load baseptr)))
2695 // to
2696 // (set tmp, (ldl (add baseptr, 7), undef))
2697 // (set dst, (ldr baseptr, tmp))
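// For example (illustrative, little-endian): the ldl at baseptr+7 supplies
// the most-significant bytes and the ldr at baseptr supplies the
// least-significant bytes, together assembling the unaligned doubleword.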
2698 if ((VT == MVT::i64) && (ExtType == ISD::NON_EXTLOAD)) {
2699 SDValue LDL = createLoadLR(MipsISD::LDL, DAG, LD, Chain, Undef,
2700 IsLittle ? 7 : 0);
2701 return createLoadLR(MipsISD::LDR, DAG, LD, LDL.getValue(1), LDL,
2702 IsLittle ? 0 : 7);
2703 }
2704
2705 SDValue LWL = createLoadLR(MipsISD::LWL, DAG, LD, Chain, Undef,
2706 IsLittle ? 3 : 0);
2707 SDValue LWR = createLoadLR(MipsISD::LWR, DAG, LD, LWL.getValue(1), LWL,
2708 IsLittle ? 0 : 3);
2709
2710 // Expand
2711 // (set dst, (i32 (load baseptr))) or
2712 // (set dst, (i64 (sextload baseptr))) or
2713 // (set dst, (i64 (extload baseptr)))
2714 // to
2715 // (set tmp, (lwl (add baseptr, 3), undef))
2716 // (set dst, (lwr baseptr, tmp))
2717 if ((VT == MVT::i32) || (ExtType == ISD::SEXTLOAD) ||
2718 (ExtType == ISD::EXTLOAD))
2719 return LWR;
2720
2721 assert((VT == MVT::i64) && (ExtType == ISD::ZEXTLOAD));
2722
2723 // Expand
2724 // (set dst, (i64 (zextload baseptr)))
2725 // to
2726 // (set tmp0, (lwl (add baseptr, 3), undef))
2727 // (set tmp1, (lwr baseptr, tmp0))
2728 // (set tmp2, (shl tmp1, 32))
2729 // (set dst, (srl tmp2, 32))
2730 SDLoc DL(LD);
2731 SDValue Const32 = DAG.getConstant(32, DL, MVT::i32);
2732 SDValue SLL = DAG.getNode(ISD::SHL, DL, MVT::i64, LWR, Const32);
2733 SDValue SRL = DAG.getNode(ISD::SRL, DL, MVT::i64, SLL, Const32);
2734 SDValue Ops[] = { SRL, LWR.getValue(1) };
2735 return DAG.getMergeValues(Ops, DL);
2736}
2737
2738static SDValue createStoreLR(unsigned Opc, SelectionDAG &DAG, StoreSDNode *SD,
2739 SDValue Chain, unsigned Offset) {
2740 SDValue Ptr = SD->getBasePtr(), Value = SD->getValue();
2741 EVT MemVT = SD->getMemoryVT(), BasePtrVT = Ptr.getValueType();
2742 SDLoc DL(SD);
2743 SDVTList VTList = DAG.getVTList(MVT::Other);
2744
2745 if (Offset)
2746 Ptr = DAG.getNode(ISD::ADD, DL, BasePtrVT, Ptr,
2747 DAG.getConstant(Offset, DL, BasePtrVT));
2748
2749 SDValue Ops[] = { Chain, Value, Ptr };
2750 return DAG.getMemIntrinsicNode(Opc, DL, VTList, Ops, MemVT,
2751 SD->getMemOperand());
2752}
2753
2754// Expand an unaligned 32 or 64-bit integer store node.
2755 static SDValue lowerUnalignedIntStore(StoreSDNode *SD, SelectionDAG &DAG,
2756 bool IsLittle) {
2757 SDValue Value = SD->getValue(), Chain = SD->getChain();
2758 EVT VT = Value.getValueType();
2759
2760 // Expand
2761 // (store val, baseptr) or
2762 // (truncstore val, baseptr)
2763 // to
2764 // (swl val, (add baseptr, 3))
2765 // (swr val, baseptr)
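// For example (illustrative, little-endian): the swl at baseptr+3 stores the
// most-significant bytes of val and the swr at baseptr stores the
// least-significant bytes, covering the unaligned word between them.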
2766 if ((VT == MVT::i32) || SD->isTruncatingStore()) {
2767 SDValue SWL = createStoreLR(MipsISD::SWL, DAG, SD, Chain,
2768 IsLittle ? 3 : 0);
2769 return createStoreLR(MipsISD::SWR, DAG, SD, SWL, IsLittle ? 0 : 3);
2770 }
2771
2772 assert(VT == MVT::i64);
2773
2774 // Expand
2775 // (store val, baseptr)
2776 // to
2777 // (sdl val, (add baseptr, 7))
2778 // (sdr val, baseptr)
2779 SDValue SDL = createStoreLR(MipsISD::SDL, DAG, SD, Chain, IsLittle ? 7 : 0);
2780 return createStoreLR(MipsISD::SDR, DAG, SD, SDL, IsLittle ? 0 : 7);
2781}
2782
2783// Lower (store (fp_to_sint $fp) $ptr) to (store (TruncIntFP $fp), $ptr).
2784 static SDValue lowerFP_TO_SINT_STORE(StoreSDNode *SD, SelectionDAG &DAG,
2785 bool SingleFloat) {
2786 SDValue Val = SD->getValue();
2787
2788 if (Val.getOpcode() != ISD::FP_TO_SINT ||
2789 (Val.getValueSizeInBits() > 32 && SingleFloat))
2790 return SDValue();
2791
2792 EVT FPTy = EVT::getFloatingPointVT(Val.getValueSizeInBits());
2793 SDValue Tr = DAG.getNode(MipsISD::TruncIntFP, SDLoc(Val), FPTy,
2794 Val.getOperand(0));
2795 return DAG.getStore(SD->getChain(), SDLoc(SD), Tr, SD->getBasePtr(),
2796 SD->getPointerInfo(), SD->getAlign(),
2797 SD->getMemOperand()->getFlags());
2798}
2799
2800 SDValue MipsTargetLowering::lowerSTORE(SDValue Op, SelectionDAG &DAG) const {
2801 StoreSDNode *SD = cast<StoreSDNode>(Op);
2802 EVT MemVT = SD->getMemoryVT();
2803
2804 // Lower unaligned integer stores.
2805 if (!Subtarget.systemSupportsUnalignedAccess() &&
2806 (SD->getAlign().value() < (MemVT.getSizeInBits() / 8)) &&
2807 ((MemVT == MVT::i32) || (MemVT == MVT::i64)))
2808 return lowerUnalignedIntStore(SD, DAG, Subtarget.isLittle());
2809
2810 return lowerFP_TO_SINT_STORE(SD, DAG, Subtarget.isSingleFloat());
2811}
2812
2813SDValue MipsTargetLowering::lowerEH_DWARF_CFA(SDValue Op,
2814 SelectionDAG &DAG) const {
2815
2816 // Return a fixed StackObject with offset 0 which points to the old stack
2817 // pointer.
2818 MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
2819 EVT ValTy = Op->getValueType(0);
2820 int FI = MFI.CreateFixedObject(Op.getValueSizeInBits() / 8, 0, false);
2821 return DAG.getFrameIndex(FI, ValTy);
2822}
2823
2824SDValue MipsTargetLowering::lowerFP_TO_SINT(SDValue Op,
2825 SelectionDAG &DAG) const {
2826 if (Op.getValueSizeInBits() > 32 && Subtarget.isSingleFloat())
2827 return SDValue();
2828
2829 EVT FPTy = EVT::getFloatingPointVT(Op.getValueSizeInBits());
2830 SDValue Trunc = DAG.getNode(MipsISD::TruncIntFP, SDLoc(Op), FPTy,
2831 Op.getOperand(0));
2832 return DAG.getNode(ISD::BITCAST, SDLoc(Op), Op.getValueType(), Trunc);
2833}
2834
2835//===----------------------------------------------------------------------===//
2836// Calling Convention Implementation
2837//===----------------------------------------------------------------------===//
2838
2839//===----------------------------------------------------------------------===//
2840// TODO: Implement a generic logic using tblgen that can support this.
2841// Mips O32 ABI rules:
2842// ---
2843// i32 - Passed in A0, A1, A2, A3 and stack
2844// f32 - Only passed in f32 registers if no int reg has been used yet to hold
2845// an argument. Otherwise, passed in A1, A2, A3 and stack.
2846// f64 - Only passed in two aliased f32 registers if no int reg has been used
2847// yet to hold an argument. Otherwise, use A2, A3 and stack. If A1 is
2848// not used, it must be shadowed. If only A3 is available, shadow it and
2849// go to stack.
2850// vXiX - Received as scalarized i32s, passed in A0 - A3 and the stack.
2851// vXf32 - Passed in either a pair of registers {A0, A1}, {A2, A3} or {A0 - A3}
2852// with the remainder spilled to the stack.
2853// vXf64 - Passed in either {A0, A1, A2, A3} or {A2, A3} and in both cases
2854// spilling the remainder to the stack.
2855//
2856// For vararg functions, all arguments are passed in A0, A1, A2, A3 and stack.
2857//===----------------------------------------------------------------------===//
2858
2859static bool CC_MipsO32(unsigned ValNo, MVT ValVT, MVT LocVT,
2860 CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags,
2861 CCState &State, ArrayRef<MCPhysReg> F64Regs) {
2862 const MipsSubtarget &Subtarget = static_cast<const MipsSubtarget &>(
2863 State.getMachineFunction().getSubtarget());
2864
2865 static const MCPhysReg IntRegs[] = { Mips::A0, Mips::A1, Mips::A2, Mips::A3 };
2866
2867 const MipsCCState * MipsState = static_cast<MipsCCState *>(&State);
2868
2869 static const MCPhysReg F32Regs[] = { Mips::F12, Mips::F14 };
2870
2871 static const MCPhysReg FloatVectorIntRegs[] = { Mips::A0, Mips::A2 };
2872
2873 // Do not process byval args here.
2874 if (ArgFlags.isByVal())
2875 return true;
2876
2877 // Promote i8 and i16
2878 if (ArgFlags.isInReg() && !Subtarget.isLittle()) {
2879 if (LocVT == MVT::i8 || LocVT == MVT::i16 || LocVT == MVT::i32) {
2880 LocVT = MVT::i32;
2881 if (ArgFlags.isSExt())
2882 LocInfo = CCValAssign::SExtUpper;
2883 else if (ArgFlags.isZExt())
2884 LocInfo = CCValAssign::ZExtUpper;
2885 else
2886 LocInfo = CCValAssign::AExtUpper;
2887 }
2888 }
2889
2890 // Promote i8 and i16
2891 if (LocVT == MVT::i8 || LocVT == MVT::i16) {
2892 LocVT = MVT::i32;
2893 if (ArgFlags.isSExt())
2894 LocInfo = CCValAssign::SExt;
2895 else if (ArgFlags.isZExt())
2896 LocInfo = CCValAssign::ZExt;
2897 else
2898 LocInfo = CCValAssign::AExt;
2899 }
2900
2901 unsigned Reg;
2902
2903 // f32 and f64 are allocated in A0, A1, A2, A3 when any of the following
2904 // is true: the function is vararg, the argument is the 3rd or higher, or a
2905 // previous argument is not f32 or f64.
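// E.g. (illustrative): in f(int, float) the float has ValNo 1 while no F32
// register has been used yet (getFirstUnallocated(F32Regs) == 0 != 1), so
// the float is passed in A1 as an i32.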
2906 bool AllocateFloatsInIntReg = State.isVarArg() || ValNo > 1 ||
2907 State.getFirstUnallocated(F32Regs) != ValNo;
2908 Align OrigAlign = ArgFlags.getNonZeroOrigAlign();
2909 bool isI64 = (ValVT == MVT::i32 && OrigAlign == Align(8));
2910 bool isVectorFloat = MipsState->WasOriginalArgVectorFloat(ValNo);
2911
2912 // The MIPS vector ABI for floats passes them in a pair of registers
2913 if (ValVT == MVT::i32 && isVectorFloat) {
2914 // This is the start of a vector that was scalarized into an unknown number
2915 // of components. It doesn't matter how many there are. Allocate one of the
2916 // notional 8 byte aligned registers which map onto the argument stack, and
2917 // shadow the register lost to alignment requirements.
2918 if (ArgFlags.isSplit()) {
2919 Reg = State.AllocateReg(FloatVectorIntRegs);
2920 if (Reg == Mips::A2)
2921 State.AllocateReg(Mips::A1);
2922 else if (Reg == 0)
2923 State.AllocateReg(Mips::A3);
2924 } else {
2925 // If we're an intermediate component of the split, we can just attempt to
2926 // allocate a register directly.
2927 Reg = State.AllocateReg(IntRegs);
2928 }
2929 } else if (ValVT == MVT::i32 ||
2930 (ValVT == MVT::f32 && AllocateFloatsInIntReg)) {
2931 Reg = State.AllocateReg(IntRegs);
2932 // If this is the first part of an i64 arg,
2933 // the allocated register must be either A0 or A2.
2934 if (isI64 && (Reg == Mips::A1 || Reg == Mips::A3))
2935 Reg = State.AllocateReg(IntRegs);
2936 LocVT = MVT::i32;
2937 } else if (ValVT == MVT::f64 && AllocateFloatsInIntReg) {
2938 LocVT = MVT::i32;
2939
2940 // Allocate int register and shadow next int register. If first
2941 // available register is Mips::A1 or Mips::A3, shadow it too.
2942 Reg = State.AllocateReg(IntRegs);
2943 if (Reg == Mips::A1 || Reg == Mips::A3)
2944 Reg = State.AllocateReg(IntRegs);
2945
2946 if (Reg) {
2947 State.addLoc(
2948 CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
2949 MCRegister HiReg = State.AllocateReg(IntRegs);
2950 assert(HiReg);
2951 State.addLoc(
2952 CCValAssign::getCustomReg(ValNo, ValVT, HiReg, LocVT, LocInfo));
2953 return false;
2954 }
2955 } else if (ValVT.isFloatingPoint() && !AllocateFloatsInIntReg) {
2956 // we are guaranteed to find an available float register
2957 if (ValVT == MVT::f32) {
2958 Reg = State.AllocateReg(F32Regs);
2959 // Shadow int register
2960 State.AllocateReg(IntRegs);
2961 } else {
2962 Reg = State.AllocateReg(F64Regs);
2963 // Shadow int registers
2964 unsigned Reg2 = State.AllocateReg(IntRegs);
2965 if (Reg2 == Mips::A1 || Reg2 == Mips::A3)
2966 State.AllocateReg(IntRegs);
2967 State.AllocateReg(IntRegs);
2968 }
2969 } else
2970 llvm_unreachable("Cannot handle this ValVT.");
2971
2972 if (!Reg) {
2973 unsigned Offset = State.AllocateStack(ValVT.getStoreSize(), OrigAlign);
2974 State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
2975 } else
2976 State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
2977
2978 return false;
2979}
2980
2981static bool CC_MipsO32_FP32(unsigned ValNo, MVT ValVT,
2982 MVT LocVT, CCValAssign::LocInfo LocInfo,
2983 ISD::ArgFlagsTy ArgFlags, CCState &State) {
2984 static const MCPhysReg F64Regs[] = { Mips::D6, Mips::D7 };
2985
2986 return CC_MipsO32(ValNo, ValVT, LocVT, LocInfo, ArgFlags, State, F64Regs);
2987}
2988
2989static bool CC_MipsO32_FP64(unsigned ValNo, MVT ValVT,
2990 MVT LocVT, CCValAssign::LocInfo LocInfo,
2991 ISD::ArgFlagsTy ArgFlags, CCState &State) {
2992 static const MCPhysReg F64Regs[] = { Mips::D12_64, Mips::D14_64 };
2993
2994 return CC_MipsO32(ValNo, ValVT, LocVT, LocInfo, ArgFlags, State, F64Regs);
2995}
2996
2997static bool CC_MipsO32(unsigned ValNo, MVT ValVT, MVT LocVT,
2998 CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags,
2999 CCState &State) LLVM_ATTRIBUTE_UNUSED;
3000
3001#include "MipsGenCallingConv.inc"
3002
3003 CCAssignFn *MipsTargetLowering::CCAssignFnForCall() const {
3004 return CC_Mips_FixedArg;
3005 }
3006
3007 CCAssignFn *MipsTargetLowering::CCAssignFnForReturn() const {
3008 return RetCC_Mips;
3009 }
3010//===----------------------------------------------------------------------===//
3011// Call Calling Convention Implementation
3012//===----------------------------------------------------------------------===//
3013
3014SDValue MipsTargetLowering::passArgOnStack(SDValue StackPtr, unsigned Offset,
3015 SDValue Chain, SDValue Arg,
3016 const SDLoc &DL, bool IsTailCall,
3017 SelectionDAG &DAG) const {
3018 if (!IsTailCall) {
3019 SDValue PtrOff =
3020 DAG.getNode(ISD::ADD, DL, getPointerTy(DAG.getDataLayout()), StackPtr,
3021 DAG.getIntPtrConstant(Offset, DL));
3022 return DAG.getStore(Chain, DL, Arg, PtrOff, MachinePointerInfo());
3023 }
3024
3025 MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
3026 int FI = MFI.CreateFixedObject(Arg.getValueSizeInBits() / 8, Offset, false);
3027 SDValue FIN = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
3028 return DAG.getStore(Chain, DL, Arg, FIN, MachinePointerInfo(), MaybeAlign(),
3029 MachineMemOperand::MOVolatile);
3030}
3031
3032 void MipsTargetLowering::
3033 getOpndList(SmallVectorImpl<SDValue> &Ops,
3034 std::deque<std::pair<unsigned, SDValue>> &RegsToPass,
3035 bool IsPICCall, bool GlobalOrExternal, bool InternalLinkage,
3036 bool IsCallReloc, CallLoweringInfo &CLI, SDValue Callee,
3037 SDValue Chain) const {
3038 // Insert node "GP copy globalreg" before call to function.
3039 //
3040 // R_MIPS_CALL* operators (emitted when non-internal functions are called
3041 // in PIC mode) allow symbols to be resolved via lazy binding.
3042 // The lazy binding stub requires GP to point to the GOT.
3043 // Note that we don't need GP to point to the GOT for indirect calls
3044 // (when R_MIPS_CALL* is not used for the call) because the Mips linker
3045 // generates a lazy binding stub for a function only when R_MIPS_CALL* are the
3046 // only relocs used for the function (that is, the Mips linker doesn't generate
3047 // a lazy binding stub for a function whose address is taken in the program).
3048 if (IsPICCall && !InternalLinkage && IsCallReloc) {
3049 unsigned GPReg = ABI.IsN64() ? Mips::GP_64 : Mips::GP;
3050 EVT Ty = ABI.IsN64() ? MVT::i64 : MVT::i32;
3051 RegsToPass.push_back(std::make_pair(GPReg, getGlobalReg(CLI.DAG, Ty)));
3052 }
3053
3054 // Build a sequence of copy-to-reg nodes chained together with token
3055 // chain and flag operands which copy the outgoing args into registers.
3056 // The InGlue is necessary since all emitted instructions must be
3057 // stuck together.
3058 SDValue InGlue;
3059
3060 for (auto &R : RegsToPass) {
3061 Chain = CLI.DAG.getCopyToReg(Chain, CLI.DL, R.first, R.second, InGlue);
3062 InGlue = Chain.getValue(1);
3063 }
3064
3065 // Add argument registers to the end of the list so that they are
3066 // known live into the call.
3067 for (auto &R : RegsToPass)
3068 Ops.push_back(CLI.DAG.getRegister(R.first, R.second.getValueType()));
3069
3070 // Add a register mask operand representing the call-preserved registers.
3071 const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
3072 const uint32_t *Mask =
3073 TRI->getCallPreservedMask(CLI.DAG.getMachineFunction(), CLI.CallConv);
3074 assert(Mask && "Missing call preserved mask for calling convention");
3075 if (Subtarget.inMips16HardFloat()) {
3076 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(CLI.Callee)) {
3077 StringRef Sym = G->getGlobal()->getName();
3078 Function *F = G->getGlobal()->getParent()->getFunction(Sym);
3079 if (F && F->hasFnAttribute("__Mips16RetHelper")) {
3080 Mask = MipsRegisterInfo::getMips16RetHelperMask();
3081 }
3082 }
3083 }
3084 Ops.push_back(CLI.DAG.getRegisterMask(Mask));
3085
3086 if (InGlue.getNode())
3087 Ops.push_back(InGlue);
3088}
3089
3090 void MipsTargetLowering::AdjustInstrPostInstrSelection(MachineInstr &MI,
3091 SDNode *Node) const {
3092 switch (MI.getOpcode()) {
3093 default:
3094 return;
3095 case Mips::JALR:
3096 case Mips::JALRPseudo:
3097 case Mips::JALR64:
3098 case Mips::JALR64Pseudo:
3099 case Mips::JALR16_MM:
3100 case Mips::JALRC16_MMR6:
3101 case Mips::TAILCALLREG:
3102 case Mips::TAILCALLREG64:
3103 case Mips::TAILCALLR6REG:
3104 case Mips::TAILCALL64R6REG:
3105 case Mips::TAILCALLREG_MM:
3106 case Mips::TAILCALLREG_MMR6: {
3107 if (!EmitJalrReloc ||
3108 Subtarget.inMips16Mode() ||
3109 !isPositionIndependent() ||
3110 Node->getNumOperands() < 1 ||
3111 Node->getOperand(0).getNumOperands() < 2) {
3112 return;
3113 }
3114 // We are after the callee address, set by LowerCall().
3115 // If added to MI, asm printer will emit .reloc R_MIPS_JALR for the
3116 // symbol.
3117 const SDValue TargetAddr = Node->getOperand(0).getOperand(1);
3118 StringRef Sym;
3119 if (const GlobalAddressSDNode *G =
3120 dyn_cast_or_null<const GlobalAddressSDNode>(TargetAddr)) {
3121 // We must not emit the R_MIPS_JALR relocation against data symbols
3122 // since this will cause run-time crashes if the linker replaces the
3123 // call instruction with a relative branch to the data symbol.
3124 if (!isa<Function>(G->getGlobal())) {
3125 LLVM_DEBUG(dbgs() << "Not adding R_MIPS_JALR against data symbol "
3126 << G->getGlobal()->getName() << "\n");
3127 return;
3128 }
3129 Sym = G->getGlobal()->getName();
3130 }
3131 else if (const ExternalSymbolSDNode *ES =
3132 dyn_cast_or_null<const ExternalSymbolSDNode>(TargetAddr)) {
3133 Sym = ES->getSymbol();
3134 }
3135
3136 if (Sym.empty())
3137 return;
3138
3139 MachineFunction *MF = MI.getParent()->getParent();
3140 MCSymbol *S = MF->getContext().getOrCreateSymbol(Sym);
3141 LLVM_DEBUG(dbgs() << "Adding R_MIPS_JALR against " << Sym << "\n");
3142 MI.addOperand(MachineOperand::CreateMCSymbol(S, MipsII::MO_JALR));
3143 }
3144 }
3145}
3146
3147/// LowerCall - functions arguments are copied from virtual regs to
3148/// (physical regs)/(stack frame), CALLSEQ_START and CALLSEQ_END are emitted.
3149SDValue
3150MipsTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
3151 SmallVectorImpl<SDValue> &InVals) const {
3152 SelectionDAG &DAG = CLI.DAG;
3153 SDLoc DL = CLI.DL;
3154 SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
3155 SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
3156 SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
3157 SDValue Chain = CLI.Chain;
3158 SDValue Callee = CLI.Callee;
3159 bool &IsTailCall = CLI.IsTailCall;
3160 CallingConv::ID CallConv = CLI.CallConv;
3161 bool IsVarArg = CLI.IsVarArg;
3162
3163 MachineFunction &MF = DAG.getMachineFunction();
3164 MachineFrameInfo &MFI = MF.getFrameInfo();
3165 const TargetFrameLowering *TFL = Subtarget.getFrameLowering();
3166 MipsFunctionInfo *FuncInfo = MF.getInfo<MipsFunctionInfo>();
3167 bool IsPIC = isPositionIndependent();
3168
3169 // Analyze operands of the call, assigning locations to each operand.
3170 SmallVector<CCValAssign, 16> ArgLocs;
3171 MipsCCState CCInfo(
3172 CallConv, IsVarArg, DAG.getMachineFunction(), ArgLocs, *DAG.getContext(),
3173 MipsCCState::getSpecialCallingConvForCallee(Callee.getNode(), Subtarget));
3174
3175 const ExternalSymbolSDNode *ES =
3176 dyn_cast_or_null<const ExternalSymbolSDNode>(Callee.getNode());
3177
3178 // There is one case where CALLSEQ_START..CALLSEQ_END can be nested, which
3179 // is during the lowering of a call with a byval argument which produces
3180 // a call to memcpy. For the O32 case, this causes the caller to allocate
3181 // stack space for the reserved argument area for the callee, then recursively
3182 // again for the memcpy call. In the NEWABI case, this doesn't occur as those
3183 // ABIs mandate that the callee allocates the reserved argument area. We do
3184 // still produce nested CALLSEQ_START..CALLSEQ_END with zero space though.
3185 //
3186 // If the callee has a byval argument and memcpy is used, we are mandated
3187 // to already have produced a reserved argument area for the callee for O32.
3188 // Therefore, the reserved argument area can be reused for both calls.
3189 //
3190 // Other cases of calling memcpy cannot have a chain with a CALLSEQ_START
3191 // present, as we have yet to hook that node onto the chain.
3192 //
3193 // Hence, the CALLSEQ_START and CALLSEQ_END nodes can be eliminated in this
3194 // case. GCC does a similar trick, in that wherever possible, it calculates
3195 // the maximum outgoing argument area (including the reserved area), and
3196 // preallocates the stack space on entrance to the caller.
3197 //
3198 // FIXME: We should do the same for efficiency and space.
3199
3200 // Note: The check on the calling convention below must match
3201 // MipsABIInfo::GetCalleeAllocdArgSizeInBytes().
3202 bool MemcpyInByVal = ES &&
3203 StringRef(ES->getSymbol()) == StringRef("memcpy") &&
3204 CallConv != CallingConv::Fast &&
3205 Chain.getOpcode() == ISD::CALLSEQ_START;
3206
3207 // Allocate the reserved argument area. It seems strange to do this from the
3208 // caller side but removing it breaks the frame size calculation.
3209 unsigned ReservedArgArea =
3210 MemcpyInByVal ? 0 : ABI.GetCalleeAllocdArgSizeInBytes(CallConv);
3211 CCInfo.AllocateStack(ReservedArgArea, Align(1));
3212
3213 CCInfo.AnalyzeCallOperands(Outs, CC_Mips, CLI.getArgs(),
3214 ES ? ES->getSymbol() : nullptr);
3215
3216 // Get a count of how many bytes are to be pushed on the stack.
3217 unsigned StackSize = CCInfo.getStackSize();
3218
3219 // Call site info for function parameters tracking.
3220 MachineFunction::CallSiteInfo CSInfo;
3221
3222 // Check if it's really possible to do a tail call. Restrict it to functions
3223 // that are part of this compilation unit.
3224 bool InternalLinkage = false;
3225 if (IsTailCall) {
3226 IsTailCall = isEligibleForTailCallOptimization(
3227 CCInfo, StackSize, *MF.getInfo<MipsFunctionInfo>());
3228 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
3229 InternalLinkage = G->getGlobal()->hasInternalLinkage();
3230 IsTailCall &= (InternalLinkage || G->getGlobal()->hasLocalLinkage() ||
3231 G->getGlobal()->hasPrivateLinkage() ||
3232 G->getGlobal()->hasHiddenVisibility() ||
3233 G->getGlobal()->hasProtectedVisibility());
3234 }
3235 }
3236 if (!IsTailCall && CLI.CB && CLI.CB->isMustTailCall())
3237 report_fatal_error("failed to perform tail call elimination on a call "
3238 "site marked musttail");
3239
3240 if (IsTailCall)
3241 ++NumTailCalls;
3242
3243 // Chain is the output chain of the last Load/Store or CopyToReg node.
3244 // ByValChain is the output chain of the last Memcpy node created for copying
3245 // byval arguments to the stack.
3246 unsigned StackAlignment = TFL->getStackAlignment();
3247 StackSize = alignTo(StackSize, StackAlignment);
3248
3249 if (!(IsTailCall || MemcpyInByVal))
3250 Chain = DAG.getCALLSEQ_START(Chain, StackSize, 0, DL);
3251
3252 SDValue StackPtr =
3253 DAG.getCopyFromReg(Chain, DL, ABI.IsN64() ? Mips::SP_64 : Mips::SP,
3254 getPointerTy(DAG.getDataLayout()));
3255
3256 std::deque<std::pair<unsigned, SDValue>> RegsToPass;
3257 SmallVector<SDValue, 8> MemOpChains;
3258
3259 CCInfo.rewindByValRegsInfo();
3260
3261 // Walk the register/memloc assignments, inserting copies/loads.
3262 for (unsigned i = 0, e = ArgLocs.size(), OutIdx = 0; i != e; ++i, ++OutIdx) {
3263 SDValue Arg = OutVals[OutIdx];
3264 CCValAssign &VA = ArgLocs[i];
3265 MVT ValVT = VA.getValVT(), LocVT = VA.getLocVT();
3266 ISD::ArgFlagsTy Flags = Outs[OutIdx].Flags;
3267 bool UseUpperBits = false;
3268
3269 // ByVal Arg.
3270 if (Flags.isByVal()) {
3271 unsigned FirstByValReg, LastByValReg;
3272 unsigned ByValIdx = CCInfo.getInRegsParamsProcessed();
3273 CCInfo.getInRegsParamInfo(ByValIdx, FirstByValReg, LastByValReg);
3274
3275 assert(Flags.getByValSize() &&
3276 "ByVal args of size 0 should have been ignored by front-end.");
3277 assert(ByValIdx < CCInfo.getInRegsParamsCount());
3278 assert(!IsTailCall &&
3279 "Do not tail-call optimize if there is a byval argument.");
3280 passByValArg(Chain, DL, RegsToPass, MemOpChains, StackPtr, MFI, DAG, Arg,
3281 FirstByValReg, LastByValReg, Flags, Subtarget.isLittle(),
3282 VA);
3283 CCInfo.nextInRegsParam();
3284 continue;
3285 }
3286
3287 // Promote the value if needed.
3288 switch (VA.getLocInfo()) {
3289 default:
3290 llvm_unreachable("Unknown loc info!");
3291 case CCValAssign::Full:
3292 if (VA.isRegLoc()) {
3293 if ((ValVT == MVT::f32 && LocVT == MVT::i32) ||
3294 (ValVT == MVT::f64 && LocVT == MVT::i64) ||
3295 (ValVT == MVT::i64 && LocVT == MVT::f64))
3296 Arg = DAG.getNode(ISD::BITCAST, DL, LocVT, Arg);
3297 else if (ValVT == MVT::f64 && LocVT == MVT::i32) {
3298 SDValue Lo = DAG.getNode(MipsISD::ExtractElementF64, DL, MVT::i32,
3299 Arg, DAG.getConstant(0, DL, MVT::i32));
3300 SDValue Hi = DAG.getNode(MipsISD::ExtractElementF64, DL, MVT::i32,
3301 Arg, DAG.getConstant(1, DL, MVT::i32));
3302 if (!Subtarget.isLittle())
3303 std::swap(Lo, Hi);
3304
3305 assert(VA.needsCustom());
3306
3307 Register LocRegLo = VA.getLocReg();
3308 Register LocRegHigh = ArgLocs[++i].getLocReg();
3309 RegsToPass.push_back(std::make_pair(LocRegLo, Lo));
3310 RegsToPass.push_back(std::make_pair(LocRegHigh, Hi));
3311 continue;
3312 }
3313 }
3314 break;
3315 case CCValAssign::BCvt:
3316 Arg = DAG.getNode(ISD::BITCAST, DL, LocVT, Arg);
3317 break;
3318 case CCValAssign::SExtUpper:
3319 UseUpperBits = true;
3320 [[fallthrough]];
3321 case CCValAssign::SExt:
3322 Arg = DAG.getNode(ISD::SIGN_EXTEND, DL, LocVT, Arg);
3323 break;
3324 case CCValAssign::ZExtUpper:
3325 UseUpperBits = true;
3326 [[fallthrough]];
3327 case CCValAssign::ZExt:
3328 Arg = DAG.getNode(ISD::ZERO_EXTEND, DL, LocVT, Arg);
3329 break;
3330 case CCValAssign::AExtUpper:
3331 UseUpperBits = true;
3332 [[fallthrough]];
3333 case CCValAssign::AExt:
3334 Arg = DAG.getNode(ISD::ANY_EXTEND, DL, LocVT, Arg);
3335 break;
3336 }
3337
3338 if (UseUpperBits) {
3339 unsigned ValSizeInBits = Outs[OutIdx].ArgVT.getSizeInBits();
3340 unsigned LocSizeInBits = VA.getLocVT().getSizeInBits();
3341 Arg = DAG.getNode(
3342 ISD::SHL, DL, VA.getLocVT(), Arg,
3343 DAG.getConstant(LocSizeInBits - ValSizeInBits, DL, VA.getLocVT()));
3344 }
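// [Editor's worked example, not part of the original file] The shift amount
// used above: for an *Upper location, an i32 value carried in an i64 register
// is moved into bits 63..32, so the shift is LocSizeInBits - ValSizeInBits.
constexpr unsigned upperBitsShiftAmount(unsigned ValBits, unsigned LocBits) {
  return LocBits - ValBits;
}
static_assert(upperBitsShiftAmount(32, 64) == 32,
              "i32 promoted into an i64 slot shifts left by 32");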
3345
3346 // Arguments that can be passed in a register must be kept in the
3347 // RegsToPass vector.
3348 if (VA.isRegLoc()) {
3349 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
3350
3351 // If the parameter is passed through reg $D, which splits into
3352 // two physical registers, avoid creating call site info.
3353 if (Mips::AFGR64RegClass.contains(VA.getLocReg()))
3354 continue;
3355
3356 // Collect CSInfo about which register passes which parameter.
3357 const TargetOptions &Options = DAG.getTarget().Options;
3358 if (Options.SupportsDebugEntryValues)
3359 CSInfo.emplace_back(VA.getLocReg(), i);
3360
3361 continue;
3362 }
3363
3364 // Register arguments never reach this point; only memory locations remain.
3365 assert(VA.isMemLoc());
3366
3367 // Emit an ISD::STORE that stores the
3368 // parameter value to a stack location.
3369 MemOpChains.push_back(passArgOnStack(StackPtr, VA.getLocMemOffset(),
3370 Chain, Arg, DL, IsTailCall, DAG));
3371 }
3372
3373 // Transform all store nodes into one single node because all store
3374 // nodes are independent of each other.
3375 if (!MemOpChains.empty())
3376 Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains);
3377
3378 // If the callee is a GlobalAddress/ExternalSymbol node (quite common, every
3379 // direct call is) turn it into a TargetGlobalAddress/TargetExternalSymbol
3380 // node so that legalize doesn't hack it.
3381
3382 EVT Ty = Callee.getValueType();
3383 bool GlobalOrExternal = false, IsCallReloc = false;
3384
3385 // The long-calls feature is ignored when generating PIC.
3386 // While we do not support -mshared / -mno-shared properly,
3387 // ignore long-calls under -mabicalls too.
3388 if (!Subtarget.isABICalls() && !IsPIC) {
3389 // If the function should be called using "long call",
3390 // get its address into a register to avoid using
3391 // the `jal` instruction for the direct call.
3392 if (auto *N = dyn_cast<ExternalSymbolSDNode>(Callee)) {
3393 if (Subtarget.useLongCalls())
3394 Callee = Subtarget.hasSym32()
3395 ? getAddrNonPIC(N, SDLoc(N), Ty, DAG)
3396 : getAddrNonPICSym64(N, SDLoc(N), Ty, DAG);
3397 } else if (auto *N = dyn_cast<GlobalAddressSDNode>(Callee)) {
3398 bool UseLongCalls = Subtarget.useLongCalls();
3399 // If the function has a long-call/far/near attribute,
3400 // it overrides the command-line switch passed to the backend.
3401 if (auto *F = dyn_cast<Function>(N->getGlobal())) {
3402 if (F->hasFnAttribute("long-call"))
3403 UseLongCalls = true;
3404 else if (F->hasFnAttribute("short-call"))
3405 UseLongCalls = false;
3406 }
3407 if (UseLongCalls)
3408 Callee = Subtarget.hasSym32()
3409 ? getAddrNonPIC(N, SDLoc(N), Ty, DAG)
3410 : getAddrNonPICSym64(N, SDLoc(N), Ty, DAG);
3411 }
3412 }
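// [Editor's sketch, not part of the original file] The precedence implemented
// above, restated as a hypothetical helper: a per-function "long-call" or
// "short-call" attribute overrides whatever -mlong-calls default reached the
// backend.
constexpr bool resolveUseLongCalls(bool CmdLineDefault, bool HasLongCallAttr,
                                   bool HasShortCallAttr) {
  if (HasLongCallAttr)
    return true;
  if (HasShortCallAttr)
    return false;
  return CmdLineDefault;
}
static_assert(!resolveUseLongCalls(true, false, true),
              "a short-call attribute beats -mlong-calls");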
3413
3414 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
3415 if (IsPIC) {
3416 const GlobalValue *Val = G->getGlobal();
3417 InternalLinkage = Val->hasInternalLinkage();
3418
3419 if (InternalLinkage)
3420 Callee = getAddrLocal(G, DL, Ty, DAG, ABI.IsN32() || ABI.IsN64());
3421 else if (Subtarget.useXGOT()) {
3422 Callee = getAddrGlobalLargeGOT(G, DL, Ty, DAG, MipsII::MO_CALL_HI16,
3423 MipsII::MO_CALL_LO16, Chain,
3424 FuncInfo->callPtrInfo(MF, Val));
3425 IsCallReloc = true;
3426 } else {
3427 Callee = getAddrGlobal(G, DL, Ty, DAG, MipsII::MO_GOT_CALL, Chain,
3428 FuncInfo->callPtrInfo(MF, Val));
3429 IsCallReloc = true;
3430 }
3431 } else
3432 Callee = DAG.getTargetGlobalAddress(G->getGlobal(), DL,
3433 getPointerTy(DAG.getDataLayout()), 0,
3434 MipsII::MO_NO_FLAG);
3435 GlobalOrExternal = true;
3436 }
3437 else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
3438 const char *Sym = S->getSymbol();
3439
3440 if (!IsPIC) // static
3441 Callee = DAG.getTargetExternalSymbol(
3442 Sym, getPointerTy(DAG.getDataLayout()), MipsII::MO_NO_FLAG);
3443 else if (Subtarget.useXGOT()) {
3444 Callee = getAddrGlobalLargeGOT(S, DL, Ty, DAG, MipsII::MO_CALL_HI16,
3445 MipsII::MO_CALL_LO16, Chain,
3446 FuncInfo->callPtrInfo(MF, Sym));
3447 IsCallReloc = true;
3448 } else { // PIC
3449 Callee = getAddrGlobal(S, DL, Ty, DAG, MipsII::MO_GOT_CALL, Chain,
3450 FuncInfo->callPtrInfo(MF, Sym));
3451 IsCallReloc = true;
3452 }
3453
3454 GlobalOrExternal = true;
3455 }
3456
3457 SmallVector<SDValue, 8> Ops(1, Chain);
3458 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
3459
3460 getOpndList(Ops, RegsToPass, IsPIC, GlobalOrExternal, InternalLinkage,
3461 IsCallReloc, CLI, Callee, Chain);
3462
3463 if (IsTailCall) {
3464 MF.getFrameInfo().setHasTailCall();
3465 SDValue Ret = DAG.getNode(MipsISD::TailCall, DL, MVT::Other, Ops);
3466 DAG.addCallSiteInfo(Ret.getNode(), std::move(CSInfo));
3467 return Ret;
3468 }
3469
3470 Chain = DAG.getNode(MipsISD::JmpLink, DL, NodeTys, Ops);
3471 SDValue InGlue = Chain.getValue(1);
3472
3473 DAG.addCallSiteInfo(Chain.getNode(), std::move(CSInfo));
3474
3475 // Create the CALLSEQ_END node in the case where it is not a call to
3476 // memcpy.
3477 if (!(MemcpyInByVal)) {
3478 Chain = DAG.getCALLSEQ_END(Chain, StackSize, 0, InGlue, DL);
3479 InGlue = Chain.getValue(1);
3480 }
3481
3482 // Handle result values, copying them out of physregs into vregs that we
3483 // return.
3484 return LowerCallResult(Chain, InGlue, CallConv, IsVarArg, Ins, DL, DAG,
3485 InVals, CLI);
3486}
3487
3488/// LowerCallResult - Lower the result values of a call into the
3489/// appropriate copies out of appropriate physical registers.
3490SDValue MipsTargetLowering::LowerCallResult(
3491 SDValue Chain, SDValue InGlue, CallingConv::ID CallConv, bool IsVarArg,
3492 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
3493 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals,
3494 TargetLowering::CallLoweringInfo &CLI) const {
3495 // Assign locations to each value returned by this call.
3496 SmallVector<CCValAssign, 16> RVLocs;
3497 MipsCCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
3498 *DAG.getContext());
3499
3500 const ExternalSymbolSDNode *ES =
3501 dyn_cast_or_null<const ExternalSymbolSDNode>(CLI.Callee.getNode());
3502 CCInfo.AnalyzeCallResult(Ins, RetCC_Mips, CLI.RetTy,
3503 ES ? ES->getSymbol() : nullptr);
3504
3505 // Copy all of the result registers out of their specified physreg.
3506 for (unsigned i = 0; i != RVLocs.size(); ++i) {
3507 CCValAssign &VA = RVLocs[i];
3508 assert(VA.isRegLoc() && "Can only return in registers!");
3509
3510 SDValue Val = DAG.getCopyFromReg(Chain, DL, RVLocs[i].getLocReg(),
3511 RVLocs[i].getLocVT(), InGlue);
3512 Chain = Val.getValue(1);
3513 InGlue = Val.getValue(2);
3514
3515 if (VA.isUpperBitsInLoc()) {
3516 unsigned ValSizeInBits = Ins[i].ArgVT.getSizeInBits();
3517 unsigned LocSizeInBits = VA.getLocVT().getSizeInBits();
3518 unsigned Shift =
3519 VA.getLocInfo() == CCValAssign::ZExtUpper ? ISD::SRL : ISD::SRA;
3520 Val = DAG.getNode(
3521 Shift, DL, VA.getLocVT(), Val,
3522 DAG.getConstant(LocSizeInBits - ValSizeInBits, DL, VA.getLocVT()));
3523 }
3524
3525 switch (VA.getLocInfo()) {
3526 default:
3527 llvm_unreachable("Unknown loc info!");
3528 case CCValAssign::Full:
3529 break;
3530 case CCValAssign::BCvt:
3531 Val = DAG.getNode(ISD::BITCAST, DL, VA.getValVT(), Val);
3532 break;
3533 case CCValAssign::AExt:
3534 case CCValAssign::AExtUpper:
3535 Val = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Val);
3536 break;
3537 case CCValAssign::ZExt:
3538 case CCValAssign::ZExtUpper:
3539 Val = DAG.getNode(ISD::AssertZext, DL, VA.getLocVT(), Val,
3540 DAG.getValueType(VA.getValVT()));
3541 Val = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Val);
3542 break;
3543 case CCValAssign::SExt:
3544 case CCValAssign::SExtUpper:
3545 Val = DAG.getNode(ISD::AssertSext, DL, VA.getLocVT(), Val,
3546 DAG.getValueType(VA.getValVT()));
3547 Val = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Val);
3548 break;
3549 }
3550
3551 InVals.push_back(Val);
3552 }
3553
3554 return Chain;
3555}
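// [Editor's sketch in plain C++, not the DAG API] Why the AssertZext +
// TRUNCATE pairing above is lossless: AssertZext records that the upper bits
// of the location are already zero, so truncating to the value type drops
// nothing. truncateKnownZext is a hypothetical illustration.
constexpr unsigned truncateKnownZext(unsigned long long Loc) {
  return static_cast<unsigned>(Loc); // upper 32 bits asserted to be zero
}
static_assert(truncateKnownZext(0xDEADBEEFull) == 0xDEADBEEFu,
              "the low 32 bits carry the whole value");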
3556
3557 static SDValue UnpackFromArgumentSlot(SDValue Val, const CCValAssign &VA,
3558 EVT ArgVT, const SDLoc &DL,
3559 SelectionDAG &DAG) {
3560 MVT LocVT = VA.getLocVT();
3561 EVT ValVT = VA.getValVT();
3562
3563 // Shift into the upper bits if necessary.
3564 switch (VA.getLocInfo()) {
3565 default:
3566 break;
3567 case CCValAssign::AExtUpper:
3568 case CCValAssign::SExtUpper:
3569 case CCValAssign::ZExtUpper: {
3570 unsigned ValSizeInBits = ArgVT.getSizeInBits();
3571 unsigned LocSizeInBits = VA.getLocVT().getSizeInBits();
3572 unsigned Opcode =
3573 VA.getLocInfo() == CCValAssign::ZExtUpper ? ISD::SRL : ISD::SRA;
3574 Val = DAG.getNode(
3575 Opcode, DL, VA.getLocVT(), Val,
3576 DAG.getConstant(LocSizeInBits - ValSizeInBits, DL, VA.getLocVT()));
3577 break;
3578 }
3579 }
3580
3581 // If this is a value smaller than the argument slot size (32-bit for O32,
3582 // 64-bit for N32/N64), it has been promoted in some way to the argument slot
3583 // size. Extract the value and insert any appropriate assertions regarding
3584 // sign/zero extension.
3585 switch (VA.getLocInfo()) {
3586 default:
3587 llvm_unreachable("Unknown loc info!");
3588 case CCValAssign::Full:
3589 break;
3590 case CCValAssign::AExtUpper:
3591 case CCValAssign::AExt:
3592 Val = DAG.getNode(ISD::TRUNCATE, DL, ValVT, Val);
3593 break;
3594 case CCValAssign::SExtUpper:
3595 case CCValAssign::SExt:
3596 Val = DAG.getNode(ISD::AssertSext, DL, LocVT, Val, DAG.getValueType(ValVT));
3597 Val = DAG.getNode(ISD::TRUNCATE, DL, ValVT, Val);
3598 break;
3599 case CCValAssign::ZExtUpper:
3600 case CCValAssign::ZExt:
3601 Val = DAG.getNode(ISD::AssertZext, DL, LocVT, Val, DAG.getValueType(ValVT));
3602 Val = DAG.getNode(ISD::TRUNCATE, DL, ValVT, Val);
3603 break;
3604 case CCValAssign::BCvt:
3605 Val = DAG.getNode(ISD::BITCAST, DL, ValVT, Val);
3606 break;
3607 }
3608
3609 return Val;
3610}
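// [Editor's illustration, not part of the original file] The SRL/SRA choice
// above: ZExtUpper unpacks with a logical shift (zero fill, ISD::SRL), while
// SExtUpper and AExtUpper use an arithmetic shift (sign fill, ISD::SRA).
// The zero-fill case in plain C++:
constexpr unsigned long long unpackZExtUpper(unsigned long long Loc,
                                             unsigned Shift) {
  return Loc >> Shift; // logical shift, mirrors ISD::SRL
}
static_assert(unpackZExtUpper(0xDEADBEEF00000000ull, 32) == 0xDEADBEEFull,
              "the value returns from the upper half unchanged");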
3611
3612//===----------------------------------------------------------------------===//
3613// Formal Arguments Calling Convention Implementation
3614//===----------------------------------------------------------------------===//
3615/// LowerFormalArguments - transform physical registers into virtual registers
3616/// and generate load operations for arguments places on the stack.
3617SDValue MipsTargetLowering::LowerFormalArguments(
3618 SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
3619 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
3620 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
3621 MachineFunction &MF = DAG.getMachineFunction();
3622 MachineFrameInfo &MFI = MF.getFrameInfo();
3623 MipsFunctionInfo *MipsFI = MF.getInfo<MipsFunctionInfo>();
3624
3625 MipsFI->setVarArgsFrameIndex(0);
3626
3627 // Used with varargs to accumulate store chains.
3628 std::vector<SDValue> OutChains;
3629
3630 // Assign locations to all of the incoming arguments.
3631 SmallVector<CCValAssign, 16> ArgLocs;
3632 MipsCCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), ArgLocs,
3633 *DAG.getContext());
3634 CCInfo.AllocateStack(ABI.GetCalleeAllocdArgSizeInBytes(CallConv), Align(1));
3635 const Function &Func = DAG.getMachineFunction().getFunction();
3636 Function::const_arg_iterator FuncArg = Func.arg_begin();
3637
3638 if (Func.hasFnAttribute("interrupt") && !Func.arg_empty())
3640 "Functions with the interrupt attribute cannot have arguments!");
3641
3642 CCInfo.AnalyzeFormalArguments(Ins, CC_Mips_FixedArg);
3643 MipsFI->setFormalArgInfo(CCInfo.getStackSize(),
3644 CCInfo.getInRegsParamsCount() > 0);
3645
3646 unsigned CurArgIdx = 0;
3647 CCInfo.rewindByValRegsInfo();
3648
3649 for (unsigned i = 0, e = ArgLocs.size(), InsIdx = 0; i != e; ++i, ++InsIdx) {
3650 CCValAssign &VA = ArgLocs[i];
3651 if (Ins[InsIdx].isOrigArg()) {
3652 std::advance(FuncArg, Ins[InsIdx].getOrigArgIndex() - CurArgIdx);
3653 CurArgIdx = Ins[InsIdx].getOrigArgIndex();
3654 }
3655 EVT ValVT = VA.getValVT();
3656 ISD::ArgFlagsTy Flags = Ins[InsIdx].Flags;
3657 bool IsRegLoc = VA.isRegLoc();
3658
3659 if (Flags.isByVal()) {
3660 assert(Ins[InsIdx].isOrigArg() && "Byval arguments cannot be implicit");
3661 unsigned FirstByValReg, LastByValReg;
3662 unsigned ByValIdx = CCInfo.getInRegsParamsProcessed();
3663 CCInfo.getInRegsParamInfo(ByValIdx, FirstByValReg, LastByValReg);
3664
3665 assert(Flags.getByValSize() &&
3666 "ByVal args of size 0 should have been ignored by front-end.");
3667 assert(ByValIdx < CCInfo.getInRegsParamsCount());
3668 copyByValRegs(Chain, DL, OutChains, DAG, Flags, InVals, &*FuncArg,
3669 FirstByValReg, LastByValReg, VA, CCInfo);
3670 CCInfo.nextInRegsParam();
3671 continue;
3672 }
3673
3674 // Arguments passed in registers.
3675 if (IsRegLoc) {
3676 MVT RegVT = VA.getLocVT();
3677 Register ArgReg = VA.getLocReg();
3678 const TargetRegisterClass *RC = getRegClassFor(RegVT);
3679
3680 // Transform arguments passed in
3681 // physical registers into virtual ones.
3682 unsigned Reg = addLiveIn(DAG.getMachineFunction(), ArgReg, RC);
3683 SDValue ArgValue = DAG.getCopyFromReg(Chain, DL, Reg, RegVT);
3684
3685 ArgValue =
3686 UnpackFromArgumentSlot(ArgValue, VA, Ins[InsIdx].ArgVT, DL, DAG);
3687
3688 // Handle floating point arguments passed in integer registers and
3689 // long double arguments passed in floating point registers.
3690 if ((RegVT == MVT::i32 && ValVT == MVT::f32) ||
3691 (RegVT == MVT::i64 && ValVT == MVT::f64) ||
3692 (RegVT == MVT::f64 && ValVT == MVT::i64))
3693 ArgValue = DAG.getNode(ISD::BITCAST, DL, ValVT, ArgValue);
3694 else if (ABI.IsO32() && RegVT == MVT::i32 &&
3695 ValVT == MVT::f64) {
3696 assert(VA.needsCustom() && "Expected custom argument for f64 split");
3697 CCValAssign &NextVA = ArgLocs[++i];
3698 unsigned Reg2 =
3699 addLiveIn(DAG.getMachineFunction(), NextVA.getLocReg(), RC);
3700 SDValue ArgValue2 = DAG.getCopyFromReg(Chain, DL, Reg2, RegVT);
3701 if (!Subtarget.isLittle())
3702 std::swap(ArgValue, ArgValue2);
3703 ArgValue = DAG.getNode(MipsISD::BuildPairF64, DL, MVT::f64,
3704 ArgValue, ArgValue2);
3705 }
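// [Editor's sketch in plain C++, not the DAG API] The O32 f64 split handled
// above: a double arrives in two 32-bit registers, and on big-endian targets
// the two halves are swapped before being paired back into an f64.
// buildPairF64Sketch is a hypothetical bit-level analogue of BuildPairF64.
static unsigned long long buildPairF64Sketch(unsigned Lo, unsigned Hi,
                                             bool IsLittle) {
  if (!IsLittle) {
    unsigned Tmp = Lo; // big-endian: the first register holds the high word,
    Lo = Hi;           // mirroring the std::swap(ArgValue, ArgValue2) above
    Hi = Tmp;
  }
  return (static_cast<unsigned long long>(Hi) << 32) | Lo;
}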
3706
3707 InVals.push_back(ArgValue);
3708 } else { // !VA.isRegLoc()
3709 MVT LocVT = VA.getLocVT();
3710
3711 assert(!VA.needsCustom() && "unexpected custom memory argument");
3712
3713 if (ABI.IsO32()) {
3714 // We ought to be able to use LocVT directly but O32 sets it to i32
3715 // when allocating floating point values to integer registers.
3716 // This shouldn't influence how we load the value into registers unless
3717 // we are targeting softfloat.
3718 if (Subtarget.useSoftFloat())
3719 LocVT = VA.getValVT();
3720 }
3721
3722 // Only arguments passed on the stack should make it here.
3723 assert(VA.isMemLoc());
3724
3725 // The stack pointer offset is relative to the caller stack frame.
3726 int FI = MFI.CreateFixedObject(LocVT.getSizeInBits() / 8,
3727 VA.getLocMemOffset(), true);
3728
3729 // Create load nodes to retrieve arguments from the stack
3730 SDValue FIN = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
3731 SDValue ArgValue = DAG.getLoad(
3732 LocVT, DL, Chain, FIN,
3733 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI));
3734 OutChains.push_back(ArgValue.getValue(1));
3735
3736 ArgValue =
3737 UnpackFromArgumentSlot(ArgValue, VA, Ins[InsIdx].ArgVT, DL, DAG);
3738
3739 InVals.push_back(ArgValue);
3740 }
3741 }
3742
3743 for (unsigned i = 0, e = ArgLocs.size(), InsIdx = 0; i != e; ++i, ++InsIdx) {
3744
3745 if (ArgLocs[i].needsCustom()) {
3746 ++i;
3747 continue;
3748 }
3749
3750 // The MIPS ABIs for returning structs by value require that we copy
3751 // the sret argument into $v0 for the return. Save the argument into
3752 // a virtual register so that we can access it from the return points.
3753 if (Ins[InsIdx].Flags.isSRet()) {
3754 unsigned Reg = MipsFI->getSRetReturnReg();
3755 if (!Reg) {
3756 Reg = MF.getRegInfo().createVirtualRegister(
3757 getRegClassFor(ABI.IsN64() ? MVT::i64 : MVT::i32));
3758 MipsFI->setSRetReturnReg(Reg);
3759 }
3760 SDValue Copy = DAG.getCopyToReg(DAG.getEntryNode(), DL, Reg, InVals[i]);
3761 Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Copy, Chain);
3762 break;
3763 }
3764 }
3765
3766 if (IsVarArg)
3767 writeVarArgRegs(OutChains, Chain, DL, DAG, CCInfo);
3768
3769 // All stores are grouped in one node to allow the matching between
3770 // the size of Ins and InVals. This only happens for vararg functions.
3771 if (!OutChains.empty()) {
3772 OutChains.push_back(Chain);
3773 Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, OutChains);
3774 }
3775
3776 return Chain;
3777}
3778
3779//===----------------------------------------------------------------------===//
3780// Return Value Calling Convention Implementation
3781//===----------------------------------------------------------------------===//
3782
3783bool
3784MipsTargetLowering::CanLowerReturn(CallingConv::ID CallConv,
3785 MachineFunction &MF, bool IsVarArg,
3786 const SmallVectorImpl<ISD::OutputArg> &Outs,
3787 LLVMContext &Context) const {
3788 SmallVector<CCValAssign, 16> RVLocs;
3789 MipsCCState CCInfo(CallConv, IsVarArg, MF, RVLocs, Context);
3790 return CCInfo.CheckReturn(Outs, RetCC_Mips);
3791}
3792
3793bool MipsTargetLowering::shouldSignExtendTypeInLibCall(EVT Type,
3794 bool IsSigned) const {
3795 if ((ABI.IsN32() || ABI.IsN64()) && Type == MVT::i32)
3796 return true;
3797
3798 return IsSigned;
3799}
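// [Editor's sketch, not part of the original file] The rule above as a
// hypothetical truth table: under N32/N64 an i32 libcall argument is always
// sign-extended (MIPS64 keeps 32-bit values in sign-extended canonical form);
// otherwise the value's own signedness decides.
constexpr bool signExtendI32LibCallArg(bool IsN32OrN64, bool IsSigned) {
  return IsN32OrN64 ? true : IsSigned;
}
static_assert(signExtendI32LibCallArg(true, false),
              "even an unsigned i32 is sign-extended on N32/N64");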
3800
3801SDValue
3802MipsTargetLowering::LowerInterruptReturn(SmallVectorImpl<SDValue> &RetOps,
3803 const SDLoc &DL,
3804 SelectionDAG &DAG) const {
3805 MachineFunction &MF = DAG.getMachineFunction();
3806 MipsFunctionInfo *MipsFI = MF.getInfo<MipsFunctionInfo>();
3807
3808 MipsFI->setISR();
3809
3810 return DAG.getNode(MipsISD::ERet, DL, MVT::Other, RetOps);
3811}
3812
3813SDValue
3814MipsTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
3815 bool IsVarArg,
3817 const SmallVectorImpl<SDValue> &OutVals,
3818 const SDLoc &DL, SelectionDAG &DAG) const {
3819 // CCValAssign - represents the assignment of
3820 // the return value to a location.
3821 SmallVector<CCValAssign, 16> RVLocs;
3822 MachineFunction &MF = DAG.getMachineFunction();
3823
3824 // CCState - Info about the registers and stack slot.
3825 MipsCCState CCInfo(CallConv, IsVarArg, MF, RVLocs, *DAG.getContext());
3826
3827 // Analyze return values.
3828 CCInfo.AnalyzeReturn(Outs, RetCC_Mips);
3829
3830 SDValue Glue;
3831 SmallVector<SDValue, 4> RetOps(1, Chain);
3832
3833 // Copy the result values into the output registers.
3834 for (unsigned i = 0; i != RVLocs.size(); ++i) {
3835 SDValue Val = OutVals[i];
3836 CCValAssign &VA = RVLocs[i];
3837 assert(VA.isRegLoc() && "Can only return in registers!");
3838 bool UseUpperBits = false;
3839
3840 switch (VA.getLocInfo()) {
3841 default:
3842 llvm_unreachable("Unknown loc info!");
3843 case CCValAssign::Full:
3844 break;
3845 case CCValAssign::BCvt:
3846 Val = DAG.getNode(ISD::BITCAST, DL, VA.getLocVT(), Val);
3847 break;
3848 case CCValAssign::AExtUpper:
3849 UseUpperBits = true;
3850 [[fallthrough]];
3851 case CCValAssign::AExt:
3852 Val = DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), Val);
3853 break;
3854 case CCValAssign::ZExtUpper:
3855 UseUpperBits = true;
3856 [[fallthrough]];
3857 case CCValAssign::ZExt:
3858 Val = DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), Val);
3859 break;
3860 case CCValAssign::SExtUpper:
3861 UseUpperBits = true;
3862 [[fallthrough]];
3863 case CCValAssign::SExt:
3864 Val = DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), Val);
3865 break;
3866 }
3867
3868 if (UseUpperBits) {
3869 unsigned ValSizeInBits = Outs[i].ArgVT.getSizeInBits();
3870 unsigned LocSizeInBits = VA.getLocVT().getSizeInBits();
3871 Val = DAG.getNode(
3872 ISD::SHL, DL, VA.getLocVT(), Val,
3873 DAG.getConstant(LocSizeInBits - ValSizeInBits, DL, VA.getLocVT()));
3874 }
3875
3876 Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Val, Glue);
3877
3878 // Guarantee that all emitted copies are stuck together with flags.
3879 Glue = Chain.getValue(1);
3880 RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
3881 }
3882
3883 // The MIPS ABIs for returning structs by value require that we copy
3884 // the sret argument into $v0 for the return. We saved the argument into
3885 // a virtual register in the entry block, so now we copy the value out
3886 // and into $v0.
3887 if (MF.getFunction().hasStructRetAttr()) {
3888 MipsFunctionInfo *MipsFI = MF.getInfo<MipsFunctionInfo>();
3889 unsigned Reg = MipsFI->getSRetReturnReg();
3890
3891 if (!Reg)
3892 llvm_unreachable("sret virtual register not created in the entry block");
3893 SDValue Val =
3894 DAG.getCopyFromReg(Chain, DL, Reg, getPointerTy(DAG.getDataLayout()));
3895 unsigned V0 = ABI.IsN64() ? Mips::V0_64 : Mips::V0;
3896
3897 Chain = DAG.getCopyToReg(Chain, DL, V0, Val, Glue);
3898 Glue = Chain.getValue(1);
3899 RetOps.push_back(DAG.getRegister(V0, getPointerTy(DAG.getDataLayout())));
3900 }
3901
3902 RetOps[0] = Chain; // Update chain.
3903
3904 // Add the glue if we have it.
3905 if (Glue.getNode())
3906 RetOps.push_back(Glue);
3907
3908 // ISRs must use "eret".
3909 if (DAG.getMachineFunction().getFunction().hasFnAttribute("interrupt"))
3910 return LowerInterruptReturn(RetOps, DL, DAG);
3911
3912 // Standard return on Mips is a "jr $ra"
3913 return DAG.getNode(MipsISD::Ret, DL, MVT::Other, RetOps);
3914}
3915
3916//===----------------------------------------------------------------------===//
3917// Mips Inline Assembly Support
3918//===----------------------------------------------------------------------===//
3919
3920/// getConstraintType - Given a constraint letter, return the type of
3921/// constraint it is for this target.
3922 MipsTargetLowering::ConstraintType
3923 MipsTargetLowering::getConstraintType(StringRef Constraint) const {
3924 // Mips specific constraints
3925 // GCC config/mips/constraints.md
3926 //
3927 // 'd' : An address register. Equivalent to r
3928 // unless generating MIPS16 code.
3929 // 'y' : Equivalent to r; retained for
3930 // backwards compatibility.
3931 // 'c' : A register suitable for use in an indirect
3932 // jump. This will always be $25 for -mabicalls.
3933 // 'l' : The lo register. 1 word storage.
3934 // 'x' : The hilo register pair. Double word storage.
3935 if (Constraint.size() == 1) {
3936 switch (Constraint[0]) {
3937 default : break;
3938 case 'd':
3939 case 'y':
3940 case 'f':
3941 case 'c':
3942 case 'l':
3943 case 'x':
3944 return C_RegisterClass;
3945 case 'R':
3946 return C_Memory;
3947 }
3948 }
3949
3950 if (Constraint == "ZC")
3951 return C_Memory;
3952
3953 return TargetLowering::getConstraintType(Constraint);
3954}
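// [Editor's usage sketch, user code rather than part of this file] How the
// single-letter constraints classified above typically appear in GCC-style
// inline assembly: 'd' requests a general-purpose register. addViaInlineAsm
// is hypothetical and only meaningful when compiling for a MIPS target.
static int addViaInlineAsm(int A, int B) {
  int R;
  asm("addu %0, %1, %2" : "=d"(R) : "d"(A), "d"(B));
  return R;
}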
3955
3956/// Examine constraint type and operand type and determine a weight value.
3957/// This object must already have been set up with the operand type
3958/// and the current alternative constraint selected.
3959 TargetLowering::ConstraintWeight
3960 MipsTargetLowering::getSingleConstraintMatchWeight(
3961 AsmOperandInfo &info, const char *constraint) const {
3962 ConstraintWeight weight = CW_Invalid;
3963 Value *CallOperandVal = info.CallOperandVal;
3964 // If we don't have a value, we can't do a match,
3965 // but allow it at the lowest weight.
3966 if (!CallOperandVal)
3967 return CW_Default;
3968 Type *type = CallOperandVal->getType();
3969 // Look at the constraint type.
3970 switch (*constraint) {
3971 default:
3972 weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
3973 break;
3974 case 'd':
3975 case 'y':
3976 if (type->isIntegerTy())
3977 weight = CW_Register;
3978 break;
3979 case 'f': // FPU or MSA register
3980 if (Subtarget.hasMSA() && type->isVectorTy() &&
3981 type->getPrimitiveSizeInBits().getFixedValue() == 128)
3982 weight = CW_Register;
3983 else if (type->isFloatTy())
3984 weight = CW_Register;
3985 break;
3986 case 'c': // $25 for indirect jumps
3987 case 'l': // lo register
3988 case 'x': // hilo register pair
3989 if (type->isIntegerTy())
3990 weight = CW_SpecificReg;
3991 break;
3992 case 'I': // signed 16 bit immediate
3993 case 'J': // integer zero
3994 case 'K': // unsigned 16 bit immediate
3995 case 'L': // signed 32 bit immediate where lower 16 bits are 0
3996 case 'N': // immediate in the range of -65535 to -1 (inclusive)
3997 case 'O': // signed 15 bit immediate (+- 16383)
3998 case 'P': // immediate in the range of 65535 to 1 (inclusive)
3999 if (isa<ConstantInt>(CallOperandVal))
4000 weight = CW_Constant;
4001 break;
4002 case 'R':
4003 weight = CW_Memory;
4004 break;
4005 }
4006 return weight;
4007}
4008
4009/// This is a helper function to parse a physical register string and split it
4010/// into non-numeric and numeric parts (Prefix and Reg). The first boolean flag
4011/// that is returned indicates whether parsing was successful. The second flag
4012/// is true if the numeric part exists.
4013static std::pair<bool, bool> parsePhysicalReg(StringRef C, StringRef &Prefix,
4014 unsigned long long &Reg) {
4015 if (C.front() != '{' || C.back() != '}')
4016 return std::make_pair(false, false);
4017
4018 // Search for the first numeric character.
4019 StringRef::const_iterator I, B = C.begin() + 1, E =