LLVM 19.0.0git
MipsISelLowering.cpp
Go to the documentation of this file.
1//===- MipsISelLowering.cpp - Mips DAG Lowering Implementation ------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file defines the interfaces that Mips uses to lower LLVM code into a
10// selection DAG.
11//
12//===----------------------------------------------------------------------===//
13
14#include "MipsISelLowering.h"
18#include "MipsCCState.h"
19#include "MipsInstrInfo.h"
20#include "MipsMachineFunction.h"
21#include "MipsRegisterInfo.h"
22#include "MipsSubtarget.h"
23#include "MipsTargetMachine.h"
25#include "llvm/ADT/APFloat.h"
26#include "llvm/ADT/ArrayRef.h"
28#include "llvm/ADT/Statistic.h"
29#include "llvm/ADT/StringRef.h"
51#include "llvm/IR/CallingConv.h"
52#include "llvm/IR/Constants.h"
53#include "llvm/IR/DataLayout.h"
54#include "llvm/IR/DebugLoc.h"
56#include "llvm/IR/Function.h"
57#include "llvm/IR/GlobalValue.h"
58#include "llvm/IR/Type.h"
59#include "llvm/IR/Value.h"
60#include "llvm/MC/MCContext.h"
70#include <algorithm>
71#include <cassert>
72#include <cctype>
73#include <cstdint>
74#include <deque>
75#include <iterator>
76#include <utility>
77#include <vector>
78
79using namespace llvm;
80
81#define DEBUG_TYPE "mips-lower"
82
// Counts lowered tail calls for -stats reporting; incremented by the
// call-lowering code (not visible in this chunk).
STATISTIC(NumTailCalls, "Number of tail calls");

// Command-line escape hatch: when set, do not emit a trap on integer
// division by zero (per the cl::desc below). Default is false, i.e. the
// zero-division check is emitted. Consumers of this flag are elsewhere in
// this file -- not visible in this chunk.
static cl::opt<bool>
NoZeroDivCheck("mno-check-zero-division", cl::Hidden,
               cl::desc("MIPS: Don't trap on integer division by zero."),
               cl::init(false));
89
91
// Table of eight consecutive 64-bit double-precision FP registers
// (D12_64 .. D19_64). NOTE(review): presumably an allocation-order table for
// FP argument passing; the consuming calling-convention code is not visible
// in this chunk -- confirm usage there.
static const MCPhysReg Mips64DPRegs[8] = {
  Mips::D12_64, Mips::D13_64, Mips::D14_64, Mips::D15_64,
  Mips::D16_64, Mips::D17_64, Mips::D18_64, Mips::D19_64
};
96
97// The MIPS MSA ABI passes vector arguments in the integer register set.
98// The number of integer registers used is dependant on the ABI used.
101 EVT VT) const {
102 if (!VT.isVector())
103 return getRegisterType(Context, VT);
104
106 return Subtarget.isABI_O32() || VT.getSizeInBits() == 32 ? MVT::i32
107 : MVT::i64;
109}
110
113 EVT VT) const {
114 if (VT.isVector()) {
116 return divideCeil(VT.getSizeInBits(), Subtarget.isABI_O32() ? 32 : 64);
117 return VT.getVectorNumElements() *
119 }
121}
122
124 LLVMContext &Context, CallingConv::ID CC, EVT VT, EVT &IntermediateVT,
125 unsigned &NumIntermediates, MVT &RegisterVT) const {
126 if (VT.isPow2VectorType()) {
127 IntermediateVT = getRegisterTypeForCallingConv(Context, CC, VT);
128 RegisterVT = IntermediateVT.getSimpleVT();
129 NumIntermediates = getNumRegistersForCallingConv(Context, CC, VT);
130 return NumIntermediates;
131 }
132 IntermediateVT = VT.getVectorElementType();
133 NumIntermediates = VT.getVectorNumElements();
134 RegisterVT = getRegisterType(Context, IntermediateVT);
135 return NumIntermediates * getNumRegisters(Context, IntermediateVT);
136}
137
141 return DAG.getRegister(FI->getGlobalBaseReg(MF), Ty);
142}
143
144SDValue MipsTargetLowering::getTargetNode(GlobalAddressSDNode *N, EVT Ty,
145 SelectionDAG &DAG,
146 unsigned Flag) const {
147 return DAG.getTargetGlobalAddress(N->getGlobal(), SDLoc(N), Ty, 0, Flag);
148}
149
150SDValue MipsTargetLowering::getTargetNode(ExternalSymbolSDNode *N, EVT Ty,
151 SelectionDAG &DAG,
152 unsigned Flag) const {
153 return DAG.getTargetExternalSymbol(N->getSymbol(), Ty, Flag);
154}
155
156SDValue MipsTargetLowering::getTargetNode(BlockAddressSDNode *N, EVT Ty,
157 SelectionDAG &DAG,
158 unsigned Flag) const {
159 return DAG.getTargetBlockAddress(N->getBlockAddress(), Ty, 0, Flag);
160}
161
162SDValue MipsTargetLowering::getTargetNode(JumpTableSDNode *N, EVT Ty,
163 SelectionDAG &DAG,
164 unsigned Flag) const {
165 return DAG.getTargetJumpTable(N->getIndex(), Ty, Flag);
166}
167
168SDValue MipsTargetLowering::getTargetNode(ConstantPoolSDNode *N, EVT Ty,
169 SelectionDAG &DAG,
170 unsigned Flag) const {
171 return DAG.getTargetConstantPool(N->getConstVal(), Ty, N->getAlign(),
172 N->getOffset(), Flag);
173}
174
// Return the printable name of a MipsISD node opcode for DAG dumps/debugging.
// Returns nullptr for FIRST_NUMBER and for any opcode value not listed below
// (the switch falls through to the trailing return).
const char *MipsTargetLowering::getTargetNodeName(unsigned Opcode) const {
  switch ((MipsISD::NodeType)Opcode) {
  case MipsISD::FIRST_NUMBER:      break;
  case MipsISD::JmpLink:           return "MipsISD::JmpLink";
  case MipsISD::TailCall:          return "MipsISD::TailCall";
  case MipsISD::Highest:           return "MipsISD::Highest";
  case MipsISD::Higher:            return "MipsISD::Higher";
  case MipsISD::Hi:                return "MipsISD::Hi";
  case MipsISD::Lo:                return "MipsISD::Lo";
  case MipsISD::GotHi:             return "MipsISD::GotHi";
  case MipsISD::TlsHi:             return "MipsISD::TlsHi";
  case MipsISD::GPRel:             return "MipsISD::GPRel";
  case MipsISD::ThreadPointer:     return "MipsISD::ThreadPointer";
  case MipsISD::Ret:               return "MipsISD::Ret";
  case MipsISD::ERet:              return "MipsISD::ERet";
  case MipsISD::EH_RETURN:         return "MipsISD::EH_RETURN";
  case MipsISD::FAbs:              return "MipsISD::FAbs";
  case MipsISD::FMS:               return "MipsISD::FMS";
  case MipsISD::FPBrcond:          return "MipsISD::FPBrcond";
  case MipsISD::FPCmp:             return "MipsISD::FPCmp";
  case MipsISD::FSELECT:           return "MipsISD::FSELECT";
  case MipsISD::MTC1_D64:          return "MipsISD::MTC1_D64";
  case MipsISD::CMovFP_T:          return "MipsISD::CMovFP_T";
  case MipsISD::CMovFP_F:          return "MipsISD::CMovFP_F";
  case MipsISD::TruncIntFP:        return "MipsISD::TruncIntFP";
  case MipsISD::MFHI:              return "MipsISD::MFHI";
  case MipsISD::MFLO:              return "MipsISD::MFLO";
  case MipsISD::MTLOHI:            return "MipsISD::MTLOHI";
  case MipsISD::Mult:              return "MipsISD::Mult";
  case MipsISD::Multu:             return "MipsISD::Multu";
  case MipsISD::MAdd:              return "MipsISD::MAdd";
  case MipsISD::MAddu:             return "MipsISD::MAddu";
  case MipsISD::MSub:              return "MipsISD::MSub";
  case MipsISD::MSubu:             return "MipsISD::MSubu";
  case MipsISD::DivRem:            return "MipsISD::DivRem";
  case MipsISD::DivRemU:           return "MipsISD::DivRemU";
  case MipsISD::DivRem16:          return "MipsISD::DivRem16";
  case MipsISD::DivRemU16:         return "MipsISD::DivRemU16";
  case MipsISD::BuildPairF64:      return "MipsISD::BuildPairF64";
  case MipsISD::ExtractElementF64: return "MipsISD::ExtractElementF64";
  case MipsISD::Wrapper:           return "MipsISD::Wrapper";
  case MipsISD::DynAlloc:          return "MipsISD::DynAlloc";
  case MipsISD::Sync:              return "MipsISD::Sync";
  case MipsISD::Ext:               return "MipsISD::Ext";
  case MipsISD::Ins:               return "MipsISD::Ins";
  case MipsISD::CIns:              return "MipsISD::CIns";
  // Unaligned load/store halves (left/right word and doubleword variants).
  case MipsISD::LWL:               return "MipsISD::LWL";
  case MipsISD::LWR:               return "MipsISD::LWR";
  case MipsISD::SWL:               return "MipsISD::SWL";
  case MipsISD::SWR:               return "MipsISD::SWR";
  case MipsISD::LDL:               return "MipsISD::LDL";
  case MipsISD::LDR:               return "MipsISD::LDR";
  case MipsISD::SDL:               return "MipsISD::SDL";
  case MipsISD::SDR:               return "MipsISD::SDR";
  case MipsISD::EXTP:              return "MipsISD::EXTP";
  case MipsISD::EXTPDP:            return "MipsISD::EXTPDP";
  case MipsISD::EXTR_S_H:          return "MipsISD::EXTR_S_H";
  case MipsISD::EXTR_W:            return "MipsISD::EXTR_W";
  case MipsISD::EXTR_R_W:          return "MipsISD::EXTR_R_W";
  case MipsISD::EXTR_RS_W:         return "MipsISD::EXTR_RS_W";
  case MipsISD::SHILO:             return "MipsISD::SHILO";
  case MipsISD::MTHLIP:            return "MipsISD::MTHLIP";
  case MipsISD::MULSAQ_S_W_PH:     return "MipsISD::MULSAQ_S_W_PH";
  case MipsISD::MAQ_S_W_PHL:       return "MipsISD::MAQ_S_W_PHL";
  case MipsISD::MAQ_S_W_PHR:       return "MipsISD::MAQ_S_W_PHR";
  case MipsISD::MAQ_SA_W_PHL:      return "MipsISD::MAQ_SA_W_PHL";
  case MipsISD::MAQ_SA_W_PHR:      return "MipsISD::MAQ_SA_W_PHR";
  case MipsISD::DOUBLE_SELECT_I:   return "MipsISD::DOUBLE_SELECT_I";
  case MipsISD::DOUBLE_SELECT_I64: return "MipsISD::DOUBLE_SELECT_I64";
  case MipsISD::DPAU_H_QBL:        return "MipsISD::DPAU_H_QBL";
  case MipsISD::DPAU_H_QBR:        return "MipsISD::DPAU_H_QBR";
  case MipsISD::DPSU_H_QBL:        return "MipsISD::DPSU_H_QBL";
  case MipsISD::DPSU_H_QBR:        return "MipsISD::DPSU_H_QBR";
  case MipsISD::DPAQ_S_W_PH:       return "MipsISD::DPAQ_S_W_PH";
  case MipsISD::DPSQ_S_W_PH:       return "MipsISD::DPSQ_S_W_PH";
  case MipsISD::DPAQ_SA_L_W:       return "MipsISD::DPAQ_SA_L_W";
  case MipsISD::DPSQ_SA_L_W:       return "MipsISD::DPSQ_SA_L_W";
  case MipsISD::DPA_W_PH:          return "MipsISD::DPA_W_PH";
  case MipsISD::DPS_W_PH:          return "MipsISD::DPS_W_PH";
  case MipsISD::DPAQX_S_W_PH:      return "MipsISD::DPAQX_S_W_PH";
  case MipsISD::DPAQX_SA_W_PH:     return "MipsISD::DPAQX_SA_W_PH";
  case MipsISD::DPAX_W_PH:         return "MipsISD::DPAX_W_PH";
  case MipsISD::DPSX_W_PH:         return "MipsISD::DPSX_W_PH";
  case MipsISD::DPSQX_S_W_PH:      return "MipsISD::DPSQX_S_W_PH";
  case MipsISD::DPSQX_SA_W_PH:     return "MipsISD::DPSQX_SA_W_PH";
  case MipsISD::MULSA_W_PH:        return "MipsISD::MULSA_W_PH";
  case MipsISD::MULT:              return "MipsISD::MULT";
  case MipsISD::MULTU:             return "MipsISD::MULTU";
  case MipsISD::MADD_DSP:          return "MipsISD::MADD_DSP";
  case MipsISD::MADDU_DSP:         return "MipsISD::MADDU_DSP";
  case MipsISD::MSUB_DSP:          return "MipsISD::MSUB_DSP";
  case MipsISD::MSUBU_DSP:         return "MipsISD::MSUBU_DSP";
  case MipsISD::SHLL_DSP:          return "MipsISD::SHLL_DSP";
  case MipsISD::SHRA_DSP:          return "MipsISD::SHRA_DSP";
  case MipsISD::SHRL_DSP:          return "MipsISD::SHRL_DSP";
  case MipsISD::SETCC_DSP:         return "MipsISD::SETCC_DSP";
  case MipsISD::SELECT_CC_DSP:     return "MipsISD::SELECT_CC_DSP";
  case MipsISD::VALL_ZERO:         return "MipsISD::VALL_ZERO";
  case MipsISD::VANY_ZERO:         return "MipsISD::VANY_ZERO";
  case MipsISD::VALL_NONZERO:      return "MipsISD::VALL_NONZERO";
  case MipsISD::VANY_NONZERO:      return "MipsISD::VANY_NONZERO";
  case MipsISD::VCEQ:              return "MipsISD::VCEQ";
  case MipsISD::VCLE_S:            return "MipsISD::VCLE_S";
  case MipsISD::VCLE_U:            return "MipsISD::VCLE_U";
  case MipsISD::VCLT_S:            return "MipsISD::VCLT_S";
  case MipsISD::VCLT_U:            return "MipsISD::VCLT_U";
  case MipsISD::VEXTRACT_SEXT_ELT: return "MipsISD::VEXTRACT_SEXT_ELT";
  case MipsISD::VEXTRACT_ZEXT_ELT: return "MipsISD::VEXTRACT_ZEXT_ELT";
  case MipsISD::VNOR:              return "MipsISD::VNOR";
  case MipsISD::VSHF:              return "MipsISD::VSHF";
  case MipsISD::SHF:               return "MipsISD::SHF";
  case MipsISD::ILVEV:             return "MipsISD::ILVEV";
  case MipsISD::ILVOD:             return "MipsISD::ILVOD";
  case MipsISD::ILVL:              return "MipsISD::ILVL";
  case MipsISD::ILVR:              return "MipsISD::ILVR";
  case MipsISD::PCKEV:             return "MipsISD::PCKEV";
  case MipsISD::PCKOD:             return "MipsISD::PCKOD";
  case MipsISD::INSVE:             return "MipsISD::INSVE";
  }
  // Unlisted opcode (or FIRST_NUMBER): no name available.
  return nullptr;
}
296
298 const MipsSubtarget &STI)
299 : TargetLowering(TM), Subtarget(STI), ABI(TM.getABI()) {
300 // Mips does not have i1 type, so use i32 for
301 // setcc operations results (slt, sgt, ...).
304 // The cmp.cond.fmt instruction in MIPS32r6/MIPS64r6 uses 0 and -1 like MSA
305 // does. Integer booleans still use 0 and 1.
309
310 // Load extented operations for i1 types must be promoted
311 for (MVT VT : MVT::integer_valuetypes()) {
315 }
316
317 // MIPS doesn't have extending float->double load/store. Set LoadExtAction
318 // for f32, f16
319 for (MVT VT : MVT::fp_valuetypes()) {
320 setLoadExtAction(ISD::EXTLOAD, VT, MVT::f32, Expand);
321 setLoadExtAction(ISD::EXTLOAD, VT, MVT::f16, Expand);
322 }
323
324 // Set LoadExtAction for f16 vectors to Expand
326 MVT F16VT = MVT::getVectorVT(MVT::f16, VT.getVectorNumElements());
327 if (F16VT.isValid())
329 }
330
331 setTruncStoreAction(MVT::f32, MVT::f16, Expand);
332 setTruncStoreAction(MVT::f64, MVT::f16, Expand);
333
334 setTruncStoreAction(MVT::f64, MVT::f32, Expand);
335
336 // Used by legalize types to correctly generate the setcc result.
337 // Without this, every float setcc comes with a AND/OR with the result,
338 // we don't want this, since the fpcmp result goes to a flag register,
339 // which is used implicitly by brcond and select operations.
340 AddPromotedToType(ISD::SETCC, MVT::i1, MVT::i32);
341
342 // Mips Custom Operations
360
361 if (Subtarget.isGP64bit()) {
368 if (Subtarget.hasMips64r6()) {
371 } else {
374 }
379 }
380
381 if (!Subtarget.isGP64bit()) {
385 }
386
388 if (Subtarget.isGP64bit())
390
399
400 // Operations not directly supported by Mips.
414 if (Subtarget.hasCnMips()) {
417 } else {
420 }
427
428 if (!Subtarget.hasMips32r2())
430
431 if (!Subtarget.hasMips64r2())
433
450
451 // Lower f16 conversion operations into library calls
456
458
463
464 // Use the default for now
467
468 if (!Subtarget.isGP64bit()) {
471 }
472
473 if (!Subtarget.hasMips32r2()) {
476 }
477
478 // MIPS16 lacks MIPS32's clz and clo instructions.
481 if (!Subtarget.hasMips64())
483
484 if (!Subtarget.hasMips32r2())
486 if (!Subtarget.hasMips64r2())
488
490 setLoadExtAction(ISD::SEXTLOAD, MVT::i64, MVT::i32, Legal);
491 setLoadExtAction(ISD::ZEXTLOAD, MVT::i64, MVT::i32, Legal);
492 setLoadExtAction(ISD::EXTLOAD, MVT::i64, MVT::i32, Legal);
493 setTruncStoreAction(MVT::i64, MVT::i32, Legal);
494 } else if (Subtarget.isGP64bit()) {
495 setLoadExtAction(ISD::SEXTLOAD, MVT::i64, MVT::i32, Custom);
496 setLoadExtAction(ISD::ZEXTLOAD, MVT::i64, MVT::i32, Custom);
497 setLoadExtAction(ISD::EXTLOAD, MVT::i64, MVT::i32, Custom);
498 setTruncStoreAction(MVT::i64, MVT::i32, Custom);
499 }
500
501 setOperationAction(ISD::TRAP, MVT::Other, Legal);
502
505
506 if (ABI.IsO32()) {
507 // These libcalls are not available in 32-bit.
508 setLibcallName(RTLIB::SHL_I128, nullptr);
509 setLibcallName(RTLIB::SRL_I128, nullptr);
510 setLibcallName(RTLIB::SRA_I128, nullptr);
511 setLibcallName(RTLIB::MUL_I128, nullptr);
512 setLibcallName(RTLIB::MULO_I64, nullptr);
513 setLibcallName(RTLIB::MULO_I128, nullptr);
514 }
515
516 if (Subtarget.isGP64bit())
518 else
520
522
523 // The arguments on the stack are defined in terms of 4-byte slots on O32
524 // and 8-byte slots on N32/N64.
526 : Align(4));
527
528 setStackPointerRegisterToSaveRestore(ABI.IsN64() ? Mips::SP_64 : Mips::SP);
529
531
532 isMicroMips = Subtarget.inMicroMipsMode();
533}
534
535const MipsTargetLowering *
537 const MipsSubtarget &STI) {
538 if (STI.inMips16Mode())
539 return createMips16TargetLowering(TM, STI);
540
541 return createMipsSETargetLowering(TM, STI);
542}
543
544// Create a fast isel object.
545FastISel *
547 const TargetLibraryInfo *libInfo) const {
548 const MipsTargetMachine &TM =
549 static_cast<const MipsTargetMachine &>(funcInfo.MF->getTarget());
550
551 // We support only the standard encoding [MIPS32,MIPS32R5] ISAs.
552 bool UseFastISel = TM.Options.EnableFastISel && Subtarget.hasMips32() &&
555
556 // Disable if either of the following is true:
557 // We do not generate PIC, the ABI is not O32, XGOT is being used.
558 if (!TM.isPositionIndependent() || !TM.getABI().IsO32() ||
560 UseFastISel = false;
561
562 return UseFastISel ? Mips::createFastISel(funcInfo, libInfo) : nullptr;
563}
564
566 EVT VT) const {
567 if (!VT.isVector())
568 return MVT::i32;
570}
571
574 const MipsSubtarget &Subtarget) {
575 if (DCI.isBeforeLegalizeOps())
576 return SDValue();
577
578 EVT Ty = N->getValueType(0);
579 unsigned LO = (Ty == MVT::i32) ? Mips::LO0 : Mips::LO0_64;
580 unsigned HI = (Ty == MVT::i32) ? Mips::HI0 : Mips::HI0_64;
581 unsigned Opc = N->getOpcode() == ISD::SDIVREM ? MipsISD::DivRem16 :
583 SDLoc DL(N);
584
585 SDValue DivRem = DAG.getNode(Opc, DL, MVT::Glue,
586 N->getOperand(0), N->getOperand(1));
587 SDValue InChain = DAG.getEntryNode();
588 SDValue InGlue = DivRem;
589
590 // insert MFLO
591 if (N->hasAnyUseOfValue(0)) {
592 SDValue CopyFromLo = DAG.getCopyFromReg(InChain, DL, LO, Ty,
593 InGlue);
594 DAG.ReplaceAllUsesOfValueWith(SDValue(N, 0), CopyFromLo);
595 InChain = CopyFromLo.getValue(1);
596 InGlue = CopyFromLo.getValue(2);
597 }
598
599 // insert MFHI
600 if (N->hasAnyUseOfValue(1)) {
601 SDValue CopyFromHi = DAG.getCopyFromReg(InChain, DL,
602 HI, Ty, InGlue);
603 DAG.ReplaceAllUsesOfValueWith(SDValue(N, 1), CopyFromHi);
604 }
605
606 return SDValue();
607}
608
610 switch (CC) {
611 default: llvm_unreachable("Unknown fp condition code!");
612 case ISD::SETEQ:
613 case ISD::SETOEQ: return Mips::FCOND_OEQ;
614 case ISD::SETUNE: return Mips::FCOND_UNE;
615 case ISD::SETLT:
616 case ISD::SETOLT: return Mips::FCOND_OLT;
617 case ISD::SETGT:
618 case ISD::SETOGT: return Mips::FCOND_OGT;
619 case ISD::SETLE:
620 case ISD::SETOLE: return Mips::FCOND_OLE;
621 case ISD::SETGE:
622 case ISD::SETOGE: return Mips::FCOND_OGE;
623 case ISD::SETULT: return Mips::FCOND_ULT;
624 case ISD::SETULE: return Mips::FCOND_ULE;
625 case ISD::SETUGT: return Mips::FCOND_UGT;
626 case ISD::SETUGE: return Mips::FCOND_UGE;
627 case ISD::SETUO: return Mips::FCOND_UN;
628 case ISD::SETO: return Mips::FCOND_OR;
629 case ISD::SETNE:
630 case ISD::SETONE: return Mips::FCOND_ONE;
631 case ISD::SETUEQ: return Mips::FCOND_UEQ;
632 }
633}
634
635/// This function returns true if the floating point conditional branches and
636/// conditional moves which use condition code CC should be inverted.
638 if (CC >= Mips::FCOND_F && CC <= Mips::FCOND_NGT)
639 return false;
640
642 "Illegal Condition Code");
643
644 return true;
645}
646
647// Creates and returns an FPCmp node from a setcc node.
648// Returns Op if setcc is not a floating point comparison.
650 // must be a SETCC node
651 if (Op.getOpcode() != ISD::SETCC)
652 return Op;
653
654 SDValue LHS = Op.getOperand(0);
655
656 if (!LHS.getValueType().isFloatingPoint())
657 return Op;
658
659 SDValue RHS = Op.getOperand(1);
660 SDLoc DL(Op);
661
662 // Assume the 3rd operand is a CondCodeSDNode. Add code to check the type of
663 // node if necessary.
664 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
665
666 return DAG.getNode(MipsISD::FPCmp, DL, MVT::Glue, LHS, RHS,
667 DAG.getConstant(condCodeToFCC(CC), DL, MVT::i32));
668}
669
670// Creates and returns a CMovFPT/F node.
672 SDValue False, const SDLoc &DL) {
673 ConstantSDNode *CC = cast<ConstantSDNode>(Cond.getOperand(2));
674 bool invert = invertFPCondCodeUser((Mips::CondCode)CC->getSExtValue());
675 SDValue FCC0 = DAG.getRegister(Mips::FCC0, MVT::i32);
676
677 return DAG.getNode((invert ? MipsISD::CMovFP_F : MipsISD::CMovFP_T), DL,
678 True.getValueType(), True, FCC0, False, Cond);
679}
680
683 const MipsSubtarget &Subtarget) {
684 if (DCI.isBeforeLegalizeOps())
685 return SDValue();
686
687 SDValue SetCC = N->getOperand(0);
688
689 if ((SetCC.getOpcode() != ISD::SETCC) ||
690 !SetCC.getOperand(0).getValueType().isInteger())
691 return SDValue();
692
693 SDValue False = N->getOperand(2);
694 EVT FalseTy = False.getValueType();
695
696 if (!FalseTy.isInteger())
697 return SDValue();
698
699 ConstantSDNode *FalseC = dyn_cast<ConstantSDNode>(False);
700
701 // If the RHS (False) is 0, we swap the order of the operands
702 // of ISD::SELECT (obviously also inverting the condition) so that we can
703 // take advantage of conditional moves using the $0 register.
704 // Example:
705 // return (a != 0) ? x : 0;
706 // load $reg, x
707 // movz $reg, $0, a
708 if (!FalseC)
709 return SDValue();
710
711 const SDLoc DL(N);
712
713 if (!FalseC->getZExtValue()) {
714 ISD::CondCode CC = cast<CondCodeSDNode>(SetCC.getOperand(2))->get();
715 SDValue True = N->getOperand(1);
716
717 SetCC = DAG.getSetCC(DL, SetCC.getValueType(), SetCC.getOperand(0),
718 SetCC.getOperand(1),
720
721 return DAG.getNode(ISD::SELECT, DL, FalseTy, SetCC, False, True);
722 }
723
724 // If both operands are integer constants there's a possibility that we
725 // can do some interesting optimizations.
726 SDValue True = N->getOperand(1);
727 ConstantSDNode *TrueC = dyn_cast<ConstantSDNode>(True);
728
729 if (!TrueC || !True.getValueType().isInteger())
730 return SDValue();
731
732 // We'll also ignore MVT::i64 operands as this optimizations proves
733 // to be ineffective because of the required sign extensions as the result
734 // of a SETCC operator is always MVT::i32 for non-vector types.
735 if (True.getValueType() == MVT::i64)
736 return SDValue();
737
738 int64_t Diff = TrueC->getSExtValue() - FalseC->getSExtValue();
739
740 // 1) (a < x) ? y : y-1
741 // slti $reg1, a, x
742 // addiu $reg2, $reg1, y-1
743 if (Diff == 1)
744 return DAG.getNode(ISD::ADD, DL, SetCC.getValueType(), SetCC, False);
745
746 // 2) (a < x) ? y-1 : y
747 // slti $reg1, a, x
748 // xor $reg1, $reg1, 1
749 // addiu $reg2, $reg1, y-1
750 if (Diff == -1) {
751 ISD::CondCode CC = cast<CondCodeSDNode>(SetCC.getOperand(2))->get();
752 SetCC = DAG.getSetCC(DL, SetCC.getValueType(), SetCC.getOperand(0),
753 SetCC.getOperand(1),
755 return DAG.getNode(ISD::ADD, DL, SetCC.getValueType(), SetCC, True);
756 }
757
758 // Could not optimize.
759 return SDValue();
760}
761
764 const MipsSubtarget &Subtarget) {
765 if (DCI.isBeforeLegalizeOps())
766 return SDValue();
767
768 SDValue ValueIfTrue = N->getOperand(0), ValueIfFalse = N->getOperand(2);
769
770 ConstantSDNode *FalseC = dyn_cast<ConstantSDNode>(ValueIfFalse);
771 if (!FalseC || FalseC->getZExtValue())
772 return SDValue();
773
774 // Since RHS (False) is 0, we swap the order of the True/False operands
775 // (obviously also inverting the condition) so that we can
776 // take advantage of conditional moves using the $0 register.
777 // Example:
778 // return (a != 0) ? x : 0;
779 // load $reg, x
780 // movz $reg, $0, a
781 unsigned Opc = (N->getOpcode() == MipsISD::CMovFP_T) ? MipsISD::CMovFP_F :
783
784 SDValue FCC = N->getOperand(1), Glue = N->getOperand(3);
785 return DAG.getNode(Opc, SDLoc(N), ValueIfFalse.getValueType(),
786 ValueIfFalse, FCC, ValueIfTrue, Glue);
787}
788
791 const MipsSubtarget &Subtarget) {
792 if (DCI.isBeforeLegalizeOps() || !Subtarget.hasExtractInsert())
793 return SDValue();
794
795 SDValue FirstOperand = N->getOperand(0);
796 unsigned FirstOperandOpc = FirstOperand.getOpcode();
797 SDValue Mask = N->getOperand(1);
798 EVT ValTy = N->getValueType(0);
799 SDLoc DL(N);
800
801 uint64_t Pos = 0;
802 unsigned SMPos, SMSize;
803 ConstantSDNode *CN;
804 SDValue NewOperand;
805 unsigned Opc;
806
807 // Op's second operand must be a shifted mask.
808 if (!(CN = dyn_cast<ConstantSDNode>(Mask)) ||
809 !isShiftedMask_64(CN->getZExtValue(), SMPos, SMSize))
810 return SDValue();
811
812 if (FirstOperandOpc == ISD::SRA || FirstOperandOpc == ISD::SRL) {
813 // Pattern match EXT.
814 // $dst = and ((sra or srl) $src , pos), (2**size - 1)
815 // => ext $dst, $src, pos, size
816
817 // The second operand of the shift must be an immediate.
818 if (!(CN = dyn_cast<ConstantSDNode>(FirstOperand.getOperand(1))))
819 return SDValue();
820
821 Pos = CN->getZExtValue();
822
823 // Return if the shifted mask does not start at bit 0 or the sum of its size
824 // and Pos exceeds the word's size.
825 if (SMPos != 0 || Pos + SMSize > ValTy.getSizeInBits())
826 return SDValue();
827
828 Opc = MipsISD::Ext;
829 NewOperand = FirstOperand.getOperand(0);
830 } else if (FirstOperandOpc == ISD::SHL && Subtarget.hasCnMips()) {
831 // Pattern match CINS.
832 // $dst = and (shl $src , pos), mask
833 // => cins $dst, $src, pos, size
834 // mask is a shifted mask with consecutive 1's, pos = shift amount,
835 // size = population count.
836
837 // The second operand of the shift must be an immediate.
838 if (!(CN = dyn_cast<ConstantSDNode>(FirstOperand.getOperand(1))))
839 return SDValue();
840
841 Pos = CN->getZExtValue();
842
843 if (SMPos != Pos || Pos >= ValTy.getSizeInBits() || SMSize >= 32 ||
844 Pos + SMSize > ValTy.getSizeInBits())
845 return SDValue();
846
847 NewOperand = FirstOperand.getOperand(0);
848 // SMSize is 'location' (position) in this case, not size.
849 SMSize--;
850 Opc = MipsISD::CIns;
851 } else {
852 // Pattern match EXT.
853 // $dst = and $src, (2**size - 1) , if size > 16
854 // => ext $dst, $src, pos, size , pos = 0
855
856 // If the mask is <= 0xffff, andi can be used instead.
857 if (CN->getZExtValue() <= 0xffff)
858 return SDValue();
859
860 // Return if the mask doesn't start at position 0.
861 if (SMPos)
862 return SDValue();
863
864 Opc = MipsISD::Ext;
865 NewOperand = FirstOperand;
866 }
867 return DAG.getNode(Opc, DL, ValTy, NewOperand,
868 DAG.getConstant(Pos, DL, MVT::i32),
869 DAG.getConstant(SMSize, DL, MVT::i32));
870}
871
874 const MipsSubtarget &Subtarget) {
875 // Pattern match INS.
876 // $dst = or (and $src1 , mask0), (and (shl $src, pos), mask1),
877 // where mask1 = (2**size - 1) << pos, mask0 = ~mask1
878 // => ins $dst, $src, size, pos, $src1
879 if (DCI.isBeforeLegalizeOps() || !Subtarget.hasExtractInsert())
880 return SDValue();
881
882 SDValue And0 = N->getOperand(0), And1 = N->getOperand(1);
883 unsigned SMPos0, SMSize0, SMPos1, SMSize1;
884 ConstantSDNode *CN, *CN1;
885
886 // See if Op's first operand matches (and $src1 , mask0).
887 if (And0.getOpcode() != ISD::AND)
888 return SDValue();
889
890 if (!(CN = dyn_cast<ConstantSDNode>(And0.getOperand(1))) ||
891 !isShiftedMask_64(~CN->getSExtValue(), SMPos0, SMSize0))
892 return SDValue();
893
894 // See if Op's second operand matches (and (shl $src, pos), mask1).
895 if (And1.getOpcode() == ISD::AND &&
896 And1.getOperand(0).getOpcode() == ISD::SHL) {
897
898 if (!(CN = dyn_cast<ConstantSDNode>(And1.getOperand(1))) ||
899 !isShiftedMask_64(CN->getZExtValue(), SMPos1, SMSize1))
900 return SDValue();
901
902 // The shift masks must have the same position and size.
903 if (SMPos0 != SMPos1 || SMSize0 != SMSize1)
904 return SDValue();
905
906 SDValue Shl = And1.getOperand(0);
907
908 if (!(CN = dyn_cast<ConstantSDNode>(Shl.getOperand(1))))
909 return SDValue();
910
911 unsigned Shamt = CN->getZExtValue();
912
913 // Return if the shift amount and the first bit position of mask are not the
914 // same.
915 EVT ValTy = N->getValueType(0);
916 if ((Shamt != SMPos0) || (SMPos0 + SMSize0 > ValTy.getSizeInBits()))
917 return SDValue();
918
919 SDLoc DL(N);
920 return DAG.getNode(MipsISD::Ins, DL, ValTy, Shl.getOperand(0),
921 DAG.getConstant(SMPos0, DL, MVT::i32),
922 DAG.getConstant(SMSize0, DL, MVT::i32),
923 And0.getOperand(0));
924 } else {
925 // Pattern match DINS.
926 // $dst = or (and $src, mask0), mask1
927 // where mask0 = ((1 << SMSize0) -1) << SMPos0
928 // => dins $dst, $src, pos, size
929 if (~CN->getSExtValue() == ((((int64_t)1 << SMSize0) - 1) << SMPos0) &&
930 ((SMSize0 + SMPos0 <= 64 && Subtarget.hasMips64r2()) ||
931 (SMSize0 + SMPos0 <= 32))) {
932 // Check if AND instruction has constant as argument
933 bool isConstCase = And1.getOpcode() != ISD::AND;
934 if (And1.getOpcode() == ISD::AND) {
935 if (!(CN1 = dyn_cast<ConstantSDNode>(And1->getOperand(1))))
936 return SDValue();
937 } else {
938 if (!(CN1 = dyn_cast<ConstantSDNode>(N->getOperand(1))))
939 return SDValue();
940 }
941 // Don't generate INS if constant OR operand doesn't fit into bits
942 // cleared by constant AND operand.
943 if (CN->getSExtValue() & CN1->getSExtValue())
944 return SDValue();
945
946 SDLoc DL(N);
947 EVT ValTy = N->getOperand(0)->getValueType(0);
948 SDValue Const1;
949 SDValue SrlX;
950 if (!isConstCase) {
951 Const1 = DAG.getConstant(SMPos0, DL, MVT::i32);
952 SrlX = DAG.getNode(ISD::SRL, DL, And1->getValueType(0), And1, Const1);
953 }
954 return DAG.getNode(
955 MipsISD::Ins, DL, N->getValueType(0),
956 isConstCase
957 ? DAG.getConstant(CN1->getSExtValue() >> SMPos0, DL, ValTy)
958 : SrlX,
959 DAG.getConstant(SMPos0, DL, MVT::i32),
960 DAG.getConstant(ValTy.getSizeInBits() / 8 < 8 ? SMSize0 & 31
961 : SMSize0,
962 DL, MVT::i32),
963 And0->getOperand(0));
964
965 }
966 return SDValue();
967 }
968}
969
971 const MipsSubtarget &Subtarget) {
972 // ROOTNode must have a multiplication as an operand for the match to be
973 // successful.
974 if (ROOTNode->getOperand(0).getOpcode() != ISD::MUL &&
975 ROOTNode->getOperand(1).getOpcode() != ISD::MUL)
976 return SDValue();
977
978 // In the case where we have a multiplication as the left operand of
979 // of a subtraction, we can't combine into a MipsISD::MSub node as the
980 // the instruction definition of msub(u) places the multiplication on
981 // on the right.
982 if (ROOTNode->getOpcode() == ISD::SUB &&
983 ROOTNode->getOperand(0).getOpcode() == ISD::MUL)
984 return SDValue();
985
986 // We don't handle vector types here.
987 if (ROOTNode->getValueType(0).isVector())
988 return SDValue();
989
990 // For MIPS64, madd / msub instructions are inefficent to use with 64 bit
991 // arithmetic. E.g.
992 // (add (mul a b) c) =>
993 // let res = (madd (mthi (drotr c 32))x(mtlo c) a b) in
994 // MIPS64: (or (dsll (mfhi res) 32) (dsrl (dsll (mflo res) 32) 32)
995 // or
996 // MIPS64R2: (dins (mflo res) (mfhi res) 32 32)
997 //
998 // The overhead of setting up the Hi/Lo registers and reassembling the
999 // result makes this a dubious optimzation for MIPS64. The core of the
1000 // problem is that Hi/Lo contain the upper and lower 32 bits of the
1001 // operand and result.
1002 //
1003 // It requires a chain of 4 add/mul for MIPS64R2 to get better code
1004 // density than doing it naively, 5 for MIPS64. Additionally, using
1005 // madd/msub on MIPS64 requires the operands actually be 32 bit sign
1006 // extended operands, not true 64 bit values.
1007 //
1008 // FIXME: For the moment, disable this completely for MIPS64.
1009 if (Subtarget.hasMips64())
1010 return SDValue();
1011
1012 SDValue Mult = ROOTNode->getOperand(0).getOpcode() == ISD::MUL
1013 ? ROOTNode->getOperand(0)
1014 : ROOTNode->getOperand(1);
1015
1016 SDValue AddOperand = ROOTNode->getOperand(0).getOpcode() == ISD::MUL
1017 ? ROOTNode->getOperand(1)
1018 : ROOTNode->getOperand(0);
1019
1020 // Transform this to a MADD only if the user of this node is the add.
1021 // If there are other users of the mul, this function returns here.
1022 if (!Mult.hasOneUse())
1023 return SDValue();
1024
1025 // maddu and madd are unusual instructions in that on MIPS64 bits 63..31
1026 // must be in canonical form, i.e. sign extended. For MIPS32, the operands
1027 // of the multiply must have 32 or more sign bits, otherwise we cannot
1028 // perform this optimization. We have to check this here as we're performing
1029 // this optimization pre-legalization.
1030 SDValue MultLHS = Mult->getOperand(0);
1031 SDValue MultRHS = Mult->getOperand(1);
1032
1033 bool IsSigned = MultLHS->getOpcode() == ISD::SIGN_EXTEND &&
1034 MultRHS->getOpcode() == ISD::SIGN_EXTEND;
1035 bool IsUnsigned = MultLHS->getOpcode() == ISD::ZERO_EXTEND &&
1036 MultRHS->getOpcode() == ISD::ZERO_EXTEND;
1037
1038 if (!IsSigned && !IsUnsigned)
1039 return SDValue();
1040
1041 // Initialize accumulator.
1042 SDLoc DL(ROOTNode);
1043 SDValue BottomHalf, TopHalf;
1044 std::tie(BottomHalf, TopHalf) =
1045 CurDAG.SplitScalar(AddOperand, DL, MVT::i32, MVT::i32);
1046 SDValue ACCIn =
1047 CurDAG.getNode(MipsISD::MTLOHI, DL, MVT::Untyped, BottomHalf, TopHalf);
1048
1049 // Create MipsMAdd(u) / MipsMSub(u) node.
1050 bool IsAdd = ROOTNode->getOpcode() == ISD::ADD;
1051 unsigned Opcode = IsAdd ? (IsUnsigned ? MipsISD::MAddu : MipsISD::MAdd)
1052 : (IsUnsigned ? MipsISD::MSubu : MipsISD::MSub);
1053 SDValue MAddOps[3] = {
1054 CurDAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Mult->getOperand(0)),
1055 CurDAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Mult->getOperand(1)), ACCIn};
1056 EVT VTs[2] = {MVT::i32, MVT::i32};
1057 SDValue MAdd = CurDAG.getNode(Opcode, DL, VTs, MAddOps);
1058
1059 SDValue ResLo = CurDAG.getNode(MipsISD::MFLO, DL, MVT::i32, MAdd);
1060 SDValue ResHi = CurDAG.getNode(MipsISD::MFHI, DL, MVT::i32, MAdd);
1061 SDValue Combined =
1062 CurDAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, ResLo, ResHi);
1063 return Combined;
1064}
1065
1068 const MipsSubtarget &Subtarget) {
1069 // (sub v0 (mul v1, v2)) => (msub v1, v2, v0)
1070 if (DCI.isBeforeLegalizeOps()) {
1071 if (Subtarget.hasMips32() && !Subtarget.hasMips32r6() &&
1072 !Subtarget.inMips16Mode() && N->getValueType(0) == MVT::i64)
1073 return performMADD_MSUBCombine(N, DAG, Subtarget);
1074
1075 return SDValue();
1076 }
1077
1078 return SDValue();
1079}
1080
1083 const MipsSubtarget &Subtarget) {
1084 // (add v0 (mul v1, v2)) => (madd v1, v2, v0)
1085 if (DCI.isBeforeLegalizeOps()) {
1086 if (Subtarget.hasMips32() && !Subtarget.hasMips32r6() &&
1087 !Subtarget.inMips16Mode() && N->getValueType(0) == MVT::i64)
1088 return performMADD_MSUBCombine(N, DAG, Subtarget);
1089
1090 return SDValue();
1091 }
1092
1093 // (add v0, (add v1, abs_lo(tjt))) => (add (add v0, v1), abs_lo(tjt))
1094 SDValue Add = N->getOperand(1);
1095
1096 if (Add.getOpcode() != ISD::ADD)
1097 return SDValue();
1098
1099 SDValue Lo = Add.getOperand(1);
1100
1101 if ((Lo.getOpcode() != MipsISD::Lo) ||
1102 (Lo.getOperand(0).getOpcode() != ISD::TargetJumpTable))
1103 return SDValue();
1104
1105 EVT ValTy = N->getValueType(0);
1106 SDLoc DL(N);
1107
1108 SDValue Add1 = DAG.getNode(ISD::ADD, DL, ValTy, N->getOperand(0),
1109 Add.getOperand(0));
1110 return DAG.getNode(ISD::ADD, DL, ValTy, Add1, Lo);
1111}
1112
1115 const MipsSubtarget &Subtarget) {
1116 // Pattern match CINS.
1117 // $dst = shl (and $src , imm), pos
1118 // => cins $dst, $src, pos, size
1119
1120 if (DCI.isBeforeLegalizeOps() || !Subtarget.hasCnMips())
1121 return SDValue();
1122
1123 SDValue FirstOperand = N->getOperand(0);
1124 unsigned FirstOperandOpc = FirstOperand.getOpcode();
1125 SDValue SecondOperand = N->getOperand(1);
1126 EVT ValTy = N->getValueType(0);
1127 SDLoc DL(N);
1128
1129 uint64_t Pos = 0;
1130 unsigned SMPos, SMSize;
1131 ConstantSDNode *CN;
1132 SDValue NewOperand;
1133
1134 // The second operand of the shift must be an immediate.
1135 if (!(CN = dyn_cast<ConstantSDNode>(SecondOperand)))
1136 return SDValue();
1137
1138 Pos = CN->getZExtValue();
1139
1140 if (Pos >= ValTy.getSizeInBits())
1141 return SDValue();
1142
1143 if (FirstOperandOpc != ISD::AND)
1144 return SDValue();
1145
1146 // AND's second operand must be a shifted mask.
1147 if (!(CN = dyn_cast<ConstantSDNode>(FirstOperand.getOperand(1))) ||
1148 !isShiftedMask_64(CN->getZExtValue(), SMPos, SMSize))
1149 return SDValue();
1150
1151 // Return if the shifted mask does not start at bit 0 or the sum of its size
1152 // and Pos exceeds the word's size.
1153 if (SMPos != 0 || SMSize > 32 || Pos + SMSize > ValTy.getSizeInBits())
1154 return SDValue();
1155
1156 NewOperand = FirstOperand.getOperand(0);
1157 // SMSize is 'location' (position) in this case, not size.
1158 SMSize--;
1159
1160 return DAG.getNode(MipsISD::CIns, DL, ValTy, NewOperand,
1161 DAG.getConstant(Pos, DL, MVT::i32),
1162 DAG.getConstant(SMSize, DL, MVT::i32));
1163}
1164
1166 const {
1167 SelectionDAG &DAG = DCI.DAG;
1168 unsigned Opc = N->getOpcode();
1169
1170 switch (Opc) {
1171 default: break;
1172 case ISD::SDIVREM:
1173 case ISD::UDIVREM:
1174 return performDivRemCombine(N, DAG, DCI, Subtarget);
1175 case ISD::SELECT:
1176 return performSELECTCombine(N, DAG, DCI, Subtarget);
1177 case MipsISD::CMovFP_F:
1178 case MipsISD::CMovFP_T:
1179 return performCMovFPCombine(N, DAG, DCI, Subtarget);
1180 case ISD::AND:
1181 return performANDCombine(N, DAG, DCI, Subtarget);
1182 case ISD::OR:
1183 return performORCombine(N, DAG, DCI, Subtarget);
1184 case ISD::ADD:
1185 return performADDCombine(N, DAG, DCI, Subtarget);
1186 case ISD::SHL:
1187 return performSHLCombine(N, DAG, DCI, Subtarget);
1188 case ISD::SUB:
1189 return performSUBCombine(N, DAG, DCI, Subtarget);
1190 }
1191
1192 return SDValue();
1193}
1194
1196 return Subtarget.hasMips32();
1197}
1198
1200 return Subtarget.hasMips32();
1201}
1202
1204 // We can use ANDI+SLTIU as a bit test. Y contains the bit position.
1205 // For MIPSR2 or later, we may be able to use the `ext` instruction or its'
1206 // double-word variants.
1207 if (auto *C = dyn_cast<ConstantSDNode>(Y))
1208 return C->getAPIntValue().ule(15);
1209
1210 return false;
1211}
1212
1214 const SDNode *N, CombineLevel Level) const {
1215 assert(((N->getOpcode() == ISD::SHL &&
1216 N->getOperand(0).getOpcode() == ISD::SRL) ||
1217 (N->getOpcode() == ISD::SRL &&
1218 N->getOperand(0).getOpcode() == ISD::SHL)) &&
1219 "Expected shift-shift mask");
1220
1221 if (N->getOperand(0).getValueType().isVector())
1222 return false;
1223 return true;
1224}
1225
1226void
1229 SelectionDAG &DAG) const {
1230 return LowerOperationWrapper(N, Results, DAG);
1231}
1232
1235{
1236 switch (Op.getOpcode())
1237 {
1238 case ISD::BRCOND: return lowerBRCOND(Op, DAG);
1239 case ISD::ConstantPool: return lowerConstantPool(Op, DAG);
1240 case ISD::GlobalAddress: return lowerGlobalAddress(Op, DAG);
1241 case ISD::BlockAddress: return lowerBlockAddress(Op, DAG);
1242 case ISD::GlobalTLSAddress: return lowerGlobalTLSAddress(Op, DAG);
1243 case ISD::JumpTable: return lowerJumpTable(Op, DAG);
1244 case ISD::SELECT: return lowerSELECT(Op, DAG);
1245 case ISD::SETCC: return lowerSETCC(Op, DAG);
1246 case ISD::VASTART: return lowerVASTART(Op, DAG);
1247 case ISD::VAARG: return lowerVAARG(Op, DAG);
1248 case ISD::FCOPYSIGN: return lowerFCOPYSIGN(Op, DAG);
1249 case ISD::FABS: return lowerFABS(Op, DAG);
1250 case ISD::FRAMEADDR: return lowerFRAMEADDR(Op, DAG);
1251 case ISD::RETURNADDR: return lowerRETURNADDR(Op, DAG);
1252 case ISD::EH_RETURN: return lowerEH_RETURN(Op, DAG);
1253 case ISD::ATOMIC_FENCE: return lowerATOMIC_FENCE(Op, DAG);
1254 case ISD::SHL_PARTS: return lowerShiftLeftParts(Op, DAG);
1255 case ISD::SRA_PARTS: return lowerShiftRightParts(Op, DAG, true);
1256 case ISD::SRL_PARTS: return lowerShiftRightParts(Op, DAG, false);
1257 case ISD::LOAD: return lowerLOAD(Op, DAG);
1258 case ISD::STORE: return lowerSTORE(Op, DAG);
1259 case ISD::EH_DWARF_CFA: return lowerEH_DWARF_CFA(Op, DAG);
1260 case ISD::FP_TO_SINT: return lowerFP_TO_SINT(Op, DAG);
1261 }
1262 return SDValue();
1263}
1264
1265//===----------------------------------------------------------------------===//
1266// Lower helper functions
1267//===----------------------------------------------------------------------===//
1268
1269// addLiveIn - This helper function adds the specified physical register to the
1270// MachineFunction as a live in value. It also creates a corresponding
1271// virtual register for it.
1272static unsigned
1273addLiveIn(MachineFunction &MF, unsigned PReg, const TargetRegisterClass *RC)
1274{
1276 MF.getRegInfo().addLiveIn(PReg, VReg);
1277 return VReg;
1278}
1279
1282 const TargetInstrInfo &TII,
1283 bool Is64Bit, bool IsMicroMips) {
1284 if (NoZeroDivCheck)
1285 return &MBB;
1286
1287 // Insert instruction "teq $divisor_reg, $zero, 7".
1290 MachineOperand &Divisor = MI.getOperand(2);
1291 MIB = BuildMI(MBB, std::next(I), MI.getDebugLoc(),
1292 TII.get(IsMicroMips ? Mips::TEQ_MM : Mips::TEQ))
1293 .addReg(Divisor.getReg(), getKillRegState(Divisor.isKill()))
1294 .addReg(Mips::ZERO)
1295 .addImm(7);
1296
1297 // Use the 32-bit sub-register if this is a 64-bit division.
1298 if (Is64Bit)
1299 MIB->getOperand(0).setSubReg(Mips::sub_32);
1300
1301 // Clear Divisor's kill flag.
1302 Divisor.setIsKill(false);
1303
1304 // We would normally delete the original instruction here but in this case
1305 // we only needed to inject an additional instruction rather than replace it.
1306
1307 return &MBB;
1308}
1309
1312 MachineBasicBlock *BB) const {
1313 switch (MI.getOpcode()) {
1314 default:
1315 llvm_unreachable("Unexpected instr type to insert");
1316 case Mips::ATOMIC_LOAD_ADD_I8:
1317 return emitAtomicBinaryPartword(MI, BB, 1);
1318 case Mips::ATOMIC_LOAD_ADD_I16:
1319 return emitAtomicBinaryPartword(MI, BB, 2);
1320 case Mips::ATOMIC_LOAD_ADD_I32:
1321 return emitAtomicBinary(MI, BB);
1322 case Mips::ATOMIC_LOAD_ADD_I64:
1323 return emitAtomicBinary(MI, BB);
1324
1325 case Mips::ATOMIC_LOAD_AND_I8:
1326 return emitAtomicBinaryPartword(MI, BB, 1);
1327 case Mips::ATOMIC_LOAD_AND_I16:
1328 return emitAtomicBinaryPartword(MI, BB, 2);
1329 case Mips::ATOMIC_LOAD_AND_I32:
1330 return emitAtomicBinary(MI, BB);
1331 case Mips::ATOMIC_LOAD_AND_I64:
1332 return emitAtomicBinary(MI, BB);
1333
1334 case Mips::ATOMIC_LOAD_OR_I8:
1335 return emitAtomicBinaryPartword(MI, BB, 1);
1336 case Mips::ATOMIC_LOAD_OR_I16:
1337 return emitAtomicBinaryPartword(MI, BB, 2);
1338 case Mips::ATOMIC_LOAD_OR_I32:
1339 return emitAtomicBinary(MI, BB);
1340 case Mips::ATOMIC_LOAD_OR_I64:
1341 return emitAtomicBinary(MI, BB);
1342
1343 case Mips::ATOMIC_LOAD_XOR_I8:
1344 return emitAtomicBinaryPartword(MI, BB, 1);
1345 case Mips::ATOMIC_LOAD_XOR_I16:
1346 return emitAtomicBinaryPartword(MI, BB, 2);
1347 case Mips::ATOMIC_LOAD_XOR_I32:
1348 return emitAtomicBinary(MI, BB);
1349 case Mips::ATOMIC_LOAD_XOR_I64:
1350 return emitAtomicBinary(MI, BB);
1351
1352 case Mips::ATOMIC_LOAD_NAND_I8:
1353 return emitAtomicBinaryPartword(MI, BB, 1);
1354 case Mips::ATOMIC_LOAD_NAND_I16:
1355 return emitAtomicBinaryPartword(MI, BB, 2);
1356 case Mips::ATOMIC_LOAD_NAND_I32:
1357 return emitAtomicBinary(MI, BB);
1358 case Mips::ATOMIC_LOAD_NAND_I64:
1359 return emitAtomicBinary(MI, BB);
1360
1361 case Mips::ATOMIC_LOAD_SUB_I8:
1362 return emitAtomicBinaryPartword(MI, BB, 1);
1363 case Mips::ATOMIC_LOAD_SUB_I16:
1364 return emitAtomicBinaryPartword(MI, BB, 2);
1365 case Mips::ATOMIC_LOAD_SUB_I32:
1366 return emitAtomicBinary(MI, BB);
1367 case Mips::ATOMIC_LOAD_SUB_I64:
1368 return emitAtomicBinary(MI, BB);
1369
1370 case Mips::ATOMIC_SWAP_I8:
1371 return emitAtomicBinaryPartword(MI, BB, 1);
1372 case Mips::ATOMIC_SWAP_I16:
1373 return emitAtomicBinaryPartword(MI, BB, 2);
1374 case Mips::ATOMIC_SWAP_I32:
1375 return emitAtomicBinary(MI, BB);
1376 case Mips::ATOMIC_SWAP_I64:
1377 return emitAtomicBinary(MI, BB);
1378
1379 case Mips::ATOMIC_CMP_SWAP_I8:
1380 return emitAtomicCmpSwapPartword(MI, BB, 1);
1381 case Mips::ATOMIC_CMP_SWAP_I16:
1382 return emitAtomicCmpSwapPartword(MI, BB, 2);
1383 case Mips::ATOMIC_CMP_SWAP_I32:
1384 return emitAtomicCmpSwap(MI, BB);
1385 case Mips::ATOMIC_CMP_SWAP_I64:
1386 return emitAtomicCmpSwap(MI, BB);
1387
1388 case Mips::ATOMIC_LOAD_MIN_I8:
1389 return emitAtomicBinaryPartword(MI, BB, 1);
1390 case Mips::ATOMIC_LOAD_MIN_I16:
1391 return emitAtomicBinaryPartword(MI, BB, 2);
1392 case Mips::ATOMIC_LOAD_MIN_I32:
1393 return emitAtomicBinary(MI, BB);
1394 case Mips::ATOMIC_LOAD_MIN_I64:
1395 return emitAtomicBinary(MI, BB);
1396
1397 case Mips::ATOMIC_LOAD_MAX_I8:
1398 return emitAtomicBinaryPartword(MI, BB, 1);
1399 case Mips::ATOMIC_LOAD_MAX_I16:
1400 return emitAtomicBinaryPartword(MI, BB, 2);
1401 case Mips::ATOMIC_LOAD_MAX_I32:
1402 return emitAtomicBinary(MI, BB);
1403 case Mips::ATOMIC_LOAD_MAX_I64:
1404 return emitAtomicBinary(MI, BB);
1405
1406 case Mips::ATOMIC_LOAD_UMIN_I8:
1407 return emitAtomicBinaryPartword(MI, BB, 1);
1408 case Mips::ATOMIC_LOAD_UMIN_I16:
1409 return emitAtomicBinaryPartword(MI, BB, 2);
1410 case Mips::ATOMIC_LOAD_UMIN_I32:
1411 return emitAtomicBinary(MI, BB);
1412 case Mips::ATOMIC_LOAD_UMIN_I64:
1413 return emitAtomicBinary(MI, BB);
1414
1415 case Mips::ATOMIC_LOAD_UMAX_I8:
1416 return emitAtomicBinaryPartword(MI, BB, 1);
1417 case Mips::ATOMIC_LOAD_UMAX_I16:
1418 return emitAtomicBinaryPartword(MI, BB, 2);
1419 case Mips::ATOMIC_LOAD_UMAX_I32:
1420 return emitAtomicBinary(MI, BB);
1421 case Mips::ATOMIC_LOAD_UMAX_I64:
1422 return emitAtomicBinary(MI, BB);
1423
1424 case Mips::PseudoSDIV:
1425 case Mips::PseudoUDIV:
1426 case Mips::DIV:
1427 case Mips::DIVU:
1428 case Mips::MOD:
1429 case Mips::MODU:
1430 return insertDivByZeroTrap(MI, *BB, *Subtarget.getInstrInfo(), false,
1431 false);
1432 case Mips::SDIV_MM_Pseudo:
1433 case Mips::UDIV_MM_Pseudo:
1434 case Mips::SDIV_MM:
1435 case Mips::UDIV_MM:
1436 case Mips::DIV_MMR6:
1437 case Mips::DIVU_MMR6:
1438 case Mips::MOD_MMR6:
1439 case Mips::MODU_MMR6:
1440 return insertDivByZeroTrap(MI, *BB, *Subtarget.getInstrInfo(), false, true);
1441 case Mips::PseudoDSDIV:
1442 case Mips::PseudoDUDIV:
1443 case Mips::DDIV:
1444 case Mips::DDIVU:
1445 case Mips::DMOD:
1446 case Mips::DMODU:
1447 return insertDivByZeroTrap(MI, *BB, *Subtarget.getInstrInfo(), true, false);
1448
1449 case Mips::PseudoSELECT_I:
1450 case Mips::PseudoSELECT_I64:
1451 case Mips::PseudoSELECT_S:
1452 case Mips::PseudoSELECT_D32:
1453 case Mips::PseudoSELECT_D64:
1454 return emitPseudoSELECT(MI, BB, false, Mips::BNE);
1455 case Mips::PseudoSELECTFP_F_I:
1456 case Mips::PseudoSELECTFP_F_I64:
1457 case Mips::PseudoSELECTFP_F_S:
1458 case Mips::PseudoSELECTFP_F_D32:
1459 case Mips::PseudoSELECTFP_F_D64:
1460 return emitPseudoSELECT(MI, BB, true, Mips::BC1F);
1461 case Mips::PseudoSELECTFP_T_I:
1462 case Mips::PseudoSELECTFP_T_I64:
1463 case Mips::PseudoSELECTFP_T_S:
1464 case Mips::PseudoSELECTFP_T_D32:
1465 case Mips::PseudoSELECTFP_T_D64:
1466 return emitPseudoSELECT(MI, BB, true, Mips::BC1T);
1467 case Mips::PseudoD_SELECT_I:
1468 case Mips::PseudoD_SELECT_I64:
1469 return emitPseudoD_SELECT(MI, BB);
1470 case Mips::LDR_W:
1471 return emitLDR_W(MI, BB);
1472 case Mips::LDR_D:
1473 return emitLDR_D(MI, BB);
1474 case Mips::STR_W:
1475 return emitSTR_W(MI, BB);
1476 case Mips::STR_D:
1477 return emitSTR_D(MI, BB);
1478 }
1479}
1480
1481// This function also handles Mips::ATOMIC_SWAP_I32 (when BinOpcode == 0), and
1482// Mips::ATOMIC_LOAD_NAND_I32 (when Nand == true)
1484MipsTargetLowering::emitAtomicBinary(MachineInstr &MI,
1485 MachineBasicBlock *BB) const {
1486
1487 MachineFunction *MF = BB->getParent();
1488 MachineRegisterInfo &RegInfo = MF->getRegInfo();
1490 DebugLoc DL = MI.getDebugLoc();
1491
1492 unsigned AtomicOp;
1493 bool NeedsAdditionalReg = false;
1494 switch (MI.getOpcode()) {
1495 case Mips::ATOMIC_LOAD_ADD_I32:
1496 AtomicOp = Mips::ATOMIC_LOAD_ADD_I32_POSTRA;
1497 break;
1498 case Mips::ATOMIC_LOAD_SUB_I32:
1499 AtomicOp = Mips::ATOMIC_LOAD_SUB_I32_POSTRA;
1500 break;
1501 case Mips::ATOMIC_LOAD_AND_I32:
1502 AtomicOp = Mips::ATOMIC_LOAD_AND_I32_POSTRA;
1503 break;
1504 case Mips::ATOMIC_LOAD_OR_I32:
1505 AtomicOp = Mips::ATOMIC_LOAD_OR_I32_POSTRA;
1506 break;
1507 case Mips::ATOMIC_LOAD_XOR_I32:
1508 AtomicOp = Mips::ATOMIC_LOAD_XOR_I32_POSTRA;
1509 break;
1510 case Mips::ATOMIC_LOAD_NAND_I32:
1511 AtomicOp = Mips::ATOMIC_LOAD_NAND_I32_POSTRA;
1512 break;
1513 case Mips::ATOMIC_SWAP_I32:
1514 AtomicOp = Mips::ATOMIC_SWAP_I32_POSTRA;
1515 break;
1516 case Mips::ATOMIC_LOAD_ADD_I64:
1517 AtomicOp = Mips::ATOMIC_LOAD_ADD_I64_POSTRA;
1518 break;
1519 case Mips::ATOMIC_LOAD_SUB_I64:
1520 AtomicOp = Mips::ATOMIC_LOAD_SUB_I64_POSTRA;
1521 break;
1522 case Mips::ATOMIC_LOAD_AND_I64:
1523 AtomicOp = Mips::ATOMIC_LOAD_AND_I64_POSTRA;
1524 break;
1525 case Mips::ATOMIC_LOAD_OR_I64:
1526 AtomicOp = Mips::ATOMIC_LOAD_OR_I64_POSTRA;
1527 break;
1528 case Mips::ATOMIC_LOAD_XOR_I64:
1529 AtomicOp = Mips::ATOMIC_LOAD_XOR_I64_POSTRA;
1530 break;
1531 case Mips::ATOMIC_LOAD_NAND_I64:
1532 AtomicOp = Mips::ATOMIC_LOAD_NAND_I64_POSTRA;
1533 break;
1534 case Mips::ATOMIC_SWAP_I64:
1535 AtomicOp = Mips::ATOMIC_SWAP_I64_POSTRA;
1536 break;
1537 case Mips::ATOMIC_LOAD_MIN_I32:
1538 AtomicOp = Mips::ATOMIC_LOAD_MIN_I32_POSTRA;
1539 NeedsAdditionalReg = true;
1540 break;
1541 case Mips::ATOMIC_LOAD_MAX_I32:
1542 AtomicOp = Mips::ATOMIC_LOAD_MAX_I32_POSTRA;
1543 NeedsAdditionalReg = true;
1544 break;
1545 case Mips::ATOMIC_LOAD_UMIN_I32:
1546 AtomicOp = Mips::ATOMIC_LOAD_UMIN_I32_POSTRA;
1547 NeedsAdditionalReg = true;
1548 break;
1549 case Mips::ATOMIC_LOAD_UMAX_I32:
1550 AtomicOp = Mips::ATOMIC_LOAD_UMAX_I32_POSTRA;
1551 NeedsAdditionalReg = true;
1552 break;
1553 case Mips::ATOMIC_LOAD_MIN_I64:
1554 AtomicOp = Mips::ATOMIC_LOAD_MIN_I64_POSTRA;
1555 NeedsAdditionalReg = true;
1556 break;
1557 case Mips::ATOMIC_LOAD_MAX_I64:
1558 AtomicOp = Mips::ATOMIC_LOAD_MAX_I64_POSTRA;
1559 NeedsAdditionalReg = true;
1560 break;
1561 case Mips::ATOMIC_LOAD_UMIN_I64:
1562 AtomicOp = Mips::ATOMIC_LOAD_UMIN_I64_POSTRA;
1563 NeedsAdditionalReg = true;
1564 break;
1565 case Mips::ATOMIC_LOAD_UMAX_I64:
1566 AtomicOp = Mips::ATOMIC_LOAD_UMAX_I64_POSTRA;
1567 NeedsAdditionalReg = true;
1568 break;
1569 default:
1570 llvm_unreachable("Unknown pseudo atomic for replacement!");
1571 }
1572
1573 Register OldVal = MI.getOperand(0).getReg();
1574 Register Ptr = MI.getOperand(1).getReg();
1575 Register Incr = MI.getOperand(2).getReg();
1576 Register Scratch = RegInfo.createVirtualRegister(RegInfo.getRegClass(OldVal));
1577
1579
1580 // The scratch registers here with the EarlyClobber | Define | Implicit
1581 // flags is used to persuade the register allocator and the machine
1582 // verifier to accept the usage of this register. This has to be a real
1583 // register which has an UNDEF value but is dead after the instruction which
1584 // is unique among the registers chosen for the instruction.
1585
1586 // The EarlyClobber flag has the semantic properties that the operand it is
1587 // attached to is clobbered before the rest of the inputs are read. Hence it
1588 // must be unique among the operands to the instruction.
1589 // The Define flag is needed to coerce the machine verifier that an Undef
1590 // value isn't a problem.
1591 // The Dead flag is needed as the value in scratch isn't used by any other
1592 // instruction. Kill isn't used as Dead is more precise.
1593 // The implicit flag is here due to the interaction between the other flags
1594 // and the machine verifier.
1595
1596 // For correctness purpose, a new pseudo is introduced here. We need this
1597 // new pseudo, so that FastRegisterAllocator does not see an ll/sc sequence
1598 // that is spread over >1 basic blocks. A register allocator which
1599 // introduces (or any codegen infact) a store, can violate the expectations
1600 // of the hardware.
1601 //
1602 // An atomic read-modify-write sequence starts with a linked load
1603 // instruction and ends with a store conditional instruction. The atomic
1604 // read-modify-write sequence fails if any of the following conditions
1605 // occur between the execution of ll and sc:
1606 // * A coherent store is completed by another process or coherent I/O
1607 // module into the block of synchronizable physical memory containing
1608 // the word. The size and alignment of the block is
1609 // implementation-dependent.
1610 // * A coherent store is executed between an LL and SC sequence on the
1611 // same processor to the block of synchornizable physical memory
1612 // containing the word.
1613 //
1614
1615 Register PtrCopy = RegInfo.createVirtualRegister(RegInfo.getRegClass(Ptr));
1616 Register IncrCopy = RegInfo.createVirtualRegister(RegInfo.getRegClass(Incr));
1617
1618 BuildMI(*BB, II, DL, TII->get(Mips::COPY), IncrCopy).addReg(Incr);
1619 BuildMI(*BB, II, DL, TII->get(Mips::COPY), PtrCopy).addReg(Ptr);
1620
1622 BuildMI(*BB, II, DL, TII->get(AtomicOp))
1624 .addReg(PtrCopy)
1625 .addReg(IncrCopy)
1628 if (NeedsAdditionalReg) {
1629 Register Scratch2 =
1630 RegInfo.createVirtualRegister(RegInfo.getRegClass(OldVal));
1633 }
1634
1635 MI.eraseFromParent();
1636
1637 return BB;
1638}
1639
1640MachineBasicBlock *MipsTargetLowering::emitSignExtendToI32InReg(
1641 MachineInstr &MI, MachineBasicBlock *BB, unsigned Size, unsigned DstReg,
1642 unsigned SrcReg) const {
1644 const DebugLoc &DL = MI.getDebugLoc();
1645
1646 if (Subtarget.hasMips32r2() && Size == 1) {
1647 BuildMI(BB, DL, TII->get(Mips::SEB), DstReg).addReg(SrcReg);
1648 return BB;
1649 }
1650
1651 if (Subtarget.hasMips32r2() && Size == 2) {
1652 BuildMI(BB, DL, TII->get(Mips::SEH), DstReg).addReg(SrcReg);
1653 return BB;
1654 }
1655
1656 MachineFunction *MF = BB->getParent();
1658 const TargetRegisterClass *RC = getRegClassFor(MVT::i32);
1659 Register ScrReg = RegInfo.createVirtualRegister(RC);
1660
1661 assert(Size < 32);
1662 int64_t ShiftImm = 32 - (Size * 8);
1663
1664 BuildMI(BB, DL, TII->get(Mips::SLL), ScrReg).addReg(SrcReg).addImm(ShiftImm);
1665 BuildMI(BB, DL, TII->get(Mips::SRA), DstReg).addReg(ScrReg).addImm(ShiftImm);
1666
1667 return BB;
1668}
1669
1670MachineBasicBlock *MipsTargetLowering::emitAtomicBinaryPartword(
1671 MachineInstr &MI, MachineBasicBlock *BB, unsigned Size) const {
1672 assert((Size == 1 || Size == 2) &&
1673 "Unsupported size for EmitAtomicBinaryPartial.");
1674
1675 MachineFunction *MF = BB->getParent();
1677 const TargetRegisterClass *RC = getRegClassFor(MVT::i32);
1678 const bool ArePtrs64bit = ABI.ArePtrs64bit();
1679 const TargetRegisterClass *RCp =
1680 getRegClassFor(ArePtrs64bit ? MVT::i64 : MVT::i32);
1682 DebugLoc DL = MI.getDebugLoc();
1683
1684 Register Dest = MI.getOperand(0).getReg();
1685 Register Ptr = MI.getOperand(1).getReg();
1686 Register Incr = MI.getOperand(2).getReg();
1687
1688 Register AlignedAddr = RegInfo.createVirtualRegister(RCp);
1689 Register ShiftAmt = RegInfo.createVirtualRegister(RC);
1690 Register Mask = RegInfo.createVirtualRegister(RC);
1691 Register Mask2 = RegInfo.createVirtualRegister(RC);
1692 Register Incr2 = RegInfo.createVirtualRegister(RC);
1693 Register MaskLSB2 = RegInfo.createVirtualRegister(RCp);
1694 Register PtrLSB2 = RegInfo.createVirtualRegister(RC);
1695 Register MaskUpper = RegInfo.createVirtualRegister(RC);
1696 Register Scratch = RegInfo.createVirtualRegister(RC);
1697 Register Scratch2 = RegInfo.createVirtualRegister(RC);
1698 Register Scratch3 = RegInfo.createVirtualRegister(RC);
1699
1700 unsigned AtomicOp = 0;
1701 bool NeedsAdditionalReg = false;
1702 switch (MI.getOpcode()) {
1703 case Mips::ATOMIC_LOAD_NAND_I8:
1704 AtomicOp = Mips::ATOMIC_LOAD_NAND_I8_POSTRA;
1705 break;
1706 case Mips::ATOMIC_LOAD_NAND_I16:
1707 AtomicOp = Mips::ATOMIC_LOAD_NAND_I16_POSTRA;
1708 break;
1709 case Mips::ATOMIC_SWAP_I8:
1710 AtomicOp = Mips::ATOMIC_SWAP_I8_POSTRA;
1711 break;
1712 case Mips::ATOMIC_SWAP_I16:
1713 AtomicOp = Mips::ATOMIC_SWAP_I16_POSTRA;
1714 break;
1715 case Mips::ATOMIC_LOAD_ADD_I8:
1716 AtomicOp = Mips::ATOMIC_LOAD_ADD_I8_POSTRA;
1717 break;
1718 case Mips::ATOMIC_LOAD_ADD_I16:
1719 AtomicOp = Mips::ATOMIC_LOAD_ADD_I16_POSTRA;
1720 break;
1721 case Mips::ATOMIC_LOAD_SUB_I8:
1722 AtomicOp = Mips::ATOMIC_LOAD_SUB_I8_POSTRA;
1723 break;
1724 case Mips::ATOMIC_LOAD_SUB_I16:
1725 AtomicOp = Mips::ATOMIC_LOAD_SUB_I16_POSTRA;
1726 break;
1727 case Mips::ATOMIC_LOAD_AND_I8:
1728 AtomicOp = Mips::ATOMIC_LOAD_AND_I8_POSTRA;
1729 break;
1730 case Mips::ATOMIC_LOAD_AND_I16:
1731 AtomicOp = Mips::ATOMIC_LOAD_AND_I16_POSTRA;
1732 break;
1733 case Mips::ATOMIC_LOAD_OR_I8:
1734 AtomicOp = Mips::ATOMIC_LOAD_OR_I8_POSTRA;
1735 break;
1736 case Mips::ATOMIC_LOAD_OR_I16:
1737 AtomicOp = Mips::ATOMIC_LOAD_OR_I16_POSTRA;
1738 break;
1739 case Mips::ATOMIC_LOAD_XOR_I8:
1740 AtomicOp = Mips::ATOMIC_LOAD_XOR_I8_POSTRA;
1741 break;
1742 case Mips::ATOMIC_LOAD_XOR_I16:
1743 AtomicOp = Mips::ATOMIC_LOAD_XOR_I16_POSTRA;
1744 break;
1745 case Mips::ATOMIC_LOAD_MIN_I8:
1746 AtomicOp = Mips::ATOMIC_LOAD_MIN_I8_POSTRA;
1747 NeedsAdditionalReg = true;
1748 break;
1749 case Mips::ATOMIC_LOAD_MIN_I16:
1750 AtomicOp = Mips::ATOMIC_LOAD_MIN_I16_POSTRA;
1751 NeedsAdditionalReg = true;
1752 break;
1753 case Mips::ATOMIC_LOAD_MAX_I8:
1754 AtomicOp = Mips::ATOMIC_LOAD_MAX_I8_POSTRA;
1755 NeedsAdditionalReg = true;
1756 break;
1757 case Mips::ATOMIC_LOAD_MAX_I16:
1758 AtomicOp = Mips::ATOMIC_LOAD_MAX_I16_POSTRA;
1759 NeedsAdditionalReg = true;
1760 break;
1761 case Mips::ATOMIC_LOAD_UMIN_I8:
1762 AtomicOp = Mips::ATOMIC_LOAD_UMIN_I8_POSTRA;
1763 NeedsAdditionalReg = true;
1764 break;
1765 case Mips::ATOMIC_LOAD_UMIN_I16:
1766 AtomicOp = Mips::ATOMIC_LOAD_UMIN_I16_POSTRA;
1767 NeedsAdditionalReg = true;
1768 break;
1769 case Mips::ATOMIC_LOAD_UMAX_I8:
1770 AtomicOp = Mips::ATOMIC_LOAD_UMAX_I8_POSTRA;
1771 NeedsAdditionalReg = true;
1772 break;
1773 case Mips::ATOMIC_LOAD_UMAX_I16:
1774 AtomicOp = Mips::ATOMIC_LOAD_UMAX_I16_POSTRA;
1775 NeedsAdditionalReg = true;
1776 break;
1777 default:
1778 llvm_unreachable("Unknown subword atomic pseudo for expansion!");
1779 }
1780
1781 // insert new blocks after the current block
1782 const BasicBlock *LLVM_BB = BB->getBasicBlock();
1783 MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(LLVM_BB);
1785 MF->insert(It, exitMBB);
1786
1787 // Transfer the remainder of BB and its successor edges to exitMBB.
1788 exitMBB->splice(exitMBB->begin(), BB,
1789 std::next(MachineBasicBlock::iterator(MI)), BB->end());
1791
1793
1794 // thisMBB:
1795 // addiu masklsb2,$0,-4 # 0xfffffffc
1796 // and alignedaddr,ptr,masklsb2
1797 // andi ptrlsb2,ptr,3
1798 // sll shiftamt,ptrlsb2,3
1799 // ori maskupper,$0,255 # 0xff
1800 // sll mask,maskupper,shiftamt
1801 // nor mask2,$0,mask
1802 // sll incr2,incr,shiftamt
1803
1804 int64_t MaskImm = (Size == 1) ? 255 : 65535;
1805 BuildMI(BB, DL, TII->get(ABI.GetPtrAddiuOp()), MaskLSB2)
1806 .addReg(ABI.GetNullPtr()).addImm(-4);
1807 BuildMI(BB, DL, TII->get(ABI.GetPtrAndOp()), AlignedAddr)
1808 .addReg(Ptr).addReg(MaskLSB2);
1809 BuildMI(BB, DL, TII->get(Mips::ANDi), PtrLSB2)
1810 .addReg(Ptr, 0, ArePtrs64bit ? Mips::sub_32 : 0).addImm(3);
1811 if (Subtarget.isLittle()) {
1812 BuildMI(BB, DL, TII->get(Mips::SLL), ShiftAmt).addReg(PtrLSB2).addImm(3);
1813 } else {
1814 Register Off = RegInfo.createVirtualRegister(RC);
1815 BuildMI(BB, DL, TII->get(Mips::XORi), Off)
1816 .addReg(PtrLSB2).addImm((Size == 1) ? 3 : 2);
1817 BuildMI(BB, DL, TII->get(Mips::SLL), ShiftAmt).addReg(Off).addImm(3);
1818 }
1819 BuildMI(BB, DL, TII->get(Mips::ORi), MaskUpper)
1820 .addReg(Mips::ZERO).addImm(MaskImm);
1821 BuildMI(BB, DL, TII->get(Mips::SLLV), Mask)
1822 .addReg(MaskUpper).addReg(ShiftAmt);
1823 BuildMI(BB, DL, TII->get(Mips::NOR), Mask2).addReg(Mips::ZERO).addReg(Mask);
1824 BuildMI(BB, DL, TII->get(Mips::SLLV), Incr2).addReg(Incr).addReg(ShiftAmt);
1825
1826
1827 // The purposes of the flags on the scratch registers is explained in
1828 // emitAtomicBinary. In summary, we need a scratch register which is going to
1829 // be undef, that is unique among registers chosen for the instruction.
1830
1832 BuildMI(BB, DL, TII->get(AtomicOp))
1834 .addReg(AlignedAddr)
1835 .addReg(Incr2)
1836 .addReg(Mask)
1837 .addReg(Mask2)
1838 .addReg(ShiftAmt)
1845 if (NeedsAdditionalReg) {
1846 Register Scratch4 = RegInfo.createVirtualRegister(RC);
1849 }
1850
1851 MI.eraseFromParent(); // The instruction is gone now.
1852
1853 return exitMBB;
1854}
1855
1856// Lower atomic compare and swap to a pseudo instruction, taking care to
1857// define a scratch register for the pseudo instruction's expansion. The
1858// instruction is expanded after the register allocator as to prevent
1859// the insertion of stores between the linked load and the store conditional.
1860
1862MipsTargetLowering::emitAtomicCmpSwap(MachineInstr &MI,
1863 MachineBasicBlock *BB) const {
1864
1865 assert((MI.getOpcode() == Mips::ATOMIC_CMP_SWAP_I32 ||
1866 MI.getOpcode() == Mips::ATOMIC_CMP_SWAP_I64) &&
1867 "Unsupported atomic pseudo for EmitAtomicCmpSwap.");
1868
1869 const unsigned Size = MI.getOpcode() == Mips::ATOMIC_CMP_SWAP_I32 ? 4 : 8;
1870
1871 MachineFunction *MF = BB->getParent();
1875 DebugLoc DL = MI.getDebugLoc();
1876
1877 unsigned AtomicOp = MI.getOpcode() == Mips::ATOMIC_CMP_SWAP_I32
1878 ? Mips::ATOMIC_CMP_SWAP_I32_POSTRA
1879 : Mips::ATOMIC_CMP_SWAP_I64_POSTRA;
1880 Register Dest = MI.getOperand(0).getReg();
1881 Register Ptr = MI.getOperand(1).getReg();
1882 Register OldVal = MI.getOperand(2).getReg();
1883 Register NewVal = MI.getOperand(3).getReg();
1884
1885 Register Scratch = MRI.createVirtualRegister(RC);
1887
1888 // We need to create copies of the various registers and kill them at the
1889 // atomic pseudo. If the copies are not made, when the atomic is expanded
1890 // after fast register allocation, the spills will end up outside of the
1891 // blocks that their values are defined in, causing livein errors.
1892
1893 Register PtrCopy = MRI.createVirtualRegister(MRI.getRegClass(Ptr));
1894 Register OldValCopy = MRI.createVirtualRegister(MRI.getRegClass(OldVal));
1895 Register NewValCopy = MRI.createVirtualRegister(MRI.getRegClass(NewVal));
1896
1897 BuildMI(*BB, II, DL, TII->get(Mips::COPY), PtrCopy).addReg(Ptr);
1898 BuildMI(*BB, II, DL, TII->get(Mips::COPY), OldValCopy).addReg(OldVal);
1899 BuildMI(*BB, II, DL, TII->get(Mips::COPY), NewValCopy).addReg(NewVal);
1900
1901 // The purposes of the flags on the scratch registers is explained in
1902 // emitAtomicBinary. In summary, we need a scratch register which is going to
1903 // be undef, that is unique among registers chosen for the instruction.
1904
1905 BuildMI(*BB, II, DL, TII->get(AtomicOp))
1907 .addReg(PtrCopy, RegState::Kill)
1908 .addReg(OldValCopy, RegState::Kill)
1909 .addReg(NewValCopy, RegState::Kill)
1912
1913 MI.eraseFromParent(); // The instruction is gone now.
1914
1915 return BB;
1916}
1917
1918MachineBasicBlock *MipsTargetLowering::emitAtomicCmpSwapPartword(
1919 MachineInstr &MI, MachineBasicBlock *BB, unsigned Size) const {
1920 assert((Size == 1 || Size == 2) &&
1921 "Unsupported size for EmitAtomicCmpSwapPartial.");
1922
1923 MachineFunction *MF = BB->getParent();
1925 const TargetRegisterClass *RC = getRegClassFor(MVT::i32);
1926 const bool ArePtrs64bit = ABI.ArePtrs64bit();
1927 const TargetRegisterClass *RCp =
1928 getRegClassFor(ArePtrs64bit ? MVT::i64 : MVT::i32);
1930 DebugLoc DL = MI.getDebugLoc();
1931
1932 Register Dest = MI.getOperand(0).getReg();
1933 Register Ptr = MI.getOperand(1).getReg();
1934 Register CmpVal = MI.getOperand(2).getReg();
1935 Register NewVal = MI.getOperand(3).getReg();
1936
1937 Register AlignedAddr = RegInfo.createVirtualRegister(RCp);
1938 Register ShiftAmt = RegInfo.createVirtualRegister(RC);
1939 Register Mask = RegInfo.createVirtualRegister(RC);
1940 Register Mask2 = RegInfo.createVirtualRegister(RC);
1941 Register ShiftedCmpVal = RegInfo.createVirtualRegister(RC);
1942 Register ShiftedNewVal = RegInfo.createVirtualRegister(RC);
1943 Register MaskLSB2 = RegInfo.createVirtualRegister(RCp);
1944 Register PtrLSB2 = RegInfo.createVirtualRegister(RC);
1945 Register MaskUpper = RegInfo.createVirtualRegister(RC);
1946 Register MaskedCmpVal = RegInfo.createVirtualRegister(RC);
1947 Register MaskedNewVal = RegInfo.createVirtualRegister(RC);
1948 unsigned AtomicOp = MI.getOpcode() == Mips::ATOMIC_CMP_SWAP_I8
1949 ? Mips::ATOMIC_CMP_SWAP_I8_POSTRA
1950 : Mips::ATOMIC_CMP_SWAP_I16_POSTRA;
1951
1952 // The scratch registers here with the EarlyClobber | Define | Dead | Implicit
1953 // flags are used to coerce the register allocator and the machine verifier to
1954 // accept the usage of these registers.
1955 // The EarlyClobber flag has the semantic properties that the operand it is
1956 // attached to is clobbered before the rest of the inputs are read. Hence it
1957 // must be unique among the operands to the instruction.
1958 // The Define flag is needed to coerce the machine verifier that an Undef
1959 // value isn't a problem.
1960 // The Dead flag is needed as the value in scratch isn't used by any other
1961 // instruction. Kill isn't used as Dead is more precise.
1962 Register Scratch = RegInfo.createVirtualRegister(RC);
1963 Register Scratch2 = RegInfo.createVirtualRegister(RC);
1964
1965 // insert new blocks after the current block
1966 const BasicBlock *LLVM_BB = BB->getBasicBlock();
1967 MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(LLVM_BB);
1969 MF->insert(It, exitMBB);
1970
1971 // Transfer the remainder of BB and its successor edges to exitMBB.
1972 exitMBB->splice(exitMBB->begin(), BB,
1973 std::next(MachineBasicBlock::iterator(MI)), BB->end());
1975
1977
1978 // thisMBB:
1979 // addiu masklsb2,$0,-4 # 0xfffffffc
1980 // and alignedaddr,ptr,masklsb2
1981 // andi ptrlsb2,ptr,3
1982 // xori ptrlsb2,ptrlsb2,3 # Only for BE
1983 // sll shiftamt,ptrlsb2,3
1984 // ori maskupper,$0,255 # 0xff
1985 // sll mask,maskupper,shiftamt
1986 // nor mask2,$0,mask
1987 // andi maskedcmpval,cmpval,255
1988 // sll shiftedcmpval,maskedcmpval,shiftamt
1989 // andi maskednewval,newval,255
1990 // sll shiftednewval,maskednewval,shiftamt
1991 int64_t MaskImm = (Size == 1) ? 255 : 65535;
1992 BuildMI(BB, DL, TII->get(ArePtrs64bit ? Mips::DADDiu : Mips::ADDiu), MaskLSB2)
1993 .addReg(ABI.GetNullPtr()).addImm(-4);
1994 BuildMI(BB, DL, TII->get(ArePtrs64bit ? Mips::AND64 : Mips::AND), AlignedAddr)
1995 .addReg(Ptr).addReg(MaskLSB2);
1996 BuildMI(BB, DL, TII->get(Mips::ANDi), PtrLSB2)
1997 .addReg(Ptr, 0, ArePtrs64bit ? Mips::sub_32 : 0).addImm(3);
1998 if (Subtarget.isLittle()) {
1999 BuildMI(BB, DL, TII->get(Mips::SLL), ShiftAmt).addReg(PtrLSB2).addImm(3);
2000 } else {
2001 Register Off = RegInfo.createVirtualRegister(RC);
2002 BuildMI(BB, DL, TII->get(Mips::XORi), Off)
2003 .addReg(PtrLSB2).addImm((Size == 1) ? 3 : 2);
2004 BuildMI(BB, DL, TII->get(Mips::SLL), ShiftAmt).addReg(Off).addImm(3);
2005 }
2006 BuildMI(BB, DL, TII->get(Mips::ORi), MaskUpper)
2007 .addReg(Mips::ZERO).addImm(MaskImm);
2008 BuildMI(BB, DL, TII->get(Mips::SLLV), Mask)
2009 .addReg(MaskUpper).addReg(ShiftAmt);
2010 BuildMI(BB, DL, TII->get(Mips::NOR), Mask2).addReg(Mips::ZERO).addReg(Mask);
2011 BuildMI(BB, DL, TII->get(Mips::ANDi), MaskedCmpVal)
2012 .addReg(CmpVal).addImm(MaskImm);
2013 BuildMI(BB, DL, TII->get(Mips::SLLV), ShiftedCmpVal)
2014 .addReg(MaskedCmpVal).addReg(ShiftAmt);
2015 BuildMI(BB, DL, TII->get(Mips::ANDi), MaskedNewVal)
2016 .addReg(NewVal).addImm(MaskImm);
2017 BuildMI(BB, DL, TII->get(Mips::SLLV), ShiftedNewVal)
2018 .addReg(MaskedNewVal).addReg(ShiftAmt);
2019
2020 // The purposes of the flags on the scratch registers are explained in
2021 // emitAtomicBinary. In summary, we need a scratch register which is going to
2022 // be undef, that is unique among the register chosen for the instruction.
2023
2024 BuildMI(BB, DL, TII->get(AtomicOp))
2026 .addReg(AlignedAddr)
2027 .addReg(Mask)
2028 .addReg(ShiftedCmpVal)
2029 .addReg(Mask2)
2030 .addReg(ShiftedNewVal)
2031 .addReg(ShiftAmt)
2036
2037 MI.eraseFromParent(); // The instruction is gone now.
2038
2039 return exitMBB;
2040}
2041
SDValue MipsTargetLowering::lowerBRCOND(SDValue Op, SelectionDAG &DAG) const {
  // Lower ISD::BRCOND. When the condition originates from a floating point
  // comparison, emit a branch on the FP condition-code register instead.
  // The first operand is the chain, the second is the condition, the third is
  // the block to branch to if the condition is true.
  SDValue Chain = Op.getOperand(0);
  SDValue Dest = Op.getOperand(2);
  SDLoc DL(Op);

  // Try to fold the condition into a MipsISD::FPCmp node.
  SDValue CondRes = createFPCmp(DAG, Op.getOperand(1));

  // Return if flag is not set by a floating point comparison.
  if (CondRes.getOpcode() != MipsISD::FPCmp)
    return Op;

  // Operand 2 of the FPCmp node carries the Mips FP condition code.
  SDValue CCNode = CondRes.getOperand(2);
  // NOTE(review): 'Opc' is presumably the BRANCH_T/BRANCH_F opcode derived
  // from CCNode's condition code — its definition is not visible here; confirm.
  SDValue BrCode = DAG.getConstant(Opc, DL, MVT::i32);
  SDValue FCC0 = DAG.getRegister(Mips::FCC0, MVT::i32);
  // Branch on FCC0; CondRes is included so the compare feeds the branch.
  return DAG.getNode(MipsISD::FPBrcond, DL, Op.getValueType(), Chain, BrCode,
                     FCC0, Dest, CondRes);
}
2064
SDValue MipsTargetLowering::
lowerSELECT(SDValue Op, SelectionDAG &DAG) const
{
  // Lower ISD::SELECT: when the condition is a floating point comparison,
  // use a conditional FP move instead of a generic select.
  SDValue Cond = createFPCmp(DAG, Op.getOperand(0));

  // Return if flag is not set by a floating point comparison.
  if (Cond.getOpcode() != MipsISD::FPCmp)
    return Op;

  // Select between the true (operand 1) and false (operand 2) values with a
  // conditional move keyed on the FP compare result.
  return createCMovFP(DAG, Cond, Op.getOperand(1), Op.getOperand(2),
                      SDLoc(Op));
}
2078
SDValue MipsTargetLowering::lowerSETCC(SDValue Op, SelectionDAG &DAG) const {
  // Lower a floating point ISD::SETCC by materializing the boolean result
  // (1 or 0) through a conditional FP move.
  SDValue Cond = createFPCmp(DAG, Op);

  assert(Cond.getOpcode() == MipsISD::FPCmp &&
         "Floating point operand expected.");

  SDLoc DL(Op);
  SDValue True = DAG.getConstant(1, DL, MVT::i32);
  SDValue False = DAG.getConstant(0, DL, MVT::i32);

  // CMovFP picks True or False depending on the FP condition-code result.
  return createCMovFP(DAG, Cond, True, False, DL);
}
2092
SDValue MipsTargetLowering::lowerGlobalAddress(SDValue Op,
                                               SelectionDAG &DAG) const {
  // Lower a GlobalAddress node into the appropriate MIPS address
  // materialization: %gp_rel, %hi/%lo, or a GOT access depending on the
  // relocation model, ABI, and symbol properties.
  EVT Ty = Op.getValueType();
  GlobalAddressSDNode *N = cast<GlobalAddressSDNode>(Op);
  const GlobalValue *GV = N->getGlobal();

  if (!isPositionIndependent()) {
    // NOTE(review): the argument closing this static_cast (the object-file
    // lowering accessor) is elided in this excerpt — confirm.
    const MipsTargetObjectFile *TLOF =
        static_cast<const MipsTargetObjectFile *>(
    const GlobalObject *GO = GV->getAliaseeObject();
    if (GO && TLOF->IsGlobalInSmallSection(GO, getTargetMachine()))
      // %gp_rel relocation
      return getAddrGPRel(N, SDLoc(N), Ty, DAG, ABI.IsN64());

    // %hi/%lo relocation
    return Subtarget.hasSym32() ? getAddrNonPIC(N, SDLoc(N), Ty, DAG)
                                  // %highest/%higher/%hi/%lo relocation
                                  : getAddrNonPICSym64(N, SDLoc(N), Ty, DAG);
  }

  // Every other architecture would use shouldAssumeDSOLocal in here, but
  // mips is special.
  // * In PIC code mips requires got loads even for local statics!
  // * To save on got entries, for local statics the got entry contains the
  //   page and an additional add instruction takes care of the low bits.
  // * It is legal to access a hidden symbol with a non hidden undefined,
  //   so one cannot guarantee that all access to a hidden symbol will know
  //   it is hidden.
  // * Mips linkers don't support creating a page and a full got entry for
  //   the same symbol.
  // * Given all that, we have to use a full got entry for hidden symbols :-(
  if (GV->hasLocalLinkage())
    return getAddrLocal(N, SDLoc(N), Ty, DAG, ABI.IsN32() || ABI.IsN64());

  // Large-GOT (XGOT) access for code compiled with -mxgot.
  if (Subtarget.useXGOT())
    return getAddrGlobalLargeGOT(
        DAG.getEntryNode(),
        MachinePointerInfo::getGOT(DAG.getMachineFunction()));

  // Regular GOT access for global symbols in PIC code.
  return getAddrGlobal(
      N, SDLoc(N), Ty, DAG,
      DAG.getEntryNode(), MachinePointerInfo::getGOT(DAG.getMachineFunction()));
}
2139
2140SDValue MipsTargetLowering::lowerBlockAddress(SDValue Op,
2141 SelectionDAG &DAG) const {
2142 BlockAddressSDNode *N = cast<BlockAddressSDNode>(Op);
2143 EVT Ty = Op.getValueType();
2144
2145 if (!isPositionIndependent())
2146 return Subtarget.hasSym32() ? getAddrNonPIC(N, SDLoc(N), Ty, DAG)
2147 : getAddrNonPICSym64(N, SDLoc(N), Ty, DAG);
2148
2149 return getAddrLocal(N, SDLoc(N), Ty, DAG, ABI.IsN32() || ABI.IsN64());
2150}
2151
SDValue MipsTargetLowering::
lowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const
{
  // If the relocation model is PIC, use the General Dynamic TLS Model or
  // Local Dynamic TLS model, otherwise use the Initial Exec or
  // Local Exec TLS Model.

  GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
  // Emulated TLS (e.g. for targets without native TLS) takes a separate path.
  if (DAG.getTarget().useEmulatedTLS())
    return LowerToTLSEmulatedModel(GA, DAG);

  SDLoc DL(GA);
  const GlobalValue *GV = GA->getGlobal();
  EVT PtrVT = getPointerTy(DAG.getDataLayout());

  // NOTE(review): 'model' is presumably obtained from
  // getTargetMachine().getTLSModel(GV) — the defining line is elided here.

  if (model == TLSModel::GeneralDynamic || model == TLSModel::LocalDynamic) {
    // General Dynamic and Local Dynamic TLS Model.
    // Both call __tls_get_addr; they differ in the relocation flag and in the
    // hi/lo fixup applied to the call result for Local Dynamic.
    unsigned Flag = (model == TLSModel::LocalDynamic) ? MipsII::MO_TLSLDM

    SDValue TGA = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, Flag);
                                     getGlobalReg(DAG, PtrVT), TGA);
    unsigned PtrSize = PtrVT.getSizeInBits();
    IntegerType *PtrTy = Type::getIntNTy(*DAG.getContext(), PtrSize);

    SDValue TlsGetAddr = DAG.getExternalSymbol("__tls_get_addr", PtrVT);

    // Build the single pointer-sized argument for the libcall.
    ArgListEntry Entry;
    Entry.Node = Argument;
    Entry.Ty = PtrTy;
    Args.push_back(Entry);

    // Issue the call to __tls_get_addr as a C-calling-convention libcall.
    CLI.setDebugLoc(DL)
        .setChain(DAG.getEntryNode())
        .setLibCallee(CallingConv::C, PtrTy, TlsGetAddr, std::move(Args));
    std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);

    SDValue Ret = CallResult.first;

    // General Dynamic: the call result is the final address.
    if (model != TLSModel::LocalDynamic)
      return Ret;

    // Local Dynamic: add the module-relative hi/lo offset to the call result.
    SDValue TGAHi = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0,
    SDValue Hi = DAG.getNode(MipsISD::TlsHi, DL, PtrVT, TGAHi);
    SDValue TGALo = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0,
    SDValue Lo = DAG.getNode(MipsISD::Lo, DL, PtrVT, TGALo);
    SDValue Add = DAG.getNode(ISD::ADD, DL, PtrVT, Hi, Ret);
    return DAG.getNode(ISD::ADD, DL, PtrVT, Add, Lo);
  }

  if (model == TLSModel::InitialExec) {
    // Initial Exec TLS Model
    // Load the thread-pointer-relative offset from the GOT.
    SDValue TGA = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0,
    TGA = DAG.getNode(MipsISD::Wrapper, DL, PtrVT, getGlobalReg(DAG, PtrVT),
                      TGA);
    Offset =
        DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), TGA, MachinePointerInfo());
  } else {
    // Local Exec TLS Model
    // Offset is a link-time constant composed from hi/lo relocations.
    assert(model == TLSModel::LocalExec);
    SDValue TGAHi = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0,
    SDValue TGALo = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0,
    SDValue Hi = DAG.getNode(MipsISD::TlsHi, DL, PtrVT, TGAHi);
    SDValue Lo = DAG.getNode(MipsISD::Lo, DL, PtrVT, TGALo);
    Offset = DAG.getNode(ISD::ADD, DL, PtrVT, Hi, Lo);
  }

  // Final address = thread pointer + computed offset.
  return DAG.getNode(ISD::ADD, DL, PtrVT, ThreadPointer, Offset);
}
2233
2234SDValue MipsTargetLowering::
2235lowerJumpTable(SDValue Op, SelectionDAG &DAG) const
2236{
2237 JumpTableSDNode *N = cast<JumpTableSDNode>(Op);
2238 EVT Ty = Op.getValueType();
2239
2240 if (!isPositionIndependent())
2241 return Subtarget.hasSym32() ? getAddrNonPIC(N, SDLoc(N), Ty, DAG)
2242 : getAddrNonPICSym64(N, SDLoc(N), Ty, DAG);
2243
2244 return getAddrLocal(N, SDLoc(N), Ty, DAG, ABI.IsN32() || ABI.IsN64());
2245}
2246
SDValue MipsTargetLowering::
lowerConstantPool(SDValue Op, SelectionDAG &DAG) const
{
  // Lower a constant-pool address, preferring %gp_rel for constants placed in
  // the small data section under the static relocation model.
  ConstantPoolSDNode *N = cast<ConstantPoolSDNode>(Op);
  EVT Ty = Op.getValueType();

  if (!isPositionIndependent()) {
    // NOTE(review): the argument closing this static_cast (the object-file
    // lowering accessor) is elided in this excerpt — confirm.
    const MipsTargetObjectFile *TLOF =
        static_cast<const MipsTargetObjectFile *>(

    if (TLOF->IsConstantInSmallSection(DAG.getDataLayout(), N->getConstVal(),
      // %gp_rel relocation
      return getAddrGPRel(N, SDLoc(N), Ty, DAG, ABI.IsN64());

    // Static model without small-section placement: hi/lo materialization.
    return Subtarget.hasSym32() ? getAddrNonPIC(N, SDLoc(N), Ty, DAG)
                                : getAddrNonPICSym64(N, SDLoc(N), Ty, DAG);
  }

  // PIC: GOT-local access.
  return getAddrLocal(N, SDLoc(N), Ty, DAG, ABI.IsN32() || ABI.IsN64());
}
2269
SDValue MipsTargetLowering::lowerVASTART(SDValue Op, SelectionDAG &DAG) const {
  // NOTE(review): 'MF' (the MachineFunction) is presumably obtained from
  // DAG.getMachineFunction() — the defining line is elided in this excerpt.
  MipsFunctionInfo *FuncInfo = MF.getInfo<MipsFunctionInfo>();

  SDLoc DL(Op);
  // Frame index of the first variable argument save slot.
  SDValue FI = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(),

  // vastart just stores the address of the VarArgsFrameIndex slot into the
  // memory location argument.
  const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
  return DAG.getStore(Op.getOperand(0), DL, FI, Op.getOperand(1),
                      MachinePointerInfo(SV));
}
2284
SDValue MipsTargetLowering::lowerVAARG(SDValue Op, SelectionDAG &DAG) const {
  // Lower ISD::VAARG: load the current va_list pointer, align it if the
  // argument requires over-alignment, advance the pointer past the argument
  // slot, store it back, then load the argument value itself.
  SDNode *Node = Op.getNode();
  EVT VT = Node->getValueType(0);
  SDValue Chain = Node->getOperand(0);
  SDValue VAListPtr = Node->getOperand(1);
  const Align Align =
      llvm::MaybeAlign(Node->getConstantOperandVal(3)).valueOrOne();
  const Value *SV = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
  SDLoc DL(Node);
  // N32/N64 pass arguments in 8-byte slots, O32 in 4-byte slots.
  unsigned ArgSlotSizeInBytes = (ABI.IsN32() || ABI.IsN64()) ? 8 : 4;

  SDValue VAListLoad = DAG.getLoad(getPointerTy(DAG.getDataLayout()), DL, Chain,
                                   VAListPtr, MachinePointerInfo(SV));
  SDValue VAList = VAListLoad;

  // Re-align the pointer if necessary.
  // It should only ever be necessary for 64-bit types on O32 since the minimum
  // argument alignment is the same as the maximum type alignment for N32/N64.
  //
  // FIXME: We currently align too often. The code generator doesn't notice
  //        when the pointer is still aligned from the last va_arg (or pair of
  //        va_args for the i64 on O32 case).
  // NOTE(review): the guard condition opening this block is elided in this
  // excerpt — presumably it compares Align to the minimum stack argument
  // alignment; confirm.
    VAList = DAG.getNode(
        ISD::ADD, DL, VAList.getValueType(), VAList,
        DAG.getConstant(Align.value() - 1, DL, VAList.getValueType()));

    // Round down to the alignment boundary: (p + a-1) & -a.
    VAList = DAG.getNode(
        ISD::AND, DL, VAList.getValueType(), VAList,
        DAG.getConstant(-(int64_t)Align.value(), DL, VAList.getValueType()));
  }

  // Increment the pointer, VAList, to the next vaarg.
  auto &TD = DAG.getDataLayout();
  unsigned ArgSizeInBytes =
  SDValue Tmp3 =
      DAG.getNode(ISD::ADD, DL, VAList.getValueType(), VAList,
                  DAG.getConstant(alignTo(ArgSizeInBytes, ArgSlotSizeInBytes),
                                  DL, VAList.getValueType()));
  // Store the incremented VAList to the legalized pointer
  Chain = DAG.getStore(VAListLoad.getValue(1), DL, Tmp3, VAListPtr,
                       MachinePointerInfo(SV));

  // In big-endian mode we must adjust the pointer when the load size is smaller
  // than the argument slot size. We must also reduce the known alignment to
  // match. For example in the N64 ABI, we must add 4 bytes to the offset to get
  // the correct half of the slot, and reduce the alignment from 8 (slot
  // alignment) down to 4 (type alignment).
  if (!Subtarget.isLittle() && ArgSizeInBytes < ArgSlotSizeInBytes) {
    unsigned Adjustment = ArgSlotSizeInBytes - ArgSizeInBytes;
    VAList = DAG.getNode(ISD::ADD, DL, VAListPtr.getValueType(), VAList,
                         DAG.getIntPtrConstant(Adjustment, DL));
  }
  // Load the actual argument out of the pointer VAList
  return DAG.getLoad(VT, DL, Chain, VAList, MachinePointerInfo());
}
2342
                             bool HasExtractInsert) {
  // 32-bit FCOPYSIGN lowering: copy the sign bit of operand 1 (Y) into
  // operand 0 (X), operating on the i32 word that holds the sign bit.
  EVT TyX = Op.getOperand(0).getValueType();
  EVT TyY = Op.getOperand(1).getValueType();
  SDLoc DL(Op);
  SDValue Const1 = DAG.getConstant(1, DL, MVT::i32);
  SDValue Const31 = DAG.getConstant(31, DL, MVT::i32);
  SDValue Res;

  // If operand is of type f64, extract the upper 32-bit. Otherwise, bitcast it
  // to i32.
  SDValue X = (TyX == MVT::f32) ?
    DAG.getNode(ISD::BITCAST, DL, MVT::i32, Op.getOperand(0)) :
    DAG.getNode(MipsISD::ExtractElementF64, DL, MVT::i32, Op.getOperand(0),
                Const1);
  SDValue Y = (TyY == MVT::f32) ?
    DAG.getNode(ISD::BITCAST, DL, MVT::i32, Op.getOperand(1)) :
    DAG.getNode(MipsISD::ExtractElementF64, DL, MVT::i32, Op.getOperand(1),
                Const1);

  if (HasExtractInsert) {
    // ext  E, Y, 31, 1  ; extract bit31 of Y
    // ins  X, E, 31, 1  ; insert extracted bit at bit31 of X
    SDValue E = DAG.getNode(MipsISD::Ext, DL, MVT::i32, Y, Const31, Const1);
    Res = DAG.getNode(MipsISD::Ins, DL, MVT::i32, E, Const31, Const1, X);
  } else {
    // Shift-based fallback: clear X's sign bit, isolate Y's sign bit, OR.
    // sll SllX, X, 1
    // srl SrlX, SllX, 1
    // srl SrlY, Y, 31
    // sll SllY, SrlY, 31
    // or  Or, SrlX, SllY
    SDValue SllX = DAG.getNode(ISD::SHL, DL, MVT::i32, X, Const1);
    SDValue SrlX = DAG.getNode(ISD::SRL, DL, MVT::i32, SllX, Const1);
    SDValue SrlY = DAG.getNode(ISD::SRL, DL, MVT::i32, Y, Const31);
    SDValue SllY = DAG.getNode(ISD::SHL, DL, MVT::i32, SrlY, Const31);
    Res = DAG.getNode(ISD::OR, DL, MVT::i32, SrlX, SllY);
  }

  if (TyX == MVT::f32)
    return DAG.getNode(ISD::BITCAST, DL, Op.getOperand(0).getValueType(), Res);

  // f64 result: recombine the untouched low word with the patched high word.
  SDValue LowX = DAG.getNode(MipsISD::ExtractElementF64, DL, MVT::i32,
                             Op.getOperand(0),
                             DAG.getConstant(0, DL, MVT::i32));
  return DAG.getNode(MipsISD::BuildPairF64, DL, MVT::f64, LowX, Res);
}
2389
                             bool HasExtractInsert) {
  // 64-bit FCOPYSIGN lowering: operands may be f32 or f64, so the integer
  // views may differ in width; extend/truncate the extracted sign bit to
  // match X's width before inserting it.
  unsigned WidthX = Op.getOperand(0).getValueSizeInBits();
  unsigned WidthY = Op.getOperand(1).getValueSizeInBits();
  EVT TyX = MVT::getIntegerVT(WidthX), TyY = MVT::getIntegerVT(WidthY);
  SDLoc DL(Op);
  SDValue Const1 = DAG.getConstant(1, DL, MVT::i32);

  // Bitcast to integer nodes.
  SDValue X = DAG.getNode(ISD::BITCAST, DL, TyX, Op.getOperand(0));
  SDValue Y = DAG.getNode(ISD::BITCAST, DL, TyY, Op.getOperand(1));

  if (HasExtractInsert) {
    // ext  E, Y, width(Y) - 1, 1  ; extract bit width(Y)-1 of Y
    // ins  X, E, width(X) - 1, 1  ; insert extracted bit at bit width(X)-1 of X
    SDValue E = DAG.getNode(MipsISD::Ext, DL, TyY, Y,
                            DAG.getConstant(WidthY - 1, DL, MVT::i32), Const1);

    // Match X's width before the insert.
    if (WidthX > WidthY)
      E = DAG.getNode(ISD::ZERO_EXTEND, DL, TyX, E);
    else if (WidthY > WidthX)
      E = DAG.getNode(ISD::TRUNCATE, DL, TyX, E);

    SDValue I = DAG.getNode(MipsISD::Ins, DL, TyX, E,
                            DAG.getConstant(WidthX - 1, DL, MVT::i32), Const1,
                            X);
    return DAG.getNode(ISD::BITCAST, DL, Op.getOperand(0).getValueType(), I);
  }

  // Shift-based fallback: clear X's sign bit, isolate Y's sign bit (widened
  // to X's type), shift it into position, and OR the two together.
  // (d)sll SllX, X, 1
  // (d)srl SrlX, SllX, 1
  // (d)srl SrlY, Y, width(Y)-1
  // (d)sll SllY, SrlY, width(X)-1
  // or    Or, SrlX, SllY
  SDValue SllX = DAG.getNode(ISD::SHL, DL, TyX, X, Const1);
  SDValue SrlX = DAG.getNode(ISD::SRL, DL, TyX, SllX, Const1);
  SDValue SrlY = DAG.getNode(ISD::SRL, DL, TyY, Y,
                             DAG.getConstant(WidthY - 1, DL, MVT::i32));

  if (WidthX > WidthY)
    SrlY = DAG.getNode(ISD::ZERO_EXTEND, DL, TyX, SrlY);
  else if (WidthY > WidthX)
    SrlY = DAG.getNode(ISD::TRUNCATE, DL, TyX, SrlY);

  SDValue SllY = DAG.getNode(ISD::SHL, DL, TyX, SrlY,
                             DAG.getConstant(WidthX - 1, DL, MVT::i32));
  SDValue Or = DAG.getNode(ISD::OR, DL, TyX, SrlX, SllY);
  return DAG.getNode(ISD::BITCAST, DL, Op.getOperand(0).getValueType(), Or);
}
2439
SDValue
MipsTargetLowering::lowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const {
  // Dispatch to the GPR64 or GPR32 FCOPYSIGN expansion.
  // NOTE(review): both return statements (calls to the 64-bit and 32-bit
  // helpers) are elided in this excerpt — confirm against the full source.
  if (Subtarget.isGP64bit())

}
2447
SDValue MipsTargetLowering::lowerFABS32(SDValue Op, SelectionDAG &DAG,
                                        bool HasExtractInsert) const {
  // FABS lowering for 32-bit GPRs: clear the sign bit of the i32 word that
  // holds it (the upper word for f64).
  SDLoc DL(Op);
  SDValue Res, Const1 = DAG.getConstant(1, DL, MVT::i32);

  // NOTE(review): the guard condition for this early return (native FAbs
  // support) is elided in this excerpt — confirm.
    return DAG.getNode(MipsISD::FAbs, DL, Op.getValueType(), Op.getOperand(0));

  // If operand is of type f64, extract the upper 32-bit. Otherwise, bitcast it
  // to i32.
  SDValue X = (Op.getValueType() == MVT::f32)
                  ? DAG.getNode(ISD::BITCAST, DL, MVT::i32, Op.getOperand(0))
                  : DAG.getNode(MipsISD::ExtractElementF64, DL, MVT::i32,
                                Op.getOperand(0), Const1);

  // Clear MSB.
  if (HasExtractInsert)
    Res = DAG.getNode(MipsISD::Ins, DL, MVT::i32,
                      DAG.getRegister(Mips::ZERO, MVT::i32),
                      DAG.getConstant(31, DL, MVT::i32), Const1, X);
  else {
    // TODO: Provide DAG patterns which transform (and x, cst)
    // back to a (shl (srl x (clz cst)) (clz cst)) sequence.
    SDValue SllX = DAG.getNode(ISD::SHL, DL, MVT::i32, X, Const1);
    Res = DAG.getNode(ISD::SRL, DL, MVT::i32, SllX, Const1);
  }

  if (Op.getValueType() == MVT::f32)
    return DAG.getNode(ISD::BITCAST, DL, MVT::f32, Res);

  // FIXME: For mips32r2, the sequence of (BuildPairF64 (ins (ExtractElementF64
  // Op 1), $zero, 31 1) (ExtractElementF64 Op 0)) and the Op has one use, we
  // should be able to drop the usage of mfc1/mtc1 and rewrite the register in
  // place.
  SDValue LowX =
      DAG.getNode(MipsISD::ExtractElementF64, DL, MVT::i32, Op.getOperand(0),
                  DAG.getConstant(0, DL, MVT::i32));
  return DAG.getNode(MipsISD::BuildPairF64, DL, MVT::f64, LowX, Res);
}
2487
SDValue MipsTargetLowering::lowerFABS64(SDValue Op, SelectionDAG &DAG,
                                        bool HasExtractInsert) const {
  // FABS lowering for 64-bit GPRs: clear bit 63 of the i64 view of the value.
  SDLoc DL(Op);
  SDValue Res, Const1 = DAG.getConstant(1, DL, MVT::i32);

  // NOTE(review): the guard condition for this early return (native FAbs
  // support) is elided in this excerpt — confirm.
    return DAG.getNode(MipsISD::FAbs, DL, Op.getValueType(), Op.getOperand(0));

  // Bitcast to integer node.
  SDValue X = DAG.getNode(ISD::BITCAST, DL, MVT::i64, Op.getOperand(0));

  // Clear MSB.
  if (HasExtractInsert)
    Res = DAG.getNode(MipsISD::Ins, DL, MVT::i64,
                      DAG.getRegister(Mips::ZERO_64, MVT::i64),
                      DAG.getConstant(63, DL, MVT::i32), Const1, X);
  else {
    // Shift-based fallback: shift left then logically right by one.
    SDValue SllX = DAG.getNode(ISD::SHL, DL, MVT::i64, X, Const1);
    Res = DAG.getNode(ISD::SRL, DL, MVT::i64, SllX, Const1);
  }

  return DAG.getNode(ISD::BITCAST, DL, MVT::f64, Res);
}
2511
2512SDValue MipsTargetLowering::lowerFABS(SDValue Op, SelectionDAG &DAG) const {
2513 if ((ABI.IsN32() || ABI.IsN64()) && (Op.getValueType() == MVT::f64))
2514 return lowerFABS64(Op, DAG, Subtarget.hasExtractInsert());
2515
2516 return lowerFABS32(Op, DAG, Subtarget.hasExtractInsert());
2517}
2518
SDValue MipsTargetLowering::
lowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const {
  // Lower ISD::FRAMEADDR. Only depth 0 (the current frame) is supported.
  // check the depth
  if (Op.getConstantOperandVal(0) != 0) {
    // FIXME: the message says "return address" but this lowers the frame
    // address — looks copy-pasted from lowerRETURNADDR.
    DAG.getContext()->emitError(
        "return address can be determined only for current frame");
    return SDValue();
  }

  // NOTE(review): 'MFI' is presumably the MachineFrameInfo of the current
  // function — the defining line is elided in this excerpt.
  MFI.setFrameAddressIsTaken(true);
  EVT VT = Op.getValueType();
  SDLoc DL(Op);
  // The frame address is simply the contents of the frame pointer register.
  SDValue FrameAddr = DAG.getCopyFromReg(
      DAG.getEntryNode(), DL, ABI.IsN64() ? Mips::FP_64 : Mips::FP, VT);
  return FrameAddr;
}
2536
SDValue MipsTargetLowering::lowerRETURNADDR(SDValue Op,
                                            SelectionDAG &DAG) const {
  // Lower ISD::RETURNADDR. Only depth 0 (the current frame) is supported.
  // NOTE(review): the guard condition for this early return (depth-argument
  // verification) is elided in this excerpt — confirm.
    return SDValue();

  // check the depth
  if (Op.getConstantOperandVal(0) != 0) {
    DAG.getContext()->emitError(
        "return address can be determined only for current frame");
    return SDValue();
  }

  MachineFrameInfo &MFI = MF.getFrameInfo();
  MVT VT = Op.getSimpleValueType();
  unsigned RA = ABI.IsN64() ? Mips::RA_64 : Mips::RA;
  MFI.setReturnAddressIsTaken(true);

  // Return RA, which contains the return address. Mark it an implicit live-in.
  // NOTE(review): 'Reg' is presumably the virtual register created by marking
  // RA live-in — the defining line is elided in this excerpt.
  return DAG.getCopyFromReg(DAG.getEntryNode(), SDLoc(Op), Reg, VT);
}
2559
// An EH_RETURN is the result of lowering llvm.eh.return which in turn is
// generated from __builtin_eh_return (offset, handler)
// The effect of this is to adjust the stack pointer by "offset"
// and then branch to "handler".
SDValue MipsTargetLowering::lowerEH_RETURN(SDValue Op, SelectionDAG &DAG)
                                                                     const {
  // NOTE(review): the declarations of 'MF' and 'MipsFI' (the function-info
  // object) are elided in this excerpt — confirm.
  MipsFI->setCallsEhReturn();
  SDValue Chain     = Op.getOperand(0);
  SDValue Offset    = Op.getOperand(1);
  SDValue Handler   = Op.getOperand(2);
  SDLoc DL(Op);
  EVT Ty = ABI.IsN64() ? MVT::i64 : MVT::i32;

  // Store stack offset in V1, store jump target in V0. Glue CopyToReg and
  // EH_RETURN nodes, so that instructions are emitted back-to-back.
  unsigned OffsetReg = ABI.IsN64() ? Mips::V1_64 : Mips::V1;
  unsigned AddrReg = ABI.IsN64() ? Mips::V0_64 : Mips::V0;
  Chain = DAG.getCopyToReg(Chain, DL, OffsetReg, Offset, SDValue());
  Chain = DAG.getCopyToReg(Chain, DL, AddrReg, Handler, Chain.getValue(1));
  return DAG.getNode(MipsISD::EH_RETURN, DL, MVT::Other, Chain,
                     DAG.getRegister(OffsetReg, Ty),
                     DAG.getRegister(AddrReg, getPointerTy(MF.getDataLayout())),
                     Chain.getValue(1));
}
2587
2588SDValue MipsTargetLowering::lowerATOMIC_FENCE(SDValue Op,
2589 SelectionDAG &DAG) const {
2590 // FIXME: Need pseudo-fence for 'singlethread' fences
2591 // FIXME: Set SType for weaker fences where supported/appropriate.
2592 unsigned SType = 0;
2593 SDLoc DL(Op);
2594 return DAG.getNode(MipsISD::Sync, DL, MVT::Other, Op.getOperand(0),
2595 DAG.getConstant(SType, DL, MVT::i32));
2596}
2597
2598SDValue MipsTargetLowering::lowerShiftLeftParts(SDValue Op,
2599 SelectionDAG &DAG) const {
2600 SDLoc DL(Op);
2601 MVT VT = Subtarget.isGP64bit() ? MVT::i64 : MVT::i32;
2602
2603 SDValue Lo = Op.getOperand(0), Hi = Op.getOperand(1);
2604 SDValue Shamt = Op.getOperand(2);
2605 // if shamt < (VT.bits):
2606 // lo = (shl lo, shamt)
2607 // hi = (or (shl hi, shamt) (srl (srl lo, 1), (xor shamt, (VT.bits-1))))
2608 // else:
2609 // lo = 0
2610 // hi = (shl lo, shamt[4:0])
2611 SDValue Not =
2612 DAG.getNode(ISD::XOR, DL, MVT::i32, Shamt,
2613 DAG.getConstant(VT.getSizeInBits() - 1, DL, MVT::i32));
2614 SDValue ShiftRight1Lo = DAG.getNode(ISD::SRL, DL, VT, Lo,
2615 DAG.getConstant(1, DL, VT));
2616 SDValue ShiftRightLo = DAG.getNode(ISD::SRL, DL, VT, ShiftRight1Lo, Not);
2617 SDValue ShiftLeftHi = DAG.getNode(ISD::SHL, DL, VT, Hi, Shamt);
2618 SDValue Or = DAG.getNode(ISD::OR, DL, VT, ShiftLeftHi, ShiftRightLo);
2619 SDValue ShiftLeftLo = DAG.getNode(ISD::SHL, DL, VT, Lo, Shamt);
2620 SDValue Cond = DAG.getNode(ISD::AND, DL, MVT::i32, Shamt,
2621 DAG.getConstant(VT.getSizeInBits(), DL, MVT::i32));
2622 Lo = DAG.getNode(ISD::SELECT, DL, VT, Cond,
2623 DAG.getConstant(0, DL, VT), ShiftLeftLo);
2624 Hi = DAG.getNode(ISD::SELECT, DL, VT, Cond, ShiftLeftLo, Or);
2625
2626 SDValue Ops[2] = {Lo, Hi};
2627 return DAG.getMergeValues(Ops, DL);
2628}
2629
SDValue MipsTargetLowering::lowerShiftRightParts(SDValue Op, SelectionDAG &DAG,
                                                 bool IsSRA) const {
  // Expand a double-width shift-right (SRA_PARTS/SRL_PARTS) into single-width
  // operations; IsSRA selects arithmetic vs logical semantics.
  SDLoc DL(Op);
  SDValue Lo = Op.getOperand(0), Hi = Op.getOperand(1);
  SDValue Shamt = Op.getOperand(2);
  MVT VT = Subtarget.isGP64bit() ? MVT::i64 : MVT::i32;

  // if shamt < (VT.bits):
  //   lo = (or (shl (shl hi, 1), (xor shamt, (VT.bits-1))) (srl lo, shamt))
  //   if isSRA:
  //     hi = (sra hi, shamt)
  //   else:
  //     hi = (srl hi, shamt)
  // else:
  //   if isSRA:
  //     lo = (sra hi, shamt[4:0])
  //     hi = (sra hi, 31)
  //   else:
  //     lo = (srl hi, shamt[4:0])
  //     hi = 0
  SDValue Not =
      DAG.getNode(ISD::XOR, DL, MVT::i32, Shamt,
                  DAG.getConstant(VT.getSizeInBits() - 1, DL, MVT::i32));
  SDValue ShiftLeft1Hi = DAG.getNode(ISD::SHL, DL, VT, Hi,
                                     DAG.getConstant(1, DL, VT));
  SDValue ShiftLeftHi = DAG.getNode(ISD::SHL, DL, VT, ShiftLeft1Hi, Not);
  SDValue ShiftRightLo = DAG.getNode(ISD::SRL, DL, VT, Lo, Shamt);
  SDValue Or = DAG.getNode(ISD::OR, DL, VT, ShiftLeftHi, ShiftRightLo);
  SDValue ShiftRightHi = DAG.getNode(IsSRA ? ISD::SRA : ISD::SRL,
                                     DL, VT, Hi, Shamt);
  // Bit VT.bits of the shift amount distinguishes the small/large shift cases.
  SDValue Cond = DAG.getNode(ISD::AND, DL, MVT::i32, Shamt,
                             DAG.getConstant(VT.getSizeInBits(), DL, MVT::i32));
  // Sign-fill value used for the high part of a large arithmetic shift.
  SDValue Ext = DAG.getNode(ISD::SRA, DL, VT, Hi,
                            DAG.getConstant(VT.getSizeInBits() - 1, DL, VT));

  // Targets without conditional moves (pre-MIPS4/MIPS32) use a pseudo select.
  // NOTE(review): the return statement for this branch (a pseudo-select node)
  // is elided in this excerpt — confirm.
  if (!(Subtarget.hasMips4() || Subtarget.hasMips32())) {
    SDVTList VTList = DAG.getVTList(VT, VT);
                       DL, VTList, Cond, ShiftRightHi,
                       IsSRA ? Ext : DAG.getConstant(0, DL, VT), Or,
                       ShiftRightHi);
  }

  Lo = DAG.getNode(ISD::SELECT, DL, VT, Cond, ShiftRightHi, Or);
  Hi = DAG.getNode(ISD::SELECT, DL, VT, Cond,
                   IsSRA ? Ext : DAG.getConstant(0, DL, VT), ShiftRightHi);

  SDValue Ops[2] = {Lo, Hi};
  return DAG.getMergeValues(Ops, DL);
}
2681
2682static SDValue createLoadLR(unsigned Opc, SelectionDAG &DAG, LoadSDNode *LD,
2683 SDValue Chain, SDValue Src, unsigned Offset) {
2684 SDValue Ptr = LD->getBasePtr();
2685 EVT VT = LD->getValueType(0), MemVT = LD->getMemoryVT();
2686 EVT BasePtrVT = Ptr.getValueType();
2687 SDLoc DL(LD);
2688 SDVTList VTList = DAG.getVTList(VT, MVT::Other);
2689
2690 if (Offset)
2691 Ptr = DAG.getNode(ISD::ADD, DL, BasePtrVT, Ptr,
2692 DAG.getConstant(Offset, DL, BasePtrVT));
2693
2694 SDValue Ops[] = { Chain, Ptr, Src };
2695 return DAG.getMemIntrinsicNode(Opc, DL, VTList, Ops, MemVT,
2696 LD->getMemOperand());
2697}
2698
// Expand an unaligned 32 or 64-bit integer load node.
  // NOTE(review): the function signature line (MipsTargetLowering::lowerLOAD)
  // is elided in this excerpt — confirm.
  LoadSDNode *LD = cast<LoadSDNode>(Op);
  EVT MemVT = LD->getMemoryVT();

  // NOTE(review): the guard condition for this early return (targets that
  // support unaligned access natively) is elided in this excerpt — confirm.
    return Op;

  // Return if load is aligned or if MemVT is neither i32 nor i64.
  if ((LD->getAlign().value() >= (MemVT.getSizeInBits() / 8)) ||
      ((MemVT != MVT::i32) && (MemVT != MVT::i64)))
    return SDValue();

  bool IsLittle = Subtarget.isLittle();
  EVT VT = Op.getValueType();
  ISD::LoadExtType ExtType = LD->getExtensionType();
  SDValue Chain = LD->getChain(), Undef = DAG.getUNDEF(VT);

  assert((VT == MVT::i32) || (VT == MVT::i64));

  // Expand
  //  (set dst, (i64 (load baseptr)))
  // to
  //  (set tmp, (ldl (add baseptr, 7), undef))
  //  (set dst, (ldr baseptr, tmp))
  if ((VT == MVT::i64) && (ExtType == ISD::NON_EXTLOAD)) {
    SDValue LDL = createLoadLR(MipsISD::LDL, DAG, LD, Chain, Undef,
                               IsLittle ? 7 : 0);
    return createLoadLR(MipsISD::LDR, DAG, LD, LDL.getValue(1), LDL,
                        IsLittle ? 0 : 7);
  }

  // 32-bit case: LWL reads the "high" part, LWR merges in the rest.
  SDValue LWL = createLoadLR(MipsISD::LWL, DAG, LD, Chain, Undef,
                             IsLittle ? 3 : 0);
  SDValue LWR = createLoadLR(MipsISD::LWR, DAG, LD, LWL.getValue(1), LWL,
                             IsLittle ? 0 : 3);

  // Expand
  //  (set dst, (i32 (load baseptr))) or
  //  (set dst, (i64 (sextload baseptr))) or
  //  (set dst, (i64 (extload baseptr)))
  // to
  //  (set tmp, (lwl (add baseptr, 3), undef))
  //  (set dst, (lwr baseptr, tmp))
  if ((VT == MVT::i32) || (ExtType == ISD::SEXTLOAD) ||
      (ExtType == ISD::EXTLOAD))
    return LWR;

  assert((VT == MVT::i64) && (ExtType == ISD::ZEXTLOAD));

  // Expand
  //  (set dst, (i64 (zextload baseptr)))
  // to
  //  (set tmp0, (lwl (add baseptr, 3), undef))
  //  (set tmp1, (lwr baseptr, tmp0))
  //  (set tmp2, (shl tmp1, 32))
  //  (set dst, (srl tmp2, 32))
  SDLoc DL(LD);
  SDValue Const32 = DAG.getConstant(32, DL, MVT::i32);
  SDValue SLL = DAG.getNode(ISD::SHL, DL, MVT::i64, LWR, Const32);
  SDValue SRL = DAG.getNode(ISD::SRL, DL, MVT::i64, SLL, Const32);
  SDValue Ops[] = { SRL, LWR.getValue(1) };
  return DAG.getMergeValues(Ops, DL);
}
2763
2764static SDValue createStoreLR(unsigned Opc, SelectionDAG &DAG, StoreSDNode *SD,
2765 SDValue Chain, unsigned Offset) {
2766 SDValue Ptr = SD->getBasePtr(), Value = SD->getValue();
2767 EVT MemVT = SD->getMemoryVT(), BasePtrVT = Ptr.getValueType();
2768 SDLoc DL(SD);
2769 SDVTList VTList = DAG.getVTList(MVT::Other);
2770
2771 if (Offset)
2772 Ptr = DAG.getNode(ISD::ADD, DL, BasePtrVT, Ptr,
2773 DAG.getConstant(Offset, DL, BasePtrVT));
2774
2775 SDValue Ops[] = { Chain, Value, Ptr };
2776 return DAG.getMemIntrinsicNode(Opc, DL, VTList, Ops, MemVT,
2777 SD->getMemOperand());
2778}
2779
// Expand an unaligned 32 or 64-bit integer store node.
  // NOTE(review): the function signature line (static lowerUnalignedIntStore)
  // is elided in this excerpt — confirm.
                                      bool IsLittle) {
  SDValue Value = SD->getValue(), Chain = SD->getChain();
  EVT VT = Value.getValueType();

  // Expand
  //  (store val, baseptr) or
  //  (truncstore val, baseptr)
  // to
  //  (swl val, (add baseptr, 3))
  //  (swr val, baseptr)
  if ((VT == MVT::i32) || SD->isTruncatingStore()) {
    SDValue SWL = createStoreLR(MipsISD::SWL, DAG, SD, Chain,
                                IsLittle ? 3 : 0);
    return createStoreLR(MipsISD::SWR, DAG, SD, SWL, IsLittle ? 0 : 3);
  }

  assert(VT == MVT::i64);

  // Expand
  //  (store val, baseptr)
  // to
  //  (sdl val, (add baseptr, 7))
  //  (sdr val, baseptr)
  SDValue SDL = createStoreLR(MipsISD::SDL, DAG, SD, Chain, IsLittle ? 7 : 0);
  return createStoreLR(MipsISD::SDR, DAG, SD, SDL, IsLittle ? 0 : 7);
}
2808
// Lower (store (fp_to_sint $fp) $ptr) to (store (TruncIntFP $fp), $ptr).
  // NOTE(review): the function signature line (static lowerFP_TO_SINT_STORE)
  // is elided in this excerpt — confirm.
                                 bool SingleFloat) {
  SDValue Val = SD->getValue();

  // Bail out unless the stored value is an FP->int conversion that this
  // target can perform in the FPU (64-bit results need double-float support).
  if (Val.getOpcode() != ISD::FP_TO_SINT ||
      (Val.getValueSizeInBits() > 32 && SingleFloat))
    return SDValue();

  // Convert in the FPU and store the raw FP register contents.
  // NOTE(review): the definition of 'FPTy' (the FP type matching the result
  // width) is elided in this excerpt — confirm.
  SDValue Tr = DAG.getNode(MipsISD::TruncIntFP, SDLoc(Val), FPTy,
                           Val.getOperand(0));
  return DAG.getStore(SD->getChain(), SDLoc(SD), Tr, SD->getBasePtr(),
                      SD->getPointerInfo(), SD->getAlign(),
                      SD->getMemOperand()->getFlags());
}
2825
  // NOTE(review): the function signature line (MipsTargetLowering::lowerSTORE)
  // is elided in this excerpt — confirm.
  StoreSDNode *SD = cast<StoreSDNode>(Op);
  EVT MemVT = SD->getMemoryVT();

  // Lower unaligned integer stores.
  // NOTE(review): the condition's first clause (targets lacking native
  // unaligned access) and the final fallthrough return are elided here.
      (SD->getAlign().value() < (MemVT.getSizeInBits() / 8)) &&
      ((MemVT == MVT::i32) || (MemVT == MVT::i64)))
    return lowerUnalignedIntStore(SD, DAG, Subtarget.isLittle());

}
2838
SDValue MipsTargetLowering::lowerEH_DWARF_CFA(SDValue Op,
                                              SelectionDAG &DAG) const {

  // Return a fixed StackObject with offset 0 which points to the old stack
  // pointer.
  // NOTE(review): the declaration of 'MFI' (the MachineFrameInfo) is elided
  // in this excerpt — confirm.
  EVT ValTy = Op->getValueType(0);
  int FI = MFI.CreateFixedObject(Op.getValueSizeInBits() / 8, 0, false);
  return DAG.getFrameIndex(FI, ValTy);
}
2849
2850SDValue MipsTargetLowering::lowerFP_TO_SINT(SDValue Op,
2851 SelectionDAG &DAG) const {
2852 if (Op.getValueSizeInBits() > 32 && Subtarget.isSingleFloat())
2853 return SDValue();
2854
2855 EVT FPTy = EVT::getFloatingPointVT(Op.getValueSizeInBits());
2856 SDValue Trunc = DAG.getNode(MipsISD::TruncIntFP, SDLoc(Op), FPTy,
2857 Op.getOperand(0));
2858 return DAG.getNode(ISD::BITCAST, SDLoc(Op), Op.getValueType(), Trunc);
2859}
2860
2861//===----------------------------------------------------------------------===//
2862// Calling Convention Implementation
2863//===----------------------------------------------------------------------===//
2864
2865//===----------------------------------------------------------------------===//
2866// TODO: Implement a generic logic using tblgen that can support this.
2867// Mips O32 ABI rules:
2868// ---
2869// i32 - Passed in A0, A1, A2, A3 and stack
2870// f32 - Only passed in f32 registers if no int reg has been used yet to hold
2871// an argument. Otherwise, passed in A1, A2, A3 and stack.
2872// f64 - Only passed in two aliased f32 registers if no int reg has been used
2873// yet to hold an argument. Otherwise, use A2, A3 and stack. If A1 is
2874// not used, it must be shadowed. If only A3 is available, shadow it and
2875// go to stack.
2876// vXiX - Received as scalarized i32s, passed in A0 - A3 and the stack.
2877// vXf32 - Passed in either a pair of registers {A0, A1}, {A2, A3} or {A0 - A3}
2878// with the remainder spilled to the stack.
2879// vXf64 - Passed in either {A0, A1, A2, A3} or {A2, A3} and in both cases
2880// spilling the remainder to the stack.
2881//
2882// For vararg functions, all arguments are passed in A0, A1, A2, A3 and stack.
2883//===----------------------------------------------------------------------===//
2884
2885static bool CC_MipsO32(unsigned ValNo, MVT ValVT, MVT LocVT,
2886 CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags,
2887 CCState &State, ArrayRef<MCPhysReg> F64Regs) {
2888 const MipsSubtarget &Subtarget = static_cast<const MipsSubtarget &>(
2890
2891 static const MCPhysReg IntRegs[] = { Mips::A0, Mips::A1, Mips::A2, Mips::A3 };
2892
2893 const MipsCCState * MipsState = static_cast<MipsCCState *>(&State);
2894
2895 static const MCPhysReg F32Regs[] = { Mips::F12, Mips::F14 };
2896
2897 static const MCPhysReg FloatVectorIntRegs[] = { Mips::A0, Mips::A2 };
2898
2899 // Do not process byval args here.
2900 if (ArgFlags.isByVal())
2901 return true;
2902
2903 // Promote i8 and i16
2904 if (ArgFlags.isInReg() && !Subtarget.isLittle()) {
2905 if (LocVT == MVT::i8 || LocVT == MVT::i16 || LocVT == MVT::i32) {
2906 LocVT = MVT::i32;
2907 if (ArgFlags.isSExt())
2908 LocInfo = CCValAssign::SExtUpper;
2909 else if (ArgFlags.isZExt())
2910 LocInfo = CCValAssign::ZExtUpper;
2911 else
2912 LocInfo = CCValAssign::AExtUpper;
2913 }
2914 }
2915
2916 // Promote i8 and i16
2917 if (LocVT == MVT::i8 || LocVT == MVT::i16) {
2918 LocVT = MVT::i32;
2919 if (ArgFlags.isSExt())
2920 LocInfo = CCValAssign::SExt;
2921 else if (ArgFlags.isZExt())
2922 LocInfo = CCValAssign::ZExt;
2923 else
2924 LocInfo = CCValAssign::AExt;
2925 }
2926
2927 unsigned Reg;
2928
2929 // f32 and f64 are allocated in A0, A1, A2, A3 when either of the following
2930 // is true: function is vararg, argument is 3rd or higher, there is previous
2931 // argument which is not f32 or f64.
2932 bool AllocateFloatsInIntReg = State.isVarArg() || ValNo > 1 ||
2933 State.getFirstUnallocated(F32Regs) != ValNo;
2934 Align OrigAlign = ArgFlags.getNonZeroOrigAlign();
2935 bool isI64 = (ValVT == MVT::i32 && OrigAlign == Align(8));
2936 bool isVectorFloat = MipsState->WasOriginalArgVectorFloat(ValNo);
2937
2938 // The MIPS vector ABI for floats passes them in a pair of registers
2939 if (ValVT == MVT::i32 && isVectorFloat) {
2940 // This is the start of an vector that was scalarized into an unknown number
2941 // of components. It doesn't matter how many there are. Allocate one of the
2942 // notional 8 byte aligned registers which map onto the argument stack, and
2943 // shadow the register lost to alignment requirements.
2944 if (ArgFlags.isSplit()) {
2945 Reg = State.AllocateReg(FloatVectorIntRegs);
2946 if (Reg == Mips::A2)
2947 State.AllocateReg(Mips::A1);
2948 else if (Reg == 0)
2949 State.AllocateReg(Mips::A3);
2950 } else {
2951 // If we're an intermediate component of the split, we can just attempt to
2952 // allocate a register directly.
2953 Reg = State.AllocateReg(IntRegs);
2954 }
2955 } else if (ValVT == MVT::i32 ||
2956 (ValVT == MVT::f32 && AllocateFloatsInIntReg)) {
2957 Reg = State.AllocateReg(IntRegs);
2958 // If this is the first part of an i64 arg,
2959 // the allocated register must be either A0 or A2.
2960 if (isI64 && (Reg == Mips::A1 || Reg == Mips::A3))
2961 Reg = State.AllocateReg(IntRegs);
2962 LocVT = MVT::i32;
2963 } else if (ValVT == MVT::f64 && AllocateFloatsInIntReg) {
2964 // Allocate int register and shadow next int register. If first
2965 // available register is Mips::A1 or Mips::A3, shadow it too.
2966 Reg = State.AllocateReg(IntRegs);
2967 if (Reg == Mips::A1 || Reg == Mips::A3)
2968 Reg = State.AllocateReg(IntRegs);
2969
2970 if (Reg) {
2971 LocVT = MVT::i32;
2972
2973 State.addLoc(
2974 CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
2975 MCRegister HiReg = State.AllocateReg(IntRegs);
2976 assert(HiReg);
2977 State.addLoc(
2978 CCValAssign::getCustomReg(ValNo, ValVT, HiReg, LocVT, LocInfo));
2979 return false;
2980 }
2981 } else if (ValVT.isFloatingPoint() && !AllocateFloatsInIntReg) {
2982 // we are guaranteed to find an available float register
2983 if (ValVT == MVT::f32) {
2984 Reg = State.AllocateReg(F32Regs);
2985 // Shadow int register
2986 State.AllocateReg(IntRegs);
2987 } else {
2988 Reg = State.AllocateReg(F64Regs);
2989 // Shadow int registers
2990 unsigned Reg2 = State.AllocateReg(IntRegs);
2991 if (Reg2 == Mips::A1 || Reg2 == Mips::A3)
2992 State.AllocateReg(IntRegs);
2993 State.AllocateReg(IntRegs);
2994 }
2995 } else
2996 llvm_unreachable("Cannot handle this ValVT.");
2997
2998 if (!Reg) {
2999 unsigned Offset = State.AllocateStack(ValVT.getStoreSize(), OrigAlign);
3000 State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
3001 } else
3002 State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
3003
3004 return false;
3005}
3006
3007static bool CC_MipsO32_FP32(unsigned ValNo, MVT ValVT,
3008 MVT LocVT, CCValAssign::LocInfo LocInfo,
3009 ISD::ArgFlagsTy ArgFlags, CCState &State) {
3010 static const MCPhysReg F64Regs[] = { Mips::D6, Mips::D7 };
3011
3012 return CC_MipsO32(ValNo, ValVT, LocVT, LocInfo, ArgFlags, State, F64Regs);
3013}
3014
3015static bool CC_MipsO32_FP64(unsigned ValNo, MVT ValVT,
3016 MVT LocVT, CCValAssign::LocInfo LocInfo,
3017 ISD::ArgFlagsTy ArgFlags, CCState &State) {
3018 static const MCPhysReg F64Regs[] = { Mips::D12_64, Mips::D14_64 };
3019
3020 return CC_MipsO32(ValNo, ValVT, LocVT, LocInfo, ArgFlags, State, F64Regs);
3021}
3022
3023static bool CC_MipsO32(unsigned ValNo, MVT ValVT, MVT LocVT,
3024 CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags,
3026
3027#include "MipsGenCallingConv.inc"
3028
3030 return CC_Mips_FixedArg;
3031 }
3032
3034 return RetCC_Mips;
3035 }
3036//===----------------------------------------------------------------------===//
3037// Call Calling Convention Implementation
3038//===----------------------------------------------------------------------===//
3039
3040SDValue MipsTargetLowering::passArgOnStack(SDValue StackPtr, unsigned Offset,
3041 SDValue Chain, SDValue Arg,
3042 const SDLoc &DL, bool IsTailCall,
3043 SelectionDAG &DAG) const {
3044 if (!IsTailCall) {
3045 SDValue PtrOff =
3046 DAG.getNode(ISD::ADD, DL, getPointerTy(DAG.getDataLayout()), StackPtr,
3048 return DAG.getStore(Chain, DL, Arg, PtrOff, MachinePointerInfo());
3049 }
3050
3052 int FI = MFI.CreateFixedObject(Arg.getValueSizeInBits() / 8, Offset, false);
3053 SDValue FIN = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
3054 return DAG.getStore(Chain, DL, Arg, FIN, MachinePointerInfo(), MaybeAlign(),
3056}
3057
3060 std::deque<std::pair<unsigned, SDValue>> &RegsToPass,
3061 bool IsPICCall, bool GlobalOrExternal, bool InternalLinkage,
3062 bool IsCallReloc, CallLoweringInfo &CLI, SDValue Callee,
3063 SDValue Chain) const {
3064 // Insert node "GP copy globalreg" before call to function.
3065 //
3066 // R_MIPS_CALL* operators (emitted when non-internal functions are called
3067 // in PIC mode) allow symbols to be resolved via lazy binding.
3068 // The lazy binding stub requires GP to point to the GOT.
3069 // Note that we don't need GP to point to the GOT for indirect calls
3070 // (when R_MIPS_CALL* is not used for the call) because Mips linker generates
3071 // lazy binding stub for a function only when R_MIPS_CALL* are the only relocs
3072 // used for the function (that is, Mips linker doesn't generate lazy binding
3073 // stub for a function whose address is taken in the program).
3074 if (IsPICCall && !InternalLinkage && IsCallReloc) {
3075 unsigned GPReg = ABI.IsN64() ? Mips::GP_64 : Mips::GP;
3076 EVT Ty = ABI.IsN64() ? MVT::i64 : MVT::i32;
3077 RegsToPass.push_back(std::make_pair(GPReg, getGlobalReg(CLI.DAG, Ty)));
3078 }
3079
3080 // Build a sequence of copy-to-reg nodes chained together with token
3081 // chain and flag operands which copy the outgoing args into registers.
3082 // The InGlue in necessary since all emitted instructions must be
3083 // stuck together.
3084 SDValue InGlue;
3085
3086 for (auto &R : RegsToPass) {
3087 Chain = CLI.DAG.getCopyToReg(Chain, CLI.DL, R.first, R.second, InGlue);
3088 InGlue = Chain.getValue(1);
3089 }
3090
3091 // Add argument registers to the end of the list so that they are
3092 // known live into the call.
3093 for (auto &R : RegsToPass)
3094 Ops.push_back(CLI.DAG.getRegister(R.first, R.second.getValueType()));
3095
3096 // Add a register mask operand representing the call-preserved registers.
3098 const uint32_t *Mask =
3099 TRI->getCallPreservedMask(CLI.DAG.getMachineFunction(), CLI.CallConv);
3100 assert(Mask && "Missing call preserved mask for calling convention");
3102 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(CLI.Callee)) {
3103 StringRef Sym = G->getGlobal()->getName();
3104 Function *F = G->getGlobal()->getParent()->getFunction(Sym);
3105 if (F && F->hasFnAttribute("__Mips16RetHelper")) {
3107 }
3108 }
3109 }
3110 Ops.push_back(CLI.DAG.getRegisterMask(Mask));
3111
3112 if (InGlue.getNode())
3113 Ops.push_back(InGlue);
3114}
3115
3117 SDNode *Node) const {
3118 switch (MI.getOpcode()) {
3119 default:
3120 return;
3121 case Mips::JALR:
3122 case Mips::JALRPseudo:
3123 case Mips::JALR64:
3124 case Mips::JALR64Pseudo:
3125 case Mips::JALR16_MM:
3126 case Mips::JALRC16_MMR6:
3127 case Mips::TAILCALLREG:
3128 case Mips::TAILCALLREG64:
3129 case Mips::TAILCALLR6REG:
3130 case Mips::TAILCALL64R6REG:
3131 case Mips::TAILCALLREG_MM:
3132 case Mips::TAILCALLREG_MMR6: {
3133 if (!EmitJalrReloc ||
3136 Node->getNumOperands() < 1 ||
3137 Node->getOperand(0).getNumOperands() < 2) {
3138 return;
3139 }
3140 // We are after the callee address, set by LowerCall().
3141 // If added to MI, asm printer will emit .reloc R_MIPS_JALR for the
3142 // symbol.
3143 const SDValue TargetAddr = Node->getOperand(0).getOperand(1);
3144 StringRef Sym;
3145 if (const GlobalAddressSDNode *G =
3146 dyn_cast_or_null<const GlobalAddressSDNode>(TargetAddr)) {
3147 // We must not emit the R_MIPS_JALR relocation against data symbols
3148 // since this will cause run-time crashes if the linker replaces the
3149 // call instruction with a relative branch to the data symbol.
3150 if (!isa<Function>(G->getGlobal())) {
3151 LLVM_DEBUG(dbgs() << "Not adding R_MIPS_JALR against data symbol "
3152 << G->getGlobal()->getName() << "\n");
3153 return;
3154 }
3155 Sym = G->getGlobal()->getName();
3156 }
3157 else if (const ExternalSymbolSDNode *ES =
3158 dyn_cast_or_null<const ExternalSymbolSDNode>(TargetAddr)) {
3159 Sym = ES->getSymbol();
3160 }
3161
3162 if (Sym.empty())
3163 return;
3164
3165 MachineFunction *MF = MI.getParent()->getParent();
3167 LLVM_DEBUG(dbgs() << "Adding R_MIPS_JALR against " << Sym << "\n");
3169 }
3170 }
3171}
3172
3173/// LowerCall - functions arguments are copied from virtual regs to
3174/// (physical regs)/(stack frame), CALLSEQ_START and CALLSEQ_END are emitted.
3175SDValue
3176MipsTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
3177 SmallVectorImpl<SDValue> &InVals) const {
3178 SelectionDAG &DAG = CLI.DAG;
3179 SDLoc DL = CLI.DL;
3181 SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
3183 SDValue Chain = CLI.Chain;
3184 SDValue Callee = CLI.Callee;
3185 bool &IsTailCall = CLI.IsTailCall;
3186 CallingConv::ID CallConv = CLI.CallConv;
3187 bool IsVarArg = CLI.IsVarArg;
3188
3190 MachineFrameInfo &MFI = MF.getFrameInfo();
3192 MipsFunctionInfo *FuncInfo = MF.getInfo<MipsFunctionInfo>();
3193 bool IsPIC = isPositionIndependent();
3194
3195 // Analyze operands of the call, assigning locations to each operand.
3197 MipsCCState CCInfo(
3198 CallConv, IsVarArg, DAG.getMachineFunction(), ArgLocs, *DAG.getContext(),
3200
3201 const ExternalSymbolSDNode *ES =
3202 dyn_cast_or_null<const ExternalSymbolSDNode>(Callee.getNode());
3203
3204 // There is one case where CALLSEQ_START..CALLSEQ_END can be nested, which
3205 // is during the lowering of a call with a byval argument which produces
3206 // a call to memcpy. For the O32 case, this causes the caller to allocate
3207 // stack space for the reserved argument area for the callee, then recursively
3208 // again for the memcpy call. In the NEWABI case, this doesn't occur as those
3209 // ABIs mandate that the callee allocates the reserved argument area. We do
3210 // still produce nested CALLSEQ_START..CALLSEQ_END with zero space though.
3211 //
3212 // If the callee has a byval argument and memcpy is used, we are mandated
3213 // to already have produced a reserved argument area for the callee for O32.
3214 // Therefore, the reserved argument area can be reused for both calls.
3215 //
3216 // Other cases of calling memcpy cannot have a chain with a CALLSEQ_START
3217 // present, as we have yet to hook that node onto the chain.
3218 //
3219 // Hence, the CALLSEQ_START and CALLSEQ_END nodes can be eliminated in this
3220 // case. GCC does a similar trick, in that wherever possible, it calculates
3221 // the maximum out going argument area (including the reserved area), and
3222 // preallocates the stack space on entrance to the caller.
3223 //
3224 // FIXME: We should do the same for efficiency and space.
3225
3226 // Note: The check on the calling convention below must match
3227 // MipsABIInfo::GetCalleeAllocdArgSizeInBytes().
3228 bool MemcpyInByVal = ES &&
3229 StringRef(ES->getSymbol()) == StringRef("memcpy") &&
3230 CallConv != CallingConv::Fast &&
3231 Chain.getOpcode() == ISD::CALLSEQ_START;
3232
3233 // Allocate the reserved argument area. It seems strange to do this from the
3234 // caller side but removing it breaks the frame size calculation.
3235 unsigned ReservedArgArea =
3236 MemcpyInByVal ? 0 : ABI.GetCalleeAllocdArgSizeInBytes(CallConv);
3237 CCInfo.AllocateStack(ReservedArgArea, Align(1));
3238
3239 CCInfo.AnalyzeCallOperands(Outs, CC_Mips, CLI.getArgs(),
3240 ES ? ES->getSymbol() : nullptr);
3241
3242 // Get a count of how many bytes are to be pushed on the stack.
3243 unsigned StackSize = CCInfo.getStackSize();
3244
3245 // Call site info for function parameters tracking.
3247
3248 // Check if it's really possible to do a tail call. Restrict it to functions
3249 // that are part of this compilation unit.
3250 bool InternalLinkage = false;
3251 if (IsTailCall) {
3252 IsTailCall = isEligibleForTailCallOptimization(
3253 CCInfo, StackSize, *MF.getInfo<MipsFunctionInfo>());
3254 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
3255 InternalLinkage = G->getGlobal()->hasInternalLinkage();
3256 IsTailCall &= (InternalLinkage || G->getGlobal()->hasLocalLinkage() ||
3257 G->getGlobal()->hasPrivateLinkage() ||
3258 G->getGlobal()->hasHiddenVisibility() ||
3259 G->getGlobal()->hasProtectedVisibility());
3260 }
3261 }
3262 if (!IsTailCall && CLI.CB && CLI.CB->isMustTailCall())
3263 report_fatal_error("failed to perform tail call elimination on a call "
3264 "site marked musttail");
3265
3266 if (IsTailCall)
3267 ++NumTailCalls;
3268
3269 // Chain is the output chain of the last Load/Store or CopyToReg node.
3270 // ByValChain is the output chain of the last Memcpy node created for copying
3271 // byval arguments to the stack.
3272 unsigned StackAlignment = TFL->getStackAlignment();
3273 StackSize = alignTo(StackSize, StackAlignment);
3274
3275 if (!(IsTailCall || MemcpyInByVal))
3276 Chain = DAG.getCALLSEQ_START(Chain, StackSize, 0, DL);
3277
3279 DAG.getCopyFromReg(Chain, DL, ABI.IsN64() ? Mips::SP_64 : Mips::SP,
3281
3282 std::deque<std::pair<unsigned, SDValue>> RegsToPass;
3283 SmallVector<SDValue, 8> MemOpChains;
3284
3285 CCInfo.rewindByValRegsInfo();
3286
3287 // Walk the register/memloc assignments, inserting copies/loads.
3288 for (unsigned i = 0, e = ArgLocs.size(), OutIdx = 0; i != e; ++i, ++OutIdx) {
3289 SDValue Arg = OutVals[OutIdx];
3290 CCValAssign &VA = ArgLocs[i];
3291 MVT ValVT = VA.getValVT(), LocVT = VA.getLocVT();
3292 ISD::ArgFlagsTy Flags = Outs[OutIdx].Flags;
3293 bool UseUpperBits = false;
3294
3295 // ByVal Arg.
3296 if (Flags.isByVal()) {
3297 unsigned FirstByValReg, LastByValReg;
3298 unsigned ByValIdx = CCInfo.getInRegsParamsProcessed();
3299 CCInfo.getInRegsParamInfo(ByValIdx, FirstByValReg, LastByValReg);
3300
3301 assert(Flags.getByValSize() &&
3302 "ByVal args of size 0 should have been ignored by front-end.");
3303 assert(ByValIdx < CCInfo.getInRegsParamsCount());
3304 assert(!IsTailCall &&
3305 "Do not tail-call optimize if there is a byval argument.");
3306 passByValArg(Chain, DL, RegsToPass, MemOpChains, StackPtr, MFI, DAG, Arg,
3307 FirstByValReg, LastByValReg, Flags, Subtarget.isLittle(),
3308 VA);
3309 CCInfo.nextInRegsParam();
3310 continue;
3311 }
3312
3313 // Promote the value if needed.
3314 switch (VA.getLocInfo()) {
3315 default:
3316 llvm_unreachable("Unknown loc info!");
3317 case CCValAssign::Full:
3318 if (VA.isRegLoc()) {
3319 if ((ValVT == MVT::f32 && LocVT == MVT::i32) ||
3320 (ValVT == MVT::f64 && LocVT == MVT::i64) ||
3321 (ValVT == MVT::i64 && LocVT == MVT::f64))
3322 Arg = DAG.getNode(ISD::BITCAST, DL, LocVT, Arg);
3323 else if (ValVT == MVT::f64 && LocVT == MVT::i32) {
3325 Arg, DAG.getConstant(0, DL, MVT::i32));
3327 Arg, DAG.getConstant(1, DL, MVT::i32));
3328 if (!Subtarget.isLittle())
3329 std::swap(Lo, Hi);
3330
3331 assert(VA.needsCustom());
3332
3333 Register LocRegLo = VA.getLocReg();
3334 Register LocRegHigh = ArgLocs[++i].getLocReg();
3335 RegsToPass.push_back(std::make_pair(LocRegLo, Lo));
3336 RegsToPass.push_back(std::make_pair(LocRegHigh, Hi));
3337 continue;
3338 }
3339 }
3340 break;
3341 case CCValAssign::BCvt:
3342 Arg = DAG.getNode(ISD::BITCAST, DL, LocVT, Arg);
3343 break;
3345 UseUpperBits = true;
3346 [[fallthrough]];
3347 case CCValAssign::SExt:
3348 Arg = DAG.getNode(ISD::SIGN_EXTEND, DL, LocVT, Arg);
3349 break;
3351 UseUpperBits = true;
3352 [[fallthrough]];
3353 case CCValAssign::ZExt:
3354 Arg = DAG.getNode(ISD::ZERO_EXTEND, DL, LocVT, Arg);
3355 break;
3357 UseUpperBits = true;
3358 [[fallthrough]];
3359 case CCValAssign::AExt:
3360 Arg = DAG.getNode(ISD::ANY_EXTEND, DL, LocVT, Arg);
3361 break;
3362 }
3363
3364 if (UseUpperBits) {
3365 unsigned ValSizeInBits = Outs[OutIdx].ArgVT.getSizeInBits();
3366 unsigned LocSizeInBits = VA.getLocVT().getSizeInBits();
3367 Arg = DAG.getNode(
3368 ISD::SHL, DL, VA.getLocVT(), Arg,
3369 DAG.getConstant(LocSizeInBits - ValSizeInBits, DL, VA.getLocVT()));
3370 }
3371
3372 // Arguments that can be passed on register must be kept at
3373 // RegsToPass vector
3374 if (VA.isRegLoc()) {
3375 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
3376
3377 // If the parameter is passed through reg $D, which splits into
3378 // two physical registers, avoid creating call site info.
3379 if (Mips::AFGR64RegClass.contains(VA.getLocReg()))
3380 continue;
3381
3382 // Collect CSInfo about which register passes which parameter.
3383 const TargetOptions &Options = DAG.getTarget().Options;
3384 if (Options.SupportsDebugEntryValues)
3385 CSInfo.emplace_back(VA.getLocReg(), i);
3386
3387 continue;
3388 }
3389
3390 // Register can't get to this point...
3391 assert(VA.isMemLoc());
3392
3393 // emit ISD::STORE whichs stores the
3394 // parameter value to a stack Location
3395 MemOpChains.push_back(passArgOnStack(StackPtr, VA.getLocMemOffset(),
3396 Chain, Arg, DL, IsTailCall, DAG));
3397 }
3398
3399 // Transform all store nodes into one single node because all store
3400 // nodes are independent of each other.
3401 if (!MemOpChains.empty())
3402 Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains);
3403
3404 // If the callee is a GlobalAddress/ExternalSymbol node (quite common, every
3405 // direct call is) turn it into a TargetGlobalAddress/TargetExternalSymbol
3406 // node so that legalize doesn't hack it.
3407
3408 EVT Ty = Callee.getValueType();
3409 bool GlobalOrExternal = false, IsCallReloc = false;
3410
3411 // The long-calls feature is ignored in case of PIC.
3412 // While we do not support -mshared / -mno-shared properly,
3413 // ignore long-calls in case of -mabicalls too.
3414 if (!Subtarget.isABICalls() && !IsPIC) {
3415 // If the function should be called using "long call",
3416 // get its address into a register to prevent using
3417 // of the `jal` instruction for the direct call.
3418 if (auto *N = dyn_cast<ExternalSymbolSDNode>(Callee)) {
3419 if (Subtarget.useLongCalls())
3421 ? getAddrNonPIC(N, SDLoc(N), Ty, DAG)
3422 : getAddrNonPICSym64(N, SDLoc(N), Ty, DAG);
3423 } else if (auto *N = dyn_cast<GlobalAddressSDNode>(Callee)) {
3424 bool UseLongCalls = Subtarget.useLongCalls();
3425 // If the function has long-call/far/near attribute
3426 // it overrides command line switch pased to the backend.
3427 if (auto *F = dyn_cast<Function>(N->getGlobal())) {
3428 if (F->hasFnAttribute("long-call"))
3429 UseLongCalls = true;
3430 else if (F->hasFnAttribute("short-call"))
3431 UseLongCalls = false;
3432 }
3433 if (UseLongCalls)
3435 ? getAddrNonPIC(N, SDLoc(N), Ty, DAG)
3436 : getAddrNonPICSym64(N, SDLoc(N), Ty, DAG);
3437 }
3438 }
3439
3440 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
3441 if (IsPIC) {
3442 const GlobalValue *Val = G->getGlobal();
3443 InternalLinkage = Val->hasInternalLinkage();
3444
3445 if (InternalLinkage)
3446 Callee = getAddrLocal(G, DL, Ty, DAG, ABI.IsN32() || ABI.IsN64());
3447 else if (Subtarget.useXGOT()) {
3449 MipsII::MO_CALL_LO16, Chain,
3450 FuncInfo->callPtrInfo(MF, Val));
3451 IsCallReloc = true;
3452 } else {
3453 Callee = getAddrGlobal(G, DL, Ty, DAG, MipsII::MO_GOT_CALL, Chain,
3454 FuncInfo->callPtrInfo(MF, Val));
3455 IsCallReloc = true;
3456 }
3457 } else
3458 Callee = DAG.getTargetGlobalAddress(G->getGlobal(), DL,
3459 getPointerTy(DAG.getDataLayout()), 0,
3461 GlobalOrExternal = true;
3462 }
3463 else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
3464 const char *Sym = S->getSymbol();
3465
3466 if (!IsPIC) // static
3469 else if (Subtarget.useXGOT()) {
3471 MipsII::MO_CALL_LO16, Chain,
3472 FuncInfo->callPtrInfo(MF, Sym));
3473 IsCallReloc = true;
3474 } else { // PIC
3475 Callee = getAddrGlobal(S, DL, Ty, DAG, MipsII::MO_GOT_CALL, Chain,
3476 FuncInfo->callPtrInfo(MF, Sym));
3477 IsCallReloc = true;
3478 }
3479
3480 GlobalOrExternal = true;
3481 }
3482
3483 SmallVector<SDValue, 8> Ops(1, Chain);
3484 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
3485
3486 getOpndList(Ops, RegsToPass, IsPIC, GlobalOrExternal, InternalLinkage,
3487 IsCallReloc, CLI, Callee, Chain);
3488
3489 if (IsTailCall) {
3491 SDValue Ret = DAG.getNode(MipsISD::TailCall, DL, MVT::Other, Ops);
3492 DAG.addCallSiteInfo(Ret.getNode(), std::move(CSInfo));
3493 return Ret;
3494 }
3495
3496 Chain = DAG.getNode(MipsISD::JmpLink, DL, NodeTys, Ops);
3497 SDValue InGlue = Chain.getValue(1);
3498
3499 DAG.addCallSiteInfo(Chain.getNode(), std::move(CSInfo));
3500
3501 // Create the CALLSEQ_END node in the case of where it is not a call to
3502 // memcpy.
3503 if (!(MemcpyInByVal)) {
3504 Chain = DAG.getCALLSEQ_END(Chain, StackSize, 0, InGlue, DL);
3505 InGlue = Chain.getValue(1);
3506 }
3507
3508 // Handle result values, copying them out of physregs into vregs that we
3509 // return.
3510 return LowerCallResult(Chain, InGlue, CallConv, IsVarArg, Ins, DL, DAG,
3511 InVals, CLI);
3512}
3513
3514/// LowerCallResult - Lower the result values of a call into the
3515/// appropriate copies out of appropriate physical registers.
3516SDValue MipsTargetLowering::LowerCallResult(
3517 SDValue Chain, SDValue InGlue, CallingConv::ID CallConv, bool IsVarArg,
3518 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
3521 // Assign locations to each value returned by this call.
3523 MipsCCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
3524 *DAG.getContext());
3525
3526 const ExternalSymbolSDNode *ES =
3527 dyn_cast_or_null<const ExternalSymbolSDNode>(CLI.Callee.getNode());
3528 CCInfo.AnalyzeCallResult(Ins, RetCC_Mips, CLI.RetTy,
3529 ES ? ES->getSymbol() : nullptr);
3530
3531 // Copy all of the result registers out of their specified physreg.
3532 for (unsigned i = 0; i != RVLocs.size(); ++i) {
3533 CCValAssign &VA = RVLocs[i];
3534 assert(VA.isRegLoc() && "Can only return in registers!");
3535
3536 SDValue Val = DAG.getCopyFromReg(Chain, DL, RVLocs[i].getLocReg(),
3537 RVLocs[i].getLocVT(), InGlue);
3538 Chain = Val.getValue(1);
3539 InGlue = Val.getValue(2);
3540
3541 if (VA.isUpperBitsInLoc()) {
3542 unsigned ValSizeInBits = Ins[i].ArgVT.getSizeInBits();
3543 unsigned LocSizeInBits = VA.getLocVT().getSizeInBits();
3544 unsigned Shift =
3546 Val = DAG.getNode(
3547 Shift, DL, VA.getLocVT(), Val,
3548 DAG.getConstant(LocSizeInBits - ValSizeInBits, DL, VA.getLocVT()));
3549 }
3550
3551 switch (VA.getLocInfo()) {
3552 default:
3553 llvm_unreachable("Unknown loc info!");
3554 case CCValAssign::Full:
3555 break;
3556 case CCValAssign::BCvt:
3557 Val = DAG.getNode(ISD::BITCAST, DL, VA.getValVT(), Val);
3558 break;
3559 case CCValAssign::AExt:
3561 Val = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Val);
3562 break;
3563 case CCValAssign::ZExt:
3565 Val = DAG.getNode(ISD::AssertZext, DL, VA.getLocVT(), Val,
3566 DAG.getValueType(VA.getValVT()));
3567 Val = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Val);
3568 break;
3569 case CCValAssign::SExt:
3571 Val = DAG.getNode(ISD::AssertSext, DL, VA.getLocVT(), Val,
3572 DAG.getValueType(VA.getValVT()));
3573 Val = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Val);
3574 break;
3575 }
3576
3577 InVals.push_back(Val);
3578 }
3579
3580 return Chain;
3581}
3582
3584 EVT ArgVT, const SDLoc &DL,
3585 SelectionDAG &DAG) {
3586 MVT LocVT = VA.getLocVT();
3587 EVT ValVT = VA.getValVT();
3588
3589 // Shift into the upper bits if necessary.
3590 switch (VA.getLocInfo()) {
3591 default:
3592 break;
3596 unsigned ValSizeInBits = ArgVT.getSizeInBits();
3597 unsigned LocSizeInBits = VA.getLocVT().getSizeInBits();
3598 unsigned Opcode =
3600 Val = DAG.getNode(
3601 Opcode, DL, VA.getLocVT(), Val,
3602 DAG.getConstant(LocSizeInBits - ValSizeInBits, DL, VA.getLocVT()));
3603 break;
3604 }
3605 }
3606
3607 // If this is an value smaller than the argument slot size (32-bit for O32,
3608 // 64-bit for N32/N64), it has been promoted in some way to the argument slot
3609 // size. Extract the value and insert any appropriate assertions regarding
3610 // sign/zero extension.
3611 switch (VA.getLocInfo()) {
3612 default:
3613 llvm_unreachable("Unknown loc info!");
3614 case CCValAssign::Full:
3615 break;
3617 case CCValAssign::AExt:
3618 Val = DAG.getNode(ISD::TRUNCATE, DL, ValVT, Val);
3619 break;
3621 case CCValAssign::SExt:
3622 Val = DAG.getNode(ISD::AssertSext, DL, LocVT, Val, DAG.getValueType(ValVT));
3623 Val = DAG.getNode(ISD::TRUNCATE, DL, ValVT, Val);
3624 break;
3626 case CCValAssign::ZExt:
3627 Val = DAG.getNode(ISD::AssertZext, DL, LocVT, Val, DAG.getValueType(ValVT));
3628 Val = DAG.getNode(ISD::TRUNCATE, DL, ValVT, Val);
3629 break;
3630 case CCValAssign::BCvt:
3631 Val = DAG.getNode(ISD::BITCAST, DL, ValVT, Val);
3632 break;
3633 }
3634
3635 return Val;
3636}
3637
3638//===----------------------------------------------------------------------===//
3639// Formal Arguments Calling Convention Implementation
3640//===----------------------------------------------------------------------===//
3641/// LowerFormalArguments - transform physical registers into virtual registers
3642/// and generate load operations for arguments places on the stack.
3643SDValue MipsTargetLowering::LowerFormalArguments(
3644 SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
3645 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
3646 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
3648 MachineFrameInfo &MFI = MF.getFrameInfo();
3650
3651 MipsFI->setVarArgsFrameIndex(0);
3652
3653 // Used with vargs to acumulate store chains.
3654 std::vector<SDValue> OutChains;
3655
3656 // Assign locations to all of the incoming arguments.
3658 MipsCCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), ArgLocs,
3659 *DAG.getContext());
3660 CCInfo.AllocateStack(ABI.GetCalleeAllocdArgSizeInBytes(CallConv), Align(1));
3662 Function::const_arg_iterator FuncArg = Func.arg_begin();
3663
3664 if (Func.hasFnAttribute("interrupt") && !Func.arg_empty())
3666 "Functions with the interrupt attribute cannot have arguments!");
3667
3668 CCInfo.AnalyzeFormalArguments(Ins, CC_Mips_FixedArg);
3669 MipsFI->setFormalArgInfo(CCInfo.getStackSize(),
3670 CCInfo.getInRegsParamsCount() > 0);
3671
3672 unsigned CurArgIdx = 0;
3673 CCInfo.rewindByValRegsInfo();
3674
3675 for (unsigned i = 0, e = ArgLocs.size(), InsIdx = 0; i != e; ++i, ++InsIdx) {
3676 CCValAssign &VA = ArgLocs[i];
3677 if (Ins[InsIdx].isOrigArg()) {
3678 std::advance(FuncArg, Ins[InsIdx].getOrigArgIndex() - CurArgIdx);
3679 CurArgIdx = Ins[InsIdx].getOrigArgIndex();
3680 }
3681 EVT ValVT = VA.getValVT();
3682 ISD::ArgFlagsTy Flags = Ins[InsIdx].Flags;
3683 bool IsRegLoc = VA.isRegLoc();
3684
3685 if (Flags.isByVal()) {
3686 assert(Ins[InsIdx].isOrigArg() && "Byval arguments cannot be implicit");
3687 unsigned FirstByValReg, LastByValReg;
3688 unsigned ByValIdx = CCInfo.getInRegsParamsProcessed();
3689 CCInfo.getInRegsParamInfo(ByValIdx, FirstByValReg, LastByValReg);
3690
3691 assert(Flags.getByValSize() &&
3692 "ByVal args of size 0 should have been ignored by front-end.");
3693 assert(ByValIdx < CCInfo.getInRegsParamsCount());
3694 copyByValRegs(Chain, DL, OutChains, DAG, Flags, InVals, &*FuncArg,
3695 FirstByValReg, LastByValReg, VA, CCInfo);
3696 CCInfo.nextInRegsParam();
3697 continue;
3698 }
3699
3700 // Arguments stored on registers
3701 if (IsRegLoc) {
3702 MVT RegVT = VA.getLocVT();
3703 Register ArgReg = VA.getLocReg();
3704 const TargetRegisterClass *RC = getRegClassFor(RegVT);
3705
3706 // Transform the arguments stored on
3707 // physical registers into virtual ones
3708 unsigned Reg = addLiveIn(DAG.getMachineFunction(), ArgReg, RC);
3709 SDValue ArgValue = DAG.getCopyFromReg(Chain, DL, Reg, RegVT);
3710
3711 ArgValue =
3712 UnpackFromArgumentSlot(ArgValue, VA, Ins[InsIdx].ArgVT, DL, DAG);
3713
3714 // Handle floating point arguments passed in integer registers and
3715 // long double arguments passed in floating point registers.
3716 if ((RegVT == MVT::i32 && ValVT == MVT::f32) ||
3717 (RegVT == MVT::i64 && ValVT == MVT::f64) ||
3718 (RegVT == MVT::f64 && ValVT == MVT::i64))
3719 ArgValue = DAG.getNode(ISD::BITCAST, DL, ValVT, ArgValue);
3720 else if (ABI.IsO32() && RegVT == MVT::i32 &&
3721 ValVT == MVT::f64) {
3722 assert(VA.needsCustom() && "Expected custom argument for f64 split");
3723 CCValAssign &NextVA = ArgLocs[++i];
3724 unsigned Reg2 =
3725 addLiveIn(DAG.getMachineFunction(), NextVA.getLocReg(), RC);
3726 SDValue ArgValue2 = DAG.getCopyFromReg(Chain, DL, Reg2, RegVT);
3727 if (!Subtarget.isLittle())
3728 std::swap(ArgValue, ArgValue2);
3729 ArgValue = DAG.getNode(MipsISD::BuildPairF64, DL, MVT::f64,
3730 ArgValue, ArgValue2);
3731 }
3732
3733 InVals.push_back(ArgValue);
3734 } else { // VA.isRegLoc()
3735 MVT LocVT = VA.getLocVT();
3736
3737 assert(!VA.needsCustom() && "unexpected custom memory argument");
3738
3739 // Only arguments pased on the stack should make it here.
3740 assert(VA.isMemLoc());
3741
3742 // The stack pointer offset is relative to the caller stack frame.
3743 int FI = MFI.CreateFixedObject(LocVT.getSizeInBits() / 8,
3744 VA.getLocMemOffset(), true);
3745
3746 // Create load nodes to retrieve arguments from the stack
3747 SDValue FIN = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
3748 SDValue ArgValue = DAG.getLoad(
3749 LocVT, DL, Chain, FIN,
3751 OutChains.push_back(ArgValue.getValue(1));
3752
3753 ArgValue =
3754 UnpackFromArgumentSlot(ArgValue, VA, Ins[InsIdx].ArgVT, DL, DAG);
3755
3756 InVals.push_back(ArgValue);
3757 }
3758 }
3759
3760 for (unsigned i = 0, e = ArgLocs.size(), InsIdx = 0; i != e; ++i, ++InsIdx) {
3761
3762 if (ArgLocs[i].needsCustom()) {
3763 ++i;
3764 continue;
3765 }
3766
3767 // The mips ABIs for returning structs by value requires that we copy
3768 // the sret argument into $v0 for the return. Save the argument into
3769 // a virtual register so that we can access it from the return points.
3770 if (Ins[InsIdx].Flags.isSRet()) {
3771 unsigned Reg = MipsFI->getSRetReturnReg();
3772 if (!Reg) {
3774 getRegClassFor(ABI.IsN64() ? MVT::i64 : MVT::i32));
3775 MipsFI->setSRetReturnReg(Reg);
3776 }
3777 SDValue Copy = DAG.getCopyToReg(DAG.getEntryNode(), DL, Reg, InVals[i]);
3778 Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Copy, Chain);
3779 break;
3780 }
3781 }
3782
3783 if (IsVarArg)
3784 writeVarArgRegs(OutChains, Chain, DL, DAG, CCInfo);
3785
3786 // All stores are grouped in one node to allow the matching between
3787 // the size of Ins and InVals. This only happens when on varg functions
3788 if (!OutChains.empty()) {
3789 OutChains.push_back(Chain);
3790 Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, OutChains);
3791 }
3792
3793 return Chain;
3794}
3795
3796//===----------------------------------------------------------------------===//
3797// Return Value Calling Convention Implementation
3798//===----------------------------------------------------------------------===//
3799
/// Return true if the return values described by \p Outs can be lowered for
/// calling convention \p CallConv, i.e. the return-value CC assigns every
/// value a location. Used to decide between a direct return and sret demotion.
bool
MipsTargetLowering::CanLowerReturn(CallingConv::ID CallConv,
                                   MachineFunction &MF, bool IsVarArg,
                                   LLVMContext &Context) const {
  // Dry-run the return calling convention; CheckReturn fails if any value
  // cannot be assigned a location under RetCC_Mips.
  MipsCCState CCInfo(CallConv, IsVarArg, MF, RVLocs, Context);
  return CCInfo.CheckReturn(Outs, RetCC_Mips);
}
3809
3810bool MipsTargetLowering::shouldSignExtendTypeInLibCall(EVT Type,
3811 bool IsSigned) const {
3812 if ((ABI.IsN32() || ABI.IsN64()) && Type == MVT::i32)
3813 return true;
3814
3815 return IsSigned;
3816}
3817
/// Build the return sequence for an interrupt service routine: flag the
/// function as an ISR on its MipsFunctionInfo and return with a
/// MipsISD::ERet node ("eret") instead of the normal MipsISD::Ret.
SDValue
MipsTargetLowering::LowerInterruptReturn(SmallVectorImpl<SDValue> &RetOps,
                                         const SDLoc &DL,
                                         SelectionDAG &DAG) const {

  // Record that this function is an ISR so later stages can treat it
  // accordingly.
  MipsFI->setISR();

  return DAG.getNode(MipsISD::ERet, DL, MVT::Other, RetOps);
}
3829
/// Lower an IR 'ret'. Each return value is extended/bitcast per its assigned
/// location info, copied into its physical register (all copies glued
/// together), an sret pointer is additionally copied into $v0/$v0_64, and the
/// final node is MipsISD::ERet for "interrupt" functions or MipsISD::Ret
/// ("jr $ra") otherwise.
SDValue
MipsTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
                                bool IsVarArg,
                                const SmallVectorImpl<SDValue> &OutVals,
                                const SDLoc &DL, SelectionDAG &DAG) const {
  // CCValAssign - represent the assignment of
  // the return value to a location

  // CCState - Info about the registers and stack slot.
  MipsCCState CCInfo(CallConv, IsVarArg, MF, RVLocs, *DAG.getContext());

  // Analyze return values.
  CCInfo.AnalyzeReturn(Outs, RetCC_Mips);

  SDValue Glue;
  SmallVector<SDValue, 4> RetOps(1, Chain);

  // Copy the result values into the output registers.
  for (unsigned i = 0; i != RVLocs.size(); ++i) {
    SDValue Val = OutVals[i];
    CCValAssign &VA = RVLocs[i];
    assert(VA.isRegLoc() && "Can only return in registers!");
    bool UseUpperBits = false;

    switch (VA.getLocInfo()) {
    default:
      llvm_unreachable("Unknown loc info!");
    case CCValAssign::Full:
      break;
    case CCValAssign::BCvt:
      Val = DAG.getNode(ISD::BITCAST, DL, VA.getLocVT(), Val);
      break;
      UseUpperBits = true;
      [[fallthrough]];
    case CCValAssign::AExt:
      Val = DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), Val);
      break;
      UseUpperBits = true;
      [[fallthrough]];
    case CCValAssign::ZExt:
      Val = DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), Val);
      break;
      UseUpperBits = true;
      [[fallthrough]];
    case CCValAssign::SExt:
      Val = DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), Val);
      break;
    }

    // When the value occupies the upper bits of its location register, shift
    // it up by the difference between the location and value widths.
    if (UseUpperBits) {
      unsigned ValSizeInBits = Outs[i].ArgVT.getSizeInBits();
      unsigned LocSizeInBits = VA.getLocVT().getSizeInBits();
      Val = DAG.getNode(
          ISD::SHL, DL, VA.getLocVT(), Val,
          DAG.getConstant(LocSizeInBits - ValSizeInBits, DL, VA.getLocVT()));
    }

    Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Val, Glue);

    // Guarantee that all emitted copies are stuck together with flags.
    Glue = Chain.getValue(1);
    RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
  }

  // The mips ABIs for returning structs by value requires that we copy
  // the sret argument into $v0 for the return. We saved the argument into
  // a virtual register in the entry block, so now we copy the value out
  // and into $v0.
  if (MF.getFunction().hasStructRetAttr()) {
    unsigned Reg = MipsFI->getSRetReturnReg();

    if (!Reg)
      llvm_unreachable("sret virtual register not created in the entry block");
    SDValue Val =
        DAG.getCopyFromReg(Chain, DL, Reg, getPointerTy(DAG.getDataLayout()));
    unsigned V0 = ABI.IsN64() ? Mips::V0_64 : Mips::V0;

    Chain = DAG.getCopyToReg(Chain, DL, V0, Val, Glue);
    Glue = Chain.getValue(1);
    RetOps.push_back(DAG.getRegister(V0, getPointerTy(DAG.getDataLayout())));
  }

  RetOps[0] = Chain; // Update chain.

  // Add the glue if we have it.
  if (Glue.getNode())
    RetOps.push_back(Glue);

  // ISRs must use "eret".
  if (DAG.getMachineFunction().getFunction().hasFnAttribute("interrupt"))
    return LowerInterruptReturn(RetOps, DL, DAG);

  // Standard return on Mips is a "jr $ra"
  return DAG.getNode(MipsISD::Ret, DL, MVT::Other, RetOps);
}
3932
3933//===----------------------------------------------------------------------===//
3934// Mips Inline Assembly Support
3935//===----------------------------------------------------------------------===//
3936
/// getConstraintType - Given a constraint letter, return the type of
/// constraint it is for this target.
MipsTargetLowering::getConstraintType(StringRef Constraint) const {
  // Mips specific constraints
  // GCC config/mips/constraints.md
  //
  // 'd' : An address register. Equivalent to r
  //       unless generating MIPS16 code.
  // 'y' : Equivalent to r; retained for
  //       backwards compatibility.
  // 'f' : A floating-point (or MSA) register.
  // 'c' : A register suitable for use in an indirect
  //       jump. This will always be $25 for -mabicalls.
  // 'l' : The lo register. 1 word storage.
  // 'x' : The hilo register pair. Double word storage.
  // 'R' : A memory operand.
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    default : break;
    case 'd':
    case 'y':
    case 'f':
    case 'c':
    case 'l':
    case 'x':
      return C_RegisterClass;
    case 'R':
      return C_Memory;
    }
  }

  // The multi-letter "ZC" constraint is also a memory constraint.
  if (Constraint == "ZC")
    return C_Memory;

  // Everything else is handled by the target-independent implementation.
  return TargetLowering::getConstraintType(Constraint);
}
3972
/// Examine constraint type and operand type and determine a weight value.
/// This object must already have been set up with the operand type
/// and the current alternative constraint selected.
MipsTargetLowering::getSingleConstraintMatchWeight(
    AsmOperandInfo &info, const char *constraint) const {
  Value *CallOperandVal = info.CallOperandVal;
  // If we don't have a value, we can't do a match,
  // but allow it at the lowest weight.
  if (!CallOperandVal)
    return CW_Default;
  Type *type = CallOperandVal->getType();
  // Look at the constraint type.
  switch (*constraint) {
  default:
    break;
  case 'd':
  case 'y':
    if (type->isIntegerTy())
      weight = CW_Register;
    break;
  case 'f': // FPU or MSA register
    // 128-bit vectors match MSA registers when MSA is available; scalar
    // floats match the FPU registers.
    if (Subtarget.hasMSA() && type->isVectorTy() &&
        type->getPrimitiveSizeInBits().getFixedValue() == 128)
      weight = CW_Register;
    else if (type->isFloatTy())
      weight = CW_Register;
    break;
  case 'c': // $25 for indirect jumps
  case 'l': // lo register
  case 'x': // hilo register pair
    if (type->isIntegerTy())
      weight = CW_SpecificReg;
    break;
  case 'I': // signed 16 bit immediate
  case 'J': // integer zero
  case 'K': // unsigned 16 bit immediate
  case 'L': // signed 32 bit immediate where lower 16 bits are 0
  case 'N': // immediate in the range of -65535 to -1 (inclusive)
  case 'O': // signed 15 bit immediate (+- 16383)
  case 'P': // immediate in the range of 65535 to 1 (inclusive)
    // Immediate constraints only match constant-integer operands.
    if (isa<ConstantInt>(CallOperandVal))
      weight = CW_Constant;
    break;
  case 'R':
    weight = CW_Memory;
    break;
  }
  return weight;
}
4025
4026/// This is a helper function to parse a physical register string and split it
4027/// into non-numeric and numeric parts (Prefix and Reg). The first boolean flag
4028/// that is returned indicates whether parsing was successful. The second flag
4029/// is true if the numeric part exists.
4030static std::pair<bool, bool> parsePhysicalReg(StringRef C, StringRef &Prefix,
4031 unsigned long long &Reg) {
4032 if (C.front() != '{' || C.back() != '}')
4033 return std::make_pair(false, false);
4034
4035 // Search for the first numeric character.
4036 StringRef::const_iterator I, B = C.begin() + 1, E = C.end() - 1;
4037 I = std::find_if(B, E, isdigit);
4038
4039 Prefix = StringRef(B, I - B);
4040
4041 // The second flag is set to false if no numeric characters were found.
4042 if (I == E)
4043 return std::make_pair(true, false);
4044
4045 // Parse the numeric characters.
4046 return std::make_pair(!getAsUnsignedInteger(StringRef(I, E - I), 10, Reg),
4047 true);
4048}
4049
                                      ISD::NodeType) const {
  // The minimum width for an extended return value is the register type for
  // i64 when the ABI is not O32 and the value is exactly 32 bits wide, and
  // the register type for i32 otherwise.
  bool Cond = !Subtarget.isABI_O32() && VT.getSizeInBits() == 32;
  EVT MinVT = getRegisterType(Cond ? MVT::i64 : MVT::i32);
  // Widen VT up to MinVT; types already at least that wide are unchanged.
  return VT.bitsLT(MinVT) ? MinVT : VT;
}
4056
/// Parse an explicit hard-register reference written as "{name}" (e.g.
/// "{$f2}", "{hi}", "{$msair}") and map it to a (register, register class)
/// pair. Returns (0, nullptr) when the string does not name a supported
/// register.
std::pair<unsigned, const TargetRegisterClass *> MipsTargetLowering::
parseRegForInlineAsmConstraint(StringRef C, MVT VT) const {
  const TargetRegisterInfo *TRI =
  const TargetRegisterClass *RC;
  StringRef Prefix;
  unsigned long long Reg;

  // Split "{prefixNN}" into Prefix and Reg; R.first is success, R.second is
  // whether a numeric part was present.
  std::pair<bool, bool> R = parsePhysicalReg(C, Prefix, Reg);

  if (!R.first)
    return std::make_pair(0U, nullptr);

  if ((Prefix == "hi" || Prefix == "lo")) { // Parse hi/lo.
    // No numeric characters follow "hi" or "lo".
    if (R.second)
      return std::make_pair(0U, nullptr);

    RC = TRI->getRegClass(Prefix == "hi" ?
                          Mips::HI32RegClassID : Mips::LO32RegClassID);
    return std::make_pair(*(RC->begin()), RC);
  } else if (Prefix.starts_with("$msa")) {
    // Parse $msa(ir|csr|access|save|modify|request|map|unmap)

    // No numeric characters follow the name.
    if (R.second)
      return std::make_pair(0U, nullptr);

        .Case("$msair", Mips::MSAIR)
        .Case("$msacsr", Mips::MSACSR)
        .Case("$msaaccess", Mips::MSAAccess)
        .Case("$msasave", Mips::MSASave)
        .Case("$msamodify", Mips::MSAModify)
        .Case("$msarequest", Mips::MSARequest)
        .Case("$msamap", Mips::MSAMap)
        .Case("$msaunmap", Mips::MSAUnmap)
        .Default(0);

    if (!Reg)
      return std::make_pair(0U, nullptr);

    RC = TRI->getRegClass(Mips::MSACtrlRegClassID);
    return std::make_pair(Reg, RC);
  }

  // All remaining prefixes require a register number.
  if (!R.second)
    return std::make_pair(0U, nullptr);

  if (Prefix == "$f") { // Parse $f0-$f31.
    // If the size of FP registers is 64-bit or Reg is an even number, select
    // the 64-bit register class. Otherwise, select the 32-bit register class.
    if (VT == MVT::Other)
      VT = (Subtarget.isFP64bit() || !(Reg % 2)) ? MVT::f64 : MVT::f32;

    RC = getRegClassFor(VT);

    if (RC == &Mips::AFGR64RegClass) {
      // AFGR64 registers are indexed in pairs, so the (even) textual number
      // is halved to index into the class.
      assert(Reg % 2 == 0);
      Reg >>= 1;
    }
  } else if (Prefix == "$fcc") // Parse $fcc0-$fcc7.
    RC = TRI->getRegClass(Mips::FCCRegClassID);
  else if (Prefix == "$w") { // Parse $w0-$w31.
    RC = getRegClassFor((VT == MVT::Other) ? MVT::v16i8 : VT);
  } else { // Parse $0-$31.
    assert(Prefix == "$");
    RC = getRegClassFor((VT == MVT::Other) ? MVT::i32 : VT);
  }

  assert(Reg < RC->getNumRegs());
  return std::make_pair(*(RC->begin() + Reg), RC);
}
4130
/// Given a register class constraint, like 'r', if this corresponds directly
/// to an LLVM register class, return a register of 0 and the register class
/// pointer.
std::pair<unsigned, const TargetRegisterClass *>
MipsTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
                                                 StringRef Constraint,
                                                 MVT VT) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    case 'd': // Address register. Same as 'r' unless generating MIPS16 code.
    case 'y': // Same as 'r'. Exists for compatibility.
    case 'r':
      // Sub-word and word integers (and f32 under soft-float) live in 32-bit
      // GPRs; MIPS16 mode restricts them to the CPU16 subset.
      if ((VT == MVT::i32 || VT == MVT::i16 || VT == MVT::i8 ||
           VT == MVT::i1) ||
          (VT == MVT::f32 && Subtarget.useSoftFloat())) {
        if (Subtarget.inMips16Mode())
          return std::make_pair(0U, &Mips::CPU16RegsRegClass);
        return std::make_pair(0U, &Mips::GPR32RegClass);
      }
      if ((VT == MVT::i64 || (VT == MVT::f64 && Subtarget.useSoftFloat())) &&
        return std::make_pair(0U, &Mips::GPR32RegClass);
      if ((VT == MVT::i64 || (VT == MVT::f64 && Subtarget.useSoftFloat())) &&
        return std::make_pair(0U, &Mips::GPR64RegClass);
      // This will generate an error message
      return std::make_pair(0U, nullptr);
    case 'f': // FPU or MSA register
      if (VT == MVT::v16i8)
        return std::make_pair(0U, &Mips::MSA128BRegClass);
      else if (VT == MVT::v8i16 || VT == MVT::v8f16)
        return std::make_pair(0U, &Mips::MSA128HRegClass);
      else if (VT == MVT::v4i32 || VT == MVT::v4f32)
        return std::make_pair(0U, &Mips::MSA128WRegClass);
      else if (VT == MVT::v2i64 || VT == MVT::v2f64)
        return std::make_pair(0U, &Mips::MSA128DRegClass);
      else if (VT == MVT::f32)
        return std::make_pair(0U, &Mips::FGR32RegClass);
      else if ((VT == MVT::f64) && (!Subtarget.isSingleFloat())) {
        if (Subtarget.isFP64bit())
          return std::make_pair(0U, &Mips::FGR64RegClass);
        return std::make_pair(0U, &Mips::AFGR64RegClass);
      }
      break;
    case 'c': // register suitable for indirect jump
      if (VT == MVT::i32)
        return std::make_pair((unsigned)Mips::T9, &Mips::GPR32RegClass);
      if (VT == MVT::i64)
        return std::make_pair((unsigned)Mips::T9_64, &Mips::GPR64RegClass);
      // This will generate an error message
      return std::make_pair(0U, nullptr);
    case 'l': // use the `lo` register to store values
              // that are no bigger than a word
      if (VT == MVT::i32 || VT == MVT::i16 || VT == MVT::i8)
        return std::make_pair((unsigned)Mips::LO0, &Mips::LO32RegClass);
      return std::make_pair((unsigned)Mips::LO0_64, &Mips::LO64RegClass);
    case 'x': // use the concatenated `hi` and `lo` registers
              // to store doubleword values
      // Fixme: Not triggering the use of both hi and low
      // This will generate an error message
      return std::make_pair(0U, nullptr);
    }
  }

  // Try explicit register names such as "{$f2}" or "{hi}" before deferring
  // to the target-independent handling.
  if (!Constraint.empty()) {
    std::pair<unsigned, const TargetRegisterClass *> R;
    R = parseRegForInlineAsmConstraint(Constraint, VT);

    if (R.second)
      return R;
  }

  return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
}
4205
/// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
/// vector. If it is invalid, don't add anything to Ops. Each immediate
/// constraint letter accepts only constants in its documented range; a
/// matching constant becomes a target constant, anything else is left for
/// the generic implementation (which reports the error for known letters).
void MipsTargetLowering::LowerAsmOperandForConstraint(SDValue Op,
                                                      StringRef Constraint,
                                                      std::vector<SDValue> &Ops,
                                                      SelectionDAG &DAG) const {
  SDLoc DL(Op);

  // Only support length 1 constraints for now.
  if (Constraint.size() > 1)
    return;

  char ConstraintLetter = Constraint[0];
  switch (ConstraintLetter) {
  default: break; // This will fall through to the generic implementation
  case 'I': // Signed 16 bit constant
    // If this fails, the parent routine will give an error
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      EVT Type = Op.getValueType();
      int64_t Val = C->getSExtValue();
      if (isInt<16>(Val)) {
        Result = DAG.getTargetConstant(Val, DL, Type);
        break;
      }
    }
    return;
  case 'J': // integer zero
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      EVT Type = Op.getValueType();
      int64_t Val = C->getZExtValue();
      if (Val == 0) {
        Result = DAG.getTargetConstant(0, DL, Type);
        break;
      }
    }
    return;
  case 'K': // unsigned 16 bit immediate
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      EVT Type = Op.getValueType();
      uint64_t Val = (uint64_t)C->getZExtValue();
      if (isUInt<16>(Val)) {
        Result = DAG.getTargetConstant(Val, DL, Type);
        break;
      }
    }
    return;
  case 'L': // signed 32 bit immediate where lower 16 bits are 0
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      EVT Type = Op.getValueType();
      int64_t Val = C->getSExtValue();
      if ((isInt<32>(Val)) && ((Val & 0xffff) == 0)){
        Result = DAG.getTargetConstant(Val, DL, Type);
        break;
      }
    }
    return;
  case 'N': // immediate in the range of -65535 to -1 (inclusive)
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      EVT Type = Op.getValueType();
      int64_t Val = C->getSExtValue();
      if ((Val >= -65535) && (Val <= -1)) {
        Result = DAG.getTargetConstant(Val, DL, Type);
        break;
      }
    }
    return;
  case 'O': // signed 15 bit immediate
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      EVT Type = Op.getValueType();
      int64_t Val = C->getSExtValue();
      if ((isInt<15>(Val))) {
        Result = DAG.getTargetConstant(Val, DL, Type);
        break;
      }
    }
    return;
  case 'P': // immediate in the range of 1 to 65535 (inclusive)
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      EVT Type = Op.getValueType();
      int64_t Val = C->getSExtValue();
      if ((Val <= 65535) && (Val >= 1)) {
        Result = DAG.getTargetConstant(Val, DL, Type);
        break;
      }
    }
    return;
  }

  // A successfully matched constant becomes the lowered operand.
  if (Result.getNode()) {
    Ops.push_back(Result);
    return;
  }

  TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
}
4302
4303bool MipsTargetLowering::isLegalAddressingMode(const DataLayout &DL,
4304 const AddrMode &AM, Type *Ty,
4305 unsigned AS,
4306 Instruction *I) const {
4307 // No global is ever allowed as a base.
4308 if (AM.BaseGV)
4309 return false;
4310
4311 switch (AM.Scale) {
4312 case 0: // "r+i" or just "i", depending on HasBaseReg.
4313 break;
4314 case 1:
4315 if (!AM.HasBaseReg) // allow "r+i".
4316 break;
4317 return false; // disallow "r+r" or "r+r+i".
4318 default:
4319 return false;
4320 }
4321
4322 return true;
4323}
4324
4325bool
4326MipsTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
4327 // The Mips target isn't yet aware of offsets.
4328 return false;
4329}
4330
4331EVT MipsTargetLowering::getOptimalMemOpType(
4332 const MemOp &Op, const AttributeList &FuncAttributes) const {
4333 if (Subtarget.hasMips64())
4334 return MVT::i64;
4335
4336 return MVT::i32;
4337}
4338
4339bool MipsTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT,
4340 bool ForCodeSize) const {
4341 if (VT != MVT::f32 && VT != MVT::f64)
4342 return false;
4343 if (Imm.isNegZero())
4344 return false;
4345 return Imm.isZero();
4346}
4347
/// Select the encoding used for jump-table entries; PIC N64 is special-cased,
/// everything else falls back to the base-class default.
unsigned MipsTargetLowering::getJumpTableEncoding() const {

  // FIXME: For space reasons this should be: EK_GPRel32BlockAddress.
  if (ABI.IsN64() && isPositionIndependent())

}
4356
4357bool MipsTargetLowering::useSoftFloat() const {
4358 return Subtarget.useSoftFloat();
4359}
4360
/// Materialize an incoming byval argument in memory. A fixed frame object
/// big enough for the whole aggregate is created, the portion that arrived
/// in integer argument registers [FirstReg, LastReg) is stored back into it,
/// and the frame address is pushed onto InVals so the function body can
/// address the aggregate uniformly.
void MipsTargetLowering::copyByValRegs(
    SDValue Chain, const SDLoc &DL, std::vector<SDValue> &OutChains,
    SelectionDAG &DAG, const ISD::ArgFlagsTy &Flags,
    SmallVectorImpl<SDValue> &InVals, const Argument *FuncArg,
    unsigned FirstReg, unsigned LastReg, const CCValAssign &VA,
    MipsCCState &State) const {
  MachineFrameInfo &MFI = MF.getFrameInfo();
  unsigned GPRSizeInBytes = Subtarget.getGPRSizeInBytes();
  unsigned NumRegs = LastReg - FirstReg;
  unsigned RegAreaSize = NumRegs * GPRSizeInBytes;
  // The object must cover at least the register-passed part of the argument.
  unsigned FrameObjSize = std::max(Flags.getByValSize(), RegAreaSize);
  int FrameObjOffset;
  ArrayRef<MCPhysReg> ByValArgRegs = ABI.GetByValArgRegs();

  if (RegAreaSize)
    FrameObjOffset =
        (int)((ByValArgRegs.size() - FirstReg) * GPRSizeInBytes);
  else
    FrameObjOffset = VA.getLocMemOffset();

  // Create frame object.
  EVT PtrTy = getPointerTy(DAG.getDataLayout());
  // Make the fixed object stored to mutable so that the load instructions
  // referencing it have their memory dependencies added.
  // Set the frame object as isAliased which clears the underlying objects
  // vector in ScheduleDAGInstrs::buildSchedGraph() resulting in addition of all
  // stores as dependencies for loads referencing this fixed object.
  int FI = MFI.CreateFixedObject(FrameObjSize, FrameObjOffset, false, true);
  SDValue FIN = DAG.getFrameIndex(FI, PtrTy);
  InVals.push_back(FIN);

  if (!NumRegs)
    return;

  // Copy arg registers.
  MVT RegTy = MVT::getIntegerVT(GPRSizeInBytes * 8);
  const TargetRegisterClass *RC = getRegClassFor(RegTy);

  for (unsigned I = 0; I < NumRegs; ++I) {
    unsigned ArgReg = ByValArgRegs[FirstReg + I];
    unsigned VReg = addLiveIn(MF, ArgReg, RC);
    unsigned Offset = I * GPRSizeInBytes;
    SDValue StorePtr = DAG.getNode(ISD::ADD, DL, PtrTy, FIN,
                                   DAG.getConstant(Offset, DL, PtrTy));
    SDValue Store = DAG.getStore(Chain, DL, DAG.getRegister(VReg, RegTy),
                                 StorePtr, MachinePointerInfo(FuncArg, Offset));
    OutChains.push_back(Store);
  }
}
4412
// Copy byVal arg to registers and stack.
// The first whole GPR-sized words go into the registers [FirstReg, LastReg);
// a sub-word tail destined for the last register is assembled from
// zero-extending loads combined with shifts/ors; whatever is left after the
// register area is copied to the outgoing stack area with a memcpy.
void MipsTargetLowering::passByValArg(
    SDValue Chain, const SDLoc &DL,
    std::deque<std::pair<unsigned, SDValue>> &RegsToPass,
    SmallVectorImpl<SDValue> &MemOpChains, SDValue StackPtr,
    MachineFrameInfo &MFI, SelectionDAG &DAG, SDValue Arg, unsigned FirstReg,
    unsigned LastReg, const ISD::ArgFlagsTy &Flags, bool isLittle,
    const CCValAssign &VA) const {
  unsigned ByValSizeInBytes = Flags.getByValSize();
  unsigned OffsetInBytes = 0; // From beginning of struct
  unsigned RegSizeInBytes = Subtarget.getGPRSizeInBytes();
  Align Alignment =
      std::min(Flags.getNonZeroByValAlign(), Align(RegSizeInBytes));
  EVT PtrTy = getPointerTy(DAG.getDataLayout()),
      RegTy = MVT::getIntegerVT(RegSizeInBytes * 8);
  unsigned NumRegs = LastReg - FirstReg;

  if (NumRegs) {
    // True when the last register receives only part of a word.
    bool LeftoverBytes = (NumRegs * RegSizeInBytes > ByValSizeInBytes);
    unsigned I = 0;

    // Copy words to registers.
    for (; I < NumRegs - LeftoverBytes; ++I, OffsetInBytes += RegSizeInBytes) {
      SDValue LoadPtr = DAG.getNode(ISD::ADD, DL, PtrTy, Arg,
                                    DAG.getConstant(OffsetInBytes, DL, PtrTy));
      SDValue LoadVal = DAG.getLoad(RegTy, DL, Chain, LoadPtr,
                                    MachinePointerInfo(), Alignment);
      MemOpChains.push_back(LoadVal.getValue(1));
      unsigned ArgReg = ArgRegs[FirstReg + I];
      RegsToPass.push_back(std::make_pair(ArgReg, LoadVal));
    }

    // Return if the struct has been fully copied.
    if (ByValSizeInBytes == OffsetInBytes)
      return;

    // Copy the remainder of the byval argument with sub-word loads and shifts.
    if (LeftoverBytes) {
      SDValue Val;

      for (unsigned LoadSizeInBytes = RegSizeInBytes / 2, TotalBytesLoaded = 0;
           OffsetInBytes < ByValSizeInBytes; LoadSizeInBytes /= 2) {
        unsigned RemainingSizeInBytes = ByValSizeInBytes - OffsetInBytes;

        // Keep halving the load size until it fits in the remaining bytes.
        if (RemainingSizeInBytes < LoadSizeInBytes)
          continue;

        // Load subword.
        SDValue LoadPtr = DAG.getNode(ISD::ADD, DL, PtrTy, Arg,
                                      DAG.getConstant(OffsetInBytes, DL,
                                                      PtrTy));
        SDValue LoadVal = DAG.getExtLoad(
            ISD::ZEXTLOAD, DL, RegTy, Chain, LoadPtr, MachinePointerInfo(),
            MVT::getIntegerVT(LoadSizeInBytes * 8), Alignment);
        MemOpChains.push_back(LoadVal.getValue(1));

        // Shift the loaded value so each piece lands in the byte position it
        // would occupy for the target endianness.
        unsigned Shamt;

        if (isLittle)
          Shamt = TotalBytesLoaded * 8;
        else
          Shamt = (RegSizeInBytes - (TotalBytesLoaded + LoadSizeInBytes)) * 8;

        SDValue Shift = DAG.getNode(ISD::SHL, DL, RegTy, LoadVal,
                                    DAG.getConstant(Shamt, DL, MVT::i32));

        if (Val.getNode())
          Val = DAG.getNode(ISD::OR, DL, RegTy, Val, Shift);
        else
          Val = Shift;

        OffsetInBytes += LoadSizeInBytes;
        TotalBytesLoaded += LoadSizeInBytes;
        Alignment = std::min(Alignment, Align(LoadSizeInBytes));
      }

      unsigned ArgReg = ArgRegs[FirstReg + I];
      RegsToPass.push_back(std::make_pair(ArgReg, Val));
      return;
    }
  }

  // Copy remainder of byval arg to it with memcpy.
  unsigned MemCpySize = ByValSizeInBytes - OffsetInBytes;
  SDValue Src = DAG.getNode(ISD::ADD, DL, PtrTy, Arg,
                            DAG.getConstant(OffsetInBytes, DL, PtrTy));
  SDValue Dst = DAG.getNode(ISD::ADD, DL, PtrTy, StackPtr,
  Chain = DAG.getMemcpy(
      Chain, DL, Dst, Src, DAG.getConstant(MemCpySize, DL, PtrTy),
      Align(Alignment), /*isVolatile=*/false, /*AlwaysInline=*/false,
      /*isTailCall=*/false, MachinePointerInfo(), MachinePointerInfo());
  MemOpChains.push_back(Chain);
}
4509
/// Spill the integer argument registers not consumed by the fixed arguments
/// into the vararg save area, so va_arg can walk the variadic arguments in
/// memory, and record the frame index of the first variable argument for
/// VASTART.
void MipsTargetLowering::writeVarArgRegs(std::vector<SDValue> &OutChains,
                                         SDValue Chain, const SDLoc &DL,
                                         SelectionDAG &DAG,
                                         CCState &State) const {
  unsigned Idx = State.getFirstUnallocated(ArgRegs);
  unsigned RegSizeInBytes = Subtarget.getGPRSizeInBytes();
  MVT RegTy = MVT::getIntegerVT(RegSizeInBytes * 8);
  const TargetRegisterClass *RC = getRegClassFor(RegTy);
  MachineFrameInfo &MFI = MF.getFrameInfo();

  // Offset of the first variable argument from stack pointer.
  int VaArgOffset;

  // If all argument registers were used, the variadic area starts right
  // after the (aligned) outgoing-argument stack area.
  if (ArgRegs.size() == Idx)
    VaArgOffset = alignTo(State.getStackSize(), RegSizeInBytes);
  else {
    VaArgOffset =
        (int)(RegSizeInBytes * (ArgRegs.size() - Idx));
  }

  // Record the frame index of the first variable argument
  // which is a value necessary to VASTART.
  int FI = MFI.CreateFixedObject(RegSizeInBytes, VaArgOffset, true);
  MipsFI->setVarArgsFrameIndex(FI);

  // Copy the integer registers that have not been used for argument passing
  // to the argument register save area. For O32, the save area is allocated
  // in the caller's stack frame, while for N32/64, it is allocated in the
  // callee's stack frame.
  for (unsigned I = Idx; I < ArgRegs.size();
       ++I, VaArgOffset += RegSizeInBytes) {
    unsigned Reg = addLiveIn(MF, ArgRegs[I], RC);
    SDValue ArgValue = DAG.getCopyFromReg(Chain, DL, Reg, RegTy);
    FI = MFI.CreateFixedObject(RegSizeInBytes, VaArgOffset, true);
    SDValue PtrOff = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
    SDValue Store =
        DAG.getStore(Chain, DL, ArgValue, PtrOff, MachinePointerInfo());
    // Detach the store's memory operand from any IR value; the store exists
    // only to spill the register.
    cast<StoreSDNode>(Store.getNode())->getMemOperand()->setValue(
        (Value *)nullptr);
    OutChains.push_back(Store);
  }
}
4556
                                    Align Alignment) const {

  assert(Size && "Byval argument's size shouldn't be 0.");

  // Byval alignment is capped at the stack alignment.
  Alignment = std::min(Alignment, TFL->getStackAlign());

  unsigned FirstReg = 0;
  unsigned NumRegs = 0;

  // Fast-cc byval arguments get no register portion; otherwise consume as
  // many remaining integer argument registers as the aggregate needs.
  if (State->getCallingConv() != CallingConv::Fast) {
    unsigned RegSizeInBytes = Subtarget.getGPRSizeInBytes();
    // FIXME: The O32 case actually describes no shadow registers.
    const MCPhysReg *ShadowRegs =
        ABI.IsO32() ? IntArgRegs.data() : Mips64DPRegs;

    // We used to check the size as well but we can't do that anymore since
    // CCState::HandleByVal() rounds up the size after calling this function.
    assert(
        Alignment >= Align(RegSizeInBytes) &&
        "Byval argument's alignment should be a multiple of RegSizeInBytes.");

    FirstReg = State->getFirstUnallocated(IntArgRegs);

    // If Alignment > RegSizeInBytes, the first arg register must be even.
    // FIXME: This condition happens to do the right thing but it's not the
    // right way to test it. We want to check that the stack frame offset
    // of the register is aligned.
    if ((Alignment > RegSizeInBytes) && (FirstReg % 2)) {
      State->AllocateReg(IntArgRegs[FirstReg], ShadowRegs[FirstReg]);
      ++FirstReg;
    }

    // Mark the registers allocated.
    Size = alignTo(Size, RegSizeInBytes);
    for (unsigned I = FirstReg; Size > 0 && (I < IntArgRegs.size());
         Size -= RegSizeInBytes, ++I, ++NumRegs)
      State->AllocateReg(IntArgRegs[I], ShadowRegs[I]);
  }

  // Record which register range this byval argument occupies.
  State->addInRegsParamInfo(FirstReg, FirstReg + NumRegs);
}
4601
/// Expand a SELECT pseudo into an explicit diamond of basic blocks for
/// subtargets without conditional-move instructions. \p isFPCmp chooses
/// between an FP condition-code branch and an integer bne-against-$zero, and
/// \p Opc is the branch opcode to emit.
MachineBasicBlock *MipsTargetLowering::emitPseudoSELECT(MachineInstr &MI,
                                                        bool isFPCmp,
                                                        unsigned Opc) const {
         "Subtarget already supports SELECT nodes with the use of"
         "conditional-move instructions.");

  const TargetInstrInfo *TII =
  DebugLoc DL = MI.getDebugLoc();

  // To "insert" a SELECT instruction, we actually have to insert the
  // diamond control-flow pattern. The incoming instruction knows the
  // destination vreg to set, the condition code register to branch on, the
  // true/false values to select between, and a branch opcode to use.
  const BasicBlock *LLVM_BB = BB->getBasicBlock();

  // thisMBB:
  // ...
  // TrueVal = ...
  // setcc r1, r2, r3
  // bNE r1, r0, copy1MBB
  // fallthrough --> copy0MBB
  MachineBasicBlock *thisMBB = BB;
  MachineFunction *F = BB->getParent();
  MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
  F->insert(It, copy0MBB);
  F->insert(It, sinkMBB);

  // Transfer the remainder of BB and its successor edges to sinkMBB.
  sinkMBB->splice(sinkMBB->begin(), BB,
                  std::next(MachineBasicBlock::iterator(MI)), BB->end());

  // Next, add the true and fallthrough blocks as its successors.
  BB->addSuccessor(copy0MBB);
  BB->addSuccessor(sinkMBB);

  if (isFPCmp) {
    // bc1[tf] cc, sinkMBB
    BuildMI(BB, DL, TII->get(Opc))
        .addReg(MI.getOperand(1).getReg())
        .addMBB(sinkMBB);
  } else {
    // bne rs, $0, sinkMBB
    BuildMI(BB, DL, TII->get(Opc))
        .addReg(MI.getOperand(1).getReg())
        .addReg(Mips::ZERO)
        .addMBB(sinkMBB);
  }

  // copy0MBB:
  //   %FalseValue = ...
  //   # fallthrough to sinkMBB
  BB = copy0MBB;

  // Update machine-CFG edges
  BB->addSuccessor(sinkMBB);

  // sinkMBB:
  //   %Result = phi [ %TrueValue, thisMBB ], [ %FalseValue, copy0MBB ]
  //   ...
  BB = sinkMBB;

  BuildMI(*BB, BB->begin(), DL, TII->get(Mips::PHI), MI.getOperand(0).getReg())
      .addReg(MI.getOperand(2).getReg())
      .addMBB(thisMBB)
      .addReg(MI.getOperand(3).getReg())
      .addMBB(copy0MBB);

  MI.eraseFromParent(); // The pseudo instruction is gone now.

  return BB;
}
4679
MipsTargetLowering::emitPseudoD_SELECT(MachineInstr &MI,
                                       MachineBasicBlock *BB) const {
         "Subtarget already supports SELECT nodes with the use of"
         "conditional-move instructions.");

  DebugLoc DL = MI.getDebugLoc();

  // D_SELECT substitutes two SELECT nodes that goes one after another and
  // have the same condition operand. On machines which don't have
  // conditional-move instruction, it reduces unnecessary branch instructions
  // which are result of using two diamond patterns that are result of two
  // SELECT pseudo instructions.
  const BasicBlock *LLVM_BB = BB->getBasicBlock();

  // thisMBB:
  // ...
  // TrueVal = ...
  // setcc r1, r2, r3
  // bNE r1, r0, copy1MBB
  // fallthrough --> copy0MBB
  MachineBasicBlock *thisMBB = BB;
  MachineFunction *F = BB->getParent();
  MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
  F->insert(It, copy0MBB);
  F->insert(It, sinkMBB);

  // Transfer the remainder of BB and its successor edges to sinkMBB.
  sinkMBB->splice(sinkMBB->begin(), BB,
                  std::next(MachineBasicBlock::iterator(MI)), BB->end());

  // Next, add the true and fallthrough blocks as its successors.
  BB->addSuccessor(copy0MBB);
  BB->addSuccessor(sinkMBB);

  // bne rs, $0, sinkMBB
  BuildMI(BB, DL, TII->get(Mips::BNE))
      .addReg(MI.getOperand(2).getReg())
      .addReg(Mips::ZERO)
      .addMBB(sinkMBB);

  // copy0MBB:
  //   %FalseValue = ...
  //   # fallthrough to sinkMBB
  BB = copy0MBB;

  // Update machine-CFG edges
  BB->addSuccessor(sinkMBB);

  // sinkMBB:
  //   %Result = phi [ %TrueValue, thisMBB ], [ %FalseValue, copy0MBB ]
  //   ...
  BB = sinkMBB;

  // Use two PHI nodes to select two results
  BuildMI(*BB, BB->begin(), DL, TII->get(Mips::PHI), MI.getOperand(0).getReg())
      .addReg(MI.getOperand(3).getReg())
      .addMBB(thisMBB)
      .addReg(MI.getOperand(5).getReg())
      .addMBB(copy0MBB);
  BuildMI(*BB, BB->begin(), DL, TII->get(Mips::PHI), MI.getOperand(1).getReg())
      .addReg(MI.getOperand(4).getReg())
      .addMBB(thisMBB)
      .addReg(MI.getOperand(6).getReg())
      .addMBB(copy0MBB);

  MI.eraseFromParent(); // The pseudo instruction is gone now.

  return BB;
}
4755
4756// FIXME? Maybe this could be a TableGen attribute on some registers and
4757// this table could be generated automatically from RegInfo.
// Map a textual register name (from a named-register global variable) to the
// corresponding physical register. Only "$28" (gp) and "sp" are recognized,
// selected in their 64- or 32-bit variants depending on the GPR width; any
// other name is a fatal error.
4760 const MachineFunction &MF) const {
4761 // The Linux kernel uses $28 and sp.
4762 if (Subtarget.isGP64bit()) {
4763 .Case("$28", Mips::GP_64)
4765 .Case("sp", Mips::SP_64)
4766 .Default(Register());
4767 if (Reg)
4768 return Reg;
4769 } else {
4771 .Case("$28", Mips::GP)
4772 .Case("sp", Mips::SP)
4773 .Default(Register());
4774 if (Reg)
4775 return Reg;
4776 }
4777 report_fatal_error("Invalid register name global variable");
4778}
4779
// Lower the LDR_W pseudo: load a (possibly unaligned) 32-bit word from
// Address + Imm (operands 1 and 2) and place it in MSA register Dest
// (operand 0) via FILL_W. On targets with native unaligned access a plain
// LW is enough; otherwise an LWR/LWL pair assembles the word from the two
// ends of the unaligned location. Returns BB unchanged; MI is erased.
4780MachineBasicBlock *MipsTargetLowering::emitLDR_W(MachineInstr &MI,
4781 MachineBasicBlock *BB) const {
4782 MachineFunction *MF = BB->getParent();
4785 const bool IsLittle = Subtarget.isLittle();
4786 DebugLoc DL = MI.getDebugLoc();
4787
4788 Register Dest = MI.getOperand(0).getReg();
4789 Register Address = MI.getOperand(1).getReg();
4790 unsigned Imm = MI.getOperand(2).getImm();
4791
4793
4795 // Mips release 6 can load from an address that is not naturally-aligned.
4796 Register Temp = MRI.createVirtualRegister(&Mips::GPR32RegClass);
4797 BuildMI(*BB, I, DL, TII->get(Mips::LW))
4798 .addDef(Temp)
4799 .addUse(Address)
4800 .addImm(Imm);
4801 BuildMI(*BB, I, DL, TII->get(Mips::FILL_W)).addDef(Dest).addUse(Temp);
4802 } else {
4803 // Mips release 5 needs to use instructions that can load from an unaligned
4804 // memory address.
// LWR reads the low part and LWL the high part of the word; the byte
// offsets (0 vs. 3) are swapped between little- and big-endian so each
// half-load targets the correct end of the 4-byte region. LWR merges into
// an IMPLICIT_DEF seed and LWL completes the word.
4805 Register LoadHalf = MRI.createVirtualRegister(&Mips::GPR32RegClass);
4806 Register LoadFull = MRI.createVirtualRegister(&Mips::GPR32RegClass);
4807 Register Undef = MRI.createVirtualRegister(&Mips::GPR32RegClass);
4808 BuildMI(*BB, I, DL, TII->get(Mips::IMPLICIT_DEF)).addDef(Undef);
4809 BuildMI(*BB, I, DL, TII->get(Mips::LWR))
4810 .addDef(LoadHalf)
4811 .addUse(Address)
4812 .addImm(Imm + (IsLittle ? 0 : 3))
4813 .addUse(Undef);
4814 BuildMI(*BB, I, DL, TII->get(Mips::LWL))
4815 .addDef(LoadFull)
4816 .addUse(Address)
4817 .addImm(Imm + (IsLittle ? 3 : 0))
4818 .addUse(LoadHalf);
4819 BuildMI(*BB, I, DL, TII->get(Mips::FILL_W)).addDef(Dest).addUse(LoadFull);
4820 }
4821
4822 MI.eraseFromParent();
4823 return BB;
4824}
4825
// Lower the LDR_D pseudo: load a (possibly unaligned) 64-bit value from
// Address + Imm (operands 1 and 2) into MSA register Dest (operand 0).
// Three strategies, from cheapest to most general:
//   - 64-bit GPRs + unaligned support: one LD, then FILL_D.
//   - 32-bit GPRs + unaligned support: two LWs (lo/hi words), FILL_W the
//     low word then INSERT_W the high word into element 1.
//   - no unaligned support: LWR/LWL pairs for each 32-bit half, byte
//     offsets chosen by endianness, then FILL_W + INSERT_W as above.
// Returns BB unchanged; MI is erased.
4826MachineBasicBlock *MipsTargetLowering::emitLDR_D(MachineInstr &MI,
4827 MachineBasicBlock *BB) const {
4828 MachineFunction *MF = BB->getParent();
4831 const bool IsLittle = Subtarget.isLittle();
4832 DebugLoc DL = MI.getDebugLoc();
4833
4834 Register Dest = MI.getOperand(0).getReg();
4835 Register Address = MI.getOperand(1).getReg();
4836 unsigned Imm = MI.getOperand(2).getImm();
4837
4839
4841 // Mips release 6 can load from an address that is not naturally-aligned.
4842 if (Subtarget.isGP64bit()) {
4843 Register Temp = MRI.createVirtualRegister(&Mips::GPR64RegClass);
4844 BuildMI(*BB, I, DL, TII->get(Mips::LD))
4845 .addDef(Temp)
4846 .addUse(Address)
4847 .addImm(Imm);
4848 BuildMI(*BB, I, DL, TII->get(Mips::FILL_D)).addDef(Dest).addUse(Temp);
4849 } else {
// The low 32-bit word lives at offset 0 (little-endian) or 4 (big-endian);
// the high word at the opposite offset.
4850 Register Wtemp = MRI.createVirtualRegister(&Mips::MSA128WRegClass);
4851 Register Lo = MRI.createVirtualRegister(&Mips::GPR32RegClass);
4852 Register Hi = MRI.createVirtualRegister(&Mips::GPR32RegClass);
4853 BuildMI(*BB, I, DL, TII->get(Mips::LW))
4854 .addDef(Lo)
4855 .addUse(Address)
4856 .addImm(Imm + (IsLittle ? 0 : 4));
4857 BuildMI(*BB, I, DL, TII->get(Mips::LW))
4858 .addDef(Hi)
4859 .addUse(Address)
4860 .addImm(Imm + (IsLittle ? 4 : 0));
4861 BuildMI(*BB, I, DL, TII->get(Mips::FILL_W)).addDef(Wtemp).addUse(Lo);
4862 BuildMI(*BB, I, DL, TII->get(Mips::INSERT_W), Dest)
4863 .addUse(Wtemp)
4864 .addUse(Hi)
4865 .addImm(1);
4866 }
4867 } else {
4868 // Mips release 5 needs to use instructions that can load from an unaligned
4869 // memory address.
// Each 32-bit half is assembled with an LWR/LWL pair seeded from an
// IMPLICIT_DEF, like emitLDR_W, but with offsets placed inside the 8-byte
// region: lo half at bytes 0..3 (LE) / 4..7 (BE), hi half at the opposite
// end.
4870 Register LoHalf = MRI.createVirtualRegister(&Mips::GPR32RegClass);
4871 Register LoFull = MRI.createVirtualRegister(&Mips::GPR32RegClass);
4872 Register LoUndef = MRI.createVirtualRegister(&Mips::GPR32RegClass);
4873 Register HiHalf = MRI.createVirtualRegister(&Mips::GPR32RegClass);
4874 Register HiFull = MRI.createVirtualRegister(&Mips::GPR32RegClass);
4875 Register HiUndef = MRI.createVirtualRegister(&Mips::GPR32RegClass);
4876 Register Wtemp = MRI.createVirtualRegister(&Mips::MSA128WRegClass);
4877 BuildMI(*BB, I, DL, TII->get(Mips::IMPLICIT_DEF)).addDef(LoUndef);
4878 BuildMI(*BB, I, DL, TII->get(Mips::LWR))
4879 .addDef(LoHalf)
4880 .addUse(Address)
4881 .addImm(Imm + (IsLittle ? 0 : 7))
4882 .addUse(LoUndef);
4883 BuildMI(*BB, I, DL, TII->get(Mips::LWL))
4884 .addDef(LoFull)
4885 .addUse(Address)
4886 .addImm(Imm + (IsLittle ? 3 : 4))
4887 .addUse(LoHalf);
4888 BuildMI(*BB, I, DL, TII->get(Mips::IMPLICIT_DEF)).addDef(HiUndef);
4889 BuildMI(*BB, I, DL, TII->get(Mips::LWR))
4890 .addDef(HiHalf)
4891 .addUse(Address)
4892 .addImm(Imm + (IsLittle ? 4 : 3))
4893 .addUse(HiUndef);
4894 BuildMI(*BB, I, DL, TII->get(Mips::LWL))
4895 .addDef(HiFull)
4896 .addUse(Address)
4897 .addImm(Imm + (IsLittle ? 7 : 0))
4898 .addUse(HiHalf);
4899 BuildMI(*BB, I, DL, TII->get(Mips::FILL_W)).addDef(Wtemp).addUse(LoFull);
4900 BuildMI(*BB, I, DL, TII->get(Mips::INSERT_W), Dest)
4901 .addUse(Wtemp)
4902 .addUse(HiFull)
4903 .addImm(1);
4904 }
4905
4906 MI.eraseFromParent();
4907 return BB;
4908}
4909
// Lower the STR_W pseudo: store the low 32-bit element of MSA register
// StoreVal (operand 0) to a possibly unaligned Address + Imm (operands 1
// and 2). The element is first extracted into a GPR with COPY_S_W; targets
// with unaligned support then use a plain SW, others an SWR/SWL pair.
// Returns BB unchanged; MI is erased.
4910MachineBasicBlock *MipsTargetLowering::emitSTR_W(MachineInstr &MI,
4911 MachineBasicBlock *BB) const {
4912 MachineFunction *MF = BB->getParent();
4915 const bool IsLittle = Subtarget.isLittle();
4916 DebugLoc DL = MI.getDebugLoc();
4917
4918 Register StoreVal = MI.getOperand(0).getReg();
4919 Register Address = MI.getOperand(1).getReg();
4920 unsigned Imm = MI.getOperand(2).getImm();
4921
4923
4925 // Mips release 6 can store to an address that is not naturally-aligned.
// COPY into a 128-bit W-class vreg first so COPY_S_W can extract word 0.
4926 Register BitcastW = MRI.createVirtualRegister(&Mips::MSA128WRegClass);
4927 Register Tmp = MRI.createVirtualRegister(&Mips::GPR32RegClass);
4928 BuildMI(*BB, I, DL, TII->get(Mips::COPY)).addDef(BitcastW).addUse(StoreVal);
4929 BuildMI(*BB, I, DL, TII->get(Mips::COPY_S_W))
4930 .addDef(Tmp)
4931 .addUse(BitcastW)
4932 .addImm(0);
4933 BuildMI(*BB, I, DL, TII->get(Mips::SW))
4934 .addUse(Tmp)
4935 .addUse(Address)
4936 .addImm(Imm);
4937 } else {
4938 // Mips release 5 needs to use instructions that can store to an unaligned
4939 // memory address.
// SWR/SWL mirror the LWR/LWL load idiom: the byte offsets (0 vs. 3) are
// swapped by endianness so the pair covers the whole unaligned word.
4940 Register Tmp = MRI.createVirtualRegister(&Mips::GPR32RegClass);
4941 BuildMI(*BB, I, DL, TII->get(Mips::COPY_S_W))
4942 .addDef(Tmp)
4943 .addUse(StoreVal)
4944 .addImm(0);
4945 BuildMI(*BB, I, DL, TII->get(Mips::SWR))
4946 .addUse(Tmp)
4947 .addUse(Address)
4948 .addImm(Imm + (IsLittle ? 0 : 3));
4949 BuildMI(*BB, I, DL, TII->get(Mips::SWL))
4950 .addUse(Tmp)
4951 .addUse(Address)
4952 .addImm(Imm + (IsLittle ? 3 : 0));
4953 }
4954
4955 MI.eraseFromParent();
4956
4957 return BB;
4958}
4959
// Lower the STR_D pseudo: store the low 64-bit element of MSA register
// StoreVal (operand 0) to a possibly unaligned Address + Imm (operands 1
// and 2). Mirrors emitLDR_D:
//   - 64-bit GPRs + unaligned support: COPY_S_D extract, one SD.
//   - 32-bit GPRs + unaligned support: COPY_S_W extracts words 0 and 1,
//     stored with two SWs at endian-dependent offsets.
//   - no unaligned support: each word is stored with an SWR/SWL pair.
// Returns BB unchanged; MI is erased.
4960MachineBasicBlock *MipsTargetLowering::emitSTR_D(MachineInstr &MI,
4961 MachineBasicBlock *BB) const {
4962 MachineFunction *MF = BB->getParent();
4965 const bool IsLittle = Subtarget.isLittle();
4966 DebugLoc DL = MI.getDebugLoc();
4967
4968 Register StoreVal = MI.getOperand(0).getReg();
4969 Register Address = MI.getOperand(1).getReg();
4970 unsigned Imm = MI.getOperand(2).getImm();
4971
4973
4975 // Mips release 6 can store to an address that is not naturally-aligned.
4976 if (Subtarget.isGP64bit()) {
// COPY into a D-class vreg so COPY_S_D can extract doubleword element 0.
4977 Register BitcastD = MRI.createVirtualRegister(&Mips::MSA128DRegClass);
4978 Register Lo = MRI.createVirtualRegister(&Mips::GPR64RegClass);
4979 BuildMI(*BB, I, DL, TII->get(Mips::COPY))
4980 .addDef(BitcastD)
4981 .addUse(StoreVal);
4982 BuildMI(*BB, I, DL, TII->get(Mips::COPY_S_D))
4983 .addDef(Lo)
4984 .addUse(BitcastD)
4985 .addImm(0);
4986 BuildMI(*BB, I, DL, TII->get(Mips::SD))
4987 .addUse(Lo)
4988 .addUse(Address)
4989 .addImm(Imm);
4990 } else {
// 32-bit GPRs: extract both words and store them at lo/hi offsets chosen
// by endianness (lo word at 0 for LE, at 4 for BE).
4991 Register BitcastW = MRI.createVirtualRegister(&Mips::MSA128WRegClass);
4992 Register Lo = MRI.createVirtualRegister(&Mips::GPR32RegClass);
4993 Register Hi = MRI.createVirtualRegister(&Mips::GPR32RegClass);
4994 BuildMI(*BB, I, DL, TII->get(Mips::COPY))
4995 .addDef(BitcastW)
4996 .addUse(StoreVal);
4997 BuildMI(*BB, I, DL, TII->get(Mips::COPY_S_W))
4998 .addDef(Lo)
4999 .addUse(BitcastW)
5000 .addImm(0);
5001 BuildMI(*BB, I, DL, TII->get(Mips::COPY_S_W))
5002 .addDef(Hi)
5003 .addUse(BitcastW)
5004 .addImm(1);
5005 BuildMI(*BB, I, DL, TII->get(Mips::SW))
5006 .addUse(Lo)
5007 .addUse(Address)
5008 .addImm(Imm + (IsLittle ? 0 : 4));
5009 BuildMI(*BB, I, DL, TII->get(Mips::SW))
5010 .addUse(Hi)
5011 .addUse(Address)
5012 .addImm(Imm + (IsLittle ? 4 : 0));
5013 }
5014 } else {
5015 // Mips release 5 needs to use instructions that can store to an unaligned
5016 // memory address.
// Each extracted word goes out via an SWR/SWL pair; offsets place the lo
// word at bytes 0..3 (LE) / 4..7 (BE) and the hi word at the opposite end
// of the 8-byte region.
5017 Register Bitcast = MRI.createVirtualRegister(&Mips::MSA128WRegClass);
5018 Register Lo = MRI.createVirtualRegister(&Mips::GPR32RegClass);
5019 Register Hi = MRI.createVirtualRegister(&Mips::GPR32RegClass);
5020 BuildMI(*BB, I, DL, TII->get(Mips::COPY)).addDef(Bitcast).addUse(StoreVal);
5021 BuildMI(*BB, I, DL, TII->get(Mips::COPY_S_W))
5022 .addDef(Lo)
5023 .addUse(Bitcast)
5024 .addImm(0);
5025 BuildMI(*BB, I, DL, TII->get(Mips::COPY_S_W))
5026 .addDef(Hi)
5027 .addUse(Bitcast)
5028 .addImm(1);
5029 BuildMI(*BB, I, DL, TII->get(Mips::SWR))
5030 .addUse(Lo)
5031 .addUse(Address)
5032 .addImm(Imm + (IsLittle ? 0 : 3));
5033 BuildMI(*BB, I, DL, TII->get(Mips::SWL))
5034 .addUse(Lo)
5035 .addUse(Address)
5036 .addImm(Imm + (IsLittle ? 3 : 0));
5037 BuildMI(*BB, I, DL, TII->get(Mips::SWR))
5038 .addUse(Hi)
5039 .addUse(Address)
5040 .addImm(Imm + (IsLittle ? 4 : 7));
5041 BuildMI(*BB, I, DL, TII->get(Mips::SWL))
5042 .addUse(Hi)
5043 .addUse(Address)
5044 .addImm(Imm + (IsLittle ? 7 : 4));
5045 }
5046
5047 MI.eraseFromParent();
5048 return BB;
5049}
unsigned const MachineRegisterInfo * MRI
static SDValue performORCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const AArch64Subtarget *Subtarget, const AArch64TargetLowering &TLI)
static SDValue performANDCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI)
MachineBasicBlock & MBB
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
This file declares a class to represent arbitrary precision floating point values and provide a varie...
Function Alias Analysis Results
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
#define LLVM_ATTRIBUTE_UNUSED
Definition: Compiler.h:203
This file contains the declarations for the subclasses of Constant, which represent the different fla...
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
#define LLVM_DEBUG(X)
Definition: Debug.h:101
uint64_t Size
Symbol * Sym
Definition: ELF_riscv.cpp:479
static GCMetadataPrinterRegistry::Add< ErlangGCPrinter > X("erlang", "erlang-compatible garbage collector")
const HexagonInstrInfo * TII
IRTranslator LLVM IR MI
#define RegName(no)
static LVOptions Options
Definition: LVOptions.cpp:25
lazy value info
static MachineBasicBlock * insertDivByZeroTrap(MachineInstr &MI, MachineBasicBlock *MBB)
#define F(x, y, z)
Definition: MD5.cpp:55
#define I(x, y, z)
Definition: MD5.cpp:58
#define G(x, y, z)
Definition: MD5.cpp:56
unsigned const TargetRegisterInfo * TRI
cl::opt< bool > EmitJalrReloc
static bool CC_MipsO32_FP64(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, CCState &State)
static bool CC_Mips(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, CCState &State) LLVM_ATTRIBUTE_UNUSED
static bool CC_MipsO32_FP32(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, CCState &State)
static bool CC_MipsO32(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, CCState &State, ArrayRef< MCPhysReg > F64Regs)
static SDValue performMADD_MSUBCombine(SDNode *ROOTNode, SelectionDAG &CurDAG, const MipsSubtarget &Subtarget)
static bool invertFPCondCodeUser(Mips::CondCode CC)
This function returns true if the floating point conditional branches and conditional moves which use...
static SDValue lowerFP_TO_SINT_STORE(StoreSDNode *SD, SelectionDAG &DAG, bool SingleFloat)
static SDValue performDivRemCombine(SDNode *N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI, const MipsSubtarget &Subtarget)
static const MCPhysReg Mips64DPRegs[8]
static SDValue performSHLCombine(SDNode *N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI, const MipsSubtarget &Subtarget)
static SDValue lowerUnalignedIntStore(StoreSDNode *SD, SelectionDAG &DAG, bool IsLittle)
static SDValue createStoreLR(unsigned Opc, SelectionDAG &DAG, StoreSDNode *SD, SDValue Chain, unsigned Offset)
static unsigned addLiveIn(MachineFunction &MF, unsigned PReg, const TargetRegisterClass *RC)
static std::pair< bool, bool > parsePhysicalReg(StringRef C, StringRef &Prefix, unsigned long long &Reg)
This is a helper function to parse a physical register string and split it into non-numeric and numer...
static SDValue createLoadLR(unsigned Opc, SelectionDAG &DAG, LoadSDNode *LD, SDValue Chain, SDValue Src, unsigned Offset)
static SDValue lowerFCOPYSIGN64(SDValue Op, SelectionDAG &DAG, bool HasExtractInsert)
static SDValue performADDCombine(SDNode *N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI, const MipsSubtarget &Subtarget)
cl::opt< bool > EmitJalrReloc
static SDValue performSUBCombine(SDNode *N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI, const MipsSubtarget &Subtarget)
static SDValue createFPCmp(SelectionDAG &DAG, const SDValue &Op)
static SDValue lowerFCOPYSIGN32(SDValue Op, SelectionDAG &DAG, bool HasExtractInsert)
static cl::opt< bool > NoZeroDivCheck("mno-check-zero-division", cl::Hidden, cl::desc("MIPS: Don't trap on integer division by zero."), cl::init(false))
static SDValue performSELECTCombine(SDNode *N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI, const MipsSubtarget &Subtarget)
static SDValue performCMovFPCombine(SDNode *N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI, const MipsSubtarget &Subtarget)
static SDValue UnpackFromArgumentSlot(SDValue Val, const CCValAssign &VA, EVT ArgVT, const SDLoc &DL, SelectionDAG &DAG)
static Mips::CondCode condCodeToFCC(ISD::CondCode CC)
static SDValue createCMovFP(SelectionDAG &DAG, SDValue Cond, SDValue True, SDValue False, const SDLoc &DL)
LLVMContext & Context
static GCMetadataPrinterRegistry::Add< OcamlGCMetadataPrinter > Y("ocaml", "ocaml 3.10-compatible collector")
const char LLVMTargetMachineRef TM
const SmallVectorImpl< MachineOperand > & Cond
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
SI optimize exec mask operations pre RA
This file defines the SmallVector class.
static const MCPhysReg IntRegs[32]
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
Definition: Statistic.h:167
This file implements the StringSwitch template, which mimics a switch() statement whose cases are str...
static const MCPhysReg F32Regs[64]
static bool contains(SmallPtrSetImpl< ConstantExpr * > &Cache, ConstantExpr *Expr, Constant *C)
Definition: Value.cpp:469
Value * RHS
Value * LHS
This class represents an incoming formal argument to a Function.
Definition: Argument.h:31
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition: ArrayRef.h:41
size_t size() const
size - Get the array size.
Definition: ArrayRef.h:165
const T * data() const
Definition: ArrayRef.h:162
LLVM Basic Block Representation.
Definition: BasicBlock.h:60
static BranchProbability getOne()
CCState - This class holds information needed while lowering arguments and return values.
MachineFunction & getMachineFunction() const
unsigned getFirstUnallocated(ArrayRef< MCPhysReg > Regs) const
getFirstUnallocated - Return the index of the first unallocated register in the set,...
CallingConv::ID getCallingConv() const
MCRegister AllocateReg(MCPhysReg Reg)
AllocateReg - Attempt to allocate one register.
int64_t AllocateStack(unsigned Size, Align Alignment)
AllocateStack - Allocate a chunk of stack space with the specified size and alignment.
uint64_t getStackSize() const
Returns the size of the currently allocated portion of the stack.
bool isVarArg() const
void addInRegsParamInfo(unsigned RegBegin, unsigned RegEnd)
void addLoc(const CCValAssign &V)
CCValAssign - Represent assignment of one arg/retval to a location.
bool isRegLoc() const
Register getLocReg() const
LocInfo getLocInfo() const
bool isUpperBitsInLoc() const
static CCValAssign getMem(unsigned ValNo, MVT ValVT, int64_t Offset, MVT LocVT, LocInfo HTP, bool IsCustom=false)
static CCValAssign getReg(unsigned ValNo, MVT ValVT, unsigned RegNo, MVT LocVT, LocInfo HTP, bool IsCustom=false)
bool needsCustom() const
bool isMemLoc() const
static CCValAssign getCustomReg(unsigned ValNo, MVT ValVT, unsigned RegNo, MVT LocVT, LocInfo HTP)
int64_t getLocMemOffset() const
bool isMustTailCall() const
Tests if this call site must be tail call optimized.
uint64_t getZExtValue() const
int64_t getSExtValue() const
This class represents an Operation in the Expression.
A parsed version of the target data layout string in and methods for querying it.
Definition: DataLayout.h:110
TypeSize getTypeAllocSize(Type *Ty) const
Returns the offset in bytes between successive objects of the specified type, including alignment pad...
Definition: DataLayout.h:504
A debug info location.
Definition: DebugLoc.h:33
const char * getSymbol() const
This is a fast-path instruction selection class that generates poor code and doesn't support illegal ...
Definition: FastISel.h:66
FunctionLoweringInfo - This contains information that is global to a function that is used when lower...
bool hasStructRetAttr() const
Determine if the function returns a structure through first or second pointer argument.
Definition: Function.h:661
bool hasFnAttribute(Attribute::AttrKind Kind) const
Return true if the function has the attribute.
Definition: Function.cpp:677
const GlobalValue * getGlobal() const
bool hasLocalLinkage() const
Definition: GlobalValue.h:527
const GlobalObject * getAliaseeObject() const
Definition: Globals.cpp:368
bool hasInternalLinkage() const
Definition: GlobalValue.h:525
Class to represent integer types.
Definition: DerivedTypes.h:40
This is an important class for using LLVM in a threaded context.
Definition: LLVMContext.h:67
void emitError(uint64_t LocCookie, const Twine &ErrorStr)
emitError - Emit an error message to the currently installed error handler with optional location inf...
This class is used to represent ISD::LOAD nodes.
MCSymbol * getOrCreateSymbol(const Twine &Name)
Lookup the symbol inside with the specified Name.
Definition: MCContext.cpp:200
Wrapper class representing physical registers. Should be passed by value.
Definition: MCRegister.h:33
MCSymbol - Instances of this class represent a symbol name in the MC file, and MCSymbols are created ...
Definition: MCSymbol.h:40
Machine Value Type.
static auto integer_valuetypes()
TypeSize getSizeInBits() const
Returns the size of the specified MVT in bits.
TypeSize getStoreSize() const
Return the number of bytes overwritten by a store of the specified value type.
static MVT getVectorVT(MVT VT, unsigned NumElements)
bool isFloatingPoint() const
Return true if this is a FP or a vector FP type.
bool isValid() const
Return true if this is a valid simple valuetype.
static MVT getIntegerVT(unsigned BitWidth)
static auto fp_valuetypes()
static auto fp_fixedlen_vector_valuetypes()
void transferSuccessorsAndUpdatePHIs(MachineBasicBlock *FromMBB)
Transfers all the successors, as in transferSuccessors, and update PHI operands in the successor bloc...
const BasicBlock * getBasicBlock() const
Return the LLVM basic block that this instance corresponded to originally.
void addSuccessor(MachineBasicBlock *Succ, BranchProbability Prob=BranchProbability::getUnknown())
Add Succ as a successor of this MachineBasicBlock.
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
void splice(iterator Where, MachineBasicBlock *Other, iterator From)
Take an instruction from MBB 'Other' at the position From, and insert it into this MBB right before '...
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
int CreateFixedObject(uint64_t Size, int64_t SPOffset, bool IsImmutable, bool isAliased=false)
Create a new object at a fixed location on the stack.
void setFrameAddressIsTaken(bool T)
void setHasTailCall(bool V=true)
void setReturnAddressIsTaken(bool s)
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
MCContext & getContext() const
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
const DataLayout & getDataLayout() const
Return the DataLayout attached to the Module associated to this MF.
Function & getFunction()
Return the LLVM function that this machine code represents.
const LLVMTargetMachine & getTarget() const
getTarget - Return the target machine this machine code is compiled with
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do...
Register addLiveIn(MCRegister PReg, const TargetRegisterClass *RC)
addLiveIn - Add the specified physical register as a live-in value and create a corresponding virtual...
MachineBasicBlock * CreateMachineBasicBlock(const BasicBlock *BB=nullptr, std::optional< UniqueBBID > BBID=std::nullopt)
CreateMachineBasicBlock - Allocate a new MachineBasicBlock.
void insert(iterator MBBI, MachineBasicBlock *MBB)
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & addReg(Register RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned TargetFlags=0) const
const MachineInstrBuilder & addUse(Register RegNo, unsigned Flags=0, unsigned SubReg=0) const
Add a virtual register use operand.
const MachineInstrBuilder & addDef(Register RegNo, unsigned Flags=0, unsigned SubReg=0) const
Add a virtual register definition operand.
Representation of each machine instruction.
Definition: MachineInstr.h:69
const MachineOperand & getOperand(unsigned i) const
Definition: MachineInstr.h:556
@ EK_GPRel64BlockAddress
EK_GPRel64BlockAddress - Each entry is an address of block, encoded with a relocation as gp-relative,...
@ MOVolatile
The memory access is volatile.
Flags getFlags() const
Return the raw flags of the source value,.
MachineOperand class - Representation of each machine instruction operand.
void setSubReg(unsigned subReg)
static MachineOperand CreateMCSymbol(MCSymbol *Sym, unsigned TargetFlags=0)
void setIsKill(bool Val=true)
Register getReg() const
getReg - Returns the register number.
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
Register createVirtualRegister(const TargetRegisterClass *RegClass, StringRef Name="")
createVirtualRegister - Create and return a new virtual register in the function with the specified r...
void addLiveIn(MCRegister Reg, Register vreg=Register())
addLiveIn - Add the specified register as a live-in.
Align getAlign() const
MachineMemOperand * getMemOperand() const
Return a MachineMemOperand object describing the memory reference performed by operation.
const MachinePointerInfo & getPointerInfo() const
const SDValue & getChain() const
EVT getMemoryVT() const
Return the type of the in-memory value.
bool IsN64() const
Definition: MipsABIInfo.h:42
ArrayRef< MCPhysReg > GetVarArgRegs() const
The registers to use for the variable argument list.
Definition: MipsABIInfo.cpp:41
bool ArePtrs64bit() const
Definition: MipsABIInfo.h:73
unsigned GetCalleeAllocdArgSizeInBytes(CallingConv::ID CC) const
Obtain the size of the area allocated by the callee for arguments.
Definition: MipsABIInfo.cpp:49
unsigned GetPtrAddiuOp() const
unsigned GetPtrAndOp() const
ArrayRef< MCPhysReg > GetByValArgRegs() const
The registers to use for byval arguments.
Definition: MipsABIInfo.cpp:33
unsigned GetNullPtr() const
Definition: MipsABIInfo.cpp:90
bool IsN32() const
Definition: MipsABIInfo.h:41
bool IsO32() const
Definition: MipsABIInfo.h:40
bool WasOriginalArgVectorFloat(unsigned ValNo) const
Definition: MipsCCState.h:198
static SpecialCallingConvType getSpecialCallingConvForCallee(const SDNode *Callee, const MipsSubtarget &Subtarget)
Determine the SpecialCallingConvType for the given callee.
Definition: MipsCCState.cpp:70
MipsFunctionInfo - This class is derived from MachineFunction private Mips target-specific informatio...
void setVarArgsFrameIndex(int Index)
unsigned getSRetReturnReg() const
MachinePointerInfo callPtrInfo(MachineFunction &MF, const char *ES)
Create a MachinePointerInfo that has an ExternalSymbolPseudoSourceValue object representing a GOT ent...
Register getGlobalBaseReg(MachineFunction &MF)
void setSRetReturnReg(unsigned Reg)
void setFormalArgInfo(unsigned Size, bool HasByval)
static const uint32_t * getMips16RetHelperMask()
bool hasMips32r6() const
bool hasMips4() const
bool hasMips64r2() const
bool isFP64bit() const
bool isLittle() const
bool inMicroMipsMode() const
bool useSoftFloat() const
const MipsInstrInfo * getInstrInfo() const override
bool hasMips64r6() const
bool inMips16Mode() const
bool hasMips64() const
bool hasMips32() const
bool hasSym32() const
bool useXGOT() const
bool inAbs2008Mode() const
const MipsRegisterInfo * getRegisterInfo() const override
bool isABICalls() const
bool hasCnMips() const
bool systemSupportsUnalignedAccess() const
Does the system support unaligned memory access.
bool isGP64bit() const
bool hasExtractInsert() const
Features related to the presence of specific instructions.
bool hasMips32r2() const
bool hasMSA() const
bool isSingleFloat() const
bool isABI_O32() const
bool useLongCalls() const
unsigned getGPRSizeInBytes() const
bool inMips16HardFloat() const
const TargetFrameLowering * getFrameLowering() const override
MVT getRegisterTypeForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT) const override
Return the register type for a given MVT, ensuring vectors are treated as a series of gpr sized integ...
bool hasBitTest(SDValue X, SDValue Y) const override
Return true if the target has a bit-test instruction: (X & (1 << Y)) ==/!= 0 This knowledge can be us...
static const MipsTargetLowering * create(const MipsTargetMachine &TM, const MipsSubtarget &STI)
SDValue getAddrGPRel(NodeTy *N, const SDLoc &DL, EVT Ty, SelectionDAG &DAG, bool IsN64) const
unsigned getVectorTypeBreakdownForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT, EVT &IntermediateVT, unsigned &NumIntermediates, MVT &RegisterVT) const override
Break down vectors to the correct number of gpr sized integers.
Register getRegisterByName(const char *RegName, LLT VT, const MachineFunction &MF) const override
Return the register ID of the name passed in.
const char * getTargetNodeName(unsigned Opcode) const override
getTargetNodeName - This method returns the name of a target specific
SDValue getAddrNonPICSym64(NodeTy *N, const SDLoc &DL, EVT Ty, SelectionDAG &DAG) const
EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context, EVT VT) const override
getSetCCResultType - get the ISD::SETCC result ValueType
SDValue getAddrGlobal(NodeTy *N, const SDLoc &DL, EVT Ty, SelectionDAG &DAG, unsigned Flag, SDValue Chain, const MachinePointerInfo &PtrInfo) const
FastISel * createFastISel(FunctionLoweringInfo &funcInfo, const TargetLibraryInfo *libInfo) const override
createFastISel - This method returns a target specific FastISel object, or null if the target does no...
MipsTargetLowering(const MipsTargetMachine &TM, const MipsSubtarget &STI)
const MipsABIInfo & ABI
SDValue getAddrGlobalLargeGOT(NodeTy *N, const SDLoc &DL, EVT Ty, SelectionDAG &DAG, unsigned HiFlag, unsigned LoFlag, SDValue Chain, const MachinePointerInfo &PtrInfo) const
SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override
This method will be invoked for all target nodes and for any target-independent nodes that the target...
CCAssignFn * CCAssignFnForReturn() const
void ReplaceNodeResults(SDNode *N, SmallVectorImpl< SDValue > &Results, SelectionDAG &DAG) const override
ReplaceNodeResults - Replace the results of node with an illegal result type with new values built ou...
MachineBasicBlock * EmitInstrWithCustomInserter(MachineInstr &MI, MachineBasicBlock *MBB) const override
This method should be implemented by targets that mark instructions with the 'usesCustomInserter' fla...
CCAssignFn * CCAssignFnForCall() const
unsigned getNumRegistersForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT) const override
Return the number of registers for a given MVT, ensuring vectors are treated as a series of gpr sized...
SDValue getAddrNonPIC(NodeTy *N, const SDLoc &DL, EVT Ty, SelectionDAG &DAG) const
SDValue lowerSTORE(SDValue Op, SelectionDAG &DAG) const
void AdjustInstrPostInstrSelection(MachineInstr &MI, SDNode *Node) const override
This method should be implemented by targets that mark instructions with the 'hasPostISelHook' flag.
virtual void getOpndList(SmallVectorImpl< SDValue > &Ops, std::deque< std::pair< unsigned, SDValue > > &RegsToPass, bool IsPICCall, bool GlobalOrExternal, bool InternalLinkage, bool IsCallReloc, CallLoweringInfo &CLI, SDValue Callee, SDValue Chain) const
This function fills Ops, which is the list of operands that will later be used when a function call n...
EVT getTypeForExtReturn(LLVMContext &Context, EVT VT, ISD::NodeType) const override
Return the type that should be used to zero or sign extend a zeroext/signext integer return value.
bool isCheapToSpeculateCtlz(Type *Ty) const override
Return true if it is cheap to speculate a call to intrinsic ctlz.
SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override
LowerOperation - Provide custom lowering hooks for some operations.
bool isCheapToSpeculateCttz(Type *Ty) const override
Return true if it is cheap to speculate a call to intrinsic cttz.
bool shouldFoldConstantShiftPairToMask(const SDNode *N, CombineLevel Level) const override
Return true if it is profitable to fold a pair of shifts into a mask.
SDValue getAddrLocal(NodeTy *N, const SDLoc &DL, EVT Ty, SelectionDAG &DAG, bool IsN32OrN64) const
SDValue getGlobalReg(SelectionDAG &DAG, EVT Ty) const
const MipsSubtarget & Subtarget
void HandleByVal(CCState *, unsigned &, Align) const override
Target-specific cleanup for formal ByVal parameters.
SDValue lowerLOAD(SDValue Op, SelectionDAG &DAG) const
bool IsConstantInSmallSection(const DataLayout &DL, const Constant *CN, const TargetMachine &TM) const
Return true if this constant should be placed into small data section.
Wrapper class representing virtual and physical registers.
Definition: Register.h:19
Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation funct...
Represents one node in the SelectionDAG.
unsigned getOpcode() const
Return the SelectionDAG opcode value for this node.
uint64_t getAsZExtVal() const
Helper method returns the zero-extended integer value of a ConstantSDNode.
const SDValue & getOperand(unsigned Num) const
EVT getValueType(unsigned ResNo) const
Return the type of a specified result.
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation.
SDNode * getNode() const
get the SDNode which holds the desired result
SDValue getValue(unsigned R) const
EVT getValueType() const
Return the ValueType of the referenced return value.
TypeSize getValueSizeInBits() const
Returns the size of the value in bits.
const SDValue & getOperand(unsigned i) const
unsigned getOpcode() const
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representa...
Definition: SelectionDAG.h:225
SDValue getExtLoad(ISD::LoadExtType ExtType, const SDLoc &dl, EVT VT, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, EVT MemVT, MaybeAlign Alignment=MaybeAlign(), MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
SDValue getTargetGlobalAddress(const GlobalValue *GV, const SDLoc &DL, EVT VT, int64_t offset=0, unsigned TargetFlags=0)
Definition: SelectionDAG.h:722
SDValue getMergeValues(ArrayRef< SDValue > Ops, const SDLoc &dl)
Create a MERGE_VALUES node from the given operands.
SDVTList getVTList(EVT VT)
Return an SDVTList that represents the list of values specified.
SDValue getSetCC(const SDLoc &DL, EVT VT, SDValue LHS, SDValue RHS, ISD::CondCode Cond, SDValue Chain=SDValue(), bool IsSignaling=false)
Helper function to make it easier to build SetCC's if you just have an ISD::CondCode instead of an SD...
SDValue getLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, MaybeAlign Alignment=MaybeAlign(), MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr)
Loads are not normal binary operators: their result type is not determined by their operands,...
SDValue getTargetJumpTable(int JTI, EVT VT, unsigned TargetFlags=0)
Definition: SelectionDAG.h:732
SDValue getUNDEF(EVT VT)
Return an UNDEF node. UNDEF does not have a useful SDLoc.
SDValue getCALLSEQ_END(SDValue Chain, SDValue Op1, SDValue Op2, SDValue InGlue, const SDLoc &DL)
Return a new CALLSEQ_END node, which always must have a glue result (to ensure it's not CSE'd).
SDValue getMemcpy(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src, SDValue Size, Align Alignment, bool isVol, bool AlwaysInline, bool isTailCall, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo, const AAMDNodes &AAInfo=AAMDNodes(), AAResults *AA=nullptr)
const DataLayout & getDataLayout() const
Definition: SelectionDAG.h:472
void addCallSiteInfo(const SDNode *Node, CallSiteInfoImpl &&CallInfo)
Set CallSiteInfo to be associated with Node.
SDValue getConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
Create a ConstantSDNode wrapping a constant value.
SDValue getStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
Helper function to build ISD::STORE nodes.
SDValue getCALLSEQ_START(SDValue Chain, uint64_t InSize, uint64_t OutSize, const SDLoc &DL)
Return a new CALLSEQ_START node, that starts new call frame, in which InSize bytes are set up inside ...
SDValue getRegister(unsigned Reg, EVT VT)
SDValue getExternalSymbol(const char *Sym, EVT VT)
const TargetMachine & getTarget() const
Definition: SelectionDAG.h:473
SDValue getCopyToReg(SDValue Chain, const SDLoc &dl, unsigned Reg, SDValue N)
Definition: SelectionDAG.h:773
SDValue getIntPtrConstant(uint64_t Val, const SDLoc &DL, bool isTarget=false)
SDValue getValueType(EVT)
SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDUse > Ops)
Gets or creates the specified node.
SDValue getTargetConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isOpaque=false)
Definition: SelectionDAG.h:676
SDValue getTargetBlockAddress(const BlockAddress *BA, EVT VT, int64_t Offset=0, unsigned TargetFlags=0)
Definition: SelectionDAG.h:768
void ReplaceAllUsesOfValueWith(SDValue From, SDValue To)
Replace any uses of From with To, leaving uses of other values produced by From.getNode() alone.
MachineFunction & getMachineFunction() const
Definition: SelectionDAG.h:469
SDValue getCopyFromReg(SDValue Chain, const SDLoc &dl, unsigned Reg, EVT VT)
Definition: SelectionDAG.h:799
SDValue getFrameIndex(int FI, EVT VT, bool isTarget=false)
SDValue getRegisterMask(const uint32_t *RegMask)
LLVMContext * getContext() const
Definition: SelectionDAG.h:485
SDValue getMemIntrinsicNode(unsigned Opcode, const SDLoc &dl, SDVTList VTList, ArrayRef< SDValue > Ops, EVT MemVT, MachinePointerInfo PtrInfo, Align Alignment, MachineMemOperand::Flags Flags=MachineMemOperand::MOLoad|MachineMemOperand::MOStore, LocationSize Size=0, const AAMDNodes &AAInfo=AAMDNodes())
Creates a MemIntrinsicNode that may produce a result and takes a list of operands.
SDValue getTargetExternalSymbol(const char *Sym, EVT VT, unsigned TargetFlags=0)
SDValue getTargetConstantPool(const Constant *C, EVT VT, MaybeAlign Align=std::nullopt, int Offset=0, unsigned TargetFlags=0)
Definition: SelectionDAG.h:739
SDValue getEntryNode() const
Return the token chain corresponding to the entry of the function.
Definition: SelectionDAG.h:554
std::pair< SDValue, SDValue > SplitScalar(const SDValue &N, const SDLoc &DL, const EVT &LoVT, const EVT &HiVT)
Split the scalar node with EXTRACT_ELEMENT using the provided VTs and return the low/high part.
bool empty() const
Definition: SmallVector.h:94
size_t size() const
Definition: SmallVector.h:91
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: SmallVector.h:586
reference emplace_back(ArgTypes &&... Args)
Definition: SmallVector.h:950
void push_back(const T &Elt)
Definition: SmallVector.h:426
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1209
This class is used to represent ISD::STORE nodes.
const SDValue & getBasePtr() const
const SDValue & getValue() const
bool isTruncatingStore() const
Return true if the op does a truncation before store.
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:50
constexpr bool empty() const
empty - Check if the string is empty.
Definition: StringRef.h:134
constexpr size_t size() const
size - Get the string size.
Definition: StringRef.h:137
A switch()-like statement whose cases are string literals.
Definition: StringSwitch.h:44
StringSwitch & Case(StringLiteral S, T Value)
Definition: StringSwitch.h:69
R Default(T Value)
Definition: StringSwitch.h:182
Information about stack frame layout on the target.
unsigned getStackAlignment() const
getStackAlignment - This method returns the number of bytes to which the stack pointer must be aligne...
Align getStackAlign() const
getStackAlignment - This method returns the number of bytes to which the stack pointer must be aligne...
TargetInstrInfo - Interface to description of machine instruction set.
Provides information about what library functions are available for the current target.
void setBooleanVectorContents(BooleanContent Ty)
Specify how the target extends the result of a vector boolean value from a vector of i1 to a wider ty...
void setOperationAction(unsigned Op, MVT VT, LegalizeAction Action)
Indicate that the specified operation does not work with the specified type and indicate what to do a...
virtual const TargetRegisterClass * getRegClassFor(MVT VT, bool isDivergent=false) const
Return the register class that should be used for the specified value type.
void setMinStackArgumentAlignment(Align Alignment)
Set the minimum stack alignment of an argument.
const TargetMachine & getTargetMachine() const
virtual unsigned getNumRegisters(LLVMContext &Context, EVT VT, std::optional< MVT > RegisterVT=std::nullopt) const
Return the number of registers that this ValueType will eventually require.
void setMaxAtomicSizeInBitsSupported(unsigned SizeInBits)
Set the maximum atomic operation size supported by the backend.
void setMinFunctionAlignment(Align Alignment)
Set the target's minimum function alignment.
void setBooleanContents(BooleanContent Ty)
Specify how the target extends the result of integer and floating point boolean values from i1 to a w...
virtual MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const
Return the pointer type for the given address space, defaults to the pointer type from the data layou...
void setLibcallName(RTLIB::Libcall Call, const char *Name)
Rename the default libcall routine name for the specified libcall.
void setTruncStoreAction(MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified truncating store does not work with the specified type and indicate what ...
void setStackPointerRegisterToSaveRestore(Register R)
If set to a physical register, this specifies the register that llvm.savestack/llvm....
void AddPromotedToType(unsigned Opc, MVT OrigVT, MVT DestVT)
If Opc/OrigVT is specified as being promoted, the promotion code defaults to trying a larger integer/...
void setTargetDAGCombine(ArrayRef< ISD::NodeType > NTs)
Targets should invoke this method for each target independent node that they want to provide a custom...
Align getMinStackArgumentAlignment() const
Return the minimum stack alignment of an argument.
void setLoadExtAction(unsigned ExtType, MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified load with extension does not work with the specified type and indicate wh...
std::vector< ArgListEntry > ArgListTy
unsigned MaxStoresPerMemcpy
Specify maximum number of store instructions per memcpy call.
MVT getRegisterType(MVT VT) const
Return the type of registers that this ValueType will eventually require.
This class defines information used to lower LLVM code to legal SelectionDAG operators that the targe...
virtual ConstraintType getConstraintType(StringRef Constraint) const
Given a constraint, return the type of constraint it is for this target.
virtual SDValue LowerToTLSEmulatedModel(const GlobalAddressSDNode *GA, SelectionDAG &DAG) const
Lower TLS global address SDNode for target independent emulated TLS model.
std::pair< SDValue, SDValue > LowerCallTo(CallLoweringInfo &CLI) const
This function lowers an abstract call to a function into an actual call.
bool isPositionIndependent() const
virtual ConstraintWeight getSingleConstraintMatchWeight(AsmOperandInfo &info, const char *constraint) const
Examine constraint string and operand type and determine a weight value.
virtual std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const
Given a physical register constraint (e.g.
bool verifyReturnAddressArgumentIsConstant(SDValue Op, SelectionDAG &DAG) const
virtual void LowerAsmOperandForConstraint(SDValue Op, StringRef Constraint, std::vector< SDValue > &Ops, SelectionDAG &DAG) const
Lower the specified operand into the Ops vector.
virtual unsigned getJumpTableEncoding() const
Return the entry encoding for a jump table in the current function.
virtual void LowerOperationWrapper(SDNode *N, SmallVectorImpl< SDValue > &Results, SelectionDAG &DAG) const
This callback is invoked by the type legalizer to legalize nodes with an illegal operand type but leg...
TLSModel::Model getTLSModel(const GlobalValue *GV) const
Returns the TLS model which should be used for the given global variable.
bool useEmulatedTLS() const
Returns true if this target uses emulated TLS.
virtual TargetLoweringObjectFile * getObjFileLowering() const
TargetOptions Options
unsigned NoNaNsFPMath
NoNaNsFPMath - This flag is enabled when the -enable-no-nans-fp-math flag is specified on the command...
iterator begin() const
begin/end - Return all of the registers in this class.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
The instances of the Type class are immutable: once they are created, they are never changed.
Definition: Type.h:45
bool isVectorTy() const
True if this is an instance of VectorType.
Definition: Type.h:265
bool isFloatTy() const
Return true if this is 'float', a 32-bit IEEE fp type.
Definition: Type.h:154
static IntegerType * getIntNTy(LLVMContext &C, unsigned N)
bool isIntegerTy() const
True if this is an instance of IntegerType.
Definition: Type.h:228
TypeSize getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
LLVM Value Representation.
Definition: Value.h:74
Type * getType() const
All values are typed, get the type of this value.
Definition: Value.h:255
constexpr ScalarTy getFixedValue() const
Definition: TypeSize.h:187
self_iterator getIterator()
Definition: ilist_node.h:109
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr char Args[]
Key for Kernel::Metadata::mArgs.
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
Definition: BitmaskEnum.h:121
@ Fast
Attempts to make calls as fast as possible (e.g.
Definition: CallingConv.h:41
@ C
The default llvm calling convention, compatible with C.
Definition: CallingConv.h:34
NodeType
ISD::NodeType enum - This enum defines the target-independent operators for a SelectionDAG.
Definition: ISDOpcodes.h:40
@ SETCC
SetCC operator - This evaluates to a true value iff the condition is true.
Definition: ISDOpcodes.h:750
@ STACKRESTORE
STACKRESTORE has two operands, an input chain and a pointer to restore to it returns an output chain.
Definition: ISDOpcodes.h:1126
@ STACKSAVE
STACKSAVE - STACKSAVE has one operand, an input chain.
Definition: ISDOpcodes.h:1122
@ BSWAP
Byte Swap and Counting operators.
Definition: ISDOpcodes.h:714
@ VAEND
VAEND, VASTART - VAEND and VASTART have three operands: an input chain, pointer, and a SRCVALUE.
Definition: ISDOpcodes.h:1155
@ ATOMIC_STORE
OUTCHAIN = ATOMIC_STORE(INCHAIN, ptr, val) This corresponds to "store atomic" instruction.
Definition: ISDOpcodes.h:1241
@ ADD
Simple integer binary arithmetic operators.
Definition: ISDOpcodes.h:239
@ LOAD
LOAD and STORE have token chains as their first operand, then the same operands as an LLVM load/store...
Definition: ISDOpcodes.h:1031
@ ANY_EXTEND
ANY_EXTEND - Used for integer types. The high bits are undefined.
Definition: ISDOpcodes.h:783
@ FMA
FMA - Perform a * b + c with no intermediate rounding step.
Definition: ISDOpcodes.h:483
@ RETURNADDR
Definition: ISDOpcodes.h:95
@ GlobalAddress
Definition: ISDOpcodes.h:78
@ ATOMIC_FENCE
OUTCHAIN = ATOMIC_FENCE(INCHAIN, ordering, scope) This corresponds to the fence instruction.
Definition: ISDOpcodes.h:1233
@ SDIVREM
SDIVREM/UDIVREM - Divide two integers and produce both a quotient and remainder result.
Definition: ISDOpcodes.h:255
@ FP16_TO_FP
FP16_TO_FP, FP_TO_FP16 - These operators are used to perform promotions and truncation for half-preci...
Definition: ISDOpcodes.h:913
@ BITCAST
BITCAST - This operator converts between integer, vector and FP values, as if the value was stored to...
Definition: ISDOpcodes.h:903
@ BUILD_PAIR
BUILD_PAIR - This is the opposite of EXTRACT_ELEMENT in some ways.
Definition: ISDOpcodes.h:229
@ GlobalTLSAddress
Definition: ISDOpcodes.h:79
@ EH_RETURN
OUTCHAIN = EH_RETURN(INCHAIN, OFFSET, HANDLER) - This node represents 'eh_return' gcc dwarf builtin,...
Definition: ISDOpcodes.h:135
@ SIGN_EXTEND
Conversion operators.
Definition: ISDOpcodes.h:774
@ TargetJumpTable
Definition: ISDOpcodes.h:167
@ FSINCOS
FSINCOS - Compute both fsin and fcos as a single operation.
Definition: ISDOpcodes.h:988
@ BR_CC
BR_CC - Conditional branch.
Definition: ISDOpcodes.h:1077
@ BR_JT
BR_JT - Jumptable branch.
Definition: ISDOpcodes.h:1056
@ SELECT
Select(COND, TRUEVAL, FALSEVAL).
Definition: ISDOpcodes.h:727
@ ATOMIC_LOAD
Val, OUTCHAIN = ATOMIC_LOAD(INCHAIN, ptr) This corresponds to "load atomic" instruction.
Definition: ISDOpcodes.h:1237
@ VACOPY
VACOPY - VACOPY has 5 operands: an input chain, a destination pointer, a source pointer,...
Definition: ISDOpcodes.h:1151
@ SHL
Shift and rotation operations.
Definition: ISDOpcodes.h:705
@ ZERO_EXTEND
ZERO_EXTEND - Used for integer types, zeroing the new bits.
Definition: ISDOpcodes.h:780
@ SELECT_CC
Select with condition operator - This selects between a true value and a false value (ops #2 and #3) ...
Definition: ISDOpcodes.h:742
@ DYNAMIC_STACKALLOC
DYNAMIC_STACKALLOC - Allocate some number of bytes on the stack aligned to a specified boundary.
Definition: ISDOpcodes.h:1041
@ ConstantPool
Definition: ISDOpcodes.h:82
@ SIGN_EXTEND_INREG
SIGN_EXTEND_INREG - This operator atomically performs a SHL/SRA pair to sign extend a small value in ...
Definition: ISDOpcodes.h:798
@ EH_DWARF_CFA
EH_DWARF_CFA - This node represents the pointer to the DWARF Canonical Frame Address (CFA),...
Definition: ISDOpcodes.h:129
@ FRAMEADDR
FRAMEADDR, RETURNADDR - These nodes represent llvm.frameaddress and llvm.returnaddress on the DAG.
Definition: ISDOpcodes.h:94
@ FP_TO_SINT
FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
Definition: ISDOpcodes.h:836
@ AND
Bitwise operators - logical and, logical or, logical xor.
Definition: ISDOpcodes.h:680
@ TRAP
TRAP - Trapping instruction.
Definition: ISDOpcodes.h:1208
@ TokenFactor
TokenFactor - This node takes multiple tokens as input and produces a single token result.
Definition: ISDOpcodes.h:52
@ TRUNCATE
TRUNCATE - Completely drop the high bits.
Definition: ISDOpcodes.h:786
@ VAARG
VAARG - VAARG has four operands: an input chain, a pointer, a SRCVALUE, and the alignment.
Definition: ISDOpcodes.h:1146
@ BRCOND
BRCOND - Conditional branch.
Definition: ISDOpcodes.h:1070
@ BlockAddress
Definition: ISDOpcodes.h:84
@ SHL_PARTS
SHL_PARTS/SRA_PARTS/SRL_PARTS - These operators are used for expanded integer shift operations.
Definition: ISDOpcodes.h:763
@ AssertSext
AssertSext, AssertZext - These nodes record if a register contains a value that has already been zero...
Definition: ISDOpcodes.h:61
@ FCOPYSIGN
FCOPYSIGN(X, Y) - Return the value of X with the sign of Y.
Definition: ISDOpcodes.h:493
@ AssertZext
Definition: ISDOpcodes.h:62
@ CALLSEQ_START
CALLSEQ_START/CALLSEQ_END - These operators mark the beginning and end of a call sequence,...
Definition: ISDOpcodes.h:1140
CondCode getSetCCInverse(CondCode Operation, EVT Type)
Return the operation corresponding to !(X op Y), where 'op' is a valid SetCC operation.
CondCode
ISD::CondCode enum - These are ordered carefully to make the bitfields below work out,...
Definition: ISDOpcodes.h:1523
LoadExtType
LoadExtType enum - This enum defines the three variants of LOADEXT (load with extension).
Definition: ISDOpcodes.h:1503
@ Bitcast
Perform the operation on a different, but equivalently sized type.
Flag
These should be considered private to the implementation of the MCInstrDesc class.
Definition: MCInstrDesc.h:148
@ MO_GOT_CALL
MO_GOT_CALL - Represents the offset into the global offset table at which the address of a call site ...
Definition: MipsBaseInfo.h:44
@ MO_TPREL_HI
MO_TPREL_HI/LO - Represents the hi and low part of the offset from the thread pointer (Local Exec TLS).
Definition: MipsBaseInfo.h:73
@ MO_GOT
MO_GOT - Represents the offset into the global offset table at which the address the relocation entry...
Definition: MipsBaseInfo.h:38
@ MO_JALR
Helper operand used to generate R_MIPS_JALR.
Definition: MipsBaseInfo.h:95
@ MO_GOTTPREL
MO_GOTTPREL - Represents the offset from the thread pointer (Initial Exec TLS).
Definition: MipsBaseInfo.h:69
@ MO_GOT_HI16
MO_GOT_HI16/LO16, MO_CALL_HI16/LO16 - Relocations used for large GOTs.
Definition: MipsBaseInfo.h:89
@ MO_TLSLDM
MO_TLSLDM - Represents the offset into the global offset table at which the module ID and TLS block offset reside (Local Dynamic TLS).
Definition: MipsBaseInfo.h:63
@ MO_TLSGD
MO_TLSGD - Represents the offset into the global offset table at which the module ID and TLS block offset reside (General Dynamic TLS).
Definition: MipsBaseInfo.h:58
FastISel * createFastISel(FunctionLoweringInfo &funcInfo, const TargetLibraryInfo *libInfo)
@ Implicit
Not emitted register (e.g. carry, or temporary result).
@ Dead
Unused definition.
@ Define
Register definition.
@ Kill
The last use of a register.
@ EarlyClobber
Register definition happens before uses.
@ GeneralDynamic
Definition: CodeGen.h:46
Reg
All possible values of the reg field in the ModR/M byte.
initializer< Ty > init(const Ty &Val)
Definition: CommandLine.h:450
NodeAddr< FuncNode * > Func
Definition: RDFGraph.h:393
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
@ Offset
Definition: DWP.cpp:456
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
uint64_t divideCeil(uint64_t Numerator, uint64_t Denominator)
Returns the integer ceil(Numerator / Denominator).
Definition: MathExtras.h:417
constexpr bool isShiftedMask_64(uint64_t Value)
Return true if the argument contains a non-empty sequence of ones with the remainder zero (64 bit ver...
Definition: MathExtras.h:258
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:163
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
Definition: Error.cpp:156
bool CCAssignFn(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, CCState &State)
CCAssignFn - This function assigns a location for Val, updating State to reflect the change.
CombineLevel
Definition: DAGCombine.h:15
const MipsTargetLowering * createMips16TargetLowering(const MipsTargetMachine &TM, const MipsSubtarget &STI)
Create MipsTargetLowering objects.
@ Or
Bitwise or logical OR of integers.
@ Add
Sum of integers.
unsigned getKillRegState(bool B)
uint64_t alignTo(uint64_t Size, Align A)
Returns a multiple of A needed to store Size bytes.
Definition: Alignment.h:155
DWARFExpression::Operation Op
const MipsTargetLowering * createMipsSETargetLowering(const MipsTargetMachine &TM, const MipsSubtarget &STI)
bool getAsUnsignedInteger(StringRef Str, unsigned Radix, unsigned long long &Result)
Helper functions for StringRef::getAsInteger.
Definition: StringRef.cpp:486
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
Definition: BitVector.h:860
#define N
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition: Alignment.h:39
uint64_t value() const
This is a hole in the type system and should not be abused.
Definition: Alignment.h:85
Extended Value Type.
Definition: ValueTypes.h:34
EVT changeVectorElementTypeToInteger() const
Return a vector with the same number of elements as this vector, but with the element type converted ...
Definition: ValueTypes.h:93
bool bitsLT(EVT VT) const
Return true if this has less bits than VT.
Definition: ValueTypes.h:290
TypeSize getSizeInBits() const
Return the size of the specified value type in bits.
Definition: ValueTypes.h:358
bool isPow2VectorType() const
Returns true if the given vector is a power of 2.
Definition: ValueTypes.h:455
MVT getSimpleVT() const
Return the SimpleValueType held in the specified simple EVT.
Definition: ValueTypes.h:306
static EVT getFloatingPointVT(unsigned BitWidth)
Returns the EVT that represents a floating-point type with the given number of bits.
Definition: ValueTypes.h:58
bool isVector() const
Return true if this is a vector value type.
Definition: ValueTypes.h:167
Type * getTypeForEVT(LLVMContext &Context) const
This method returns an LLVM type corresponding to the specified EVT.
Definition: ValueTypes.cpp:202
bool isRound() const
Return true if the size is a power-of-two number of bytes.
Definition: ValueTypes.h:238
EVT getVectorElementType() const
Given a vector type, return the type of each element.
Definition: ValueTypes.h:318
unsigned getVectorNumElements() const
Given a vector type, return the number of elements it contains.
Definition: ValueTypes.h:326
bool isInteger() const
Return true if this is an integer or a vector integer type.
Definition: ValueTypes.h:151
Align getNonZeroOrigAlign() const
This class contains a discriminated union of information about pointers in memory operands,...
static MachinePointerInfo getGOT(MachineFunction &MF)
Return a MachinePointerInfo record that refers to a GOT entry.
static MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
This struct is a compact representation of a valid (power of two) or undefined (0) alignment.
Definition: Alignment.h:117
Align valueOrOne() const
For convenience, returns a valid alignment or 1 if undefined.
Definition: Alignment.h:141
This represents a list of ValueType's that has been intern'd by a SelectionDAG.
This structure contains all information that is necessary for lowering calls.
SmallVector< ISD::InputArg, 32 > Ins
SmallVector< ISD::OutputArg, 32 > Outs
SmallVector< SDValue, 32 > OutVals