// LLVM 19.0.0git — SparcISelLowering.cpp
//===-- SparcISelLowering.cpp - Sparc DAG Lowering Implementation ---------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the interfaces that Sparc uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#include "SparcISelLowering.h"
#include "SparcMachineFunctionInfo.h"
#include "SparcRegisterInfo.h"
#include "SparcTargetMachine.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/ErrorHandling.h"
37using namespace llvm;
38
39
40//===----------------------------------------------------------------------===//
41// Calling Convention Implementation
42//===----------------------------------------------------------------------===//
43
44static bool CC_Sparc_Assign_SRet(unsigned &ValNo, MVT &ValVT,
45 MVT &LocVT, CCValAssign::LocInfo &LocInfo,
46 ISD::ArgFlagsTy &ArgFlags, CCState &State)
47{
48 assert (ArgFlags.isSRet());
49
50 // Assign SRet argument.
51 State.addLoc(CCValAssign::getCustomMem(ValNo, ValVT,
52 0,
53 LocVT, LocInfo));
54 return true;
55}
56
57static bool CC_Sparc_Assign_Split_64(unsigned &ValNo, MVT &ValVT,
58 MVT &LocVT, CCValAssign::LocInfo &LocInfo,
59 ISD::ArgFlagsTy &ArgFlags, CCState &State)
60{
61 static const MCPhysReg RegList[] = {
62 SP::I0, SP::I1, SP::I2, SP::I3, SP::I4, SP::I5
63 };
64 // Try to get first reg.
65 if (Register Reg = State.AllocateReg(RegList)) {
66 State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
67 } else {
68 // Assign whole thing in stack.
70 ValNo, ValVT, State.AllocateStack(8, Align(4)), LocVT, LocInfo));
71 return true;
72 }
73
74 // Try to get second reg.
75 if (Register Reg = State.AllocateReg(RegList))
76 State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
77 else
79 ValNo, ValVT, State.AllocateStack(4, Align(4)), LocVT, LocInfo));
80 return true;
81}
82
83static bool CC_Sparc_Assign_Ret_Split_64(unsigned &ValNo, MVT &ValVT,
84 MVT &LocVT, CCValAssign::LocInfo &LocInfo,
85 ISD::ArgFlagsTy &ArgFlags, CCState &State)
86{
87 static const MCPhysReg RegList[] = {
88 SP::I0, SP::I1, SP::I2, SP::I3, SP::I4, SP::I5
89 };
90
91 // Try to get first reg.
92 if (Register Reg = State.AllocateReg(RegList))
93 State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
94 else
95 return false;
96
97 // Try to get second reg.
98 if (Register Reg = State.AllocateReg(RegList))
99 State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
100 else
101 return false;
102
103 return true;
104}
105
106// Allocate a full-sized argument for the 64-bit ABI.
107static bool Analyze_CC_Sparc64_Full(bool IsReturn, unsigned &ValNo, MVT &ValVT,
108 MVT &LocVT, CCValAssign::LocInfo &LocInfo,
109 ISD::ArgFlagsTy &ArgFlags, CCState &State) {
110 assert((LocVT == MVT::f32 || LocVT == MVT::f128
111 || LocVT.getSizeInBits() == 64) &&
112 "Can't handle non-64 bits locations");
113
114 // Stack space is allocated for all arguments starting from [%fp+BIAS+128].
115 unsigned size = (LocVT == MVT::f128) ? 16 : 8;
116 Align alignment = (LocVT == MVT::f128) ? Align(16) : Align(8);
117 unsigned Offset = State.AllocateStack(size, alignment);
118 unsigned Reg = 0;
119
120 if (LocVT == MVT::i64 && Offset < 6*8)
121 // Promote integers to %i0-%i5.
122 Reg = SP::I0 + Offset/8;
123 else if (LocVT == MVT::f64 && Offset < 16*8)
124 // Promote doubles to %d0-%d30. (Which LLVM calls D0-D15).
125 Reg = SP::D0 + Offset/8;
126 else if (LocVT == MVT::f32 && Offset < 16*8)
127 // Promote floats to %f1, %f3, ...
128 Reg = SP::F1 + Offset/4;
129 else if (LocVT == MVT::f128 && Offset < 16*8)
130 // Promote long doubles to %q0-%q28. (Which LLVM calls Q0-Q7).
131 Reg = SP::Q0 + Offset/16;
132
133 // Promote to register when possible, otherwise use the stack slot.
134 if (Reg) {
135 State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
136 return true;
137 }
138
139 // Bail out if this is a return CC and we run out of registers to place
140 // values into.
141 if (IsReturn)
142 return false;
143
144 // This argument goes on the stack in an 8-byte slot.
145 // When passing floats, LocVT is smaller than 8 bytes. Adjust the offset to
146 // the right-aligned float. The first 4 bytes of the stack slot are undefined.
147 if (LocVT == MVT::f32)
148 Offset += 4;
149
150 State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
151 return true;
152}
153
154// Allocate a half-sized argument for the 64-bit ABI.
155//
156// This is used when passing { float, int } structs by value in registers.
157static bool Analyze_CC_Sparc64_Half(bool IsReturn, unsigned &ValNo, MVT &ValVT,
158 MVT &LocVT, CCValAssign::LocInfo &LocInfo,
159 ISD::ArgFlagsTy &ArgFlags, CCState &State) {
160 assert(LocVT.getSizeInBits() == 32 && "Can't handle non-32 bits locations");
161 unsigned Offset = State.AllocateStack(4, Align(4));
162
163 if (LocVT == MVT::f32 && Offset < 16*8) {
164 // Promote floats to %f0-%f31.
165 State.addLoc(CCValAssign::getReg(ValNo, ValVT, SP::F0 + Offset/4,
166 LocVT, LocInfo));
167 return true;
168 }
169
170 if (LocVT == MVT::i32 && Offset < 6*8) {
171 // Promote integers to %i0-%i5, using half the register.
172 unsigned Reg = SP::I0 + Offset/8;
173 LocVT = MVT::i64;
174 LocInfo = CCValAssign::AExt;
175
176 // Set the Custom bit if this i32 goes in the high bits of a register.
177 if (Offset % 8 == 0)
178 State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg,
179 LocVT, LocInfo));
180 else
181 State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
182 return true;
183 }
184
185 // Bail out if this is a return CC and we run out of registers to place
186 // values into.
187 if (IsReturn)
188 return false;
189
190 State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
191 return true;
192}
193
194static bool CC_Sparc64_Full(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
195 CCValAssign::LocInfo &LocInfo,
196 ISD::ArgFlagsTy &ArgFlags, CCState &State) {
197 return Analyze_CC_Sparc64_Full(false, ValNo, ValVT, LocVT, LocInfo, ArgFlags,
198 State);
199}
200
201static bool CC_Sparc64_Half(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
202 CCValAssign::LocInfo &LocInfo,
203 ISD::ArgFlagsTy &ArgFlags, CCState &State) {
204 return Analyze_CC_Sparc64_Half(false, ValNo, ValVT, LocVT, LocInfo, ArgFlags,
205 State);
206}
207
208static bool RetCC_Sparc64_Full(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
209 CCValAssign::LocInfo &LocInfo,
210 ISD::ArgFlagsTy &ArgFlags, CCState &State) {
211 return Analyze_CC_Sparc64_Full(true, ValNo, ValVT, LocVT, LocInfo, ArgFlags,
212 State);
213}
214
215static bool RetCC_Sparc64_Half(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
216 CCValAssign::LocInfo &LocInfo,
217 ISD::ArgFlagsTy &ArgFlags, CCState &State) {
218 return Analyze_CC_Sparc64_Half(true, ValNo, ValVT, LocVT, LocInfo, ArgFlags,
219 State);
220}
221
222#include "SparcGenCallingConv.inc"
223
224// The calling conventions in SparcCallingConv.td are described in terms of the
225// callee's register window. This function translates registers to the
226// corresponding caller window %o register.
227static unsigned toCallerWindow(unsigned Reg) {
228 static_assert(SP::I0 + 7 == SP::I7 && SP::O0 + 7 == SP::O7,
229 "Unexpected enum");
230 if (Reg >= SP::I0 && Reg <= SP::I7)
231 return Reg - SP::I0 + SP::O0;
232 return Reg;
233}
234
236 CallingConv::ID CallConv, MachineFunction &MF, bool isVarArg,
237 const SmallVectorImpl<ISD::OutputArg> &Outs, LLVMContext &Context) const {
239 CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context);
240 return CCInfo.CheckReturn(Outs, Subtarget->is64Bit() ? RetCC_Sparc64
241 : RetCC_Sparc32);
242}
243
246 bool IsVarArg,
248 const SmallVectorImpl<SDValue> &OutVals,
249 const SDLoc &DL, SelectionDAG &DAG) const {
250 if (Subtarget->is64Bit())
251 return LowerReturn_64(Chain, CallConv, IsVarArg, Outs, OutVals, DL, DAG);
252 return LowerReturn_32(Chain, CallConv, IsVarArg, Outs, OutVals, DL, DAG);
253}
254
257 bool IsVarArg,
259 const SmallVectorImpl<SDValue> &OutVals,
260 const SDLoc &DL, SelectionDAG &DAG) const {
262
263 // CCValAssign - represent the assignment of the return value to locations.
265
266 // CCState - Info about the registers and stack slot.
267 CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
268 *DAG.getContext());
269
270 // Analyze return values.
271 CCInfo.AnalyzeReturn(Outs, RetCC_Sparc32);
272
273 SDValue Glue;
274 SmallVector<SDValue, 4> RetOps(1, Chain);
275 // Make room for the return address offset.
276 RetOps.push_back(SDValue());
277
278 // Copy the result values into the output registers.
279 for (unsigned i = 0, realRVLocIdx = 0;
280 i != RVLocs.size();
281 ++i, ++realRVLocIdx) {
282 CCValAssign &VA = RVLocs[i];
283 assert(VA.isRegLoc() && "Can only return in registers!");
284
285 SDValue Arg = OutVals[realRVLocIdx];
286
287 if (VA.needsCustom()) {
288 assert(VA.getLocVT() == MVT::v2i32);
289 // Legalize ret v2i32 -> ret 2 x i32 (Basically: do what would
290 // happen by default if this wasn't a legal type)
291
292 SDValue Part0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32,
293 Arg,
295 SDValue Part1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32,
296 Arg,
298
299 Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Part0, Glue);
300 Glue = Chain.getValue(1);
301 RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
302 VA = RVLocs[++i]; // skip ahead to next loc
303 Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Part1,
304 Glue);
305 } else
306 Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Arg, Glue);
307
308 // Guarantee that all emitted copies are stuck together with flags.
309 Glue = Chain.getValue(1);
310 RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
311 }
312
313 unsigned RetAddrOffset = 8; // Call Inst + Delay Slot
314 // If the function returns a struct, copy the SRetReturnReg to I0
315 if (MF.getFunction().hasStructRetAttr()) {
317 Register Reg = SFI->getSRetReturnReg();
318 if (!Reg)
319 llvm_unreachable("sret virtual register not created in the entry block");
320 auto PtrVT = getPointerTy(DAG.getDataLayout());
321 SDValue Val = DAG.getCopyFromReg(Chain, DL, Reg, PtrVT);
322 Chain = DAG.getCopyToReg(Chain, DL, SP::I0, Val, Glue);
323 Glue = Chain.getValue(1);
324 RetOps.push_back(DAG.getRegister(SP::I0, PtrVT));
325 RetAddrOffset = 12; // CallInst + Delay Slot + Unimp
326 }
327
328 RetOps[0] = Chain; // Update chain.
329 RetOps[1] = DAG.getConstant(RetAddrOffset, DL, MVT::i32);
330
331 // Add the glue if we have it.
332 if (Glue.getNode())
333 RetOps.push_back(Glue);
334
335 return DAG.getNode(SPISD::RET_GLUE, DL, MVT::Other, RetOps);
336}
337
338// Lower return values for the 64-bit ABI.
339// Return values are passed the exactly the same way as function arguments.
342 bool IsVarArg,
344 const SmallVectorImpl<SDValue> &OutVals,
345 const SDLoc &DL, SelectionDAG &DAG) const {
346 // CCValAssign - represent the assignment of the return value to locations.
348
349 // CCState - Info about the registers and stack slot.
350 CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
351 *DAG.getContext());
352
353 // Analyze return values.
354 CCInfo.AnalyzeReturn(Outs, RetCC_Sparc64);
355
356 SDValue Glue;
357 SmallVector<SDValue, 4> RetOps(1, Chain);
358
359 // The second operand on the return instruction is the return address offset.
360 // The return address is always %i7+8 with the 64-bit ABI.
361 RetOps.push_back(DAG.getConstant(8, DL, MVT::i32));
362
363 // Copy the result values into the output registers.
364 for (unsigned i = 0; i != RVLocs.size(); ++i) {
365 CCValAssign &VA = RVLocs[i];
366 assert(VA.isRegLoc() && "Can only return in registers!");
367 SDValue OutVal = OutVals[i];
368
369 // Integer return values must be sign or zero extended by the callee.
370 switch (VA.getLocInfo()) {
371 case CCValAssign::Full: break;
373 OutVal = DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), OutVal);
374 break;
376 OutVal = DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), OutVal);
377 break;
379 OutVal = DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), OutVal);
380 break;
381 default:
382 llvm_unreachable("Unknown loc info!");
383 }
384
385 // The custom bit on an i32 return value indicates that it should be passed
386 // in the high bits of the register.
387 if (VA.getValVT() == MVT::i32 && VA.needsCustom()) {
388 OutVal = DAG.getNode(ISD::SHL, DL, MVT::i64, OutVal,
389 DAG.getConstant(32, DL, MVT::i32));
390
391 // The next value may go in the low bits of the same register.
392 // Handle both at once.
393 if (i+1 < RVLocs.size() && RVLocs[i+1].getLocReg() == VA.getLocReg()) {
394 SDValue NV = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, OutVals[i+1]);
395 OutVal = DAG.getNode(ISD::OR, DL, MVT::i64, OutVal, NV);
396 // Skip the next value, it's already done.
397 ++i;
398 }
399 }
400
401 Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), OutVal, Glue);
402
403 // Guarantee that all emitted copies are stuck together with flags.
404 Glue = Chain.getValue(1);
405 RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
406 }
407
408 RetOps[0] = Chain; // Update chain.
409
410 // Add the flag if we have it.
411 if (Glue.getNode())
412 RetOps.push_back(Glue);
413
414 return DAG.getNode(SPISD::RET_GLUE, DL, MVT::Other, RetOps);
415}
416
418 SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
419 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
420 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
421 if (Subtarget->is64Bit())
422 return LowerFormalArguments_64(Chain, CallConv, IsVarArg, Ins,
423 DL, DAG, InVals);
424 return LowerFormalArguments_32(Chain, CallConv, IsVarArg, Ins,
425 DL, DAG, InVals);
426}
427
428/// LowerFormalArguments32 - V8 uses a very simple ABI, where all values are
429/// passed in either one or two GPRs, including FP values. TODO: we should
430/// pass FP values in FP registers for fastcc functions.
432 SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
433 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
434 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
436 MachineRegisterInfo &RegInfo = MF.getRegInfo();
438
439 // Assign locations to all of the incoming arguments.
441 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
442 *DAG.getContext());
443 CCInfo.AnalyzeFormalArguments(Ins, CC_Sparc32);
444
445 const unsigned StackOffset = 92;
446 bool IsLittleEndian = DAG.getDataLayout().isLittleEndian();
447
448 unsigned InIdx = 0;
449 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i, ++InIdx) {
450 CCValAssign &VA = ArgLocs[i];
451
452 if (Ins[InIdx].Flags.isSRet()) {
453 if (InIdx != 0)
454 report_fatal_error("sparc only supports sret on the first parameter");
455 // Get SRet from [%fp+64].
456 int FrameIdx = MF.getFrameInfo().CreateFixedObject(4, 64, true);
457 SDValue FIPtr = DAG.getFrameIndex(FrameIdx, MVT::i32);
458 SDValue Arg =
459 DAG.getLoad(MVT::i32, dl, Chain, FIPtr, MachinePointerInfo());
460 InVals.push_back(Arg);
461 continue;
462 }
463
464 if (VA.isRegLoc()) {
465 if (VA.needsCustom()) {
466 assert(VA.getLocVT() == MVT::f64 || VA.getLocVT() == MVT::v2i32);
467
468 Register VRegHi = RegInfo.createVirtualRegister(&SP::IntRegsRegClass);
469 MF.getRegInfo().addLiveIn(VA.getLocReg(), VRegHi);
470 SDValue HiVal = DAG.getCopyFromReg(Chain, dl, VRegHi, MVT::i32);
471
472 assert(i+1 < e);
473 CCValAssign &NextVA = ArgLocs[++i];
474
475 SDValue LoVal;
476 if (NextVA.isMemLoc()) {
477 int FrameIdx = MF.getFrameInfo().
478 CreateFixedObject(4, StackOffset+NextVA.getLocMemOffset(),true);
479 SDValue FIPtr = DAG.getFrameIndex(FrameIdx, MVT::i32);
480 LoVal = DAG.getLoad(MVT::i32, dl, Chain, FIPtr, MachinePointerInfo());
481 } else {
482 Register loReg = MF.addLiveIn(NextVA.getLocReg(),
483 &SP::IntRegsRegClass);
484 LoVal = DAG.getCopyFromReg(Chain, dl, loReg, MVT::i32);
485 }
486
487 if (IsLittleEndian)
488 std::swap(LoVal, HiVal);
489
490 SDValue WholeValue =
491 DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, LoVal, HiVal);
492 WholeValue = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), WholeValue);
493 InVals.push_back(WholeValue);
494 continue;
495 }
496 Register VReg = RegInfo.createVirtualRegister(&SP::IntRegsRegClass);
497 MF.getRegInfo().addLiveIn(VA.getLocReg(), VReg);
498 SDValue Arg = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32);
499 if (VA.getLocVT() == MVT::f32)
500 Arg = DAG.getNode(ISD::BITCAST, dl, MVT::f32, Arg);
501 else if (VA.getLocVT() != MVT::i32) {
502 Arg = DAG.getNode(ISD::AssertSext, dl, MVT::i32, Arg,
503 DAG.getValueType(VA.getLocVT()));
504 Arg = DAG.getNode(ISD::TRUNCATE, dl, VA.getLocVT(), Arg);
505 }
506 InVals.push_back(Arg);
507 continue;
508 }
509
510 assert(VA.isMemLoc());
511
512 unsigned Offset = VA.getLocMemOffset()+StackOffset;
513 auto PtrVT = getPointerTy(DAG.getDataLayout());
514
515 if (VA.needsCustom()) {
516 assert(VA.getValVT() == MVT::f64 || VA.getValVT() == MVT::v2i32);
517 // If it is double-word aligned, just load.
518 if (Offset % 8 == 0) {
519 int FI = MF.getFrameInfo().CreateFixedObject(8,
520 Offset,
521 true);
522 SDValue FIPtr = DAG.getFrameIndex(FI, PtrVT);
523 SDValue Load =
524 DAG.getLoad(VA.getValVT(), dl, Chain, FIPtr, MachinePointerInfo());
525 InVals.push_back(Load);
526 continue;
527 }
528
529 int FI = MF.getFrameInfo().CreateFixedObject(4,
530 Offset,
531 true);
532 SDValue FIPtr = DAG.getFrameIndex(FI, PtrVT);
533 SDValue HiVal =
534 DAG.getLoad(MVT::i32, dl, Chain, FIPtr, MachinePointerInfo());
535 int FI2 = MF.getFrameInfo().CreateFixedObject(4,
536 Offset+4,
537 true);
538 SDValue FIPtr2 = DAG.getFrameIndex(FI2, PtrVT);
539
540 SDValue LoVal =
541 DAG.getLoad(MVT::i32, dl, Chain, FIPtr2, MachinePointerInfo());
542
543 if (IsLittleEndian)
544 std::swap(LoVal, HiVal);
545
546 SDValue WholeValue =
547 DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, LoVal, HiVal);
548 WholeValue = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), WholeValue);
549 InVals.push_back(WholeValue);
550 continue;
551 }
552
553 int FI = MF.getFrameInfo().CreateFixedObject(4,
554 Offset,
555 true);
556 SDValue FIPtr = DAG.getFrameIndex(FI, PtrVT);
557 SDValue Load ;
558 if (VA.getValVT() == MVT::i32 || VA.getValVT() == MVT::f32) {
559 Load = DAG.getLoad(VA.getValVT(), dl, Chain, FIPtr, MachinePointerInfo());
560 } else if (VA.getValVT() == MVT::f128) {
561 report_fatal_error("SPARCv8 does not handle f128 in calls; "
562 "pass indirectly");
563 } else {
564 // We shouldn't see any other value types here.
565 llvm_unreachable("Unexpected ValVT encountered in frame lowering.");
566 }
567 InVals.push_back(Load);
568 }
569
570 if (MF.getFunction().hasStructRetAttr()) {
571 // Copy the SRet Argument to SRetReturnReg.
573 Register Reg = SFI->getSRetReturnReg();
574 if (!Reg) {
575 Reg = MF.getRegInfo().createVirtualRegister(&SP::IntRegsRegClass);
576 SFI->setSRetReturnReg(Reg);
577 }
578 SDValue Copy = DAG.getCopyToReg(DAG.getEntryNode(), dl, Reg, InVals[0]);
579 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Copy, Chain);
580 }
581
582 // Store remaining ArgRegs to the stack if this is a varargs function.
583 if (isVarArg) {
584 static const MCPhysReg ArgRegs[] = {
585 SP::I0, SP::I1, SP::I2, SP::I3, SP::I4, SP::I5
586 };
587 unsigned NumAllocated = CCInfo.getFirstUnallocated(ArgRegs);
588 const MCPhysReg *CurArgReg = ArgRegs+NumAllocated, *ArgRegEnd = ArgRegs+6;
589 unsigned ArgOffset = CCInfo.getStackSize();
590 if (NumAllocated == 6)
591 ArgOffset += StackOffset;
592 else {
593 assert(!ArgOffset);
594 ArgOffset = 68+4*NumAllocated;
595 }
596
597 // Remember the vararg offset for the va_start implementation.
598 FuncInfo->setVarArgsFrameOffset(ArgOffset);
599
600 std::vector<SDValue> OutChains;
601
602 for (; CurArgReg != ArgRegEnd; ++CurArgReg) {
603 Register VReg = RegInfo.createVirtualRegister(&SP::IntRegsRegClass);
604 MF.getRegInfo().addLiveIn(*CurArgReg, VReg);
605 SDValue Arg = DAG.getCopyFromReg(DAG.getRoot(), dl, VReg, MVT::i32);
606
607 int FrameIdx = MF.getFrameInfo().CreateFixedObject(4, ArgOffset,
608 true);
609 SDValue FIPtr = DAG.getFrameIndex(FrameIdx, MVT::i32);
610
611 OutChains.push_back(
612 DAG.getStore(DAG.getRoot(), dl, Arg, FIPtr, MachinePointerInfo()));
613 ArgOffset += 4;
614 }
615
616 if (!OutChains.empty()) {
617 OutChains.push_back(Chain);
618 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
619 }
620 }
621
622 return Chain;
623}
624
625// Lower formal arguments for the 64 bit ABI.
627 SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
628 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
629 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
631
632 // Analyze arguments according to CC_Sparc64.
634 CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), ArgLocs,
635 *DAG.getContext());
636 CCInfo.AnalyzeFormalArguments(Ins, CC_Sparc64);
637
638 // The argument array begins at %fp+BIAS+128, after the register save area.
639 const unsigned ArgArea = 128;
640
641 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
642 CCValAssign &VA = ArgLocs[i];
643 if (VA.isRegLoc()) {
644 // This argument is passed in a register.
645 // All integer register arguments are promoted by the caller to i64.
646
647 // Create a virtual register for the promoted live-in value.
648 Register VReg = MF.addLiveIn(VA.getLocReg(),
650 SDValue Arg = DAG.getCopyFromReg(Chain, DL, VReg, VA.getLocVT());
651
652 // Get the high bits for i32 struct elements.
653 if (VA.getValVT() == MVT::i32 && VA.needsCustom())
654 Arg = DAG.getNode(ISD::SRL, DL, VA.getLocVT(), Arg,
655 DAG.getConstant(32, DL, MVT::i32));
656
657 // The caller promoted the argument, so insert an Assert?ext SDNode so we
658 // won't promote the value again in this function.
659 switch (VA.getLocInfo()) {
661 Arg = DAG.getNode(ISD::AssertSext, DL, VA.getLocVT(), Arg,
662 DAG.getValueType(VA.getValVT()));
663 break;
665 Arg = DAG.getNode(ISD::AssertZext, DL, VA.getLocVT(), Arg,
666 DAG.getValueType(VA.getValVT()));
667 break;
668 default:
669 break;
670 }
671
672 // Truncate the register down to the argument type.
673 if (VA.isExtInLoc())
674 Arg = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Arg);
675
676 InVals.push_back(Arg);
677 continue;
678 }
679
680 // The registers are exhausted. This argument was passed on the stack.
681 assert(VA.isMemLoc());
682 // The CC_Sparc64_Full/Half functions compute stack offsets relative to the
683 // beginning of the arguments area at %fp+BIAS+128.
684 unsigned Offset = VA.getLocMemOffset() + ArgArea;
685 unsigned ValSize = VA.getValVT().getSizeInBits() / 8;
686 // Adjust offset for extended arguments, SPARC is big-endian.
687 // The caller will have written the full slot with extended bytes, but we
688 // prefer our own extending loads.
689 if (VA.isExtInLoc())
690 Offset += 8 - ValSize;
691 int FI = MF.getFrameInfo().CreateFixedObject(ValSize, Offset, true);
692 InVals.push_back(
693 DAG.getLoad(VA.getValVT(), DL, Chain,
696 }
697
698 if (!IsVarArg)
699 return Chain;
700
701 // This function takes variable arguments, some of which may have been passed
702 // in registers %i0-%i5. Variable floating point arguments are never passed
703 // in floating point registers. They go on %i0-%i5 or on the stack like
704 // integer arguments.
705 //
706 // The va_start intrinsic needs to know the offset to the first variable
707 // argument.
708 unsigned ArgOffset = CCInfo.getStackSize();
710 // Skip the 128 bytes of register save area.
711 FuncInfo->setVarArgsFrameOffset(ArgOffset + ArgArea +
712 Subtarget->getStackPointerBias());
713
714 // Save the variable arguments that were passed in registers.
715 // The caller is required to reserve stack space for 6 arguments regardless
716 // of how many arguments were actually passed.
717 SmallVector<SDValue, 8> OutChains;
718 for (; ArgOffset < 6*8; ArgOffset += 8) {
719 Register VReg = MF.addLiveIn(SP::I0 + ArgOffset/8, &SP::I64RegsRegClass);
720 SDValue VArg = DAG.getCopyFromReg(Chain, DL, VReg, MVT::i64);
721 int FI = MF.getFrameInfo().CreateFixedObject(8, ArgOffset + ArgArea, true);
722 auto PtrVT = getPointerTy(MF.getDataLayout());
723 OutChains.push_back(
724 DAG.getStore(Chain, DL, VArg, DAG.getFrameIndex(FI, PtrVT),
726 }
727
728 if (!OutChains.empty())
729 Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, OutChains);
730
731 return Chain;
732}
733
734// Check whether any of the argument registers are reserved
736 const MachineFunction &MF) {
737 // The register window design means that outgoing parameters at O*
738 // will appear in the callee as I*.
739 // Be conservative and check both sides of the register names.
740 bool Outgoing =
741 llvm::any_of(SP::GPROutgoingArgRegClass, [TRI, &MF](MCPhysReg r) {
742 return TRI->isReservedReg(MF, r);
743 });
744 bool Incoming =
745 llvm::any_of(SP::GPRIncomingArgRegClass, [TRI, &MF](MCPhysReg r) {
746 return TRI->isReservedReg(MF, r);
747 });
748 return Outgoing || Incoming;
749}
750
752 const Function &F = MF.getFunction();
753 F.getContext().diagnose(DiagnosticInfoUnsupported{
754 F, ("SPARC doesn't support"
755 " function calls if any of the argument registers is reserved.")});
756}
757
760 SmallVectorImpl<SDValue> &InVals) const {
761 if (Subtarget->is64Bit())
762 return LowerCall_64(CLI, InVals);
763 return LowerCall_32(CLI, InVals);
764}
765
766static bool hasReturnsTwiceAttr(SelectionDAG &DAG, SDValue Callee,
767 const CallBase *Call) {
768 if (Call)
769 return Call->hasFnAttr(Attribute::ReturnsTwice);
770
771 const Function *CalleeFn = nullptr;
772 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
773 CalleeFn = dyn_cast<Function>(G->getGlobal());
774 } else if (ExternalSymbolSDNode *E =
775 dyn_cast<ExternalSymbolSDNode>(Callee)) {
776 const Function &Fn = DAG.getMachineFunction().getFunction();
777 const Module *M = Fn.getParent();
778 const char *CalleeName = E->getSymbol();
779 CalleeFn = M->getFunction(CalleeName);
780 }
781
782 if (!CalleeFn)
783 return false;
784 return CalleeFn->hasFnAttribute(Attribute::ReturnsTwice);
785}
786
787/// IsEligibleForTailCallOptimization - Check whether the call is eligible
788/// for tail call optimization.
790 CCState &CCInfo, CallLoweringInfo &CLI, MachineFunction &MF) const {
791
792 auto &Outs = CLI.Outs;
793 auto &Caller = MF.getFunction();
794
795 // Do not tail call opt functions with "disable-tail-calls" attribute.
796 if (Caller.getFnAttribute("disable-tail-calls").getValueAsString() == "true")
797 return false;
798
799 // Do not tail call opt if the stack is used to pass parameters.
800 // 64-bit targets have a slightly higher limit since the ABI requires
801 // to allocate some space even when all the parameters fit inside registers.
802 unsigned StackSizeLimit = Subtarget->is64Bit() ? 48 : 0;
803 if (CCInfo.getStackSize() > StackSizeLimit)
804 return false;
805
806 // Do not tail call opt if either the callee or caller returns
807 // a struct and the other does not.
808 if (!Outs.empty() && Caller.hasStructRetAttr() != Outs[0].Flags.isSRet())
809 return false;
810
811 // Byval parameters hand the function a pointer directly into the stack area
812 // we want to reuse during a tail call.
813 for (auto &Arg : Outs)
814 if (Arg.Flags.isByVal())
815 return false;
816
817 return true;
818}
819
820// Lower a call for the 32-bit ABI.
823 SmallVectorImpl<SDValue> &InVals) const {
824 SelectionDAG &DAG = CLI.DAG;
825 SDLoc &dl = CLI.DL;
827 SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
829 SDValue Chain = CLI.Chain;
830 SDValue Callee = CLI.Callee;
831 bool &isTailCall = CLI.IsTailCall;
832 CallingConv::ID CallConv = CLI.CallConv;
833 bool isVarArg = CLI.IsVarArg;
835
836 // Analyze operands of the call, assigning locations to each operand.
838 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
839 *DAG.getContext());
840 CCInfo.AnalyzeCallOperands(Outs, CC_Sparc32);
841
842 isTailCall = isTailCall && IsEligibleForTailCallOptimization(
843 CCInfo, CLI, DAG.getMachineFunction());
844
845 // Get the size of the outgoing arguments stack space requirement.
846 unsigned ArgsSize = CCInfo.getStackSize();
847
848 // Keep stack frames 8-byte aligned.
849 ArgsSize = (ArgsSize+7) & ~7;
850
852
853 // Create local copies for byval args.
854 SmallVector<SDValue, 8> ByValArgs;
855 for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
856 ISD::ArgFlagsTy Flags = Outs[i].Flags;
857 if (!Flags.isByVal())
858 continue;
859
860 SDValue Arg = OutVals[i];
861 unsigned Size = Flags.getByValSize();
862 Align Alignment = Flags.getNonZeroByValAlign();
863
864 if (Size > 0U) {
865 int FI = MFI.CreateStackObject(Size, Alignment, false);
866 SDValue FIPtr = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
867 SDValue SizeNode = DAG.getConstant(Size, dl, MVT::i32);
868
869 Chain = DAG.getMemcpy(Chain, dl, FIPtr, Arg, SizeNode, Alignment,
870 false, // isVolatile,
871 (Size <= 32), // AlwaysInline if size <= 32,
872 false, // isTailCall
874 ByValArgs.push_back(FIPtr);
875 }
876 else {
877 SDValue nullVal;
878 ByValArgs.push_back(nullVal);
879 }
880 }
881
882 assert(!isTailCall || ArgsSize == 0);
883
884 if (!isTailCall)
885 Chain = DAG.getCALLSEQ_START(Chain, ArgsSize, 0, dl);
886
888 SmallVector<SDValue, 8> MemOpChains;
889
890 const unsigned StackOffset = 92;
891 bool hasStructRetAttr = false;
892 unsigned SRetArgSize = 0;
893 // Walk the register/memloc assignments, inserting copies/loads.
894 for (unsigned i = 0, realArgIdx = 0, byvalArgIdx = 0, e = ArgLocs.size();
895 i != e;
896 ++i, ++realArgIdx) {
897 CCValAssign &VA = ArgLocs[i];
898 SDValue Arg = OutVals[realArgIdx];
899
900 ISD::ArgFlagsTy Flags = Outs[realArgIdx].Flags;
901
902 // Use local copy if it is a byval arg.
903 if (Flags.isByVal()) {
904 Arg = ByValArgs[byvalArgIdx++];
905 if (!Arg) {
906 continue;
907 }
908 }
909
910 // Promote the value if needed.
911 switch (VA.getLocInfo()) {
912 default: llvm_unreachable("Unknown loc info!");
913 case CCValAssign::Full: break;
915 Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
916 break;
918 Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
919 break;
921 Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
922 break;
924 Arg = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), Arg);
925 break;
926 }
927
928 if (Flags.isSRet()) {
929 assert(VA.needsCustom());
930
931 if (isTailCall)
932 continue;
933
934 // store SRet argument in %sp+64
935 SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);
936 SDValue PtrOff = DAG.getIntPtrConstant(64, dl);
937 PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
938 MemOpChains.push_back(
939 DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()));
940 hasStructRetAttr = true;
941 // sret only allowed on first argument
942 assert(Outs[realArgIdx].OrigArgIndex == 0);
943 SRetArgSize =
944 DAG.getDataLayout().getTypeAllocSize(CLI.getArgs()[0].IndirectType);
945 continue;
946 }
947
948 if (VA.needsCustom()) {
949 assert(VA.getLocVT() == MVT::f64 || VA.getLocVT() == MVT::v2i32);
950
951 if (VA.isMemLoc()) {
952 unsigned Offset = VA.getLocMemOffset() + StackOffset;
953 // if it is double-word aligned, just store.
954 if (Offset % 8 == 0) {
955 SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);
956 SDValue PtrOff = DAG.getIntPtrConstant(Offset, dl);
957 PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
958 MemOpChains.push_back(
959 DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()));
960 continue;
961 }
962 }
963
964 if (VA.getLocVT() == MVT::f64) {
965 // Move from the float value from float registers into the
966 // integer registers.
967 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Arg))
968 Arg = bitcastConstantFPToInt(C, dl, DAG);
969 else
970 Arg = DAG.getNode(ISD::BITCAST, dl, MVT::v2i32, Arg);
971 }
972
973 SDValue Part0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
974 Arg,
975 DAG.getConstant(0, dl, getVectorIdxTy(DAG.getDataLayout())));
976 SDValue Part1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
977 Arg,
978 DAG.getConstant(1, dl, getVectorIdxTy(DAG.getDataLayout())));
979
980 if (VA.isRegLoc()) {
981 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Part0));
982 assert(i+1 != e);
983 CCValAssign &NextVA = ArgLocs[++i];
984 if (NextVA.isRegLoc()) {
985 RegsToPass.push_back(std::make_pair(NextVA.getLocReg(), Part1));
986 } else {
987 // Store the second part in stack.
988 unsigned Offset = NextVA.getLocMemOffset() + StackOffset;
989 SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);
990 SDValue PtrOff = DAG.getIntPtrConstant(Offset, dl);
991 PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
992 MemOpChains.push_back(
993 DAG.getStore(Chain, dl, Part1, PtrOff, MachinePointerInfo()));
994 }
995 } else {
996 unsigned Offset = VA.getLocMemOffset() + StackOffset;
997 // Store the first part.
998 SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);
999 SDValue PtrOff = DAG.getIntPtrConstant(Offset, dl);
1000 PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
1001 MemOpChains.push_back(
1002 DAG.getStore(Chain, dl, Part0, PtrOff, MachinePointerInfo()));
1003 // Store the second part.
1004 PtrOff = DAG.getIntPtrConstant(Offset + 4, dl);
1005 PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
1006 MemOpChains.push_back(
1007 DAG.getStore(Chain, dl, Part1, PtrOff, MachinePointerInfo()));
1008 }
1009 continue;
1010 }
1011
1012 // Arguments that can be passed on register must be kept at
1013 // RegsToPass vector
1014 if (VA.isRegLoc()) {
1015 if (VA.getLocVT() != MVT::f32) {
1016 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
1017 continue;
1018 }
1019 Arg = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Arg);
1020 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
1021 continue;
1022 }
1023
1024 assert(VA.isMemLoc());
1025
1026 // Create a store off the stack pointer for this argument.
1027 SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);
1029 dl);
1030 PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
1031 MemOpChains.push_back(
1032 DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()));
1033 }
1034
1035
1036 // Emit all stores, make sure they occur before any copies into physregs.
1037 if (!MemOpChains.empty())
1038 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
1039
1040 // Build a sequence of copy-to-reg nodes chained together with token
1041 // chain and flag operands which copy the outgoing args into registers.
1042 // The InGlue is necessary since all emitted instructions must be
1043 // stuck together.
1044 SDValue InGlue;
1045 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
1046 Register Reg = RegsToPass[i].first;
1047 if (!isTailCall)
1048 Reg = toCallerWindow(Reg);
1049 Chain = DAG.getCopyToReg(Chain, dl, Reg, RegsToPass[i].second, InGlue);
1050 InGlue = Chain.getValue(1);
1051 }
1052
1053 bool hasReturnsTwice = hasReturnsTwiceAttr(DAG, Callee, CLI.CB);
1054
1055 // If the callee is a GlobalAddress node (quite common, every direct call is)
1056 // turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
1057 // Likewise ExternalSymbol -> TargetExternalSymbol.
1060 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
1061 Callee = DAG.getTargetGlobalAddress(G->getGlobal(), dl, MVT::i32, 0, TF);
1062 else if (ExternalSymbolSDNode *E = dyn_cast<ExternalSymbolSDNode>(Callee))
1063 Callee = DAG.getTargetExternalSymbol(E->getSymbol(), MVT::i32, TF);
1064
1065 // Returns a chain & a flag for retval copy to use
1066 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
1068 Ops.push_back(Chain);
1069 Ops.push_back(Callee);
1070 if (hasStructRetAttr)
1071 Ops.push_back(DAG.getTargetConstant(SRetArgSize, dl, MVT::i32));
1072 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
1073 Register Reg = RegsToPass[i].first;
1074 if (!isTailCall)
1075 Reg = toCallerWindow(Reg);
1076 Ops.push_back(DAG.getRegister(Reg, RegsToPass[i].second.getValueType()));
1077 }
1078
1079 // Add a register mask operand representing the call-preserved registers.
1080 const SparcRegisterInfo *TRI = Subtarget->getRegisterInfo();
1081 const uint32_t *Mask =
1082 ((hasReturnsTwice)
1083 ? TRI->getRTCallPreservedMask(CallConv)
1084 : TRI->getCallPreservedMask(DAG.getMachineFunction(), CallConv));
1085
1086 if (isAnyArgRegReserved(TRI, MF))
1088
1089 assert(Mask && "Missing call preserved mask for calling convention");
1090 Ops.push_back(DAG.getRegisterMask(Mask));
1091
1092 if (InGlue.getNode())
1093 Ops.push_back(InGlue);
1094
1095 if (isTailCall) {
1097 return DAG.getNode(SPISD::TAIL_CALL, dl, MVT::Other, Ops);
1098 }
1099
1100 Chain = DAG.getNode(SPISD::CALL, dl, NodeTys, Ops);
1101 InGlue = Chain.getValue(1);
1102
1103 Chain = DAG.getCALLSEQ_END(Chain, ArgsSize, 0, InGlue, dl);
1104 InGlue = Chain.getValue(1);
1105
1106 // Assign locations to each value returned by this call.
1108 CCState RVInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
1109 *DAG.getContext());
1110
1111 RVInfo.AnalyzeCallResult(Ins, RetCC_Sparc32);
1112
1113 // Copy all of the result registers out of their specified physreg.
1114 for (unsigned i = 0; i != RVLocs.size(); ++i) {
1115 assert(RVLocs[i].isRegLoc() && "Can only return in registers!");
1116 if (RVLocs[i].getLocVT() == MVT::v2i32) {
1117 SDValue Vec = DAG.getNode(ISD::UNDEF, dl, MVT::v2i32);
1119 Chain, dl, toCallerWindow(RVLocs[i++].getLocReg()), MVT::i32, InGlue);
1120 Chain = Lo.getValue(1);
1121 InGlue = Lo.getValue(2);
1122 Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2i32, Vec, Lo,
1123 DAG.getConstant(0, dl, MVT::i32));
1125 Chain, dl, toCallerWindow(RVLocs[i].getLocReg()), MVT::i32, InGlue);
1126 Chain = Hi.getValue(1);
1127 InGlue = Hi.getValue(2);
1128 Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2i32, Vec, Hi,
1129 DAG.getConstant(1, dl, MVT::i32));
1130 InVals.push_back(Vec);
1131 } else {
1132 Chain =
1133 DAG.getCopyFromReg(Chain, dl, toCallerWindow(RVLocs[i].getLocReg()),
1134 RVLocs[i].getValVT(), InGlue)
1135 .getValue(1);
1136 InGlue = Chain.getValue(2);
1137 InVals.push_back(Chain.getValue(0));
1138 }
1139 }
1140
1141 return Chain;
1142}
1143
1144// FIXME? Maybe this could be a TableGen attribute on some registers and
1145// this table could be generated automatically from RegInfo.
// Resolves a textual register name ("i0".."i7", "o0".."o7", "l0".."l7",
// "g0".."g7") to the matching SP:: physical register. Only registers the
// target marks as reserved are accepted; any other (or unknown) name is a
// fatal error.
1147 const MachineFunction &MF) const {
1149 .Case("i0", SP::I0).Case("i1", SP::I1).Case("i2", SP::I2).Case("i3", SP::I3)
1150 .Case("i4", SP::I4).Case("i5", SP::I5).Case("i6", SP::I6).Case("i7", SP::I7)
1151 .Case("o0", SP::O0).Case("o1", SP::O1).Case("o2", SP::O2).Case("o3", SP::O3)
1152 .Case("o4", SP::O4).Case("o5", SP::O5).Case("o6", SP::O6).Case("o7", SP::O7)
1153 .Case("l0", SP::L0).Case("l1", SP::L1).Case("l2", SP::L2).Case("l3", SP::L3)
1154 .Case("l4", SP::L4).Case("l5", SP::L5).Case("l6", SP::L6).Case("l7", SP::L7)
1155 .Case("g0", SP::G0).Case("g1", SP::G1).Case("g2", SP::G2).Case("g3", SP::G3)
1156 .Case("g4", SP::G4).Case("g5", SP::G5).Case("g6", SP::G6).Case("g7", SP::G7)
1157 .Default(0);
1158
1159 // If we're directly referencing register names
1160 // (e.g in GCC C extension `register int r asm("g1");`),
1161 // make sure that said register is in the reserve list.
1162 const SparcRegisterInfo *TRI = Subtarget->getRegisterInfo();
1163 if (!TRI->isReservedReg(MF, Reg))
1164 Reg = 0;
1165
1166 if (Reg)
1167 return Reg;
1168
1169 report_fatal_error("Invalid register name global variable")
1170}
1171
1172// Fixup floating point arguments in the ... part of a varargs call.
1173//
1174// The SPARC v9 ABI requires that floating point arguments are treated the same
1175// as integers when calling a varargs function. This does not apply to the
1176// fixed arguments that are part of the function's prototype.
1177//
1178// This function post-processes a CCValAssign array created by
1179// AnalyzeCallOperands().
// Each reassigned value lands either in one of the integer argument
// registers %i0-%i5 or, once those six words are exhausted, in the memory
// argument area at the equivalent offset.
1182 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
1183 CCValAssign &VA = ArgLocs[i];
1184 MVT ValTy = VA.getLocVT();
1185 // FIXME: What about f32 arguments? C promotes them to f64 when calling
1186 // varargs functions.
1187 if (!VA.isRegLoc() || (ValTy != MVT::f64 && ValTy != MVT::f128))
1188 continue;
1189 // The fixed arguments to a varargs function still go in FP registers.
1190 if (Outs[VA.getValNo()].IsFixed)
1191 continue;
1192
1193 // This floating point argument should be reassigned.
1194 // Determine the offset into the argument array.
1195 Register firstReg = (ValTy == MVT::f64) ? SP::D0 : SP::Q0;
1196 unsigned argSize = (ValTy == MVT::f64) ? 8 : 16;
1197 unsigned Offset = argSize * (VA.getLocReg() - firstReg);
1198 assert(Offset < 16*8 && "Offset out of range, bad register enum?");
1199
1200 if (Offset < 6*8) {
1201 // This argument should go in %i0-%i5.
1202 unsigned IReg = SP::I0 + Offset/8;
1203 if (ValTy == MVT::f64)
1204 // Full register, just bitconvert into i64.
1205 VA = CCValAssign::getReg(VA.getValNo(), VA.getValVT(), IReg, MVT::i64,
1207 else {
1208 assert(ValTy == MVT::f128 && "Unexpected type!");
1209 // Full register, just bitconvert into i128 -- We will lower this into
1210 // two i64s in LowerCall_64.
1211 VA = CCValAssign::getCustomReg(VA.getValNo(), VA.getValVT(), IReg,
1212 MVT::i128, CCValAssign::BCvt);
1213 }
1214 } else {
1215 // This needs to go to memory, we're out of integer registers.
1217 VA.getLocVT(), VA.getLocInfo());
1218 }
1219 }
1220}
1221
1222// Lower a call for the 64-bit ABI.
// Emits CALLSEQ_START/END, copies arguments into the outgoing register
// window / stack argument area (splitting a custom f128-as-i128 into two
// i64 halves, and fixing up varargs FP arguments per the V9 ABI), builds
// the SPISD::CALL (or SPISD::TAIL_CALL) node, and finally copies the
// results back out of the return registers into InVals.
1223SDValue
1225 SmallVectorImpl<SDValue> &InVals) const {
1226 SelectionDAG &DAG = CLI.DAG;
1227 SDLoc DL = CLI.DL;
1228 SDValue Chain = CLI.Chain;
1229 auto PtrVT = getPointerTy(DAG.getDataLayout());
1231
1232 // Analyze operands of the call, assigning locations to each operand.
1234 CCState CCInfo(CLI.CallConv, CLI.IsVarArg, DAG.getMachineFunction(), ArgLocs,
1235 *DAG.getContext());
1236 CCInfo.AnalyzeCallOperands(CLI.Outs, CC_Sparc64);
1237
1239 CCInfo, CLI, DAG.getMachineFunction());
1240
1241 // Get the size of the outgoing arguments stack space requirement.
1242 // The stack offset computed by CC_Sparc64 includes all arguments.
1243 // Called functions expect 6 argument words to exist in the stack frame, used
1244 // or not.
1245 unsigned StackReserved = 6 * 8u;
1246 unsigned ArgsSize = std::max<unsigned>(StackReserved, CCInfo.getStackSize());
1247
1248 // Keep stack frames 16-byte aligned.
1249 ArgsSize = alignTo(ArgsSize, 16);
1250
1251 // Varargs calls require special treatment.
1252 if (CLI.IsVarArg)
1253 fixupVariableFloatArgs(ArgLocs, CLI.Outs);
1254
1255 assert(!CLI.IsTailCall || ArgsSize == StackReserved);
1256
1257 // Adjust the stack pointer to make room for the arguments.
1258 // FIXME: Use hasReservedCallFrame to avoid %sp adjustments around all calls
1259 // with more than 6 arguments.
1260 if (!CLI.IsTailCall)
1261 Chain = DAG.getCALLSEQ_START(Chain, ArgsSize, 0, DL);
1262
1263 // Collect the set of registers to pass to the function and their values.
1264 // This will be emitted as a sequence of CopyToReg nodes glued to the call
1265 // instruction.
1267
1268 // Collect chains from all the memory operations that copy arguments to the
1269 // stack. They must follow the stack pointer adjustment above and precede the
1270 // call instruction itself.
1271 SmallVector<SDValue, 8> MemOpChains;
1272
1273 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
1274 const CCValAssign &VA = ArgLocs[i];
1275 SDValue Arg = CLI.OutVals[i];
1276
1277 // Promote the value if needed.
1278 switch (VA.getLocInfo()) {
1279 default:
1280 llvm_unreachable("Unknown location info!");
1281 case CCValAssign::Full:
1282 break;
1283 case CCValAssign::SExt:
1284 Arg = DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), Arg);
1285 break;
1286 case CCValAssign::ZExt:
1287 Arg = DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), Arg);
1288 break;
1289 case CCValAssign::AExt:
1290 Arg = DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), Arg);
1291 break;
1292 case CCValAssign::BCvt:
1293 // fixupVariableFloatArgs() may create bitcasts from f128 to i128. But
1294 // SPARC does not support i128 natively. Lower it into two i64, see below.
1295 if (!VA.needsCustom() || VA.getValVT() != MVT::f128
1296 || VA.getLocVT() != MVT::i128)
1297 Arg = DAG.getNode(ISD::BITCAST, DL, VA.getLocVT(), Arg);
1298 break;
1299 }
1300
1301 if (VA.isRegLoc()) {
1302 if (VA.needsCustom() && VA.getValVT() == MVT::f128
1303 && VA.getLocVT() == MVT::i128) {
1304 // Store and reload into the integer register reg and reg+1.
1305 unsigned Offset = 8 * (VA.getLocReg() - SP::I0);
1306 unsigned StackOffset = Offset + Subtarget->getStackPointerBias() + 128;
1307 SDValue StackPtr = DAG.getRegister(SP::O6, PtrVT);
1308 SDValue HiPtrOff = DAG.getIntPtrConstant(StackOffset, DL);
1309 HiPtrOff = DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr, HiPtrOff);
1310 SDValue LoPtrOff = DAG.getIntPtrConstant(StackOffset + 8, DL);
1311 LoPtrOff = DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr, LoPtrOff);
1312
1313 // Store to %sp+BIAS+128+Offset
1314 SDValue Store =
1315 DAG.getStore(Chain, DL, Arg, HiPtrOff, MachinePointerInfo());
1316 // Load into Reg and Reg+1
1317 SDValue Hi64 =
1318 DAG.getLoad(MVT::i64, DL, Store, HiPtrOff, MachinePointerInfo());
1319 SDValue Lo64 =
1320 DAG.getLoad(MVT::i64, DL, Store, LoPtrOff, MachinePointerInfo());
1321
1322 Register HiReg = VA.getLocReg();
1323 Register LoReg = VA.getLocReg() + 1;
1324 if (!CLI.IsTailCall) {
1325 HiReg = toCallerWindow(HiReg);
1326 LoReg = toCallerWindow(LoReg);
1327 }
1328
1329 RegsToPass.push_back(std::make_pair(HiReg, Hi64));
1330 RegsToPass.push_back(std::make_pair(LoReg, Lo64));
1331 continue;
1332 }
1333
1334 // The custom bit on an i32 return value indicates that it should be
1335 // passed in the high bits of the register.
1336 if (VA.getValVT() == MVT::i32 && VA.needsCustom()) {
1337 Arg = DAG.getNode(ISD::SHL, DL, MVT::i64, Arg,
1338 DAG.getConstant(32, DL, MVT::i32));
1339
1340 // The next value may go in the low bits of the same register.
1341 // Handle both at once.
1342 if (i+1 < ArgLocs.size() && ArgLocs[i+1].isRegLoc() &&
1343 ArgLocs[i+1].getLocReg() == VA.getLocReg()) {
1344 SDValue NV = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64,
1345 CLI.OutVals[i+1]);
1346 Arg = DAG.getNode(ISD::OR, DL, MVT::i64, Arg, NV);
1347 // Skip the next value, it's already done.
1348 ++i;
1349 }
1350 }
1351
1352 Register Reg = VA.getLocReg();
1353 if (!CLI.IsTailCall)
1354 Reg = toCallerWindow(Reg);
1355 RegsToPass.push_back(std::make_pair(Reg, Arg));
1356 continue;
1357 }
1358
1359 assert(VA.isMemLoc());
1360
1361 // Create a store off the stack pointer for this argument.
1362 SDValue StackPtr = DAG.getRegister(SP::O6, PtrVT);
1363 // The argument area starts at %fp+BIAS+128 in the callee frame,
1364 // %sp+BIAS+128 in ours.
1365 SDValue PtrOff = DAG.getIntPtrConstant(VA.getLocMemOffset() +
1366 Subtarget->getStackPointerBias() +
1367 128, DL);
1368 PtrOff = DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr, PtrOff);
1369 MemOpChains.push_back(
1370 DAG.getStore(Chain, DL, Arg, PtrOff, MachinePointerInfo()));
1371 }
1372
1373 // Emit all stores, make sure they occur before the call.
1374 if (!MemOpChains.empty())
1375 Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains);
1376
1377 // Build a sequence of CopyToReg nodes glued together with token chain and
1378 // glue operands which copy the outgoing args into registers. The InGlue is
1379 // necessary since all emitted instructions must be stuck together in order
1380 // to pass the live physical registers.
1381 SDValue InGlue;
1382 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
1383 Chain = DAG.getCopyToReg(Chain, DL,
1384 RegsToPass[i].first, RegsToPass[i].second, InGlue);
1385 InGlue = Chain.getValue(1);
1386 }
1387
1388 // If the callee is a GlobalAddress node (quite common, every direct call is)
1389 // turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
1390 // Likewise ExternalSymbol -> TargetExternalSymbol.
1391 SDValue Callee = CLI.Callee;
1392 bool hasReturnsTwice = hasReturnsTwiceAttr(DAG, Callee, CLI.CB);
1395 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
1396 Callee = DAG.getTargetGlobalAddress(G->getGlobal(), DL, PtrVT, 0, TF);
1397 else if (ExternalSymbolSDNode *E = dyn_cast<ExternalSymbolSDNode>(Callee))
1398 Callee = DAG.getTargetExternalSymbol(E->getSymbol(), PtrVT, TF);
1399
1400 // Build the operands for the call instruction itself.
1402 Ops.push_back(Chain);
1403 Ops.push_back(Callee);
1404 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
1405 Ops.push_back(DAG.getRegister(RegsToPass[i].first,
1406 RegsToPass[i].second.getValueType()));
1407
1408 // Add a register mask operand representing the call-preserved registers.
1409 const SparcRegisterInfo *TRI = Subtarget->getRegisterInfo();
1410 const uint32_t *Mask =
1411 ((hasReturnsTwice) ? TRI->getRTCallPreservedMask(CLI.CallConv)
1412 : TRI->getCallPreservedMask(DAG.getMachineFunction(),
1413 CLI.CallConv));
1414
1415 if (isAnyArgRegReserved(TRI, MF))
1417
1418 assert(Mask && "Missing call preserved mask for calling convention");
1419 Ops.push_back(DAG.getRegisterMask(Mask));
1420
1421 // Make sure the CopyToReg nodes are glued to the call instruction which
1422 // consumes the registers.
1423 if (InGlue.getNode())
1424 Ops.push_back(InGlue);
1425
1426 // Now the call itself.
1427 if (CLI.IsTailCall) {
1429 return DAG.getNode(SPISD::TAIL_CALL, DL, MVT::Other, Ops);
1430 }
1431 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
1432 Chain = DAG.getNode(SPISD::CALL, DL, NodeTys, Ops);
1433 InGlue = Chain.getValue(1);
1434
1435 // Revert the stack pointer immediately after the call.
1436 Chain = DAG.getCALLSEQ_END(Chain, ArgsSize, 0, InGlue, DL);
1437 InGlue = Chain.getValue(1);
1438
1439 // Now extract the return values. This is more or less the same as
1440 // LowerFormalArguments_64.
1441
1442 // Assign locations to each value returned by this call.
1444 CCState RVInfo(CLI.CallConv, CLI.IsVarArg, DAG.getMachineFunction(), RVLocs,
1445 *DAG.getContext());
1446
1447 // Set inreg flag manually for codegen generated library calls that
1448 // return float.
1449 if (CLI.Ins.size() == 1 && CLI.Ins[0].VT == MVT::f32 && !CLI.CB)
1450 CLI.Ins[0].Flags.setInReg();
1451
1452 RVInfo.AnalyzeCallResult(CLI.Ins, RetCC_Sparc64);
1453
1454 // Copy all of the result registers out of their specified physreg.
1455 for (unsigned i = 0; i != RVLocs.size(); ++i) {
1456 CCValAssign &VA = RVLocs[i];
1457 assert(VA.isRegLoc() && "Can only return in registers!");
1458 unsigned Reg = toCallerWindow(VA.getLocReg());
1459
1460 // When returning 'inreg {i32, i32 }', two consecutive i32 arguments can
1461 // reside in the same register in the high and low bits. Reuse the
1462 // CopyFromReg previous node to avoid duplicate copies.
1463 SDValue RV;
1464 if (RegisterSDNode *SrcReg = dyn_cast<RegisterSDNode>(Chain.getOperand(1)))
1465 if (SrcReg->getReg() == Reg && Chain->getOpcode() == ISD::CopyFromReg)
1466 RV = Chain.getValue(0);
1467
1468 // But usually we'll create a new CopyFromReg for a different register.
1469 if (!RV.getNode()) {
1470 RV = DAG.getCopyFromReg(Chain, DL, Reg, RVLocs[i].getLocVT(), InGlue);
1471 Chain = RV.getValue(1);
1472 InGlue = Chain.getValue(2);
1473 }
1474
1475 // Get the high bits for i32 struct elements.
1476 if (VA.getValVT() == MVT::i32 && VA.needsCustom())
1477 RV = DAG.getNode(ISD::SRL, DL, VA.getLocVT(), RV,
1478 DAG.getConstant(32, DL, MVT::i32));
1479
1480 // The callee promoted the return value, so insert an Assert?ext SDNode so
1481 // we won't promote the value again in this function.
1482 switch (VA.getLocInfo()) {
1483 case CCValAssign::SExt:
1484 RV = DAG.getNode(ISD::AssertSext, DL, VA.getLocVT(), RV,
1485 DAG.getValueType(VA.getValVT()));
1486 break;
1487 case CCValAssign::ZExt:
1488 RV = DAG.getNode(ISD::AssertZext, DL, VA.getLocVT(), RV,
1489 DAG.getValueType(VA.getValVT()));
1490 break;
1491 default:
1492 break;
1493 }
1494
1495 // Truncate the register down to the return value type.
1496 if (VA.isExtInLoc())
1497 RV = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), RV);
1498
1499 InVals.push_back(RV);
1500 }
1501
1502 return Chain;
1503}
1504
1505//===----------------------------------------------------------------------===//
1506// TargetLowering Implementation
1507//===----------------------------------------------------------------------===//
1508
// Decides how an atomicrmw instruction should be expanded: a 32-bit Xchg
// needs no expansion because it maps directly onto the native swap (xchg)
// instruction; other operations fall through to the default expansion below.
1510 if (AI->getOperation() == AtomicRMWInst::Xchg &&
1511 AI->getType()->getPrimitiveSizeInBits() == 32)
1512 return AtomicExpansionKind::None; // Uses xchg instruction
1513
1515}
1516
1517/// intCondCCodeToRcond - Convert a DAG integer condition code to a SPARC
1518/// rcond condition.
/// Only signed comparisons (against zero) have register-condition
/// equivalents; unsigned condition codes hit the llvm_unreachable default.
1520 switch (CC) {
1521 default:
1522 llvm_unreachable("Unknown/unsigned integer condition code!");
1523 case ISD::SETEQ:
1524 return SPCC::REG_Z;
1525 case ISD::SETNE:
1526 return SPCC::REG_NZ;
1527 case ISD::SETLT:
1528 return SPCC::REG_LZ;
1529 case ISD::SETGT:
1530 return SPCC::REG_GZ;
1531 case ISD::SETLE:
1532 return SPCC::REG_LEZ;
1533 case ISD::SETGE:
1534 return SPCC::REG_GEZ;
1535 }
1536}
1537
1538/// IntCondCCodeToICC - Convert a DAG integer condition code to a SPARC ICC
1539/// condition.
/// Unsigned comparisons map to the carry-flag based ICC codes
/// (CS, LEU, GU, CC); unordered/FP codes are not handled here.
1541 switch (CC) {
1542 default: llvm_unreachable("Unknown integer condition code!");
1543 case ISD::SETEQ: return SPCC::ICC_E;
1544 case ISD::SETNE: return SPCC::ICC_NE;
1545 case ISD::SETLT: return SPCC::ICC_L;
1546 case ISD::SETGT: return SPCC::ICC_G;
1547 case ISD::SETLE: return SPCC::ICC_LE;
1548 case ISD::SETGE: return SPCC::ICC_GE;
1549 case ISD::SETULT: return SPCC::ICC_CS;
1550 case ISD::SETULE: return SPCC::ICC_LEU;
1551 case ISD::SETUGT: return SPCC::ICC_GU;
1552 case ISD::SETUGE: return SPCC::ICC_CC;
1553 }
1554}
1555
1556/// FPCondCCodeToFCC - Convert a DAG floating point condition code to a SPARC
1557/// FCC condition.
/// Ordered and "don't care" codes share FCC encodings (e.g. SETEQ/SETOEQ);
/// the unordered variants map to the U-prefixed FCC codes.
1559 switch (CC) {
1560 default: llvm_unreachable("Unknown fp condition code!");
1561 case ISD::SETEQ:
1562 case ISD::SETOEQ: return SPCC::FCC_E;
1563 case ISD::SETNE:
1564 case ISD::SETUNE: return SPCC::FCC_NE;
1565 case ISD::SETLT:
1566 case ISD::SETOLT: return SPCC::FCC_L;
1567 case ISD::SETGT:
1568 case ISD::SETOGT: return SPCC::FCC_G;
1569 case ISD::SETLE:
1570 case ISD::SETOLE: return SPCC::FCC_LE;
1571 case ISD::SETGE:
1572 case ISD::SETOGE: return SPCC::FCC_GE;
1573 case ISD::SETULT: return SPCC::FCC_UL;
1574 case ISD::SETULE: return SPCC::FCC_ULE;
1575 case ISD::SETUGT: return SPCC::FCC_UG;
1576 case ISD::SETUGE: return SPCC::FCC_UGE;
1577 case ISD::SETUO: return SPCC::FCC_U;
1578 case ISD::SETO: return SPCC::FCC_O;
1579 case ISD::SETONE: return SPCC::FCC_LG;
1580 case ISD::SETUEQ: return SPCC::FCC_UE;
1581 }
1582}
1583
// SparcTargetLowering constructor: registers the legal register classes,
// declares per-operation legalization actions (Legal/Expand/Custom/Promote),
// and installs runtime library (libcall) names, all keyed off subtarget
// features (64-bit, V9, hard quad-float, soft float/mul/div, LEON errata).
1585 const SparcSubtarget &STI)
1586 : TargetLowering(TM), Subtarget(&STI) {
1587 MVT PtrVT = MVT::getIntegerVT(TM.getPointerSizeInBits(0));
1588
1589 // Instructions which use registers as conditionals examine all the
1590 // bits (as does the pseudo SELECT_CC expansion). I don't think it
1591 // matters much whether it's ZeroOrOneBooleanContent, or
1592 // ZeroOrNegativeOneBooleanContent, so, arbitrarily choose the
1593 // former.
1596
1597 // Set up the register classes.
1598 addRegisterClass(MVT::i32, &SP::IntRegsRegClass);
1599 if (!Subtarget->useSoftFloat()) {
1600 addRegisterClass(MVT::f32, &SP::FPRegsRegClass);
1601 addRegisterClass(MVT::f64, &SP::DFPRegsRegClass);
1602 addRegisterClass(MVT::f128, &SP::QFPRegsRegClass);
1603 }
1604 if (Subtarget->is64Bit()) {
1605 addRegisterClass(MVT::i64, &SP::I64RegsRegClass);
1606 } else {
1607 // On 32bit sparc, we define a double-register 32bit register
1608 // class, as well. This is modeled in LLVM as a 2-vector of i32.
1609 addRegisterClass(MVT::v2i32, &SP::IntPairRegClass);
1610
1611 // ...but almost all operations must be expanded, so set that as
1612 // the default.
1613 for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op) {
1614 setOperationAction(Op, MVT::v2i32, Expand);
1615 }
1616 // Truncating/extending stores/loads are also not supported.
1618 setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v2i32, Expand);
1619 setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::v2i32, Expand);
1620 setLoadExtAction(ISD::EXTLOAD, VT, MVT::v2i32, Expand);
1621
1622 setLoadExtAction(ISD::SEXTLOAD, MVT::v2i32, VT, Expand);
1623 setLoadExtAction(ISD::ZEXTLOAD, MVT::v2i32, VT, Expand);
1624 setLoadExtAction(ISD::EXTLOAD, MVT::v2i32, VT, Expand);
1625
1626 setTruncStoreAction(VT, MVT::v2i32, Expand);
1627 setTruncStoreAction(MVT::v2i32, VT, Expand);
1628 }
1629 // However, load and store *are* legal.
1630 setOperationAction(ISD::LOAD, MVT::v2i32, Legal);
1631 setOperationAction(ISD::STORE, MVT::v2i32, Legal);
1634
1635 // And we need to promote i64 loads/stores into vector load/store
1638
1639 // Sadly, this doesn't work:
1640 // AddPromotedToType(ISD::LOAD, MVT::i64, MVT::v2i32);
1641 // AddPromotedToType(ISD::STORE, MVT::i64, MVT::v2i32);
1642 }
1643
1644 // Turn FP extload into load/fpextend
1645 for (MVT VT : MVT::fp_valuetypes()) {
1646 setLoadExtAction(ISD::EXTLOAD, VT, MVT::f16, Expand);
1647 setLoadExtAction(ISD::EXTLOAD, VT, MVT::f32, Expand);
1648 setLoadExtAction(ISD::EXTLOAD, VT, MVT::f64, Expand);
1649 }
1650
1651 // Sparc doesn't have i1 sign extending load
1652 for (MVT VT : MVT::integer_valuetypes())
1653 setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
1654
1655 // Turn FP truncstore into trunc + store.
1656 setTruncStoreAction(MVT::f32, MVT::f16, Expand);
1657 setTruncStoreAction(MVT::f64, MVT::f16, Expand);
1658 setTruncStoreAction(MVT::f64, MVT::f32, Expand);
1659 setTruncStoreAction(MVT::f128, MVT::f16, Expand);
1660 setTruncStoreAction(MVT::f128, MVT::f32, Expand);
1661 setTruncStoreAction(MVT::f128, MVT::f64, Expand);
1662
1663 // Custom legalize GlobalAddress nodes into LO/HI parts.
1668
1669 // Sparc doesn't have sext_inreg, replace them with shl/sra
1673
1674 // Sparc has no REM or DIVREM operations.
1679
1680 // ... nor does SparcV9.
1681 if (Subtarget->is64Bit()) {
1686 }
1687
1688 // Custom expand fp<->sint
1693
1694 // Custom Expand fp<->uint
1699
1700 // Lower f16 conversion operations into library calls
1707
1710
1711 // Sparc has no select or setcc: expand to SELECT_CC.
1716
1721
1722 // Sparc doesn't have BRCOND either, it has BR_CC.
1724 setOperationAction(ISD::BRIND, MVT::Other, Expand);
1725 setOperationAction(ISD::BR_JT, MVT::Other, Expand);
1730
1735
1740
1741 if (Subtarget->is64Bit()) {
1752
1754 Subtarget->usePopc() ? Legal : Expand);
1755 setOperationAction(ISD::CTTZ , MVT::i64, Expand);
1756 setOperationAction(ISD::CTLZ , MVT::i64, Expand);
1758 setOperationAction(ISD::ROTL , MVT::i64, Expand);
1759 setOperationAction(ISD::ROTR , MVT::i64, Expand);
1761 }
1762
1763 // ATOMICs.
1764 // Atomics are supported on SparcV9. 32-bit atomics are also
1765 // supported by some Leon SparcV8 variants. Otherwise, atomics
1766 // are unsupported.
1767 if (Subtarget->isV9()) {
1768 // TODO: we _ought_ to be able to support 64-bit atomics on 32-bit sparcv9,
1769 // but it hasn't been implemented in the backend yet.
1770 if (Subtarget->is64Bit())
1772 else
1774 } else if (Subtarget->hasLeonCasa())
1776 else
1778
1780
1782
1784
1785 // Custom Lower Atomic LOAD/STORE
1788
1789 if (Subtarget->is64Bit()) {
1794 }
1795
1796 if (!Subtarget->is64Bit()) {
1797 // These libcalls are not available in 32-bit.
1798 setLibcallName(RTLIB::MULO_I64, nullptr);
1799 setLibcallName(RTLIB::MUL_I128, nullptr);
1800 setLibcallName(RTLIB::SHL_I128, nullptr);
1801 setLibcallName(RTLIB::SRL_I128, nullptr);
1802 setLibcallName(RTLIB::SRA_I128, nullptr);
1803 }
1804
1805 setLibcallName(RTLIB::MULO_I128, nullptr);
1806
1807 if (!Subtarget->isV9()) {
1808 // SparcV8 does not have FNEGD and FABSD.
1811 }
1812
1813 setOperationAction(ISD::FSIN , MVT::f128, Expand);
1814 setOperationAction(ISD::FCOS , MVT::f128, Expand);
1816 setOperationAction(ISD::FREM , MVT::f128, Expand);
1817 setOperationAction(ISD::FMA , MVT::f128, Expand);
1818 setOperationAction(ISD::FSIN , MVT::f64, Expand);
1819 setOperationAction(ISD::FCOS , MVT::f64, Expand);
1821 setOperationAction(ISD::FREM , MVT::f64, Expand);
1822 setOperationAction(ISD::FMA , MVT::f64, Expand);
1823 setOperationAction(ISD::FSIN , MVT::f32, Expand);
1824 setOperationAction(ISD::FCOS , MVT::f32, Expand);
1826 setOperationAction(ISD::FREM , MVT::f32, Expand);
1827 setOperationAction(ISD::FMA , MVT::f32, Expand);
1828 setOperationAction(ISD::CTTZ , MVT::i32, Expand);
1829 setOperationAction(ISD::CTLZ , MVT::i32, Expand);
1830 setOperationAction(ISD::ROTL , MVT::i32, Expand);
1831 setOperationAction(ISD::ROTR , MVT::i32, Expand);
1836 setOperationAction(ISD::FPOW , MVT::f128, Expand);
1837 setOperationAction(ISD::FPOW , MVT::f64, Expand);
1838 setOperationAction(ISD::FPOW , MVT::f32, Expand);
1839
1843
1844 // Expands to [SU]MUL_LOHI.
1848
1849 if (Subtarget->useSoftMulDiv()) {
1850 // .umul works for both signed and unsigned
1853 setLibcallName(RTLIB::MUL_I32, ".umul");
1854
1856 setLibcallName(RTLIB::SDIV_I32, ".div");
1857
1859 setLibcallName(RTLIB::UDIV_I32, ".udiv");
1860
1861 setLibcallName(RTLIB::SREM_I32, ".rem");
1862 setLibcallName(RTLIB::UREM_I32, ".urem");
1863 }
1864
1865 if (Subtarget->is64Bit()) {
1870
1873
1877 }
1878
1879 // VASTART needs to be custom lowered to use the VarArgsFrameIndex.
1880 setOperationAction(ISD::VASTART , MVT::Other, Custom);
1881 // VAARG needs to be lowered to not do unaligned accesses for doubles.
1882 setOperationAction(ISD::VAARG , MVT::Other, Custom);
1883
1884 setOperationAction(ISD::TRAP , MVT::Other, Legal);
1886
1887 // Use the default implementation.
1888 setOperationAction(ISD::VACOPY , MVT::Other, Expand);
1889 setOperationAction(ISD::VAEND , MVT::Other, Expand);
1893
1895
1897 Subtarget->usePopc() ? Legal : Expand);
1898
1899 if (Subtarget->isV9() && Subtarget->hasHardQuad()) {
1900 setOperationAction(ISD::LOAD, MVT::f128, Legal);
1901 setOperationAction(ISD::STORE, MVT::f128, Legal);
1902 } else {
1903 setOperationAction(ISD::LOAD, MVT::f128, Custom);
1905 }
1906
1907 if (Subtarget->hasHardQuad()) {
1908 setOperationAction(ISD::FADD, MVT::f128, Legal);
1909 setOperationAction(ISD::FSUB, MVT::f128, Legal);
1910 setOperationAction(ISD::FMUL, MVT::f128, Legal);
1911 setOperationAction(ISD::FDIV, MVT::f128, Legal);
1912 setOperationAction(ISD::FSQRT, MVT::f128, Legal);
1915 if (Subtarget->isV9()) {
1916 setOperationAction(ISD::FNEG, MVT::f128, Legal);
1917 setOperationAction(ISD::FABS, MVT::f128, Legal);
1918 } else {
1919 setOperationAction(ISD::FNEG, MVT::f128, Custom);
1920 setOperationAction(ISD::FABS, MVT::f128, Custom);
1921 }
1922
1923 if (!Subtarget->is64Bit()) {
1924 setLibcallName(RTLIB::FPTOSINT_F128_I64, "_Q_qtoll");
1925 setLibcallName(RTLIB::FPTOUINT_F128_I64, "_Q_qtoull");
1926 setLibcallName(RTLIB::SINTTOFP_I64_F128, "_Q_lltoq");
1927 setLibcallName(RTLIB::UINTTOFP_I64_F128, "_Q_ulltoq");
1928 }
1929
1930 } else {
1931 // Custom legalize f128 operations.
1932
1933 setOperationAction(ISD::FADD, MVT::f128, Custom);
1934 setOperationAction(ISD::FSUB, MVT::f128, Custom);
1935 setOperationAction(ISD::FMUL, MVT::f128, Custom);
1936 setOperationAction(ISD::FDIV, MVT::f128, Custom);
1938 setOperationAction(ISD::FNEG, MVT::f128, Custom);
1939 setOperationAction(ISD::FABS, MVT::f128, Custom);
1940
1944
1945 // Setup Runtime library names.
1946 if (Subtarget->is64Bit() && !Subtarget->useSoftFloat()) {
1947 setLibcallName(RTLIB::ADD_F128, "_Qp_add");
1948 setLibcallName(RTLIB::SUB_F128, "_Qp_sub");
1949 setLibcallName(RTLIB::MUL_F128, "_Qp_mul");
1950 setLibcallName(RTLIB::DIV_F128, "_Qp_div");
1951 setLibcallName(RTLIB::SQRT_F128, "_Qp_sqrt");
1952 setLibcallName(RTLIB::FPTOSINT_F128_I32, "_Qp_qtoi");
1953 setLibcallName(RTLIB::FPTOUINT_F128_I32, "_Qp_qtoui");
1954 setLibcallName(RTLIB::SINTTOFP_I32_F128, "_Qp_itoq");
1955 setLibcallName(RTLIB::UINTTOFP_I32_F128, "_Qp_uitoq");
1956 setLibcallName(RTLIB::FPTOSINT_F128_I64, "_Qp_qtox");
1957 setLibcallName(RTLIB::FPTOUINT_F128_I64, "_Qp_qtoux");
1958 setLibcallName(RTLIB::SINTTOFP_I64_F128, "_Qp_xtoq");
1959 setLibcallName(RTLIB::UINTTOFP_I64_F128, "_Qp_uxtoq");
1960 setLibcallName(RTLIB::FPEXT_F32_F128, "_Qp_stoq");
1961 setLibcallName(RTLIB::FPEXT_F64_F128, "_Qp_dtoq");
1962 setLibcallName(RTLIB::FPROUND_F128_F32, "_Qp_qtos");
1963 setLibcallName(RTLIB::FPROUND_F128_F64, "_Qp_qtod");
1964 } else if (!Subtarget->useSoftFloat()) {
1965 setLibcallName(RTLIB::ADD_F128, "_Q_add");
1966 setLibcallName(RTLIB::SUB_F128, "_Q_sub");
1967 setLibcallName(RTLIB::MUL_F128, "_Q_mul");
1968 setLibcallName(RTLIB::DIV_F128, "_Q_div");
1969 setLibcallName(RTLIB::SQRT_F128, "_Q_sqrt");
1970 setLibcallName(RTLIB::FPTOSINT_F128_I32, "_Q_qtoi");
1971 setLibcallName(RTLIB::FPTOUINT_F128_I32, "_Q_qtou");
1972 setLibcallName(RTLIB::SINTTOFP_I32_F128, "_Q_itoq");
1973 setLibcallName(RTLIB::UINTTOFP_I32_F128, "_Q_utoq");
1974 setLibcallName(RTLIB::FPTOSINT_F128_I64, "_Q_qtoll");
1975 setLibcallName(RTLIB::FPTOUINT_F128_I64, "_Q_qtoull");
1976 setLibcallName(RTLIB::SINTTOFP_I64_F128, "_Q_lltoq");
1977 setLibcallName(RTLIB::UINTTOFP_I64_F128, "_Q_ulltoq");
1978 setLibcallName(RTLIB::FPEXT_F32_F128, "_Q_stoq");
1979 setLibcallName(RTLIB::FPEXT_F64_F128, "_Q_dtoq");
1980 setLibcallName(RTLIB::FPROUND_F128_F32, "_Q_qtos");
1981 setLibcallName(RTLIB::FPROUND_F128_F64, "_Q_qtod");
1982 }
1983 }
1984
1985 if (Subtarget->fixAllFDIVSQRT()) {
1986 // Promote FDIVS and FSQRTS to FDIVD and FSQRTD instructions instead as
1987 // the former instructions generate errata on LEON processors.
1990 }
1991
1992 if (Subtarget->hasNoFMULS()) {
1994 }
1995
1996 // Custom combine bitcast between f64 and v2i32
1997 if (!Subtarget->is64Bit())
1999
2000 if (Subtarget->hasLeonCycleCounter())
2002
2004
2006
2008}
2009
// Forwards the subtarget's soft-float setting to generic lowering code.
// NOTE(review): the function's declaring line (original line 2010) is
// elided from this listing; only the body survives.
2011 return Subtarget->useSoftFloat();
2012}
2013
2014const char *SparcTargetLowering::getTargetNodeName(unsigned Opcode) const {
2015 switch ((SPISD::NodeType)Opcode) {
2016 case SPISD::FIRST_NUMBER: break;
2017 case SPISD::CMPICC: return "SPISD::CMPICC";
2018 case SPISD::CMPFCC: return "SPISD::CMPFCC";
2019 case SPISD::CMPFCC_V9:
2020 return "SPISD::CMPFCC_V9";
2021 case SPISD::BRICC: return "SPISD::BRICC";
2022 case SPISD::BPICC:
2023 return "SPISD::BPICC";
2024 case SPISD::BPXCC:
2025 return "SPISD::BPXCC";
2026 case SPISD::BRFCC: return "SPISD::BRFCC";
2027 case SPISD::BRFCC_V9:
2028 return "SPISD::BRFCC_V9";
2029 case SPISD::BR_REG:
2030 return "SPISD::BR_REG";
2031 case SPISD::SELECT_ICC: return "SPISD::SELECT_ICC";
2032 case SPISD::SELECT_XCC: return "SPISD::SELECT_XCC";
2033 case SPISD::SELECT_FCC: return "SPISD::SELECT_FCC";
2034 case SPISD::SELECT_REG:
2035 return "SPISD::SELECT_REG";
2036 case SPISD::Hi: return "SPISD::Hi";
2037 case SPISD::Lo: return "SPISD::Lo";
2038 case SPISD::FTOI: return "SPISD::FTOI";
2039 case SPISD::ITOF: return "SPISD::ITOF";
2040 case SPISD::FTOX: return "SPISD::FTOX";
2041 case SPISD::XTOF: return "SPISD::XTOF";
2042 case SPISD::CALL: return "SPISD::CALL";
2043 case SPISD::RET_GLUE: return "SPISD::RET_GLUE";
2044 case SPISD::GLOBAL_BASE_REG: return "SPISD::GLOBAL_BASE_REG";
2045 case SPISD::FLUSHW: return "SPISD::FLUSHW";
2046 case SPISD::TLS_ADD: return "SPISD::TLS_ADD";
2047 case SPISD::TLS_LD: return "SPISD::TLS_LD";
2048 case SPISD::TLS_CALL: return "SPISD::TLS_CALL";
2049 case SPISD::TAIL_CALL: return "SPISD::TAIL_CALL";
2050 case SPISD::LOAD_GDOP: return "SPISD::LOAD_GDOP";
2051 }
2052 return nullptr;
2053}
2054
// getSetCCResultType: scalar setcc results are produced as i32 on SPARC.
// NOTE(review): the declaring line (original 2055) and the vector-typed
// return path (original 2059) are elided from this listing.
2056 EVT VT) const {
2057 if (!VT.isVector())
2058 return MVT::i32;
2060}
2061
2062/// isMaskedValueZeroForTargetNode - Return true if 'Op & Mask' is known to
2063/// be zero. Op is expected to be a target specific node. Used by DAG
2064/// combiner.
// NOTE(review): the function's declaring line (original line 2065,
// SparcTargetLowering::computeKnownBitsForTargetNode) is elided from this
// listing; the parameter list begins below.
2066 (const SDValue Op,
2067 KnownBits &Known,
2068 const APInt &DemandedElts,
2069 const SelectionDAG &DAG,
2070 unsigned Depth) const {
2071 KnownBits Known2;
// Start pessimistic: nothing known unless a handled opcode proves otherwise.
2072 Known.resetAll();
2073
2074 switch (Op.getOpcode()) {
2075 default: break;
// A SELECT_* node yields either operand 0 or operand 1, so combine the
// known bits of both possible results.
2076 case SPISD::SELECT_ICC:
2077 case SPISD::SELECT_XCC:
2078 case SPISD::SELECT_FCC:
2079 Known = DAG.computeKnownBits(Op.getOperand(1), Depth + 1);
2080 Known2 = DAG.computeKnownBits(Op.getOperand(0), Depth + 1);
2081
2082 // Only known if known in both the LHS and RHS.
2083 Known = Known.intersectWith(Known2);
2084 break;
2085 }
2086}
2087
2088// Look at LHS/RHS/CC and see if they are a lowered setcc instruction. If so
2089// set LHS/RHS and SPCC to the LHS/RHS of the setcc and SPCC to the condition.
2090static void LookThroughSetCC(SDValue &LHS, SDValue &RHS,
2091 ISD::CondCode CC, unsigned &SPCC) {
2092 if (isNullConstant(RHS) && CC == ISD::SETNE &&
2093 (((LHS.getOpcode() == SPISD::SELECT_ICC ||
2094 LHS.getOpcode() == SPISD::SELECT_XCC) &&
2095 LHS.getOperand(3).getOpcode() == SPISD::CMPICC) ||
2096 (LHS.getOpcode() == SPISD::SELECT_FCC &&
2097 (LHS.getOperand(3).getOpcode() == SPISD::CMPFCC ||
2098 LHS.getOperand(3).getOpcode() == SPISD::CMPFCC_V9))) &&
2099 isOneConstant(LHS.getOperand(0)) && isNullConstant(LHS.getOperand(1))) {
2100 SDValue CMPCC = LHS.getOperand(3);
2101 SPCC = LHS.getConstantOperandVal(2);
2102 LHS = CMPCC.getOperand(0);
2103 RHS = CMPCC.getOperand(1);
2104 }
2105}
2106
2107// Convert to a target node and set target flags.
// NOTE(review): the declaring line (original 2108,
// SparcTargetLowering::withTargetFlags) is elided from this listing.
// Each branch rebuilds one flavor of address node as its Target*
// counterpart carrying the target flag TF.
2109 SelectionDAG &DAG) const {
2110 if (const GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Op))
2111 return DAG.getTargetGlobalAddress(GA->getGlobal(),
2112 SDLoc(GA),
2113 GA->getValueType(0),
2114 GA->getOffset(), TF);
2115
2116 if (const ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(Op))
2117 return DAG.getTargetConstantPool(CP->getConstVal(), CP->getValueType(0),
2118 CP->getAlign(), CP->getOffset(), TF);
2119
2120 if (const BlockAddressSDNode *BA = dyn_cast<BlockAddressSDNode>(Op))
2121 return DAG.getTargetBlockAddress(BA->getBlockAddress(),
2122 Op.getValueType(),
2123 0,
2124 TF);
2125
2126 if (const ExternalSymbolSDNode *ES = dyn_cast<ExternalSymbolSDNode>(Op))
2127 return DAG.getTargetExternalSymbol(ES->getSymbol(),
2128 ES->getValueType(0), TF);
2129
// No other address-node kinds are expected here.
2130 llvm_unreachable("Unhandled address SDNode");
2131}
2132
2133// Split Op into high and low parts according to HiTF and LoTF.
2134// Return an ADD node combining the parts.
// NOTE(review): the declaring line (original 2135,
// SparcTargetLowering::makeHiLoPair) is elided from this listing.
2136 unsigned HiTF, unsigned LoTF,
2137 SelectionDAG &DAG) const {
2138 SDLoc DL(Op);
2139 EVT VT = Op.getValueType();
// Emit SPISD::Hi/SPISD::Lo wrappers (each with its own target flag) and
// combine them with an integer ADD.
2140 SDValue Hi = DAG.getNode(SPISD::Hi, DL, VT, withTargetFlags(Op, HiTF, DAG));
2141 SDValue Lo = DAG.getNode(SPISD::Lo, DL, VT, withTargetFlags(Op, LoTF, DAG));
2142 return DAG.getNode(ISD::ADD, DL, VT, Hi, Lo);
2143}
2144
2145// Build SDNodes for producing an address from a GlobalAddress, ConstantPool,
2146// or ExternalSymbol SDNode.
// NOTE(review): the declaring line (original 2147) and several interior
// lines (2160, 2163-2164, 2171, 2174, 2183-2184, 2187-2188, 2190,
// 2196-2197, 2199-2200) are elided from this listing.
2148 SDLoc DL(Op);
2149 EVT VT = getPointerTy(DAG.getDataLayout());
2150
2151 // Handle PIC mode first. SPARC needs a got load for every variable!
2152 if (isPositionIndependent()) {
2153 const Module *M = DAG.getMachineFunction().getFunction().getParent();
2154 PICLevel::Level picLevel = M->getPICLevel();
2155 SDValue Idx;
2156
2157 if (picLevel == PICLevel::SmallPIC) {
2158 // This is the pic13 code model, the GOT is known to be smaller than 8KiB.
2159 Idx = DAG.getNode(SPISD::Lo, DL, Op.getValueType(),
2161 } else {
2162 // This is the pic32 code model, the GOT is known to be smaller than 4GB.
2165 }
2166
// Address of the GOT slot = global base register + GOT index; the final
// address is then loaded from the GOT.
2167 SDValue GlobalBase = DAG.getNode(SPISD::GLOBAL_BASE_REG, DL, VT);
2168 SDValue AbsAddr = DAG.getNode(ISD::ADD, DL, VT, GlobalBase, Idx);
2169 // GLOBAL_BASE_REG codegen'ed with call. Inform MFI that this
2170 // function has calls.
2172 MFI.setHasCalls(true);
2173 return DAG.getLoad(VT, DL, DAG.getEntryNode(), AbsAddr,
2175 }
2176
2177 // This is one of the absolute code models.
2178 switch(getTargetMachine().getCodeModel()) {
2179 default:
2180 llvm_unreachable("Unsupported absolute code model");
2181 case CodeModel::Small:
2182 // abs32.
2185 case CodeModel::Medium: {
2186 // abs44.
// 44-bit absolute address: (H44 << 12) + L44.
2189 H44 = DAG.getNode(ISD::SHL, DL, VT, H44, DAG.getConstant(12, DL, MVT::i32));
2191 L44 = DAG.getNode(SPISD::Lo, DL, VT, L44);
2192 return DAG.getNode(ISD::ADD, DL, VT, H44, L44);
2193 }
2194 case CodeModel::Large: {
2195 // abs64.
// 64-bit absolute address: (Hi << 32) + Lo.
2198 Hi = DAG.getNode(ISD::SHL, DL, VT, Hi, DAG.getConstant(32, DL, MVT::i32));
2201 return DAG.getNode(ISD::ADD, DL, VT, Hi, Lo);
2202 }
2203 }
2204}
2205
// Three thin lowering wrappers that simply delegate to makeAddress().
// NOTE(review): the declaring lines (original 2206, 2211, 2216) are elided
// from this listing; presumably these are the GlobalAddress / ConstantPool /
// BlockAddress lowering entry points — verify against the full source.
2207 SelectionDAG &DAG) const {
2208 return makeAddress(Op, DAG);
2209}
2210
2212 SelectionDAG &DAG) const {
2213 return makeAddress(Op, DAG);
2214}
2215
2217 SelectionDAG &DAG) const {
2218 return makeAddress(Op, DAG);
2219}
2220
// Lower a TLS global address for each of the four TLS models.
// NOTE(review): the declaring line (original 2221) and several interior
// lines carrying SparcMCExpr::VK_Sparc_TLS_* flag constants are elided
// from this listing.
2222 SelectionDAG &DAG) const {
2223
2224 GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
2225 if (DAG.getTarget().useEmulatedTLS())
2226 return LowerToTLSEmulatedModel(GA, DAG);
2227
2228 SDLoc DL(GA);
2229 const GlobalValue *GV = GA->getGlobal();
2230 EVT PtrVT = getPointerTy(DAG.getDataLayout());
2231
2233
// General/Local Dynamic: materialize the GOT offset, then emit a call to
// __tls_get_addr with the argument pinned in %o0.
2234 if (model == TLSModel::GeneralDynamic || model == TLSModel::LocalDynamic) {
2235 unsigned HiTF = ((model == TLSModel::GeneralDynamic)
2238 unsigned LoTF = ((model == TLSModel::GeneralDynamic)
2241 unsigned addTF = ((model == TLSModel::GeneralDynamic)
2244 unsigned callTF = ((model == TLSModel::GeneralDynamic)
2247
2248 SDValue HiLo = makeHiLoPair(Op, HiTF, LoTF, DAG);
2250 SDValue Argument = DAG.getNode(SPISD::TLS_ADD, DL, PtrVT, Base, HiLo,
2251 withTargetFlags(Op, addTF, DAG));
2252
2253 SDValue Chain = DAG.getEntryNode();
2254 SDValue InGlue;
2255
2256 Chain = DAG.getCALLSEQ_START(Chain, 1, 0, DL);
2257 Chain = DAG.getCopyToReg(Chain, DL, SP::O0, Argument, InGlue);
2258 InGlue = Chain.getValue(1);
2259 SDValue Callee = DAG.getTargetExternalSymbol("__tls_get_addr", PtrVT);
2260 SDValue Symbol = withTargetFlags(Op, callTF, DAG);
2261
2262 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
2263 const uint32_t *Mask = Subtarget->getRegisterInfo()->getCallPreservedMask(
2265 assert(Mask && "Missing call preserved mask for calling convention");
2266 SDValue Ops[] = {Chain,
2267 Callee,
2268 Symbol,
2269 DAG.getRegister(SP::O0, PtrVT),
2270 DAG.getRegisterMask(Mask),
2271 InGlue};
2272 Chain = DAG.getNode(SPISD::TLS_CALL, DL, NodeTys, Ops);
2273 InGlue = Chain.getValue(1);
2274 Chain = DAG.getCALLSEQ_END(Chain, 1, 0, InGlue, DL);
2275 InGlue = Chain.getValue(1);
// The call returns the TLS address (or module base for LocalDynamic) in %o0.
2276 SDValue Ret = DAG.getCopyFromReg(Chain, DL, SP::O0, PtrVT, InGlue);
2277
2278 if (model != TLSModel::LocalDynamic)
2279 return Ret;
2280
// LocalDynamic: add the symbol's Hi/Lo offset to the module base.
2281 SDValue Hi = DAG.getNode(SPISD::Hi, DL, PtrVT,
2283 SDValue Lo = DAG.getNode(SPISD::Lo, DL, PtrVT,
2285 HiLo = DAG.getNode(ISD::XOR, DL, PtrVT, Hi, Lo);
2286 return DAG.getNode(SPISD::TLS_ADD, DL, PtrVT, Ret, HiLo,
2288 }
2289
// Initial Exec: load the TP offset from the GOT and add it to %g7.
2290 if (model == TLSModel::InitialExec) {
2291 unsigned ldTF = ((PtrVT == MVT::i64)? SparcMCExpr::VK_Sparc_TLS_IE_LDX
2293
2295
2296 // GLOBAL_BASE_REG codegen'ed with call. Inform MFI that this
2297 // function has calls.
2299 MFI.setHasCalls(true);
2300
2301 SDValue TGA = makeHiLoPair(Op,
2304 SDValue Ptr = DAG.getNode(ISD::ADD, DL, PtrVT, Base, TGA);
2306 DL, PtrVT, Ptr,
2307 withTargetFlags(Op, ldTF, DAG));
2308 return DAG.getNode(SPISD::TLS_ADD, DL, PtrVT,
2309 DAG.getRegister(SP::G7, PtrVT), Offset,
2312 }
2313
// Local Exec: the offset from the thread pointer (%g7) is known at
// link time; add it directly.
2314 assert(model == TLSModel::LocalExec);
2315 SDValue Hi = DAG.getNode(SPISD::Hi, DL, PtrVT,
2317 SDValue Lo = DAG.getNode(SPISD::Lo, DL, PtrVT,
2319 SDValue Offset = DAG.getNode(ISD::XOR, DL, PtrVT, Hi, Lo);
2320
2321 return DAG.getNode(ISD::ADD, DL, PtrVT,
2322 DAG.getRegister(SP::G7, PtrVT), Offset);
2323}
2324
// Append one argument for an f128 libcall to Args. f128 values are passed
// indirectly: they are spilled to a 16-byte stack slot and the slot's
// address is passed instead.
// NOTE(review): the declaring line (original 2325) and the line declaring
// MFI (original 2329) are elided from this listing.
2326 ArgListTy &Args, SDValue Arg,
2327 const SDLoc &DL,
2328 SelectionDAG &DAG) const {
2330 EVT ArgVT = Arg.getValueType();
2331 Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
2332
2333 ArgListEntry Entry;
2334 Entry.Node = Arg;
2335 Entry.Ty = ArgTy;
2336
2337 if (ArgTy->isFP128Ty()) {
2338 // Create a stack object and pass the pointer to the library function.
2339 int FI = MFI.CreateStackObject(16, Align(8), false);
2340 SDValue FIPtr = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
2341 Chain = DAG.getStore(Chain, DL, Entry.Node, FIPtr, MachinePointerInfo(),
2342 Align(8));
2343
2344 Entry.Node = FIPtr;
2345 Entry.Ty = PointerType::getUnqual(ArgTy);
2346 }
2347 Args.push_back(Entry);
2348 return Chain;
2350
2351SDValue
// Lower an f128 operation to a libcall with numArgs operands. If the call
// returns f128, the result is returned indirectly through a caller-allocated
// 16-byte stack slot (sret on 32-bit) and loaded back afterwards.
// NOTE(review): interior lines 2358 (MFI declaration), 2387 (CLI
// declaration), and 2403 (load's MachinePointerInfo argument) are elided
// from this listing.
2353 const char *LibFuncName,
2354 unsigned numArgs) const {
2355
2356 ArgListTy Args;
2357
2359 auto PtrVT = getPointerTy(DAG.getDataLayout());
2360
2361 SDValue Callee = DAG.getExternalSymbol(LibFuncName, PtrVT);
2362 Type *RetTy = Op.getValueType().getTypeForEVT(*DAG.getContext());
2363 Type *RetTyABI = RetTy;
2364 SDValue Chain = DAG.getEntryNode();
2365 SDValue RetPtr;
2366
2367 if (RetTy->isFP128Ty()) {
2368 // Create a Stack Object to receive the return value of type f128.
2369 ArgListEntry Entry;
2370 int RetFI = MFI.CreateStackObject(16, Align(8), false);
2371 RetPtr = DAG.getFrameIndex(RetFI, PtrVT);
2372 Entry.Node = RetPtr;
2373 Entry.Ty = PointerType::getUnqual(RetTy);
2374 if (!Subtarget->is64Bit()) {
2375 Entry.IsSRet = true;
2376 Entry.IndirectType = RetTy;
2377 }
2378 Entry.IsReturned = false;
2379 Args.push_back(Entry);
// The ABI-level call returns void; the real result comes via RetPtr.
2380 RetTyABI = Type::getVoidTy(*DAG.getContext());
2381 }
2382
2383 assert(Op->getNumOperands() >= numArgs && "Not enough operands!");
2384 for (unsigned i = 0, e = numArgs; i != e; ++i) {
2385 Chain = LowerF128_LibCallArg(Chain, Args, Op.getOperand(i), SDLoc(Op), DAG);
2386 }
2388 CLI.setDebugLoc(SDLoc(Op)).setChain(Chain)
2389 .setCallee(CallingConv::C, RetTyABI, Callee, std::move(Args));
2390
2391 std::pair<SDValue, SDValue> CallInfo = LowerCallTo(CLI);
2392
2393 // chain is in second result.
2394 if (RetTyABI == RetTy)
2395 return CallInfo.first;
2396
2397 assert (RetTy->isFP128Ty() && "Unexpected return type!");
2398
2399 Chain = CallInfo.second;
2400
2401 // Load RetPtr to get the return value.
2402 return DAG.getLoad(Op.getValueType(), SDLoc(Op), Chain, RetPtr,
2404}
2405
// Lower an f128 comparison to a soft-float libcall and rewrite SPCC to an
// integer condition code testing the libcall's result.
// NOTE(review): the declaring line (original 2406), the RetTy declaration
// (original 2432), and the CLI declaration (original 2438) are elided from
// this listing.
2407 unsigned &SPCC, const SDLoc &DL,
2408 SelectionDAG &DAG) const {
2409
2410 const char *LibCall = nullptr;
2411 bool is64Bit = Subtarget->is64Bit();
// Ordered comparisons have dedicated predicates; all unordered-involving
// conditions fall back to the generic _Q[p]_cmp and are decoded below.
2412 switch(SPCC) {
2413 default: llvm_unreachable("Unhandled conditional code!");
2414 case SPCC::FCC_E : LibCall = is64Bit? "_Qp_feq" : "_Q_feq"; break;
2415 case SPCC::FCC_NE : LibCall = is64Bit? "_Qp_fne" : "_Q_fne"; break;
2416 case SPCC::FCC_L : LibCall = is64Bit? "_Qp_flt" : "_Q_flt"; break;
2417 case SPCC::FCC_G : LibCall = is64Bit? "_Qp_fgt" : "_Q_fgt"; break;
2418 case SPCC::FCC_LE : LibCall = is64Bit? "_Qp_fle" : "_Q_fle"; break;
2419 case SPCC::FCC_GE : LibCall = is64Bit? "_Qp_fge" : "_Q_fge"; break;
2420 case SPCC::FCC_UL :
2421 case SPCC::FCC_ULE:
2422 case SPCC::FCC_UG :
2423 case SPCC::FCC_UGE:
2424 case SPCC::FCC_U :
2425 case SPCC::FCC_O :
2426 case SPCC::FCC_LG :
2427 case SPCC::FCC_UE : LibCall = is64Bit? "_Qp_cmp" : "_Q_cmp"; break;
2428 }
2429
2430 auto PtrVT = getPointerTy(DAG.getDataLayout());
2431 SDValue Callee = DAG.getExternalSymbol(LibCall, PtrVT);
2433 ArgListTy Args;
2434 SDValue Chain = DAG.getEntryNode();
2435 Chain = LowerF128_LibCallArg(Chain, Args, LHS, DL, DAG);
2436 Chain = LowerF128_LibCallArg(Chain, Args, RHS, DL, DAG);
2437
2439 CLI.setDebugLoc(DL).setChain(Chain)
2440 .setCallee(CallingConv::C, RetTy, Callee, std::move(Args));
2441
2442 std::pair<SDValue, SDValue> CallInfo = LowerCallTo(CLI);
2443
2444 // result is in first, and chain is in second result.
2445 SDValue Result = CallInfo.first;
2446
// Decode the libcall result into an integer compare + condition code.
// The _Q[p]_cmp cases test the 0..3 comparison class returned by the
// library — presumably 0:equal, 1:less, 2:greater, 3:unordered; verify
// against the SPARC software floating-point ABI.
2447 switch(SPCC) {
2448 default: {
// Boolean predicates (_Q[p]_feq etc.): nonzero result means true.
2449 SDValue RHS = DAG.getConstant(0, DL, Result.getValueType());
2450 SPCC = SPCC::ICC_NE;
2451 return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
2452 }
2453 case SPCC::FCC_UL : {
2454 SDValue Mask = DAG.getConstant(1, DL, Result.getValueType());
2455 Result = DAG.getNode(ISD::AND, DL, Result.getValueType(), Result, Mask);
2456 SDValue RHS = DAG.getConstant(0, DL, Result.getValueType());
2457 SPCC = SPCC::ICC_NE;
2458 return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
2459 }
2460 case SPCC::FCC_ULE: {
2461 SDValue RHS = DAG.getConstant(2, DL, Result.getValueType());
2462 SPCC = SPCC::ICC_NE;
2463 return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
2464 }
2465 case SPCC::FCC_UG : {
2466 SDValue RHS = DAG.getConstant(1, DL, Result.getValueType());
2467 SPCC = SPCC::ICC_G;
2468 return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
2469 }
2470 case SPCC::FCC_UGE: {
2471 SDValue RHS = DAG.getConstant(1, DL, Result.getValueType());
2472 SPCC = SPCC::ICC_NE;
2473 return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
2474 }
2475
2476 case SPCC::FCC_U : {
2477 SDValue RHS = DAG.getConstant(3, DL, Result.getValueType());
2478 SPCC = SPCC::ICC_E;
2479 return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
2480 }
2481 case SPCC::FCC_O : {
2482 SDValue RHS = DAG.getConstant(3, DL, Result.getValueType());
2483 SPCC = SPCC::ICC_NE;
2484 return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
2485 }
2486 case SPCC::FCC_LG : {
2487 SDValue Mask = DAG.getConstant(3, DL, Result.getValueType());
2488 Result = DAG.getNode(ISD::AND, DL, Result.getValueType(), Result, Mask);
2489 SDValue RHS = DAG.getConstant(0, DL, Result.getValueType());
2490 SPCC = SPCC::ICC_NE;
2491 return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
2492 }
2493 case SPCC::FCC_UE : {
2494 SDValue Mask = DAG.getConstant(3, DL, Result.getValueType());
2495 Result = DAG.getNode(ISD::AND, DL, Result.getValueType(), Result, Mask);
2496 SDValue RHS = DAG.getConstant(0, DL, Result.getValueType());
2497 SPCC = SPCC::ICC_E;
2498 return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
2499 }
2500 }
2501}
2502
2503static SDValue
// Lower FP_EXTEND to f128 via the appropriate f32/f64 -> f128 libcall.
// NOTE(review): the declaring line (original 2504, carrying the function
// name and first parameters) is elided from this listing.
2505 const SparcTargetLowering &TLI) {
2506
2507 if (Op.getOperand(0).getValueType() == MVT::f64)
2508 return TLI.LowerF128Op(Op, DAG,
2509 TLI.getLibcallName(RTLIB::FPEXT_F64_F128), 1);
2510
2511 if (Op.getOperand(0).getValueType() == MVT::f32)
2512 return TLI.LowerF128Op(Op, DAG,
2513 TLI.getLibcallName(RTLIB::FPEXT_F32_F128), 1);
2514
2515 llvm_unreachable("fpextend with non-float operand!");
2516 return SDValue();
2517}
2518
2519static SDValue
// Lower FP_ROUND from f128 via the appropriate f128 -> f32/f64 libcall;
// rounds not involving f128 are already legal and returned unchanged.
// NOTE(review): the declaring line (original 2520) is elided from this
// listing.
2521 const SparcTargetLowering &TLI) {
2522 // FP_ROUND on f64 and f32 are legal.
2523 if (Op.getOperand(0).getValueType() != MVT::f128)
2524 return Op;
2525
2526 if (Op.getValueType() == MVT::f64)
2527 return TLI.LowerF128Op(Op, DAG,
2528 TLI.getLibcallName(RTLIB::FPROUND_F128_F64), 1);
2529 if (Op.getValueType() == MVT::f32)
2530 return TLI.LowerF128Op(Op, DAG,
2531 TLI.getLibcallName(RTLIB::FPROUND_F128_F32), 1);
2532
2533 llvm_unreachable("fpround to non-float!");
2534 return SDValue();
2535}
2536
// Lower FP_TO_SINT: f128 sources go through a libcall; otherwise convert
// in an FP register via FTOI/FTOX and bitcast to the integer type.
// NOTE(review): the declaring line (original 2537) is elided from this
// listing.
2538 const SparcTargetLowering &TLI,
2539 bool hasHardQuad) {
2540 SDLoc dl(Op);
2541 EVT VT = Op.getValueType();
2542 assert(VT == MVT::i32 || VT == MVT::i64);
2543
2544 // Expand f128 operations to fp128 abi calls.
2545 if (Op.getOperand(0).getValueType() == MVT::f128
2546 && (!hasHardQuad || !TLI.isTypeLegal(VT))) {
2547 const char *libName = TLI.getLibcallName(VT == MVT::i32
2548 ? RTLIB::FPTOSINT_F128_I32
2549 : RTLIB::FPTOSINT_F128_I64);
2550 return TLI.LowerF128Op(Op, DAG, libName, 1);
2551 }
2552
2553 // Expand if the resulting type is illegal.
2554 if (!TLI.isTypeLegal(VT))
2555 return SDValue();
2556
2557 // Otherwise, Convert the fp value to integer in an FP register.
2558 if (VT == MVT::i32)
2559 Op = DAG.getNode(SPISD::FTOI, dl, MVT::f32, Op.getOperand(0));
2560 else
2561 Op = DAG.getNode(SPISD::FTOX, dl, MVT::f64, Op.getOperand(0));
2562
2563 return DAG.getNode(ISD::BITCAST, dl, VT, Op);
2564}
2565
// Lower SINT_TO_FP: f128 results go through a libcall; otherwise bitcast
// the integer into an FP register and convert via ITOF/XTOF.
// NOTE(review): the declaring line (original 2566) is elided from this
// listing.
2567 const SparcTargetLowering &TLI,
2568 bool hasHardQuad) {
2569 SDLoc dl(Op);
2570 EVT OpVT = Op.getOperand(0).getValueType();
2571 assert(OpVT == MVT::i32 || (OpVT == MVT::i64));
2572
2573 EVT floatVT = (OpVT == MVT::i32) ? MVT::f32 : MVT::f64;
2574
2575 // Expand f128 operations to fp128 ABI calls.
2576 if (Op.getValueType() == MVT::f128
2577 && (!hasHardQuad || !TLI.isTypeLegal(OpVT))) {
2578 const char *libName = TLI.getLibcallName(OpVT == MVT::i32
2579 ? RTLIB::SINTTOFP_I32_F128
2580 : RTLIB::SINTTOFP_I64_F128);
2581 return TLI.LowerF128Op(Op, DAG, libName, 1);
2582 }
2583
2584 // Expand if the operand type is illegal.
2585 if (!TLI.isTypeLegal(OpVT))
2586 return SDValue();
2587
2588 // Otherwise, Convert the int value to FP in an FP register.
2589 SDValue Tmp = DAG.getNode(ISD::BITCAST, dl, floatVT, Op.getOperand(0));
2590 unsigned opcode = (OpVT == MVT::i32)? SPISD::ITOF : SPISD::XTOF;
2591 return DAG.getNode(opcode, dl, Op.getValueType(), Tmp);
2592}
2593
// Lower FP_TO_UINT: only the f128-source case is custom-lowered here (via
// libcall); every other case is left for generic expansion.
// NOTE(review): the declaring line (original 2594) is elided from this
// listing.
2596 const SparcTargetLowering &TLI,
2597 bool hasHardQuad) {
2598 SDLoc dl(Op);
2599 EVT VT = Op.getValueType();
2600
2601 // Expand if it does not involve f128 or the target has support for
2602 // quad floating point instructions and the resulting type is legal.
2603 if (Op.getOperand(0).getValueType() != MVT::f128 ||
2604 (hasHardQuad && TLI.isTypeLegal(VT)))
2605 return SDValue();
2606
2607 assert(VT == MVT::i32 || VT == MVT::i64);
2608
2609 return TLI.LowerF128Op(Op, DAG,
2610 TLI.getLibcallName(VT == MVT::i32
2611 ? RTLIB::FPTOUINT_F128_I32
2612 : RTLIB::FPTOUINT_F128_I64),
2613 1);
2614}
2614
// Lower UINT_TO_FP: only the f128-result case is custom-lowered here (via
// libcall); every other case is left for generic expansion.
// NOTE(review): the declaring line (original 2615) is elided from this
// listing.
2617 const SparcTargetLowering &TLI,
2618 bool hasHardQuad) {
2619 SDLoc dl(Op);
2620 EVT OpVT = Op.getOperand(0).getValueType();
2621 assert(OpVT == MVT::i32 || OpVT == MVT::i64);
2622
2623 // Expand if it does not involve f128 or the target has support for
2624 // quad floating point instructions and the operand type is legal.
2625 if (Op.getValueType() != MVT::f128 || (hasHardQuad && TLI.isTypeLegal(OpVT)))
2626 return SDValue();
2627
2628 return TLI.LowerF128Op(Op, DAG,
2629 TLI.getLibcallName(OpVT == MVT::i32
2630 ? RTLIB::UINTTOFP_I32_F128
2631 : RTLIB::UINTTOFP_I64_F128),
2632 1);
2633}
2633
// Lower BR_CC to a Sparc compare + conditional-branch node pair, choosing
// icc/xcc/fcc flavors by operand type and subtarget features.
// NOTE(review): the declaring line (original 2634) and part of the BR_REG
// guard condition (original 2656) are elided from this listing.
2635 const SparcTargetLowering &TLI, bool hasHardQuad,
2636 bool isV9, bool is64Bit) {
2637 SDValue Chain = Op.getOperand(0);
2638 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get();
2639 SDValue LHS = Op.getOperand(2);
2640 SDValue RHS = Op.getOperand(3);
2641 SDValue Dest = Op.getOperand(4);
2642 SDLoc dl(Op);
2643 unsigned Opc, SPCC = ~0U;
2644
2645 // If this is a br_cc of a "setcc", and if the setcc got lowered into
2646 // an CMP[IF]CC/SELECT_[IF]CC pair, find the original compared values.
2647 LookThroughSetCC(LHS, RHS, CC, SPCC);
2648 assert(LHS.getValueType() == RHS.getValueType());
2649
2650 // Get the condition flag.
2651 SDValue CompareFlag;
2652 if (LHS.getValueType().isInteger()) {
2653 // On V9 processors running in 64-bit mode, if CC compares two `i64`s
2654 // and the RHS is zero we might be able to use a specialized branch.
2655 if (is64Bit && isV9 && LHS.getValueType() == MVT::i64 &&
2657 return DAG.getNode(SPISD::BR_REG, dl, MVT::Other, Chain, Dest,
2658 DAG.getConstant(intCondCCodeToRcond(CC), dl, MVT::i32),
2659 LHS);
2660
2661 CompareFlag = DAG.getNode(SPISD::CMPICC, dl, MVT::Glue, LHS, RHS);
2662 if (SPCC == ~0U) SPCC = IntCondCCodeToICC(CC);
2663 if (isV9)
2664 // 32-bit compares use the icc flags, 64-bit uses the xcc flags.
2665 Opc = LHS.getValueType() == MVT::i32 ? SPISD::BPICC : SPISD::BPXCC;
2666 else
2667 // Non-v9 targets don't have xcc.
2668 Opc = SPISD::BRICC;
2669 } else {
// Soft-quad f128 compares are lowered through a libcall whose integer
// result is then branched on with the icc flags.
2670 if (!hasHardQuad && LHS.getValueType() == MVT::f128) {
2671 if (SPCC == ~0U) SPCC = FPCondCCodeToFCC(CC);
2672 CompareFlag = TLI.LowerF128Compare(LHS, RHS, SPCC, dl, DAG);
2673 Opc = isV9 ? SPISD::BPICC : SPISD::BRICC;
2674 } else {
2675 unsigned CmpOpc = isV9 ? SPISD::CMPFCC_V9 : SPISD::CMPFCC;
2676 CompareFlag = DAG.getNode(CmpOpc, dl, MVT::Glue, LHS, RHS);
2677 if (SPCC == ~0U) SPCC = FPCondCCodeToFCC(CC);
2678 Opc = isV9 ? SPISD::BRFCC_V9 : SPISD::BRFCC;
2679 }
2680 }
2681 return DAG.getNode(Opc, dl, MVT::Other, Chain, Dest,
2682 DAG.getConstant(SPCC, dl, MVT::i32), CompareFlag);
2683}
2684
// Lower SELECT_CC to a Sparc compare + conditional-move node pair, choosing
// icc/xcc/fcc flavors by operand type and subtarget features.
// NOTE(review): the declaring line (original 2685) and the second half of
// the integer-select opcode choice (original 2721) are elided from this
// listing.
2686 const SparcTargetLowering &TLI, bool hasHardQuad,
2687 bool isV9, bool is64Bit) {
2688 SDValue LHS = Op.getOperand(0);
2689 SDValue RHS = Op.getOperand(1);
2690 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
2691 SDValue TrueVal = Op.getOperand(2);
2692 SDValue FalseVal = Op.getOperand(3);
2693 SDLoc dl(Op);
2694 unsigned Opc, SPCC = ~0U;
2695
2696 // If this is a select_cc of a "setcc", and if the setcc got lowered into
2697 // an CMP[IF]CC/SELECT_[IF]CC pair, find the original compared values.
2698 LookThroughSetCC(LHS, RHS, CC, SPCC);
2699 assert(LHS.getValueType() == RHS.getValueType());
2700
2701 SDValue CompareFlag;
2702 if (LHS.getValueType().isInteger()) {
2703 // On V9 processors running in 64-bit mode, if CC compares two `i64`s
2704 // and the RHS is zero we might be able to use a specialized select.
2705 // All SELECT_CC between any two scalar integer types are eligible for
2706 // lowering to specialized instructions. Additionally, f32 and f64 types
2707 // are also eligible, but for f128 we can only use the specialized
2708 // instruction when we have hardquad.
2709 EVT ValType = TrueVal.getValueType();
2710 bool IsEligibleType = ValType.isScalarInteger() || ValType == MVT::f32 ||
2711 ValType == MVT::f64 ||
2712 (ValType == MVT::f128 && hasHardQuad);
2713 if (is64Bit && isV9 && LHS.getValueType() == MVT::i64 &&
2714 isNullConstant(RHS) && !ISD::isUnsignedIntSetCC(CC) && IsEligibleType)
2715 return DAG.getNode(
2716 SPISD::SELECT_REG, dl, TrueVal.getValueType(), TrueVal, FalseVal,
2717 DAG.getConstant(intCondCCodeToRcond(CC), dl, MVT::i32), LHS);
2718
2719 CompareFlag = DAG.getNode(SPISD::CMPICC, dl, MVT::Glue, LHS, RHS);
2720 Opc = LHS.getValueType() == MVT::i32 ?
2722 if (SPCC == ~0U) SPCC = IntCondCCodeToICC(CC);
2723 } else {
// Soft-quad f128 compares go through a libcall; the select then tests
// the libcall's integer result with the icc flags.
2724 if (!hasHardQuad && LHS.getValueType() == MVT::f128) {
2725 if (SPCC == ~0U) SPCC = FPCondCCodeToFCC(CC);
2726 CompareFlag = TLI.LowerF128Compare(LHS, RHS, SPCC, dl, DAG);
2727 Opc = SPISD::SELECT_ICC;
2728 } else {
2729 unsigned CmpOpc = isV9 ? SPISD::CMPFCC_V9 : SPISD::CMPFCC;
2730 CompareFlag = DAG.getNode(CmpOpc, dl, MVT::Glue, LHS, RHS);
2731 Opc = SPISD::SELECT_FCC;
2732 if (SPCC == ~0U) SPCC = FPCondCCodeToFCC(CC);
2733 }
2734 }
2735 return DAG.getNode(Opc, dl, TrueVal.getValueType(), TrueVal, FalseVal,
2736 DAG.getConstant(SPCC, dl, MVT::i32), CompareFlag);
2737}
2738
// Lower VASTART: store the address of the varargs save area (frame pointer
// %i6 plus the function's varargs frame offset) into the va_list slot.
// NOTE(review): the declaring line (original 2739) and the lines obtaining
// the MachineFunction / SparcMachineFunctionInfo (originals 2741-2742,
// 2746) are elided from this listing.
2740 const SparcTargetLowering &TLI) {
2743 auto PtrVT = TLI.getPointerTy(DAG.getDataLayout());
2744
2745 // Need frame address to find the address of VarArgsFrameIndex.
2747
2748 // vastart just stores the address of the VarArgsFrameIndex slot into the
2749 // memory location argument.
2750 SDLoc DL(Op);
2751 SDValue Offset =
2752 DAG.getNode(ISD::ADD, DL, PtrVT, DAG.getRegister(SP::I6, PtrVT),
2753 DAG.getIntPtrConstant(FuncInfo->getVarArgsFrameOffset(), DL));
2754 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
2755 return DAG.getStore(Op.getOperand(0), DL, Offset, Op.getOperand(1),
2756 MachinePointerInfo(SV));
2757}
2758
// Lower VAARG: load the current va_list pointer, advance it past one
// argument slot, store it back, then load the actual argument value.
// NOTE(review): the declaring line (original 2759) and part of the
// pointer-increment expression (original 2771) are elided from this
// listing.
2760 SDNode *Node = Op.getNode();
2761 EVT VT = Node->getValueType(0);
2762 SDValue InChain = Node->getOperand(0);
2763 SDValue VAListPtr = Node->getOperand(1);
2764 EVT PtrVT = VAListPtr.getValueType();
2765 const Value *SV = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
2766 SDLoc DL(Node);
2767 SDValue VAList =
2768 DAG.getLoad(PtrVT, DL, InChain, VAListPtr, MachinePointerInfo(SV));
2769 // Increment the pointer, VAList, to the next vaarg.
2770 SDValue NextPtr = DAG.getNode(ISD::ADD, DL, PtrVT, VAList,
2772 DL));
2773 // Store the incremented VAList to the legalized pointer.
2774 InChain = DAG.getStore(VAList.getValue(1), DL, NextPtr, VAListPtr,
2775 MachinePointerInfo(SV));
2776 // Load the actual argument out of the pointer VAList.
2777 // We can't count on greater alignment than the word size.
2778 return DAG.getLoad(
2779 VT, DL, InChain, VAList, MachinePointerInfo(),
2780 Align(std::min(PtrVT.getFixedSizeInBits(), VT.getFixedSizeInBits()) / 8));
2781}
2782
// Lower DYNAMIC_STACKALLOC: bump %o6 (%sp) down by the requested size and
// return a pointer positioned above the register spill area.
// NOTE(review): the declaring line (original 2783) is elided from this
// listing.
2784 const SparcSubtarget *Subtarget) {
2785 SDValue Chain = Op.getOperand(0); // Legalize the chain.
2786 SDValue Size = Op.getOperand(1); // Legalize the size.
2787 MaybeAlign Alignment =
2788 cast<ConstantSDNode>(Op.getOperand(2))->getMaybeAlignValue();
2789 Align StackAlign = Subtarget->getFrameLowering()->getStackAlign();
2790 EVT VT = Size->getValueType(0);
2791 SDLoc dl(Op);
2792
2793 // TODO: implement over-aligned alloca. (Note: also implies
2794 // supporting support for overaligned function frames + dynamic
2795 // allocations, at all, which currently isn't supported)
2796 if (Alignment && *Alignment > StackAlign) {
2797 const MachineFunction &MF = DAG.getMachineFunction();
2798 report_fatal_error("Function \"" + Twine(MF.getName()) + "\": "
2799 "over-aligned dynamic alloca not supported.");
2800 }
2801
2802 // The resultant pointer needs to be above the register spill area
2803 // at the bottom of the stack.
2804 unsigned regSpillArea;
2805 if (Subtarget->is64Bit()) {
2806 regSpillArea = 128;
2807 } else {
2808 // On Sparc32, the size of the spill area is 92. Unfortunately,
2809 // that's only 4-byte aligned, not 8-byte aligned (the stack
2810 // pointer is 8-byte aligned). So, if the user asked for an 8-byte
2811 // aligned dynamic allocation, we actually need to add 96 to the
2812 // bottom of the stack, instead of 92, to ensure 8-byte alignment.
2813
2814 // That also means adding 4 to the size of the allocation --
2815 // before applying the 8-byte rounding. Unfortunately, we the
2816 // value we get here has already had rounding applied. So, we need
2817 // to add 8, instead, wasting a bit more memory.
2818
2819 // Further, this only actually needs to be done if the required
2820 // alignment is > 4, but, we've lost that info by this point, too,
2821 // so we always apply it.
2822
2823 // (An alternative approach would be to always reserve 96 bytes
2824 // instead of the required 92, but then we'd waste 4 extra bytes
2825 // in every frame, not just those with dynamic stack allocations)
2826
2827 // TODO: modify code in SelectionDAGBuilder to make this less sad.
2828
2829 Size = DAG.getNode(ISD::ADD, dl, VT, Size,
2830 DAG.getConstant(8, dl, VT));
2831 regSpillArea = 96;
2832 }
2833
2834 unsigned SPReg = SP::O6;
2835 SDValue SP = DAG.getCopyFromReg(Chain, dl, SPReg, VT);
2836 SDValue NewSP = DAG.getNode(ISD::SUB, dl, VT, SP, Size); // Value
2837 Chain = DAG.getCopyToReg(SP.getValue(1), dl, SPReg, NewSP); // Output chain
2838
// On 64-bit, %sp carries a bias that must be added back to form a
// usable address.
2839 regSpillArea += Subtarget->getStackPointerBias();
2840
2841 SDValue NewVal = DAG.getNode(ISD::ADD, dl, VT, NewSP,
2842 DAG.getConstant(regSpillArea, dl, VT));
2843 SDValue Ops[2] = { NewVal, Chain };
2844 return DAG.getMergeValues(Ops, dl);
2845}
2846
2847
// Emit a SPISD::FLUSHW node (flush register windows to the stack) chained
// to the DAG entry node.
// NOTE(review): the declaring line (original 2848) is elided from this
// listing.
2849 SDLoc dl(Op);
2850 SDValue Chain = DAG.getNode(SPISD::FLUSHW,
2851 dl, MVT::Other, DAG.getEntryNode());
2852 return Chain;
2853}
2854
// Compute the frame address `depth` levels up the call chain, flushing
// register windows first so parent frame pointers are in memory.
// NOTE(review): the declaring line (original 2855) and the MFI declaration
// (original 2858) are elided from this listing.
2856 const SparcSubtarget *Subtarget,
2857 bool AlwaysFlush = false) {
2859 MFI.setFrameAddressIsTaken(true);
2860
2861 EVT VT = Op.getValueType();
2862 SDLoc dl(Op);
2863 unsigned FrameReg = SP::I6;
2864 unsigned stackBias = Subtarget->getStackPointerBias();
2865
2866 SDValue FrameAddr;
2867 SDValue Chain;
2868
2869 // flush first to make sure the windowed registers' values are in stack
2870 Chain = (depth || AlwaysFlush) ? getFLUSHW(Op, DAG) : DAG.getEntryNode();
2871
2872 FrameAddr = DAG.getCopyFromReg(Chain, dl, FrameReg, VT);
2873
// Offset of the saved parent frame pointer within a frame (64-bit frames
// also carry the stack bias).
2874 unsigned Offset = (Subtarget->is64Bit()) ? (stackBias + 112) : 56;
2875
// Walk up one saved frame pointer per level of depth.
2876 while (depth--) {
2877 SDValue Ptr = DAG.getNode(ISD::ADD, dl, VT, FrameAddr,
2878 DAG.getIntPtrConstant(Offset, dl));
2879 FrameAddr = DAG.getLoad(VT, dl, Chain, Ptr, MachinePointerInfo());
2880 }
2881 if (Subtarget->is64Bit())
2882 FrameAddr = DAG.getNode(ISD::ADD, dl, VT, FrameAddr,
2883 DAG.getIntPtrConstant(stackBias, dl));
2884 return FrameAddr;
2885}
2886
2887
// Lower FRAMEADDR by delegating to getFRAMEADDR with the requested depth.
// NOTE(review): the declaring line (original 2888) is elided from this
// listing.
2889 const SparcSubtarget *Subtarget) {
2890
2891 uint64_t depth = Op.getConstantOperandVal(0);
2892
2893 return getFRAMEADDR(depth, Op, DAG, Subtarget);
2894
2895}
2896
// Lower RETURNADDR: depth 0 reads %i7 directly; deeper requests walk the
// saved frame pointers and load the return address from the parent frame.
// NOTE(review): the declaring line (original 2897), the MachineFunction
// declaration (original 2900), the guard before the early return (original
// 2904), and the start of the Ptr expression (original 2923) are elided
// from this listing.
2898 const SparcTargetLowering &TLI,
2899 const SparcSubtarget *Subtarget) {
2901 MachineFrameInfo &MFI = MF.getFrameInfo();
2902 MFI.setReturnAddressIsTaken(true);
2903
2905 return SDValue();
2906
2907 EVT VT = Op.getValueType();
2908 SDLoc dl(Op);
2909 uint64_t depth = Op.getConstantOperandVal(0);
2910
2911 SDValue RetAddr;
2912 if (depth == 0) {
// The current return address lives in %i7.
2913 auto PtrVT = TLI.getPointerTy(DAG.getDataLayout());
2914 Register RetReg = MF.addLiveIn(SP::I7, TLI.getRegClassFor(PtrVT));
2915 RetAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, RetReg, VT);
2916 return RetAddr;
2917 }
2918
2919 // Need frame address to find return address of the caller.
2920 SDValue FrameAddr = getFRAMEADDR(depth - 1, Op, DAG, Subtarget, true);
2921
// Offset of the saved return address within the caller's frame.
2922 unsigned Offset = (Subtarget->is64Bit()) ? 120 : 60;
2924 dl, VT,
2925 FrameAddr,
2926 DAG.getIntPtrConstant(Offset, dl));
2927 RetAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), Ptr, MachinePointerInfo());
2928
2929 return RetAddr;
2930}
2931
2932static SDValue LowerF64Op(SDValue SrcReg64, const SDLoc &dl, SelectionDAG &DAG,
2933 unsigned opcode) {
2934 assert(SrcReg64.getValueType() == MVT::f64 && "LowerF64Op called on non-double!");
2935 assert(opcode == ISD::FNEG || opcode == ISD::FABS);
2936
2937 // Lower fneg/fabs on f64 to fneg/fabs on f32.
2938 // fneg f64 => fneg f32:sub_even, fmov f32:sub_odd.
2939 // fabs f64 => fabs f32:sub_even, fmov f32:sub_odd.
2940
2941 // Note: in little-endian, the floating-point value is stored in the
2942 // registers are in the opposite order, so the subreg with the sign
2943 // bit is the highest-numbered (odd), rather than the
2944 // lowest-numbered (even).
2945
2946 SDValue Hi32 = DAG.getTargetExtractSubreg(SP::sub_even, dl, MVT::f32,
2947 SrcReg64);
2948 SDValue Lo32 = DAG.getTargetExtractSubreg(SP::sub_odd, dl, MVT::f32,
2949 SrcReg64);
2950
2951 if (DAG.getDataLayout().isLittleEndian())
2952 Lo32 = DAG.getNode(opcode, dl, MVT::f32, Lo32);
2953 else
2954 Hi32 = DAG.getNode(opcode, dl, MVT::f32, Hi32);
2955
2956 SDValue DstReg64 = SDValue(DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF,
2957 dl, MVT::f64), 0);
2958 DstReg64 = DAG.getTargetInsertSubreg(SP::sub_even, dl, MVT::f64,
2959 DstReg64, Hi32);
2960 DstReg64 = DAG.getTargetInsertSubreg(SP::sub_odd, dl, MVT::f64,
2961 DstReg64, Lo32);
2962 return DstReg64;
2963}
2964
2965// Lower a f128 load into two f64 loads.
2967{
2968 SDLoc dl(Op);
2969 LoadSDNode *LdNode = cast<LoadSDNode>(Op.getNode());
2970 assert(LdNode->getOffset().isUndef() && "Unexpected node type");
2971
2972 Align Alignment = commonAlignment(LdNode->getOriginalAlign(), 8);
2973
2974 SDValue Hi64 =
2975 DAG.getLoad(MVT::f64, dl, LdNode->getChain(), LdNode->getBasePtr(),
2976 LdNode->getPointerInfo(), Alignment);
2977 EVT addrVT = LdNode->getBasePtr().getValueType();
2978 SDValue LoPtr = DAG.getNode(ISD::ADD, dl, addrVT,
2979 LdNode->getBasePtr(),
2980 DAG.getConstant(8, dl, addrVT));
2981 SDValue Lo64 = DAG.getLoad(MVT::f64, dl, LdNode->getChain(), LoPtr,
2982 LdNode->getPointerInfo().getWithOffset(8),
2983 Alignment);
2984
2985 SDValue SubRegEven = DAG.getTargetConstant(SP::sub_even64, dl, MVT::i32);
2986 SDValue SubRegOdd = DAG.getTargetConstant(SP::sub_odd64, dl, MVT::i32);
2987
2988 SDNode *InFP128 = DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF,
2989 dl, MVT::f128);
2990 InFP128 = DAG.getMachineNode(TargetOpcode::INSERT_SUBREG, dl,
2991 MVT::f128,
2992 SDValue(InFP128, 0),
2993 Hi64,
2994 SubRegEven);
2995 InFP128 = DAG.getMachineNode(TargetOpcode::INSERT_SUBREG, dl,
2996 MVT::f128,
2997 SDValue(InFP128, 0),
2998 Lo64,
2999 SubRegOdd);
3000 SDValue OutChains[2] = { SDValue(Hi64.getNode(), 1),
3001 SDValue(Lo64.getNode(), 1) };
3002 SDValue OutChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
3003 SDValue Ops[2] = {SDValue(InFP128,0), OutChain};
3004 return DAG.getMergeValues(Ops, dl);
3005}
3006
3008{
3009 LoadSDNode *LdNode = cast<LoadSDNode>(Op.getNode());
3010
3011 EVT MemVT = LdNode->getMemoryVT();
3012 if (MemVT == MVT::f128)
3013 return LowerF128Load(Op, DAG);
3014
3015 return Op;
3016}
3017
3018// Lower a f128 store into two f64 stores.
3020 SDLoc dl(Op);
3021 StoreSDNode *StNode = cast<StoreSDNode>(Op.getNode());
3022 assert(StNode->getOffset().isUndef() && "Unexpected node type");
3023
3024 SDValue SubRegEven = DAG.getTargetConstant(SP::sub_even64, dl, MVT::i32);
3025 SDValue SubRegOdd = DAG.getTargetConstant(SP::sub_odd64, dl, MVT::i32);
3026
3027 SDNode *Hi64 = DAG.getMachineNode(TargetOpcode::EXTRACT_SUBREG,
3028 dl,
3029 MVT::f64,
3030 StNode->getValue(),
3031 SubRegEven);
3032 SDNode *Lo64 = DAG.getMachineNode(TargetOpcode::EXTRACT_SUBREG,
3033 dl,
3034 MVT::f64,
3035 StNode->getValue(),
3036 SubRegOdd);
3037
3038 Align Alignment = commonAlignment(StNode->getOriginalAlign(), 8);
3039
3040 SDValue OutChains[2];
3041 OutChains[0] =
3042 DAG.getStore(StNode->getChain(), dl, SDValue(Hi64, 0),
3043 StNode->getBasePtr(), StNode->getPointerInfo(),
3044 Alignment);
3045 EVT addrVT = StNode->getBasePtr().getValueType();
3046 SDValue LoPtr = DAG.getNode(ISD::ADD, dl, addrVT,
3047 StNode->getBasePtr(),
3048 DAG.getConstant(8, dl, addrVT));
3049 OutChains[1] = DAG.getStore(StNode->getChain(), dl, SDValue(Lo64, 0), LoPtr,
3050 StNode->getPointerInfo().getWithOffset(8),
3051 Alignment);
3052 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
3053}
3054
3056{
3057 SDLoc dl(Op);
3058 StoreSDNode *St = cast<StoreSDNode>(Op.getNode());
3059
3060 EVT MemVT = St->getMemoryVT();
3061 if (MemVT == MVT::f128)
3062 return LowerF128Store(Op, DAG);
3063
3064 if (MemVT == MVT::i64) {
3065 // Custom handling for i64 stores: turn it into a bitcast and a
3066 // v2i32 store.
3067 SDValue Val = DAG.getNode(ISD::BITCAST, dl, MVT::v2i32, St->getValue());
3068 SDValue Chain = DAG.getStore(
3069 St->getChain(), dl, Val, St->getBasePtr(), St->getPointerInfo(),
3070 St->getOriginalAlign(), St->getMemOperand()->getFlags(),
3071 St->getAAInfo());
3072 return Chain;
3073 }
3074
3075 return SDValue();
3076}
3077
3079 assert((Op.getOpcode() == ISD::FNEG || Op.getOpcode() == ISD::FABS)
3080 && "invalid opcode");
3081
3082 SDLoc dl(Op);
3083
3084 if (Op.getValueType() == MVT::f64)
3085 return LowerF64Op(Op.getOperand(0), dl, DAG, Op.getOpcode());
3086 if (Op.getValueType() != MVT::f128)
3087 return Op;
3088
3089 // Lower fabs/fneg on f128 to fabs/fneg on f64
3090 // fabs/fneg f128 => fabs/fneg f64:sub_even64, fmov f64:sub_odd64
3091 // (As with LowerF64Op, on little-endian, we need to negate the odd
3092 // subreg)
3093
3094 SDValue SrcReg128 = Op.getOperand(0);
3095 SDValue Hi64 = DAG.getTargetExtractSubreg(SP::sub_even64, dl, MVT::f64,
3096 SrcReg128);
3097 SDValue Lo64 = DAG.getTargetExtractSubreg(SP::sub_odd64, dl, MVT::f64,
3098 SrcReg128);
3099
3100 if (DAG.getDataLayout().isLittleEndian()) {
3101 if (isV9)
3102 Lo64 = DAG.getNode(Op.getOpcode(), dl, MVT::f64, Lo64);
3103 else
3104 Lo64 = LowerF64Op(Lo64, dl, DAG, Op.getOpcode());
3105 } else {
3106 if (isV9)
3107 Hi64 = DAG.getNode(Op.getOpcode(), dl, MVT::f64, Hi64);
3108 else
3109 Hi64 = LowerF64Op(Hi64, dl, DAG, Op.getOpcode());
3110 }
3111
3112 SDValue DstReg128 = SDValue(DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF,
3113 dl, MVT::f128), 0);
3114 DstReg128 = DAG.getTargetInsertSubreg(SP::sub_even64, dl, MVT::f128,
3115 DstReg128, Hi64);
3116 DstReg128 = DAG.getTargetInsertSubreg(SP::sub_odd64, dl, MVT::f128,
3117 DstReg128, Lo64);
3118 return DstReg128;
3119}
3120
3122
3123 if (Op.getValueType() != MVT::i64)
3124 return Op;
3125
3126 SDLoc dl(Op);
3127 SDValue Src1 = Op.getOperand(0);
3128 SDValue Src1Lo = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Src1);
3129 SDValue Src1Hi = DAG.getNode(ISD::SRL, dl, MVT::i64, Src1,
3130 DAG.getConstant(32, dl, MVT::i64));
3131 Src1Hi = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Src1Hi);
3132
3133 SDValue Src2 = Op.getOperand(1);
3134 SDValue Src2Lo = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Src2);
3135 SDValue Src2Hi = DAG.getNode(ISD::SRL, dl, MVT::i64, Src2,
3136 DAG.getConstant(32, dl, MVT::i64));
3137 Src2Hi = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Src2Hi);
3138
3139
3140 bool hasChain = false;
3141 unsigned hiOpc = Op.getOpcode();
3142 switch (Op.getOpcode()) {
3143 default: llvm_unreachable("Invalid opcode");
3144 case ISD::ADDC: hiOpc = ISD::ADDE; break;
3145 case ISD::ADDE: hasChain = true; break;
3146 case ISD::SUBC: hiOpc = ISD::SUBE; break;
3147 case ISD::SUBE: hasChain = true; break;
3148 }
3149 SDValue Lo;
3150 SDVTList VTs = DAG.getVTList(MVT::i32, MVT::Glue);
3151 if (hasChain) {
3152 Lo = DAG.getNode(Op.getOpcode(), dl, VTs, Src1Lo, Src2Lo,
3153 Op.getOperand(2));
3154 } else {
3155 Lo = DAG.getNode(Op.getOpcode(), dl, VTs, Src1Lo, Src2Lo);
3156 }
3157 SDValue Hi = DAG.getNode(hiOpc, dl, VTs, Src1Hi, Src2Hi, Lo.getValue(1));
3158 SDValue Carry = Hi.getValue(1);
3159
3160 Lo = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i64, Lo);
3161 Hi = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i64, Hi);
3162 Hi = DAG.getNode(ISD::SHL, dl, MVT::i64, Hi,
3163 DAG.getConstant(32, dl, MVT::i64));
3164
3165 SDValue Dst = DAG.getNode(ISD::OR, dl, MVT::i64, Hi, Lo);
3166 SDValue Ops[2] = { Dst, Carry };
3167 return DAG.getMergeValues(Ops, dl);
3168}
3169
3170// Custom lower UMULO/SMULO for SPARC. This code is similar to ExpandNode()
3171// in LegalizeDAG.cpp except the order of arguments to the library function.
3173 const SparcTargetLowering &TLI)
3174{
3175 unsigned opcode = Op.getOpcode();
3176 assert((opcode == ISD::UMULO || opcode == ISD::SMULO) && "Invalid Opcode.");
3177
3178 bool isSigned = (opcode == ISD::SMULO);
3179 EVT VT = MVT::i64;
3180 EVT WideVT = MVT::i128;
3181 SDLoc dl(Op);
3182 SDValue LHS = Op.getOperand(0);
3183
3184 if (LHS.getValueType() != VT)
3185 return Op;
3186
3187 SDValue ShiftAmt = DAG.getConstant(63, dl, VT);
3188
3189 SDValue RHS = Op.getOperand(1);
3190 SDValue HiLHS, HiRHS;
3191 if (isSigned) {
3192 HiLHS = DAG.getNode(ISD::SRA, dl, VT, LHS, ShiftAmt);
3193 HiRHS = DAG.getNode(ISD::SRA, dl, MVT::i64, RHS, ShiftAmt);
3194 } else {
3195 HiLHS = DAG.getConstant(0, dl, VT);
3196 HiRHS = DAG.getConstant(0, dl, MVT::i64);
3197 }
3198
3199 SDValue Args[] = { HiLHS, LHS, HiRHS, RHS };
3200
3202 CallOptions.setSExt(isSigned);
3203 SDValue MulResult = TLI.makeLibCall(DAG,
3204 RTLIB::MUL_I128, WideVT,
3205 Args, CallOptions, dl).first;
3206 SDValue BottomHalf, TopHalf;
3207 std::tie(BottomHalf, TopHalf) = DAG.SplitScalar(MulResult, dl, VT, VT);
3208 if (isSigned) {
3209 SDValue Tmp1 = DAG.getNode(ISD::SRA, dl, VT, BottomHalf, ShiftAmt);
3210 TopHalf = DAG.getSetCC(dl, MVT::i32, TopHalf, Tmp1, ISD::SETNE);
3211 } else {
3212 TopHalf = DAG.getSetCC(dl, MVT::i32, TopHalf, DAG.getConstant(0, dl, VT),
3213 ISD::SETNE);
3214 }
3215 // MulResult is a node with an illegal type. Because such things are not
3216 // generally permitted during this phase of legalization, ensure that
3217 // nothing is left using the node. The above EXTRACT_ELEMENT nodes should have
3218 // been folded.
3219 assert(MulResult->use_empty() && "Illegally typed node still in use!");
3220
3221 SDValue Ops[2] = { BottomHalf, TopHalf } ;
3222 return DAG.getMergeValues(Ops, dl);
3223}
3224
3226 if (isStrongerThanMonotonic(cast<AtomicSDNode>(Op)->getSuccessOrdering())) {
3227 // Expand with a fence.
3228 return SDValue();
3229 }
3230
3231 // Monotonic load/stores are legal.
3232 return Op;
3233}
3234
3236 SelectionDAG &DAG) const {
3237 unsigned IntNo = Op.getConstantOperandVal(0);
3238 SDLoc dl(Op);
3239 switch (IntNo) {
3240 default: return SDValue(); // Don't custom lower most intrinsics.
3241 case Intrinsic::thread_pointer: {
3242 EVT PtrVT = getPointerTy(DAG.getDataLayout());
3243 return DAG.getRegister(SP::G7, PtrVT);
3244 }
3245 }
3246}
3247
3250
3251 bool hasHardQuad = Subtarget->hasHardQuad();
3252 bool isV9 = Subtarget->isV9();
3253 bool is64Bit = Subtarget->is64Bit();
3254
3255 switch (Op.getOpcode()) {
3256 default: llvm_unreachable("Should not custom lower this!");
3257
3258 case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG, *this,
3259 Subtarget);
3260 case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG,
3261 Subtarget);
3263 case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG);
3264 case ISD::BlockAddress: return LowerBlockAddress(Op, DAG);
3265 case ISD::ConstantPool: return LowerConstantPool(Op, DAG);
3266 case ISD::FP_TO_SINT: return LowerFP_TO_SINT(Op, DAG, *this,
3267 hasHardQuad);
3268 case ISD::SINT_TO_FP: return LowerSINT_TO_FP(Op, DAG, *this,
3269 hasHardQuad);
3270 case ISD::FP_TO_UINT: return LowerFP_TO_UINT(Op, DAG, *this,
3271 hasHardQuad);
3272 case ISD::UINT_TO_FP: return LowerUINT_TO_FP(Op, DAG, *this,
3273 hasHardQuad);
3274 case ISD::BR_CC:
3275 return LowerBR_CC(Op, DAG, *this, hasHardQuad, isV9, is64Bit);
3276 case ISD::SELECT_CC:
3277 return LowerSELECT_CC(Op, DAG, *this, hasHardQuad, isV9, is64Bit);
3278 case ISD::VASTART: return LowerVASTART(Op, DAG, *this);
3279 case ISD::VAARG: return LowerVAARG(Op, DAG);
3281 Subtarget);
3282
3283 case ISD::LOAD: return LowerLOAD(Op, DAG);
3284 case ISD::STORE: return LowerSTORE(Op, DAG);
3285 case ISD::FADD: return LowerF128Op(Op, DAG,
3286 getLibcallName(RTLIB::ADD_F128), 2);
3287 case ISD::FSUB: return LowerF128Op(Op, DAG,
3288 getLibcallName(RTLIB::SUB_F128), 2);
3289 case ISD::FMUL: return LowerF128Op(Op, DAG,
3290 getLibcallName(RTLIB::MUL_F128), 2);
3291 case ISD::FDIV: return LowerF128Op(Op, DAG,
3292 getLibcallName(RTLIB::DIV_F128), 2);
3293 case ISD::FSQRT: return LowerF128Op(Op, DAG,
3294 getLibcallName(RTLIB::SQRT_F128),1);
3295 case ISD::FABS:
3296 case ISD::FNEG: return LowerFNEGorFABS(Op, DAG, isV9);
3297 case ISD::FP_EXTEND: return LowerF128_FPEXTEND(Op, DAG, *this);
3298 case ISD::FP_ROUND: return LowerF128_FPROUND(Op, DAG, *this);
3299 case ISD::ADDC:
3300 case ISD::ADDE:
3301 case ISD::SUBC:
3302 case ISD::SUBE: return LowerADDC_ADDE_SUBC_SUBE(Op, DAG);
3303 case ISD::UMULO:
3304 case ISD::SMULO: return LowerUMULO_SMULO(Op, DAG, *this);
3305 case ISD::ATOMIC_LOAD:
3306 case ISD::ATOMIC_STORE: return LowerATOMIC_LOAD_STORE(Op, DAG);
3308 }
3309}
3310
3312 const SDLoc &DL,
3313 SelectionDAG &DAG) const {
3314 APInt V = C->getValueAPF().bitcastToAPInt();
3315 SDValue Lo = DAG.getConstant(V.zextOrTrunc(32), DL, MVT::i32);
3316 SDValue Hi = DAG.getConstant(V.lshr(32).zextOrTrunc(32), DL, MVT::i32);
3317 if (DAG.getDataLayout().isLittleEndian())
3318 std::swap(Lo, Hi);
3319 return DAG.getBuildVector(MVT::v2i32, DL, {Hi, Lo});
3320}
3321
3323 DAGCombinerInfo &DCI) const {
3324 SDLoc dl(N);
3325 SDValue Src = N->getOperand(0);
3326
3327 if (isa<ConstantFPSDNode>(Src) && N->getSimpleValueType(0) == MVT::v2i32 &&
3328 Src.getSimpleValueType() == MVT::f64)
3329 return bitcastConstantFPToInt(cast<ConstantFPSDNode>(Src), dl, DCI.DAG);
3330
3331 return SDValue();
3332}
3333
3335 DAGCombinerInfo &DCI) const {
3336 switch (N->getOpcode()) {
3337 default:
3338 break;
3339 case ISD::BITCAST:
3340 return PerformBITCASTCombine(N, DCI);
3341 }
3342 return SDValue();
3343}
3344
3347 MachineBasicBlock *BB) const {
3348 switch (MI.getOpcode()) {
3349 default: llvm_unreachable("Unknown SELECT_CC!");
3350 case SP::SELECT_CC_Int_ICC:
3351 case SP::SELECT_CC_FP_ICC:
3352 case SP::SELECT_CC_DFP_ICC:
3353 case SP::SELECT_CC_QFP_ICC:
3354 if (Subtarget->isV9())
3355 return expandSelectCC(MI, BB, SP::BPICC);
3356 return expandSelectCC(MI, BB, SP::BCOND);
3357 case SP::SELECT_CC_Int_XCC:
3358 case SP::SELECT_CC_FP_XCC:
3359 case SP::SELECT_CC_DFP_XCC:
3360 case SP::SELECT_CC_QFP_XCC:
3361 return expandSelectCC(MI, BB, SP::BPXCC);
3362 case SP::SELECT_CC_Int_FCC:
3363 case SP::SELECT_CC_FP_FCC:
3364 case SP::SELECT_CC_DFP_FCC:
3365 case SP::SELECT_CC_QFP_FCC:
3366 if (Subtarget->isV9())
3367 return expandSelectCC(MI, BB, SP::FBCOND_V9);
3368 return expandSelectCC(MI, BB, SP::FBCOND);
3369 }
3370}
3371
3374 unsigned BROpcode) const {
3375 const TargetInstrInfo &TII = *Subtarget->getInstrInfo();
3376 DebugLoc dl = MI.getDebugLoc();
3377 unsigned CC = (SPCC::CondCodes)MI.getOperand(3).getImm();
3378
3379 // To "insert" a SELECT_CC instruction, we actually have to insert the
3380 // triangle control-flow pattern. The incoming instruction knows the
3381 // destination vreg to set, the condition code register to branch on, the
3382 // true/false values to select between, and the condition code for the branch.
3383 //
3384 // We produce the following control flow:
3385 // ThisMBB
3386 // | \
3387 // | IfFalseMBB
3388 // | /
3389 // SinkMBB
3390 const BasicBlock *LLVM_BB = BB->getBasicBlock();
3392
3393 MachineBasicBlock *ThisMBB = BB;
3394 MachineFunction *F = BB->getParent();
3395 MachineBasicBlock *IfFalseMBB = F->CreateMachineBasicBlock(LLVM_BB);
3396 MachineBasicBlock *SinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
3397 F->insert(It, IfFalseMBB);
3398 F->insert(It, SinkMBB);
3399
3400 // Transfer the remainder of ThisMBB and its successor edges to SinkMBB.
3401 SinkMBB->splice(SinkMBB->begin(), ThisMBB,
3402 std::next(MachineBasicBlock::iterator(MI)), ThisMBB->end());
3403 SinkMBB->transferSuccessorsAndUpdatePHIs(ThisMBB);
3404
3405 // Set the new successors for ThisMBB.
3406 ThisMBB->addSuccessor(IfFalseMBB);
3407 ThisMBB->addSuccessor(SinkMBB);
3408
3409 BuildMI(ThisMBB, dl, TII.get(BROpcode))
3410 .addMBB(SinkMBB)
3411 .addImm(CC);
3412
3413 // IfFalseMBB just falls through to SinkMBB.
3414 IfFalseMBB->addSuccessor(SinkMBB);
3415
3416 // %Result = phi [ %TrueValue, ThisMBB ], [ %FalseValue, IfFalseMBB ]
3417 BuildMI(*SinkMBB, SinkMBB->begin(), dl, TII.get(SP::PHI),
3418 MI.getOperand(0).getReg())
3419 .addReg(MI.getOperand(1).getReg())
3420 .addMBB(ThisMBB)
3421 .addReg(MI.getOperand(2).getReg())
3422 .addMBB(IfFalseMBB);
3423
3424 MI.eraseFromParent(); // The pseudo instruction is gone now.
3425 return SinkMBB;
3426}
3427
3428//===----------------------------------------------------------------------===//
3429// Sparc Inline Assembly Support
3430//===----------------------------------------------------------------------===//
3431
3432/// getConstraintType - Given a constraint letter, return the type of
3433/// constraint it is for this target.
3436 if (Constraint.size() == 1) {
3437 switch (Constraint[0]) {
3438 default: break;
3439 case 'r':
3440 case 'f':
3441 case 'e':
3442 return C_RegisterClass;
3443 case 'I': // SIMM13
3444 return C_Immediate;
3445 }
3446 }
3447
3448 return TargetLowering::getConstraintType(Constraint);
3449}
3450
3453 const char *constraint) const {
3455 Value *CallOperandVal = info.CallOperandVal;
3456 // If we don't have a value, we can't do a match,
3457 // but allow it at the lowest weight.
3458 if (!CallOperandVal)
3459 return CW_Default;
3460
3461 // Look at the constraint type.
3462 switch (*constraint) {
3463 default:
3465 break;
3466 case 'I': // SIMM13
3467 if (ConstantInt *C = dyn_cast<ConstantInt>(info.CallOperandVal)) {
3468 if (isInt<13>(C->getSExtValue()))
3469 weight = CW_Constant;
3470 }
3471 break;
3472 }
3473 return weight;
3474}
3475
3476/// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
3477/// vector. If it is invalid, don't add anything to Ops.
3479 SDValue Op, StringRef Constraint, std::vector<SDValue> &Ops,
3480 SelectionDAG &DAG) const {
3481 SDValue Result;
3482
3483 // Only support length 1 constraints for now.
3484 if (Constraint.size() > 1)
3485 return;
3486
3487 char ConstraintLetter = Constraint[0];
3488 switch (ConstraintLetter) {
3489 default: break;
3490 case 'I':
3491 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
3492 if (isInt<13>(C->getSExtValue())) {
3493 Result = DAG.getTargetConstant(C->getSExtValue(), SDLoc(Op),
3494 Op.getValueType());
3495 break;
3496 }
3497 return;
3498 }
3499 }
3500
3501 if (Result.getNode()) {
3502 Ops.push_back(Result);
3503 return;
3504 }
3505 TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
3506}
3507
3508std::pair<unsigned, const TargetRegisterClass *>
3510 StringRef Constraint,
3511 MVT VT) const {
3512 if (Constraint.empty())
3513 return std::make_pair(0U, nullptr);
3514
3515 if (Constraint.size() == 1) {
3516 switch (Constraint[0]) {
3517 case 'r':
3518 if (VT == MVT::v2i32)
3519 return std::make_pair(0U, &SP::IntPairRegClass);
3520 else if (Subtarget->is64Bit())
3521 return std::make_pair(0U, &SP::I64RegsRegClass);
3522 else
3523 return std::make_pair(0U, &SP::IntRegsRegClass);
3524 case 'f':
3525 if (VT == MVT::f32 || VT == MVT::i32)
3526 return std::make_pair(0U, &SP::FPRegsRegClass);
3527 else if (VT == MVT::f64 || VT == MVT::i64)
3528 return std::make_pair(0U, &SP::LowDFPRegsRegClass);
3529 else if (VT == MVT::f128)
3530 return std::make_pair(0U, &SP::LowQFPRegsRegClass);
3531 // This will generate an error message
3532 return std::make_pair(0U, nullptr);
3533 case 'e':
3534 if (VT == MVT::f32 || VT == MVT::i32)
3535 return std::make_pair(0U, &SP::FPRegsRegClass);
3536 else if (VT == MVT::f64 || VT == MVT::i64 )
3537 return std::make_pair(0U, &SP::DFPRegsRegClass);
3538 else if (VT == MVT::f128)
3539 return std::make_pair(0U, &SP::QFPRegsRegClass);
3540 // This will generate an error message
3541 return std::make_pair(0U, nullptr);
3542 }
3543 }
3544
3545 if (Constraint.front() != '{')
3546 return std::make_pair(0U, nullptr);
3547
3548 assert(Constraint.back() == '}' && "Not a brace enclosed constraint?");
3549 StringRef RegName(Constraint.data() + 1, Constraint.size() - 2);
3550 if (RegName.empty())
3551 return std::make_pair(0U, nullptr);
3552
3553 unsigned long long RegNo;
3554 // Handle numbered register aliases.
3555 if (RegName[0] == 'r' &&
3556 getAsUnsignedInteger(RegName.begin() + 1, 10, RegNo)) {
3557 // r0-r7 -> g0-g7
3558 // r8-r15 -> o0-o7
3559 // r16-r23 -> l0-l7
3560 // r24-r31 -> i0-i7
3561 if (RegNo > 31)
3562 return std::make_pair(0U, nullptr);
3563 const char RegTypes[] = {'g', 'o', 'l', 'i'};
3564 char RegType = RegTypes[RegNo / 8];
3565 char RegIndex = '0' + (RegNo % 8);
3566 char Tmp[] = {'{', RegType, RegIndex, '}', 0};
3567 return getRegForInlineAsmConstraint(TRI, Tmp, VT);
3568 }
3569
3570 // Rewrite the fN constraint according to the value type if needed.
3571 if (VT != MVT::f32 && VT != MVT::Other && RegName[0] == 'f' &&
3572 getAsUnsignedInteger(RegName.begin() + 1, 10, RegNo)) {
3573 if (VT == MVT::f64 && (RegNo % 2 == 0)) {
3575 TRI, StringRef("{d" + utostr(RegNo / 2) + "}"), VT);
3576 } else if (VT == MVT::f128 && (RegNo % 4 == 0)) {
3578 TRI, StringRef("{q" + utostr(RegNo / 4) + "}"), VT);
3579 } else {
3580 return std::make_pair(0U, nullptr);
3581 }
3582 }
3583
3584 auto ResultPair =
3586 if (!ResultPair.second)
3587 return std::make_pair(0U, nullptr);
3588
3589 // Force the use of I64Regs over IntRegs for 64-bit values.
3590 if (Subtarget->is64Bit() && VT == MVT::i64) {
3591 assert(ResultPair.second == &SP::IntRegsRegClass &&
3592 "Unexpected register class");
3593 return std::make_pair(ResultPair.first, &SP::I64RegsRegClass);
3594 }
3595
3596 return ResultPair;
3597}
3598
3599bool
3601 // The Sparc target isn't yet aware of offsets.
3602 return false;
3603}
3604
3607 SelectionDAG &DAG) const {
3608
3609 SDLoc dl(N);
3610
3611 RTLIB::Libcall libCall = RTLIB::UNKNOWN_LIBCALL;
3612
3613 switch (N->getOpcode()) {
3614 default:
3615 llvm_unreachable("Do not know how to custom type legalize this operation!");
3616
3617 case ISD::FP_TO_SINT:
3618 case ISD::FP_TO_UINT:
3619 // Custom lower only if it involves f128 or i64.
3620 if (N->getOperand(0).getValueType() != MVT::f128
3621 || N->getValueType(0) != MVT::i64)
3622 return;
3623 libCall = ((N->getOpcode() == ISD::FP_TO_SINT)
3624 ? RTLIB::FPTOSINT_F128_I64
3625 : RTLIB::FPTOUINT_F128_I64);
3626
3627 Results.push_back(LowerF128Op(SDValue(N, 0),
3628 DAG,
3629 getLibcallName(libCall),
3630 1));
3631 return;
3632 case ISD::READCYCLECOUNTER: {
3633 assert(Subtarget->hasLeonCycleCounter());
3634 SDValue Lo = DAG.getCopyFromReg(N->getOperand(0), dl, SP::ASR23, MVT::i32);
3635 SDValue Hi = DAG.getCopyFromReg(Lo, dl, SP::G0, MVT::i32);
3636 SDValue Ops[] = { Lo, Hi };
3637 SDValue Pair = DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Ops);
3638 Results.push_back(Pair);
3639 Results.push_back(N->getOperand(0));
3640 return;
3641 }
3642 case ISD::SINT_TO_FP:
3643 case ISD::UINT_TO_FP:
3644 // Custom lower only if it involves f128 or i64.
3645 if (N->getValueType(0) != MVT::f128
3646 || N->getOperand(0).getValueType() != MVT::i64)
3647 return;
3648
3649 libCall = ((N->getOpcode() == ISD::SINT_TO_FP)
3650 ? RTLIB::SINTTOFP_I64_F128
3651 : RTLIB::UINTTOFP_I64_F128);
3652
3653 Results.push_back(LowerF128Op(SDValue(N, 0),
3654 DAG,
3655 getLibcallName(libCall),
3656 1));
3657 return;
3658 case ISD::LOAD: {
3659 LoadSDNode *Ld = cast<LoadSDNode>(N);
3660 // Custom handling only for i64: turn i64 load into a v2i32 load,
3661 // and a bitcast.
3662 if (Ld->getValueType(0) != MVT::i64 || Ld->getMemoryVT() != MVT::i64)
3663 return;
3664
3665 SDLoc dl(N);
3666 SDValue LoadRes = DAG.getExtLoad(
3667 Ld->getExtensionType(), dl, MVT::v2i32, Ld->getChain(),
3668 Ld->getBasePtr(), Ld->getPointerInfo(), MVT::v2i32,
3669 Ld->getOriginalAlign(), Ld->getMemOperand()->getFlags(),
3670 Ld->getAAInfo());
3671
3672 SDValue Res = DAG.getNode(ISD::BITCAST, dl, MVT::i64, LoadRes);
3673 Results.push_back(Res);
3674 Results.push_back(LoadRes.getValue(1));
3675 return;
3676 }
3677 }
3678}
3679
3680// Override to enable LOAD_STACK_GUARD lowering on Linux.
3682 if (!Subtarget->isTargetLinux())
3684 return true;
3685}
3686
3687// Override to disable global variable loading on Linux.
3689 if (!Subtarget->isTargetLinux())
3691}
3692
3694 SDNode *Node) const {
3695 assert(MI.getOpcode() == SP::SUBCCrr || MI.getOpcode() == SP::SUBCCri);
3696 // If the result is dead, replace it with %g0.
3697 if (!Node->hasAnyUseOfValue(0))
3698 MI.getOperand(0).setReg(SP::G0);
3699}
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
static SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG)
static SDValue LowerSTORE(SDValue Op, SelectionDAG &DAG, const ARMSubtarget *Subtarget)
Function Alias Analysis Results
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
return RetTy
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
uint64_t Size
static bool isSigned(unsigned int Opcode)
const HexagonInstrInfo * TII
IRTranslator LLVM IR MI
#define RegName(no)
static LPCC::CondCode IntCondCCodeToICC(SDValue CC, const SDLoc &DL, SDValue &RHS, SelectionDAG &DAG)
lazy value info
#define F(x, y, z)
Definition: MD5.cpp:55
#define G(x, y, z)
Definition: MD5.cpp:56
unsigned const TargetRegisterInfo * TRI
Module.h This file contains the declarations for the Module class.
LLVMContext & Context
const char LLVMTargetMachineRef TM
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
static SDValue LowerFP_TO_UINT(SDValue Op, SelectionDAG &DAG, const SparcTargetLowering &TLI, bool hasHardQuad)
static bool CC_Sparc_Assign_Ret_Split_64(unsigned &ValNo, MVT &ValVT, MVT &LocVT, CCValAssign::LocInfo &LocInfo, ISD::ArgFlagsTy &ArgFlags, CCState &State)
static SDValue LowerUINT_TO_FP(SDValue Op, SelectionDAG &DAG, const SparcTargetLowering &TLI, bool hasHardQuad)
static bool CC_Sparc_Assign_Split_64(unsigned &ValNo, MVT &ValVT, MVT &LocVT, CCValAssign::LocInfo &LocInfo, ISD::ArgFlagsTy &ArgFlags, CCState &State)
static SDValue getFRAMEADDR(uint64_t depth, SDValue Op, SelectionDAG &DAG, const SparcSubtarget *Subtarget, bool AlwaysFlush=false)
static unsigned toCallerWindow(unsigned Reg)
static SDValue LowerF128Store(SDValue Op, SelectionDAG &DAG)
static SPCC::CondCodes intCondCCodeToRcond(ISD::CondCode CC)
intCondCCodeToRcond - Convert a DAG integer condition code to a SPARC rcond condition.
static SDValue LowerLOAD(SDValue Op, SelectionDAG &DAG)
static void fixupVariableFloatArgs(SmallVectorImpl< CCValAssign > &ArgLocs, ArrayRef< ISD::OutputArg > Outs)
static SDValue LowerFP_TO_SINT(SDValue Op, SelectionDAG &DAG, const SparcTargetLowering &TLI, bool hasHardQuad)
static SPCC::CondCodes FPCondCCodeToFCC(ISD::CondCode CC)
FPCondCCodeToFCC - Convert a DAG floatingp oint condition code to a SPARC FCC condition.
static bool isAnyArgRegReserved(const SparcRegisterInfo *TRI, const MachineFunction &MF)
static SDValue getFLUSHW(SDValue Op, SelectionDAG &DAG)
static bool hasReturnsTwiceAttr(SelectionDAG &DAG, SDValue Callee, const CallBase *Call)
static SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG, const SparcSubtarget *Subtarget)
static SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG, const SparcSubtarget *Subtarget)
static SDValue LowerUMULO_SMULO(SDValue Op, SelectionDAG &DAG, const SparcTargetLowering &TLI)
static SDValue LowerF128_FPROUND(SDValue Op, SelectionDAG &DAG, const SparcTargetLowering &TLI)
static SDValue LowerF64Op(SDValue SrcReg64, const SDLoc &dl, SelectionDAG &DAG, unsigned opcode)
static bool RetCC_Sparc64_Full(unsigned &ValNo, MVT &ValVT, MVT &LocVT, CCValAssign::LocInfo &LocInfo, ISD::ArgFlagsTy &ArgFlags, CCState &State)
static SDValue LowerBR_CC(SDValue Op, SelectionDAG &DAG, const SparcTargetLowering &TLI, bool hasHardQuad, bool isV9, bool is64Bit)
static void emitReservedArgRegCallError(const MachineFunction &MF)
static SDValue LowerATOMIC_LOAD_STORE(SDValue Op, SelectionDAG &DAG)
static SDValue LowerADDC_ADDE_SUBC_SUBE(SDValue Op, SelectionDAG &DAG)
static bool RetCC_Sparc64_Half(unsigned &ValNo, MVT &ValVT, MVT &LocVT, CCValAssign::LocInfo &LocInfo, ISD::ArgFlagsTy &ArgFlags, CCState &State)
static SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG, const SparcTargetLowering &TLI, bool hasHardQuad, bool isV9, bool is64Bit)
static SDValue LowerF128_FPEXTEND(SDValue Op, SelectionDAG &DAG, const SparcTargetLowering &TLI)
static SDValue LowerFNEGorFABS(SDValue Op, SelectionDAG &DAG, bool isV9)
static SDValue LowerVAARG(SDValue Op, SelectionDAG &DAG)
static bool CC_Sparc64_Half(unsigned &ValNo, MVT &ValVT, MVT &LocVT, CCValAssign::LocInfo &LocInfo, ISD::ArgFlagsTy &ArgFlags, CCState &State)
static bool CC_Sparc64_Full(unsigned &ValNo, MVT &ValVT, MVT &LocVT, CCValAssign::LocInfo &LocInfo, ISD::ArgFlagsTy &ArgFlags, CCState &State)
static bool CC_Sparc_Assign_SRet(unsigned &ValNo, MVT &ValVT, MVT &LocVT, CCValAssign::LocInfo &LocInfo, ISD::ArgFlagsTy &ArgFlags, CCState &State)
static bool Analyze_CC_Sparc64_Half(bool IsReturn, unsigned &ValNo, MVT &ValVT, MVT &LocVT, CCValAssign::LocInfo &LocInfo, ISD::ArgFlagsTy &ArgFlags, CCState &State)
static SDValue LowerF128Load(SDValue Op, SelectionDAG &DAG)
static SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG, const SparcTargetLowering &TLI, const SparcSubtarget *Subtarget)
static void LookThroughSetCC(SDValue &LHS, SDValue &RHS, ISD::CondCode CC, unsigned &SPCC)
static bool Analyze_CC_Sparc64_Full(bool IsReturn, unsigned &ValNo, MVT &ValVT, MVT &LocVT, CCValAssign::LocInfo &LocInfo, ISD::ArgFlagsTy &ArgFlags, CCState &State)
static SDValue LowerSINT_TO_FP(SDValue Op, SelectionDAG &DAG, const SparcTargetLowering &TLI, bool hasHardQuad)
This file contains some functions that are useful when dealing with strings.
This file implements the StringSwitch template, which mimics a switch() statement whose cases are str...
static bool is64Bit(const char *name)
Value * RHS
Value * LHS
Class for arbitrary precision integers.
Definition: APInt.h:76
This class represents an incoming formal argument to a Function.
Definition: Argument.h:28
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition: ArrayRef.h:41
an instruction that atomically reads a memory location, combines it with another value,...
Definition: Instructions.h:727
BinOp getOperation() const
Definition: Instructions.h:821
LLVM Basic Block Representation.
Definition: BasicBlock.h:60
CCState - This class holds information needed while lowering arguments and return values.
unsigned getFirstUnallocated(ArrayRef< MCPhysReg > Regs) const
getFirstUnallocated - Return the index of the first unallocated register in the set,...
void AnalyzeCallResult(const SmallVectorImpl< ISD::InputArg > &Ins, CCAssignFn Fn)
AnalyzeCallResult - Analyze the return values of a call, incorporating info about the passed values into this state.
MCRegister AllocateReg(MCPhysReg Reg)
AllocateReg - Attempt to allocate one register.
bool CheckReturn(const SmallVectorImpl< ISD::OutputArg > &Outs, CCAssignFn Fn)
CheckReturn - Analyze the return values of a function, returning true if the return can be performed without sret-demotion.
void AnalyzeReturn(const SmallVectorImpl< ISD::OutputArg > &Outs, CCAssignFn Fn)
AnalyzeReturn - Analyze the returned values of a return, incorporating info about the result values into this state.
int64_t AllocateStack(unsigned Size, Align Alignment)
AllocateStack - Allocate a chunk of stack space with the specified size and alignment.
void AnalyzeCallOperands(const SmallVectorImpl< ISD::OutputArg > &Outs, CCAssignFn Fn)
AnalyzeCallOperands - Analyze the outgoing arguments to a call, incorporating info about the passed values into this state.
uint64_t getStackSize() const
Returns the size of the currently allocated portion of the stack.
void AnalyzeFormalArguments(const SmallVectorImpl< ISD::InputArg > &Ins, CCAssignFn Fn)
AnalyzeFormalArguments - Analyze an array of argument values, incorporating info about the formals into this state.
void addLoc(const CCValAssign &V)
CCValAssign - Represent assignment of one arg/retval to a location.
bool isRegLoc() const
Register getLocReg() const
LocInfo getLocInfo() const
static CCValAssign getMem(unsigned ValNo, MVT ValVT, int64_t Offset, MVT LocVT, LocInfo HTP, bool IsCustom=false)
static CCValAssign getReg(unsigned ValNo, MVT ValVT, unsigned RegNo, MVT LocVT, LocInfo HTP, bool IsCustom=false)
bool needsCustom() const
bool isMemLoc() const
static CCValAssign getCustomReg(unsigned ValNo, MVT ValVT, unsigned RegNo, MVT LocVT, LocInfo HTP)
bool isExtInLoc() const
int64_t getLocMemOffset() const
unsigned getValNo() const
static CCValAssign getCustomMem(unsigned ValNo, MVT ValVT, int64_t Offset, MVT LocVT, LocInfo HTP)
Base class for all callable instructions (InvokeInst and CallInst). Holds everything related to calling a function.
Definition: InstrTypes.h:1259
This is the shared class of boolean and integer constants.
Definition: Constants.h:79
This class represents an Operation in the Expression.
A parsed version of the target data layout string in and methods for querying it.
Definition: DataLayout.h:110
bool isLittleEndian() const
Layout endianness...
Definition: DataLayout.h:238
TypeSize getTypeAllocSize(Type *Ty) const
Returns the offset in bytes between successive objects of the specified type, including alignment pad...
Definition: DataLayout.h:504
A debug info location.
Definition: DebugLoc.h:33
Diagnostic information for unsupported feature in backend.
bool hasStructRetAttr() const
Determine if the function returns a structure through first or second pointer argument.
Definition: Function.h:658
bool hasFnAttribute(Attribute::AttrKind Kind) const
Return true if the function has the attribute.
Definition: Function.cpp:669
const GlobalValue * getGlobal() const
Module * getParent()
Get the module that this global value is contained inside of...
Definition: GlobalValue.h:655
This is an important class for using LLVM in a threaded context.
Definition: LLVMContext.h:67
This class is used to represent ISD::LOAD nodes.
const SDValue & getBasePtr() const
const SDValue & getOffset() const
ISD::LoadExtType getExtensionType() const
Return whether this is a plain node, or one of the varieties of value-extending loads.
Machine Value Type.
static auto integer_fixedlen_vector_valuetypes()
static auto integer_valuetypes()
TypeSize getSizeInBits() const
Returns the size of the specified MVT in bits.
static MVT getIntegerVT(unsigned BitWidth)
static auto fp_valuetypes()
void transferSuccessorsAndUpdatePHIs(MachineBasicBlock *FromMBB)
Transfers all the successors, as in transferSuccessors, and update PHI operands in the successor blocks which refer to FromMBB to refer to this.
const BasicBlock * getBasicBlock() const
Return the LLVM basic block that this instance corresponded to originally.
void addSuccessor(MachineBasicBlock *Succ, BranchProbability Prob=BranchProbability::getUnknown())
Add Succ as a successor of this MachineBasicBlock.
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
void splice(iterator Where, MachineBasicBlock *Other, iterator From)
Take an instruction from MBB 'Other' at the position From, and insert it into this MBB right before 'Where'.
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
int CreateFixedObject(uint64_t Size, int64_t SPOffset, bool IsImmutable, bool isAliased=false)
Create a new object at a fixed location on the stack.
int CreateStackObject(uint64_t Size, Align Alignment, bool isSpillSlot, const AllocaInst *Alloca=nullptr, uint8_t ID=0)
Create a new statically sized stack object, returning a nonnegative identifier to represent it.
void setFrameAddressIsTaken(bool T)
void setHasTailCall(bool V=true)
void setReturnAddressIsTaken(bool s)
StringRef getName() const
getName - Return the name of the corresponding LLVM function.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
const DataLayout & getDataLayout() const
Return the DataLayout attached to the Module associated to this MF.
Function & getFunction()
Return the LLVM function that this machine code represents.
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do...
Register addLiveIn(MCRegister PReg, const TargetRegisterClass *RC)
addLiveIn - Add the specified physical register as a live-in value and create a corresponding virtual register for it.
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & addReg(Register RegNo, unsigned flags=0, unsigned SubReg=0) c